| code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3–1.05M) |
|---|---|---|---|---|---|
# If we list all the natural numbers below 10 that are multiples of 3 or 5,
# we get 3, 5, 6 and 9. The sum of these multiples is 23.
# range: search-space. E.g.: [1..10]
# mult: multiples. E.g.: [3, 5]
# Find the sum of all the multiples of 3 or 5 below 1000.
def findSum():
s = 0
rng = list(range(1, 1000)) # defines search-space.
for i in rng:
if i%3 == 0 or i%5 == 0: # check multiples (3 or 5 here).
s += i
print(s)
#Test
findSum()
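# An equivalent closed-form sketch (added for illustration; the helper names below are
# hypothetical, not part of the original file): inclusion-exclusion over arithmetic series
# gives the same total without looping.
def sumOfMultiplesBelow(limit, k):
    # k + 2k + ... + n*k = k * n * (n + 1) / 2, where n is the count of multiples of k below limit
    n = (limit - 1) // k
    return k * n * (n + 1) // 2

def findSumClosedForm(limit=1000):
    # multiples of 3, plus multiples of 5, minus the double-counted multiples of 15
    return (sumOfMultiplesBelow(limit, 3) + sumOfMultiplesBelow(limit, 5)
            - sumOfMultiplesBelow(limit, 15))

print(findSumClosedForm())  # 233168, matching findSum() above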
| a-n-d-r-e-i/peuler | multiSum.py | Python | gpl-3.0 | 506 |
# Copyright (c) 2015 Red Hat Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib.api.definitions import network
from neutron_lib.api.definitions import port
from neutron_lib import constants
from neutron_lib.db import constants as db_const
from neutron_lib.services.qos import constants as qos_const
BANDWIDTH_LIMIT_RULES = "bandwidth_limit_rules"
RULE_TYPES = "rule_types"
POLICIES = 'policies'
POLICY = 'policy'
DSCP_MARKING_RULES = 'dscp_marking_rules'
MIN_BANDWIDTH_RULES = 'minimum_bandwidth_rules'
_QOS_RULE_COMMON_FIELDS = {
'id': {
'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'is_filter': True,
'is_sort_key': True,
'primary_key': True
},
'tenant_id': {
'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'is_visible': True
}
}
ALIAS = 'qos'
IS_SHIM_EXTENSION = False
IS_STANDARD_ATTR_EXTENSION = False
NAME = 'Quality of Service'
API_PREFIX = '/' + ALIAS
DESCRIPTION = 'The Quality of Service extension.'
UPDATED_TIMESTAMP = '2015-06-08T10:00:00-00:00'
RESOURCE_ATTRIBUTE_MAP = {
POLICIES: {
'id': {
'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_filter': True, 'is_sort_key': True,
'is_visible': True, 'primary_key': True
},
'name': {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': '',
'is_filter': True, 'is_sort_key': True,
'validate': {'type:string': db_const.NAME_FIELD_SIZE}},
constants.SHARED: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': False,
'is_filter': True,
'convert_to': converters.convert_to_boolean
},
'tenant_id': {
'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': db_const.PROJECT_ID_FIELD_SIZE},
'is_filter': True, 'is_sort_key': True,
'is_visible': True
},
'rules': {
'allow_post': False,
'allow_put': False,
'is_visible': True
}
},
RULE_TYPES: {
'type': {
'allow_post': False, 'allow_put': False,
'is_visible': True
}
},
port.COLLECTION_NAME: {
qos_const.QOS_POLICY_ID: {
'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': None,
'enforce_policy': True,
'validate': {'type:uuid_or_none': None}
}
},
network.COLLECTION_NAME: {
qos_const.QOS_POLICY_ID: {
'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': None,
'enforce_policy': True,
'validate': {'type:uuid_or_none': None}
}
}
}
_PARENT = {
'collection_name': POLICIES,
'member_name': POLICY
}
SUB_RESOURCE_ATTRIBUTE_MAP = {
BANDWIDTH_LIMIT_RULES: {
'parent': _PARENT,
'parameters': dict(
_QOS_RULE_COMMON_FIELDS,
**{qos_const.MAX_KBPS: {
'allow_post': True, 'allow_put': True,
'convert_to': converters.convert_to_int,
'is_visible': True,
'is_filter': True,
'is_sort_key': True,
'validate': {
'type:range': [0, db_const.DB_INTEGER_MAX_VALUE]}
},
qos_const.MAX_BURST: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': 0,
'is_filter': True,
'is_sort_key': True,
'convert_to': converters.convert_to_int,
'validate': {
'type:range': [0, db_const.DB_INTEGER_MAX_VALUE]}}}),
},
DSCP_MARKING_RULES: {
'parent': _PARENT,
'parameters': dict(
_QOS_RULE_COMMON_FIELDS,
**{qos_const.DSCP_MARK: {
'allow_post': True, 'allow_put': True,
'convert_to': converters.convert_to_int,
'is_visible': True,
'is_filter': True,
'is_sort_key': True,
'validate': {
'type:values': constants.VALID_DSCP_MARKS}}})
},
MIN_BANDWIDTH_RULES: {
'parent': _PARENT,
'parameters': dict(
_QOS_RULE_COMMON_FIELDS,
**{qos_const.MIN_KBPS: {
'allow_post': True, 'allow_put': True,
'is_visible': True,
'is_filter': True,
'is_sort_key': True,
'convert_to': converters.convert_to_int,
'validate': {
'type:range': [0, db_const.DB_INTEGER_MAX_VALUE]}},
qos_const.DIRECTION: {
'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': constants.EGRESS_DIRECTION,
'is_filter': True,
'is_sort_key': True,
'validate': {
'type:values': [constants.EGRESS_DIRECTION]}}})
}
}
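# Note: each sub-resource above merges _QOS_RULE_COMMON_FIELDS ('id', 'tenant_id') with its
# rule-specific attributes (e.g. qos_const.MAX_KBPS and qos_const.MAX_BURST for bandwidth
# limit rules) via dict(_QOS_RULE_COMMON_FIELDS, **{...}).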
ACTION_MAP = {}
REQUIRED_EXTENSIONS = []
OPTIONAL_EXTENSIONS = []
ACTION_STATUS = {}
| openstack/neutron-lib | neutron_lib/api/definitions/qos.py | Python | apache-2.0 | 6,023 |
# -*- coding: utf-8 -*-
"""
a simple way to remember to remove dead code that exists only to support
Python versions a library no longer supports.
"""
from .insupportable import *
__author__ = 'Matthias Bussonnier'
__email__ = 'bussonniermatthias@gmail.com'
__version__ = '0.1.2'
| Carreau/insupportable | insupportable/__init__.py | Python | bsd-3-clause | 264 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for the OGR/GPKG provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Even Rouault'
__date__ = '2016-04-21'
__copyright__ = 'Copyright 2016, Even Rouault'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import shutil
import sys
import tempfile
import time
import qgis # NOQA
from osgeo import gdal, ogr
from qgis.core import (QgsFeature,
QgsCoordinateReferenceSystem,
QgsFeatureRequest,
QgsFields,
QgsField,
QgsFieldConstraints,
QgsGeometry,
QgsRectangle,
QgsSettings,
QgsVectorLayer,
QgsVectorLayerExporter,
QgsPointXY,
QgsProject,
QgsWkbTypes,
QgsDataProvider,
QgsVectorDataProvider)
from qgis.PyQt.QtCore import QCoreApplication, QVariant
from qgis.testing import start_app, unittest
from qgis.utils import spatialite_connect
def GDAL_COMPUTE_VERSION(maj, min, rev):
return ((maj) * 1000000 + (min) * 10000 + (rev) * 100)
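# e.g. GDAL_COMPUTE_VERSION(2, 1, 2) == 2 * 1000000 + 1 * 10000 + 2 * 100 == 2010200,
# which allows simple integer comparison against gdal.VersionInfo('VERSION_NUM')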
class ErrorReceiver():
def __init__(self):
self.msg = None
def receiveError(self, msg):
self.msg = msg
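# Linux-only helper: counts how many of this process's open file descriptors point at
# filename_to_test by scanning /proc/<pid>/fd; returns -1 on platforms where the check is skipped.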
def count_opened_filedescriptors(filename_to_test):
count = -1
if sys.platform.startswith('linux'):
count = 0
open_files_dirname = '/proc/%d/fd' % os.getpid()
filenames = os.listdir(open_files_dirname)
for filename in filenames:
full_filename = open_files_dirname + '/' + filename
if os.path.exists(full_filename):
link = os.readlink(full_filename)
if os.path.basename(link) == os.path.basename(filename_to_test):
count += 1
return count
class TestPyQgsOGRProviderGpkg(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain("TestPyQgsOGRProviderGpkg.com")
QCoreApplication.setApplicationName("TestPyQgsOGRProviderGpkg")
QgsSettings().clear()
start_app()
# Create test layer
cls.basetestpath = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
shutil.rmtree(cls.basetestpath, True)
QgsSettings().clear()
def testSingleToMultiPolygonPromotion(self):
tmpfile = os.path.join(self.basetestpath, 'testSingleToMultiPolygonPromotion.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds.CreateLayer('test', geom_type=ogr.wkbMultiPolygon)
ds = None
vl = QgsVectorLayer('{}|layerid=0'.format(tmpfile), 'test', 'ogr')
f = QgsFeature()
f.setGeometry(QgsGeometry.fromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
vl.dataProvider().addFeatures([f])
got = [feat for feat in vl.getFeatures()][0]
got_geom = got.geometry()
reference = QgsGeometry.fromWkt('MultiPolygon (((0 0, 0 1, 1 1, 0 0)))')
# The geometries must be byte-for-byte identical
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
def testCurveGeometryType(self):
tmpfile = os.path.join(self.basetestpath, 'testCurveGeometryType.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds.CreateLayer('test', geom_type=ogr.wkbCurvePolygon)
ds = None
vl = QgsVectorLayer('{}'.format(tmpfile), 'test', 'ogr')
self.assertEqual(vl.dataProvider().subLayers(), [QgsDataProvider.SUBLAYER_SEPARATOR.join(['0', 'test', '0', 'CurvePolygon', 'geom'])])
f = QgsFeature()
f.setGeometry(QgsGeometry.fromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
vl.dataProvider().addFeatures([f])
got = [feat for feat in vl.getFeatures()][0]
got_geom = got.geometry()
reference = QgsGeometry.fromWkt('CurvePolygon (((0 0, 0 1, 1 1, 0 0)))')
# The geometries must be byte-for-byte identical
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
def internalTestBug15351(self, orderClosing):
tmpfile = os.path.join(self.basetestpath, 'testBug15351.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile), u'test', u'ogr')
self.assertTrue(vl.startEditing())
self.assertTrue(vl.changeGeometry(1, QgsGeometry.fromWkt('Point (3 50)')))
# Iterate over features (will open a new OGR connection), but do not
# close the iterator for now
it = vl.getFeatures()
f = QgsFeature()
it.nextFeature(f)
if orderClosing == 'closeIter_commit_closeProvider':
it = None
# Commit changes
cbk = ErrorReceiver()
vl.dataProvider().raiseError.connect(cbk.receiveError)
self.assertTrue(vl.commitChanges())
self.assertIsNone(cbk.msg)
# Close layer and iterator in different orders
if orderClosing == 'closeIter_commit_closeProvider':
vl = None
elif orderClosing == 'commit_closeProvider_closeIter':
vl = None
it = None
else:
assert orderClosing == 'commit_closeIter_closeProvider'
it = None
vl = None
# Test that we succeeded in restoring the default journal mode, and that we
# are not left in WAL mode.
ds = ogr.Open(tmpfile)
lyr = ds.ExecuteSQL('PRAGMA journal_mode')
f = lyr.GetNextFeature()
res = f.GetField(0)
ds.ReleaseResultSet(lyr)
ds = None
self.assertEqual(res, 'delete')
# We need GDAL 2.0 to issue PRAGMA journal_mode
# Note: for that case, we don't strictly need WAL to be turned on
def testBug15351_closeIter_commit_closeProvider(self):
self.internalTestBug15351('closeIter_commit_closeProvider')
# We need GDAL 2.0 to issue PRAGMA journal_mode
def testBug15351_commit_closeProvider_closeIter(self):
self.internalTestBug15351('commit_closeProvider_closeIter')
# We need GDAL 2.0 to issue PRAGMA journal_mode
def testBug15351_commit_closeIter_closeProvider(self):
self.internalTestBug15351('commit_closeIter_closeProvider')
@unittest.skipIf(int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 1, 2), 'requires GDAL >= 2.1.2')
def testGeopackageExtentUpdate(self):
''' test https://issues.qgis.org/issues/15273 '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageExtentUpdate.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(1 1)'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(1 0.5)'))
lyr.CreateFeature(f)
f = None
gdal.ErrorReset()
ds.ExecuteSQL('RECOMPUTE EXTENT ON test')
has_error = gdal.GetLastErrorMsg() != ''
ds = None
if has_error:
print('Too old GDAL trunk version. Please update')
return
vl = QgsVectorLayer(u'{}'.format(tmpfile), u'test', u'ogr')
# Test moving a geometry that touches the bbox
self.assertTrue(vl.startEditing())
self.assertTrue(vl.changeGeometry(1, QgsGeometry.fromWkt('Point (0.5 0)')))
self.assertTrue(vl.commitChanges())
reference = QgsGeometry.fromRect(QgsRectangle(0.5, 0.0, 1.0, 1.0))
provider_extent = QgsGeometry.fromRect(vl.extent())
self.assertTrue(QgsGeometry.compare(provider_extent.asPolygon()[0], reference.asPolygon()[0], 0.00001),
provider_extent.asPolygon()[0])
# Test deleting a geometry that touches the bbox
self.assertTrue(vl.startEditing())
self.assertTrue(vl.deleteFeature(2))
self.assertTrue(vl.commitChanges())
reference = QgsGeometry.fromRect(QgsRectangle(0.5, 0.0, 1.0, 0.5))
provider_extent = QgsGeometry.fromRect(vl.extent())
self.assertTrue(QgsGeometry.compare(provider_extent.asPolygon()[0], reference.asPolygon()[0], 0.00001),
provider_extent.asPolygon()[0])
def testSelectSubsetString(self):
tmpfile = os.path.join(self.basetestpath, 'testSelectSubsetString.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbMultiPolygon)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'bar'
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'baz'
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer('{}|layerid=0'.format(tmpfile), 'test', 'ogr')
vl.setSubsetString("SELECT fid, foo FROM test WHERE foo = 'baz'")
got = [feat for feat in vl.getFeatures()]
self.assertEqual(len(got), 1)
def testStyle(self):
# First test with invalid URI
vl = QgsVectorLayer('/idont/exist.gpkg', 'test', 'ogr')
self.assertFalse(vl.dataProvider().isSaveAndLoadStyleToDatabaseSupported())
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, -1)
self.assertEqual(idlist, [])
self.assertEqual(namelist, [])
self.assertEqual(desclist, [])
self.assertNotEqual(errmsg, "")
qml, errmsg = vl.getStyleFromDatabase("1")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
qml, success = vl.loadNamedStyle('/idont/exist.gpkg')
self.assertFalse(success)
errorMsg = vl.saveStyleToDatabase("name", "description", False, "")
self.assertNotEqual(errorMsg, "")
# Now with valid URI
tmpfile = os.path.join(self.basetestpath, 'testStyle.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbMultiPolygon)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'bar'
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('test2', geom_type=ogr.wkbMultiPolygon)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'bar'
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer('{}|layername=test'.format(tmpfile), 'test', 'ogr')
self.assertTrue(vl.isValid())
vl2 = QgsVectorLayer('{}|layername=test2'.format(tmpfile), 'test2', 'ogr')
self.assertTrue(vl2.isValid())
self.assertTrue(vl.dataProvider().isSaveAndLoadStyleToDatabaseSupported())
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 0)
self.assertEqual(idlist, [])
self.assertEqual(namelist, [])
self.assertEqual(desclist, [])
self.assertNotEqual(errmsg, "")
qml, errmsg = vl.getStyleFromDatabase("not_existing")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
qml, success = vl.loadNamedStyle('{}|layerid=0'.format(tmpfile))
self.assertFalse(success)
errorMsg = vl.saveStyleToDatabase("name", "description", False, "")
self.assertEqual(errorMsg, "")
qml, errmsg = vl.getStyleFromDatabase("not_existing")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 1)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1'])
self.assertEqual(namelist, ['name'])
self.assertEqual(desclist, ['description'])
qml, errmsg = vl.getStyleFromDatabase("100")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
qml, errmsg = vl.getStyleFromDatabase("1")
self.assertTrue(qml.startswith('<!DOCTYPE qgis'), qml)
self.assertEqual(errmsg, "")
# Try overwrite it but simulate answer no
settings = QgsSettings()
settings.setValue("/qgis/overwriteStyle", False)
errorMsg = vl.saveStyleToDatabase("name", "description_bis", False, "")
self.assertNotEqual(errorMsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 1)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1'])
self.assertEqual(namelist, ['name'])
self.assertEqual(desclist, ['description'])
# Try overwrite it and simulate answer yes
settings = QgsSettings()
settings.setValue("/qgis/overwriteStyle", True)
errorMsg = vl.saveStyleToDatabase("name", "description_bis", False, "")
self.assertEqual(errorMsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 1)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1'])
self.assertEqual(namelist, ['name'])
self.assertEqual(desclist, ['description_bis'])
errorMsg = vl2.saveStyleToDatabase("name_test2", "description_test2", True, "")
self.assertEqual(errorMsg, "")
errorMsg = vl.saveStyleToDatabase("name2", "description2", True, "")
self.assertEqual(errorMsg, "")
errorMsg = vl.saveStyleToDatabase("name3", "description3", True, "")
self.assertEqual(errorMsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 3)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1', '3', '4', '2'])
self.assertEqual(namelist, ['name', 'name2', 'name3', 'name_test2'])
self.assertEqual(desclist, ['description_bis', 'description2', 'description3', 'name_test2'])
# Check that the layers_style table is not listed in subLayers()
vl = QgsVectorLayer(tmpfile, 'test', 'ogr')
sublayers = vl.dataProvider().subLayers()
self.assertEqual(len(sublayers), 2, sublayers)
def testDisablewalForSqlite3(self):
''' Test disabling walForSqlite3 setting '''
QgsSettings().setValue("/qgis/walForSqlite3", False)
tmpfile = os.path.join(self.basetestpath, 'testDisablewalForSqlite3.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('attr0', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('attr1', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile), u'test', u'ogr')
# Test that we are using default delete mode and not WAL
ds = ogr.Open(tmpfile)
lyr = ds.ExecuteSQL('PRAGMA journal_mode')
f = lyr.GetNextFeature()
res = f.GetField(0)
ds.ReleaseResultSet(lyr)
ds = None
self.assertEqual(res, 'delete')
self.assertTrue(vl.startEditing())
feature = next(vl.getFeatures())
self.assertTrue(vl.changeAttributeValue(feature.id(), 1, 1001))
# Commit changes
cbk = ErrorReceiver()
vl.dataProvider().raiseError.connect(cbk.receiveError)
self.assertTrue(vl.commitChanges())
self.assertIsNone(cbk.msg)
vl = None
QgsSettings().setValue("/qgis/walForSqlite3", None)
def testSimulatedDBManagerImport(self):
uri = 'point?field=f1:int'
uri += '&field=f2:double(6,4)'
uri += '&field=f3:string(20)'
lyr = QgsVectorLayer(uri, "x", "memory")
self.assertTrue(lyr.isValid())
f = QgsFeature(lyr.fields())
f['f1'] = 1
f['f2'] = 123.456
f['f3'] = '12345678.90123456789'
f2 = QgsFeature(lyr.fields())
f2['f1'] = 2
lyr.dataProvider().addFeatures([f, f2])
tmpfile = os.path.join(self.basetestpath, 'testSimulatedDBManagerImport.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds = None
options = {}
options['update'] = True
options['driverName'] = 'GPKG'
options['layerName'] = 'my_out_table'
err = QgsVectorLayerExporter.exportLayer(lyr, tmpfile, "ogr", lyr.crs(), False, options)
self.assertEqual(err[0], QgsVectorLayerExporter.NoError,
'unexpected import error {0}'.format(err))
lyr = QgsVectorLayer(tmpfile + "|layername=my_out_table", "y", "ogr")
self.assertTrue(lyr.isValid())
features = lyr.getFeatures()
f = next(features)
self.assertEqual(f['f1'], 1)
self.assertEqual(f['f2'], 123.456)
self.assertEqual(f['f3'], '12345678.90123456789')
f = next(features)
self.assertEqual(f['f1'], 2)
features = None
# Test overwriting without overwrite option
err = QgsVectorLayerExporter.exportLayer(lyr, tmpfile, "ogr", lyr.crs(), False, options)
self.assertEqual(err[0], QgsVectorLayerExporter.ErrCreateDataSource)
# Test overwriting
lyr = QgsVectorLayer(uri, "x", "memory")
self.assertTrue(lyr.isValid())
f = QgsFeature(lyr.fields())
f['f1'] = 3
lyr.dataProvider().addFeatures([f])
options['overwrite'] = True
err = QgsVectorLayerExporter.exportLayer(lyr, tmpfile, "ogr", lyr.crs(), False, options)
self.assertEqual(err[0], QgsVectorLayerExporter.NoError,
'unexpected import error {0}'.format(err))
lyr = QgsVectorLayer(tmpfile + "|layername=my_out_table", "y", "ogr")
self.assertTrue(lyr.isValid())
features = lyr.getFeatures()
f = next(features)
self.assertEqual(f['f1'], 3)
features = None
def testExportLayerToExistingDatabase(self):
fields = QgsFields()
fields.append(QgsField('f1', QVariant.Int))
tmpfile = os.path.join(self.basetestpath, 'testCreateNewGeopackage.gpkg')
options = {}
options['update'] = True
options['driverName'] = 'GPKG'
options['layerName'] = 'table1'
exporter = QgsVectorLayerExporter(tmpfile, "ogr", fields, QgsWkbTypes.Polygon, QgsCoordinateReferenceSystem(3111), False, options)
self.assertFalse(exporter.errorCode(),
'unexpected export error {}: {}'.format(exporter.errorCode(), exporter.errorMessage()))
options['layerName'] = 'table2'
exporter = QgsVectorLayerExporter(tmpfile, "ogr", fields, QgsWkbTypes.Point, QgsCoordinateReferenceSystem(3113), False, options)
self.assertFalse(exporter.errorCode(),
'unexpected export error {} : {}'.format(exporter.errorCode(), exporter.errorMessage()))
del exporter
# make sure layers exist
lyr = QgsVectorLayer('{}|layername=table1'.format(tmpfile), "lyr1", "ogr")
self.assertTrue(lyr.isValid())
self.assertEqual(lyr.crs().authid(), 'EPSG:3111')
self.assertEqual(lyr.wkbType(), QgsWkbTypes.Polygon)
lyr2 = QgsVectorLayer('{}|layername=table2'.format(tmpfile), "lyr2", "ogr")
self.assertTrue(lyr2.isValid())
self.assertEqual(lyr2.crs().authid(), 'EPSG:3113')
self.assertEqual(lyr2.wkbType(), QgsWkbTypes.Point)
def testGeopackageTwoLayerEdition(self):
''' test https://issues.qgis.org/issues/17034 '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageTwoLayerEdition.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('layer1', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('attr', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('layer2', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('attr', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(1 1)'))
lyr.CreateFeature(f)
f = None
ds = None
vl1 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer1", u'layer1', u'ogr')
vl2 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer2", u'layer2', u'ogr')
# Edit vl1, vl2 multiple times
self.assertTrue(vl1.startEditing())
self.assertTrue(vl2.startEditing())
self.assertTrue(vl1.changeGeometry(1, QgsGeometry.fromWkt('Point (2 2)')))
self.assertTrue(vl2.changeGeometry(1, QgsGeometry.fromWkt('Point (3 3)')))
self.assertTrue(vl1.commitChanges())
self.assertTrue(vl2.commitChanges())
self.assertTrue(vl1.startEditing())
self.assertTrue(vl2.startEditing())
self.assertTrue(vl1.changeAttributeValue(1, 1, 100))
self.assertTrue(vl2.changeAttributeValue(1, 1, 101))
self.assertTrue(vl1.commitChanges())
self.assertTrue(vl2.commitChanges())
self.assertTrue(vl1.startEditing())
self.assertTrue(vl2.startEditing())
self.assertTrue(vl1.changeGeometry(1, QgsGeometry.fromWkt('Point (4 4)')))
self.assertTrue(vl2.changeGeometry(1, QgsGeometry.fromWkt('Point (5 5)')))
self.assertTrue(vl1.commitChanges())
self.assertTrue(vl2.commitChanges())
vl1 = None
vl2 = None
# Check everything is as expected after re-opening
vl1 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer1", u'layer1', u'ogr')
vl2 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer2", u'layer2', u'ogr')
got = [feat for feat in vl1.getFeatures()][0]
got_geom = got.geometry()
self.assertEqual(got['attr'], 100)
reference = QgsGeometry.fromWkt('Point (4 4)')
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
got = [feat for feat in vl2.getFeatures()][0]
got_geom = got.geometry()
self.assertEqual(got['attr'], 101)
reference = QgsGeometry.fromWkt('Point (5 5)')
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
def testReplaceLayerWhileOpen(self):
''' Replace an existing geopackage layer whilst it's open in the project'''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageReplaceOpenLayer.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('layer1', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('attr', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('attr2', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
vl1 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer1", u'layer1', u'ogr')
p = QgsProject()
p.addMapLayer(vl1)
request = QgsFeatureRequest().setSubsetOfAttributes([0])
features = [f for f in vl1.getFeatures(request)]
self.assertEqual(len(features), 1)
# now, overwrite the layer with a different geometry type and fields
ds.DeleteLayer('layer1')
lyr = ds.CreateLayer('layer1', geom_type=ogr.wkbLineString)
lyr.CreateField(ogr.FieldDefn('attr', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LineString(0 0, 1 1)'))
lyr.CreateFeature(f)
f = None
vl2 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer1", u'layer2', u'ogr')
p.addMapLayer(vl2)
features = [f for f in vl1.getFeatures(request)]
self.assertEqual(len(features), 1)
def testSublayerWithComplexLayerName(self):
''' Test reading a gpkg with a sublayer name containing : '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageComplexLayerName.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('layer1:', geom_type=ogr.wkbPoint, options=['GEOMETRY_NAME=geom:'])
lyr.CreateField(ogr.FieldDefn('attr', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
vl = QgsVectorLayer(u'{}'.format(tmpfile), u'layer', u'ogr')
self.assertEqual(vl.dataProvider().subLayers(), [QgsDataProvider.SUBLAYER_SEPARATOR.join(['0', 'layer1:', '1', 'Point', 'geom:'])])
def testGeopackageManyLayers(self):
''' test opening more than 64 layers without running out of Spatialite connections '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageManyLayers.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
for i in range(70):
lyr = ds.CreateLayer('layer%d' % i, geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(%d 0)' % i))
lyr.CreateFeature(f)
f = None
ds = None
vl_tab = []
for i in range(70):
layername = 'layer%d' % i
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + layername, layername, u'ogr')
self.assertTrue(vl.isValid())
vl_tab += [vl]
count = count_opened_filedescriptors(tmpfile)
if count > 0:
self.assertEqual(count, 1)
for i in range(70):
got = [feat for feat in vl.getFeatures()]
self.assertTrue(len(got) == 1)
# We shouldn't have more than 2 file handles opened:
# one shared by the QgsOgrProvider object
# one shared by the feature iterators
count = count_opened_filedescriptors(tmpfile)
if count > 0:
self.assertEqual(count, 2)
# Re-open an already opened layer. We should get a new handle
layername = 'layer%d' % 0
vl_extra0 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + layername, layername, u'ogr')
self.assertTrue(vl_extra0.isValid())
countNew = count_opened_filedescriptors(tmpfile)
if countNew > 0:
self.assertLessEqual(countNew, 4) # for some reason we get 4 and not 3
layername = 'layer%d' % 1
vl_extra1 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + layername, layername, u'ogr')
self.assertTrue(vl_extra1.isValid())
countNew2 = count_opened_filedescriptors(tmpfile)
self.assertEqual(countNew2, countNew)
def testGeopackageRefreshIfTableListUpdated(self):
''' test that creating/deleting a layer is reflected when opening a new layer '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageRefreshIfTableListUpdated.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds.CreateLayer('test', geom_type=ogr.wkbPoint)
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(vl.extent().isNull())
time.sleep(1) # so timestamp gets updated
ds = ogr.Open(tmpfile, update=1)
ds.CreateLayer('test2', geom_type=ogr.wkbPoint)
ds = None
vl2 = QgsVectorLayer(u'{}'.format(tmpfile), 'test', u'ogr')
vl2.subLayers()
self.assertEqual(vl2.dataProvider().subLayers(), [QgsDataProvider.SUBLAYER_SEPARATOR.join(['0', 'test', '0', 'Point', 'geom']),
QgsDataProvider.SUBLAYER_SEPARATOR.join(['1', 'test2', '0', 'Point', 'geom'])])
def testGeopackageLargeFID(self):
tmpfile = os.path.join(self.basetestpath, 'testGeopackageLargeFID.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
f = QgsFeature()
f.setAttributes([1234567890123, None])
self.assertTrue(vl.startEditing())
self.assertTrue(vl.dataProvider().addFeatures([f]))
self.assertTrue(vl.commitChanges())
got = [feat for feat in vl.getFeatures()][0]
self.assertEqual(got['fid'], 1234567890123)
self.assertTrue(vl.startEditing())
self.assertTrue(vl.changeGeometry(1234567890123, QgsGeometry.fromWkt('Point (3 50)')))
self.assertTrue(vl.changeAttributeValue(1234567890123, 1, 'foo'))
self.assertTrue(vl.commitChanges())
got = [feat for feat in vl.getFeatures()][0]
self.assertEqual(got['str_field'], 'foo')
got_geom = got.geometry()
self.assertIsNotNone(got_geom)
self.assertTrue(vl.startEditing())
self.assertTrue(vl.deleteFeature(1234567890123))
self.assertTrue(vl.commitChanges())
def test_AddFeatureNullFid(self):
"""Test gpkg feature with NULL fid can be added"""
tmpfile = os.path.join(self.basetestpath, 'testGeopackageSplitFeatures.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPolygon)
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
ds = None
layer = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
# Check that pk field has unique constraint
fields = layer.fields()
pkfield = fields.at(0)
self.assertTrue(pkfield.constraints().constraints() & QgsFieldConstraints.ConstraintUnique)
# Test add feature with default Fid (NULL)
layer.startEditing()
f = QgsFeature()
feat = QgsFeature(layer.fields())
feat.setGeometry(QgsGeometry.fromWkt('Polygon ((0 0, 0 1, 1 1, 1 0, 0 0))'))
feat.setAttribute(1, 'test_value')
layer.addFeature(feat)
self.assertTrue(layer.commitChanges())
self.assertEqual(layer.featureCount(), 1)
def test_SplitFeature(self):
"""Test gpkg feature can be split"""
tmpfile = os.path.join(self.basetestpath, 'testGeopackageSplitFeatures.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPolygon)
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,1 0,0 0))'))
lyr.CreateFeature(f)
f = None
ds = None
# Split features
layer = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(layer.isValid())
self.assertTrue(layer.isSpatial())
self.assertEqual([f for f in layer.getFeatures()][0].geometry().asWkt(), 'Polygon ((0 0, 0 1, 1 1, 1 0, 0 0))')
layer.startEditing()
self.assertEqual(layer.splitFeatures([QgsPointXY(0.5, 0), QgsPointXY(0.5, 1)], 0), 0)
self.assertTrue(layer.commitChanges())
self.assertEqual(layer.featureCount(), 2)
layer = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertEqual(layer.featureCount(), 2)
self.assertEqual([f for f in layer.getFeatures()][0].geometry().asWkt(), 'Polygon ((0.5 0, 0.5 1, 1 1, 1 0, 0.5 0))')
self.assertEqual([f for f in layer.getFeatures()][1].geometry().asWkt(), 'Polygon ((0.5 1, 0.5 0, 0 0, 0 1, 0.5 1))')
def testCreateAttributeIndex(self):
tmpfile = os.path.join(self.basetestpath, 'testGeopackageAttributeIndex.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPolygon)
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('str_field2', ogr.OFTString))
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(vl.isValid())
self.assertTrue(vl.dataProvider().capabilities() & QgsVectorDataProvider.CreateAttributeIndex)
self.assertFalse(vl.dataProvider().createAttributeIndex(-1))
self.assertFalse(vl.dataProvider().createAttributeIndex(100))
# should not be allowed - there's already an index on the primary key
self.assertFalse(vl.dataProvider().createAttributeIndex(0))
self.assertTrue(vl.dataProvider().createAttributeIndex(1))
con = spatialite_connect(tmpfile, isolation_level=None)
cur = con.cursor()
rs = cur.execute("SELECT * FROM sqlite_master WHERE type='index' AND tbl_name='test'")
res = [row for row in rs]
self.assertEqual(len(res), 1)
index_name = res[0][1]
rs = cur.execute("PRAGMA index_info({})".format(index_name))
res = [row for row in rs]
self.assertEqual(len(res), 1)
self.assertEqual(res[0][2], 'str_field')
# second index
self.assertTrue(vl.dataProvider().createAttributeIndex(2))
rs = cur.execute("SELECT * FROM sqlite_master WHERE type='index' AND tbl_name='test'")
res = [row for row in rs]
self.assertEqual(len(res), 2)
indexed_columns = []
for row in res:
index_name = row[1]
rs = cur.execute("PRAGMA index_info({})".format(index_name))
res = [row for row in rs]
self.assertEqual(len(res), 1)
indexed_columns.append(res[0][2])
self.assertCountEqual(indexed_columns, ['str_field', 'str_field2'])
con.close()
def testCreateSpatialIndex(self):
tmpfile = os.path.join(self.basetestpath, 'testGeopackageSpatialIndex.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPolygon, options=['SPATIAL_INDEX=NO'])
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('str_field2', ogr.OFTString))
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(vl.isValid())
self.assertTrue(vl.dataProvider().capabilities() & QgsVectorDataProvider.CreateSpatialIndex)
self.assertTrue(vl.dataProvider().createSpatialIndex())
if __name__ == '__main__':
unittest.main()
| medspx/QGIS | tests/src/python/test_provider_ogr_gpkg.py | Python | gpl-2.0 | 36,067 |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""Tests pleaseTurnOver, pageBreakBefore, frameBreakBefore, keepWithNext...
"""
__version__='''$Id: test_platypus_pleaseturnover.py 3959 2012-09-27 14:39:39Z robin $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import sys
import unittest
from reportlab.platypus.flowables import Flowable, PTOContainer, KeepInFrame
from reportlab.lib.units import cm
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.colors import toColor, black
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.tables import Table
from reportlab.platypus.frames import Frame
from reportlab.lib.randomtext import randomText
from reportlab.platypus.doctemplate import PageTemplate, BaseDocTemplate, FrameBreak
def myMainPageFrame(canvas, doc):
"The page frame used for all PDF documents."
canvas.saveState()
canvas.setFont('Times-Roman', 12)
pageNumber = canvas.getPageNumber()
canvas.drawString(10*cm, cm, str(pageNumber))
canvas.restoreState()
def _showDoc(fn,story):
pageTemplate = PageTemplate('normal', [Frame(72, 440, 170, 284, id='F1'),
Frame(326, 440, 170, 284, id='F2'),
Frame(72, 72, 170, 284, id='F3'),
Frame(326, 72, 170, 284, id='F4'),
], myMainPageFrame)
doc = BaseDocTemplate(outputfile(fn),
pageTemplates = pageTemplate,
showBoundary = 1,
)
doc.multiBuild(story)
text2 ='''We have already seen that the natural general principle that will
subsume this case cannot be arbitrary in the requirement that branching
is not tolerated within the dominance scope of a complex symbol.
Notice, incidentally, that the speaker-hearer's linguistic intuition is
to be regarded as the strong generative capacity of the theory. A
consequence of the approach just outlined is that the descriptive power
of the base component does not affect the structure of the levels of
acceptability from fairly high (e.g. (99a)) to virtual gibberish (e.g.
(98d)). By combining adjunctions and certain deformations, a
descriptively adequate grammar cannot be arbitrary in the strong
generative capacity of the theory.'''
text1='''
On our assumptions, a descriptively adequate grammar delimits the strong
generative capacity of the theory. For one thing, the fundamental error
of regarding functional notions as categorial is to be regarded as a
corpus of utterance tokens upon which conformity has been defined by the
paired utterance test. A majority of informed linguistic specialists
agree that the appearance of parasitic gaps in domains relatively
inaccessible to ordinary extraction is necessary to impose an
interpretation on the requirement that branching is not tolerated within
the dominance scope of a complex symbol. It may be, then, that the
speaker-hearer's linguistic intuition appears to correlate rather
closely with the ultimate standard that determines the accuracy of any
proposed grammar. Analogously, the notion of level of grammaticalness
may remedy and, at the same time, eliminate a general convention
regarding the forms of the grammar.'''
text0 = '''To characterize a linguistic level L,
this selectionally introduced contextual
feature delimits the requirement that
branching is not tolerated within the
dominance scope of a complex
symbol. Notice, incidentally, that the
notion of level of grammaticalness
does not affect the structure of the
levels of acceptability from fairly high
(e.g. (99a)) to virtual gibberish (e.g.
(98d)). Suppose, for instance, that a
subset of English sentences interesting
on quite independent grounds appears
to correlate rather closely with an
important distinction in language use.
Presumably, this analysis of a
formative as a pair of sets of features is
not quite equivalent to the system of
base rules exclusive of the lexicon. We
have already seen that the appearance
of parasitic gaps in domains relatively
inaccessible to ordinary extraction
does not readily tolerate the strong
generative capacity of the theory.'''
def _ptoTestCase(self):
"""PTO stands for Please Turn Over and is a means for
specifying content to be inserted when stuff goes over a page.
This makes one long multi-page paragraph."""
# Build story.
story = []
def fbreak(story=story):
story.append(FrameBreak())
styleSheet = getSampleStyleSheet()
H1 = styleSheet['Heading1']
H1.pageBreakBefore = 0
H1.keepWithNext = 0
bt = styleSheet['BodyText']
pto = ParagraphStyle('pto',parent=bt)
pto.alignment = TA_RIGHT
pto.fontSize -= 1
def ColorParagraph(c,text,style):
return Paragraph('<para color="%s">%s</para>' % (c,text),style)
def ptoblob(blurb,content,trailer=None,header=None, story=story, H1=H1):
if type(content) not in (type([]),type(())): content = [content]
story.append(PTOContainer([Paragraph(blurb,H1)]+list(content),trailer,header))
t0 = [ColorParagraph('blue','Please turn over', pto )]
h0 = [ColorParagraph('blue','continued from previous page', pto )]
t1 = [ColorParagraph('red','Please turn over(inner)', pto )]
h1 = [ColorParagraph('red','continued from previous page(inner)', pto )]
ptoblob('First Try at a PTO',[Paragraph(text0,bt)],t0,h0)
fbreak()
c1 = Table([('alignment', 'align\012alignment'),
('bulletColor', 'bulletcolor\012bcolor'),
('bulletFontName', 'bfont\012bulletfontname'),
('bulletFontSize', 'bfontsize\012bulletfontsize'),
('bulletIndent', 'bindent\012bulletindent'),
('firstLineIndent', 'findent\012firstlineindent'),
('fontName', 'face\012fontname\012font'),
('fontSize', 'size\012fontsize'),
('leading', 'leading'),
('leftIndent', 'leftindent\012lindent'),
('rightIndent', 'rightindent\012rindent'),
('spaceAfter', 'spaceafter\012spacea'),
('spaceBefore', 'spacebefore\012spaceb'),
('textColor', 'fg\012textcolor\012color')],
style = [
('VALIGN',(0,0),(-1,-1),'TOP'),
('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
],
)
ptoblob('PTO with a table inside',c1,t0,h0)
fbreak()
ptoblob('A long PTO',[Paragraph(text0+' '+text1,bt)],t0,h0)
fbreak()
ptoblob('2 PTO (inner split)',[ColorParagraph('pink',text0,bt),PTOContainer([ColorParagraph(black,'Inner Starts',H1),ColorParagraph('yellow',text2,bt),ColorParagraph('black','Inner Ends',H1)],t1,h1),ColorParagraph('magenta',text1,bt)],t0,h0)
_showDoc('test_platypus_pto.pdf',story)
def _KeepInFrameTestCase(self,mode,offset=0):
story = []
def fbreak(story=story):
story.append(FrameBreak())
styleSheet = getSampleStyleSheet()
H1 = styleSheet['Heading1']
H1.pageBreakBefore = 0
H1.keepWithNext = 0
bt = styleSheet['BodyText']
def subStory(texts):
style = [
('VALIGN',(0,0),(-1,-1),'TOP'),
('INNERGRID', (0,0), (-1,-1), 0.25, black),
('BOX', (0,0), (-1,-1), 0.25, black),
]
return ([Paragraph(t,bt) for t in texts]
+[Table([('alignment', a.lower())],style = style,hAlign=a)
for a in ('LEFT','RIGHT','CENTER')])
def allModesKIF(just,ifb=True,width=170):
if ifb: fbreak()
story.append(KeepInFrame(width-offset,284-offset,subStory(texts=(text0,)),mode=mode,hAlign=just))
fbreak()
story.append(KeepInFrame(width-offset,284-offset,subStory(texts=(text0,text1)),mode=mode,hAlign=just))
fbreak()
story.append(KeepInFrame(width-offset,284-offset,subStory(texts=(text0,text1,text2)),mode=mode,hAlign=just))
allModesKIF('LEFT',False)
allModesKIF('LEFT',width=100)
allModesKIF('CENTRE',width=100)
allModesKIF('RIGHT',width=100)
_showDoc('test_platypus_KeepInFrame%s.pdf'%mode,story)
class TestCases(unittest.TestCase):
"Test multi-page splitting of paragraphs (eyeball-test)."
def test0(self):
_ptoTestCase(self)
def test1(self):
_KeepInFrameTestCase(self,mode="shrink")
def test2(self):
_KeepInFrameTestCase(self,mode="overflow")
def test3(self):
_KeepInFrameTestCase(self,mode="truncate")
def test4(self):
from reportlab.platypus.doctemplate import LayoutError
self.assertRaises(LayoutError, _KeepInFrameTestCase,*(self,"error"))
def test5(self):
_KeepInFrameTestCase(self,"shrink",0)
def makeSuite():
return makeSuiteForClasses(TestCases)
#noruntests
if __name__ == "__main__": #NORUNTESTS
if 'debug' in sys.argv:
_KeepInFrameTestCase(None)
else:
unittest.TextTestRunner().run(makeSuite())
printLocation()
| nakagami/reportlab | tests/test_platypus_pleaseturnover.py | Python | bsd-3-clause | 9,168 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_functions
----------------------------------
Tests for `functions` module.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from wikipediabase import functions, skin
def addone(x):
return x+1
class TestFunctions(unittest.TestCase):
def setUp(self):
self.fns = functions.FunctionSkin()
self.fns.set('addone', addone)
def test_serialfn(self):
self.assertEqual(tuple(functions.SerialFunction(fn=addone)), ('tests.test_functions', [], 'addone'))
def test_addition(self):
self.assertEqual(tuple(skin.Skin.get(self.fns, "addone")), ('tests.test_functions', [], 'addone'))
self.assertIs(self.fns['addone'], addone)
def test_calling(self):
self.assertEqual(self.fns.get("addone")(1), 2)
def test_dump(self):
self.assertEqual(self.fns.dump(), '{"addone": ["tests.test_functions", [], "addone"]}')
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| fakedrake/WikipediaBase-skinz | tests/test_functions.py | Python | bsd-3-clause | 1,053 |
from collections import OrderedDict
def dict_variables(rd, sv, name):
return {
"RELEASE_DATE": rd,
"SHORT_STRING": "".join(sv),
"SHORT_VERSION": ".".join(str(int(x)) for x in sv),
"NAME": name,
}
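# For example, dict_variables("201405311735", ("03", "14", "05"), "utopic") returns
# {"RELEASE_DATE": "201405311735", "SHORT_STRING": "031405", "SHORT_VERSION": "3.14.5", "NAME": "utopic"}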
VERSIONS = OrderedDict([
('3.14.5', dict_variables("201405311735", ("03", "14", "05"), "utopic")),
('3.14.6', dict_variables("201406071635", ("03", "14", "06"), "utopic")),
('3.14.7', dict_variables("201406111644", ("03", "14", "07"), "utopic")),
('3.14.8', dict_variables("201406161755", ("03", "14", "08"), "utopic")),
('3.14.9', dict_variables("201406261553", ("03", "14", "09"), "utopic")),
('3.14.10', dict_variables("201406302353", ("03", "14", "10"), "utopic")),
('3.14.11', dict_variables("201407062254", ("03", "14", "11"), "utopic")),
('3.14.12', dict_variables("201407091455", ("03", "14", "12"), "utopic")),
('3.14.13', dict_variables("201407171953", ("03", "14", "13"), "utopic")),
('3.14.14', dict_variables("201407281153", ("03", "14", "14"), "utopic")),
('3.14.15', dict_variables("201407311853", ("03", "14", "15"), "utopic")),
('3.14.16', dict_variables("201408072035", ("03", "14", "16"), "utopic")),
('3.14.17', dict_variables("201408132253", ("03", "14", "17"), "utopic")),
('3.15.1', dict_variables("201406161841", ("03", "15", "01"), "utopic")),
('3.15.2', dict_variables("201406261639", ("03", "15", "02"), "utopic")),
('3.15.3', dict_variables("201407010040", ("03", "15", "03"), "utopic")),
('3.15.4', dict_variables("201407062345", ("03", "15", "04"), "utopic")),
('3.15.5', dict_variables("201407091543", ("03", "15", "05"), "utopic")),
('3.15.6', dict_variables("201407172034", ("03", "15", "06"), "utopic")),
('3.15.7', dict_variables("201407281235", ("03", "15", "07"), "utopic")),
('3.15.8', dict_variables("201407091543", ("03", "15", "08"), "utopic")),
('3.15.9', dict_variables("201408072114", ("03", "15", "09"), "utopic")),
('3.15.10', dict_variables("201408132333", ("03", "15", "10"), "utopic")),
('3.16.1', dict_variables("201408140014", ("03", "16", "01"), "utopic")),
('3.16.2', dict_variables("201409052035", ("03", "16", "02"), "utopic")),
('3.16.3', dict_variables("201409171435", ("03", "16", "03"), "utopic")),
('3.17.0', dict_variables("201410060605", ("03", "17", "00"), "utopic")),
('3.17.1', dict_variables("201410150735", ("03", "17", "01"), "utopic")),
('3.17.2', dict_variables("201410301416", ("03", "17", "02"), "vivid")),
('3.17.3', dict_variables("201411141335", ("03", "17", "03"), "vivid")),
('3.17.4', dict_variables("201411211317", ("03", "17", "04"), "vivid")),
('3.17.5', dict_variables("201412070036", ("03", "17", "05"), "vivid")),
('3.17.6', dict_variables("201412071535", ("03", "17", "06"), "vivid")),
('3.18.0', dict_variables("201412071935", ("03", "18", "00"), "vivid")),
('3.18.1', dict_variables("201412170637", ("03", "18", "01"), "vivid")),
('3.18.2', dict_variables("201501082011", ("03", "18", "02"), "vivid")),
('3.18.3', dict_variables("201501161810", ("03", "18", "03"), "vivid")),
('3.18.4', dict_variables("201501271243", ("03", "18", "04"), "vivid")),
('3.18.5', dict_variables("201501292218", ("03", "18", "05"), "vivid")),
('3.18.6', dict_variables("201502061036", ("03", "18", "06"), "vivid")),
('3.18.7', dict_variables("201502110759", ("03", "18", "07"), "vivid")),
('3.18.8', dict_variables("201502271935", ("03", "18", "08"), "vivid")),
('3.18.9', dict_variables("201503080036", ("03", "18", "09"), "vivid")),
('3.18.10', dict_variables("201503241436", ("03", "18", "10"), "vivid")),
('3.18.11', dict_variables("201504041535", ("03", "18", "11"), "vivid")),
('3.19.0', dict_variables("201502091451", ("03", "19", "00"), "vivid")),
('3.19.1', dict_variables("201503080052", ("03", "19", "01"), "vivid")),
('3.19.2', dict_variables("201503181436", ("03", "19", "02"), "vivid")),
('3.19.3', dict_variables("201503261036", ("03", "19", "03"), "vivid")),
('3.19.4', dict_variables("201504131440", ("03", "19", "04"), "vivid")),
('4.0.0', dict_variables("201504121935", ("04", "00", "00"), "vivid")),
('4.0.1', dict_variables("201504290935", ("04", "00", "01"), "wily")),
('4.0.2', dict_variables("201505081529", ("04", "00", "02"), "wily")),
('4.0.3', dict_variables("201505131441", ("04", "00", "03"), "wily")),
('4.0.4', dict_variables(".201505171336", ("04", "00", "04"), "wily")),
('4.0.5', dict_variables("201506061639", ("04", "00", "05"), "wily")),
('4.0.6', dict_variables("201506222135", ("04", "00", "06"), "wily")),
('4.0.7', dict_variables("201507031036", ("04", "00", "07"), "wily")),
('4.1.11', dict_variables("201510261146", ("04", "01", "11"), "wily")),
('4.2.25', dict_variables("201510270124", ("04", "02", "25"), "wily")),
('4.3.0', dict_variables("201511020949", ("04", "03", "00"), "wily")),
])
| hughdbrown/linux_binary | src/linux_versions.py | Python | mit | 4,985 |
# pAhaggar Python module
#
# Copyright (C) 2012-2013 Djalal Harouni <tixxdz@opendz.org>
# Copyright (C) 2012-2013 LIRE Laboratory.
# University Constantine 2, Algeria.
#
# Author(s): Djalal Harouni <tixxdz@opendz.org>
# Pr. Mohamed Benmohammed
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License.
#
import sys
from pahaggar import gcc_tree
__version__ = '20130110'
version = __version__
ret = gcc_tree.parse_tree_def("pahaggar/tree.def")
if ret == -1:
print "Error: failed to initialize pahaggar modules"
sys.exit(ret)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| tixxdz/ahaggar | scripts/pahaggar/__init__.py | Python | gpl-2.0 | 738 |
import pickle
from matplotlib import pyplot as plt
plt.style.use('classic')
import matplotlib as mpl
fs = 12.
fw = 'bold'
mpl.rc('lines', linewidth=2., color='k')
mpl.rc('font', size=fs, weight=fw, family='Arial')
mpl.rc('legend', fontsize='small')
import numpy
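# grad(x, u) below approximates the derivative du/dx by taking the ratio of numpy.gradient arrays.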
def grad( x, u ) :
return numpy.gradient(u) / numpy.gradient(x)
date = '20160519'
base = '/home/mk-sim-linux/Battery_TempGrad/Python/batt_simulation/battsimpy/'
base_dir = '/home/mk-sim-linux/Battery_TempGrad/JournalPaper2/Paper2/ocv_unif35/'
fig_dir = '/home/mk-sim-linux/Battery_TempGrad/JournalPaper3/modeling_paper_p3/figs/'
#base_dir = '/home/m_klein/tgs_data/ocv_unif35/'
#base_dir = '/Volumes/Data/Paper2/ocv_dat/'
#bsp_path = '/Users/mk/Desktop/battsim/battsimpy/'
nmc_rest_523 = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/2012Yang_523NMC_dchg_restOCV.csv', delimiter=',' )
nmc_cby25_111 = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/2012Wu_NMC111_Cby25_dchg.csv' , delimiter=',' )
nmc_YangWu_mix = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/YangWuMix_NMC_20170607.csv' , delimiter=',' )
lfp_prada_dchg = numpy.loadtxt( base+'data/Model_v1/Model_Pars/solid/thermodynamics/2012Prada_LFP_U_dchg.csv' , delimiter=',' )
graph_hess_dchg = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/Ua_cell4Fit_NMC_2012Yang_refx.csv' , delimiter=',' ) #graphite_Hess_discharge_x.csv
#xin, Uin = 1.-lfp_prada_dchg[:,0], lfp_prada_dchg[:,1]
#xin, Uin = 1.-nmc_rest_523[:,0], nmc_rest_523[:,1]
xin, Uin = 1.-nmc_YangWu_mix[:,0], nmc_YangWu_mix[:,1]
#xin, Uin = 1.-nmc_cby25_111[:,0], nmc_cby25_111[:,1]
xin2, Uin2 = graph_hess_dchg[:,0], graph_hess_dchg[:,1]#-0.025
pfiles2 = [ base_dir+'slowOCVdat_cell4_slow_ocv_'+date+'.p', ]
# Load the cell ocv c/60 data
d = pickle.load( open( pfiles2[0], 'rb' ) )
max_cap = numpy.amax( d['interp']['cap'] )
x_cell, U_cell = 1-numpy.array(d['interp']['cap'])/max_cap*1., d['interp']['dchg']['volt']
# NMC 532 scale - NMC cyl cells (cell 4)
#scale_x = 1.8#1.5 # 1.55
#shift_x = -.01#-.06 #-.12
scale_x = 1.42 # 1.55
shift_x = -.03 #-.12
#scale_x1 = 1.9
#shift_x1 = -.03
## LFP Prada - (cell 2)
#scale_x = 1.25
#shift_x = 1.05-scale_x
# Graphite - scale NMC cyl cells (cell 4)
scale_x2 = 1/.8 #1./0.83 #
shift_x2 = -.06 #-.035
#scale_x2 = 1/.74
#shift_x2 = -.04
figres = 300
figname = base_dir+'ocv-plots_'+date+'.pdf'
sty = [ '-', '--' ]
fsz = (190./25.4,120./25.4)
f1, axes = plt.subplots(1,2,figsize=fsz)
a1,a2 = axes
# Plot the full cell ocv
a1.plot( x_cell, U_cell, '-b', label='Cell C/60 Data' )
# Plot the cathode curve for the shifted soc operating window
a1.plot( xin*scale_x+shift_x, Uin, '-g', label='Cathode' )
# Plot the anode curve for the shifted soc operating window
#a1t = a1.twinx()
a1.plot( xin2*scale_x2+shift_x2, Uin2, '-k', label='Anode' )
# Compute the cathode ocv for the full cell soc operating window
if xin[1] < xin[0] :
Uc = numpy.interp( x_cell, numpy.flipud(xin*scale_x+shift_x), numpy.flipud(Uin) )
else :
Uc = numpy.interp( x_cell, xin*scale_x+shift_x, Uin )
Ua = numpy.interp( x_cell, xin2*scale_x2+shift_x2, Uin2 )
# Plot the estimated full cell ocv curve for the aligned anode and cathode equilibrium curves
#a1.plot( x_cell, Uc-U_cell, ':k', label='U$_{anode}$ fit' )
#a1t.set_ylim([0.,2.])
a1.plot( x_cell, Uc-Ua, ':k', label='U$_{cell}$ fit' )
# Calculate the alignment stoichs for anode and cathode
Ua_out = Uc - U_cell
xa_out = (x_cell-shift_x2)/scale_x2
#numpy.savetxt( base+'data/Model_v1/Model_Pars/solid/thermodynamics/Ua_lfp_2012Prada.csv', numpy.array([xa_out, Ua_out]).T, delimiter=',' )
#numpy.savetxt( base+'data/Model_v1/Model_Pars/solid/thermodynamics/Ua_nmc_2012Yang.csv', numpy.array([xa_out, Ua_out]).T, delimiter=',' )
yin = 1.-xin
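# The electrode stoichiometry windows follow from inverting the scale/shift mapping used for
# plotting above, evaluated at full-cell SOC = 0 and SOC = 1.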
xc_lo = 1. - (-shift_x/scale_x)
xc_hi = 1. - (1.-shift_x)/scale_x
xa_lo = (-shift_x2/scale_x2)
xa_hi = (1.-shift_x2)/scale_x2
# Print out the stoich limits for the anode and cathode
print 'xc_lo, xc_hi:',xc_lo, xc_hi
print 'xa_lo, xa_hi:',xa_lo, xa_hi
a1.set_xlabel( 'State of Charge', fontsize=fs, fontweight=fw )
a1.set_ylabel( 'Voltage vs. Li [V]', fontsize=fs, fontweight=fw )
a1.set_title( 'Full and Half Cell OCV', fontsize=fs, fontweight=fw )
a1.legend(loc='best')
a1.set_axisbelow(True)
a1.grid(color='gray')
a2.plot( x_cell, grad(x_cell, U_cell), label=r'$\frac{\partial U_{cell}}{\partial SOC}$' )
a2.plot( x_cell, -grad(x_cell, Ua), label=r'$\frac{\partial U_{anode}}{\partial SOC}$' )
a2.set_xlabel( 'State of Charge', fontsize=fs, fontweight=fw )
a2.set_ylabel( '$\partial U / \partial SOC$', fontsize=fs, fontweight=fw )
a2.set_title( 'OCV Gradients for Anode Alignment', fontsize=fs, fontweight=fw )
a2.legend(loc='best')
a2.set_axisbelow(True)
a2.grid(color='gray')
a2.set_ylim([-0.1,1.5])
#plt.suptitle('LFP/C$_6$ Half Cell OCV Alignment', fontsize=fs, fontweight=fw)
plt.suptitle('NMC/C$_6$ Half Cell OCV Alignment', fontsize=fs, fontweight=fw)
plt.tight_layout(rect=[0,0.03,1,0.97])
plt.show()
#f1.savefig( fig_dir+'ocv_alignment_cell2_lfp.pdf', dpi=figres)
#f1.savefig( fig_dir+'ocv_alignment_cell4_nmc.pdf', dpi=figres)
| matthewpklein/battsimpy | docs/extra_files/electrode_ocv_gen.py | Python | gpl-3.0 | 5,199 |
# -*- coding: utf8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pymysql
import pymysql.cursors
import YahooFinance as yf
import KabuCom as kc
import datetime
import json
import hashlib
import config
class CurbFinanceMySQL:
def __init__(self):
self.host = config.mysql_config['host']  # connection settings are assumed to live in config.py
self.port = config.mysql_config['port']
self.db = config.mysql_config['db']
self.user = config.mysql_config['user']
self.passwd = config.mysql_config['passwd']
self.sqlconfig = pymysql.connect(host = self.host,
port = self.port,
unix_socket = '/tmp/mysql.sock',
user = self.user,
passwd = self.passwd,
db = self.db)
self.sqlcursor = self.sqlconfig.cursor()
self.brand_table = 'brand'
self.stocks_table = 'stock'
def showdb(self):
self.sqlcursor.execute('SHOW TABLES')
rows = self.sqlcursor.fetchall()
print rows
def insert_brand_init(self, arg_code, arg_name):
u"""銘柄を保存するデータベースを初期化する。
arg_code: 企業コード, arg_name: 企業名
"""
if len(self.select_brand(arg_code)) != 0:
return False
insert_statement = "INSERT " + self.db + "." + self.brand_table + "(brand) VALUES(%s)"
self.sqlcursor.execute(insert_statement, \
(arg_code,))
self.sqlconfig.commit()
def insert_stock(self, arg_code, arg_date, arg_first, arg_high, arg_low, arg_close, arg_turnover, arg_fixclose):
u"""株式情報を1つ保存する
arg_code: 企業コード, arg_date: 日付, arg_first: 始値, arg_high: 高値, arg_low: 低値, arg_close: 終値, arg_turnover: 出来高, arg_fixclose調整後補正値
"""
stock_hash = hashlib.md5(arg_code + arg_date).hexdigest()
if len(self.select_stock(stock_hash)) != 0:
return False
insert_statement = "INSERT " + self.db + "." + self.stocks_table + "(stock_hash, code, date, first, high, low, close, turnover, fixclose) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)"
self.sqlcursor.execute(insert_statement, \
(stock_hash, arg_code, arg_date,\
arg_first, arg_high, arg_low,\
arg_close, arg_turnover, arg_fixclose))
self.sqlconfig.commit()
def insert_stocks(self, stocks_list):
insert_statement = "INSERT " + self.db + "." + self.stocks_table + "(stock_hash, code, date, first, high, low, close, turnover, fixclose) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)"
for stock in stocks_list:
stock_hash = hashlib.md5(stock[0] + stock[1]).hexdigest()
if len(self.select_stock(stock_hash)) != 0:
continue
insert_tupple = (stock_hash,) + stock
self.sqlcursor.execute(insert_statement, insert_tupple)
self.sqlconfig.commit()
def select_stock(self, stock_hash):
select_statement = "SELECT 'stock_hash' FROM " + self.stocks_table + "WHERE stock_hash = '" + stock_hash + "'LIMIT 0, 5"
self.sqlcursor.execute(select_statement)
self.sqlconfig.commit()
select_data = self.sqlcursor.fetchall()
return select_data
def write_json_campany_list(self, data):
d = datetime.datetime.today()
date = d.strftime('%Y%m')
FILEOUT = str(date) + 'campany_list.json'
f = open(FILEOUT, 'w')
json.dump(data, f)
f.close()
def read_json_campany_list(self):
d = datetime.datetime.today()
date = d.strftime('%Y%m')
FILEIN = str(date) + 'campany_list.json'
try:
f = open(FILEIN, 'r')
data = json.load(f)
f.close()
return data
except IOError:
return False
if __name__ == '__main__':
cf = CurbFinanceMySQL()
yfc = yf.YahooFinance()
campany_list = cf.read_json_campany_list()
if campany_list == False:
campany_list = yfc.download_stocks_lists()
cf.write_json_campany_list(campany_list)
print 'List downloaded'
yfc.download_stocks_history(campany_list, cf.insert_stocks)
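# Hedged usage sketch (illustrative values only, not from the original script):
# a single daily quote can also be stored directly with
#   cf.insert_stock('7203', '2015-01-05', 7500, 7600, 7450, 7550, 1000000, 7550)
# where insert_stock() de-duplicates rows via md5(code + date).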
|
EnsekiTT/cfinance
|
finance.py
|
Python
|
mit
| 3,699
|
"""
A pure python (slow) implementation of rijndael with a decent interface
To include -
from rijndael import rijndael
To do a key setup -
r = rijndael(key, block_size = 16)
key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16
To use -
ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)
If any strings are of the wrong length a ValueError is thrown
"""
# ported from the Java reference code by Bram Cohen, April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables
import copy
import string
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
[[0, 0], [1, 5], [2, 4], [3, 3]],
[[0, 0], [1, 7], [3, 5], [4, 4]]]
# [keysize][block_size]
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}
A = [[1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 1]]
# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in xrange(255):
j = (alog[-1] << 1) ^ alog[-1]
if j & 0x100 != 0:
j ^= 0x11B
alog.append(j)
log = [0] * 256
for i in xrange(1, 255):
log[alog[i]] = i
# multiply two elements of GF(2^m)
def mul(a, b):
if a == 0 or b == 0:
return 0
return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]
# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in xrange(256)]
box[1][7] = 1
for i in xrange(2, 256):
j = alog[255 - log[i]]
for t in xrange(8):
box[i][t] = (j >> (7 - t)) & 0x01
B = [0, 1, 1, 0, 0, 0, 1, 1]
# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in xrange(256)]
for i in xrange(256):
for t in xrange(8):
cox[i][t] = B[t]
for j in xrange(8):
cox[i][t] ^= A[t][j] * box[i][j]
# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in xrange(256):
S[i] = cox[i][0] << 7
for t in xrange(1, 8):
S[i] ^= cox[i][t] << (7-t)
Si[S[i] & 0xFF] = i
# T-boxes
G = [[2, 1, 1, 3],
[3, 2, 1, 1],
[1, 3, 2, 1],
[1, 1, 3, 2]]
AA = [[0] * 8 for i in xrange(4)]
for i in xrange(4):
for j in xrange(4):
AA[i][j] = G[i][j]
AA[i][i+4] = 1
for i in xrange(4):
pivot = AA[i][i]
if pivot == 0:
t = i + 1
while AA[t][i] == 0 and t < 4:
t += 1
assert t != 4, 'G matrix must be invertible'
for j in xrange(8):
AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
pivot = AA[i][i]
for j in xrange(8):
if AA[i][j] != 0:
AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
for t in xrange(4):
if i != t:
for j in xrange(i+1, 8):
AA[t][j] ^= mul(AA[i][j], AA[t][i])
AA[t][i] = 0
iG = [[0] * 4 for i in xrange(4)]
for i in xrange(4):
for j in xrange(4):
iG[i][j] = AA[i][j + 4]
def mul4(a, bs):
if a == 0:
return 0
r = 0
for b in bs:
r <<= 8
if b != 0:
r = r | mul(a, b)
return r
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
for t in xrange(256):
s = S[t]
T1.append(mul4(s, G[0]))
T2.append(mul4(s, G[1]))
T3.append(mul4(s, G[2]))
T4.append(mul4(s, G[3]))
s = Si[t]
T5.append(mul4(s, iG[0]))
T6.append(mul4(s, iG[1]))
T7.append(mul4(s, iG[2]))
T8.append(mul4(s, iG[3]))
U1.append(mul4(t, iG[0]))
U2.append(mul4(t, iG[1]))
U3.append(mul4(t, iG[2]))
U4.append(mul4(t, iG[3]))
# round constants
rcon = [1]
r = 1
for t in xrange(1, 30):
r = mul(2, r)
rcon.append(r)
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG
class rijndael:
def __init__(self, key, block_size = 16):
if block_size != 16 and block_size != 24 and block_size != 32:
raise ValueError('Invalid block size: ' + str(block_size))
if len(key) != 16 and len(key) != 24 and len(key) != 32:
raise ValueError('Invalid key size: ' + str(len(key)))
self.block_size = block_size
ROUNDS = num_rounds[len(key)][block_size]
BC = block_size / 4
# encryption round keys
Ke = [[0] * BC for i in xrange(ROUNDS + 1)]
# decryption round keys
Kd = [[0] * BC for i in xrange(ROUNDS + 1)]
ROUND_KEY_COUNT = (ROUNDS + 1) * BC
KC = len(key) / 4
# copy user material bytes into temporary ints
tk = []
for i in xrange(0, KC):
tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
(ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))
# copy values into round key arrays
t = 0
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t / BC][t % BC] = tk[j]
Kd[ROUNDS - (t / BC)][t % BC] = tk[j]
j += 1
t += 1
tt = 0
rconpointer = 0
while t < ROUND_KEY_COUNT:
# extrapolate using phi (the round key evolution function)
tt = tk[KC - 1]
tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
(S[ tt & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) ^ \
(rcon[rconpointer] & 0xFF) << 24
rconpointer += 1
if KC != 8:
for i in xrange(1, KC):
tk[i] ^= tk[i-1]
else:
for i in xrange(1, KC / 2):
tk[i] ^= tk[i-1]
tt = tk[KC / 2 - 1]
tk[KC / 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) << 24
for i in xrange(KC / 2 + 1, KC):
tk[i] ^= tk[i-1]
# copy values into round key arrays
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t / BC][t % BC] = tk[j]
Kd[ROUNDS - (t / BC)][t % BC] = tk[j]
j += 1
t += 1
# inverse MixColumn where needed
for r in xrange(1, ROUNDS):
for j in xrange(BC):
tt = Kd[r][j]
Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
U2[(tt >> 16) & 0xFF] ^ \
U3[(tt >> 8) & 0xFF] ^ \
U4[ tt & 0xFF]
self.Ke = Ke
self.Kd = Kd
def encrypt(self, plaintext):
if len(plaintext) != self.block_size:
raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
Ke = self.Ke
BC = self.block_size / 4
ROUNDS = len(Ke) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][0]
s2 = shifts[SC][2][0]
s3 = shifts[SC][3][0]
a = [0] * BC
# temporary work array
t = []
# plaintext to ints + key
for i in xrange(BC):
t.append((ord(plaintext[i * 4 ]) << 24 |
ord(plaintext[i * 4 + 1]) << 16 |
ord(plaintext[i * 4 + 2]) << 8 |
ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i])
# apply round transforms
for r in xrange(1, ROUNDS):
for i in xrange(BC):
a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^
T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in xrange(BC):
tt = Ke[ROUNDS][i]
result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return string.join(map(chr, result), '')
def decrypt(self, ciphertext):
if len(ciphertext) != self.block_size:
raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
Kd = self.Kd
BC = self.block_size / 4
ROUNDS = len(Kd) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][1]
s2 = shifts[SC][2][1]
s3 = shifts[SC][3][1]
a = [0] * BC
# temporary work array
t = [0] * BC
# ciphertext to ints + key
for i in xrange(BC):
t[i] = (ord(ciphertext[i * 4 ]) << 24 |
ord(ciphertext[i * 4 + 1]) << 16 |
ord(ciphertext[i * 4 + 2]) << 8 |
ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i]
# apply round transforms
for r in xrange(1, ROUNDS):
for i in xrange(BC):
a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^
T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in xrange(BC):
tt = Kd[ROUNDS][i]
result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return string.join(map(chr, result), '')
def encrypt(key, block):
return rijndael(key, len(block)).encrypt(block)
def decrypt(key, block):
return rijndael(key, len(block)).decrypt(block)
def test():
def t(kl, bl):
b = 'b' * bl
r = rijndael('a' * kl, bl)
assert r.decrypt(r.encrypt(b)) == b
t(16, 16)
t(16, 24)
t(16, 32)
t(24, 16)
t(24, 24)
t(24, 32)
t(32, 16)
t(32, 24)
t(32, 32)
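# Hedged usage sketch (not part of the reference port): exercises the interface
# described in the module docstring. The key and plaintext strings are arbitrary
# 16-byte examples chosen here for illustration.
if __name__ == '__main__':
    example_key = 'sixteen byte key'
    example_block = 'sixteen byte msg'
    r = rijndael(example_key, block_size = 16)
    ciphertext = r.encrypt(example_block)
    assert r.decrypt(ciphertext) == example_block
    # The module-level helpers wrap the same calls for one-off use.
    assert decrypt(example_key, encrypt(example_key, example_block)) == example_block
    test()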
|
jevinskie/aes-over-pcie
|
utils/rijndael.py
|
Python
|
bsd-3-clause
| 10,985
|
import csv
from topic import Topic
from query import Query
from document import Document
from data_file import DataFile
from user import User
from condition import Condition
class QueriesSummaryFile(DataFile):
def __init__(self, file_name):
DataFile.__init__(self, file_name)
self.__parse()
def __parse( self ):
with open( self.file_name, 'r' ) as result_file:
result_reader = csv.DictReader( result_file, delimiter=',')
for row in result_reader:
topic = Topic.create_or_update( row['topic'] )
user = User.create_or_update( row['userid'] )
condition = Condition.create_or_update( row['condition'] )
                autocomplete = row['autocomplete_used'] == '1'  # csv.DictReader yields strings; assuming the column stores 0/1 flags
query = Query.create_or_update( row['queryid'], topic = topic, user = user, condition = condition, autocomplete = autocomplete, query_text = row['terms'], precision = self.__build_precision_dict( row ) )
def __build_precision_dict( self, result_row ):
precisions = {}
def add_precision(rank):
precisions[ str( rank ) ] = float( result_row['p' + str(rank)] )
for rank in list(range(1,11)) + [15,20]:
add_precision( rank )
precisions['map'] = float( result_row['map'] )
return precisions
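# Hedged usage sketch (the file name is an illustrative assumption): parsing a
# summary CSV populates the Topic/User/Condition/Query registries as a side effect.
#   QueriesSummaryFile('queries_summary.csv')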
|
fire-uta/iiix-data-parser
|
queries_summary_file.py
|
Python
|
mit
| 1,248
|
import git
import glob
import logging
import os
from .config import AssignmentConfig
logger = logging.getLogger(__name__)
class GradeSheetError(Exception):
"""A general-purpose exception thrown by the Assignment class.
"""
pass
class GradeSheet(object):
"""A gradesheet for an assignment. Gradesheets are git
repositories. They have the following attributes...
A configuration file
Assignment-specific configuration information (refer to
:class:`AssignmentConfig`)
A Dockerfile
Used to build a docker image. That image will be used to
create containers for running student submissions
Grading scripts
The assignment-specific configuration details how to run
grading scripts. The gradesheet repository must have the
scripts in order to run them.
"""
SUB_DIR = "gradesheet"
"""The name to use when creating a new gradesheet directory on disk.
"""
@classmethod
def from_repo(cls, gradesheet_path, repo_url):
"""Clone a gradesheet from a remote git repository.
:param str gradesheet_path: The path to the directory into
which the gradesheet repository will be cloned.
:param str repo_url: A URL pointing to a gradesheet repository
to clone.
:raises GradeSheetError: if there was a problem cloning
the repo
"""
try:
git.Repo.clone_from(repo_url, gradesheet_path)
logger.info("Successfully cloned {}".format(repo_url))
except git.exc.GitCommandError as e:
raise GradeSheetError("Could not clone {}".format(repo_url)) from e
return None
@classmethod
def new(cls, gradesheet_path, assignment_name):
"""Initializes a new gradesheet repository with default files
:param str gradesheet_path: The path to the directory into
which the gradesheet repository will be cloned.
:param str assignment_name: The name of the assignment.
"""
path = gradesheet_path
# Initialize a new gradesheet repo
repo = git.Repo.init(path)
# Create a default assignment config
config = AssignmentConfig.new(path,
{'assignment-name': assignment_name})
# Create a default Dockerfile
dockerfile_path = os.path.join(path, 'Dockerfile')
with open(dockerfile_path, 'w') as dockerfile:
dockerfile.write("# Dockerfile for {}".format(assignment_name))
repo.index.add([config.file_path, dockerfile_path])
repo.index.commit("Add default assignment.yml and Dockerfile")
return None
@property
def dockerfile_path(self):
"""The path to this gradesheet's Dockerfile"""
return os.path.join(self.path, "Dockerfile")
@property
def templates(self):
"""A dictionary of this gradesheet's optional report templates"""
templates = glob.glob(os.path.join(self.path, '*.template'))
return {os.path.basename(t).split('.')[0].lower(): t
for t in templates}
def __init__(self, assignment):
"""Instantiates a GradeSheet.
:param Assignment assignment: The assignment to which this
gradesheet belongs.
:raises AssignmentConfigError: if the assignment-specific
config file in the gradesheet cannot be loaded
        :raises FileNotFoundError: if the Dockerfile can't be found
"""
self.path = assignment.gradesheet_dir
self.assignment = assignment
self.config = AssignmentConfig(self.path)
self.repository = git.Repo(self.path)
# Verify that paths exist like we expect
if not os.path.exists(self.dockerfile_path):
raise FileNotFoundError("GradeSheet repo has no Dockerfile!")
def pull(self):
if self.repository is not None:
logger.debug("Pulling gradesheet repo...")
self.repository.git.pull('--rebase')
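# Hedged usage sketch (not part of the original module): ties the classmethods
# above together. The path, URL and assignment name are illustrative assumptions,
# and a real Assignment object is still needed before constructing GradeSheet itself.
def _example_bootstrap_gradesheet(gradesheet_path, assignment_name, repo_url=None):
    """Create or clone a gradesheet repository at gradesheet_path."""
    if repo_url:
        GradeSheet.from_repo(gradesheet_path, repo_url)
    else:
        GradeSheet.new(gradesheet_path, assignment_name)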
|
grade-it/grader
|
redkyn-grader/grader/models/gradesheet.py
|
Python
|
mit
| 4,035
|
from SimpleCV.base import *
from SimpleCV.Features import Feature, FeatureSet
from SimpleCV.Color import Color
from SimpleCV.ImageClass import Image
class SegmentationBase(object):
"""
Right now I am going to keep this class as brain dead and single threaded as
possible just so I can get the hang of abc in python. The idea behind a segmentation
object is that you pass it frames, it does some sort of operations and you
get a foreground / background segemnted image. Eventually I would like
these processes to by asynchronous and multithreaded so that they can raise
specific image processing events.
"""
__metaclass__ = abc.ABCMeta
def load(cls, fname):
"""
load segmentation settings to file.
"""
return pickle.load(file(fname))
load = classmethod(load)
def save(self, fname):
"""
Save segmentation settings to file.
"""
output = open(fname, 'wb')
pickle.dump(self,output,2) # use two otherwise it borks the system
output.close()
@abc.abstractmethod
def addImage(self, img):
"""
Add a single image to the segmentation algorithm
"""
return
@abc.abstractmethod
def isReady(self):
"""
Returns true if the camera has a segmented image ready.
"""
return False
@abc.abstractmethod
def isError(self):
"""
Returns true if the segmentation system has detected an error.
Eventually we'll consruct a syntax of errors so this becomes
more expressive
"""
return False
@abc.abstractmethod
def resetError(self):
"""
Clear the previous error.
"""
return False
@abc.abstractmethod
def reset(self):
"""
Perform a reset of the segmentation systems underlying data.
"""
@abc.abstractmethod
def getRawImage(self):
"""
Return the segmented image with white representing the foreground
and black the background.
"""
@abc.abstractmethod
def getSegmentedImage(self, whiteFG=True):
"""
Return the segmented image with white representing the foreground
and black the background.
"""
@abc.abstractmethod
def getSegmentedBlobs(self):
"""
return the segmented blobs from the fg/bg image
"""
|
nils-werner/SimpleCV
|
SimpleCV/Segmentation/SegmentationBase.py
|
Python
|
bsd-3-clause
| 2,426
|
# -*- coding: utf-8 -*-
from trytond.pool import Pool
from product import Template, Product, ProductVariationAttributes, \
ProductAttribute
def register():
Pool.register(
Template,
Product,
ProductVariationAttributes,
ProductAttribute,
module='nereid_catalog_variants', type_='model'
)
|
fulfilio/nereid-catalog-variants
|
__init__.py
|
Python
|
bsd-3-clause
| 340
|
"""Support for MAX! binary sensors via MAX! Cube."""
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.helpers.entity import EntityCategory
from . import DATA_KEY
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Iterate through all MAX! Devices and add window shutters."""
devices = []
for handler in hass.data[DATA_KEY].values():
for device in handler.cube.devices:
devices.append(MaxCubeBattery(handler, device))
# Only add Window Shutters
if device.is_windowshutter():
devices.append(MaxCubeShutter(handler, device))
if devices:
add_entities(devices)
class MaxCubeBinarySensorBase(BinarySensorEntity):
"""Base class for maxcube binary sensors."""
_attr_entity_category = EntityCategory.DIAGNOSTIC
def __init__(self, handler, device):
"""Initialize MAX! Cube BinarySensorEntity."""
self._cubehandle = handler
self._device = device
self._room = handler.cube.room_by_id(device.room_id)
def update(self):
"""Get latest data from MAX! Cube."""
self._cubehandle.update()
class MaxCubeShutter(MaxCubeBinarySensorBase):
"""Representation of a MAX! Cube Binary Sensor device."""
_attr_device_class = BinarySensorDeviceClass.WINDOW
def __init__(self, handler, device):
"""Initialize MAX! Cube BinarySensorEntity."""
super().__init__(handler, device)
self._attr_name = f"{self._room.name} {self._device.name}"
self._attr_unique_id = self._device.serial
@property
def is_on(self):
"""Return true if the binary sensor is on/open."""
return self._device.is_open
class MaxCubeBattery(MaxCubeBinarySensorBase):
"""Representation of a MAX! Cube Binary Sensor device."""
_attr_device_class = BinarySensorDeviceClass.BATTERY
def __init__(self, handler, device):
"""Initialize MAX! Cube BinarySensorEntity."""
super().__init__(handler, device)
self._attr_name = f"{self._room.name} {device.name} battery"
self._attr_unique_id = f"{self._device.serial}_battery"
@property
def is_on(self):
"""Return true if the binary sensor is on/open."""
return self._device.battery == 1
|
home-assistant/home-assistant
|
homeassistant/components/maxcube/binary_sensor.py
|
Python
|
apache-2.0
| 2,360
|
#!/usr/bin/env python2.7
# asli@cbs.dtu.dk, thomas@cbs.dtu.dk
import sys, gzip
import re, string
import argparse
import os
from Bio.Blast import NCBIStandalone
from operator import itemgetter, attrgetter
from datetime import datetime as dt
import time
sys.path.append('/home/projects5/pr_53035/people/asli/bin/lib/python2.7/site-packages')
helpstr = '''
This script parses blast/ublast results and filters them based on the given cut-offs.
Blast results should be in -m 0 format or tab separated -m 6 format. With ublast, the results should be
obtained with -blast6out option.
Author: Asli I. Ozen ,(asli@cbs.dtu.dk)
License: GPL 3.0 (http://www.gnu.org/licenses/gpl-3.0.txt)
'''
class BlastDecision:
def __init__(self):
self.start = time.time()
d_ = dt.today()
self.timestarted = d_.strftime("%Y-%m-%d_%H%M")
self.parseArgs()
def parseArgs(self):
self.parser = argparse.ArgumentParser(description= helpstr, prog='PROG', conflict_handler='resolve')
self.parser.add_argument('-bd', help="pre-made blastdecide output file")
self.parser.add_argument('-bt', help="blast tabular output file")
self.parser.add_argument('-bo', help="blast -m 0 output file and its format (type 'blast' or 'psiblast')", nargs=2)
self.parser.add_argument('-l', help="Query lengths file (if tabular blast output is given)")
        # The empty dest="" arguments broke attribute access later on (opts.s, opts.qc, ...);
        # argparse derives the destination from the flag name when dest is omitted.
        self.parser.add_argument('-cb', type=str, help="save new blastdecide or not (type 'savenew' or 'nosave')")
        self.parser.add_argument('-s', default="50", help="minimum similarity cutoff")
        self.parser.add_argument('-qc', default="50", help="minimum query coverage cutoff")
        #self.parser.add_argument('-tc', help="minimum target coverage cutoff")
        self.parser.add_argument('-e', default="1e-10", help="evalue cutoff")
        self.parser.add_argument('-v', action="store_true", help="increase output verbosity")
def read_lengths(self):
fl= open(self.lenfile,"rU")
self.lendict={}
for line in fl:
#print line
query = line.split("\t")[0]
query_name = query.split(" ")[0].strip(">")
length= int(line.split("\t")[1].strip("\n"))
self.lendict[query_name]=length
fl.close()
def ReadBlast(self, file, iszipped = 0, is_psiblast=None):
self.output= open(str(file)+".blastdecide", "w")
self.selfhits=[]
if is_psiblast:
print >> sys.stderr, 'Parsing PSI-Blast'
self.parser = NCBIStandalone.PSIBlastParser()
else:
self.parser = NCBIStandalone.BlastParser()
if file[-3:] == '.gz' or iszipped:
handle = gzip.open(file)
else:
handle = open(file)
self.iter = NCBIStandalone.Iterator(handle = handle, parser = self.parser)
self.blastDict = {}
while 1:
try:
rec = self.iter.next()
if not rec: break
except:
sys.stderr.write('Can\'t iterate on blast records anymore. Abort.\n')
import traceback
traceback.print_exc()
return 'Error parsing %s' % file
self.query = rec.query.split(" ")[0] ## blast_record.query.split(" ")[0]
self.length = rec.query_letters
if self.length < self.min_size:
if self.verbose: print 'Does not meet the minimum length %d' % self.min_size
break
if is_psiblast: rec = rec.rounds[-1]
# each alignment is one potential hit
for n, alignment in enumerate(rec.alignments):
# to make it easy, skip all alignments with multiple HSPS
hsp = alignment.hsps[0]
alnlength=hsp.align_length
hit = alignment.title
#targetlength = alignment.length
#m = re.search("sp\|([A-Z0-9]+)\|([A-Z0-9_]+) ?(.+)?", alignment.title)
m = re.search("sp\|(.+?)\|(.+?) (.+)?", alignment.title)
if m: # pyphynr blast result
hit_sp_ac = m.group(1)
hit_sp_id = m.group(2)
hit_sp_note = m.group(3)
elif alignment.title[0] == '>': # result from qadditional blast databases
hit_sp_ac = None
hit_sp_id = alignment.title[1:].split()[0]
hit_sp_note = None
else:
hit_sp_ac = None
hit_sp_id = None
hit_sp_note = None
# fix annoying dots in ids
if hit_sp_ac: hit_sp_ac = hit_sp_ac.replace('.','_')
if hit_sp_id: hit_sp_id = hit_sp_id.replace('.','_')
#if not hit_sp_id: print 'XXXXXXX', alignment.title
if self.verbose: print hit_sp_id,
similarity = hsp.positives[0]/float(hsp.positives[1])*100
if float(hsp.expect) < int(self.HSP_max_evalue):
if float(similarity) > int(self.HSP_minimal_positives):
coverage = hsp.positives[1]/float(self.length)*100
if float(coverage) > int(self.HSP_minimal_coverage):
#targetcoverage = hsp.positives[1]/float(targetlength)*100
#if float(targetcoverage) > int(self.HSP_minimal_targetcov):
#self.compatibles.append((hit_sp_ac, hit))
#hitlist = [hit_sp_id, n+1 , hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.positives[1]/float(targetlength)*100, hsp.score, hsp.expect]
hitlist = [hit_sp_id, hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.score, hsp.expect]
                            if self.cB: self.createblastDict(self.query, hitlist)
self.output.write("%s\t" % (self.query)),
for element in hitlist:
self.output.write("%s\t" % element),
self.output.write("\n")
self.output.close()
handle.close()
return None
def ReadBlastresultsTab(self, filename):
if filename[-3:] == '.gz':
fh = gzip.open(filename)
else:
fh= open(filename,"rU")
#hitsdict={}
#hitlist = [hit_sp_id, n+1 , hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.score, hsp.expect]
self.blastDict={}
self.selfhits=[]
self.read_lengths()
        output = open(os.path.basename(filename) + ".blastdecide", "w")
#lines=fh.readlines()
for line in fh:
line = line.strip("\n")
if len(line.split("\t")) > 2:
query = line.split("\t")[0]
#print query ###################################################################################
query_name = query.split(" ")[0] # OLD WAY #
hit_sp_id = line.split("\t")[1] # command = "grep -m 1 \"^>" + query_name + "\" " + "HKMN.cdhit.faa.id_length" #
percent_id = float(line.split("\t")[2]) # fout2 = os.popen(command) #
aln_len=float(line.split("\t")[3]) # out2 = fout2.readline().strip("\n") #
query_length=self.lendict[query_name] # if out2: #
coverage = 100*int(aln_len)/float(query_length) # query_length=out2.split("\t")[1] #
bitscore = float(line.split("\t")[11]) ###################################################################################
evalue = float(line.split("\t")[10])
if str(query_name) == str(hit_sp_id):
#print "SameSameSame"
self.selfhits.append(query)
else:
if float(evalue) < int(self.HSP_max_evalue):
if float(percent_id) > int(self.HSP_minimal_positives):
if float(coverage) > int(self.HSP_minimal_coverage):
hitlist=[hit_sp_id, percent_id, coverage, bitscore, evalue]
if self.cB: self.createblastDict(query,hitlist)
output.write("%s\t" % (query_name)),
for element in hitlist:
output.write("%s\t" % element),
output.write("\n")
output.close()
fh.close()
def ReadBlastdecide(self, filename):
#hitsdict={}
#hitlist = [hit_sp_id, n+1 , hsp.positives[0]/float(hsp.positives[1])*100, hsp.positives[1]/float(self.length)*100, hsp.score, hsp.expect]
        fh = open(filename, "rU")
        lines = fh.readlines()
        # Initialise the accumulators here, as the other readers do.
        self.blastDict = {}
        self.selfhits = []
        output = open(os.path.basename(filename) + ".new.blastdecide", "w")
for line in lines:
line = line.strip("\n")
if len(line.split("\t")) > 2:
query = line.split("\t")[0]
hit_sp_id = line.split("\t")[1]
#n=float(line.split("\t")[2])
percent_id = float(line.split("\t")[2])
coverage = float(line.split("\t")[3])
#targetcoverage = float(line.split("\t")[5])
bitscore = float(line.split("\t")[4])
evalue = float(line.split("\t")[5])
                if str(query) == str(hit_sp_id):
#print "SameSameSame"
self.selfhits.append(query)
else:
if float(evalue) < int(self.HSP_max_evalue):
if float(percent_id) > int(self.HSP_minimal_positives):
if float(coverage) > int(self.HSP_minimal_coverage):
hitlist=[hit_sp_id, percent_id, coverage, bitscore, evalue]
if self.cB == 'savenew':
self.createblastDict(query,hitlist)
self.writeoutput(output,query,hitlist)
else:
self.createblastDict(query,hitlist)
output.close()
        if self.cB != 'savenew' and os.path.getsize(os.path.basename(filename) + ".new.blastdecide") == 0:
            os.system("rm " + os.path.basename(filename) + ".new.blastdecide")
fh.close()
    def writeoutput(self, oh, query, hitlist):
        # Renamed from writeoutputt and fixed to write through its own parameters.
        oh.write("%s\t" % (query))
        for element in hitlist:
            oh.write("%s\t" % element),
        oh.write("\n")
    def createblastDict(self, query, hitlist):
        # Do not reset self.blastDict / self.selfhits here: the read methods
        # initialise them once, and clearing them on every call would keep only
        # the most recent query's hits.
        hit_sp_id = hitlist[0]
        if str(query) != str(hit_sp_id):
#hitlist=[hit_sp_id, n, percent_id, coverage,targetcoverage, bitscore,evalue]
#hitlist=[hit_sp_id, percent_id, coverage, bitscore,evalue]
if query in self.blastDict:
self.blastDict[query].append(hitlist)
else:
self.blastDict[query] = [hitlist]
def mainthing(self):
self.HSP_minimal_positives = self.opts.s
self.HSP_minimal_coverage = self.opts.qc
#self.HSP_minimal_targetcov = self.opts.tc
self.HSP_minimal_coverage_length = 20
self.lenfile= self.opts.l
self.HSP_max_evalue = self.opts.e
self.verbose = self.opts.v
self.min_size = 0
self.cB = self.opts.cb
if self.opts.bd:
blastdecide=self.opts.bd
self.ReadBlastdecide(blastdecide)
elif self.opts.bt:
blasttab = self.opts.bt
            self.ReadBlastresultsTab(blasttab)
else:
try:
blastfile = self.opts.bo[0]
typ = self.opts.bo[1]
if typ == "psiblast":
self.ReadBlast(blastfile, is_psiblast=True)
else:
self.ReadBlast(blastfile)
except:
raise IOError('If you dont have Pre-made blastdecide or ublast-tab results, you should provide a normal blast output (-m0)')
timeused = (time.time() - self.start) / 60
        if self.verbose: #prints end-time
print "Time used: "+str(round(timeused*60)) + " seconds ("+str(round(timeused)) + " min)\n"
if __name__ == '__main__':
try:
obj = BlastDecision()
obj.opts=obj.parser.parse_args(sys.argv[1:])
obj.mainthing()
if obj.opts.v:
obj.parser.print_help()
except Exception,e:
print str(e)
# print sys.stderr
sys.exit()
###############
# INPUT LIST
# blast output in tab format & query lengths file : genecatalogue_vs_uniprot.blasttab OR genecatalogue_vs_genecatalogue.blasttab & genecatalogue.lengths
# blast output in -m 0 format : genecatalogue_vs_uniprot.blastout OR genecatalogue_vs_genecatalogue.blastout
# pre-made blastdecide file : genecatalogue_vs_uniprot.blastdecide
#
# OUTPUT LIST
# new blastdecide file based on given parameters : genecatalogue_vs_uniprot.blastdecide
# if premade blastdecide is given, the blastDict is generated : obj.blastDict
#
# OPTIONS LIST
# '-bd', '--blastdecide', help="pre-made blastdecide output file"
# '-bt', '--blasttab', help="blast tabular output file"
# '-bo', '--blastout', help="blast -m 0 output file and its type ('blast' or 'psiblast')"
# '-l', '--lengths', help="Query lengths file"
# '-s', '--similarity', default= "50", help="minimum similarity cutoff"
# '-qc', '--querycoverage',default= "50", help="minimum query coverage cutoff"
# '-tc', '--targetcoverage', help="minimum target coverage cutoff"
# '-e', '--maxevalue', default= "1e-10" , help="evalue cutoff"
#
#
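#
# EXAMPLE INVOCATIONS (file names are illustrative assumptions, not shipped data)
# python blastdecide_ublast.dbg.py -bt genecatalogue_vs_uniprot.blasttab -l genecatalogue.lengths -s 50 -qc 50 -e 1e-10 -v
# python blastdecide_ublast.dbg.py -bo genecatalogue_vs_uniprot.blastout blast -e 1e-10
# python blastdecide_ublast.dbg.py -bd genecatalogue_vs_uniprot.blastdecide -cb savenew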
|
MG-group-tools/MGFunc
|
others/blastdecide_ublast.dbg.py
|
Python
|
gpl-3.0
| 13,354
|
import shutil
from PyQt5.QtCore import QThread, pyqtSignal
from requests import get
from os import path, makedirs, remove, listdir
from sys import platform
from zipfile import ZipFile
from GUI.Downloader import FileDownloaderWindow
from GUI.Strings import Strings
from API.CurseAPI import CurseAPI
from Utils.Utils import parseSemanticVersion, getInstallDir, msgBox
strings = Strings()
translate = strings.get
class UpdateCheckThread(QThread):
done = pyqtSignal(dict, name="done")
def __init__(self, curse: CurseAPI):
super().__init__()
self.curse = curse
def check_updates(self):
ver = parseSemanticVersion(self.curse.version)
vers = get("https://openminemods.digitalfishfun.com/versions.json").json()
latest = parseSemanticVersion(vers["latest_stable"])
if latest > ver:
self.done.emit({
"res": True,
"update": vers["versions"][vers["latest_stable"]],
"ver": vers["latest_stable"]
})
return
self.done.emit({"res": False})
class Update:
def __init__(self, curse: CurseAPI, update: dict):
self.curse = curse
self.update = update
self.dlwin = None
self.idir = None
def apply_update(self):
dl_url = self.update["downloads"][platform]
idir = getInstallDir()
self.idir = idir
self.dlwin = FileDownloaderWindow(dl_url, self.curse, path.dirname(idir), "omm-update.zip", self.zip_downloaded)
def zip_downloaded(self):
idir = self.idir
odir = path.dirname(idir)
makedirs(idir + ".new")
f = ZipFile(odir + "/omm-update.zip")
f.extractall(idir + ".new")
f.close()
remove(odir + "/omm-update.zip")
shutil.move(idir, idir + ".old")
shutil.move(idir + ".new/" + listdir(idir + ".new/")[0], idir)
shutil.rmtree(idir + ".new")
msgBox(text=translate("prompt.update.restart"))
|
Brain888/OpenMineMods
|
Utils/Updater.py
|
Python
|
agpl-3.0
| 2,000
|
from listeners import *
|
natea/django-lfc
|
lfc/__init__.py
|
Python
|
bsd-3-clause
| 23
|
from textwrap import dedent
from typing import Dict, List
import pytest
import gen
from gen.tests.utils import make_arguments, true_false_msg, validate_error
class TestAdminRouterTLSConfig:
"""
Tests for the Admin Router TLS Config creation.
"""
@pytest.mark.parametrize(
'adminrouter_tls_1_0_enabled, tls_versions',
[('true', 'TLSv1 TLSv1.1 TLSv1.2'), ('false', 'TLSv1.1 TLSv1.2')]
)
def test_master(self, adminrouter_tls_1_0_enabled, tls_versions):
"""
Test that Master Admin Router config file has the correct content.
"""
config_path = '/etc/adminrouter-tls-master.conf'
arguments = make_arguments({
'adminrouter_tls_1_0_enabled': adminrouter_tls_1_0_enabled,
})
generated = gen.generate(arguments=arguments)
package = generated.templates['dcos-config.yaml']['package']
[config] = [item for item in package if item['path'] == config_path]
expected_configuration = dedent(
"""\
# Ref: https://github.com/cloudflare/sslconfig/blob/master/conf
# Modulo ChaCha20 cipher.
ssl_ciphers EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
ssl_prefer_server_ciphers on;
# To manually test which TLS versions are enabled on a node, use
# `openssl` commands.
#
# See comments on https://jira.mesosphere.com/browse/DCOS-13437 for more
# details.
ssl_protocols {tls_versions};
""".format(tls_versions=tls_versions)
)
assert config['content'] == expected_configuration
@pytest.mark.parametrize('adminrouter_tls_1_0_enabled', ['true', 'false'])
def test_agent(self, adminrouter_tls_1_0_enabled):
"""
Test that Agent Admin Router config file has the correct content.
"""
config_path = '/etc/adminrouter-tls-agent.conf'
arguments = make_arguments(new_arguments={
'adminrouter_tls_1_0_enabled': adminrouter_tls_1_0_enabled,
})
generated = gen.generate(arguments=arguments)
package = generated.templates['dcos-config.yaml']['package']
[config] = [item for item in package if item['path'] == config_path]
expected_configuration = dedent(
"""\
# Note that Agent Admin Router only serves cluster-internal clients. Hence,
# browser compatibility is not a criterion for the TLS cipher suite selection.
ssl_ciphers EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:!MD5;
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1.2;
"""
)
assert config['content'] == expected_configuration
class TestSetCipherOverride:
"""
Tests for setting ssl_ciphers
To test manually, either use openssl commands or sslscan
[https://github.com/rbsec/sslscan]
"""
def supported_ssl_ciphers(
self,
new_config_arguments: Dict[str, str],
config_path: str) -> List[str]:
"""
Finds the line that looks like:
ssl_ciphers EECDH+AES256:RSA+AES256:EECDH+AES128:RSA+AES128:EECDH+3DES:RSA+3DES:!MD5;
and returns the list of ciphers.
Args:
new_config_arguments: Arguments which are added to the 'standard'
set of arguments before generating configuration files.
config_path: A path to configuration file which should be examined
for ssl_ciphers configuration.
"""
arguments = make_arguments(new_arguments=new_config_arguments)
generated = gen.generate(arguments=arguments)
package = generated.templates['dcos-config.yaml']['package']
[config] = [item for item in package if item['path'] == config_path]
[ssl_ciphers_line] = [
line for line in config['content'].split('\n') if
# We strip whitespace from the beginning of the line as NGINX
# configuration lines can start with whitespace.
line.lstrip().startswith('ssl_ciphers ')
]
ssl_ciphers_line = ssl_ciphers_line.strip(';')
ciphers = ssl_ciphers_line.split()[1:]
return ciphers
def supported_ssl_ciphers_master(
self,
new_config_arguments: Dict[str, str]) -> List[str]:
"""
Finds the line that looks like:
ssl_ciphers EECDH+AES256:RSA+AES256:EECDH+AES128:RSA+AES128:EECDH+3DES:RSA+3DES:!MD5;
and returns the list of ciphers.
Args:
new_config_arguments: Arguments which are added to the 'standard'
set of arguments before generating configuration files.
"""
config_path = '/etc/adminrouter-tls-master.conf'
return self.supported_ssl_ciphers(new_config_arguments, config_path)
def supported_ssl_ciphers_agent(
self,
new_config_arguments: Dict[str, str]) -> List[str]:
"""
Finds the line that looks like:
ssl_ciphers EECDH+AES256:RSA+AES256;
and returns the list of ciphers.
Args:
new_config_arguments: Arguments which are added to the 'standard'
set of arguments before generating configuration files.
"""
config_path = '/etc/adminrouter-tls-agent.conf'
return self.supported_ssl_ciphers(new_config_arguments, config_path)
def test_cipher_agent_default(self):
"""
The config variable adminrouter_external_cipher_string should not impact internal traffic.
"""
new_arguments = {'adminrouter_external_cipher_override': 'false'}
ciphers = self.supported_ssl_ciphers_agent(
new_config_arguments=new_arguments,
)
assert ciphers == ['EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:!MD5']
def test_cipher_master_default(self):
"""
The config variable adminrouter_external_cipher_string must not be set.
"""
new_arguments = {'adminrouter_external_cipher_string': ''}
ciphers = self.supported_ssl_ciphers_master(
new_config_arguments=new_arguments,
)
assert ciphers == ['EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5']
def test_cipher_master_custom(self):
"""
The config variable adminrouter_external_cipher_string must be set
"""
new_arguments = {'adminrouter_tls_cipher_suite': 'EECDH+AES128:RSA+AES128'}
ciphers = self.supported_ssl_ciphers_master(
new_config_arguments=new_arguments,
)
assert ciphers == ['EECDH+AES128:RSA+AES128']
class TestToggleTLSVersions:
"""
Tests for toggling TLS 1.0/1.1.
To manually test that this is, in fact, a working toggle for TLS 1.0/1.1, use
`openssl` commands.
See comments on https://jira.mesosphere.com/browse/DCOS-13437 for more
details.
"""
def supported_tls_protocols_ar_master(
self, new_config_arguments: Dict[str, str]) -> List[str]:
"""
This finds a line which looks like the following:
ssl_protocols TLSv1, TLSv1.1;
in the Admin Router TLS configuration.
It then returns the listed protocols.
Args:
new_config_arguments: Arguments which are added to the 'standard'
set of arguments before generating configuration files.
Returns:
A list of supported SSL protocols.
"""
arguments = make_arguments(new_arguments=new_config_arguments)
generated = gen.generate(arguments=arguments)
package = generated.templates['dcos-config.yaml']['package']
config_path = '/etc/adminrouter-tls-master.conf'
[config] = [item for item in package if item['path'] == config_path]
[ssl_protocols_line] = [
line for line in config['content'].split('\n') if
# We strip whitespace from the beginning of the line as NGINX
# configuration lines can start with whitespace.
line.lstrip().startswith('ssl_protocols ')
]
ssl_protocols_line = ssl_protocols_line.strip(';')
protocols = ssl_protocols_line.split()[1:]
return protocols
def test_validation_1_0(self):
"""
The config variable `tls_1_0_enabled` must be 'true' or 'false'.
"""
validate_error(
new_arguments={'adminrouter_tls_1_0_enabled': 'foo'},
key='adminrouter_tls_1_0_enabled',
message=true_false_msg,
)
def test_validation_1_1(self):
"""
The config variable `tls_1_1_enabled` must be 'true' or 'false'.
"""
validate_error(
new_arguments={'adminrouter_tls_1_1_enabled': 'foo'},
key='adminrouter_tls_1_1_enabled',
message=true_false_msg,
)
def test_validation_1_2(self):
"""
The config variable `tls_1_2_enabled` must be 'true' or 'false'.
"""
validate_error(
new_arguments={'adminrouter_tls_1_2_enabled': 'foo'},
key='adminrouter_tls_1_2_enabled',
message=true_false_msg,
)
@pytest.mark.parametrize(
'new_arguments', [{}, {'adminrouter_tls_1_0_enabled': 'false'}]
)
def test_default(self, new_arguments):
"""
By default TLS 1.0 is disabled, and therefore by default the config
variable is set to 'false'.
This test is parametrized to demonstrate that having no configuration
produces the same results as setting the config variable to `'false'`.
"""
protocols = self.supported_tls_protocols_ar_master(
new_config_arguments=new_arguments,
)
assert protocols == ['TLSv1.1', 'TLSv1.2']
@pytest.mark.parametrize(
'enabled,expected_protocols', [
(('false', 'false', 'true'), ['TLSv1.2']),
(('false', 'true', 'true'), ['TLSv1.1', 'TLSv1.2']),
(('true', 'true', 'true'), ['TLSv1', 'TLSv1.1', 'TLSv1.2']),
(('true', 'false', 'true'), ['TLSv1', 'TLSv1.2']),
(('true', 'false', 'false'), ['TLSv1']),
(('false', 'true', 'false'), ['TLSv1.1']),
]
)
def test_enable_custom_tls_versions(self, enabled, expected_protocols):
new_arguments = {'adminrouter_tls_1_0_enabled': enabled[0],
'adminrouter_tls_1_1_enabled': enabled[1],
'adminrouter_tls_1_2_enabled': enabled[2]}
protocols = self.supported_tls_protocols_ar_master(
new_config_arguments=new_arguments,
)
assert protocols == expected_protocols
def test_no_tls_version_enabled(self):
"""
Not setting the `adminrouter_tls_version_override` or any of the
TLS version configuration options results in error.
"""
new_arguments = {'adminrouter_tls_1_0_enabled': 'false',
'adminrouter_tls_1_1_enabled': 'false',
'adminrouter_tls_1_2_enabled': 'false'}
expected_error_msg = (
'At least one of adminrouter_tls_1_0_enabled, '
'adminrouter_tls_1_1_enabled and adminrouter_tls_1_2_enabled must '
"be set to 'true'."
)
result = gen.validate(arguments=make_arguments(new_arguments))
assert result['status'] == 'errors'
key = 'adminrouter_tls_1_2_enabled'
assert result['errors'][key]['message'] == expected_error_msg
|
branden/dcos
|
gen/tests/test_adminrouter_tls_conf.py
|
Python
|
apache-2.0
| 11,615
|
# Copyright 2019 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from datetime import date
import pytest
from ..testutil import eq_, with_app
from ...model.currency import Currencies
from ..base import TestApp
# --- Pristine
def test_can_load():
# When there's no selection, we don't raise a panel
app = TestApp()
assert app.mw.edit_item() is None # no panel returned
# --- One Entry
def app_one_entry():
app = TestApp()
app.add_account()
app.show_account()
app.add_entry(date='06/07/2008', description='description', payee='payee', checkno='42')
return app
def test_add_cancel_then_load():
# when loading the tpanel right after cancelling a txn addition, the wrong txn is loaded
app = app_one_entry()
app.etable.add()
app.etable.cancel_edits()
tpanel = app.mw.edit_item()
eq_(tpanel.description, 'description')
def test_buffer():
# tpanel's edition is buffered.
app = app_one_entry()
tpanel = app.mw.edit_item()
tpanel.date = '07/07/2008'
tpanel.description = 'foo'
tpanel.payee = 'bar'
tpanel.checkno = 'baz'
tpanel = app.mw.edit_item()
eq_(tpanel.date, '06/07/2008')
eq_(tpanel.description, 'description')
eq_(tpanel.payee, 'payee')
eq_(tpanel.checkno, '42')
def test_can_load_selected_transaction():
# Whether load() is possible is based on the last selection of either the etable or the ttable
app = app_one_entry()
app.etable.select([])
app.show_tview()
app.ttable.select([0])
app.mw.edit_item() # no crash
def test_load_refreshes_mct_button():
# loading the panel refreshes the mct button
app = app_one_entry()
tpanel = app.mw.edit_item()
tpanel.view.check_gui_calls_partial(['refresh_for_multi_currency'])
def test_load_while_etable_is_editing():
# loading the tpanel while etable is editing aborts the edits and stops
# editing mode.
app = app_one_entry()
app.etable.add()
row = app.etable.edited
row.date = '07/07/2008'
app.clear_gui_calls()
app.mw.edit_item()
assert app.etable.edited is None
eq_(app.etable_count(), 1)
app.etable.view.check_gui_calls_partial(['stop_editing'])
def test_load_while_ttable_is_editing():
# loading the tpanel while ttable is editing aborts the edits and stops
# editing mode.
app = app_one_entry()
app.show_tview()
app.ttable.add()
row = app.ttable.edited
row.date = '07/07/2008'
app.clear_gui_calls()
app.mw.edit_item()
assert app.ttable.edited is None
eq_(app.ttable.row_count, 1)
app.ttable.view.check_gui_calls_partial(['stop_editing'])
def test_values():
# The values of the panel are correct.
app = app_one_entry()
tpanel = app.mw.edit_item()
eq_(tpanel.date, '06/07/2008')
eq_(tpanel.description, 'description')
eq_(tpanel.payee, 'payee')
eq_(tpanel.checkno, '42')
def test_values_after_deselect():
# When there is no selection, load() is not possible
app = app_one_entry()
app.etable.select([])
assert app.mw.edit_item() is None # no panel returned
# --- Amountless Entry Panel Loaded
def app_amountless_entry_panel_loaded():
app = TestApp()
app.add_account()
app.show_account()
app.add_entry(date='06/07/2008', description='description', payee='payee', checkno='42')
app.show_tview()
app.ttable.select([0])
app.mw.edit_item()
app.clear_gui_calls()
return app
# --- Entry With Amount Panel Loaded
def app_entry_with_amount_panel_loaded():
app = TestApp()
app.add_account()
app.show_account()
app.add_entry(date='06/07/2008', description='description', increase='42')
app.show_tview()
app.ttable.select([0])
app.mw.edit_item()
app.clear_gui_calls()
return app
def test_change_date():
# Changing the date no longer calls refresh_repeat_options() on the view (this stuff is now
# in schedules)
app = app_entry_with_amount_panel_loaded()
tpanel = app.get_current_panel()
tpanel.date = '17/07/2008'
tpanel.view.check_gui_calls_partial(not_expected=['refresh_repeat_options'])
# --- Two Amountless Entries
def app_two_amountless_entries():
app = TestApp()
app.add_account()
app.show_account()
app.add_entry(date='06/07/2008', description='desc1', payee='payee1', checkno='42')
app.add_entry(date='07/07/2008', description='desc2', payee='payee2', checkno='43')
return app
def test_loads_last_selected_transaction():
# the tpanel also works with the ttable. If the ttable is the last to have had a selection,
# tpanel loads this one.
app = app_two_amountless_entries()
app.show_tview()
app.ttable.select([0]) # etable has index 1 selected
tpanel = app.mw.edit_item()
eq_(tpanel.description, 'desc1')
@pytest.mark.parametrize(
'attrname, newvalue, othervalue', [
('date', '08/07/2008', '06/07/2008'),
('description', 'new', 'desc1'),
('payee', 'new', 'payee1'),
('checkno', '44', '42'),
('notes', 'foo\nbar', ''),
])
def test_set_values(attrname, newvalue, othervalue):
# the load/save mechanism works for all attributes.
# The reason why we select another entry is to make sure that the value we're testing isn't
# simply a buffer in the gui layer.
app = app_two_amountless_entries()
tpanel = app.mw.edit_item()
setattr(tpanel, attrname, newvalue)
tpanel.save()
app.etable.select([0])
tpanel = app.mw.edit_item()
eq_(getattr(tpanel, attrname), othervalue)
app.etable.select([1])
tpanel = app.mw.edit_item()
eq_(getattr(tpanel, attrname), newvalue)
# --- Multi-Currency Transaction
def app_multi_currency_transaction():
app = TestApp()
Currencies.get_rates_db().set_CAD_value(date(2008, 1, 1), 'USD', 0.8)
splits = [
('first', '', '', '44 usd'),
('second', '', '42 cad', ''),
]
app.add_txn_with_splits(splits)
tpanel = app.mw.edit_item()
stable = tpanel.split_table
stable.select([1])
app.clear_gui_calls()
return app
def test_mct_balance():
# a mct balance takes the "lowest side" of the transaction and adds a split with the
# difference on that side. For this example, the usd side is the weakest side (if they were
# equal, it would be 52.50 usd).
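    # Worked numbers (assuming the 0.8 rate above means 1 USD = 0.80 CAD, which is
    # consistent with the 52.50 USD figure): the 44 USD split converts to
    # 44 * 0.80 = 35.20 CAD against 42 CAD on the other side, so the balancing
    # split is 42.00 - 35.20 = CAD 6.80.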
app = app_multi_currency_transaction()
tpanel = app.get_current_panel()
stable = tpanel.split_table
tpanel.mct_balance()
eq_(len(stable), 3)
eq_(stable[2].credit, 'CAD 6.80') # the selected split is the 2nd one
stable.view.check_gui_calls_partial(['refresh'])
@with_app(app_multi_currency_transaction)
def test_mct_balance_reuses_unassigned_split(app):
# mct balance reuses unassigned split if available
tpanel = app.get_current_panel()
stable = tpanel.split_table
stable.add()
stable[2].credit = '1 cad'
stable.save_edits()
tpanel.mct_balance()
eq_(len(stable), 3)
eq_(stable[2].credit, 'CAD 6.80')
def test_mct_balance_select_null_split():
# if the selected split has no amount, use the default currency
app = app_multi_currency_transaction()
tpanel = app.get_current_panel()
stable = tpanel.split_table
stable.add()
tpanel.mct_balance()
eq_(stable[2].credit, '8.50') # the newly added split is re-used
def test_mct_balance_select_usd_split():
# the currency of the new split is the currency of the selected split
app = app_multi_currency_transaction()
tpanel = app.get_current_panel()
stable = tpanel.split_table
stable.select([0])
tpanel.mct_balance()
eq_(stable[2].credit, '8.50')
def test_mct_balance_twice():
# if there is nothing to balance, don't add anything.
app = app_multi_currency_transaction()
tpanel = app.get_current_panel()
stable = tpanel.split_table
tpanel.mct_balance()
tpanel.mct_balance()
eq_(len(stable), 3)
def test_stop_edition_on_mct_balance():
# edition must stop before mct balance or else we end up with a crash
app = app_multi_currency_transaction()
tpanel = app.get_current_panel()
stable = tpanel.split_table
stable[1].account = 'foo'
tpanel.mct_balance()
stable.view.check_gui_calls_partial(['stop_editing'])
@with_app(app_multi_currency_transaction)
def test_mct_assign_imbalance_assigns_only_same_currency(app):
# When doing Assign imbalance in an MCT context, assign only imbalance in the same currency
tpanel = app.get_current_panel()
stable = tpanel.split_table
stable.add()
stable[2].debit = '1 cad'
stable.save_edits()
stable.add()
stable[3].credit = '2 usd'
stable.save_edits()
stable.select(0)
tpanel.assign_imbalance() # no crash
eq_(stable[0].credit, '46.00')
@with_app(app_multi_currency_transaction)
def test_mct_assign_imbalance_zero_amount_selected(app):
# When doing Assign imbalance in an MCT context with a 0 amount selected, use whichever
# unassigned split comes first as the base currency.
tpanel = app.get_current_panel()
stable = tpanel.split_table
stable.add()
stable[2].debit = '1 cad'
stable.save_edits()
stable.add()
stable[3].credit = '2 usd'
stable.save_edits()
stable.add()
stable[4].account = 'whatever'
stable.save_edits()
stable.select(4)
tpanel.assign_imbalance() # no crash
# CAD split is the first, and the split was deleted so our new index is 3
eq_(len(stable), 4)
eq_(stable[3].debit, 'CAD 1.00')
# --- Unassigned split
def app_with_unassigned_split():
app = TestApp()
splits = [
('account1', '', '42', ''),
('account2', '', '', '42'),
('account3', '', '15', ''),
]
app.add_txn_with_splits(splits=splits, date='07/11/2014')
return app
@with_app(app_with_unassigned_split)
def test_assign_imbalance_same_side(app):
# When triggering Assign imbalance with a split on the "same side" as unassigned splits, we add
# the value to it.
tpanel = app.mw.edit_item()
stable = tpanel.split_table
stable.select(1)
tpanel.assign_imbalance()
eq_(stable[1].credit, '57.00')
@with_app(app_with_unassigned_split)
def test_assign_imbalance_other_side(app):
# When triggering Assign imbalance with a split on the "other side" as unassigned splits, we subtract
# the value to it.
tpanel = app.mw.edit_item()
stable = tpanel.split_table
stable.select(0)
tpanel.assign_imbalance()
eq_(stable[0].debit, '27.00')
@with_app(app_with_unassigned_split)
def test_assign_imbalance_unassigned_selected(app):
# When triggering Assign imbalance with an unassigned split, nothing happens.
tpanel = app.mw.edit_item()
stable = tpanel.split_table
stable.select(3)
tpanel.assign_imbalance()
eq_(stable[3].credit, '15.00')
@with_app(app_with_unassigned_split)
def test_assign_imbalance_nothing_selected(app):
# When triggering Assign imbalance with no selected split, nothing happens.
tpanel = app.mw.edit_item()
stable = tpanel.split_table
stable.select([])
tpanel.assign_imbalance() # no crash
eq_(stable[3].credit, '15.00')
@pytest.mark.parametrize(
'setupfunc, expected', [
# doesn't crash if there is no split with amounts
(app_amountless_entry_panel_loaded, False),
# the mct balance button is enabled if the txn is a MCT
(app_entry_with_amount_panel_loaded, False),
# the mct balance button is enabled if the txn is a MCT
(app_multi_currency_transaction, True),
])
def test_is_multi_currency(setupfunc, expected):
app = setupfunc()
tpanel = app.mw.edit_item()
eq_(tpanel.is_multi_currency, expected)
|
hsoft/moneyguru
|
core/tests/gui/transaction_panel_test.py
|
Python
|
gpl-3.0
| 11,860
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import datetime
from random import randint
import sqlalchemy as sa
import structlog
from flask import Blueprint
from flask import current_app
from flask import redirect
from flask import url_for
from relengapi.blueprints.archiver import tables
from relengapi.blueprints.archiver.tasks import TASK_TIME_OUT
from relengapi.blueprints.archiver.tasks import create_and_upload_archive
from relengapi.blueprints.archiver.types import MozharnessArchiveTask
from relengapi.lib import api
from relengapi.lib import badpenny
from relengapi.lib.time import now
bp = Blueprint('archiver', __name__)
logger = structlog.get_logger()
GET_EXPIRES_IN = 300
PENDING_EXPIRES_IN = 60
FINISHED_STATES = ['SUCCESS', 'FAILURE', 'REVOKED']
def delete_tracker(tracker):
session = current_app.db.session(tables.DB_DECLARATIVE_BASE)
logger.info("deleting tracker with id: {}".format(tracker.task_id),
archiver_task=tracker.task_id)
session.delete(tracker)
session.commit()
def update_tracker_state(tracker, state):
session = current_app.db.session(tables.DB_DECLARATIVE_BASE)
logger.info("updating tracker with id: {} to state: {}".format(tracker.id, state),
archiver_task=tracker.task_id, archiver_task_state=state)
try:
tracker.state = state
session.commit()
except sa.exc.IntegrityError:
session.rollback()
@badpenny.periodic_task(seconds=TASK_TIME_OUT)
def cleanup_old_tasks(job_status):
"""delete any tracker task if it is older than the time a task can live for."""
session = current_app.db.session(tables.DB_DECLARATIVE_BASE)
expiry_cutoff = now() - datetime.timedelta(seconds=TASK_TIME_OUT)
table = tables.ArchiverTask
for tracker in session.query(table).order_by(table.created_at):
if tracker.created_at < expiry_cutoff:
delete_tracker(tracker)
else:
break
def renew_tracker_pending_expiry(tracker):
pending_expires_at = now() + datetime.timedelta(seconds=PENDING_EXPIRES_IN)
session = current_app.db.session(tables.DB_DECLARATIVE_BASE)
logger.info("renewing tracker {} with pending expiry: {}".format(
tracker.id, pending_expires_at), archiver_task=tracker.task_id)
tracker.pending_expires_at = pending_expires_at
session.commit()
@bp.route('/status/<task_id>')
@api.apimethod(MozharnessArchiveTask, unicode)
def task_status(task_id):
"""
Check and return the current state of the create_and_upload_archive celery task with task id
of <task_id>.
If the task is unknown, state will be PENDING. Once the task starts it will be updated to
STARTED and finally, if it completes, it will be either SUCCESS (no exceptions), or FAILURE.
See update_state() within create_and_upload_archive and
http://celery.readthedocs.org/en/latest/reference/celery.states.html for more details.
If state is SUCCESS, it is safe to check response['s3_urls'] for the archives submitted to s3
"""
task = create_and_upload_archive.AsyncResult(task_id)
task_tracker = tables.ArchiverTask.query.filter(tables.ArchiverTask.task_id == task_id).first()
log = logger.bind(archiver_task=task_id, archiver_task_state=task.state)
log.info("checking status of task id {}: current state {}".format(task_id, task.state))
task_info = task.info or {}
response = {
'state': task.state,
}
if task.state != 'FAILURE':
response['status'] = task_info.get('status', 'no status available at this point.')
response['src_url'] = task_info.get('src_url', '')
response['s3_urls'] = task_info.get('s3_urls', {})
else:
# something went wrong
response['status'] = str(task.info) # this is the exception raised
response['src_url'] = ''
response['s3_urls'] = {}
# archiver does not create any custom states, so we can assume to have only the defaults:
# http://docs.celeryproject.org/en/latest/userguide/tasks.html#task-states
# therefore, delete our state_id tracker from the db if the celery state is in a final state:
# e.g. not RETRY, STARTED, or PENDING
if task_tracker:
if task.state in FINISHED_STATES:
delete_tracker(task_tracker)
elif task.state == "PENDING" and task_tracker.pending_expires_at < now():
log.info("Task {} has expired from pending too long. Re-creating task".format(task.id))
renew_tracker_pending_expiry(task_tracker) # let exceptions bubble up before moving on
create_and_upload_archive.apply_async(args=[task_tracker.src_url, task_tracker.s3_key],
task_id=task.id)
response['state'] = 'RETRY'
response['status'] = 'Task has expired from pending for too long. Re-creating task.'
elif task_tracker.state != task.state:
update_tracker_state(task_tracker, task.state)
return MozharnessArchiveTask(**response)
@bp.route('/hgmo/<path:repo>/<rev>')
@api.apimethod(None, unicode, unicode, unicode, unicode, unicode, status_code=302)
def get_hgmo_archive(repo, rev, subdir=None, suffix='tar.gz', preferred_region=None):
"""
An archiver for hg.mozilla.org related requests. Uses relengapi.blueprints.archiver.get_archive
:param repo: the repo location off of hg.mozilla.org/
:param rev: the rev associated with the repo
:param subdir: optional subdir path to only archive a portion of the repo
    :param suffix: the archive extension type. Defaults to tar.gz
:param preferred_region: the preferred s3 region to use
"""
# allow for the short hash and full hash to be passed
rev = rev[0:12]
src_url = current_app.config['ARCHIVER_HGMO_URL_TEMPLATE'].format(
repo=repo, rev=rev, suffix=suffix, subdir=subdir or ''
)
# though slightly odd to append the archive suffix extension with a subdir, this:
# 1) allows us to have archives based on different subdir locations from the same repo and rev
# 2) is aligned with the hg.mozilla.org format
key = '{repo}-{rev}.{suffix}'.format(repo=repo, rev=rev, suffix=suffix)
if subdir:
key += '/{}'.format(subdir)
return get_archive(src_url, key, preferred_region)
def get_archive(src_url, key, preferred_region):
"""
A generic getter for retrieving an s3 location of an archive where the archive is based off a
src_url.
sub-dir: hg.mozilla.org supports archives of sub directories within a repository. This
flexibility allows for creating archives of only a portion of what would normally be an entire
repo archive.
    logic flow:
    If there is already a key within s3, a redirect link to the s3 location is returned.
    If the key does not exist, the archive is downloaded from the src url, uploaded to s3
    for each supported region, and all uploaded s3 url locations are returned.
    When the key does not exist, the remaining work is handed off to a celery background
    task, and a url for polling the task state is returned immediately.
"""
buckets = current_app.config['ARCHIVER_S3_BUCKETS']
random_region = buckets.keys()[randint(0, len(buckets.keys()) - 1)]
# use preferred region if available otherwise choose a valid one at random
region = preferred_region if preferred_region and preferred_region in buckets else random_region
bucket = buckets[region]
s3 = current_app.aws.connect_to('s3', region)
session = current_app.db.session(tables.DB_DECLARATIVE_BASE)
# first, see if the key exists
if not s3.get_bucket(bucket).get_key(key):
task_id = key.replace('/', '_') # keep things simple and avoid slashes in task url
# can't use unique support:
# api.pub.build.mozilla.org/docs/development/databases/#unique-row-support-get-or-create
# because we want to know when the row doesn't exist before creating it
tracker = tables.ArchiverTask.query.filter(tables.ArchiverTask.task_id == task_id).first()
if tracker and tracker.state in FINISHED_STATES:
log = logger.bind(archiver_task=task_id, archiver_task_state=tracker.state)
log.info('Task tracker: {} exists but finished with state: '
'{}'.format(task_id, tracker.state))
# remove tracker and try celery task again
delete_tracker(tracker)
tracker = None
if not tracker:
log = logger.bind(archiver_task=task_id)
log.info("Creating new celery task and task tracker for: {}".format(task_id))
task = create_and_upload_archive.apply_async(args=[src_url, key], task_id=task_id)
if task and task.id:
pending_expires_at = now() + datetime.timedelta(seconds=PENDING_EXPIRES_IN)
session.add(tables.ArchiverTask(task_id=task.id, s3_key=key, created_at=now(),
pending_expires_at=pending_expires_at,
src_url=src_url, state="PENDING"))
session.commit()
else:
return {}, 500
return {}, 202, {'Location': url_for('archiver.task_status', task_id=task_id)}
logger.info("generating GET URL to {}, expires in {}s".format(key, GET_EXPIRES_IN))
# return 302 pointing to s3 url with archive
signed_url = s3.generate_url(
method='GET', expires_in=GET_EXPIRES_IN,
bucket=bucket, key=key
)
return redirect(signed_url)
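if __name__ == '__main__':  # pragma: no cover
    # Illustrative client sketch (editorial addition, not part of the original blueprint).
    # It shows how the endpoints above are typically consumed; the host name and the
    # repo/rev/subdir values below are placeholder assumptions, as is the '/archiver'
    # URL prefix, which depends on how the blueprint is registered.
    import requests  # assumed to be available in the client environment

    host = 'https://relengapi.example.com'  # hypothetical deployment
    resp = requests.get(host + '/archiver/hgmo/mozilla-central/abcdef123456',
                        params={'subdir': 'testing/mozharness'},
                        allow_redirects=False)
    if resp.status_code == 202:
        # Archive is still being created; poll the returned status endpoint.
        status = requests.get(host + resp.headers['Location']).json()
        print("task status payload: {}".format(status))
    elif resp.status_code == 302:
        # Archive already exists in s3; follow the signed URL.
        print("archive ready at: {}".format(resp.headers['Location']))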
|
mozilla/build-relengapi
|
relengapi/blueprints/archiver/__init__.py
|
Python
|
mpl-2.0
| 9,767
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.commands import Command, register_command
from cerbero.build.cookbook import CookBook
from cerbero.packages.packagesstore import PackagesStore
from cerbero.utils import _, N_, ArgparseArgument, remove_list_duplicates
from cerbero.utils import messages as m
class Fetch(Command):
def __init__(self, args=[]):
args.append(ArgparseArgument('--reset-rdeps', action='store_true',
default=False, help=_('reset the status of reverse '
'dependencies too')))
args.append(ArgparseArgument('--full-reset', action='store_true',
default=False, help=_('reset to extract step if rebuild is needed')))
Command.__init__(self, args)
def fetch(self, cookbook, recipes, no_deps, reset_rdeps, full_reset):
fetch_recipes = []
if not recipes:
fetch_recipes = cookbook.get_recipes_list()
elif no_deps:
fetch_recipes = [cookbook.get_recipe(x) for x in recipes]
else:
for recipe in recipes:
fetch_recipes += cookbook.list_recipe_deps(recipe)
            fetch_recipes = remove_list_duplicates(fetch_recipes)
m.message(_("Fetching the following recipes: %s") %
' '.join([x.name for x in fetch_recipes]))
to_rebuild = []
for i in range(len(fetch_recipes)):
recipe = fetch_recipes[i]
m.build_step(i + 1, len(fetch_recipes), recipe, 'Fetch')
recipe.fetch()
bv = cookbook.recipe_built_version(recipe.name)
cv = recipe.built_version()
if bv != cv:
# On different versions, only reset recipe if:
# * forced
# * OR it was fully built already
if full_reset or not cookbook.recipe_needs_build(recipe.name):
to_rebuild.append(recipe)
cookbook.reset_recipe_status(recipe.name)
if reset_rdeps:
for r in cookbook.list_recipe_reverse_deps(recipe.name):
to_rebuild.append(r)
cookbook.reset_recipe_status(r.name)
if to_rebuild:
to_rebuild = sorted(list(set(to_rebuild)), key=lambda r:r.name)
m.message(_("These recipes have been updated and will "
"be rebuilt:\n%s") %
'\n'.join([x.name for x in to_rebuild]))
class FetchRecipes(Fetch):
doc = N_('Fetch the recipes sources')
name = 'fetch'
def __init__(self):
args = [
ArgparseArgument('recipes', nargs='*',
help=_('list of the recipes to fetch (fetch all if none '
'is passed)')),
ArgparseArgument('--no-deps', action='store_true',
default=False, help=_('do not fetch dependencies')),
]
Fetch.__init__(self, args)
def run(self, config, args):
cookbook = CookBook(config)
return self.fetch(cookbook, args.recipes, args.no_deps,
args.reset_rdeps, args.full_reset)
class FetchPackage(Fetch):
doc = N_('Fetch the recipes sources from a package')
name = 'fetch-package'
def __init__(self):
args = [
ArgparseArgument('package', nargs=1,
help=_('package to fetch')),
ArgparseArgument('--deps', action='store_false',
default=True, help=_('also fetch dependencies')),
]
Fetch.__init__(self, args)
def run(self, config, args):
store = PackagesStore(config)
package = store.get_package(args.package[0])
return self.fetch(store.cookbook, package.recipes_dependencies(),
args.deps, args.reset_rdeps, args.full_reset)
register_command(FetchRecipes)
register_command(FetchPackage)
|
shoreflyer/cerbero
|
cerbero/commands/fetch.py
|
Python
|
lgpl-2.1
| 4,802
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
# List subpackages to import with the current one
# see docs.python.org/2/tutorial/modules.html
from netzob.Inference.Grammar.ProcessWrappers.ProcessWrapper import ProcessWrapper
from netzob.Inference.Grammar.ProcessWrappers.NetworkProcessWrapper import NetworkProcessWrapperMaker
|
lootr/netzob
|
netzob/src/netzob/Inference/Grammar/ProcessWrappers/all.py
|
Python
|
gpl-3.0
| 2,230
|
"""
OVERALL CREDIT TO:
t0mm0, Eldorado, VOINAGE, BSTRDMKR, tknorris, smokdpi, TheHighway
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from urlresolver9 import common
from lib import jsunpack  # required for jsunpack.unpack() below; assumed to live in the plugins' lib package
from urlresolver9.resolver import UrlResolver, ResolverError
class WatchersResolver(UrlResolver):
name = "watchers"
domains = ['watchers.to']
pattern = '(?://|\.)(watchers\.to)/(?:embed-)?([a-zA-Z0-9]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
response = self.net.http_GET(web_url)
html = response.content
if html:
packed = re.search('(eval\(function.*?)\s*</script>', html, re.DOTALL)
if packed:
js = jsunpack.unpack(packed.group(1))
else:
js = html
video_url = None
link = re.search('([^"]*.m3u8)', js)
if link:
video_url = link.group(1)
common.log_utils.log_debug('watchers.to Link Found: %s' % video_url)
if not video_url:
link = re.search('([^"]*.mp4)', js)
if link:
video_url = link.group(1)
common.log_utils.log_debug('watchers.to Link Found: %s' % video_url)
if video_url:
return video_url
raise ResolverError('No playable video found.')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
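# Illustrative sketch (editorial addition): how this plugin is typically exercised by
# the urlresolver framework. The embed URL is a made-up placeholder, and the
# valid_url()/get_media_url() calls assume the standard UrlResolver interface.
#
#   resolver = WatchersResolver()
#   if resolver.valid_url('http://watchers.to/embed-abc123def456.html', 'watchers.to'):
#       stream_url = resolver.get_media_url('watchers.to', 'abc123def456')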
|
azumimuo/family-xbmc-addon
|
script.mrknow.urlresolver/lib/urlresolver9/plugins/watchers.py
|
Python
|
gpl-2.0
| 2,206
|
#!/usr/bin/python
'Finite difference RTM as a linear operator'
import os, sys, tempfile, subprocess
import rsf.prog, rsf.path
# Madagascar bin directory
bindir=os.path.join(rsf.prog.RSFROOT,'bin')
# Madagascar DATAPATH
datapath=rsf.path.datapath().rstrip('/')
# Madagascar commands
cp=os.path.join(bindir,'sfcp')
rtm=os.path.join(bindir,'sfmpifdlsrtm')
# Random files for input and output
inpd,inpfile=tempfile.mkstemp(dir=datapath)
outd,outfile=tempfile.mkstemp(dir=datapath)
p=subprocess.Popen([cp],stdout=inpd, close_fds=True)
p.wait()
run='ibrun tacc_affinity %s input=%s output=%s %s' %(rtm, inpfile, outfile,' '.join(sys.argv[1:]))
print run
os.system(run)
p=subprocess.Popen([cp],stdin=outd)
p.wait()
|
TobbeTripitaka/src
|
user/zhiguang/Mfdlsrtm.py
|
Python
|
gpl-2.0
| 715
|
INF = 9e9
def _min_index(dists, visited):
mn, min_index = INF, -1
for i in range(len(visited)):
if not visited[i] and dists[i] < mn:
mn, min_index = dists[i], i
return min_index
def print_MST(graph):
n = len(graph)
parents = [ 0 ] * n
dists = [ INF ] * n
visited = [ False ] * n
dists[0], parents[0] = 0, -1
for _ in range(n - 1):
u = _min_index(dists, visited)
visited[u] = True
for v in range(n):
if not visited[v] and 0 < graph[u][v] < dists[v]:
parents[v], dists[v] = u, graph[u][v]
for i in range(1, n):
print(f'{parents[i]} -> {i}')
def main():
graph = [
[0, 8, 0, 6, 0],
[8, 0, 4, 1, 3],
[0, 4, 0, 0, 1],
[6, 1, 0, 0, 2],
[0, 3, 1, 2, 0]
]
print_MST(graph)
if __name__ == '__main__':
main()
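# Editorial note: for the sample graph above, Prim's algorithm selects the edges
# 0-3 (weight 6), 3-1 (1), 3-4 (2) and 4-2 (1), so the program prints:
#   3 -> 1
#   4 -> 2
#   0 -> 3
#   3 -> 4
# Total MST weight is 10. Because the whole distance array is scanned on every step,
# this implementation runs in O(V^2) time, which is reasonable for dense adjacency
# matrices like the one used here.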
|
sshh12/SchoolCode
|
Algorithms/Graphs/PrimMST.py
|
Python
|
mit
| 898
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
import json
import re
import socket
import unicodedata
from base64 import standard_b64decode, urlsafe_b64decode
from collections import OrderedDict
from decimal import Decimal as DecimalType
from typing import Any, Callable, Optional, Sequence, Text, Union
from uuid import UUID
from xml.etree.ElementTree import Element, tostring
# noinspection PyCompatibility
import regex
from six import PY2, PY3, binary_type, moves as compat, \
python_2_unicode_compatible, text_type
from filters.base import BaseFilter, Type
from filters.simple import MaxLength
__all__ = [
'Base64Decode',
'ByteString',
'CaseFold',
'IpAddress',
'JsonDecode',
'MaxBytes',
'Regex',
'Split',
'Strip',
'Unicode',
'Uuid',
]
class Base64Decode(BaseFilter):
"""
Decodes an incoming value using the Base64 algo.
"""
CODE_INVALID = 'not_base64'
templates = {
CODE_INVALID: 'Base64-encoded value expected.',
}
def __init__(self):
super(Base64Decode, self).__init__()
self.whitespace_re = regex.compile(b'[ \t\r\n]+', regex.ASCII)
self.base64_re = regex.compile(b'^[-+_/A-Za-z0-9=]+$', regex.ASCII)
def _apply(self, value):
value = self._filter(value, Type(binary_type)) # type: binary_type
if self._has_errors:
return None
# Strip out whitespace.
# Technically, whitespace is not part of the Base64 alphabet,
# but virtually every implementation allows it.
value = self.whitespace_re.sub(b'', value)
# Check for invalid characters.
# Note that Python 3's b64decode does this for us, but we also
# have to support Python 2.
# https://docs.python.org/3/library/base64.html#base64.b64decode
if not self.base64_re.match(value):
return self._invalid_value(
value = value,
reason = self.CODE_INVALID,
)
# Check to see if we are working with a URL-safe dialect.
# https://en.wikipedia.org/wiki/Base64#URL_applications
if (b'_' in value) or (b'-' in value):
# You can't mix dialects, silly!
if (b'+' in value) or (b'/' in value):
return self._invalid_value(
value = value,
reason = self.CODE_INVALID,
)
url_safe = True
else:
url_safe = False
# Normalize padding.
# http://stackoverflow.com/a/9807138/
value = value.rstrip(b'=')
value += (b'=' * (4 - (len(value) % 4)))
try:
return (
urlsafe_b64decode(value)
if url_safe
else standard_b64decode(value)
)
except TypeError:
return self._invalid_value(value, self.CODE_INVALID, exc_info=True)
# noinspection SpellCheckingInspection
class CaseFold(BaseFilter):
"""
Applies case folding to an incoming string, allowing you to perform
case-insensitive comparisons.
The result tends to be lowercase, but it is recommended that you
NOT treat CaseFold as a Unicode-aware lowercase filter! The
proper way to lowercase a string is very much locale-dependent.
    Note that the built in :py:meth:`str.upper` and
    :py:meth:`str.lower` methods tend to do a pretty good job of
    properly changing the case of unicode strings.
References:
- http://www.w3.org/International/wiki/Case_folding
- https://docs.python.org/3/library/stdtypes.html#str.lower
- https://docs.python.org/3/library/stdtypes.html#str.upper
"""
def _apply(self, value):
value = self._filter(value, Type(text_type)) # type: Text
if self._has_errors:
return None
# In Python 3, case folding is supported natively.
# In Python 2, this is the best we can do.
# https://docs.python.org/3/library/stdtypes.html#str.casefold
if PY3:
# noinspection PyUnresolvedReferences
return value.casefold()
else:
# noinspection PyUnresolvedReferences
from py2casefold import casefold
return casefold(value)
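# Example (editorial addition), assuming the public ``apply()`` wrapper defined on
# BaseFilter:
#
#   CaseFold().apply('Weißbier')   # returns 'weissbier'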
@python_2_unicode_compatible
class IpAddress(BaseFilter):
"""
Validates an incoming value as an IPv[46] address.
"""
CODE_INVALID = 'not_ip_address'
templates = {
CODE_INVALID: 'This value is not a valid {ip_type} address.',
}
def __init__(self, ipv4=True, ipv6=False):
# type: (bool, bool) -> None
super(IpAddress, self).__init__()
self.ipv4 = ipv4
self.ipv6 = ipv6
def __str__(self):
return '{type}(ipv4={ipv4!r}, ipv6={ipv6!r})'.format(
type = type(self).__name__,
ipv4 = self.ipv4,
ipv6 = self.ipv6,
)
@property
def ip_type(self):
# type: () -> Text
"""
Returns the IP address versions that this Filter accepts.
"""
return '/'.join(filter(None, [
'IPv4' if self.ipv4 else None,
'IPv6' if self.ipv6 else None,
]))
def _apply(self, value):
value = self._filter(value, Type(text_type))
if self._has_errors:
return None
# http://stackoverflow.com/a/4017219
if self.ipv4:
try:
socket.inet_pton(socket.AF_INET, value)
except socket.error:
pass
else:
return value
if self.ipv6:
try:
n = socket.inet_pton(socket.AF_INET6, value)
except socket.error:
pass
else:
# Convert the binary value back into a string
# representation so that the end result is
# normalized.
# https://en.wikipedia.org/wiki/IPv6_address#Presentation
return socket.inet_ntop(socket.AF_INET6, n)
# If we get here, we failed the above checks (or the Filter is
# configured not to allow anything through).
return self._invalid_value(
value = value,
reason = self.CODE_INVALID,
template_vars = {
'ip_type': self.ip_type
},
)
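# Example (editorial addition), assuming the public ``apply()`` wrapper defined on
# BaseFilter; note that IPv6 values come back in normalized presentation form:
#
#   IpAddress(ipv4=True, ipv6=True).apply('2001:0DB8::1')   # returns '2001:db8::1'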
class JsonDecode(BaseFilter):
"""
Interprets the value as JSON.
JSON objects are converted to OrderedDict instances so that key
order is preserved.
"""
CODE_INVALID = 'not_json'
templates = {
CODE_INVALID: 'This value is not valid JSON.',
}
def __init__(self, decoder=json.loads):
# type: (Callable[Text, Any]) -> None
super(JsonDecode, self).__init__()
self.decoder = decoder
def _apply(self, value):
value = self._filter(value, Type(text_type)) # type: Text
if self._has_errors:
return None
try:
# :see: http://stackoverflow.com/a/6921760
return self.decoder(value, object_pairs_hook=OrderedDict)
except ValueError:
return self._invalid_value(value, self.CODE_INVALID, exc_info=True)
@python_2_unicode_compatible
class MaxBytes(BaseFilter):
"""
Ensures that an incoming string value is small enough to fit into a
specified number of bytes when encoded.
Note: The resulting value is a byte string, even if you provide a
unicode.
"""
CODE_TOO_LONG = 'too_long'
templates = {
CODE_TOO_LONG:
'Value is too long (must be < {max_bytes} '
'bytes when encoded using {encoding}).',
}
def __init__(
self,
max_bytes,
truncate = True,
prefix = '',
encoding = 'utf-8',
):
# type: (int, bool, Text, Text) -> None
"""
:param max_bytes:
Max number of bytes to allow.
:param truncate:
Whether to truncate values that are too long.
Set this to ``False`` to save system resources when you
know that you will reject values that are too long.
:param prefix:
Prefix to apply to truncated values.
Ignored when ``truncate`` is ``False``.
:param encoding:
The character encoding to check against.
Note: This filter is optimized for UTF-8.
"""
super(MaxBytes, self).__init__()
self.encoding = encoding
self.max_bytes = max_bytes
self.prefix = prefix
self.truncate = truncate
def __str__(self):
return '{type}({max_bytes!r}, encoding={encoding!r})'.format(
type = type(self).__name__,
max_bytes = self.max_bytes,
encoding = self.encoding,
)
def _apply(self, value):
"""
:return:
Returns bytes, truncated to the correct length.
Note: Might be a bit shorter than the max length, to avoid
orphaning a multibyte sequence.
"""
value = self._filter(
value = value,
filter_chain = (
Type((binary_type, text_type,))
| Unicode(encoding=self.encoding)
),
) # type: Text
if self._has_errors:
return None
str_value = value.encode(self.encoding)
if len(str_value) > self.max_bytes:
replacement = (
self.truncate_string(
# Ensure that we convert back to unicode before
# adding the prefix, just in case `self.encoding`
# indicates a codec that uses a BOM.
value = self.prefix + value,
max_bytes = self.max_bytes,
encoding = self.encoding,
)
if self.truncate
else None
)
return self._invalid_value(
value = value,
reason = self.CODE_TOO_LONG,
replacement = replacement,
context = {
'encoding': self.encoding,
'max_bytes': self.max_bytes,
'prefix': self.prefix,
'truncate': self.truncate,
},
)
return str_value
@staticmethod
def truncate_string(value, max_bytes, encoding):
# type: (Text, int, Text) -> binary_type
"""
Truncates a string value to the specified number of bytes.
:return:
Returns bytes, truncated to the correct length.
Note: Might be a bit shorter than `max_bytes`, to avoid
orphaning a multibyte sequence.
"""
# Convert to bytearray so that we get the same handling in
# Python 2 and Python 3.
bytes_ = bytearray(value.encode(encoding))
# Truncating the value is a bit tricky, as we have to be
# careful not to leave an unterminated multibyte sequence.
if encoding.lower() in ['utf-8', 'utf8']:
#
# This code works a bit faster than the generic routine
# (see below) because we only have to inspect up to 4
# bytes from the end of the encoded value instead of
# having to repeatedly decode the entire string.
#
# But, it only works for UTF-8.
#
truncated = bytes_[0:max_bytes]
# Walk backwards through the string until we hit certain
# sequences.
for i, o in enumerate(reversed(truncated), start=1):
# If the final byte is not part of a multibyte
# sequence, then we can stop right away; there is no
# need to remove anything.
if (i < 2) and (o < 0b10000000):
break
# If this byte is a leading byte (the first byte in a
# multibyte sequence), determine how many bytes we
# need to strip off the end of the string so that we
# can decode it back into a unicode if needed.
if o >= 0b11000000:
# Note: Assuming max 4 bytes per sequence.
# Should be good enough until extraterrestrial
# languages are encountered.
seq_length = (
4 if o >= 0b11110000 else
3 if o >= 0b11100000 else
2
)
# Now that we know how many bytes are in the final
# sequence, check to see if it is complete, and
# discard it if it is incomplete.
if seq_length != i:
truncated = truncated[0:-i]
break
# Else, we have a continuation byte. Continue walking
# backwards through the string.
return truncated
else:
trim = 0
while True:
# Progressively chop bytes off the end of the string
# until we have something that can be successfully
# decoded using the specified encoding.
truncated = bytes_[0:max_bytes - trim]
try:
truncated.decode(encoding)
except UnicodeDecodeError:
trim += 1
else:
return binary_type(truncated)
# We should never get here, but just in case, we need
# to ensure the loop eventually terminates (Python
# won't error if ``max_bytes - trim`` goes negative,
# since the slice operator accepts negative values).
if trim >= max_bytes:
raise ValueError(
'Unable to truncate {bytes_!r} to {max_bytes} '
'bytes when encoded using {encoding}.'.format(
bytes_ = bytes_,
max_bytes = max_bytes,
encoding = encoding,
),
)
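# Example (editorial addition): ``truncate_string`` never leaves a dangling multibyte
# sequence behind. 'héllo' encodes to 6 UTF-8 bytes (b'h\xc3\xa9llo'), so:
#
#   MaxBytes.truncate_string(u'héllo', max_bytes=3, encoding='utf-8')   # keeps b'h\xc3\xa9'
#   MaxBytes.truncate_string(u'héllo', max_bytes=2, encoding='utf-8')   # keeps only b'h';
#       the partial 2-byte sequence for 'é' is dropped rather than split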
@python_2_unicode_compatible
class Regex(BaseFilter):
"""
Matches a regular expression in the value.
IMPORTANT: This filter returns a LIST of all sequences in the
input value that matched the regex!
IMPORTANT: This Filter uses the ``regex`` library, which behaves
slightly differently than Python's ``re`` library.
If you've never used ``regex`` before, try it; you'll never want to
go back!
References:
- https://pypi.python.org/pypi/regex
"""
CODE_INVALID = 'malformed'
templates = {
CODE_INVALID:
'Value does not match regular expression {pattern}.',
}
# noinspection PyProtectedMember
def __init__(self, pattern):
# type: (Union[Text, regex._pattern_type, re._pattern_type]) -> None
"""
:param pattern:
String pattern, or pre-compiled regex.
IMPORTANT: If you specify your own compiled regex, be sure to
add the ``UNICODE`` flag for Unicode support!
"""
super(Regex, self).__init__()
self.regex = (
pattern
if isinstance(pattern, (regex._pattern_type, re._pattern_type))
else regex.compile(pattern, regex.UNICODE)
)
def __str__(self):
return '{type}({pattern!r})'.format(
type = type(self).__name__,
pattern = self.regex.pattern,
)
def _apply(self, value):
value = self._filter(value, Type(text_type))
if self._has_errors:
return None
matches = [
match.group(0)
for match in self.regex.finditer(value)
]
if not matches:
return self._invalid_value(
value = value,
reason = self.CODE_INVALID,
template_vars = {
'pattern': self.regex.pattern,
},
)
return matches
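# Example (editorial addition), assuming the public ``apply()`` wrapper defined on
# BaseFilter; the result is a list of every matching substring:
#
#   Regex(r'\d+').apply('a1 b22 c333')   # returns ['1', '22', '333']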
@python_2_unicode_compatible
class Split(BaseFilter):
"""
Splits an incoming string into parts.
The result is either a list or an OrderedDict, depending on whether
you specify keys to map to the result.
"""
# noinspection PyProtectedMember
def __init__(self, pattern, keys=None):
# type: (Union[Text, regex._pattern_type, re._pattern_type], Optional[Sequence[Text]]) -> None
"""
:param pattern:
Regex used to split incoming string values.
IMPORTANT: If you specify your own compiled regex, be sure
to add the ``UNICODE`` flag for Unicode support!
:param keys:
If set, the resulting list will be converted into an
OrderedDict, using the specified keys.
IMPORTANT: If ``keys`` is set, the split value's length
must be less than or equal to ``len(keys)``.
"""
super(Split, self).__init__()
self.regex = (
pattern
if isinstance(pattern, (regex._pattern_type, re._pattern_type))
else regex.compile(pattern, regex.UNICODE)
)
self.keys = keys
def __str__(self):
return '{type}({pattern!r}, keys={keys!r}'.format(
type = type(self).__name__,
pattern = self.regex.pattern,
keys = self.keys,
)
def _apply(self, value):
value = self._filter(value, Type(text_type))
if self._has_errors:
return None
split = self.regex.split(value)
if self.keys:
# The split value can have at most as many items as
# ``self.keys``.
split = self._filter(split, MaxLength(len(self.keys)))
if self._has_errors:
return None
return OrderedDict(compat.zip_longest(self.keys, split))
else:
return split
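# Example (editorial addition), assuming the public ``apply()`` wrapper defined on
# BaseFilter:
#
#   Split(r'\s*,\s*', keys=('a', 'b', 'c')).apply('1, 2,3')
#   # returns OrderedDict([('a', '1'), ('b', '2'), ('c', '3')])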
@python_2_unicode_compatible
class Strip(BaseFilter):
"""
Strips characters (whitespace and non-printables by default) from
the end(s) of a string.
IMPORTANT: This Filter uses the ``regex`` library, which behaves
slightly differently than Python's ``re`` library.
If you've never used ``regex`` before, try it; you'll never want to
go back!
"""
def __init__(self, leading=r'[\p{C}\s]+', trailing=r'[\p{C}\s]+'):
# type: (Text, Text) -> None
"""
:param leading:
Regex to match at the start of the string.
:param trailing:
Regex to match at the end of the string.
"""
super(Strip, self).__init__()
if leading:
self.leading = regex.compile(
r'^{pattern}'.format(pattern=leading),
regex.UNICODE,
)
else:
self.leading = None
if trailing:
self.trailing = regex.compile(
r'{pattern}$'.format(pattern=trailing),
regex.UNICODE,
)
else:
self.trailing = None
def __str__(self):
return '{type}(leading={leading!r}, trailing={trailing!r})'.format(
type = type(self).__name__,
leading = self.leading.pattern,
trailing = self.trailing.pattern,
)
def _apply(self, value):
value = self._filter(value, Type(text_type))
if self._has_errors:
return None
if self.leading:
value = self.leading.sub('', value)
if self.trailing:
value = self.trailing.sub('', value)
return value
@python_2_unicode_compatible
class Unicode(BaseFilter):
"""
Converts a value into a unicode string.
Note: By default, additional normalization is applied to the
resulting value. See the initializer docstring for more info.
References:
- https://docs.python.org/2/howto/unicode.html
- https://en.wikipedia.org/wiki/Unicode_equivalence
"""
CODE_DECODE_ERROR = 'wrong_encoding'
templates = {
CODE_DECODE_ERROR: 'This value cannot be decoded using {encoding}.',
}
def __init__(self, encoding='utf-8', normalize=True):
# type: (Text, bool) -> None
"""
:param encoding:
Used to decode non-unicode values.
:param normalize:
Whether to normalize the resulting value:
- Convert to NFC form.
- Remove non-printable characters.
- Convert all line endings to unix-style ('\n').
"""
super(Unicode, self).__init__()
self.encoding = encoding
self.normalize = normalize
if self.normalize:
#
# Compile the regex that we will use to remove non-
# printables from the resulting unicode.
# http://www.regular-expressions.info/unicode.html#category
#
# Note: using a double negative so that we can exclude
# newlines, which are technically considered control chars.
# http://stackoverflow.com/a/3469155
#
self.npr = regex.compile(r'[^\P{C}\s]+', regex.UNICODE)
def __str__(self):
return '{type}(encoding={encoding!r})'.format(
type = type(self).__name__,
encoding = self.encoding,
)
def _apply(self, value):
try:
if isinstance(value, text_type):
decoded = value
elif isinstance(value, binary_type):
decoded = value.decode(self.encoding)
elif isinstance(value, bool):
decoded = text_type(int(value))
# In Python 3, ``bytes(<int>)`` does weird things.
# https://www.python.org/dev/peps/pep-0467/
elif isinstance(value, (int, float)):
decoded = text_type(value)
elif isinstance(value, DecimalType):
decoded = format(value, 'f')
elif isinstance(value, Element):
# There's no way (that I know of) to get
# :py:meth:`ElementTree.tostring` to return a unicode.
decoded = tostring(value, 'utf-8').decode('utf-8')
elif (
PY2 and hasattr(value, '__str__')
or PY3 and hasattr(value, '__bytes__')
):
decoded = binary_type(value).decode(self.encoding)
else:
decoded = text_type(value)
except UnicodeDecodeError:
return self._invalid_value(
value = value,
reason = self.CODE_DECODE_ERROR,
exc_info = True,
template_vars = {
'encoding': self.encoding,
},
)
if self.normalize:
return (
# Return the final string in composed form.
# https://en.wikipedia.org/wiki/Unicode_equivalence
unicodedata.normalize('NFC',
# Remove non-printables.
self.npr.sub('', decoded)
)
# Normalize line endings.
# http://stackoverflow.com/a/1749887
.replace('\r\n', '\n')
.replace('\r', '\n')
)
else:
return decoded
class ByteString(Unicode):
"""
Converts a value into a byte string, encoded as UTF-8.
IMPORTANT: This filter returns bytes objects, not bytearrays!
"""
def __init__(self, encoding='utf-8', normalize=False):
# type: (Text, bool) -> None
"""
:param encoding:
Used to decode non-unicode values.
:param normalize:
Whether to normalize the unicode value before converting
back into bytes:
- Convert to NFC form.
- Remove non-printable characters.
- Convert all line endings to unix-style ('\n').
Note that ``normalize`` is ``False`` by default for
:py:class:`ByteString`, but ``True`` by default for
:py:class:`Unicode`.
"""
super(ByteString, self).__init__(encoding, normalize)
# noinspection SpellCheckingInspection
def _apply(self, value):
decoded = super(ByteString, self)._apply(value) # type: Text
#
# No need to catch UnicodeEncodeErrors here; UTF-8 can handle
# any unicode value.
#
# Technically, we could get this error if we encounter a code
# point beyond U+10FFFF (the highest valid code point in the
# Unicode standard).
#
# However, it's not possible to create a `unicode` object with
# an invalid code point, so we wouldn't even be able to get
# this far if the incoming value contained a character that
# can't be represented using UTF-8.
#
# Note that in some versions of Python, it is possible (albeit
# really difficult) to trick Python into creating unicode
# objects with invalid code points, but it generally requires
# using specific codecs that aren't UTF-8.
#
# Example of exploit and release notes from the Python release
# (2.7.6) that fixes the issue:
#
# - https://gist.github.com/rspeer/7559750
# - https://hg.python.org/cpython/raw-file/99d03261c1ba/Misc/NEWS
#
# Normally we return ``None`` if we get any errors, but in this
# case, we'll let the superclass method decide.
return decoded if self._has_errors else decoded.encode('utf-8')
@python_2_unicode_compatible
class Uuid(BaseFilter):
"""
Interprets an incoming value as a UUID.
"""
CODE_INVALID = 'not_uuid'
CODE_WRONG_VERSION = 'wrong_version'
templates = {
CODE_INVALID: 'This value is not a well-formed UUID.',
CODE_WRONG_VERSION:
'v{incoming} UUID not allowed (expected v{expected}).',
}
def __init__(self, version=None):
# type: (Optional[int]) -> None
"""
        :param version:
If specified, requires the resulting UUID to match the
specified version.
References:
- https://en.wikipedia.org/wiki/Uuid#RFC_4122_Variant
"""
super(Uuid, self).__init__()
self.version = version
def __str__(self):
return '{type}(version={version!r})'.format(
type = type(self).__name__,
version = self.version,
)
def _apply(self, value):
value = self._filter(value, Type((text_type, UUID,))) # type: Union[Text, UUID]
if self._has_errors:
return None
try:
uuid = (
value
if isinstance(value, UUID)
else UUID(hex=value)
)
except ValueError:
return self._invalid_value(value, self.CODE_INVALID, exc_info=True)
else:
if self.version not in (None, uuid.version):
return self._invalid_value(
value = text_type(uuid),
reason = self.CODE_WRONG_VERSION,
context = {
'expected': self.version,
'incoming': uuid.version,
},
)
return uuid
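# Example (editorial addition), assuming the public ``apply()`` wrapper defined on
# BaseFilter; the hex value below is an arbitrary but syntactically valid v4 UUID:
#
#   Uuid(version=4).apply('54d6ebf8-a3f1-4e54-8e3c-6d3b6f2a9e11')
#   # returns UUID('54d6ebf8-a3f1-4e54-8e3c-6d3b6f2a9e11')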
|
eflglobal/filters
|
filters/string.py
|
Python
|
mit
| 27,879
|
# coding: utf-8
'''Tests for CovManager collection views
@author: Jesse Schwartzentruber (:truber)
@license:
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
'''
import json
import logging
import os
import re
import pytest
import requests
from django.urls import reverse
LOG = logging.getLogger("fm.covmanager.tests.collections")
pytestmark = pytest.mark.usefixtures("covmanager_test") # pylint: disable=invalid-name
@pytest.mark.parametrize("name", ["covmanager:collections",
"covmanager:collections_api",
"covmanager:collections_diff",
"covmanager:collections_patch"])
def test_collections_no_login(name, client):
"""Request without login hits the login redirect"""
path = reverse(name)
response = client.get(path, follow=False)
assert response.status_code == requests.codes["found"]
assert response.url == "/login/?next=" + path
@pytest.mark.parametrize("name", ["covmanager:collections",
"covmanager:collections_api",
"covmanager:collections_diff",
"covmanager:collections_patch"])
def test_collections_view_simple_get(name, client):
"""No errors are thrown in template"""
client.login(username='test', password='test')
response = client.get(reverse(name))
LOG.debug(response)
assert response.status_code == requests.codes["ok"]
def test_collections_diff_no_login(client):
"""Request without login hits the login redirect"""
path = reverse("covmanager:collections_diff_api", kwargs={'path': ''})
response = client.get(path, follow=False)
assert response.status_code == requests.codes["found"]
assert response.url == "/login/?next=" + path
def test_collections_diff_simple_get(client, cm):
"""No errors are thrown in template"""
repo = cm.create_repository("git")
col1 = cm.create_collection(repository=repo, coverage=json.dumps({"children": []}))
col2 = cm.create_collection(repository=repo, coverage=json.dumps({"children": []}))
client.login(username='test', password='test')
response = client.get(reverse("covmanager:collections_diff_api", kwargs={'path': ''}),
{'ids': '%d,%d' % (col1.pk, col2.pk)})
LOG.debug(response)
assert response.status_code == requests.codes['ok']
def test_collections_patch_no_login(client):
"""Request without login hits the login redirect"""
path = reverse("covmanager:collections_patch_api", kwargs={'collectionid': 0, 'patch_revision': 'abc'})
response = client.get(path, follow=False)
assert response.status_code == requests.codes["found"]
assert response.url == "/login/?next=" + path
def test_collections_patch_simple_get(client, cm):
"""No errors are thrown in template"""
client.login(username='test', password='test')
repo = cm.create_repository("hg")
col = cm.create_collection(repository=repo,
coverage=json.dumps({"linesTotal": 1,
"name": None,
"coveragePercent": 0.0,
"children": {"test.c": {"coverage": []}},
"linesMissed": 1,
"linesCovered": 0}))
with open(os.path.join(repo.location, "test.c"), "w") as fp:
fp.write("hello")
cm.hg(repo, "add", "test.c")
cm.hg(repo, "commit", "-m", "init")
with open(os.path.join(repo.location, "test.c"), "w") as fp:
fp.write("world")
cm.hg(repo, "commit", "-m", "update")
rev = re.match(r"changeset: 1:([0-9a-f]+)", cm.hg(repo, "log")).group(1)
response = client.get(reverse("covmanager:collections_patch_api",
kwargs={'collectionid': col.pk, 'patch_revision': rev}))
LOG.debug(response)
assert response.status_code == requests.codes['ok']
def test_collections_browse_no_login(client):
"""Request without login hits the login redirect"""
path = reverse("covmanager:collections_browse", kwargs={'collectionid': 0})
response = client.get(path, follow=False)
assert response.status_code == requests.codes["found"]
assert response.url == "/login/?next=" + path
def test_collections_browse_simple_get(client):
"""No errors are thrown in template"""
client.login(username='test', password='test')
response = client.get(reverse("covmanager:collections_browse", kwargs={'collectionid': 0}))
LOG.debug(response)
assert response.status_code == requests.codes['ok']
def test_collections_browse_api_no_login(client):
"""Request without login hits the login redirect"""
path = reverse("covmanager:collections_browse_api", kwargs={'collectionid': 0, 'path': ''})
response = client.get(path, follow=False)
assert response.status_code == requests.codes["found"]
assert response.url == "/login/?next=" + path
def test_collections_browse_api_simple_get(client, cm):
"""No errors are thrown in template"""
client.login(username='test', password='test')
repo = cm.create_repository("git")
col = cm.create_collection(repository=repo)
response = client.get(reverse("covmanager:collections_browse_api", kwargs={'collectionid': col.pk, 'path': ''}))
LOG.debug(response)
assert response.status_code == requests.codes['ok']
|
MozillaSecurity/FuzzManager
|
server/covmanager/tests/test_collections.py
|
Python
|
mpl-2.0
| 5,682
|
import asyncio
import colorsys
import enum
import functools
import psmove
import time
import traceback
import random
SETTINGSFILE = 'joustsettings.yaml'
#Human speeds, from slowest to fastest sensitivity level
#SLOW_WARNING = [0.1, 0.15, 0.28]
#SLOW_MAX = [0.25, 0.8, 1]
#FAST_WARNING = [0.5, 0.6, 0.8]
#FAST_MAX = [1, 1.4, 1.8]
SLOW_WARNING = [1.2, 1.3, 1.6, 2.0, 2.5]
SLOW_MAX = [1.3, 1.5, 1.8, 2.5, 3.2]
FAST_WARNING = [1.4, 1.6, 1.9, 2.7, 2.8]
FAST_MAX = [1.6, 1.8, 2.8, 3.2, 3.5]
#WERE_SLOW_WARNING = [0.2, 0.3, 0.4]
#WERE_SLOW_MAX = [0.7, 0.9, 1.1]
#WERE_FAST_WARNING = [0.6, 0.7, 0.9]
#WERE_FAST_MAX = [1.1, 1.5, 2.0]
WERE_SLOW_WARNING = [1.2, 1.4, 1.7, 2.1, 2.9]
WERE_SLOW_MAX = [1.3, 1.6, 1.9, 2.6, 3.9]
WERE_FAST_WARNING = [1.4, 1.7, 2.0, 2.8, 3.5]
WERE_FAST_MAX = [1.6, 1.9, 2.9, 3.3, 4.9]
#ZOMBIE_WARNING = [0.5, 0.6, 0.8]
#ZOMBIE_MAX = [0.8, 1, 1.4]
ZOMBIE_WARNING = [1.2, 1.5, 1.8, 2.6, 2.7]
ZOMBIE_MAX = [1.4, 1.7, 2.7, 3.1, 3.4]
def get_move(serial, move_num):
time.sleep(0.02)
move = psmove.PSMove(move_num)
time.sleep(0.05)
if move.get_serial() != serial:
for move_num in range(psmove.count_connected()):
move = psmove.PSMove(move_num)
if move.get_serial() == serial:
print("returning " +str(move.get_serial()))
return move
return None
else:
return move
def lerp(a, b, p):
return a*(1 - p) + b*p
class Games(enum.Enum):
JoustFFA = (0, 'Joust Free-for-All', 2)
JoustTeams = (1, 'Joust Teams', 3)
JoustRandomTeams = (2, 'Joust Random Teams', 3)
Traitor = (3, 'Traitors', 6)
WereJoust = (4, 'Werewolves', 3)
Zombies = (5, 'Zombies', 4)
Commander = (6, 'Commander', 4)
Swapper = (7, 'Swapper', 3)
FightClub = (8, 'Fight Club', 2)
Tournament = (9, 'Tournament', 3)
NonStop = (10, 'Non Stop Joust', 2)
Ninja = (11, 'Ninja Bomb', 2)
Random = (12, 'Random', 2)
def __new__(cls, value, pretty_name, min_players):
"""This odd constructor lets us keep Foo.value as an integer, but also
add some extra properties to each option."""
obj = object.__new__(cls)
obj._value_ = value
obj.pretty_name = pretty_name
obj.minimum_players = min_players
return obj
def next(self):
"""Return the next game mode after this one in the list. Wraps around after hitting bottom."""
return Games((self.value + 1) % len(Games))
def previous(self):
"""Return the previous game mode after this one in the list. Wraps around after hitting bottom."""
return Games((self.value - 1) % len(Games))
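# Editorial note: because next()/previous() wrap modulo len(Games),
# Games.JoustFFA.next() is Games.JoustTeams, and Games.Random.next() wraps back
# around to Games.JoustFFA.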
#These buttons are based off of
#The mapping of PS Move controllers
class Button(enum.Flag):
NONE = 0
TRIANGLE = psmove.Btn_TRIANGLE
CIRCLE = psmove.Btn_CIRCLE
CROSS = psmove.Btn_CROSS
SQUARE = psmove.Btn_SQUARE
SELECT = psmove.Btn_SELECT
START = psmove.Btn_START
SYNC = psmove.Btn_PS
MIDDLE = psmove.Btn_MOVE
TRIGGER = psmove.Btn_T
SHAPES = TRIANGLE | CIRCLE | CROSS | SQUARE
UPDATE = SELECT | START
all_shapes = [Button.TRIANGLE, Button.CIRCLE, Button.CROSS, Button.SQUARE]
battery_levels = {
psmove.Batt_MIN: "Low",
psmove.Batt_20Percent: "20%",
psmove.Batt_40Percent: "40%",
psmove.Batt_60Percent: "60%",
psmove.Batt_80Percent: "80%",
psmove.Batt_MAX: "100%",
psmove.Batt_CHARGING: "Charging",
psmove.Batt_CHARGING_DONE: "Charged",
}
# Common colors lifted from https://xkcd.com/color/rgb/
# TODO: Add more colors -- probably need to have 14 player colors at least.
class Color(enum.Enum):
BLACK = 0x000000
WHITE = 0xffffff
RED = 0xff0000
GREEN = 0x00ff00
BLUE = 0x0000ff
YELLOW = 0xffff14
PURPLE = 0x7e1e9c
ORANGE = 0xf97306
PINK = 0xff81c0
TURQUOISE = 0x06c2ac
BROWN = 0x653700
def rgb_bytes(self):
v = self.value
return v >> 16, (v >> 8) & 0xff, v & 0xff
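# Editorial note: rgb_bytes() splits the packed value into its red, green and blue
# bytes, e.g. Color.ORANGE.rgb_bytes() == (249, 115, 6) for 0xf97306.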
# Red is reserved for warnings/knockouts.
PLAYER_COLORS = [ c for c in Color if c not in (Color.RED, Color.WHITE, Color.BLACK) ]
def async_print_exceptions(f):
"""Wraps a coroutine to print exceptions (other than cancellations)."""
@functools.wraps(f)
async def wrapper(*args, **kwargs):
try:
await f(*args, **kwargs)
except asyncio.CancelledError:
raise
except:
traceback.print_exc()
raise
return wrapper
# Represents a pace the game is played at, encapsulating the tempo of the music as well
# as controller sensitivity.
class GamePace:
__slots__ = ['tempo', 'warn_threshold', 'death_threshold']
def __init__(self, tempo, warn_threshold, death_threshold):
self.tempo = tempo
self.warn_threshold = warn_threshold
self.death_threshold = death_threshold
def __str__(self):
return '<GamePace tempo=%s, warn=%s, death=%s>' % (self.tempo, self.warn_threshold, self.death_threshold)
# TODO: These are placeholder values.
# We can't take the values from joust.py, since those are compared to the sum of the
# three accelerometer dimensions, whereas we compute the magnitude of the acceleration
# vector.
SLOW_PACE = GamePace(tempo=0.4, warn_threshold=2, death_threshold=4)
MEDIUM_PACE = GamePace(tempo=1.0, warn_threshold=3, death_threshold=5)
FAST_PACE = GamePace(tempo=1.5, warn_threshold=5, death_threshold=9)
FREEZE_PACE = GamePace(tempo=0, warn_threshold=1.1, death_threshold=1.2)
REQUIRED_SETTINGS = [
'play_audio',
'move_can_be_admin',
'current_game',
'enforce_minimum',
'sensitivity',
'play_instructions',
'random_modes',
'color_lock',
'color_lock_choices',
'red_on_kill',
'random_teams',
'menu_voice',
'random_team_size',
'force_all_start',
]
|
aangert/PiParty
|
common.py
|
Python
|
mit
| 5,854
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Upgrader models.
Upgrade engine database model for keeping a log of which upgrades have been
applied to a given database.
"""
from invenio_db import db
class Upgrade(db.Model):
"""Represents an Upgrade record."""
__tablename__ = 'upgrade'
upgrade = db.Column(db.String(255), primary_key=True, nullable=False)
applied = db.Column(db.DateTime, nullable=False)
|
inveniosoftware/invenio-upgrader
|
invenio_upgrader/models.py
|
Python
|
gpl-2.0
| 1,169
|
from netpyne import specs
# Simulation options
cfg = specs.SimConfig() # object of class SimConfig to store simulation configuration
cfg.duration = 1*1e3 # Duration of the simulation, in ms
cfg.dt = 0.025 # Internal integration timestep to use
cfg.verbose = False # Show detailed messages
cfg.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record
cfg.recordStep = 0.1 # Step size in ms to save data (eg. V traces, LFP, etc)
cfg.filename = 'tut8' # Set file output name
cfg.saveJson = True
cfg.printPopAvgRates = True
cfg.analysis['plotRaster'] = {'saveFig': True} # Plot a raster
cfg.analysis['plotTraces'] = {'include': [0], 'saveFig': True} # Plot recorded traces for this list of cells
cfg.saveDataInclude = ['simData', 'simConfig', 'netParams', 'net']
# Variable parameters (used in netParams)
cfg.synMechTau2 = 5
cfg.connWeight = 0.01
|
Neurosim-lab/netpyne
|
doc/source/code/tut8_cfg.py
|
Python
|
mit
| 945
|
#!/usr/bin/env python
'''Test load using the Python PNG loader. You should see the rgb_8bpp_trans.png
image with a hole on it on a checkboard background.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import unittest
import base_load
from pyglet.image.codecs.png import PNGImageDecoder
class TEST_PNG_INDEXED_TRANS_LOAD(base_load.TestLoad):
texture_file = 'rgb_8bpp_trans.png'
decoder = PNGImageDecoder()
if __name__ == '__main__':
unittest.main()
|
gdkar/pyglet
|
tests/image/PNG_INDEXED_TRANS_LOAD.py
|
Python
|
bsd-3-clause
| 500
|
# -*- coding: utf-8 -*-
def write_rom(rom_path: str, rom_content: bytes):
with open(rom_path, mode='wb') as bios:
print('Writing "', rom_path, '" bios file ...')
return bios.write(rom_content)
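if __name__ == '__main__':
    # Minimal sketch (editorial addition): write 256 KiB of zero bytes to a dummy
    # ROM file. The file name is a placeholder, not part of the original module.
    written = write_rom('dummy.rom', bytes(256 * 1024))
    print(written, 'bytes written')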
|
emeric254/PyHawaiiBiosReader
|
tools/RomWriter.py
|
Python
|
mit
| 215
|
import requests
from bs4 import BeautifulSoup
from Crawler.BzojCrawler.BzojConfig import BzojUser , BzojVIPUser
class BzojScanner:
s = requests.session()
scan_url = 'http://www.lydsy.com/JudgeOnline/status.php?problem_id=&user_id={}&language=-1&jresult=-1'
def Analyse(self, html):
soup = BeautifulSoup(html, 'html5lib')
L = list()
for i in range(2, 30):
html = soup.select_one(
'body > center:nth-of-type(1) > table:nth-of-type(2) > tbody > tr:nth-of-type({})'.format(i))
if html is None: break
dt = dict()
dt['originOJ'] = 'BZOJ'
Term = ['', 'realrunid', 'nickname', 'originProb', 'status', 'runmemory', 'runtime', 'language',
'codelenth', 'timesubmit']
for j in range(1, 10):
html = soup.select_one(
'body > center:nth-of-type(1) > table:nth-of-type(2) > tbody > tr:nth-of-type({}) > td:nth-of-type({})'.format(
i, j))
dt[Term[j]] = html.text
if Term[j] == 'codelenth':
dt[Term[j]] = str(dt[Term[j]]).replace(' B', '')
L.append(dt)
return L
def Scanner(self):
L = list()
Users = list()
Users += BzojUser
Users += BzojVIPUser
for x in Users:
url = self.scan_url.format(x['user_id'])
r = self.s.get(url, timeout=5)
r.encoding = 'utf-8'
html = r.text
tL = self.Analyse(html)
L += tL
return L
def main():
bs = BzojScanner()
L = bs.Scanner()
print(L)
'''
f = open('/tmp/hi.html','r')
html = f.read()
tL = bs.Analyse(html)
print(tL)
'''
def test():
Users = list()
Users += BzojUser
Users += BzojVIPUser
print(Users)
if __name__ == '__main__':
test()
|
CKboss/VirtualJudgePY
|
Crawler/BzojCrawler/BzojScanner.py
|
Python
|
gpl-2.0
| 1,925
|
# coding: utf-8
from elasticsearch import Elasticsearch, helpers
import psycopg2, pprint, sys, time, os
CHUNK_SIZE = 10000
def getEnvOrExit(var):
environment = ''
try:
environment = os.environ[var]
except:
print('[Error]: Environment variable ' + var + ' not defined.')
sys.exit(1)
return environment
dbparams = getEnvOrExit('PANTSU_DBPARAMS')
pantsu_index = getEnvOrExit('PANTSU_ELASTICSEARCH_INDEX')
torrent_tablename = getEnvOrExit('PANTSU_TORRENT_TABLENAME')
es = Elasticsearch()
pgconn = psycopg2.connect(dbparams)
cur = pgconn.cursor()
# We MUST use NO QUERY CACHE because the values are inserted by triggers and
# not through pgpool.
cur.execute("""/*NO QUERY CACHE*/ SELECT reindex_torrents_id, torrent_id, action FROM reindex_torrents""")
fetches = cur.fetchmany(CHUNK_SIZE)
while fetches:
actions = list()
delete_cur = pgconn.cursor()
for reindex_id, torrent_id, action in fetches:
new_action = {
'_op_type': action,
'_index': pantsu_index,
'_type': 'torrents',
'_id': torrent_id
}
if action == 'index':
select_cur = pgconn.cursor()
select_cur.execute("""SELECT torrent_id, torrent_name, category, sub_category, status,
torrent_hash, date, uploader, downloads, filesize, seeders, leechers, completed
FROM {torrent_tablename}
WHERE torrent_id = {torrent_id}""".format(torrent_id=torrent_id, torrent_tablename=torrent_tablename))
torrent_id, torrent_name, category, sub_category, status, torrent_hash, date, uploader, downloads, filesize, seeders, leechers, completed = select_cur.fetchone()
doc = {
'id': torrent_id,
'name': torrent_name.decode('utf-8'),
'category': str(category),
'sub_category': str(sub_category),
'status': status,
'hash': torrent_hash,
'date': date,
'uploader_id': uploader,
'downloads': downloads,
'filesize': filesize,
'seeders': seeders,
'leechers': leechers,
'completed': completed
}
new_action['_source'] = doc
select_cur.close()
delete_cur.execute('DELETE FROM reindex_torrents WHERE id = {reindex_id}'.format(reindex_id=reindex_id))
actions.append(new_action)
pgconn.commit() # Commit the deletes transaction
delete_cur.close()
helpers.bulk(es, actions, chunk_size=CHUNK_SIZE, request_timeout=120)
del(fetches)
fetches = cur.fetchmany(CHUNK_SIZE)
cur.close()
pgconn.close()
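# Editorial note: the script expects PANTSU_DBPARAMS, PANTSU_ELASTICSEARCH_INDEX and
# PANTSU_TORRENT_TABLENAME in the environment before it runs; for example (values
# are placeholders, not the project's real settings):
#
#   PANTSU_DBPARAMS='dbname=nyaapantsu user=nyaapantsu' \
#   PANTSU_ELASTICSEARCH_INDEX=nyaapantsu \
#   PANTSU_TORRENT_TABLENAME=torrents \
#   python reindex_nyaapantsu.py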
|
yueou/nyaa
|
deploy/ansible/roles/elasticsearch/files/reindex_nyaapantsu.py
|
Python
|
mit
| 2,722
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""Profiler setting methods."""
from __future__ import absolute_import
import ctypes
import warnings
from .base import _LIB, check_call, c_str, ProfileHandle, c_str_array, py_str, KVStoreHandle
profiler_kvstore_handle = KVStoreHandle()
def set_kvstore_handle(handle):
global profiler_kvstore_handle
profiler_kvstore_handle = handle
def set_config(**kwargs):
"""Set up the configure of profiler (only accepts keyword arguments).
Parameters
----------
filename : string,
output file for profile data
profile_all : boolean,
all profile types enabled
profile_symbolic : boolean,
whether to profile symbolic operators
profile_imperative : boolean,
whether to profile imperative operators
profile_memory : boolean,
whether to profile memory usage
profile_api : boolean,
whether to profile the C API
contiguous_dump : boolean,
whether to periodically dump profiling data to file
dump_period : float,
seconds between profile data dumps
aggregate_stats : boolean,
whether to maintain aggregate stats in memory for console
dump. Has some negative performance impact.
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
kk = kwargs.keys()
vv = kwargs.values()
check_call(_LIB.MXSetProcessProfilerConfig(len(kwargs),
c_str_array([key for key in kk]),
c_str_array([str(val) for val in vv]),
profiler_kvstore_handle))
def profiler_set_config(mode='symbolic', filename='profile.json'):
"""Set up the configure of profiler (Deprecated).
Parameters
----------
mode : string, optional
Indicates whether to enable the profiler, can
be 'symbolic', or 'all'. Defaults to `symbolic`.
filename : string, optional
The name of output trace file. Defaults to 'profile.json'.
"""
warnings.warn('profiler.profiler_set_config() is deprecated. '
'Please use profiler.set_config() instead')
keys = c_str_array([key for key in ["profile_" + mode, "filename"]])
values = c_str_array([str(val) for val in [True, filename]])
assert len(keys) == len(values)
check_call(_LIB.MXSetProcessProfilerConfig(len(keys), keys, values, profiler_kvstore_handle))
def set_state(state='stop', profile_process='worker'):
"""Set up the profiler state to 'run' or 'stop'.
Parameters
----------
state : string, optional
Indicates whether to run the profiler, can
be 'stop' or 'run'. Default is `stop`.
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
state2int = {'stop': 0, 'run': 1}
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXSetProcessProfilerState(ctypes.c_int(state2int[state]),
profile_process2int[profile_process],
profiler_kvstore_handle))
def profiler_set_state(state='stop'):
"""Set up the profiler state to 'run' or 'stop' (Deprecated).
Parameters
----------
state : string, optional
Indicates whether to run the profiler, can
be 'stop' or 'run'. Default is `stop`.
"""
warnings.warn('profiler.profiler_set_state() is deprecated. '
'Please use profiler.set_state() instead')
set_state(state)
def dump(finished=True, profile_process='worker'):
"""Dump profile and stop profiler. Use this to save profile
in advance in case your program cannot exit normally.
Parameters
----------
finished : boolean
Indicates whether to stop statistic output (dumping) after this dump.
Default is True
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
fin = 1 if finished is True else 0
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXDumpProcessProfile(fin,
profile_process2int[profile_process],
profiler_kvstore_handle))
def dump_profile():
"""Dump profile and stop profiler. Use this to save profile
in advance in case your program cannot exit normally."""
warnings.warn('profiler.dump_profile() is deprecated. '
'Please use profiler.dump() instead')
dump(True)
def dumps(reset=False):
"""Return a printable string of aggregate profile stats.
Parameters
----------
    reset: boolean
        Indicates whether to clear the aggregate statistical data collected up to this point
"""
debug_str = ctypes.c_char_p()
do_reset = 1 if reset is True else 0
check_call(_LIB.MXAggregateProfileStatsPrint(ctypes.byref(debug_str), int(do_reset)))
return py_str(debug_str.value)
def pause(profile_process='worker'):
"""Pause profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXProcessProfilePause(int(1),
profile_process2int[profile_process],
profiler_kvstore_handle))
def resume(profile_process='worker'):
"""
Resume paused profiling.
Parameters
----------
profile_process : string
whether to profile kvstore `server` or `worker`.
server can only be profiled when kvstore is of type dist.
if this is not passed, defaults to `worker`
"""
profile_process2int = {'worker': 0, 'server': 1}
check_call(_LIB.MXProcessProfilePause(int(0),
profile_process2int[profile_process],
profiler_kvstore_handle))
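# Usage sketch (illustrative, not part of the original module): drive the
# process-level profiler end to end. It assumes that set_config() defined above
# accepts the documented keyword arguments (e.g. aggregate_stats, filename).
def _example_profiler_session():
    """Illustrative only: configure, run, and dump the process profiler."""
    set_config(aggregate_stats=True, filename='profile_output.json')
    set_state('run')                 # start collecting events
    # ... run the MXNet computation to be profiled here ...
    set_state('stop')                # stop collecting
    print(dumps(reset=True))         # aggregate statistics as printable text
    dump(finished=True)              # write profile_output.json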
class Domain(object):
"""Profiling domain, used to group sub-objects like tasks, counters, etc into categories
Serves as part of 'categories' for chrome://tracing
Note: Domain handles are never destroyed.
Parameters
----------
name : string
Name of the domain
"""
def __init__(self, name):
self.name = name
self.handle = ProfileHandle()
check_call(_LIB.MXProfileCreateDomain(c_str(self.name), ctypes.byref(self.handle)))
def __str__(self):
return self.name
def new_task(self, name):
"""Create new Task object owned by this domain
Parameters
----------
name : string
Name of the task
"""
return Task(self, name)
def new_frame(self, name):
"""Create new Frame object owned by this domain
Parameters
----------
name : string
Name of the frame
"""
return Frame(self, name)
def new_counter(self, name, value=None):
"""Create new Counter object owned by this domain
Parameters
----------
name : string
Name of the counter
"""
return Counter(self, name, value)
def new_marker(self, name):
"""Create new Marker object owned by this domain
Parameters
----------
name : string
Name of the marker
"""
return Marker(self, name)
class Task(object):
"""Profiling Task class.
A task is a logical unit of work performed by a particular thread.
Tasks can nest; thus, tasks typically correspond to functions, scopes, or a case block
in a switch statement.
You can use the Task API to assign tasks to threads.
This is different from Frame in that all profiling statistics for passes
through the task's begin and end points are accumulated together into a single statistical
analysis, rather than a separate analysis for each pass (as with a Frame).
Parameters
----------
domain : Domain object
Domain to which this object belongs
name : string
Name of the task
"""
def __init__(self, domain, name):
self.name = name
self.handle = ProfileHandle()
check_call(_LIB.MXProfileCreateTask(domain.handle,
c_str(self.name),
ctypes.byref(self.handle)))
def __del__(self):
if self.handle is not None:
check_call(_LIB.MXProfileDestroyHandle(self.handle))
def start(self):
"""Start timing scope for this object"""
check_call(_LIB.MXProfileDurationStart(self.handle))
def stop(self):
"""Stop timing scope for this object"""
check_call(_LIB.MXProfileDurationStop(self.handle))
def __str__(self):
return self.name
class Frame(object):
"""Profiling Frame class.
Use the frame API to insert calls to the desired places in your code and analyze
performance per frame, where frame is the time period between frame begin and end points.
When frames are displayed in Intel VTune Amplifier, they are displayed in a
separate track, so they provide a way to visually separate this data from normal task data.
This is different from Task in that each 'Frame' duration will be a discretely-numbered
event in the VTune output, as well as its rate (frame-rate) shown. This is analogous to
profiling each frame of some visual output, such as rendering a video game frame.
Parameters
----------
domain : Domain object
Domain to which this object belongs
name : string
Name of the frame
"""
def __init__(self, domain, name):
self.name = name
self.handle = ProfileHandle()
check_call(_LIB.MXProfileCreateFrame(domain.handle,
c_str(self.name),
ctypes.byref(self.handle)))
def __del__(self):
if self.handle is not None:
check_call(_LIB.MXProfileDestroyHandle(self.handle))
def start(self):
"""Start timing scope for this object"""
check_call(_LIB.MXProfileDurationStart(self.handle))
def stop(self):
"""Stop timing scope for this object"""
check_call(_LIB.MXProfileDurationStop(self.handle))
def __str__(self):
return self.name
class Event(object):
"""Profiling Event class.
The event API is used to observe when demarcated events occur in your application, or to
identify how long it takes to execute demarcated regions of code. Set annotations in the
application to demarcate areas where events of interest occur.
After running analysis, you can see the events marked in the Timeline pane.
Event API is a per-thread function that works in resumed state.
This function does not work in paused state.
Parameters
----------
name : string
Name of the event
"""
def __init__(self, name):
self.name = name
self.handle = ProfileHandle()
check_call(_LIB.MXProfileCreateEvent(c_str(self.name), ctypes.byref(self.handle)))
def __del__(self):
if self.handle is not None:
check_call(_LIB.MXProfileDestroyHandle(self.handle))
def start(self):
"""Start timing scope for this object"""
check_call(_LIB.MXProfileDurationStart(self.handle))
def stop(self):
"""Stop timing scope for this object"""
check_call(_LIB.MXProfileDurationStop(self.handle))
def __str__(self):
return self.name
class Counter(object):
"""Profiling Counter class.
The counter event can track a value as it changes over time.
Parameters
----------
domain : Domain object
Domain to which this object belongs
name : string
Name of the counter
value: integer, optional
Initial value of the counter
"""
def __init__(self, domain, name, value=None):
self.name = name
self.handle = ProfileHandle()
check_call(_LIB.MXProfileCreateCounter(domain.handle,
c_str(name),
ctypes.byref(self.handle)))
if value is not None:
self.set_value(value)
def __del__(self):
if self.handle is not None:
check_call(_LIB.MXProfileDestroyHandle(self.handle))
def set_value(self, value):
"""Set counter value.
Parameters
----------
value : int
Value for the counter
"""
check_call(_LIB.MXProfileSetCounter(self.handle, int(value)))
def increment(self, delta=1):
"""Increment counter value.
Parameters
----------
delta : int
Amount by which to add to the counter
"""
check_call(_LIB.MXProfileAdjustCounter(self.handle, int(delta)))
def decrement(self, delta=1):
"""Decrement counter value.
Parameters
----------
delta : int
Amount by which to subtract from the counter
"""
check_call(_LIB.MXProfileAdjustCounter(self.handle, -int(delta)))
def __iadd__(self, delta):
self.increment(delta)
return self
def __isub__(self, delta):
self.decrement(delta)
return self
def __str__(self):
return self.name
class Marker(object):
"""Set marker for an instant in time.
The marker event marks a particular instant in time across some scope boundaries.
Parameters
----------
domain : Domain object
Domain to which this object belongs
name : string
Name of the marker
"""
def __init__(self, domain, name):
self.name = name
self.domain = domain
def mark(self, scope='process'):
"""Set up the profiler state to record operator.
Parameters
----------
scope : string, optional
Indicates what scope the marker should refer to.
Can be 'global', 'process', 'thread', 'task', or 'marker'.
Default is `process`.
"""
check_call(_LIB.MXProfileSetMarker(self.domain.handle, c_str(self.name), c_str(scope)))
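# Usage sketch (illustrative, not part of the original module): group a task,
# a counter and a marker under one domain; the names used are placeholders.
def _example_profile_objects():
    """Illustrative only: time a task and track a counter inside a domain."""
    domain = Domain('myapp')
    load_task = domain.new_task('data_loading')
    batches = domain.new_counter('batches_seen', value=0)
    load_task.start()
    # ... do the work being measured ...
    batches += 1                          # Counter supports += / -= in-place updates
    load_task.stop()
    domain.new_marker('epoch_end').mark(scope='process')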
|
dmlc/mxnet
|
python/mxnet/profiler.py
|
Python
|
apache-2.0
| 15,751
|
'''Arsenal Authorization class.'''
#
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import logging
import getpass
import ast
import requests
try:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except (ImportError, AttributeError):
pass
LOG = logging.getLogger(__name__)
# requests is chatty
logging.getLogger("requests").setLevel(logging.WARNING)
try:
requests.packages.urllib3.disable_warnings()
except AttributeError:
pass
def check_root():
'''Check and see if we're running as root. Returns True or False.'''
if not os.geteuid() == 0:
LOG.error('This command must be run as root.')
return False
return True
class Authorization(object):
'''The Arsenal Authorization class.
Usage::
>>> from arsenalclient.authorization import Authorization
>>> params = {
... 'cookie_file': '/home/abandt/.arsenal_cookie.txt',
... 'user_login': 'abandt',
... }
>>> auth = Authorization(**params)
Required Args:
cookie_file: A string that is the path to the cookie file to use. This
should map to a file in the homedir of the user.
user_login : A string that is the user's login name.
Optional Args:
api_host : A string that is the name of the Arsenal server. Default
value is 'arsenal'.
api_protocol : A string that is the protocol version to use. Valid
values are 'https' (default) or 'http'.
user_password: A string that is the user's password.
verify_ssl : Whether or not to verify the ssl connection to the Arsenal
server. Defaults to True.
'''
def __init__(self,
api_host='arsenal',
api_protocol='https',
verify_ssl=True,
**kwargs
):
self.session = requests.session()
self.cookies = None
self.api_protocol = api_protocol
self.api_host = api_host
self.cookie_file = kwargs.get('cookie_file')
self.user_login = kwargs.get('user_login')
self.user_password = kwargs.get('user_password')
self.verify_ssl = verify_ssl
def get_cookie_auth(self):
'''Gets cookies from cookie file or authenticates if no cookie file is
present.
Returns:
None. Populates self.cookies, raising an exception on failure.
'''
try:
self.read_cookie()
if not self.cookies:
self.authenticate()
else:
self.cookies = ast.literal_eval(self.cookies)
except Exception as ex:
LOG.error('Failed to evaluate cookies: {0}'.format(repr(ex)))
raise
def read_cookie(self):
'''Reads cookies from cookie file.
Returns:
A dict of all cookies if cookie_file is present, None otherwise.
'''
LOG.debug('Checking for cookie file: {0}'.format(self.cookie_file))
if os.path.isfile(self.cookie_file):
LOG.debug('Cookie file found: {0}'.format(self.cookie_file))
with open(self.cookie_file, 'r') as contents:
self.cookies = contents.read()
else:
LOG.debug('Cookie file does not exist: {0}'.format(self.cookie_file))
def write_cookie(self, cookies):
'''Writes cookies to cookie file.
Returns:
None. Raises an exception if the cookie file cannot be written.
'''
LOG.info('Writing cookie file: {0}'.format(self.cookie_file))
try:
cookie_dict = dict(cookies)
with open(self.cookie_file, "w") as cookie_file:
cookie_file.write(str(cookie_dict))
os.chmod(self.cookie_file, 0o600)
except Exception as ex:
LOG.error('Unable to write cookie: '
'{0}'.format(self.cookie_file))
LOG.error('Exception: {0}'.format(repr(ex)))
raise
def authenticate(self):
'''Prompts for user password and authenticates against the API. Writes
response cookies to file for later use.
Returns:
None. Populates self.cookies and writes the cookie file on success.
'''
if self.user_login == 'read_only':
LOG.error('Write access denied for read_only user.')
return None
else:
LOG.info('Authenticating login: {0}'.format(self.user_login))
# Kaboom password is exposed on dev systems where others have root,
# thus insecurable. This may change in the future.
if self.user_login == 'kaboom':
password = 'password'
elif getattr(self, 'user_password'):
password = self.user_password
else:
password = getpass.getpass('password: ')
try:
payload = {
'form.submitted': True,
'api.client': True,
'return_url': '/api',
'login': self.user_login,
'password': password
}
resp = self.session.post(self.api_protocol
+ '://'
+ self.api_host
+ '/login', data=payload,
verify=self.verify_ssl)
resp.raise_for_status()
LOG.debug('Authentication successful for user: {0}'.format(self.user_login))
self.cookies = self.session.cookies.get_dict()
LOG.debug('Cookies are: {0}'.format(self.cookies))
try:
self.write_cookie(self.cookies)
except Exception as ex:
LOG.error('Exception: {0}'.format(repr(ex)))
raise
except Exception as ex:
LOG.error('Exception: {0}'.format(repr(ex)))
LOG.error('Authentication failed')
raise
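# Usage sketch (illustrative, not part of the original module): obtain cookies
# once and reuse them on later runs; the cookie path and login mirror the
# placeholders from the class docstring above.
def _example_get_cookies():
    '''Illustrative only: read the cached cookie or prompt for a password.'''
    auth = Authorization(cookie_file='/home/abandt/.arsenal_cookie.txt',
                         user_login='abandt')
    auth.get_cookie_auth()
    return auth.cookies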
|
CityGrid/arsenal
|
client/arsenalclient/authorization.py
|
Python
|
apache-2.0
| 6,734
|
# (C) 2014 Andrew Vaught
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
# Definitions of keywords, tokens, intrinsics, etc.
class token:
def __init__(self, name):
self.name = name
return
def __str__(self):
return 'TOKEN(%s)' % self.name
pass
class constant:
def __init__(self, value, decl_type):
self.value = value
self.type = decl_type
return
def __str__(self):
return '$' + str(self.value)
def show(self):
sys.stdout.write(str(self.value))
return
def simplify(self):
return self
def used_vars(self, result):
return
def replace_vars(self, repl):
return
pass
class word:
def __init__(self, name):
self.name = name
return
def __str__(self):
return 'WORD(%s)' % self.name
pass
class keyword:
def __init__(self, name):
self.name = name
return
def __str__(self):
return 'KEYWORD(%s)' % self.name
pass
class type_name:
def __init__(self, name):
self.name = name
return
def __str__(self):
return 'TYPENAME(%s)' % self.name
pass
class type_node:
def __init__(self, basic_type, level=0):
self.basic_type = basic_type
self.level = level
return
def __cmp__(self, other):
assert(isinstance(other, type_node))
if self.basic_type is other.basic_type:
return 0
return 1
pass
class intrinsic_name:
def __init__(self, name):
self.name = name
return
def __str__(self):
return 'INTRINSIC(%s)' % self.name
pass
# Tokens
tok_equal = token('==')
tok_not_equal = token('!=')
tok_greater = token('>')
tok_greater_eq = token('>=')
tok_less = token('<')
tok_less_eq = token('<=')
tok_logical_and = token('&&')
tok_logical_or = token('||')
tok_lshift = token('<<')
tok_rshift = token('>>')
tok_logical_not = token('!')
tok_assign = token('=')
tok_bit_and = token('&')
tok_bit_or = token('|')
tok_bit_not = token('~')
tok_plus = token('+')
tok_minus = token('-')
tok_star = token('*')
tok_slash = token('/')
tok_mod = token('%')
tok_not = token('~')
tok_dot = token('.')
tok_comma = token(',')
tok_question = token('?')
tok_colon = token(':')
tok_semi = token(';')
tok_caret = token('^')
tok_lbrace = token('{')
tok_rbrace = token('}')
tok_lparen = token('(')
tok_rparen = token(')')
tok_eof = token('EOF') # Doesn't go in token_list
token_list = [
tok_assign, tok_equal, tok_not_equal, tok_greater, tok_greater_eq,
tok_less, tok_less_eq, tok_bit_and, tok_bit_or, tok_logical_and,
tok_logical_or, tok_logical_not, tok_plus, tok_minus, tok_star,
tok_slash, tok_mod, tok_bit_not, tok_logical_not, tok_dot,
tok_comma, tok_question, tok_colon, tok_semi, tok_caret,
tok_lshift, tok_rshift, tok_lbrace, tok_rbrace, tok_lparen,
tok_rparen, ]
# Keywords
kw_static = keyword('static')
kw_extern = keyword('extern')
kw_if = keyword('if')
kw_else = keyword('else')
kw_return = keyword('return')
kw_goto = keyword('goto')
kw_for = keyword('for')
kw_do = keyword('do')
kw_while = keyword('while')
kw_break = keyword('break')
kw_continue = keyword('continue')
kw_switch = keyword('switch')
kw_default = keyword('default')
kw_case = keyword('case')
keyword_list = [
kw_static, kw_extern, kw_return, kw_for, kw_do, kw_if, kw_else,
kw_while, kw_goto, kw_break, kw_continue, kw_switch, kw_case,
kw_default, ]
# Type names
type_void = type_name('void')
type_float4 = type_name('float4')
type_float8 = type_name('float8')
type_int8 = type_name('int8')
type_int4 = type_name('int4')
type_int2 = type_name('int2')
type_int1 = type_name('int1')
type_uint8 = type_name('uint8')
type_uint4 = type_name('uint4')
type_uint2 = type_name('uint2')
type_uint1 = type_name('uint1')
type_float8_2 = type_name('float8_2')
type_float4_4 = type_name('float4_4')
type_int8_2 = type_name('int8_2')
type_int4_4 = type_name('int4_4')
type_int2_8 = type_name('int2_8')
type_int1_16 = type_name('int1_16')
type_names = [
type_void, type_float4, type_float8,
type_int8, type_int4, type_int2, type_int1,
type_uint8, type_uint4, type_uint2, type_uint1,
type_float8_2,type_float4_4,
type_int8_2, type_int4_4, type_int2_8, type_int1_16 ]
# Intrinsics
intr_sqrt = intrinsic_name('sqrt')
intr_sum = intrinsic_name('sum')
intr_abs = intrinsic_name('abs')
intr_min = intrinsic_name('min')
intr_max = intrinsic_name('max')
intrinsic_names = [ intr_sqrt, intr_sum, intr_abs, intr_min, intr_max, ]
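# Usage sketch (illustrative, not part of the original module): the lists above
# are meant to be scanned when classifying identifiers; dict lookups keyed on
# the .name attribute make that classification direct.
_keyword_map = dict((k.name, k) for k in keyword_list)
_type_map = dict((t.name, t) for t in type_names)
def classify_word(text):
    "Illustrative only: map an identifier to a keyword, type name, or plain word."
    if text in _keyword_map:
        return _keyword_map[text]
    if text in _type_map:
        return _type_map[text]
    return word(text)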
|
andyv/sse
|
kw.py
|
Python
|
bsd-2-clause
| 5,849
|
from luigi_bigquery.config import get_config
from luigi_bigquery.client import ResultProxy
from luigi_bigquery.job import Job
from luigi_bigquery.targets.result import ResultTarget
from luigi_bigquery.targets.bq import DatasetTarget
from luigi_bigquery.targets.bq import TableTarget
from luigi_bigquery.targets.gcs import BucketTarget
from luigi_bigquery.targets.gcs import FileTarget
import luigi
import jinja2
import time
import bigquery
import string
import random
import six
import logging
logger = logging.getLogger('luigi-interface')
def _id_generator(size=16, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
# Dataset
class DatasetTask(luigi.Task):
config = get_config()
dataset_id = luigi.Parameter()
def output(self):
return DatasetTarget(self.dataset_id)
def run(self):
client = self.config.get_client()
logger.info('%s: creating dataset: %s', self, self.dataset_id)
client.create_dataset(self.dataset_id)
max_retry = 30
retry = 0
while True:
time.sleep(5.0)
if client.check_dataset(self.dataset_id):
break
retry += 1
if retry > max_retry:
msg = "DatasetTask(dataset_id={0}) max retry error.".format(self.dataset_id)
logger.error(msg)
raise Exception(msg)
# Table
class TableTask(luigi.Task):
config = get_config()
dataset_id = luigi.Parameter()
table_id = luigi.Parameter()
schema = luigi.Parameter(default=[], significant=False)
empty = luigi.BooleanParameter(default=False, significant=False)
def requires(self):
return DatasetTask(self.dataset_id)
def output(self):
return TableTarget(self.dataset_id, self.table_id, empty=self.empty)
def run(self):
client = self.config.get_client()
logger.info('%s: creating table: %s.%s', self, self.dataset_id, self.table_id)
client.create_table(self.dataset_id, self.table_id, self.schema)
# Query
class QueryTimeout(Exception):
pass
class Query(luigi.Task):
config = get_config()
debug = False
timeout = 3600
source = None
variables = {}
def query(self):
raise NotImplementedError()
def load_query(self, source):
env = jinja2.Environment(loader=jinja2.PackageLoader(self.__module__, '.'))
template = env.get_template(source)
return template.render(task=self, **self.variables)
def run_query(self, query):
result = self.output()
client = self.config.get_client()
logger.info("%s: query: %s", self, query)
job_id, _ = client.query(query)
logger.info("%s: bigquery.job.id: %s", self, job_id)
complete, result_size = client.check_job(job_id)
try:
if self.timeout:
timeout = time.time() + self.timeout
else:
timeout = None
while not complete:
if timeout and time.time() > timeout:
raise QueryTimeout('{0} timed out'.format(self))
time.sleep(5)
complete, result_size = client.check_job(job_id)
except:
raise
logger.info("%s: bigquery.job.result: job_id=%s result_size=%d", self, job_id, result_size)
return ResultProxy(Job(client, job_id))
def run(self):
query = self.load_query(self.source) if self.source else self.query()
result = self.run_query(query)
target = self.output()
if target and isinstance(target, ResultTarget):
target.save_result_state(result)
if self.debug:
import pandas as pd
TERMINAL_WIDTH = 120
pd.options.display.width = TERMINAL_WIDTH
six.print_('-' * TERMINAL_WIDTH)
six.print_('Query result:')
six.print_(result.to_dataframe())
six.print_('-' * TERMINAL_WIDTH)
class QueryTable(Query):
create_disposition = bigquery.JOB_CREATE_IF_NEEDED
write_disposition = bigquery.JOB_WRITE_EMPTY
def requires(self):
return DatasetTask(self.dataset())
def output(self):
return TableTarget(self.dataset(), self.table(), append=self._append())
def dataset(self):
raise NotImplementedError()
def table(self):
raise NotImplementedError()
def _append(self):
return self.write_disposition == bigquery.JOB_WRITE_APPEND
def save_as_table(self, query):
result = self.output()
client = self.config.get_client()
logger.info("%s: query: %s", self, query)
job = client.write_to_table(
query,
dataset=self.dataset(),
table=self.table(),
create_disposition=self.create_disposition,
write_disposition=self.write_disposition,
allow_large_results=True)
job_id = job['jobReference'].get('jobId')
logger.info("%s: bigquery.job.id: %s", self, job_id)
complete, result_size = client.check_job(job_id)
try:
if self.timeout:
timeout = time.time() + self.timeout
else:
timeout = None
while not complete:
if timeout and time.time() > timeout:
raise QueryTimeout('{0} timed out'.format(self))
time.sleep(5)
complete, result_size = client.check_job(job_id)
except:
raise
logger.info("%s: bigquery.job.result: job_id=%s result_size=%d", self, job_id, result_size)
return ResultProxy(Job(client, job_id))
def run(self):
query = self.load_query(self.source) if self.source else self.query()
self.save_as_table(query)
class QueryToGCS(QueryTable):
compression = luigi.Parameter(default='NONE') # or GZIP
format = luigi.Parameter(default='CSV') # or NEWLINE_DELIMITED_JSON
print_header = luigi.Parameter(default=True)
use_temporary_table = luigi.Parameter(default=True)
def __init__(self, *args, **kwargs):
super(QueryToGCS, self).__init__(*args, **kwargs)
self._random_id = 'tmp_{}'.format(_id_generator())
def dataset(self):
if self.use_temporary_table:
return self._random_id
else:
raise NotImplementedError()
def table(self):
if self.use_temporary_table:
return self._random_id
else:
raise NotImplementedError()
def output(self):
return FileTarget(self.bucket(), self.path())
def bucket(self):
raise NotImplementedError()
def path(self):
raise NotImplementedError()
def export_to_gcs(self):
result = self.output()
client = self.config.get_client()
logger.info("%s: export %s.%s to %s", self, self.dataset(), self.table(), result.uri())
job = client.export_data_to_uris(
destination_uris=[result.uri()],
dataset=self.dataset(),
table=self.table(),
compression=self.compression,
destination_format=self.format,
print_header=self.print_header)
job_id = job['jobReference'].get('jobId')
logger.info("%s: bigquery.job.id: %s", self, job_id)
try:
job_resource = client.wait_for_job(job, timeout=3600)
except:
raise
def _cleanup(self):
if self.use_temporary_table:
client = self.config.get_client()
client.delete_dataset(self.dataset(), delete_contents=True)
def run(self):
query = self.load_query(self.source) if self.source else self.query()
try:
self.save_as_table(query)
self.export_to_gcs()
finally:
self._cleanup()
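# Usage sketch (illustrative, not part of the original module): a user-defined
# QueryTable subclass; the dataset, table and SQL below are placeholders.
class ExampleDailyCount(QueryTable):
    """Illustrative only: materialize a query result into a BigQuery table."""
    def dataset(self):
        return 'example_dataset'
    def table(self):
        return 'daily_event_count'
    def query(self):
        return 'SELECT COUNT(*) AS cnt FROM [example_dataset.events]'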
|
hakobera/luigi-bigquery
|
luigi_bigquery/task.py
|
Python
|
apache-2.0
| 7,892
|
# -*- coding: utf-8 -*-
from yafowil.base import factory
def get_example():
part = factory(u'fieldset', name='yafowilwidgetautosuggest')
part['text'] = factory('field:label:error:autosuggest', props={
'label': 'Enter some text',
'source': sorted((u'Weißburgunder', u'Welschriesling',
u'Sauvingnon Blanc', u'Sämling', u'Scheurebe',
u'Traminer', u'Morrilon', u'Muskateller'))})
return [{'widget': part, 'doc': 'TODO'}]
|
bluedynamics/yafowil.widget.autosuggest
|
src/yafowil/widget/autosuggest/example.py
|
Python
|
bsd-3-clause
| 501
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
class _constant:
"Constant members implementation"
class ConstError(TypeError):
pass
def __setattr__(self, name, value):
if name in self.__dict__:
raise self.ConstError("Can't rebind constant: %s" % name)
# Binding an attribute for the first time is allowed
self.__dict__[name] = value
def __delattr__(self, name):
if name in self.__dict__:
raise self.ConstError("Can't unbind constant: %s" % name)
# we don't have an attribute by this name
raise NameError(name)
class Constants:
__c = _constant()
def __getattr__(self, attr):
return getattr(self.__c, attr)
def __setattr__(self, attr, value):
setattr(self.__c, attr, value)
def __delattr__(self, attr):
delattr(self.__c, attr)
consts = Constants()
consts.scenarios_path = "scenarios/"
consts.pisi_db = "db/"
consts.repo_name = "scenario-db"
consts.repo_path = "repo/"
consts.repo_url = consts.repo_path + "pisi-index.xml"
consts.glob_pisis = "*.pisi"
consts.pisi_suffix = ".pisi"
consts.pspec_path = "/tmp/pspec.xml"
consts.actionspy_path = "/tmp/actions.py"
consts.packager_name = "Faik Uygur"
consts.packager_email = "faik@pardus.org.tr"
consts.homepage = "http://cekirdek.uludag.org.tr/~faik/pisi"
consts.summary = "%s is a good application"
consts.description = "%s is a free software that can do anything it wants"
consts.license = ["GPL-2"]
consts.skel_sha1sum = "cc64dfa6e068fe1f6fb68a635878b1ea21acfac7"
consts.skel_type = "targz"
consts.skel_uri = "http://cekirdek.uludag.org.tr/~faik/pisi/skeleton.tar.gz"
consts.skel_bindir = "/usr/bin"
consts.skel_dirtype = "executable"
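# Usage sketch (illustrative, not part of the original module): rebinding any
# of the constants above raises _constant.ConstError, which is what the
# Constants wrapper is for.
def _example_rebind_guard():
    "Illustrative only: show that an already bound constant cannot be rebound."
    try:
        consts.repo_name = "another-name"
    except _constant.ConstError:
        pass  # expected: repo_name is already bound above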
|
hknyldz/pisitools
|
pisilinux/pisilinux/scenarioapi/constants.py
|
Python
|
gpl-3.0
| 2,030
|
class Outputter:
pass
|
DonaldWhyte/module-dependency
|
tests/outputters/not-subclass.py
|
Python
|
mit
| 28
|
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_copyright.main import main
def test_copyright():
rc = main(argv=[])
assert rc == 0, 'Found errors'
|
ros2/launch
|
launch_testing/test/launch_testing/test_copyright.py
|
Python
|
apache-2.0
| 722
|
#!/usr/bin/env python
import logging
import logging.handlers
import argparse
import sys
import time # this is only being used as part of the example
import os
import env
# logger should only be imported after initlogging has been called
logger = None
def initlogging(name='docklet'):
# Defaults
global logger
homepath = env.getenv('FS_PREFIX')
LOG_FILENAME = homepath + '/local/log/' + name + '.log'
LOG_LIFE = env.getenv('LOG_LIFE')
LOG_LEVEL = env.getenv('LOG_LEVEL')
if LOG_LEVEL == "DEBUG":
LOG_LEVEL = logging.DEBUG
elif LOG_LEVEL == "INFO":
LOG_LEVEL = logging.INFO
elif LOG_LEVEL == "WARNING":
LOG_LEVEL = logging.WARNING
elif LOG_LEVEL == "ERROR":
LOG_LEVEL = logging.ERROR
elif LOG_LEVEL == "CRITICAL":
LOG_LEVEL = logging.CRITICAL
else:
LOG_LEVEL = logging.DEBUG
logger = logging.getLogger(name)
# Configure logging to log to a file, making a new file at midnight and keeping LOG_LIFE days of data
# Give the logger a unique name (good practice)
# Set the log level to LOG_LEVEL
logger.setLevel(LOG_LEVEL)
# Make a handler that writes to a file, making a new file at midnight and keeping LOG_LIFE backups
handler = logging.handlers.TimedRotatingFileHandler(LOG_FILENAME,
when="midnight", backupCount=LOG_LIFE, encoding='utf-8')
# Format each log message like this
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(module)s[%(lineno)d] %(message)s')
# Attach the formatter to the handler
handler.setFormatter(formatter)
# Attach the handler to the logger
logger.addHandler(handler)
# Replace stdout with logging to file at INFO level
sys.stdout = RedirectLogger(logger, logging.INFO)
# Replace stderr with logging to file at ERROR level
sys.stderr = RedirectLogger(logger, logging.ERROR)
# Make a class we can use to capture stdout and stderr in the log
class RedirectLogger(object):
def __init__(self, logger, level):
"""Needs a logger and a logger level."""
self.logger = logger
self.level = level
def write(self, message):
# Only log if there is a message (not just a new line)
if message.rstrip() != "":
self.logger.log(self.level, message.rstrip())
def flush(self):
for handler in self.logger.handlers:
handler.flush()
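# Usage sketch (illustrative, not part of the original module): 'worker' is a
# placeholder service name; logging (and the stdout/stderr capture) only takes
# effect after initlogging() has run.
def _example_setup():
    """Illustrative only: initialise the rotating log and emit a first message."""
    initlogging('worker')               # writes to <FS_PREFIX>/local/log/worker.log
    logger.info('worker starting up')   # regular logger call
    print('captured as INFO too')       # stdout is now redirected into the log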
|
caodg/docklet
|
src/log.py
|
Python
|
bsd-3-clause
| 2,418
|
from django import forms
class AfricasTalkingForm(forms.Form):
def __init__(self,*args,**kwargs):
super(AfricasTalkingForm,self).__init__(*args,**kwargs)
self.fields['from'] = forms.CharField()
to = forms.CharField(label='Receiving ShortCode')
text = forms.CharField(label='Message')
date = forms.CharField()
id = forms.CharField()
linkId = forms.CharField()
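# Usage sketch (illustrative, not part of the original module): bind the form
# to callback-style POST data supplied by the caller. The 'from' field has to
# be reached through cleaned_data because 'from' is a Python keyword, which is
# why it is added dynamically in __init__ above.
def _example_handle_callback(post_data):
    """Illustrative only: validate an inbound message payload."""
    form = AfricasTalkingForm(post_data)
    if form.is_valid():
        return form.cleaned_data['from'], form.cleaned_data['text']
    return None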
|
I-TECH-UW/mwachx
|
transports/africas_talking/forms.py
|
Python
|
apache-2.0
| 402
|
from django.core.management.base import BaseCommand
from bibliotik.models import BibliotikFulltext, BibliotikTorrent
class Command(BaseCommand):
help = 'Clears and rebuilds the Bibliotik fulltext table.'
def handle(self, *args, **options):
print u'Deleting all fulltext entries...'
BibliotikFulltext.objects.all().delete()
print u'Fetching Bibliotik torrents...'
torrents = list(BibliotikTorrent.objects.defer('html_page', 'torrent_file').all())
print u'Got {0} torrents. Creating fulltext entries...'.format(len(torrents))
updated = 0
for t in torrents:
ft = BibliotikFulltext(
id=t.id
)
ft.update(t)
updated += 1
if updated % 200 == 0:
print u'Updated {0}/{1} torrents...'.format(updated, len(torrents))
print u'Successfully updated {0} torrents.'.format(len(torrents))
|
grandmasterchef/WhatManager2
|
WhatManager2/management/commands/bibliotik_rebuild_fulltext.py
|
Python
|
mit
| 940
|
import sys, re
import shared, js_optimizer
class AsmModule():
def __init__(self, filename):
self.filename = filename
self.js = open(filename).read()
self.start_asm = self.js.find(js_optimizer.start_asm_marker)
self.start_funcs = self.js.find(js_optimizer.start_funcs_marker)
self.end_funcs = self.js.rfind(js_optimizer.end_funcs_marker)
self.end_asm = self.js.rfind(js_optimizer.end_asm_marker)
# pre and asm
self.pre_js = self.js[:self.start_asm]
self.asm_js = self.js[self.start_asm:self.end_asm]
# heap initializer
self.staticbump = int(re.search(shared.JS.memory_staticbump_pattern, self.pre_js).group(1))
if self.staticbump:
self.mem_init_js = re.search(shared.JS.memory_initializer_pattern, self.pre_js).group(0)
# global initializers
global_inits = re.search(shared.JS.global_initializers_pattern, self.pre_js)
if global_inits:
self.global_inits_js = global_inits.group(0)
self.global_inits = map(lambda init: init.split('{')[2][1:].split('(')[0], global_inits.groups(0)[0].split(','))
else:
self.global_inits_js = ''
self.global_inits = []
# imports (and global variables)
first_var = self.js.find('var ', self.js.find('var ', self.start_asm)+4)
self.pre_imports_js = self.js[self.start_asm:first_var]
self.imports_js = self.js[first_var:self.start_funcs]
self.imports = {}
for imp in js_optimizer.import_sig.finditer(self.imports_js):
key, value = imp.group(0).split('var ')[1][:-1].split('=', 1)
self.imports[key.strip()] = value.strip()
#print >> sys.stderr, 'imports', self.imports
# funcs
self.funcs_js = self.js[self.start_funcs:self.end_funcs]
self.funcs = set([m.group(2) for m in js_optimizer.func_sig.finditer(self.funcs_js)])
#print 'funcs', self.funcs
# tables and exports
post_js = self.js[self.end_funcs:self.end_asm]
ret = post_js.find('return ')
self.tables_js = post_js[:ret]
self.exports_js = post_js[ret:]
self.tables = self.parse_tables(self.tables_js)
self.exports = set([export.strip() for export in self.exports_js[self.exports_js.find('{')+1:self.exports_js.find('}')].split(',')])
# post
self.post_js = self.js[self.end_asm:]
self.sendings = {}
for sending in [sending.strip() for sending in self.post_js[self.post_js.find('}, { ')+5:self.post_js.find(' }, buffer);')].split(',')]:
colon = sending.find(':')
self.sendings[sending[:colon].replace('"', '')] = sending[colon+1:].strip()
self.module_defs = set(re.findall('var [\w\d_$]+ = Module\["[\w\d_$]+"\] = asm\["[\w\d_$]+"\];\n', self.post_js))
def relocate_into(self, main):
# heap initializer
if self.staticbump > 0:
new_mem_init = self.mem_init_js[:self.mem_init_js.rfind(', ')] + ', Runtime.GLOBAL_BASE+%d)' % main.staticbump
main.pre_js = re.sub(shared.JS.memory_staticbump_pattern, 'STATICTOP = STATIC_BASE + %d;\n' % (main.staticbump + self.staticbump) + new_mem_init, main.pre_js, count=1)
# Find function name replacements TODO: do not rename duplicate names with duplicate contents, just merge them
replacements = {}
for func in self.funcs:
rep = func
while rep in main.funcs:
rep += '_'
replacements[func] = rep
#print >> sys.stderr, 'replacements:', replacements
# sendings: add invokes for new tables
all_sendings = main.sendings
added_sending = False
for table in self.tables:
if table not in main.tables:
sig = table[table.rfind('_')+1:]
func = 'invoke_%s' % sig
all_sendings[func] = func
main.pre_js += 'var %s = %s;\n' % (func, shared.JS.make_invoke(sig, named=False))
added_sending = True
# imports
all_imports = main.imports
for key, value in self.imports.iteritems():
if key in self.funcs or key in main.funcs: continue # external function in one module, implemented in the other
value_concrete = '.' not in value # env.key means it is an import, an external value, and not a concrete one
main_value = main.imports.get(key)
main_value_concrete = main_value and '.' not in main_value
if value_concrete and main_value_concrete: continue # standard global var
if not main_value or value_concrete:
if '+' in value:
# relocate
value = value.replace('(', '').replace(')', '').replace('| 0', '').replace('|0', '').replace(' ', '')
left, right = value.split('+')
assert left == 'H_BASE'
value = str(main.staticbump + int(right))
all_imports[key] = value
if (value_concrete or main_value_concrete) and key in all_sendings:
del all_sendings[key] # import of external value no longer needed
main.imports_js = '\n'.join(['var %s = %s;' % (key, value) for key, value in all_imports.iteritems()]) + '\n'
# check for undefined references to global variables
def check_import(key, value):
if value.startswith('+') or value.endswith('|0'): # ignore functions
if key not in all_sendings:
print >> sys.stderr, 'warning: external variable %s is still not defined after linking' % key
all_sendings[key] = '0'
for key, value in all_imports.iteritems(): check_import(key, value)
if added_sending:
sendings_js = ', '.join(['%s: %s' % (key, value) for key, value in all_sendings.iteritems()])
sendings_start = main.post_js.find('}, { ')+5
sendings_end = main.post_js.find(' }, buffer);')
main.post_js = main.post_js[:sendings_start] + sendings_js + main.post_js[sendings_end:]
# tables
f_bases = {}
f_sizes = {}
for table, data in self.tables.iteritems():
main.tables[table] = self.merge_tables(table, main.tables.get(table), data, replacements, f_bases, f_sizes)
main.combine_tables()
#print >> sys.stderr, 'f bases', f_bases
# relocate
temp = shared.Building.js_optimizer(self.filename, ['asm', 'relocate', 'last'], extra_info={
'replacements': replacements,
'fBases': f_bases,
'hBase': main.staticbump
})
#print >> sys.stderr, 'relocated side into', temp
relocated_funcs = AsmModule(temp)
shared.try_delete(temp)
main.extra_funcs_js = relocated_funcs.funcs_js.replace(js_optimizer.start_funcs_marker, '\n')
# update function table uses
ft_marker = 'FUNCTION_TABLE_'
def update_fts(what):
updates = []
i = 1 # avoid seeing marker in recursion
while 1:
i = what.find(ft_marker, i)
if i < 0: break;
start = i
end = what.find('[', start)
table = what[i:end]
if table not in f_sizes:
# table was not modified
i += len(ft_marker)
continue
nesting = 1
while nesting > 0:
next = what.find(']', end+1)
nesting -= 1
nesting += what.count('[', end+1, next)
end = next
assert end > 0
mask = what.rfind('&', start, end)
assert mask > 0 and end - mask <= 13
fixed = update_fts(what[start:mask+1] + str(f_sizes[table]-1) + ']')
updates.append((start, end, fixed))
i = end # additional function table uses were done by recursion
# apply updates
if len(updates) == 0: return what
parts = []
so_far = 0
for i in range(len(updates)):
start, end, fixed = updates[i]
parts.append(what[so_far:start])
parts.append(fixed)
so_far = end+1
parts.append(what[so_far:])
return ''.join(parts)
main.funcs_js = update_fts(main.funcs_js)
main.extra_funcs_js = update_fts(main.extra_funcs_js)
# global initializers
if self.global_inits:
my_global_inits = map(lambda init: replacements[init] if init in replacements else init, self.global_inits)
all_global_inits = map(lambda init: '{ func: function() { %s() } }' % init, main.global_inits + my_global_inits)
all_global_inits_js = '/* global initializers */ __ATINIT__.push(' + ','.join(all_global_inits) + ');'
if main.global_inits:
target = main.global_inits_js
else:
target = '// === Body ===\n'
all_global_inits_js = target + all_global_inits_js
main.pre_js = main.pre_js.replace(target, all_global_inits_js)
# exports
def rep_exp(export):
key, value = export.split(':')
if key in replacements:
repped = replacements[key]
return repped + ': ' + repped
return export
my_exports = map(rep_exp, self.exports)
exports = main.exports.union(my_exports)
main.exports_js = 'return {' + ','.join(list(exports)) + '};\n})\n'
# post
def rep_def(deff):
key = deff.split(' ')[1]
if key in replacements:
rep = replacements[key]
return 'var %s = Module["%s"] = asm["%s"];\n' % (rep, rep, rep)
return deff
my_module_defs = map(rep_def, self.module_defs)
new_module_defs = set(my_module_defs).difference(main.module_defs)
if len(new_module_defs) > 0:
position = main.post_js.find('Runtime.') # Runtime is the start of the hardcoded ones
main.post_js = main.post_js[:position] + ''.join(list(new_module_defs)) + '\n' + main.post_js[position:]
def write(self, out):
f = open(out, 'w')
f.write(self.pre_js)
f.write(self.pre_imports_js)
f.write(self.imports_js)
f.write(self.funcs_js)
f.write(self.extra_funcs_js)
f.write(self.tables_js)
f.write(self.exports_js)
f.write(self.post_js)
f.close()
# Utilities
def parse_tables(self, js):
tables = {}
parts = js.split(';')
for part in parts:
if '=' not in part: continue
part = part.split('var ')[1]
name, data = part.split('=')
tables[name.strip()] = data.strip()
return tables
def merge_tables(self, table, main, side, replacements, f_bases, f_sizes):
sig = table.split('_')[-1]
side = side[1:-1].split(',')
side = map(lambda f: replacements[f] if f in replacements else f, side)
if not main:
f_bases[sig] = 0
f_sizes[table] = len(side)
return '[' + ','.join(side) + ']'
main = main[1:-1].split(',')
# TODO: handle non-aliasing case too
assert len(main) % 2 == 0
f_bases[sig] = len(main)
ret = main + side
size = 2
while size < len(ret): size *= 2
aborter = ret[1] # we can assume odd indexes have an aborting function with the right signature
ret = ret + [aborter]*(size - len(ret))
assert len(ret) == size
f_sizes[table] = size
return '[' + ','.join(ret) + ']'
def combine_tables(self):
self.tables_js = '// EMSCRIPTEN_END_FUNCS\n'
for table, data in self.tables.iteritems():
self.tables_js += 'var %s = %s;\n' % (table, data)
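# Usage sketch (illustrative, not part of the original module): link a side
# asm.js module into a main one; 'main.js', 'side.js' and 'linked.js' are
# placeholder file names.
def _example_link(main_path='main.js', side_path='side.js', out_path='linked.js'):
    """Illustrative only: relocate one module into another and write the result."""
    main = AsmModule(main_path)
    side = AsmModule(side_path)
    side.relocate_into(main)   # merges funcs, tables, imports and exports into main
    main.write(out_path)       # write() expects relocate_into() to have run on main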
|
shrimpboyho/git.js
|
emscript/emscripten/1.5.6/tools/asm_module.py
|
Python
|
gpl-2.0
| 10,782
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import openni
import pygame
from PIL import Image
import numpy as np
POSE2USE = 'Psi'
class Kinect(object):
def __init__(self, game):
self.game = game
self.ctx = openni.Context()
self.ctx.init()
self.user = openni.UserGenerator()
self.user.create(self.ctx)
self.depth_generator = openni.DepthGenerator()
self.depth_generator.create(self.ctx)
self.depth_generator.set_resolution_preset(openni.RES_VGA)
self.depth_generator.fps = 30
self.image_generator = openni.ImageGenerator()
self.image_generator.create(self.ctx)
self.image_generator.set_resolution_preset(openni.RES_VGA)
self.depth_generator.alternative_view_point_cap.set_view_point(self.image_generator)
self.skel_cap = self.user.skeleton_cap
self.pose_cap = self.user.pose_detection_cap
# Define Joins we want to track
self.joints = ['SKEL_HEAD', 'SKEL_LEFT_FOOT', 'SKEL_RIGHT_SHOULDER',
'SKEL_LEFT_HAND', 'SKEL_NECK',
'SKEL_RIGHT_FOOT', 'SKEL_LEFT_HIP', 'SKEL_RIGHT_HAND',
'SKEL_TORSO', 'SKEL_LEFT_ELBOW', 'SKEL_LEFT_KNEE',
'SKEL_RIGHT_HIP', 'SKEL_LEFT_SHOULDER',
'SKEL_RIGHT_ELBOW', 'SKEL_RIGHT_KNEE']
def new_user(self, src, id):
print "1/4 User {} detected. Looking for pose..." .format(id)
self.pose_cap.start_detection(POSE2USE, id)
def pose_detected(self, src, pose, id):
print "2/4 Pose {} on user {}. Requesting calibration..." .format(pose, id)
self.pose_cap.stop_detection(id)
self.skel_cap.request_calibration(id, True)
def calibration_start(self, src, id):
print "3/4 Calibration started for user {}." .format(id)
def calibration_in_progress(self, src, id, status):
pass
def calibration_complete(self, src, id, status):
if status == openni.CALIBRATION_STATUS_OK:
print "4/4 User {} calibrated successfully! Starting to track." .format(id)
self.skel_cap.start_tracking(id)
else:
print "ERR User {} failed to calibrate. Restarting process." .format(id)
self.new_user(self.user, id)
def lost_user(self, src, id):
print "--- User {} lost." .format(id)
def get_joints(self):
for id in self.user.users:
if self.skel_cap.is_tracking(id) and self.skel_cap.is_calibrated(id):
joints = [self.skel_cap.get_joint_position(id, j)
for j in map(lambda a: getattr(openni, a), self.joints)]
return self.depth_generator.to_projective([j.point for j in joints])
def register(self):
self.user.register_user_cb(self.new_user, self.lost_user)
self.pose_cap.register_pose_detected_cb(self.pose_detected)
self.skel_cap.register_c_start_cb(self.calibration_start)
self.skel_cap.register_c_in_progress_cb(self.calibration_in_progress)
self.skel_cap.register_c_complete_cb(self.calibration_complete)
self.skel_cap.set_profile(openni.SKEL_PROFILE_ALL)
def capture_rgb(self):
rgb_frame = np.fromstring(self.image_generator.get_raw_image_map_bgr(),
dtype=np.uint8).reshape(self.game.size[1],
self.game.size[0], 3)
image = Image.fromarray(rgb_frame)
b, g, r = image.split()
image = Image.merge("RGB", (r, g, b))
self.game.frame = pygame.image.frombuffer(
image.tostring(), image.size, 'RGB')
def update_sensor(self):
self.ctx.wait_any_update_all()
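# Usage sketch (illustrative, not part of the original module): 'game' is a
# placeholder object expected to expose .size and .frame; the call to
# start_generating_all() is an assumption about the OpenNI binding in use.
def _example_tracking_loop(game):
    """Illustrative only: register callbacks and poll joints each frame."""
    kinect = Kinect(game)
    kinect.register()                    # hook user/pose/calibration callbacks
    kinect.ctx.start_generating_all()    # assumption: generators must be started
    while True:
        kinect.update_sensor()           # pump OpenNI updates
        kinect.capture_rgb()             # fills game.frame with the RGB image
        joints = kinect.get_joints()     # projective joint positions, or None
        if joints:
            pass                         # hand the skeleton to the game/renderer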
|
celiacintas/kinect_stuff
|
skeleton/mykinect.py
|
Python
|
gpl-2.0
| 3,781
|
"""
Copyright 2016 Gregory Jensen
This file is part of PyWireDI.
PyWireDI is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyWireDI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyWireDI. If not, see <http://www.gnu.org/licenses/>.
"""
from unittest import TestCase
from caseTransform import CaseTransform
class TestCaseTransform(TestCase):
def test_pascal_case_to_underscore(self):
self.assertEquals("test_case", CaseTransform().pascal_case_to_underscore("TestCase"))
self.assertEquals("test_case", CaseTransform().pascal_case_to_underscore("testCase"))
def test_underscore_to_pascal_case(self):
self.assertEquals("TestCase", CaseTransform().underscore_to_pascal_case("test_case"))
|
ThetaSinner/PyWireDI
|
tests/test_caseTransform.py
|
Python
|
gpl-3.0
| 1,155
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright 2014 Techdealer
##############IMPORTS AND DEFINITIONS####################
import urllib,urllib2,re,xbmcplugin,xbmcgui,sys,xbmc,xbmcaddon,xbmcvfs,socket,HTMLParser
import json
h = HTMLParser.HTMLParser()
addon_id = 'plugin.video.replaypt'
selfAddon = xbmcaddon.Addon(id=addon_id)
addonfolder = selfAddon.getAddonInfo('path')
artfolder = '/resources/img/'
docverdade_url = 'http://docverdade.blogspot.com/'
##################################################
def CATEGORIES_docverdade():
addDir('[B]Mudar para categorias[/B]',docverdade_url,435,addonfolder+artfolder+'docverdade.png',True)
listar_episodios(docverdade_url)
def alterar_vista(url):
addDir('[B]Mudar para últimas[/B]',url,432,addonfolder+artfolder+'docverdade.png');
try:
codigo_fonte = abrir_url(url)
except:
codigo_fonte = ''
if codigo_fonte:
match = re.findall("<li>.*?<a dir='ltr' href='(.+?)'>(.+?)</a>.*?<span dir='ltr'>(.+?)</span>.*?</li>", codigo_fonte, re.DOTALL)
for url, name, quantidade in match:
try:
addDir(name+' - '+quantidade,url,433,addonfolder+artfolder+'docverdade.png')
except:
pass
def listar_episodios(url):
try:
codigo_fonte = abrir_url(url)
except:
codigo_fonte = ''
if codigo_fonte:
match = re.findall("<h3 class='post-title entry-title' itemprop='name'>.*?<a href='(.+?)'>(.+?)</a>.*?</h3>.*?<div class='post-header'>.*?(?:<img[^\r\n]*?src=\"([^\"\r\n]+?)\".*?<div class='post-footer'>|<div class='post-footer'>)", codigo_fonte, re.DOTALL)
for link, name, iconimage in match:
try:
if iconimage:
addDir(name,link,434,iconimage,False)
else:
addDir(name,link,434,addonfolder+artfolder+'docverdade.png',False)
except: pass
match_2 = re.search("<a class='blog-pager-older-link' href='(.+?)'.*?>Postagens mais antigas</a>", codigo_fonte)
if match_2:
try:
url_2 = h.unescape(match_2.group(1))
codigo_fonte_2 = abrir_url(url_2)
match_3 = re.findall("<h3 class='post-title entry-title' itemprop='name'>.*?<a href='(.+?)'>(.+?)</a>.*?</h3>.*?<div class='post-header'>.*?(?:<img[^\r\n]*?src=\"([^\"\r\n]+?)\".*?<div class='post-footer'>|<div class='post-footer'>)", codigo_fonte_2, re.DOTALL)
if match_3:
addDir('[B]Próxima >>[/B]',url_2,433,addonfolder+artfolder+'docverdade.png')
except:
pass
def procurar_fontes(url,name,iconimage):
progress = xbmcgui.DialogProgress()
progress.create('Replay PT', 'Procurando fontes...')
progress.update(0)
playlist = xbmc.PlayList(1)
playlist.clear()
try:
codigo_fonte = abrir_url(url)
except:
codigo_fonte = ''
if codigo_fonte:
html_source_trunk = re.findall('<iframe(.*?)</iframe>', codigo_fonte, re.DOTALL)
for trunk in html_source_trunk:
try:
iframe = re.compile('src=["\'](.+?)["\']').findall(trunk)[0]
except: iframe = ''
if iframe:
if iframe.find('youtube.com/embed/videoseries?list=') > -1: # function to list YouTube playlists
match = re.compile('.*?youtube.com/embed/videoseries\?list=([^&"]+).*?').findall(iframe)
playlist_id = str(match[0])
page = 1
videos_per_page = 20
index = 1 + ((int(page)-1)*videos_per_page)
count = 0
checker = True
while checker:
codigo_fonte = abrir_url('https://gdata.youtube.com/feeds/api/playlists/' + playlist_id + '?max-results=' + str(videos_per_page) + '&start-index=' + str(index) + '&v=2&alt=json')
decoded_data = json.loads(codigo_fonte)
for x in range(0, len(decoded_data['feed']['entry'])):
count += 1
youtube_id = decoded_data['feed']['entry'][x]['media$group']['yt$videoid']['$t'].encode("utf8")
if count == int(decoded_data['feed']['openSearch$totalResults']['$t']):
playlist.add('plugin://plugin.video.youtube/?action=play_video&videoid='+youtube_id,xbmcgui.ListItem(name, thumbnailImage=iconimage))
checker = False
break
else:
playlist.add('plugin://plugin.video.youtube/?action=play_video&videoid='+youtube_id,xbmcgui.ListItem(name, thumbnailImage=iconimage))
if index<=500-videos_per_page+1 and index-1+videos_per_page<int(decoded_data['feed']['openSearch$totalResults']['$t']):
page += 1
index = 1 + ((int(page)-1)*videos_per_page)
elif iframe.find('youtube') > -1:
resolver_iframe = youtube_resolver(iframe)
if resolver_iframe != 'youtube_nao resolvido':
playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
elif iframe.find('dailymotion') > -1:
resolver_iframe = daily_resolver(iframe)
if resolver_iframe != 'daily_nao resolvido':
playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
elif iframe.find('vimeo.com') > -1:
resolver_iframe = vimeo_resolver(iframe)
if resolver_iframe != 'vimeo_nao resolvido':
playlist.add(resolver_iframe,xbmcgui.ListItem(name, thumbnailImage=iconimage))
# flash players (not iframes)
match = re.compile('<embed src=".*?youtube.com/v/([^?"]+).*?"').findall(codigo_fonte)
if match:
for youtube_id in match:
playlist.add('plugin://plugin.video.youtube/?action=play_video&videoid='+youtube_id,xbmcgui.ListItem(name, thumbnailImage=iconimage))
if progress.iscanceled():
sys.exit(0)
progress.update(100)
progress.close()
if len(playlist) == 0:
dialog = xbmcgui.Dialog()
ok = dialog.ok('Replay PT', 'Nenhuma fonte suportada encontrada...')
else:
try:
xbmc.Player().play(playlist)
except:
pass
def youtube_resolver(url):
match = re.compile('.*?youtube.com/embed/([^?"]+).*?').findall(url)
if match:
return 'plugin://plugin.video.youtube/?action=play_video&videoid=' + str(match[0])
else: return 'youtube_nao resolvido'
def daily_resolver(url):
if url.find('syndication') > -1: match = re.compile('/embed/video/(.+?)\?syndication').findall(url)
else: match = re.compile('/embed/video/(.*)').findall(url)
if match:
return 'plugin://plugin.video.dailymotion_com/?mode=playVideo&url=' + str(match[0])
else: return 'daily_nao resolvido'
def vimeo_resolver(url):
match = re.compile('/([0-9]+)').findall(url)
if match:
return 'plugin://plugin.video.vimeo/?action=play_video&videoid=' + str(match[0])
else: return 'vimeo_nao resolvido'
############################################################################################################################
def abrir_url(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
response.close()
return link
def addDir(name,url,mode,iconimage,pasta=True):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=pasta)
return ok
############################################################################################################
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
params=get_params()
url=None
name=None
mode=None
iconimage=None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
try:
iconimage=urllib.unquote_plus(params["iconimage"])
except:
pass
#print "Mode: "+str(mode)
#print "URL: "+str(url)
#print "Name: "+str(name)
#print "Iconimage: "+str(iconimage)
|
siouka/dmind
|
plugin.video.replaypt/docverdade.py
|
Python
|
gpl-2.0
| 8,418
|
#! /usr/bin/python
# debugmode=True
debugmode=False
from os import listdir,path,system
from popen2 import popen4
import sys
def svnCommand(cmd):
if debugmode:
print "SVN:",cmd
else:
system("svn "+cmd)
def rmEmpty(d):
if not path.isdir(d):
return False
else:
isEmpty=True
for f in listdir(d):
if f==".svn":
isEmpty=False
elif not rmEmpty(path.join(d,f)):
isEmpty=False
if isEmpty:
print "Removing ",d,"because it is empty"
if not debugmode:
system("rmdir "+d)
return isEmpty
start=sys.argv[1]
rmEmpty(start)
rein,raus=popen4("svn status "+start)
lines=rein.readlines()
rein.close()
raus.close()
modified=0
added=0
removed=0
conflicting=0
replaced=0
for l in lines:
status=l[0]
pstatus=l[1]
name=l[7:-1]
if status=="?":
print "Adding",name
svnCommand("add "+name)
elif status=="!":
print "Removing",name
svnCommand("delete "+name)
elif status=="M":
modified+=1
elif status=="A":
added+=1
elif status=="D":
removed+=1
elif status=="C":
conflicting+=1
elif status=="R":
replaced+=1
elif status=="~":
print "Problem with",name
print
print "Modified files:",modified
print "Added files:",added
print "Removed files:",removed
print "Conflicting files:",conflicting
print "Replaced files:",replaced
print
def checkEmptyDirs(current):
nrOfContents=0
for f in listdir(current):
if f==".svn":
continue
pfad=path.join(current,f)
if path.isdir(pfad):
if checkEmptyDirs(pfad):
nrOfContents+=1
else:
nrOfContents+=1
if nrOfContents==0:
print "Removing",current
svnCommand("remove "+current)
return False
else:
return True
checkEmptyDirs(start)
|
Unofficial-Extend-Project-Mirror/openfoam-extend-Core-OpenFOAM-1.5-dev
|
bin/foamAddAndRemoveFromRelease.py
|
Python
|
gpl-2.0
| 1,974
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.accounts.party import get_due_date
from frappe.test_runner import make_test_records
from erpnext.exceptions import PartyFrozen, PartyDisabled
from frappe.utils import flt
from erpnext.selling.doctype.customer.customer import get_credit_limit, get_customer_outstanding
from erpnext.tests.utils import create_test_contact_and_address
test_ignore = ["Price List"]
test_dependencies = ['Payment Term', 'Payment Terms Template']
test_records = frappe.get_test_records('Customer')
from six import iteritems
class TestCustomer(unittest.TestCase):
def setUp(self):
if not frappe.get_value('Item', '_Test Item'):
make_test_records('Item')
def tearDown(self):
frappe.db.set_value("Customer", '_Test Customer', 'credit_limit', 0.0)
def test_party_details(self):
from erpnext.accounts.party import get_party_details
to_check = {
'selling_price_list': None,
'customer_group': '_Test Customer Group',
'contact_designation': None,
'customer_address': '_Test Address for Customer-Office',
'contact_department': None,
'contact_email': 'test_contact_customer@example.com',
'contact_mobile': None,
'sales_team': [],
'contact_display': '_Test Contact for _Test Customer',
'contact_person': '_Test Contact for _Test Customer-_Test Customer',
'territory': u'_Test Territory',
'contact_phone': '+91 0000000000',
'customer_name': '_Test Customer'
}
create_test_contact_and_address()
frappe.db.set_value("Contact", "_Test Contact for _Test Customer-_Test Customer",
"is_primary_contact", 1)
details = get_party_details("_Test Customer")
for key, value in iteritems(to_check):
self.assertEqual(value, details.get(key))
def test_rename(self):
# delete communication linked to these 2 customers
for name in ("_Test Customer 1", "_Test Customer 1 Renamed"):
frappe.db.sql("""delete from `tabCommunication`
where communication_type='Comment' and reference_doctype=%s and reference_name=%s""",
("Customer", name))
# add comments
comment = frappe.get_doc("Customer", "_Test Customer 1").add_comment("Comment", "Test Comment for Rename")
# rename
frappe.rename_doc("Customer", "_Test Customer 1", "_Test Customer 1 Renamed")
# check if customer renamed
self.assertTrue(frappe.db.exists("Customer", "_Test Customer 1 Renamed"))
self.assertFalse(frappe.db.exists("Customer", "_Test Customer 1"))
# test that comment gets linked to renamed doc
self.assertEqual(frappe.db.get_value("Communication", {
"communication_type": "Comment",
"reference_doctype": "Customer",
"reference_name": "_Test Customer 1 Renamed"
}), comment.name)
# rename back to original
frappe.rename_doc("Customer", "_Test Customer 1 Renamed", "_Test Customer 1")
def test_freezed_customer(self):
make_test_records("Item")
frappe.db.set_value("Customer", "_Test Customer", "is_frozen", 1)
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
so = make_sales_order(do_not_save= True)
self.assertRaises(PartyFrozen, so.save)
frappe.db.set_value("Customer", "_Test Customer", "is_frozen", 0)
so.save()
def test_delete_customer_contact(self):
customer = frappe.get_doc(
get_customer_dict('_Test Customer for delete')).insert(ignore_permissions=True)
customer.mobile_no = "8989889890"
customer.save()
self.assertTrue(customer.customer_primary_contact)
frappe.delete_doc('Customer', customer.name)
def test_disabled_customer(self):
make_test_records("Item")
frappe.db.set_value("Customer", "_Test Customer", "disabled", 1)
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
so = make_sales_order(do_not_save=True)
self.assertRaises(PartyDisabled, so.save)
frappe.db.set_value("Customer", "_Test Customer", "disabled", 0)
so.save()
def test_duplicate_customer(self):
frappe.db.sql("delete from `tabCustomer` where customer_name='_Test Customer 1'")
if not frappe.db.get_value("Customer", "_Test Customer 1"):
test_customer_1 = frappe.get_doc(
get_customer_dict('_Test Customer 1')).insert(ignore_permissions=True)
else:
test_customer_1 = frappe.get_doc("Customer", "_Test Customer 1")
duplicate_customer = frappe.get_doc(
get_customer_dict('_Test Customer 1')).insert(ignore_permissions=True)
self.assertEqual("_Test Customer 1", test_customer_1.name)
self.assertEqual("_Test Customer 1 - 1", duplicate_customer.name)
self.assertEqual(test_customer_1.customer_name, duplicate_customer.customer_name)
def get_customer_outstanding_amount(self):
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
outstanding_amt = get_customer_outstanding('_Test Customer', '_Test Company')
# If outstanding is negative, make a transaction to get a positive outstanding amount
if outstanding_amt > 0.0:
return outstanding_amt
item_qty = int((abs(outstanding_amt) + 200)/100)
make_sales_order(qty=item_qty)
return get_customer_outstanding('_Test Customer', '_Test Company')
def test_customer_credit_limit(self):
from erpnext.stock.doctype.delivery_note.test_delivery_note import create_delivery_note
from erpnext.accounts.doctype.sales_invoice.test_sales_invoice import create_sales_invoice
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
from erpnext.selling.doctype.sales_order.sales_order import make_sales_invoice
outstanding_amt = self.get_customer_outstanding_amount()
credit_limit = get_credit_limit('_Test Customer', '_Test Company')
if outstanding_amt <= 0.0:
item_qty = int((abs(outstanding_amt) + 200)/100)
make_sales_order(qty=item_qty)
if credit_limit == 0.0:
frappe.db.set_value("Customer", '_Test Customer', 'credit_limit', outstanding_amt - 50.0)
# Sales Order
so = make_sales_order(do_not_submit=True)
self.assertRaises(frappe.ValidationError, so.submit)
# Delivery Note
dn = create_delivery_note(do_not_submit=True)
self.assertRaises(frappe.ValidationError, dn.submit)
# Sales Invoice
si = create_sales_invoice(do_not_submit=True)
self.assertRaises(frappe.ValidationError, si.submit)
if credit_limit > outstanding_amt:
frappe.db.set_value("Customer", '_Test Customer', 'credit_limit', credit_limit)
# Makes Sales invoice from Sales Order
so.save(ignore_permissions=True)
si = make_sales_invoice(so.name)
si.save(ignore_permissions=True)
self.assertRaises(frappe.ValidationError, make_sales_order)
def test_customer_credit_limit_on_change(self):
outstanding_amt = self.get_customer_outstanding_amount()
customer = frappe.get_doc("Customer", '_Test Customer')
customer.credit_limit = flt(outstanding_amt - 100)
self.assertRaises(frappe.ValidationError, customer.save)
def test_customer_payment_terms(self):
frappe.db.set_value(
"Customer", "_Test Customer With Template", "payment_terms", "_Test Payment Term Template 3")
due_date = get_due_date("2016-01-22", "Customer", "_Test Customer With Template")
self.assertEqual(due_date, "2016-02-21")
due_date = get_due_date("2017-01-22", "Customer", "_Test Customer With Template")
self.assertEqual(due_date, "2017-02-21")
frappe.db.set_value(
"Customer", "_Test Customer With Template", "payment_terms", "_Test Payment Term Template 1")
due_date = get_due_date("2016-01-22", "Customer", "_Test Customer With Template")
self.assertEqual(due_date, "2016-02-29")
due_date = get_due_date("2017-01-22", "Customer", "_Test Customer With Template")
self.assertEqual(due_date, "2017-02-28")
frappe.db.set_value("Customer", "_Test Customer With Template", "payment_terms", "")
# No default payment term template attached
due_date = get_due_date("2016-01-22", "Customer", "_Test Customer")
self.assertEqual(due_date, "2016-01-22")
due_date = get_due_date("2017-01-22", "Customer", "_Test Customer")
self.assertEqual(due_date, "2017-01-22")
def get_customer_dict(customer_name):
return {
"customer_group": "_Test Customer Group",
"customer_name": customer_name,
"customer_type": "Individual",
"doctype": "Customer",
"territory": "_Test Territory"
}
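# Illustrative usage of the helper above, mirroring what the tests themselves
# do (the customer name here is arbitrary):
# frappe.get_doc(get_customer_dict('_Test Customer XYZ')).insert(ignore_permissions=True)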
|
ESS-LLP/erpnext-healthcare
|
erpnext/selling/doctype/customer/test_customer.py
|
Python
|
gpl-3.0
| 8,333
|
# Copyright (c) 2013 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import redis
from redis.exceptions import BusyLoadingError, ConnectionError
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common.stream_codecs import PropertiesCodec, StringConverter
from trove.common import utils as utils
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import OneFileOverrideStrategy
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.redis import system
from trove.guestagent.datastore import service
from trove.guestagent import pkg
LOG = logging.getLogger(__name__)
TIME_OUT = 1200 # FIXME(pmalik): should probably use config timeout
CONF = cfg.CONF
CLUSTER_CFG = 'clustering'
packager = pkg.Package()
class RedisAppStatus(service.BaseDbStatus):
"""
Handles all of the status updating for the redis guest agent.
"""
def __init__(self, client):
super(RedisAppStatus, self).__init__()
self.__client = client
def set_client(self, client):
self.__client = client
def _get_actual_db_status(self):
try:
if self.__client.ping():
return rd_instance.ServiceStatuses.RUNNING
except ConnectionError:
return rd_instance.ServiceStatuses.SHUTDOWN
except BusyLoadingError:
return rd_instance.ServiceStatuses.BLOCKED
except Exception:
LOG.exception(_("Error getting Redis status."))
return rd_instance.ServiceStatuses.CRASHED
class RedisApp(object):
"""
Handles installation and configuration of redis
on a trove instance.
"""
@classmethod
def _init_overrides_dir(cls):
"""Initialize a directory for configuration overrides.
"""
revision_dir = guestagent_utils.build_file_path(
os.path.dirname(system.REDIS_CONFIG),
ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
if not os.path.exists(revision_dir):
operating_system.create_directory(
revision_dir,
user=system.REDIS_OWNER, group=system.REDIS_OWNER,
force=True, as_root=True)
return revision_dir
def __init__(self, state_change_wait_time=None):
"""
Sets default status and state_change_wait_time
"""
if state_change_wait_time:
self.state_change_wait_time = state_change_wait_time
else:
self.state_change_wait_time = CONF.state_change_wait_time
revision_dir = self._init_overrides_dir()
config_value_mappings = {'yes': True, 'no': False, "''": None}
self._value_converter = StringConverter(config_value_mappings)
self.configuration_manager = ConfigurationManager(
system.REDIS_CONFIG,
system.REDIS_OWNER, system.REDIS_OWNER,
PropertiesCodec(
unpack_singletons=False,
string_mappings=config_value_mappings
), requires_root=True,
override_strategy=OneFileOverrideStrategy(revision_dir))
self.admin = self._build_admin_client()
self.status = RedisAppStatus(self.admin)
def _build_admin_client(self):
password = self.get_configuration_property('requirepass')
socket = self.get_configuration_property('unixsocket')
return RedisAdmin(password=password, unix_socket_path=socket)
def install_if_needed(self, packages):
"""
Install redis if needed; do nothing if it is already installed.
"""
LOG.info(_('Preparing Guest as Redis Server.'))
if not packager.pkg_is_installed(packages):
LOG.info(_('Installing Redis.'))
self._install_redis(packages)
LOG.info(_('Redis installed completely.'))
def complete_install_or_restart(self):
"""
Finalize status updates for install or restart.
"""
LOG.debug("Complete install or restart called.")
self.status.end_install_or_restart()
def _install_redis(self, packages):
"""
Install the redis server.
"""
LOG.debug('Installing redis server.')
msg = "Creating %s." % system.REDIS_CONF_DIR
LOG.debug(msg)
operating_system.create_directory(system.REDIS_CONF_DIR, as_root=True)
pkg_opts = {}
packager.pkg_install(packages, pkg_opts, TIME_OUT)
self.start_redis()
LOG.debug('Finished installing redis server.')
def _enable_redis_on_boot(self):
"""
Enables redis on boot.
"""
LOG.info(_('Enabling Redis on boot.'))
operating_system.enable_service_on_boot(system.SERVICE_CANDIDATES)
def _disable_redis_on_boot(self):
"""
Disables redis on boot.
"""
LOG.info(_("Disabling Redis on boot."))
operating_system.disable_service_on_boot(system.SERVICE_CANDIDATES)
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
"""
Stops the redis application on the trove instance.
"""
LOG.info(_('Stopping redis.'))
if do_not_start_on_reboot:
self._disable_redis_on_boot()
operating_system.stop_service(system.SERVICE_CANDIDATES)
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_('Could not stop Redis.'))
self.status.end_install_or_restart()
def restart(self):
"""
Restarts the redis daemon.
"""
LOG.debug("Restarting Redis daemon.")
try:
self.status.begin_restart()
self.stop_db()
self.start_redis()
finally:
self.status.end_install_or_restart()
def update_overrides(self, context, overrides, remove=False):
if overrides:
self.configuration_manager.apply_user_override(overrides)
def apply_overrides(self, client, overrides):
"""Use the 'CONFIG SET' command to apply configuration at runtime.
Commands that appear multiple times have values separated by a
white space. For instance, the following two 'save' directives from the
configuration file...
save 900 1
save 300 10
... would be applied in a single command as:
CONFIG SET save "900 1 300 10"
Note that the 'CONFIG' command has been renamed to prevent
users from using it to bypass configuration groups.
"""
for prop_name, prop_args in overrides.items():
args_string = self._join_lists(
self._value_converter.to_strings(prop_args), ' ')
client.config_set(prop_name, args_string)
def _join_lists(self, items, sep):
"""Join list items (including items from sub-lists) into a string.
Non-list inputs are returned unchanged.
_join_lists('1234', ' ') = "1234"
_join_lists(['1','2','3','4'], ' ') = "1 2 3 4"
_join_lists([['1','2'], ['3','4']], ' ') = "1 2 3 4"
"""
if isinstance(items, list):
return sep.join([sep.join(e) if isinstance(e, list) else e
for e in items])
return items
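# Illustrative only: with the docstring example above, a user override like
# {'save': [['900', '1'], ['300', '10']]} is flattened by _join_lists() and
# applied at runtime as a single command: CONFIG SET save "900 1 300 10".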
def remove_overrides(self):
self.configuration_manager.remove_user_override()
def make_read_only(self, read_only):
# Redis has no mechanism to make an instance read-only at present
pass
def start_db_with_conf_changes(self, config_contents):
LOG.info(_('Starting redis with conf changes.'))
if self.status.is_running:
msg = 'Cannot start_db_with_conf_changes because status is %s.'
LOG.debug(msg, self.status)
raise RuntimeError(msg % self.status)
LOG.info(_("Initiating config."))
self.configuration_manager.save_configuration(config_contents)
# The configuration template has to be updated with
# guestagent-controlled settings.
self.apply_initial_guestagent_configuration()
self.start_redis(True)
def reset_configuration(self, configuration):
LOG.info(_("Resetting configuration."))
config_contents = configuration['config_contents']
self.configuration_manager.save_configuration(config_contents)
def start_redis(self, update_db=False):
"""
Start the redis daemon.
"""
LOG.info(_("Starting redis."))
self._enable_redis_on_boot()
operating_system.start_service(system.SERVICE_CANDIDATES)
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.RUNNING,
self.state_change_wait_time, update_db):
LOG.error(_("Start up of redis failed."))
try:
utils.execute_with_timeout('pkill', '-9',
'redis-server',
run_as_root=True,
root_helper='sudo')
except exception.ProcessExecutionError:
LOG.exception(_('Error killing stalled redis start command.'))
self.status.end_install_or_restart()
def apply_initial_guestagent_configuration(self):
"""Update guestagent-controlled configuration properties.
"""
# Hide the 'CONFIG' command from end users by mangling its name.
self.admin.set_config_command_name(self._mangle_config_command_name())
self.configuration_manager.apply_system_override(
{'daemonize': 'yes',
'pidfile': system.REDIS_PID_FILE,
'logfile': system.REDIS_LOG_FILE,
'dir': system.REDIS_DATA_DIR})
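# Illustrative only: via the ConfigurationManager these overrides end up as
# ordinary redis.conf directives, e.g.
# daemonize yes
# pidfile <system.REDIS_PID_FILE>
# logfile <system.REDIS_LOG_FILE>
# dir <system.REDIS_DATA_DIR>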
def get_config_command_name(self):
"""Get current name of the 'CONFIG' command.
"""
renamed_cmds = self.configuration_manager.get_value('rename-command')
for name_pair in renamed_cmds:
if name_pair[0] == 'CONFIG':
return name_pair[1]
return None
def _mangle_config_command_name(self):
"""Hide the 'CONFIG' command from the clients by renaming it to a
random string known only to the guestagent.
Return the mangled name.
"""
mangled = utils.generate_random_password()
self._rename_command('CONFIG', mangled)
return mangled
def _rename_command(self, old_name, new_name):
"""It is possible to completely disable a command by renaming it
to an empty string.
"""
self.configuration_manager.apply_system_override(
{'rename-command': [old_name, new_name]})
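# Illustrative only: the override above is rendered as a standard Redis
# directive, e.g. rename-command CONFIG <mangled-name>; renaming a command
# to an empty string ("") disables it entirely, as noted in the docstring.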
def get_logfile(self):
"""Specify the log file name. Also the empty string can be used to
force Redis to log on the standard output.
Note that if you use standard output for logging but daemonize,
logs will be sent to /dev/null
"""
return self.get_configuration_property('logfile')
def get_db_filename(self):
"""The filename where to dump the DB.
"""
return self.get_configuration_property('dbfilename')
def get_working_dir(self):
"""The DB will be written inside this directory,
with the filename specified by the 'dbfilename' configuration directive.
The Append Only File will also be created inside this directory.
"""
return self.get_configuration_property('dir')
def get_persistence_filepath(self):
"""Returns the full path to the persistence file."""
return guestagent_utils.build_file_path(
self.get_working_dir(), self.get_db_filename())
def get_port(self):
"""Port for this instance or default if not set."""
return self.get_configuration_property('port', system.REDIS_PORT)
def get_auth_password(self):
"""Client authentication password for this instance or None if not set.
"""
return self.get_configuration_property('requirepass')
def is_appendonly_enabled(self):
"""True if the Append Only File (AOF) persistence mode is enabled.
"""
return self.get_configuration_property('appendonly', False)
def get_append_file_name(self):
"""The name of the append only file (AOF).
"""
return self.get_configuration_property('appendfilename')
def is_cluster_enabled(self):
"""Only nodes that are started as cluster nodes can be part of a
Redis Cluster.
"""
return self.get_configuration_property('cluster-enabled', False)
def enable_cluster(self):
"""In order to start a Redis instance as a cluster node enable the
cluster support
"""
self.configuration_manager.apply_system_override(
{'cluster-enabled': 'yes'}, CLUSTER_CFG)
def get_cluster_config_filename(self):
"""Cluster node configuration file.
"""
return self.get_configuration_property('cluster-config-file')
def set_cluster_config_filename(self, name):
"""Make sure that instances running in the same system do not have
overlapping cluster configuration file names.
"""
self.configuration_manager.apply_system_override(
{'cluster-config-file': name}, CLUSTER_CFG)
def get_cluster_node_timeout(self):
"""Cluster node timeout is the amount of milliseconds a node must be
unreachable for it to be considered in failure state.
"""
return self.get_configuration_property('cluster-node-timeout')
def get_configuration_property(self, name, default=None):
"""Return the value of a Redis configuration property.
Returns a single value for single-argument properties or
a list otherwise.
"""
return utils.unpack_singleton(
self.configuration_manager.get_value(name, default))
def cluster_meet(self, ip, port):
try:
utils.execute_with_timeout('redis-cli', 'cluster', 'meet',
ip, port)
except exception.ProcessExecutionError:
LOG.exception(_('Error joining node to cluster at %s.'), ip)
raise
def cluster_addslots(self, first_slot, last_slot):
try:
slots = map(str, range(first_slot, last_slot + 1))
group_size = 200
while slots:
cmd = ([system.REDIS_CLI, 'cluster', 'addslots']
+ slots[0:group_size])
out, err = utils.execute_with_timeout(*cmd, run_as_root=True,
root_helper='sudo')
if 'OK' not in out:
raise RuntimeError(_('Error executing addslots: %s')
% out)
del slots[0:group_size]
except exception.ProcessExecutionError:
LOG.exception(_('Error adding slots %(first_slot)s-%(last_slot)s'
' to cluster.'),
{'first_slot': first_slot, 'last_slot': last_slot})
raise
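# Illustrative note: slots are added in chunks of 200 per 'redis-cli cluster
# addslots' call, presumably to keep individual command lines at a manageable
# length for large slot ranges.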
def _get_node_info(self):
try:
out, _ = utils.execute_with_timeout('redis-cli', '--csv',
'cluster', 'nodes')
return [line.split(' ') for line in out.splitlines()]
except exception.ProcessExecutionError:
LOG.exception(_('Error getting node info.'))
raise
def _get_node_details(self):
for node_details in self._get_node_info():
if 'myself' in node_details[2]:
return node_details
raise exception.TroveError(_("Unable to determine node details"))
def get_node_ip(self):
"""Returns [ip, port] where both values are strings"""
return self._get_node_details()[1].split(':')
def get_node_id_for_removal(self):
node_details = self._get_node_details()
node_id = node_details[0]
my_ip = node_details[1].split(':')[0]
try:
slots, _ = utils.execute_with_timeout('redis-cli', '--csv',
'cluster', 'slots')
return node_id if my_ip not in slots else None
except exception.ProcessExecutionError:
LOG.exception(_('Error validating node for removal.'))
raise
def remove_nodes(self, node_ids):
try:
for node_id in node_ids:
utils.execute_with_timeout('redis-cli', 'cluster',
'forget', node_id)
except exception.ProcessExecutionError:
LOG.exception(_('Error removing node from cluster.'))
raise
class RedisAdmin(object):
"""Handles administrative tasks on the Redis database.
"""
DEFAULT_CONFIG_CMD = 'CONFIG'
def __init__(self, password=None, unix_socket_path=None):
self.__client = redis.StrictRedis(
password=password, unix_socket_path=unix_socket_path)
self.__config_cmd_name = self.DEFAULT_CONFIG_CMD
def set_config_command_name(self, name):
"""Set name of the 'CONFIG' command or None for default.
"""
self.__config_cmd_name = name or self.DEFAULT_CONFIG_CMD
def ping(self):
"""Ping the Redis server and return True if a response is received.
"""
return self.__client.ping()
def get_info(self, section=None):
return self.__client.info(section=section)
def persist_data(self):
return self.__client.save()
def set_master(self, host=None, port=None):
self.__client.slaveof(host, port)
def config_set(self, name, value):
response = self.execute(
'%s %s' % (self.__config_cmd_name, 'SET'), name, value)
if not self._is_ok_response(response):
raise exception.UnprocessableEntity(
_("Could not set configuration property '%(name)s' to "
"'%(value)s'.") % {'name': name, 'value': value})
def _is_ok_response(self, response):
"""Return True if a given Redis response is 'OK'.
"""
return response and redis.client.bool_ok(response)
def execute(self, cmd_name, *cmd_args, **options):
"""Execute a command and return a parsed response.
"""
try:
return self.__client.execute_command(cmd_name, *cmd_args,
**options)
except Exception as e:
LOG.exception(e)
raise exception.TroveError(
_("Redis command '%(cmd_name)s %(cmd_args)s' failed.")
% {'cmd_name': cmd_name, 'cmd_args': ' '.join(cmd_args)})
def wait_until(self, key, wait_value, section=None,
timeout=CONF.usage_timeout):
"""Polls redis until the specified 'key' changes to 'wait_value'."""
LOG.debug("Waiting for Redis '%s' to be: %s." % (key, wait_value))
def _check_info():
redis_info = self.get_info(section)
if key in redis_info:
current_value = redis_info[key]
LOG.debug("Found '%s' for field %s." % (current_value, key))
else:
LOG.error(_('Output from Redis command: %s') % redis_info)
raise RuntimeError(_("Field %(field)s not found "
"(Section: '%(sec)s').") %
({'field': key, 'sec': section}))
return current_value == wait_value
try:
utils.poll_until(_check_info, time_out=timeout)
except exception.PollTimeOut:
raise RuntimeError(_("Timeout occurred waiting for Redis field "
"'%(field)s' to change to '%(val)s'.") %
{'field': key, 'val': wait_value})
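# Hypothetical usage sketch of wait_until() (values here are examples only):
# wait for a replica to finish syncing by polling the INFO replication
# section until 'master_link_status' reports 'up':
# admin = RedisAdmin(password='secret')
# admin.wait_until('master_link_status', 'up', section='replication')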
|
fabian4/trove
|
trove/guestagent/datastore/experimental/redis/service.py
|
Python
|
apache-2.0
| 20,833
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests dense attention layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.keras import combinations
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import dense_attention
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class BaseDenseAttentionTest(test.TestCase, parameterized.TestCase):
def test_one_dim_with_mask(self):
# Scores tensor of shape [1, 1, 1]
scores = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 1, 1]
v = np.array([[[1.6]]], dtype=np.float32)
# Scores mask tensor of shape [1, 1, 1]
scores_mask = np.array([[[True]]], dtype=np.bool_)
actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected softmax_scores = [[[1]]]
expected_scores = np.array([[[1.]]], dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [1, 1, 1].
# expected000 = softmax_scores[0, 0] * 1.6 = 1.6
expected = np.array([[[1.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_one_dim_no_mask(self):
# Scores tensor of shape [1, 1, 1]
scores = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 1, 1]
v = np.array([[[1.6]]], dtype=np.float32)
actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v)
# Expected softmax_scores = [[[1]]]
expected_scores = np.array([[[1.]]], dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [1, 1, 1].
# expected000 = softmax_scores[0, 0] * 1.6 = 1.6
expected = np.array([[[1.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_mask(self):
# Scores tensor of shape [1, 1, 3]
scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Scores mask tensor of shape [1, 1, 3]
scores_mask = np.array([[[True, True, False]]], dtype=np.bool_)
actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected softmax scores = softmax(scores) with zeros in positions where
# scores_mask == False.
# => softmax_scores000 = exp(1)/(exp(1) + exp(0)) = 0.73105857863
# softmax_scores001 = exp(0)/(exp(1) + exp(0)) = 0.26894142137
# softmax_scores002 = 0
expected_scores = np.array(
[[[0.73105857863, 0.26894142137, 0.]]], dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.73105857863 * 1.6 + 0.26894142137 * 0.7 - 0 * 0.8
# = 1.35795272077
expected = np.array([[[1.35795272077]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_no_mask(self):
# Scores tensor of shape [1, 1, 3]
scores = np.array([[[1., 0., 1.]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v)
# Expected softmax_scores = softmax(scores).
# => softmax_scores000 = exp(1)/(exp(1) + exp(0) + exp(1))
# = 0.42231879825
# softmax_scores001 = exp(0)/(exp(1) + exp(0) + exp(1))
# = 0.15536240349
# softmax_scores002 = exp(1)/(exp(1) + exp(0) + exp(1))
# = 0.42231879825
expected_scores = np.array(
[[[0.42231879825, 0.15536240349, 0.42231879825]]], dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.42231879825 * 1.6 + 0.15536240349 * 0.7
# - 0.42231879825 * 0.8
# = 0.44660872104
expected = np.array([[[0.44660872104]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_one_dim_batch_size_two(self):
# Scores tensor of shape [2, 1, 1]
scores = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
# Value tensor of shape [2, 1, 1]
v = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
# Scores mask tensor of shape [2, 1, 1]
scores_mask = np.array([[[True]], [[True]]], dtype=np.bool_)
actual, actual_scores = dense_attention.BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask)
# Expected softmax_scores = [[[1]], [[1]]]
expected_scores = np.array([[[1.]], [[1.]]], dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [2, 1, 1].
# expected000 = softmax_scores[0, 0] * 1.6 = 1.6
# expected100 = softmax_scores[1, 0] * 2.6 = 2.6
expected = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_shape_with_dropout(self):
# scores: Scores float tensor of shape `[batch_size, tq, tv]`.
# value: Value tensor of shape `[batch_size, tv, dim]`.
batch_size = 4
tq = 5
tv = 6
dim = 7
scores = np.ones((batch_size, tq, tv))
value = np.ones((batch_size, tv, dim))
actual, actual_scores = dense_attention.BaseDenseAttention(
dropout=0.1)._apply_scores(
scores=scores, value=value, training=False)
# Expected Tensor of shape `[batch_size, tq, tv]`.
expected_scores_shape = [batch_size, tq, tv]
self.assertAllEqual(expected_scores_shape, array_ops.shape(actual_scores))
# Expected Tensor of shape `[batch_size, tq, dim]`.
expected_shape = [batch_size, tq, dim]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_serialization(self):
# Test serialization with causal
layer = dense_attention.BaseDenseAttention(causal=True)
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.causal, True)
config = layer.get_config()
new_layer = dense_attention.BaseDenseAttention.from_config(config)
self.assertEqual(new_layer.causal, True)
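# Illustrative only: the masked-softmax expectations used in the tests above
# can be reproduced with plain NumPy, e.g. for scores [1., 0., 1.] and mask
# [True, True, False]:
# e = np.exp(np.array([1., 0., 1.])) * np.array([1., 1., 0.])
# e / e.sum() # -> [0.73105858, 0.26894142, 0.]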
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class AttentionTest(test.TestCase, parameterized.TestCase):
def test_calculate_scores_one_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Key tensor of shape [1, 1, 1]
k = np.array([[[1.6]]], dtype=np.float32)
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 1, 1].
# expected000 = 1.1*1.6 = 1.76
expected = np.array([[[1.76]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_multi_dim(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 2, 3].
# expected000 = 1.*1.5+1.1*1.6+1.2*1.7+1.3*1.8 = 7.64
# expected001 = 1.*2.5+1.1*2.6+1.2*2.7+1.3*2.8 = 12.24
# expected002 = 1.*3.5+1.1*3.6+1.2*3.7+1.3*3.8 = 16.84
# expected010 = 2.*1.5+2.1*1.6+2.2*1.7+2.3*1.8 = 14.24
# expected011 = 2.*2.5+2.1*2.6+2.2*2.7+2.3*2.8 = 22.84
# expected012 = 2.*3.5+2.1*3.6+2.2*3.7+2.3*3.8 = 31.44
expected = np.array(
[[[7.64, 12.24, 16.84], [14.24, 22.84, 31.44]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_one_dim_batch_size_two(self):
# Query tensor of shape [2, 1, 1]
q = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
# Key tensor of shape [2, 1, 1]
k = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1]))
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [2, 1, 1].
# expected000 = 1.1*1.6 = 1.76
# expected100 = 2.1*2.6 = 5.46
expected = np.array([[[1.76]], [[5.46]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_one_dim_with_scale(self):
"""Tests that scores are multiplied by scale."""
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Key tensor of shape [1, 1, 1]
k = np.array([[[1.6]]], dtype=np.float32)
attention_layer = dense_attention.Attention(use_scale=True)
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
attention_layer.scale = -2.
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 1, 1].
# expected000 = -2*1.1*1.6 = -3.52
expected = np.array([[[-3.52]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_shape(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_shape_with_key(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v, k], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_multi_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v], mask=[None, v_mask])
# Expected scores of shape [1, 1, 3]
# scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
# = 0.72908792234
# attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
# = 0.27091207765
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8
# = 1.3561791301
expected = np.array([[[1.3561791301]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_key(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 1]
k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention()
actual = attention_layer([q, v, k], mask=[None, v_mask])
# Expected scores of shape [1, 1, 3]
# scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8]]] = [[[1.76, 0.77, -0.88]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
# = 0.72908792234
# attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
# = 0.27091207765
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.72908792234 * 0.5 + 0.27091207765 * 0.8 - 0 * 0.3
# = 0.58127362329
expected = np.array([[[0.58127362329]]], dtype=np.float32)
self.assertAllClose(expected, actual)
@parameterized.named_parameters(
('', False),
('return_attention_scores', True),
)
def test_multi_dim_with_query_mask(self, return_attention_scores):
# Query tensor of shape [1, 2, 1]
q = np.array([[[1.1], [-0.5]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Query mask tensor of shape [1, 2]
q_mask = np.array([[True, False]], dtype=np.bool_)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.Attention(
return_attention_scores=return_attention_scores)
if return_attention_scores:
actual, actual_scores = attention_layer([q, v], mask=[q_mask, v_mask])
else:
actual = attention_layer([q, v], mask=[q_mask, v_mask])
# Expected scores of shape [1, 2, 3]
# scores = [[[1.1*1.6, 1.1*0.7, -1.1*0.8], [-0.5*1.6, -0.5*0.7, 0.5*0.8]]]
# = [[[1.76, 0.77, -0.88], [-0.8, -0.35, 0.4]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000 = exp(1.76)/(exp(1.76) + exp(0.77))
# = 0.72908792234
# attention_distribution001 = exp(0.77)/(exp(1.76) + exp(0.77))
# = 0.27091207765
# attention_distribution002 = 0
# => attention_distribution010 = exp(-0.8)/(exp(-0.8) + exp(-0.35))
# = 0.38936076605
# attention_distribution011 = exp(-0.35)/(exp(-0.8) + exp(-0.35))
# = 0.61063923394
# attention_distribution012 = 0
if return_attention_scores:
expected_scores = np.array(
[[[0.72908792234, 0.27091207765, 0.],
[0.38936076605, 0.61063923394, 0.]]],
dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.
# expected000 = 0.72908792234 * 1.6 + 0.27091207765 * 0.7 - 0 * 0.8
# = 1.3561791301
# expected010 = 0
expected = np.array([[[1.3561791301], [0.]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_scale_None(self):
"""Tests that scale is None by default."""
attention_layer = dense_attention.Attention()
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
self.assertIsNone(attention_layer.scale)
def test_scale_init_eager(self):
"""Tests that scale initializes to 1 when use_scale=True."""
if not context.executing_eagerly():
self.skipTest('Only run in eager mode')
attention_layer = dense_attention.Attention(use_scale=True)
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
self.assertAllClose(1., attention_layer.scale.value())
def test_scale_init_graph(self):
"""Tests that scale initializes to 1 when use_scale=True."""
with self.cached_session() as sess:
attention_layer = dense_attention.Attention(use_scale=True)
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
sess.run(attention_layer.scale.initializer)
self.assertAllClose(1., attention_layer.scale.value())
@parameterized.named_parameters(
('', False),
('return_attention_scores', True),
)
def test_self_attention_causal(self, return_attention_scores):
# Query-value tensor of shape [1, 3, 1]
q = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
attention_layer = dense_attention.Attention(
causal=True, return_attention_scores=return_attention_scores)
if return_attention_scores:
actual, actual_scores = attention_layer([q, q])
else:
actual = attention_layer([q, q])
# Expected scores of shape [1, 3, 3]
# scores = [[0.25, 0.4, -0.15], [0.4, 0.64, -0.24], [-0.15, -0.24, 0.09]]
# Expected attention distribution = softmax(scores) lower triangular
# => attention_distribution00 = [1., 0., 0.]
# attention_distribution01
# = [exp(0.4), exp(0.64), 0.] / (exp(0.4) + exp(0.64))
# = [0.44028635073, 0.55971364926, 0.]
# attention_distribution02
# = [exp(-0.15), exp(-0.24), exp(0.09)]
# / (exp(-0.15) + exp(-0.24) + exp(0.09))
# = [0.31395396638, 0.28693232061, 0.399113713]
if return_attention_scores:
expected_scores = np.array(
[[[1., 0., 0.],
[0.44028635073, 0.55971364926, 0.],
[0.31395396638, 0.28693232061, 0.399113713]]],
dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [1, 3, 1].
# expected000 = 0.5
# expected010 = 0.44028635073 * 0.5 + 0.55971364926 * 0.8
# = 0.66791409477
# expected020 = 0.31395396638 * 0.5 +0.28693232061 * 0.8 -0.399113713 * 0.3
# = 0.26678872577
expected = np.array(
[[[0.5], [0.66791409477], [0.26678872577]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_inputs_not_list(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
with self.assertRaisesRegex(
ValueError, 'Attention layer must be called on a list of inputs'):
attention_layer(q)
def test_inputs_too_short(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
with self.assertRaisesRegex(
ValueError, 'Attention layer accepts inputs list of length 2 or 3'):
attention_layer([q])
def test_inputs_too_long(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
with self.assertRaisesRegex(
ValueError, 'Attention layer accepts inputs list of length 2 or 3'):
attention_layer([q, q, q, q])
def test_mask_not_list(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
mask = np.array([[True]], dtype=np.bool_)
with self.assertRaisesRegex(ValueError,
'Attention layer mask must be a list'):
attention_layer([q, q], mask=mask)
def test_mask_too_short(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
mask = np.array([[True]], dtype=np.bool_)
with self.assertRaisesRegex(
ValueError, 'Attention layer mask must be a list of length 2'):
attention_layer([q, q], mask=[mask])
def test_mask_too_long(self):
attention_layer = dense_attention.Attention()
q = np.array([[[1.1]]], dtype=np.float32)
mask = np.array([[True]], dtype=np.bool_)
with self.assertRaisesRegex(
ValueError, 'Attention layer mask must be a list of length 2'):
attention_layer([q, q], mask=[mask, mask, mask])
def test_override_mask(self):
attention_layer = dense_attention.Attention()
q = core.Masking()(np.array([[[1.1]]], dtype=np.float32))
mask = np.array([[False]], dtype=np.bool_)
actual = attention_layer([q, q], mask=[mask, mask])
self.assertAllClose([[[0]]], actual)
def test_implicit_mask(self):
attention_layer = dense_attention.Attention()
q = core.Masking(1.1)(np.array([[[1.1], [1]]], dtype=np.float32))
v = core.Masking(1.2)(np.array([[[1.2], [1]]], dtype=np.float32))
actual = attention_layer([q, v])
self.assertAllClose([[[0], [1]]], actual)
@parameterized.named_parameters(
('', False, False),
('use_scale', True, False),
('return_attention_scores', False, True),
)
def test_serialization(self, use_scale, return_attention_scores):
# Test serialization with use_scale
layer = dense_attention.Attention(
use_scale=use_scale, return_attention_scores=return_attention_scores)
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.use_scale, use_scale)
self.assertEqual(new_layer.return_attention_scores, return_attention_scores)
config = layer.get_config()
new_layer = dense_attention.Attention.from_config(config)
self.assertEqual(new_layer.use_scale, use_scale)
self.assertEqual(new_layer.return_attention_scores, return_attention_scores)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class AdditiveAttentionTest(test.TestCase, parameterized.TestCase):
def test_calculate_scores_one_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Key tensor of shape [1, 1, 1]
k = np.array([[[1.6]]], dtype=np.float32)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 1, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683
expected = np.array([[[0.49550372683]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_multi_dim(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 2, 4], [1, 3, 4]))
# Scale tensor of shape [4]
attention_layer.scale = np.array([[[0.5, 0.6, 0.7, 0.8]]], dtype=np.float32)
actual = attention_layer._calculate_scores(query=q, key=k)
# pylint:disable=line-too-long
# expected000 = 0.5*tanh(1.+1.5) + 0.6*tanh(1.1+1.6) + 0.7*tanh(1.2+1.7) + 0.8*tanh(1.3+1.8) = 2.58044532581
# expected001 = 0.5*tanh(1.+2.5) + 0.6*tanh(1.1+2.6) + 0.7*tanh(1.2+2.7) + 0.8*tanh(1.3+2.8) = 2.59734317449
# expected002 = 0.5*tanh(1.+3.5) + 0.6*tanh(1.1+3.6) + 0.7*tanh(1.2+3.7) + 0.8*tanh(1.3+3.8) = 2.59964024652
# expected010 = 0.5*tanh(2.+1.5) + 0.6*tanh(2.1+1.6) + 0.7*tanh(2.2+1.7) + 0.8*tanh(2.3+1.8) = 2.59734317449
# expected011 = 0.5*tanh(2.+2.5) + 0.6*tanh(2.1+2.6) + 0.7*tanh(2.2+2.7) + 0.8*tanh(2.3+2.8) = 2.59964024652
# expected012 = 0.5*tanh(2.+3.5) + 0.6*tanh(2.1+3.6) + 0.7*tanh(2.2+3.7) + 0.8*tanh(2.3+3.8) = 2.59995130916
# pylint:enable=line-too-long
expected = np.array(
[[[2.58044532581, 2.59734317449, 2.59964024652],
[2.59734317449, 2.59964024652, 2.59995130916]]],
dtype=np.float32)
self.assertAllClose(expected, actual)
def test_calculate_scores_one_dim_batch_size_two(self):
# Query tensor of shape [2, 1, 1]
q = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
# Key tensor of shape [2, 1, 1]
k = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([2, 1, 1], [2, 1, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer._calculate_scores(query=q, key=k)
# Expected tensor of shape [2, 1, 1].
# expected000 = 0.5 * tanh(1.1 + 1.6) = 0.49550372683
# expected100 = 0.5 * tanh(2.1 + 2.6) = 0.49991728277
expected = np.array(
[[[0.49550372683]], [[0.49991728277]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_shape(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
actual = attention_layer([q, v], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_shape_no_scale(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention(use_scale=False)
actual = attention_layer([q, v], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_shape_with_key(self):
# Query tensor of shape [1, 2, 4]
q = np.array(
[[[1., 1.1, 1.2, 1.3], [2., 2.1, 2.2, 2.3]]], dtype=np.float32)
# Value tensor of shape [1, 3, 4]
v = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Key tensor of shape [1, 3, 4]
k = np.array(
[[[1.5, 1.6, 1.7, 1.8], [2.5, 2.6, 2.7, 2.8], [3.5, 3.6, 3.7, 3.8]]],
dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
actual = attention_layer([q, v, k], mask=[None, v_mask])
expected_shape = [1, 2, 4]
self.assertAllEqual(expected_shape, array_ops.shape(actual))
def test_multi_dim(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v], mask=[None, v_mask])
# pylint:disable=line-too-long
# Expected scores of shape [1, 1, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
# = 1.15497245968
# pylint:enable=line-too-long
expected = np.array([[[1.15497245968]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_key(self):
# Query tensor of shape [1, 1, 1]
q = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[0.5], [0.8], [-0.3]]], dtype=np.float32)
# Key tensor of shape [1, 3, 1]
k = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v, k], mask=[None, v_mask])
# pylint:disable=line-too-long
# Expected scores of shape [1, 1, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
#
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.50552495521 * 0.5 + 0.49447504478 * 0.8 - 0 * 0.3
# = 0.64834251342
# pylint:enable=line-too-long
expected = np.array([[[0.64834251342]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_query_mask(self):
# Query tensor of shape [1, 2, 1]
q = np.array([[[1.1], [-0.5]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Query mask tensor of shape [1, 2]
q_mask = np.array([[True, False]], dtype=np.bool_)
# Value mask tensor of shape [1, 3]
v_mask = np.array([[True, True, False]], dtype=np.bool_)
attention_layer = dense_attention.AdditiveAttention()
attention_layer.build(input_shape=([1, 1, 1], [1, 3, 1]))
# Scale tensor of shape [1]
attention_layer.scale = np.array([[[0.5]]], dtype=np.float32)
actual = attention_layer([q, v], mask=[q_mask, v_mask])
# pylint:disable=line-too-long
# Expected scores of shape [1, 2, 3]
# scores = [[[0.5 * tanh(1.1 + 1.6), 0.5 * tanh(1.1 + 0.7), 0.5 * tanh(1.1 - 0.8)],
# [0.5 * tanh(-0.5 + 1.6), 0.5 * tanh(-0.5 + 0.7), 0.5 * tanh(-0.5 - 0.8)]]]
# = [[[0.49550372683, 0.47340300642, 0.14565630622],
# [0.40024951088, 0.09868766011, -0.43086157965]]]
# Expected attention distribution = softmax(scores) with zeros in
# positions where v_mask == False.
# => attention_distribution000
# = exp(0.49550372683)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.50552495521
# attention_distribution001
# = exp(0.47340300642)/(exp(0.49550372683) + exp(0.47340300642))
# = 0.49447504478
# attention_distribution002 = 0
# => attention_distribution010
# = exp(0.40024951088)/(exp(0.40024951088) + exp(0.09868766011))
# = 0.57482427975
# attention_distribution011
# = exp(0.09868766011)/(exp(0.40024951088) + exp(0.09868766011))
# = 0.42517572025
# attention_distribution012 = 0
#
# Expected tensor of shape [1, 2, 1] with zeros where q_mask == False.
# expected000 = 0.50552495521 * 1.6 + 0.49447504478 * 0.7 - 0 * 0.8
# = 1.15497245968
# expected010 = 0
# pylint:enable=line-too-long
expected = np.array([[[1.15497245968], [0.]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_serialization(self):
# Test serialization with use_scale
layer = dense_attention.AdditiveAttention(use_scale=True)
config = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(config)
self.assertEqual(new_layer.use_scale, True)
config = layer.get_config()
new_layer = dense_attention.AdditiveAttention.from_config(config)
self.assertEqual(new_layer.use_scale, True)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LowerTriangularMaskTest(test.TestCase, parameterized.TestCase):
def test_square_shape(self):
actual = dense_attention._lower_triangular_mask([3, 3])
expected = np.array(
[[True, False, False], [True, True, False], [True, True, True]],
dtype=np.bool_)
self.assertAllEqual(expected, actual)
def test_orthogonal_shape(self):
actual = dense_attention._lower_triangular_mask([3, 2])
expected = np.array(
[[True, False], [True, True], [True, True]], dtype=np.bool_)
self.assertAllEqual(expected, actual)
def test_three_dim(self):
actual = dense_attention._lower_triangular_mask([1, 3, 3])
expected = np.array(
[[[True, False, False], [True, True, False], [True, True, True]]],
dtype=np.bool_)
self.assertAllEqual(expected, actual)
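# Illustrative only: the expected masks above are plain lower-triangular
# boolean matrices over the trailing two dimensions, i.e. what
# np.tril(np.ones(shape, dtype=np.bool_)) would produce.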
if __name__ == '__main__':
test.main()
|
davidzchen/tensorflow
|
tensorflow/python/keras/layers/dense_attention_test.py
|
Python
|
apache-2.0
| 34,547
|
#!/usr/bin/env python
config = {
"exes": {
# Get around the https warnings
"hg": ['/usr/local/bin/hg', "--config", "web.cacerts=/etc/pki/tls/certs/ca-bundle.crt"],
"hgtool.py": ["/usr/local/bin/hgtool.py"],
"gittool.py": ["/usr/local/bin/gittool.py"],
},
'gecko_pull_url': 'https://hg.mozilla.org/integration/b2g-inbound',
'gecko_push_url': 'ssh://hg.mozilla.org/integration/b2g-inbound',
'gecko_local_dir': 'b2g-inbound',
'git_ref_cache': '/builds/b2g_bumper/git_ref_cache.json',
'manifests_repo': 'https://git.mozilla.org/b2g/b2g-manifest.git',
'manifests_revision': 'origin/master',
'hg_user': 'B2G Bumper Bot <release+b2gbumper@mozilla.com>',
"ssh_key": "~/.ssh/ffxbld_rsa",
"ssh_user": "ffxbld",
'hgtool_base_bundle_urls': ['https://ftp-ssl.mozilla.org/pub/mozilla.org/firefox/bundles'],
'gaia_repo_url': 'https://hg.mozilla.org/integration/gaia-central',
'gaia_revision_file': 'b2g/config/gaia.json',
'gaia_max_revisions': 5,
# Which git branch this hg repo corresponds to
'gaia_git_branch': 'master',
'gaia_git_repo': 'https://git.mozilla.org/releases/gaia.git',
'gaia_mapper_project': 'gaia',
'mapper_url': 'http://cruncher.build.mozilla.org/mapper/{project}/{vcs}/{rev}',
'devices': {
'dolphin': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'emulator-l': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'emulator-kk': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'emulator-jb': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'emulator-ics': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
'manifest_file': 'emulator.xml',
},
# Equivalent to emulator-ics - see bug 916134.
# Remove once the above bug is resolved.
'emulator': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
'manifest_file': 'emulator.xml',
},
'flame-kk': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'nexus-4': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'nexus-5-l': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
},
'aries': {
'ignore_projects': ['gecko'],
'ignore_groups': ['darwin'],
'manifest_file': 'shinano.xml',
},
},
'repo_remote_mappings': {
'https://android.googlesource.com/': 'https://git.mozilla.org/external/aosp',
'git://codeaurora.org/': 'https://git.mozilla.org/external/caf',
'git://github.com/mozilla-b2g/': 'https://git.mozilla.org/b2g',
'git://github.com/mozilla/': 'https://git.mozilla.org/b2g',
'https://git.mozilla.org/releases': 'https://git.mozilla.org/releases',
'http://android.git.linaro.org/git-ro/': 'https://git.mozilla.org/external/linaro',
'http://sprdsource.spreadtrum.com:8085/b2g/android': 'https://git.mozilla.org/external/sprd-aosp',
'git://github.com/apitrace/': 'https://git.mozilla.org/external/apitrace',
'git://github.com/t2m-foxfone/': 'https://git.mozilla.org/external/t2m-foxfone',
# Some mappings to ourselves; we want to leave these as-is!
'https://git.mozilla.org/external/aosp': 'https://git.mozilla.org/external/aosp',
'https://git.mozilla.org/external/caf': 'https://git.mozilla.org/external/caf',
'https://git.mozilla.org/b2g': 'https://git.mozilla.org/b2g',
'https://git.mozilla.org/external/apitrace': 'https://git.mozilla.org/external/apitrace',
'https://git.mozilla.org/external/t2m-foxfone': 'https://git.mozilla.org/external/t2m-foxfone',
},
}
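# Illustrative only: the mappings above are presumably used to rewrite
# manifest remotes to their Mozilla mirrors, e.g.
# git://github.com/mozilla-b2g/<project> would map to
# https://git.mozilla.org/b2g/<project>; the identity entries at the end keep
# already-mirrored URLs untouched.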
|
armenzg/build-mozharness
|
configs/b2g_bumper/master.py
|
Python
|
mpl-2.0
| 4,028
|
import random
import string
import time
import storm
class TestSpout(storm.Spout):
def nextTuple(self):
"""Emit a random letter every five seconds."""
time.sleep(5)
storm.emit([random.choice(string.ascii_letters)])
TestSpout().run()
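# Illustrative note: Spout.run() from the storm multilang helper enters the
# protocol loop, exchanging JSON messages with the Storm worker over
# stdin/stdout; on the Java side this script would typically be wired up as a
# ShellSpout in the topology definition.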
|
mattskone/storm-python-demo
|
multilang/resources/testspout.py
|
Python
|
mit
| 268
|
#!/usr/bin/env python3.2
#
# Copyright (c) Net24 Limited, Christchurch, New Zealand 2011-2012
# and Voyager Internet Ltd, New Zealand, 2012-2013
#
# This file is part of py-magcode-core.
#
# Py-magcode-core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Py-magcode-core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with py-magcode-core. If not, see <http://www.gnu.org/licenses/>.
#
"""
Module for ZoneEngine base class
"""
import datetime
import re
import socket
from io import StringIO
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy import desc
from pyparsing import ParseBaseException
from magcode.core.globals_ import *
from dms.globals_ import *
from magcode.core.database import *
from magcode.core.database.event import find_events
from magcode.core.database.event import ESTATE_FAILURE
from magcode.core.database.event import create_event
from magcode.core.database.event import Event
# import all possible types used here so that they initialise in zone-tool
from dms.database.master_sm import zone_sm_dnssec_schedule
from dms.database.zone_sm import ZoneSM
from dms.database.zone_sm import ZoneSMEdit
from dms.database.zone_sm import ZoneSMEditExit
from dms.database.zone_sm import ZoneSMEditTimeout
from dms.database.zone_sm import ZoneSMEditLockTickle
from dms.database.zone_sm import ZoneSMEditUpdate
from dms.database.zone_sm import ZoneSMUpdate
from dms.database.zone_sm import ZoneSMEditSaved
from dms.database.zone_sm import ZoneSMEnable
from dms.database.zone_sm import ZoneSMDisable
from dms.database.zone_sm import ZoneSMDoReconfig
from dms.database.zone_sm import ZoneSMDoBatchConfig
from dms.database.zone_sm import ZoneSMDoConfig
from dms.database.zone_sm import ZoneSMEditSavedNoLock
from dms.database.zone_sm import ZoneSMDelete
from dms.database.zone_sm import ZoneSMUndelete
from dms.database.zone_sm import ZoneSMDoReset
from dms.database.zone_sm import ZoneSMDoRefresh
from dms.database.zone_sm import ZoneSMDoDestroy
from dms.database.zone_sm import ZoneSMDoSgSwap
from dms.database.zone_sm import ZoneSMDoSetAltSg
from dms.database.zone_sm import ZLSTATE_EDIT_LOCK
from dms.database.zone_sm import ZSTATE_DISABLED
from dms.database.zone_sm import ZSTATE_UNCONFIG
from dms.database.zone_sm import ZSTATE_DELETED
from dms.database.zone_sm import ZSTATE_PUBLISHED
from dms.database.zone_sm import exec_zonesm
from dms.database.zone_sm import new_zone
from dms.database.zone_sm import DynDNSZoneSM
from dms.database.zone_instance import ZoneInstance
from dms.database.zone_instance import new_zone_zi
from dms.database.resource_record import ResourceRecord
from dms.database.resource_record import data_to_rr
import dms.database.zone_cfg as zone_cfg
from dms.database.reference import Reference
from dms.database.rr_comment import RRComment
from dms.database.zone_sectag import ZoneSecTag
from dms.database.zone_sectag import list_all_sectags
# import securitytags so that sql_data is initialised
import dms.database.zone_sectag
from dms.database.master_sm import show_master_sm
from dms.database.master_sm import get_mastersm_replica_sg
from dms.database.sg_utility import list_all_sgs
from dms.database.sg_utility import find_sg_byname
from dms.database.reference import new_reference
from dms.database.reference import del_reference
from dms.database.reference import find_reference
from dms.database.reference import rename_reference
from dms.database.server_sm import ServerSM
from dms.database.server_group import ServerGroup
from magcode.core.wsgi.jsonrpc_server import InvalidParamsJsonRpcError
from dms.exceptions import *
from dms.database.zone_query import rr_query_db_raw
from dms.zone_data_util import ZoneDataUtil
from dms.dns import is_inet_domain
from dms.dns import is_network_address
from dms.dns import wellformed_cidr_network
from dms.dns import zone_name_from_network
from dms.dns import new_soa_serial_no
from dms.database.reverse_network import new_reverse_network
from dms.database.reverse_network import ReverseNetwork
from dms.zone_text_util import data_to_bind
from dms.zone_text_util import bind_to_data
class ZoneEngine(ZoneDataUtil):
"""
Base Zone Editing/control Engine container class
Contains common code and stub methods.
"""
def __init__(self, time_format=None, sectag_label=None):
"""
Initialise engine. Get a scoped DB session.
"""
self.time_format = time_format
self.sectag = ZoneSecTag(sectag_label=sectag_label)
self.refresh_db_session()
if self.sectag not in list_all_sectags(self.db_session):
raise ZoneSecTagConfigError(self.sectag.sectag)
def refresh_db_session(self):
self.db_session = sql_data['scoped_session_class']()
def rollback(self):
self.db_session.rollback()
def _finish_op(self):
self.db_session.commit()
def _begin_op(self):
# Refresh SA session
self.refresh_db_session()
self.db_session.commit()
_login_id_char_re = re.compile(r'^[\-_a-zA-Z0-9.@]+$')
_login_id_start_re = re.compile(r'^[0-9a-zA-Z][\-_a-zA-Z0-9.@]*$')
def _make_change_by(self, login_id):
"""
Create a change_by string from a login_id
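
        For example (hypothetical values), a login_id of 'jsmith@example.org'
        on an engine whose sectag is 'Admin' yields 'jsmith@example.org/Admin'.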
"""
# Check that the supplied login_id is acceptable
if not login_id:
raise LoginIdInvalidError("a login_id must be given" )
if not isinstance(login_id, str):
raise LoginIdInvalidError("login_id must be a string" )
if len(login_id) > 512:
error_msg = "too long, must be <= 512."
raise LoginIdInvalidError(error_msg)
if not self._login_id_char_re.match(login_id):
error_msg = "can only contain characters '-_a-zA-Z0-9.@'"
raise LoginIdFormatError(login_id, error_msg)
if not self._login_id_start_re.match(login_id):
error_msg = "must start with 'a-zA-Z0-9'"
raise LoginIdFormatError(login_id, error_msg)
return login_id + '/' + self.sectag.sectag
def _list_zone(self, names=None, reference=None, sg_name=None,
include_deleted=False, toggle_deleted=False,
include_disabled=True):
"""
Backend search for the given names. Multiple names may be given.
        Wildcards can be used for partial matches. Giving no name lists
        all zones.
"""
def build_query(query):
"""
Common query code
"""
if reference:
query = query.join(Reference)\
.filter(Reference.reference.ilike(reference))
if sg_name and self.sectag.sectag == settings['admin_sectag']:
if sg_name not in list_all_sgs(self.db_session):
raise NoSgFound(sg_name)
query = query.join(ServerGroup,
ServerGroup.id_ == ZoneSM.sg_id)\
.filter(ServerGroup.name == sg_name)
if include_deleted:
pass
elif toggle_deleted:
query = query.filter(ZoneSM.state == ZSTATE_DELETED)
else:
query = query.filter(ZoneSM.state != ZSTATE_DELETED)
if not include_disabled:
query = query.filter(ZoneSM.state != ZSTATE_DISABLED)
return(query)
# Deal with SA auto-BEGIN - want fresh transaction to see fresh data
self._begin_op()
db_query_slice = get_numeric_setting('db_query_slice', int)
if not names:
# No arguments
query = self.db_session.query(ZoneSM)
query = build_query(query)
query = query.order_by(ZoneSM.name)
            # Set up the query so that server-side cursors are used and the
            # whole zone database is not grabbed all at once, preventing
            # excessive memory allocation.
query = query.yield_per(db_query_slice)
zones = []
for z in query:
if (self.sectag.sectag != settings['admin_sectag']
and self.sectag not in z.sectags):
continue
zones.append(z.to_engine_brief())
self._finish_op()
if not zones:
raise NoZonesFound('')
return zones
# We were given some arguments
zones = []
# We keep domains and labels in database lowercase
name_pattern = ' '.join(names).lower()
names = [x.lower() for x in names]
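        # Shell-style wildcards in the supplied names are translated to SQL
        # LIKE patterns below, e.g. (hypothetical) 'foo*.org.' becomes
        # 'foo%.org.' and 'a?c' becomes 'a_c'.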
names = [x.replace('*', '%') for x in names]
names = [x.replace('?', '_') for x in names]
        # Perform limit checks to prevent a RAM-hogging DoS via expensive SQL
if ('%' in names and len(names) > 1):
raise OnlyOneLoneWildcardValid(name_pattern)
# Check that reference is given for non admin sectag accesses
if (self.sectag.sectag != settings['admin_sectag']
and not reference):
raise ReferenceMustBeGiven(name_pattern)
for name in names:
network_address_flag = is_network_address(name)
network = wellformed_cidr_network(name, filter_mask_size=False)
query = self.db_session.query(ZoneSM)
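            # The '<<=' and '>>=' filters below appear to rely on PostgreSQL's
            # inet containment operators, so reverse zones can be looked up by
            # a single address or by a covering network.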
if network_address_flag:
query = query.join(ReverseNetwork)\
.filter( ':name <<= reverse_networks.network')\
.params(name=name)\
.order_by(ReverseNetwork.network)
elif network:
query = query.join(ReverseNetwork)\
.filter( ':network >>= reverse_networks.network')\
.params(network=network)\
.order_by(ReverseNetwork.network)
else:
if not name.endswith('.') and not name.endswith('%'):
name += '.'
query = query.filter(ZoneSM.name.like(name))
query = build_query(query)
query = query.yield_per(db_query_slice)
for z in query:
if (self.sectag.sectag != settings['admin_sectag']
and self.sectag not in z.sectags):
continue
zones.append(z.to_engine_brief())
zones = sorted(zones, key=lambda zone: zone['name'])
if not zones:
raise NoZonesFound(name_pattern)
self._finish_op()
return zones
def list_zone(self, names=None, reference=None):
"""
1st domains level list_zone call
"""
return self._list_zone(names=names, reference=reference)
def list_zone_admin(self, names=None, reference=None,
sg_name=None, include_deleted=False, toggle_deleted=False,
include_disabled=True):
"""
Admin privilege list_zone()
"""
return self._list_zone(names=names, reference=reference,
sg_name=sg_name, include_deleted=include_deleted,
toggle_deleted=toggle_deleted,
include_disabled=include_disabled)
def _get_zone_sm(self, name, zone_id=None, check_sectag=True,
toggle_deleted=False, include_deleted=False, exact_network=False):
"""
Get zone_sm.
"""
db_session = self.db_session
# We keep domains and labels in database lowercase
name = name.lower()
multiple_results = False
network_address_flag = is_network_address(name)
        # Don't reassign name, so that error messages reflect what the user
        # supplied as input
network = wellformed_cidr_network(name)
try:
query = db_session.query(ZoneSM)
if network_address_flag and not exact_network:
query = query.join(ReverseNetwork)\
.filter( ':inet <<= reverse_networks.network')\
.params(inet=name)\
.order_by(ReverseNetwork.network.desc())
elif network_address_flag and exact_network:
raise ZoneNotFound(name)
elif network and not exact_network:
query = query.join(ReverseNetwork)\
.filter( ':inet <<= reverse_networks.network')\
.params(inet=network)\
.order_by(ReverseNetwork.network.desc())
elif network and exact_network:
query = query.join(ReverseNetwork)\
.filter( ':inet = reverse_networks.network')\
.params(inet=network)\
.order_by(ReverseNetwork.network.desc())
else:
query = query.filter(ZoneSM.name == name)
if zone_id:
query = query.filter(ZoneSM.id_ == zone_id)
if include_deleted:
pass
elif toggle_deleted:
query = query.filter(ZoneSM.state == ZSTATE_DELETED)
else:
query = query.filter(ZoneSM.state != ZSTATE_DELETED)
if network or network_address_flag:
query = query.limit(1)
zone_sm = query.one()
except NoResultFound:
zone_sm = None
except MultipleResultsFound:
multiple_results = True
# Decoupled exception traces
if multiple_results:
raise ZoneMultipleResults(name)
if not zone_sm:
raise ZoneNotFound(name)
if not check_sectag:
return zone_sm
# Check security tag
if self.sectag.sectag == settings['admin_sectag']:
return zone_sm
if self.sectag not in zone_sm.sectags:
raise ZoneNotFound(name)
return zone_sm
def _get_zone_sm_byid(self, zone_id):
"""
Get zone_sm.
"""
db_session = self.db_session
        # Get the zone_sm by its id
try:
zone_sm = db_session.query(ZoneSM)\
.filter(ZoneSM.id_ == zone_id).one()
except NoResultFound:
zone_sm = None
# Decoupled exception traces
if not zone_sm:
raise ZoneNotFoundByZoneId(zone_id)
# Check security tag
if self.sectag.sectag == settings['admin_sectag']:
return zone_sm
if self.sectag not in zone_sm.sectags:
raise ZoneNotFoundByZoneId(zone_id)
return zone_sm
def _get_zi(self, zi_id):
"""
Get zi.
"""
db_session = self.db_session
        # Resolve the specific zi by its id
zi = self._resolv_zi_id(None, zi_id,
specific_zi_id=True)
if not zi:
raise ZiNotFound('*', zi_id)
return zi
# Parsing regexps for zi_id. Also see _zi_id_human_str in
# dms.exceptions
_zi_am_pm_str = r'am|pm|AM|PM|aM|pM|Pm|Am|a|A|p|P'
_zi_adj_re = re.compile(r'^\^(-+|\++|-\S+|\+\S+)$')
_zi_adj_minus_re = re.compile(r'^-+$')
_zi_adj_plus_re = re.compile(r'^\++$')
_zi_adj_minusn_re = re.compile(r'^-(\S+)$')
_zi_adj_plusn_re = re.compile(r'^\+(\S+)$')
_zi_unit_re = re.compile(r'^\@(\S+)([smhdw])$')
_zi_ddmmyyyy_hhmm_re = re.compile(r'^(\S+)\/(\S+)\/(\S+),(\S+):(\S+?)('
+ _zi_am_pm_str + r'){0,1}$')
_zi_iso_date_hhmm_re = re.compile(r'^(\S+)-(\S+)-(\S+),(\S+):(\S+?)('
+ _zi_am_pm_str + r'){0,1}$')
_zi_ddmmyyyy_re = re.compile(r'^(\S+)\/(\S+)\/(\S+)$')
_zi_iso_date_re = re.compile(r'^(\S+)-(\S+)-(\S+)$')
_zi_ddslashmm_re = re.compile(r'^(\S+)\/(\S+)$')
_zi_hhmm_re = re.compile(r'^(\S+):(\S+?)(' + _zi_am_pm_str + r'){0,1}$')
_zi_int_adj_re = re.compile(r'^(\S+)(-+|\++|-\S+|\+\S+)$')
def _resolv_zi_id(self, zone_sm, zi_id, specific_zi_id=False):
"""
Resolve a zi_id from a string form
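
        Illustrative summary of accepted forms, derived from the regexps
        above (assumed, not exhaustive): a plain integer zi_id; '^' for the
        zone's current zi; '^-'/'^++'/'^-2' adjustments relative to the
        current zi; '@<n><unit>' ages with units s/m/h/d/w (e.g. '@3d');
        dates such as '25/12/2012', '2012-12-25' or '25/12'; times such as
        '14:30' or '2:30pm'; combined forms such as '25/12/2012,14:30'; and
        '<zi_id>-2'/'<zi_id>++' adjustments relative to a given zi_id.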
"""
def new_query():
if not zone_sm:
query = db_session.query(ZoneInstance)
else:
query = zone_sm.all_zis
query = query.yield_per(db_query_slice)
return query
def resolv_adj_str(adj_str):
nonlocal query
minusn_match = self._zi_adj_minusn_re.search(adj_str)
plusn_match = self._zi_adj_plusn_re.search(adj_str)
try:
if self._zi_adj_minus_re.search(adj_str):
delta = -1 * len(adj_str)
elif self._zi_adj_plus_re.search(adj_str):
delta = len(adj_str)
elif minusn_match:
delta = -1 * int(minusn_match.group(1))
elif plusn_match:
delta = int(plusn_match.group(1))
else:
raise ZiIdAdjStringSyntaxError(zi_id)
except ValueError:
raise ZiIdAdjStringSyntaxError(zi_id)
# A bit of SQL magic to get offset from pivot ID
subq = db_session.query(ZoneInstance)\
.filter(ZoneInstance.id_ == pivot_zi_id).subquery()
if delta < 0:
query = query.filter(ZoneInstance.ctime <= subq.c.ctime)\
.order_by(ZoneInstance.ctime.desc())
delta *= -1
else:
query = query.filter(ZoneInstance.ctime >= subq.c.ctime)\
.order_by(ZoneInstance.ctime.asc())
try:
result = query.offset(delta).limit(1).one()
except NoResultFound:
result = None
return result
def ctime_query(target_ctime):
nonlocal query
query = query.filter(ZoneInstance.ctime <= target_ctime)\
.order_by(ZoneInstance.ctime.desc()).limit(1)
try:
result = query.one()
except NoResultFound:
result = None
return result
def do_year_date_time(regexp_match, date_exception, iso_format_date):
"""
Work out target_ctime, given a complete date
"""
match_args = regexp_match.groups()
try:
if iso_format_date:
year = int(match_args[0])
month = int(match_args[1])
day = int(match_args[2])
else:
day = int(match_args[0])
month = int(match_args[1])
year = int(match_args[2])
except ValueError:
raise date_exception(zi_id)
if len(match_args) == 3:
# time not given, assume midnight
hour = 0
minute = 0
else:
try:
hour = int(match_args[3])
minute = int(match_args[4])
except ValueError:
raise ZiIdHhMmSyntaxError(zi_id)
# Process AM/PM
if len(match_args) > 5 and match_args[5]:
am_pm = match_args[5].lower()
if (am_pm.startswith('p') and hour < 12):
hour += 12
# Sort out 2 digit years
if (70 <= year <= 99):
year += 1900
elif ( 0 <= year < 70):
year += 2000
# Use DB server as time base
now = db_clock_time(db_session)
try:
target_time = datetime.time(hour, minute, tzinfo=now.tzinfo)
target_date = datetime.date(year, month, day)
target_ctime = datetime.datetime.combine(target_date,
target_time)
except ValueError:
raise ZiIdHhMmSyntaxError(zi_id)
return ctime_query(target_ctime)
        # Handle the easy and degenerate cases first - quicker to do
if zone_sm:
if not zi_id or zi_id == '^':
return zone_sm.zi
else:
if not zi_id:
return None
db_session = self.db_session
db_query_slice = get_numeric_setting('db_query_slice', int)
# Fast path - check and see if zi_id is a straight integer
query = new_query()
try:
zi_id = int(zi_id)
zi = query.filter(ZoneInstance.id_ == zi_id).one()
return zi
except NoResultFound:
return None
except ValueError:
pass
if specific_zi_id:
return None
# Put the brakes on
# Only zone_tool related parsing from here on
if (self.sectag.sectag != settings['admin_sectag']
and settings['process_name'] != 'zone_tool'):
return None
        # Try '^' adjustments relative to the current zi (e.g. '^-', '^+2')
match_adj = self._zi_adj_re.search(zi_id)
if match_adj:
adj_str = match_adj.group(1)
pivot_zi_id = zone_sm.zi.id_
return resolv_adj_str(adj_str)
# Has to be done here as regexp is greedy
# Try nnn[smhdw]
match_unit = self._zi_unit_re.search(zi_id)
if match_unit:
amount = match_unit.group(1)
unit = match_unit.group(2)
try:
amount = float(amount)
except ValueError:
raise ZiIdTimeAmountSyntaxError(zi_id)
# Use DB server as time base
now = db_clock_time(db_session)
try:
if unit == 's':
delta_time = datetime.timedelta(seconds=amount)
elif unit == 'm':
delta_time = datetime.timedelta(minutes=amount)
elif unit == 'h':
delta_time = datetime.timedelta(hours=amount)
elif unit == 'd':
delta_time = datetime.timedelta(days=amount)
elif unit == 'w':
delta_time = datetime.timedelta(weeks=amount)
else:
raise ZiIdTimeUnitSyntaxError(zi_id)
except ValueError:
raise ZiIdTimeAmountSyntaxError(zi_id)
target_ctime = now - delta_time
query = query.filter(ZoneInstance.ctime <= target_ctime)\
.order_by(ZoneInstance.ctime.desc()).limit(1)
try:
result = query.one()
except NoResultFound:
result = None
return result
# Try DD/MM/YYYY,hh:mm
match_ddmmyyyy_hhmm = self._zi_ddmmyyyy_hhmm_re.search(zi_id)
if match_ddmmyyyy_hhmm:
return do_year_date_time(match_ddmmyyyy_hhmm,
ZiIdDdMmYyyySyntaxError,
iso_format_date=False)
# Try YYYY-MM-DD,hh:mm
match_iso_date_hhmm = self._zi_iso_date_hhmm_re.search(zi_id)
if match_iso_date_hhmm:
return do_year_date_time(match_iso_date_hhmm,
ZiIdIsoDateSyntaxError,
iso_format_date=True)
# Try DD/MM/YYYY
match_ddmmyyyy = self._zi_ddmmyyyy_re.search(zi_id)
if match_ddmmyyyy:
return do_year_date_time(match_ddmmyyyy, ZiIdDdMmYyyySyntaxError,
iso_format_date=False)
# Try YYYY-MM-DD
match_iso_date = self._zi_iso_date_re.search(zi_id)
if match_iso_date:
return do_year_date_time(match_iso_date, ZiIdIsoDateSyntaxError,
iso_format_date=True)
# Try DD/MM
match_ddslashmm = self._zi_ddslashmm_re.search(zi_id)
if match_ddslashmm:
day = match_ddslashmm.group(1)
month = match_ddslashmm.group(2)
try:
day = int(day)
except ValueError:
raise ZiIdDdSlashMmSyntaxError(zi_id)
try:
month = int(month)
except ValueError:
raise ZiIdDdSlashMmSyntaxError(zi_id)
now = db_clock_time(db_session)
midnight = datetime.time(0, 0, 0, tzinfo=now.tzinfo)
now_year = now.year
last_year = now_year - 1
try:
target_date = datetime.date(now_year, month, day)
target_ctime = datetime.datetime.combine(target_date, midnight)
if target_ctime > now:
target_date = datetime.date(last_year, month, day)
target_ctime = datetime.datetime.combine(target_date,
midnight)
except ValueError:
raise ZiIdDdSlashMmSyntaxError(zi_id)
return ctime_query(target_ctime)
# Try HH:MM
match_hhmm = self._zi_hhmm_re.search(zi_id)
if match_hhmm:
match_args = match_hhmm.groups()
hour = match_args[0]
minute = match_args[1]
try:
hour = int(hour)
except ValueError:
raise ZiIdHhMmSyntaxError(zi_id)
try:
minute = int(minute)
except ValueError:
raise ZiIdHhMmSyntaxError(zi_id)
# Process AM/PM
if len(match_args) > 2 and match_args[2]:
am_pm = match_args[2].lower()
if (am_pm.startswith('p') and hour < 12):
hour += 12
# Use DB server as time base
now = db_clock_time(db_session)
now_date = now.date()
try:
target_time = datetime.time(hour, minute, tzinfo=now.tzinfo)
yesterday_date = now_date - datetime.timedelta(days=1)
target_ctime = datetime.datetime.combine(now_date, target_time)
if target_ctime > now:
# Use yesterday
target_ctime = datetime.datetime.combine(yesterday_date,
target_time)
except ValueError:
raise ZiIdHhMmSyntaxError(zi_id)
return ctime_query(target_ctime)
# Try nnn+++/---/+n/-n
match_int_adj = self._zi_int_adj_re.search(zi_id)
if match_int_adj:
pivot_zi_id = match_int_adj.group(1)
adj_str = match_int_adj.group(2)
return resolv_adj_str(adj_str)
        # Can't understand what's been given
raise ZiIdSyntaxError(zi_id)
def list_zi(self, name):
"""
Given a zone name, return all its zis briefly,
fully showing the currently active one.
"""
# Deal with SA auto-BEGIN - want fresh transaction to see fresh data
self._begin_op()
zone_sm = self._get_zone_sm(name)
result = zone_sm.to_engine(time_format=self.time_format)
if zone_sm.zi:
result['zi'] = zone_sm.zi.to_engine(time_format=self.time_format)
        # Inefficient but portable code:
#result['all_zis'] = list(zone_sm.all_zis)
#result['all_zis'].sort(key=(lambda zi : zi.mtime))
#result['all_zis'] = [zi.to_engine_brief(time_format=self.time_format)
# for zi in zone_sm.all_zis]
# Efficient code:
result['all_zis'] = [zi.to_engine_brief(time_format=self.time_format)
for zi in zone_sm.all_zis.order_by(ZoneInstance.ctime)]
return result
def _get_comment(self, comment_id):
"""
        Given a comment ID, get the contents of the comment
"""
db_session = self.db_session
        result = db_session.query(RRComment) \
                .filter(RRComment.id_ == comment_id).first()
        # first() returns the RRComment instance or None, so no tuple
        # indexing or NoResultFound handling is needed here.
        rr_comment = result.comment if result else None
        return rr_comment
def _show_zone(self, zone_sm, zi_id=None, all_rrs=False):
"""
Given a zone_sm, return all the values stored in its ZoneSM
record, current zi, RRs, and comments
"""
if not zone_sm:
return {}
result = zone_sm.to_engine(time_format=self.time_format)
if self.sectag.sectag == settings['admin_sectag']:
result['sectags'] = zone_sm.list_sectags(self.db_session)
        # This is a bit convoluted, but it works...
zi = self._resolv_zi_id(zone_sm, zi_id)
if not zi:
raise ZiNotFound(zone_sm.name, zi_id)
result['zi'] = zi.to_data(self.time_format,
zone_sm.use_apex_ns, all_rrs)
        # Note the alternative code up in list_zi() for a different
        # relationship loading strategy
result['all_zis'] = [zi.to_engine_brief(time_format=self.time_format)
for zi in zone_sm.all_zis.order_by(ZoneInstance.ctime)]
return result
def show_zone(self, name, zi_id=None):
"""
Given a zone name, return all the values stored in its ZoneSM
record, current zi, RRs, and comments
"""
# Deal with SA auto-BEGIN - want fresh transaction to see fresh data
self._begin_op()
zone_sm = self._get_zone_sm(name)
return self._show_zone(zone_sm, zi_id)
def show_zone_byid(self, zone_id, zi_id=None):
"""
Given a zone id, return all the values stored in its ZoneSM
record, current zi, RRs, and comments
"""
# Deal with SA auto-BEGIN - want fresh transaction to see fresh data
self._begin_op()
zone_sm = self._get_zone_sm_byid(zone_id)
return self._show_zone(zone_sm, zi_id)
def show_zone_text(self, name, zi_id=None, all_rrs=True):
"""
Given a zone name and optional zi_id, return the ZI as zone file text
"""
# Deal with SA auto-BEGIN - want fresh transaction to see fresh data
self._begin_op()
result = {}
zone_sm = self._get_zone_sm(name)
data_result = self._show_zone(zone_sm, zi_id, all_rrs=all_rrs)
result['zi_text'] = data_to_bind(data_result['zi'],
name=data_result['name'],
reference=data_result.get('reference'))
result['name'] = data_result['name']
result['zi_id'] = data_result['zi']['zi_id']
result['zi_ctime'] = data_result['zi']['ctime']
result['zi_mtime'] = data_result['zi']['mtime']
result['zi_ptime'] = data_result['zi']['ptime']
result['soa_serial'] = data_result['zi']['soa_serial']
result['zone_id'] = data_result['zone_id']
return result
def show_zi(self, name, zi_id=None):
"""
Given a domain name and optionally a zi_id, return all values
stored in ZoneInstance record
"""
# Deal with SA auto-BEGIN - want fresh transaction to see fresh data
self._begin_op()
zone_sm = self._get_zone_sm(name)
zi = self._resolv_zi_id(zone_sm, zi_id)
if not zi:
raise ZiNotFound(name, zi_id)
result = zi.to_engine(time_format=self.time_format)
return result
def show_zi_byid(self, zi_id):
"""
Given a zi_id, return all values stored in ZoneInstance record
"""
# Deal with SA auto-BEGIN - want fresh transaction to see fresh data
self._begin_op()
zi = self._get_zi(zi_id)
result = zi.to_engine(time_format=self.time_format)
return result
def _edit_zone(self, name, login_id, zi_id=None, all_rrs=False,
admin_privilege=False):
"""
Backend for zone editing.
Start editing a zone, by returning editing data
If zone has edit locking enabled, change state and obtain a
token
"""
# Deal with SA auto-BEGIN - want fresh transaction to see fresh data
self._begin_op()
locked_by = self._make_change_by(login_id)
zone_sm = self._get_zone_sm(name)
# Privilege check for no apex zones - admin only
if not zone_sm.use_apex_ns and not admin_privilege:
raise ZoneAdminPrivilegeNeeded(name)
edit_lock_token = None
if zone_sm.edit_lock:
# This is where we synchronously call the Zone_sm state
# machine Have to obtain lock before getting current data
lock_results = exec_zonesm(zone_sm, ZoneSMEdit, EditLockFailure,
locked_by=locked_by)
edit_lock_token = lock_results['edit_lock_token']
# All locking now done, get zone data and return it!
try:
zone_zi_data = self._show_zone(zone_sm, zi_id, all_rrs)
except ZoneNotFound:
# If fail to obtain data release edit lock
if zone_sm.state == ZLSTATE_EDIT_LOCK:
#Cancel Edit lock
exec_zonesm(zone_sm, ZoneSMEditExit,
edit_lock_token=edit_lock_token)
raise
# return with THE HIDDEN TREASURE
return zone_zi_data, edit_lock_token
def edit_zone(self, name, login_id, zi_id=None):
"""
Start editing a zone, by returning editing data
If zone has edit locking enabled, change state and obtain a
token
"""
return self._edit_zone(name, login_id, zi_id)
def edit_zone_admin(self, name, login_id, zi_id=None):
"""
Start editing a zone, by returning editing data
If zone has edit locking enabled, change state and obtain a
token
"""
return self._edit_zone(name, login_id, zi_id, all_rrs=True,
admin_privilege=True)
def tickle_editlock(self, name, edit_lock_token=None):
"""
Tickle the edit_lock timeout event
"""
# Deal with SA auto-BEGIN - want fresh transaction to see fresh data
self._begin_op()
zone_sm = self._get_zone_sm(name)
exec_zonesm(zone_sm, ZoneSMEditLockTickle,
TickleEditLockFailure,
edit_lock_token=edit_lock_token)
return True
def cancel_edit_zone(self, name, edit_lock_token=None):
"""
Operation to cancel an edit locked session
"""
# Deal with SA auto-BEGIN - want fresh transaction to see fresh data
self._begin_op()
zone_sm = self._get_zone_sm(name)
cancel_results = exec_zonesm(zone_sm, ZoneSMEditExit,
CancelEditLockFailure,
edit_lock_token=edit_lock_token)
return True
def _update_zone(self, name, zi_data, login_id, edit_lock_token=None,
normalize_ttls=False, admin_privilege=False,
helpdesk_privilege=False):
"""
Backend for updating the zone by adding a new ZI,
and emitting a publish event
"""
# Deal with SA auto-BEGIN - want fresh transaction to see fresh data
self._begin_op()
change_by = self._make_change_by(login_id)
zone_sm = self._get_zone_sm(name, exact_network=True)
# Privilege check for no apex zones - admin only
if not zone_sm.use_apex_ns and not admin_privilege:
raise ZoneAdminPrivilegeNeeded(name)
# Save data
zi, auto_ptr_data = self._data_to_zi(name, zi_data, change_by,
normalize_ttls,
admin_privilege, helpdesk_privilege)
# put zi in place, issue appropriate zone SM event
if not zone_sm.edit_lock:
exec_zonesm(zone_sm, ZoneSMEditSavedNoLock,
zi_id=zi.id_)
# Do auto_ptr_data operation here.
self._queue_auto_ptr_data(auto_ptr_data)
return True
try:
exec_zonesm(zone_sm, ZoneSMEditSaved,
UpdateZoneFailure,
zi_id=zi.id_,
edit_lock_token=edit_lock_token)
except UpdateZoneFailure as exc:
            # Remove the zi as we don't want to keep it around -
            # keeping it would defeat the point of edit locking.
self.db_session.delete(zi)
self.db_session.commit()
raise
# Do auto_ptr_data operation here.
self._queue_auto_ptr_data(auto_ptr_data)
return True
def _update_zone_text(self, name, zi_text, login_id, edit_lock_token=None,
normalize_ttls=False, admin_privilege=False,
helpdesk_privilege=False):
"""
Backend for updating the zone by adding a new ZI as a text blob,
and emitting a publish event
"""
zi_data, origin_name, update_type, zone_reference \
= self._parse_zi_text(name, zi_text)
# Use normalize_ttls with imported data to stop surprises
results = self._update_zone(name=name, login_id=login_id,
zi_data=zi_data, edit_lock_token=edit_lock_token,
normalize_ttls=normalize_ttls,
admin_privilege=admin_privilege,
helpdesk_privilege=helpdesk_privilege)
return results
def update_zone_admin(self, name, zi_data, login_id, edit_lock_token=None,
normalize_ttls=False):
"""
Update a zone with admin privilege
"""
return self._update_zone(name, zi_data, login_id, edit_lock_token,
normalize_ttls, admin_privilege=True)
def update_zone_text_admin(self, name, zi_text, login_id,
edit_lock_token=None, normalize_ttls=False):
"""
Update a zone with admin privilege
"""
return self._update_zone_text(name, zi_text, login_id,
edit_lock_token, normalize_ttls,
admin_privilege=True)
def _find_src_zi(self, src_name, src_zi_id, admin_privilege):
"""
Find a src_zi, src_zone_sm given a name and zi_id
        Common piece of code between _create_zone and _copy_zi
"""
db_session = self.db_session
src_zone_sm = None
src_zi = None
if src_name:
src_zone_sm = self._get_zone_sm(src_name)
src_zi = self._resolv_zi_id(src_zone_sm, src_zi_id)
if not src_zi:
raise ZiNotFound(src_zone_sm.name, src_zi_id)
elif src_zi_id and admin_privilege:
src_zi = self._resolv_zi_id(None, src_zi_id,
specific_zi_id=True)
if not src_zi:
raise ZiNotFound('*', src_zi_id)
return src_zone_sm, src_zi
def _copy_src_zi(self, src_zi, zone_sm, change_by,
preserve_time_stamps=False):
"""
Given a src_zi, copy it
        Common piece of code between _create_zone and _copy_zi
"""
db_session = self.db_session
if preserve_time_stamps:
src_ctime = src_zi.ctime
src_mtime = src_zi.mtime
zi = src_zi.copy(db_session, change_by)
auto_ptr_data = zi.get_auto_ptr_data(zone_sm)
# Tie to zone
zi.zone = zone_sm
zone_sm.all_zis.append(zi)
db_session.flush()
# Update apex if needed
zi.update_apex(db_session)
# Update Zone TTLs for clean initialisation
zi.update_zone_ttls()
# Make sure SOA serial number is fresh
new_soa_serial = new_soa_serial_no(zi.soa_serial, zone_sm.name)
zi.update_soa_serial(new_soa_serial)
if preserve_time_stamps:
zi.ctime = src_ctime
zi.mtime = src_mtime
db_session.flush()
return zi, auto_ptr_data
def _create_zone(self, name, zi_data, login_id,
use_apex_ns, edit_lock, auto_dnssec, nsec3,
inc_updates,
reference=None, sg_name=None, sectags=None,
batch_load=False, src_name=None, src_zi_id=None,
admin_privilege=False,
helpdesk_privilege=False):
"""
Given a name, create a zone
        Currently just creates a row in the sm_zone table, as well as an
initial zi (provided or default), leaving zone_sm.state as UNCONFIG
"""
def check_parent_domains(name):
"""
            Handle all checks for creating a sub domain,
            i.e. only allow sub domain creation for matching references etc.
"""
nonlocal reference
            # Check whether any parent domain already exists
parent_name_list = name.split('.')[1:]
while (len(parent_name_list) > 1):
parent_name = '.'.join(parent_name_list)
parent_name_list = parent_name_list[1:]
try:
parent_zone_sm = self._get_zone_sm(parent_name,
check_sectag=False, exact_network=True)
except ZoneNotFound:
continue
parent_zone_ref = parent_zone_sm.reference
if self.sectag.sectag == settings['admin_sectag']:
# admin can do anything - creating a sub domain
# with any reference defaulting to that of parent
if not reference:
reference = parent_zone_ref.reference \
if hasattr(parent_zone_ref, 'reference') \
and parent_zone_ref.reference \
else None
return
if not reference:
reference = zone_cfg.get_row_exc(db_session, 'default_ref')
ref_obj = new_reference(db_session, reference,
return_existing=True)
if parent_zone_ref and ref_obj != parent_zone_ref:
raise ZoneExists(name)
return
return
self._begin_op()
db_session = self.db_session
# Login ID must be checked and processed
change_by = self._make_change_by(login_id)
# If given source information for copying into creation ZI, check
# it out.
src_zone_sm, src_zi = self._find_src_zi(src_name, src_zi_id,
admin_privilege)
try:
            # No point in hiding the existence of a zone when it is asked for
            # directly by name during zone creation.
zone_sm = self._get_zone_sm(name, check_sectag=False,
exact_network=True)
# reached the end of the road...
raise ZoneExists(name)
except ZoneNotFound:
# Inverted exception
pass
# Check name syntax and convert networks to valid reverse domain names
reverse_network = None
if name.find('/') > -1:
result = zone_name_from_network(name)
if not result:
raise InvalidDomainName(name)
rev_name, rev_network = result
reverse_network = new_reverse_network(db_session, rev_network)
name = rev_name
inc_updates = True if inc_updates == None else inc_updates
elif (name.lower().endswith('ip6.arpa.')
or name.lower().endswith('in-addr.arpa.')):
raise ReverseNamesNotAccepted(name)
elif not is_inet_domain(name):
raise InvalidDomainName(name)
# Check parent domains when creating a sub domain
check_parent_domains(name)
# Set reference if copying and none given.
# Parent domains will override this
if src_zone_sm and not reference:
if src_zone_sm.reference and src_zone_sm.reference.reference:
reference = src_zone_sm.reference.reference
# Check that the security tag exists
sectag = self.sectag
if not sectag in list_all_sectags(db_session):
raise ZoneSecTagDoesNotExist(sectag.sectag)
# If copying zone, set zone flags from src if not given
if src_zone_sm:
if use_apex_ns is None:
use_apex_ns = src_zone_sm.use_apex_ns
if edit_lock is None:
edit_lock = src_zone_sm.edit_lock
if auto_dnssec is None:
auto_dnssec = src_zone_sm.auto_dnssec
if nsec3 is None:
nsec3 = src_zone_sm.nsec3
if inc_updates is None:
inc_updates = src_zone_sm.inc_updates
# create the zone
zone_sm = new_zone(db_session, DynDNSZoneSM, name=name,
use_apex_ns=use_apex_ns, edit_lock=edit_lock,
auto_dnssec=auto_dnssec, nsec3=nsec3,
inc_updates=inc_updates, sectag=self.sectag,
sg_name=sg_name, reference=reference)
# Add extra sectags
if sectags:
if self.sectag.sectag == settings['admin_sectag']:
self.replace_zone_sectags(name, sectags)
else:
raise SecTagPermissionDenied(self.sectag.sectag)
# If Admin and copying, copy sectags from source zone
if self.sectag.sectag == settings['admin_sectag']:
if src_zone_sm:
zone_sm.copy_zone_sectags(db_session, src_zone_sm)
# Fill out zi
if src_zi:
zi, auto_ptr_data = self._copy_src_zi(src_zi, zone_sm, change_by)
elif zi_data:
zi, auto_ptr_data = self._data_to_zi(name, zi_data,
change_by=change_by,
admin_privilege=admin_privilege,
helpdesk_privilege=helpdesk_privilege,
normalize_ttls=True)
# Set new SOA serial if it is old. This is for load_zone(s), and
# new incoming domains
new_soa_serial = new_soa_serial_no(zi.soa_serial, name)
zi.update_soa_serial(new_soa_serial)
else:
zi = new_zone_zi(db_session, zone_sm, change_by)
auto_ptr_data = None
zone_sm.soa_serial = zi.soa_serial
# Add reverse network if that exists
if reverse_network:
zone_sm.reverse_network = reverse_network
# Get commands going with working backend first
if (batch_load and not zone_sm.auto_dnssec):
exec_zonesm(zone_sm, ZoneSMDoBatchConfig, zi_id=zi.id_)
else:
exec_zonesm(zone_sm, ZoneSMDoConfig, zi_id=zi.id_)
# Do auto_ptr_data operation here.
self._queue_auto_ptr_data(auto_ptr_data)
# Commit everything.
self._finish_op()
return True
def create_zone_admin(self, name, login_id, zi_data=None,
use_apex_ns=None, edit_lock=None, auto_dnssec=None,
nsec3=None, inc_updates=None, reference=None, sg_name=None,
sectags=None):
"""
Create a zone with admin privilege
"""
return self._create_zone(name, zi_data, login_id, use_apex_ns,
edit_lock, auto_dnssec, nsec3, inc_updates,
reference, sg_name, sectags, admin_privilege=True)
def copy_zone_admin(self, src_name, name, login_id, zi_id=None,
use_apex_ns=None, edit_lock=None, auto_dnssec=None,
nsec3=None, inc_updates=None, reference=None, sg_name=None,
sectags=None):
"""
        Copy a zone with admin privilege
"""
return self._create_zone(name, src_name=src_name, src_zi_id=zi_id,
use_apex_ns=use_apex_ns, edit_lock=edit_lock,
auto_dnssec=auto_dnssec, nsec3=nsec3, inc_updates=inc_updates,
reference=reference, sg_name=sg_name, sectags=sectags,
login_id=login_id, zi_data=None, admin_privilege=True)
def destroy_zone(self, zone_id):
"""
Destroy a zone backend
"""
self._begin_op()
zone_sm = self._get_zone_sm_byid(zone_id)
if not zone_sm.is_deleted():
raise ZoneNotDeleted(zone_sm.name)
# Delete the zone
# Database integrity constraints/triggers will do all the rest...
exec_zonesm(zone_sm, ZoneSMDoDestroy, ZoneFilesStillExist)
self._finish_op()
return True
def delete_zone(self, name):
"""
Delete a zone backend
"""
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
exec_zonesm(zone_sm, ZoneSMDelete, ZoneBeingCreated)
self._finish_op()
def undelete_zone(self, zone_id):
"""
        Undelete a zone backend
"""
self._begin_op()
zone_sm = self._get_zone_sm_byid(zone_id)
exec_zonesm(zone_sm, ZoneSMUndelete, ActiveZoneExists)
self._finish_op()
def copy_zi(self, src_name, name, login_id, zi_id=None):
"""
Copy a zi from src_zone to destination zone
"""
self._begin_op()
change_by = self._make_change_by(login_id)
src_zone_sm, src_zi = self._find_src_zi(src_name, zi_id,
admin_privilege=False)
zone_sm = self._get_zone_sm(name)
self._copy_src_zi(src_zi, zone_sm, change_by,
preserve_time_stamps=True)
self._finish_op()
def delete_zi(self, name, zi_id):
"""
        Given a zone name and zi_id, delete the corresponding zi
"""
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
if zone_sm.zi_id == zi_id:
raise ZiInUse(name, zi_id)
zi = self._resolv_zi_id(zone_sm, zi_id, specific_zi_id=True)
if not zi:
raise ZiNotFound(name, zi_id)
self.db_session.delete(zi)
self._finish_op()
def _parse_zi_text(self, name, zi_text):
"""
Backend function to parse zi_text and trap/translate PyParsing
exceptions.
"""
zone_stringio = StringIO(initial_value=zi_text)
try:
return bind_to_data(zone_stringio, name)
except ParseBaseException as exc:
raise ZiTextParseError(name, exc)
def _load_zone(self, name, zi_text, login_id,
use_apex_ns, edit_lock, auto_dnssec, nsec3,
inc_updates,
reference=None, sg_name=None, sectags=None,
admin_privilege=False,
helpdesk_privilege=False):
"""
Load a zone from a zi_text blob. Backend.
"""
zi_data, origin_name, update_type, zone_reference \
= self._parse_zi_text(name, zi_text)
if not reference:
reference = zone_reference
results = self._create_zone(name, zi_data, login_id,
use_apex_ns, edit_lock, auto_dnssec, nsec3,
inc_updates,
reference, sg_name, sectags,
admin_privilege=admin_privilege,
helpdesk_privilege=helpdesk_privilege)
return results
def _load_zi(self, name, zi_text, login_id, admin_privilege=False,
helpdesk_privilege=False):
"""
Load a ZI into a zone. Backend.
"""
zone_sm_data, edit_lock_token = self._edit_zone(name=name,
login_id=login_id,
admin_privilege=admin_privilege)
zi_data, origin_name, update_type, zone_reference \
= self._parse_zi_text(name, zi_text)
# Use normalize_ttls with imported data to stop surprises
load_results = self._update_zone(name=name, login_id=login_id,
zi_data=zi_data, edit_lock_token=edit_lock_token,
normalize_ttls=True,
admin_privilege=admin_privilege,
helpdesk_privilege=helpdesk_privilege)
return load_results
def _set_zone(self, name, **kwargs):
"""
Set the settable attributes on a zone. This call also issues
an event to update the zone.
"""
for arg in kwargs:
if arg not in ('use_apex_ns', 'edit_lock', 'auto_dnssec', 'nsec3',
'inc_updates'):
raise InvalidParamsJsonRpcError("Argument '%s' not supported."
% arg)
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
if 'use_apex_ns' in kwargs:
use_apex_ns = kwargs['use_apex_ns']
if use_apex_ns == None:
use_apex_ns = zone_cfg.get_key(self.db_session, 'use_apex_ns')
if use_apex_ns == True:
if not zone_sm.use_apex_ns:
zone_sm.use_apex_ns = True
create_event(ZoneSMUpdate, commit=True,
signal_queue_daemon=True,
sm_id=zone_sm.id_, zone_id=zone_sm.id_,
name=zone_sm.name)
elif use_apex_ns == False:
if zone_sm.use_apex_ns:
zone_sm.use_apex_ns = False
create_event(ZoneSMUpdate, commit=True,
signal_queue_daemon=True,
sm_id=zone_sm.id_, zone_id=zone_sm.id_,
name=zone_sm.name)
else:
assert(False)
if 'edit_lock' in kwargs:
edit_lock = kwargs['edit_lock']
if edit_lock == None:
edit_lock = zone_cfg.get_key(self.db_session, 'edit_lock')
if edit_lock == True:
zone_sm.edit_lock = True
elif edit_lock == False:
zone_sm.edit_lock = False
elif edit_lock == None:
pass
else:
assert(False)
if 'inc_updates' in kwargs:
inc_updates = kwargs['inc_updates']
if inc_updates == None:
inc_updates = zone_cfg.get_key(self.db_session, 'inc_updates')
if inc_updates == True:
zone_sm.inc_updates = True
elif inc_updates == False:
zone_sm.inc_updates = False
elif inc_updates == None:
pass
else:
assert(False)
if 'auto_dnssec' in kwargs:
auto_dnssec = kwargs['auto_dnssec']
if auto_dnssec == None:
auto_dnssec = zone_cfg.get_key(self.db_session, 'auto_dnssec')
if auto_dnssec == True:
if not zone_sm.auto_dnssec:
zone_sm.auto_dnssec = True
exec_zonesm(zone_sm, ZoneSMDoReconfig)
elif auto_dnssec == False:
if zone_sm.auto_dnssec:
zone_sm.auto_dnssec = False
exec_zonesm(zone_sm, ZoneSMDoReconfig)
elif auto_dnssec == None:
pass
else:
assert(False)
if 'nsec3' in kwargs:
nsec3 = kwargs['nsec3']
if nsec3 == None:
nsec3 = zone_cfg.get_key(self.db_session, 'nsec3')
if nsec3 == True:
if not zone_sm.nsec3:
zone_sm.nsec3 = True
if zone_sm.auto_dnssec:
exec_zonesm(zone_sm, ZoneSMDoReconfig)
elif nsec3 == False:
if zone_sm.nsec3:
zone_sm.nsec3 = False
if zone_sm.auto_dnssec:
exec_zonesm(zone_sm, ZoneSMDoReconfig)
elif nsec3 == None:
pass
else:
assert(False)
self._finish_op()
return
def set_zone_admin(self, name, **kwargs):
return self._set_zone(name, **kwargs)
def show_sectags(self):
"""
Return all security tags as JSON
"""
if self.sectag.sectag != settings['admin_sectag']:
raise SecTagPermissionDenied(self.sectag.sectag)
self._begin_op()
result = []
all_sectags = list_all_sectags(self.db_session)
if not len(all_sectags):
raise NoSecTagsExist()
for sectag in all_sectags:
result.append(sectag.to_engine(self.time_format))
self._finish_op()
return result
def show_zone_sectags(self, name):
"""
Return all the sectags configured for a zone
"""
if self.sectag.sectag != settings['admin_sectag']:
raise SecTagPermissionDenied(self.sectag.sectag)
self._begin_op()
zone_sm = self._get_zone_sm(name)
result = zone_sm.list_sectags(self.db_session)
if not result:
raise NoZoneSecTagsFound(name)
self._finish_op()
return result
def add_zone_sectag(self, name, sectag_label):
"""
Add a sectag to a zone
"""
if self.sectag.sectag != settings['admin_sectag']:
raise SecTagPermissionDenied(self.sectag.sectag)
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
sectag = ZoneSecTag(sectag_label)
if sectag not in list_all_sectags(self.db_session):
raise ZoneSecTagDoesNotExist(sectag_label)
result = zone_sm.add_sectag(self.db_session, sectag)
self._finish_op()
def delete_zone_sectag(self, name, sectag_label):
"""
        Delete a sectag from a zone
"""
if self.sectag.sectag != settings['admin_sectag']:
raise SecTagPermissionDenied(self.sectag.sectag)
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
sectag = ZoneSecTag(sectag_label)
if sectag not in list_all_sectags(self.db_session):
raise ZoneSecTagDoesNotExist(sectag_label)
result = zone_sm.remove_sectag(self.db_session, sectag)
self._finish_op()
def replace_zone_sectags(self, name, sectag_labels):
"""
Replace all sectags for given zone
"""
if self.sectag.sectag != settings['admin_sectag']:
raise SecTagPermissionDenied(self.sectag.sectag)
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
sectag_list = []
all_sectags = list_all_sectags(self.db_session)
for sectag_thing in sectag_labels:
try:
sectag = ZoneSecTag(sectag_thing['sectag_label'])
except (TypeError, IndexError):
raise InvalidParamsJsonRpcError('Sectag list format invalid.')
if sectag not in all_sectags:
raise ZoneSecTagDoesNotExist(sectag_thing['sectag_label'])
sectag_list.append(sectag)
result = zone_sm.replace_all_sectags(self.db_session, *sectag_list)
self._finish_op()
def enable_zone(self, name):
"""
Enable a zone
"""
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
exec_zonesm(zone_sm, ZoneSMEnable)
self._finish_op()
def disable_zone(self, name):
"""
Disable a zone
"""
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
exec_zonesm(zone_sm, ZoneSMDisable)
self._finish_op()
def show_mastersm(self):
"""
Show the MasterSM
"""
self._begin_op()
result = show_master_sm(self.db_session, time_format=self.time_format)
self._finish_op()
return result
def sign_zone(self, name):
"""
Schedule a zone for signing event
"""
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
if not zone_sm.auto_dnssec:
raise ZoneNotDnssecEnabled(name)
zone_sm_dnssec_schedule(self.db_session, zone_sm, 'sign')
self._finish_op()
def loadkeys_zone(self, name):
"""
Schedule a zone key loading event
"""
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
if not zone_sm.auto_dnssec:
raise ZoneNotDnssecEnabled(name)
zone_sm_dnssec_schedule(self.db_session, zone_sm, 'loadkeys')
self._finish_op()
def reset_zone(self, name, zi_id=None):
"""
Schedule a zone reset event
"""
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
reset_args = {}
if zi_id:
zi = self._resolv_zi_id(zone_sm, zi_id, specific_zi_id=False)
if not zi:
raise ZiNotFound(zone_sm.name, zi_id)
reset_args['zi_id'] = zi.id_
results = exec_zonesm(zone_sm, ZoneSMDoReset, **reset_args)
self._finish_op()
def refresh_zone(self, name, zi_id=None):
"""
Refresh a zone by issuing an update.
"""
self._begin_op()
zone_sm = self._get_zone_sm(name)
refresh_args = {}
if zi_id:
zi = self._resolv_zi_id(zone_sm, zi_id, specific_zi_id=False)
if not zi:
raise ZiNotFound(zone_sm.name, zi_id)
refresh_args['zi_id'] = zi.id_
results = exec_zonesm(zone_sm, ZoneSMDoRefresh,
exception_type=UpdateZoneFailure, **refresh_args)
else:
results = exec_zonesm(zone_sm, ZoneSMDoRefresh, **refresh_args)
self._finish_op()
def poke_zone_set_serial(self, name, soa_serial=None,
force_soa_serial_update=False):
"""
Set zone SOA serial number to given value if possible
"""
return self._poke_zone(name, soa_serial=soa_serial,
force_soa_serial_update=force_soa_serial_update)
def poke_zone_wrap_serial(self, name):
"""
Wrap current zone SOA serial number
"""
return self._poke_zone(name, wrap_soa_serial=True)
def _poke_zone(self, name, soa_serial=None,
wrap_soa_serial=False,
force_soa_serial_update=False):
"""
Manipulate a zone's serial number on the DNS servers via update.
"""
self._begin_op()
zone_sm = self._get_zone_sm(name)
if zone_sm.state != ZSTATE_PUBLISHED:
raise ZoneNotPublished(name)
# If candidate serial given, test it
if soa_serial:
# Check that incoming argument is an integer
if not isinstance(soa_serial, int):
raise SOASerialTypeError(name)
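            # SOA serial numbers are unsigned 32-bit values, hence the range
            # check below; new_soa_serial_no() is assumed to apply the usual
            # DNS serial-arithmetic rules when vetting the candidate.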
if not ( 0 < soa_serial <= (2**32 -1 )):
raise SOASerialRangeError(name)
# Assume that current is previously published SOA serial
test_soa_serial = new_soa_serial_no(zone_sm.soa_serial, name,
candidate=soa_serial)
if test_soa_serial != soa_serial:
raise SOASerialCandidateIgnored(name)
refresh_args = {'candidate_soa_serial': soa_serial,
'wrap_soa_serial': wrap_soa_serial,
'force_soa_serial_update': force_soa_serial_update}
results = exec_zonesm(zone_sm, ZoneSMDoRefresh,
exception_type=UpdateZoneFailure, **refresh_args)
self._finish_op()
def create_reference(self, reference):
"""
Create a new reference
"""
self._begin_op()
new_reference(self.db_session, reference)
self._finish_op()
def delete_reference(self, reference):
"""
Delete a reference
"""
self._begin_op()
del_reference(self.db_session, reference)
self._finish_op()
def rename_reference(self, reference, dst_reference):
"""
Rename a reference
"""
self._begin_op()
rename_reference(self.db_session, reference, dst_reference)
self._finish_op()
def list_reference(self, *references):
"""
List references
"""
self._begin_op()
db_session = self.db_session
db_query_slice = get_numeric_setting('db_query_slice', int)
if not references:
references = '*'
ref_list = []
ref_pattern = ' '.join(references)
references = [x.replace('*', '%') for x in references]
references = [x.replace('?', '_') for x in references]
for ref in references:
query = self.db_session.query(Reference)\
.filter(Reference.reference.ilike(ref))\
.yield_per(db_query_slice)
for ref in query:
ref_list.append(ref.to_engine_brief())
if not ref_list:
raise NoReferenceFound('*')
ref_list = sorted(ref_list,
key=lambda reference: reference['reference'].lower())
self._finish_op()
return ref_list
def set_zone_reference(self, name, reference=None):
"""
Set the reference for a zone
"""
self._begin_op()
db_session = self.db_session
zone_sm = self._get_zone_sm(name, exact_network=True)
if reference:
reference = find_reference(db_session, reference)
reference.set_zone(zone_sm)
else:
zone_sm.ref_id = None
self._finish_op()
def list_sg(self):
"""
List all server groups
"""
self._begin_op()
sgs = self.db_session.query(ServerGroup).all()
result = []
for sg in sgs:
result.append(sg.to_engine_brief())
if not result:
raise NoSgFound('*')
self._finish_op()
return result
def set_zone_sg(self, name, sg_name=None):
"""
Set the SG a zone is associated with
"""
self._begin_op()
db_session = self.db_session
zone_sm = self._get_zone_sm(name, exact_network=True)
if not zone_sm.is_disabled():
raise ZoneNotDisabled(name)
if not sg_name:
sg_name = zone_cfg.get_row_exc(db_session, 'default_sg')
sg = find_sg_byname(db_session, sg_name, raise_exc=True)
zone_sm.set_sg(sg)
self._finish_op()
def set_zone_alt_sg(self, name, sg_name=None):
"""
Set the alternate SG a zone is associated with
"""
self._begin_op()
db_session = self.db_session
zone_sm = self._get_zone_sm(name, exact_network=True)
exec_zonesm(zone_sm, ZoneSMDoSetAltSg, ZoneSmFailure,
alt_sg_name=sg_name)
self._finish_op()
def swap_zone_sg(self, name):
"""
Swap a live zone's sg over with its alt_sg
"""
self._begin_op()
db_session = self.db_session
zone_sm = self._get_zone_sm(name, exact_network=True)
if not zone_sm.alt_sg:
raise ZoneNoAltSgForSwap(name)
exec_zonesm(zone_sm, ZoneSMDoSgSwap)
self._finish_op()
def rr_query_db(self, label, name=None, type=None,
rdata=None, zi_id=None, show_all=False):
"""
Query the DB for RRs matching the given pattern
"""
self._begin_op()
db_session = self.db_session
try:
result = rr_query_db_raw(db_session, label=label, name=name,
type_=type, rdata=rdata, include_disabled=show_all,
zi_id=zi_id, sectag=self.sectag)
except ValueError as exc:
raise RrQueryDomainError(name)
if result:
rrs = result.get('rrs')
if not rrs:
return None
result['rrs'] = [rr.to_engine() for rr in rrs]
result['zone_disabled'] = result['zone_sm'].is_disabled()
result.pop('zone_sm', None)
self._finish_op()
return result
def _update_rrs(self, name, update_data, update_type, login_id,
admin_privilege=False, helpdesk_privilege=False):
"""
Do Incremental Updates for a zone. Takes same ZI data format as
_create_zone(). Will produce a JSON Error if an exception is thrown.
"""
self._begin_op()
change_by = self._make_change_by(login_id)
auto_ptr_data = self._data_to_update(name, update_data, update_type,
change_by,
admin_privilege=admin_privilege,
helpdesk_privilege=helpdesk_privilege)
# Do auto_ptr_data operation here.
self._queue_auto_ptr_data(auto_ptr_data)
# Commit everything.
self._finish_op()
def update_rrs_admin(self, name, update_data, update_type, login_id):
"""
Incremental updates, admin privilege
"""
return self._update_rrs(name, update_data, update_type, login_id,
admin_privilege=True)
def refresh_zone_ttl(self, name, zone_ttl=None):
"""
        Refresh a zone's TTL by issuing an update.
"""
self._begin_op()
zone_sm = self._get_zone_sm(name, exact_network=True)
if not zone_ttl:
zone_ttl = zone_cfg.get_row_exc(self.db_session, 'zone_ttl',
sg_name=zone_sm.sg.name)
if zone_sm.zi_candidate:
zone_sm.zi_candidate.update_zone_ttls(zone_ttl=zone_ttl)
elif zone_sm.zi:
zone_sm.zi.update_zone_ttls(zone_ttl=zone_ttl)
else:
raise ZoneHasNoZi(name)
results = exec_zonesm(zone_sm, ZoneSMDoRefresh)
self._finish_op()
def list_pending_events(self):
"""
List pending events
"""
self._begin_op()
db_query_slice = get_numeric_setting('db_query_slice', int)
db_session = self.db_session
query = db_session.query(Event).filter(Event.processed == None)\
.order_by(Event.id_).yield_per(db_query_slice)
results = []
for event in query:
json_event = event.to_engine_brief(time_format=self.time_format)
results.append(json_event)
self._finish_op()
return results
def _find_sg_byname(self, sg_name):
"""
        Given an sg_name, return the server group
"""
db_session = self.db_session
return find_sg_byname(db_session, sg_name, raise_exc=True)
def _show_sg(self, sg):
"""
Back end - Show the details of an SG
"""
result = sg.to_engine()
servers = []
for server in sg.servers:
servers.append(server.to_engine_brief())
result['servers'] = servers if servers else None
self._finish_op()
return result
def show_sg(self, sg_name):
"""
Show the details of an SG
"""
self._begin_op()
sg = self._find_sg_byname(sg_name)
return self._show_sg(sg)
def show_replica_sg(self):
"""
        Show the Master SM's replica SG - sub call for status display
"""
self._begin_op()
db_session = self.db_session
replica_sg = get_mastersm_replica_sg(db_session)
if not replica_sg:
raise NoReplicaSgFound()
return self._show_sg(replica_sg)
def list_server(self, *servers, sg_name=None, show_all=True,
show_active=False):
"""
List servers
"""
self._begin_op()
if not servers:
servers = '*'
server_list = []
#
server_pattern = ' '.join(servers)
servers = [x.replace('*', '%') for x in servers]
servers = [x.replace('?', '_') for x in servers]
for s in servers:
query = self.db_session.query(ServerSM)\
.filter(ServerSM.name.like(s))\
.order_by(ServerSM.name)
if sg_name:
if sg_name not in list_all_sgs(self.db_session):
raise NoSgFound(sg_name)
query = query.join(ServerGroup,
ServerGroup.id_ == ServerSM.sg_id)\
.filter(ServerGroup.name == sg_name)
server_list.extend(query.all())
replica_sg = get_mastersm_replica_sg(self.db_session)
if not show_all:
server_list = [s for s in server_list if s.sg != replica_sg ]
if show_active:
server_list = [s for s in server_list if (not s.is_disabled())]
if not server_list:
raise NoServerFound('*')
server_list = [ s.to_engine_brief(time_format=self.time_format)
for s in server_list ]
server_list = sorted(server_list, key=lambda s: s['server_name'])
self._finish_op()
return server_list
def show_dms_status(self):
"""
Show DMS system status
"""
result = {}
try:
result['show_replica_sg'] = self.show_replica_sg()
except NoReplicaSgFound:
result['show_replica_sg'] = None
result['show_mastersm'] = self.show_mastersm()
try:
result['list_server'] = self.list_server()
except NoServerFound:
result['list_server'] = None
result['list_pending_events'] = self.list_pending_events()
return result
|
onlinepcwizard/dms
|
dms/zone_engine.py
|
Python
|
gpl-3.0
| 74,441
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import tensorflow as tf
from tensorforce import TensorforceError
from tensorforce.core import ModuleDict, memory_modules, optimizer_modules, parameter_modules, \
SignatureDict, TensorDict, TensorSpec, TensorsSpec, tf_function, tf_util, VariableDict
from tensorforce.core.models import Model
from tensorforce.core.networks import Preprocessor
from tensorforce.core.objectives import objective_modules
from tensorforce.core.policies import policy_modules, StochasticPolicy
class TensorforceModel(Model):
def __init__(
self, *,
states, actions, max_episode_timesteps,
policy, memory, update, optimizer, objective, reward_estimation,
baseline, baseline_optimizer, baseline_objective,
l2_regularization, entropy_regularization,
state_preprocessing, reward_preprocessing,
exploration, variable_noise,
parallel_interactions,
config, saver, summarizer, tracking
):
super().__init__(
states=states, actions=actions, l2_regularization=l2_regularization,
parallel_interactions=parallel_interactions, config=config, saver=saver,
summarizer=summarizer, tracking=tracking
)
if max_episode_timesteps is None:
self.max_episode_timesteps = None
else:
self.max_episode_timesteps = int(max_episode_timesteps)
# State preprocessing
self.processed_states_spec = TensorsSpec()
self.state_preprocessing = ModuleDict()
if state_preprocessing == 'linear_normalization':
# Default handling, otherwise layer will be applied to all input types
state_preprocessing = None
if not isinstance(state_preprocessing, dict) or \
any(name not in self.states_spec for name in state_preprocessing):
state_preprocessing = {name: state_preprocessing for name in self.states_spec}
for name, spec in self.states_spec.items():
if name in state_preprocessing:
layers = state_preprocessing[name]
elif spec.type in state_preprocessing:
layers = state_preprocessing[spec.type]
else:
layers = None
# Normalize bounded inputs to [-2.0, 2.0]
if spec.type == 'float' and spec.min_value is not None and \
spec.max_value is not None and layers is None:
layers = ['linear_normalization']
if layers is None:
self.processed_states_spec[name] = self.states_spec[name]
else:
if name is None:
module_name = 'state_preprocessing'
else:
module_name = name + '_preprocessing'
self.state_preprocessing[name] = self.submodule(
name=module_name, module=Preprocessor, is_trainable=False, input_spec=spec,
layers=layers
)
spec = self.state_preprocessing[name].output_spec()
self.processed_states_spec[name] = spec
if spec.type == 'float' and spec.min_value is not None and \
spec.max_value is not None:
if isinstance(spec.min_value, float):
if not (-10.0 <= spec.min_value < 0.0) or not (0.0 < spec.max_value <= 10.0):
logging.warning("{}tate{} does not seem to be normalized, consider "
"adding linear_normalization preprocessing.".format(
'S' if layers is None else 'Preprocessed s',
'' if name is None else ' ' + name
))
else:
# TODO: missing +/-10.0 check, but cases of values +/-inf are already covered by
# previous no-bound warning
if (spec.min_value >= 0.0).any() or (spec.max_value <= 0.0).any():
logging.warning("{}tate{} does not seem to be normalized, consider "
"adding linear_normalization preprocessing.".format(
'S' if layers is None else 'Preprocessed s',
'' if name is None else ' ' + name
))
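        # Worked example (assuming linear_normalization maps the declared bounds
        # linearly onto [-2.0, 2.0]): a float state with min_value=0.0 and
        # max_value=10.0 gets the default layer, so an input of 5.0 would become
        # 0.0 and 10.0 would become 2.0.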
# Reward preprocessing
if reward_preprocessing is None:
self.reward_preprocessing = None
else:
self.reward_preprocessing = self.submodule(
name='reward_preprocessing', module=Preprocessor, is_trainable=False,
input_spec=self.reward_spec, layers=reward_preprocessing
)
if self.reward_preprocessing.output_spec() != self.reward_spec:
raise TensorforceError.mismatch(
name='reward_preprocessing', argument='output spec',
value1=self.reward_preprocessing.output_spec(), value2=self.reward_spec
)
# Action exploration
if exploration is None:
exploration = 0.0
if isinstance(exploration, dict) and all(name in self.actions_spec for name in exploration):
# Different exploration per action
self.exploration = ModuleDict()
for name, spec in self.actions_spec.items():
if name in exploration:
module = exploration[name]
elif spec.type in exploration:
module = exploration[spec.type]
else:
module = None
if module is None:
pass
elif spec.type in ('bool', 'int'):
self.exploration[name] = self.submodule(
name=(name + '_exploration'), module=module, modules=parameter_modules,
is_trainable=False, dtype='float', min_value=0.0, max_value=1.0
)
else:
self.exploration[name] = self.submodule(
name=(name + '_exploration'), module=module, modules=parameter_modules,
is_trainable=False, dtype='float', min_value=0.0
)
else:
# Same exploration for all actions
self.exploration = self.submodule(
name='exploration', module=exploration, modules=parameter_modules,
is_trainable=False, dtype='float', min_value=0.0
)
# Variable noise
if variable_noise is None:
variable_noise = 0.0
self.variable_noise = self.submodule(
name='variable_noise', module=variable_noise, modules=parameter_modules,
is_trainable=False, dtype='float', min_value=0.0
)
# Reward estimation argument check
if not all(key in (
'advantage_processing', 'discount', 'estimate_advantage', 'gae_decay', 'horizon',
'predict_action_values', 'predict_horizon_values', 'predict_terminal_values',
'return_processing', 'trace_decay'
) for key in reward_estimation):
raise TensorforceError.value(
name='agent', argument='reward_estimation', value=reward_estimation,
hint='not from {advantage_processing,discount,estimate_advantage,gae_decay,'
'horizon,predict_action_values,predict_horizon_values,predict_terminal_values,'
'return_processing,trace_decay}'
)
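        # Illustrative reward_estimation value (keys as accepted above, values
        # chosen only as an example):
        #   dict(horizon=20, discount=0.99, estimate_advantage=True)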
# Reward estimation
self.estimate_advantage = reward_estimation.get('estimate_advantage', False)
self.predict_horizon_values = reward_estimation.get('predict_horizon_values')
self.predict_action_values = reward_estimation.get('predict_action_values', False)
self.predict_terminal_values = reward_estimation.get('predict_terminal_values', False)
# Return horizon
if reward_estimation['horizon'] == 'episode':
self.reward_horizon = 'episode'
if self.predict_horizon_values is None:
self.predict_horizon_values = 'early'
elif self.predict_horizon_values == 'late':
raise TensorforceError.value(
name='agent', argument='reward_estimation[predict_horizon_values]',
value=self.predict_horizon_values,
condition='reward_estimation[reward_horizon] is "episode"'
)
else:
self.reward_horizon = self.submodule(
name='reward_horizon', module=reward_estimation['horizon'],
modules=parameter_modules, dtype='int', min_value=1,
max_value=self.max_episode_timesteps
)
if self.predict_horizon_values is None:
self.predict_horizon_values = 'late'
# Reward discount
reward_discount = reward_estimation.get('discount')
if reward_discount is None:
reward_discount = 1.0
self.reward_discount = self.submodule(
name='reward_discount', module=reward_discount, modules=parameter_modules,
dtype='float', min_value=0.0, max_value=1.0
)
# Entropy regularization
if entropy_regularization is None:
entropy_regularization = 0.0
self.entropy_regularization = self.submodule(
name='entropy_regularization', module=entropy_regularization,
modules=parameter_modules, is_trainable=False, dtype='float', min_value=0.0
)
# Update mode
if not all(key in ('batch_size', 'frequency', 'start', 'unit') for key in update):
raise TensorforceError.value(
name='agent', argument='update', value=list(update),
hint='not from {batch_size,frequency,start,unit}'
)
# update: unit
elif 'unit' not in update:
raise TensorforceError.required(name='agent', argument='update[unit]')
elif update['unit'] not in ('timesteps', 'episodes'):
raise TensorforceError.value(
name='agent', argument='update[unit]', value=update['unit'],
hint='not in {timesteps,episodes}'
)
# update: batch_size
elif 'batch_size' not in update:
raise TensorforceError.required(name='agent', argument='update[batch_size]')
self.update_unit = update['unit']
self.update_batch_size = self.submodule(
name='update_batch_size', module=update['batch_size'], modules=parameter_modules,
is_trainable=False, dtype='int', min_value=1
)
if 'frequency' in update and update['frequency'] == 'never':
self.update_frequency = None
else:
frequency = update.get('frequency')
if frequency is None:
frequency = update['batch_size']
elif isinstance(frequency, float):
if frequency <= 0.0 or frequency > 1.0:
raise TensorforceError.value(
name='agent', argument='update[frequency]', value=update['frequency'],
hint='not in (0.0, 1.0]'
)
else:
frequency = max(1, int(frequency * update['batch_size']))
self.update_frequency = self.submodule(
name='update_frequency', module=frequency, modules=parameter_modules,
is_trainable=False, dtype='int', min_value=1,
max_value=max(2, self.update_batch_size.max_value())
)
start = update.get('start')
if start is None:
start = 0
self.update_start = self.submodule(
name='update_start', module=start, modules=parameter_modules, is_trainable=False,
dtype='int', min_value=0
)
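        # Worked example (illustrative values): update=dict(unit='timesteps',
        # batch_size=64, frequency=0.25) triggers an update every
        # max(1, int(0.25 * 64)) = 16 timesteps.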
# Baseline optimization overview:
# Policy Objective Optimizer Config
# n n n default predict_horizon_values=False
# n n f default predict_horizon=False
# n n y default predict_horizon=False
# n y n main policy, shared loss/kldiv, weighted 1.0
# n y f main policy, shared loss/kldiv, weighted
# n y y main policy, separate
# y n n estimate_advantage=True,advantage_in_loss=True
# y n f shared objective/loss/kldiv, weighted
# y n y shared objective
# y y n shared loss/kldiv, weighted 1.0, equal horizon
# y y f shared loss/kldiv, weighted, equal horizon
# y y y separate
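        # Reading the table above (interpretation): the columns indicate whether a
        # separate baseline policy, a baseline objective and a baseline optimizer
        # are given; 'f' presumably denotes a float baseline_optimizer, which is
        # treated as a baseline loss weight below.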
self.separate_baseline = (baseline is not None)
if baseline is None and baseline_objective is None and \
'predict_horizon_values' not in reward_estimation:
self.predict_horizon_values = False
if baseline is not None and baseline_objective is None and \
baseline_optimizer is None:
if 'estimate_advantage' not in reward_estimation:
self.estimate_advantage = True
self.advantage_in_loss = True
else:
self.advantage_in_loss = False
if baseline_optimizer is None and baseline_objective is not None:
baseline_optimizer = 1.0
if baseline_optimizer is None or isinstance(baseline_optimizer, float):
baseline_is_trainable = True
else:
baseline_is_trainable = False
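        # With no dedicated baseline optimizer (or only a loss weight), the
        # baseline has to be trained through the main optimizer, hence its
        # variables are marked trainable here.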
# Return processing
return_processing = reward_estimation.get('return_processing')
if return_processing is None:
self.return_processing = None
else:
self.return_processing = self.submodule(
name='return_processing', module=Preprocessor, is_trainable=False,
input_spec=self.reward_spec, layers=return_processing,
is_preprocessing_layer_valid=False
)
if self.return_processing.output_spec() != self.reward_spec:
raise TensorforceError.mismatch(
name='reward_estimation[return_processing]', argument='output spec',
value1=self.return_processing.output_spec(), value2=self.reward_spec
)
# Advantage processing
advantage_processing = reward_estimation.get('advantage_processing')
if advantage_processing is None:
self.advantage_processing = None
else:
if self.estimate_advantage is False:
raise TensorforceError.invalid(
name='agent', argument='reward_estimation[advantage_processing]',
condition='reward_estimation[estimate_advantage] is false'
)
self.advantage_processing = self.submodule(
name='advantage_processing', module=Preprocessor, is_trainable=False,
input_spec=self.reward_spec, layers=advantage_processing,
is_preprocessing_layer_valid=False
)
if self.advantage_processing.output_spec() != self.reward_spec:
raise TensorforceError.mismatch(
name='reward_estimation[advantage_processing]', argument='output spec',
value1=self.advantage_processing.output_spec(), value2=self.reward_spec
)
# Objectives
self.objective = self.submodule(
name='policy_objective', module=objective, modules=objective_modules,
states_spec=self.processed_states_spec, auxiliaries_spec=self.auxiliaries_spec,
actions_spec=self.actions_spec, reward_spec=self.reward_spec
)
if baseline_objective is None:
self.baseline_objective = None
else:
self.baseline_objective = self.submodule(
name='baseline_objective', module=baseline_objective, modules=objective_modules,
is_trainable=baseline_is_trainable, states_spec=self.processed_states_spec,
auxiliaries_spec=self.auxiliaries_spec, actions_spec=self.actions_spec,
reward_spec=self.reward_spec
)
assert len(self.baseline_objective.required_baseline_fns()) == 0
# Policy
required_fns = {'policy'}
required_fns.update(self.objective.required_policy_fns())
if not self.separate_baseline:
if self.predict_horizon_values is not False or self.estimate_advantage is not False:
if self.predict_action_values:
required_fns.add('action_value')
else:
required_fns.add('state_value')
required_fns.update(self.objective.required_baseline_fns())
if self.baseline_objective is not None:
required_fns.update(self.baseline_objective.required_policy_fns())
if required_fns <= {'state_value'}:
default_module = 'parametrized_state_value'
elif required_fns <= {'action_value'} and \
all(spec.type == 'float' for spec in self.actions_spec.values()):
default_module = 'parametrized_action_value'
elif required_fns <= {'policy', 'action_value', 'state_value'} and \
all(spec.type in ('bool', 'int') for spec in self.actions_spec.values()):
default_module = 'parametrized_value_policy'
elif required_fns <= {'policy', 'stochastic'}:
default_module = 'parametrized_distributions'
else:
logging.warning(
"Policy type should be explicitly specified for non-standard agent configuration."
)
default_module = 'parametrized_distributions'
self.policy = self.submodule(
name='policy', module=policy, modules=policy_modules, default_module=default_module,
states_spec=self.processed_states_spec, auxiliaries_spec=self.auxiliaries_spec,
actions_spec=self.actions_spec
)
self.internals_spec['policy'] = self.policy.internals_spec
self.initial_internals['policy'] = self.policy.internals_init()
self.objective.internals_spec = self.policy.internals_spec
if not self.entropy_regularization.is_constant(value=0.0) and \
not isinstance(self.policy, StochasticPolicy):
raise TensorforceError.invalid(
name='agent', argument='entropy_regularization',
condition='policy is not stochastic'
)
# Baseline
if self.separate_baseline:
if self.predict_horizon_values is not False or self.estimate_advantage is not False:
if self.predict_action_values:
required_fns = {'action_value'}
else:
required_fns = {'state_value'}
required_fns.update(self.objective.required_baseline_fns())
if self.baseline_objective is not None:
required_fns.update(self.baseline_objective.required_policy_fns())
if required_fns <= {'state_value'}:
default_module = 'parametrized_state_value'
elif required_fns <= {'action_value'} and \
all(spec.type == 'float' for spec in self.actions_spec.values()):
default_module = 'parametrized_action_value'
elif required_fns <= {'policy', 'action_value', 'state_value'} and \
all(spec.type in ('bool', 'int') for spec in self.actions_spec.values()):
default_module = 'parametrized_value_policy'
elif required_fns <= {'policy', 'stochastic'}:
default_module = 'parametrized_distributions'
else:
logging.warning("Policy type should be explicitly specified for non-standard agent "
"configuration.")
default_module = 'parametrized_distributions'
self.baseline = self.submodule(
name='baseline', module=baseline, modules=policy_modules,
default_module=default_module, is_trainable=baseline_is_trainable,
states_spec=self.processed_states_spec, auxiliaries_spec=self.auxiliaries_spec,
actions_spec=self.actions_spec
)
self.internals_spec['baseline'] = self.baseline.internals_spec
self.initial_internals['baseline'] = self.baseline.internals_init()
else:
self.baseline = self.policy
if self.baseline_objective is not None:
self.baseline_objective.internals_spec = self.baseline.internals_spec
# Check for name collisions
for name in self.internals_spec:
if name in self.value_names:
raise TensorforceError.exists(name='value name', value=name)
self.value_names.add(name)
# Optimizers
if baseline_optimizer is None:
self.baseline_loss_weight = None
internals_spec = self.internals_spec
self.baseline_optimizer = None
elif isinstance(baseline_optimizer, float):
self.baseline_loss_weight = self.submodule(
name='baseline_loss_weight', module=baseline_optimizer, modules=parameter_modules,
is_trainable=False, dtype='float', min_value=0.0
)
internals_spec = self.internals_spec
self.baseline_optimizer = None
else:
self.baseline_loss_weight = None
internals_spec = self.internals_spec['policy']
if self.separate_baseline:
baseline_internals = self.internals_spec['baseline']
else:
baseline_internals = self.internals_spec['policy']
arguments_spec = TensorsSpec(
states=self.processed_states_spec, horizons=TensorSpec(type='int', shape=(2,)),
internals=baseline_internals, auxiliaries=self.auxiliaries_spec,
actions=self.actions_spec, reward=self.reward_spec
)
if self.baseline_objective is not None:
arguments_spec['reference'] = self.baseline_objective.reference_spec()
self.baseline_optimizer = self.submodule(
name='baseline_optimizer', module=baseline_optimizer, modules=optimizer_modules,
is_trainable=False, arguments_spec=arguments_spec
)
arguments_spec = TensorsSpec(
states=self.processed_states_spec, horizons=TensorSpec(type='int', shape=(2,)),
internals=internals_spec, auxiliaries=self.auxiliaries_spec, actions=self.actions_spec,
reward=self.reward_spec
)
if self.baseline_objective is not None and self.baseline_loss_weight is not None and \
not self.baseline_loss_weight.is_constant(value=0.0):
arguments_spec['reference'] = TensorsSpec(
policy=self.objective.reference_spec(),
baseline=self.baseline_objective.reference_spec()
)
else:
arguments_spec['reference'] = self.objective.reference_spec()
self.optimizer = self.submodule(
name='policy_optimizer', module=optimizer, modules=optimizer_modules,
arguments_spec=arguments_spec
)
# Memory
values_spec = TensorsSpec(
states=self.processed_states_spec, internals=self.internals_spec,
auxiliaries=self.auxiliaries_spec, actions=self.actions_spec,
terminal=self.terminal_spec, reward=self.reward_spec
)
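        # Minimum memory capacity (as computed below): for timestep-based updates,
        # max batch size + 1 + past horizon (+ reward horizon), e.g. with
        # illustrative values 64 + 1 + 0 + 10 = 75; for episode-based updates,
        # (max batch size + 1) * max_episode_timesteps.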
if self.update_unit == 'timesteps':
max_past_horizon = max(
self.policy.max_past_horizon(on_policy=False),
self.baseline.max_past_horizon(on_policy=False)
)
min_capacity = self.update_batch_size.max_value() + 1 + max_past_horizon
if self.reward_horizon == 'episode':
min_capacity += self.max_episode_timesteps
else:
min_capacity += self.reward_horizon.max_value()
if self.max_episode_timesteps is not None:
min_capacity = max(min_capacity, self.max_episode_timesteps)
elif self.update_unit == 'episodes':
if self.max_episode_timesteps is None:
min_capacity = None
else:
min_capacity = (self.update_batch_size.max_value() + 1) * self.max_episode_timesteps
else:
assert False
if self.config.buffer_observe == 'episode':
if self.max_episode_timesteps is not None:
min_capacity = max(min_capacity, 2 * self.max_episode_timesteps)
elif isinstance(self.config.buffer_observe, int):
if min_capacity is None:
min_capacity = 2 * self.config.buffer_observe
else:
min_capacity = max(min_capacity, 2 * self.config.buffer_observe)
self.memory = self.submodule(
name='memory', module=memory, modules=memory_modules, is_trainable=False,
values_spec=values_spec, min_capacity=min_capacity
)
# Trace decay
trace_decay = reward_estimation.get('trace_decay', 1.0)
if trace_decay != 1.0 and self.predict_horizon_values != 'early':
raise TensorforceError.invalid(
name='agent', argument='reward_estimation[trace_decay]',
condition='reward_estimation[predict_horizon_values] != "early"'
)
self.trace_decay = self.submodule(
name='trace_decay', module=trace_decay, modules=parameter_modules, dtype='float',
min_value=0.0, max_value=1.0
)
# GAE decay
gae_decay = reward_estimation.get('gae_decay', 0.0)
if gae_decay != 0.0:
from tensorforce.core.memories import Recent
if not isinstance(self.memory, Recent):
raise TensorforceError.invalid(
name='agent', argument='reward_estimation[gae_decay]',
condition='memory type is not Recent'
)
elif self.estimate_advantage is False:
raise TensorforceError.invalid(
name='agent', argument='reward_estimation[gae_decay]',
condition='reward_estimation[estimate_advantage] is false'
)
elif self.advantage_in_loss:
raise TensorforceError.invalid(
name='agent', argument='reward_estimation[gae_decay]',
condition='advantage-in-loss mode'
)
self.gae_decay = self.submodule(
name='gae_decay', module=gae_decay, modules=parameter_modules, dtype='float',
min_value=0.0, max_value=1.0
)
def get_architecture(self):
if self.state_preprocessing.is_singleton():
architecture = 'State-preprocessing: {}\n'.format(
self.state_preprocessing.singleton().get_architecture().replace('\n', '\n ')
)
else:
architecture = ''
for name, preprocessor in self.state_preprocessing.items():
architecture += ' {}: {}\n'.format(
name, preprocessor.get_architecture().replace('\n', '\n ')
)
if len(architecture) > 0:
architecture = 'State-preprocessing:\n' + architecture
architecture = 'Policy:\n {}'.format(
self.policy.get_architecture().replace('\n', '\n ')
)
if self.separate_baseline:
architecture += '\nBaseline:\n {}'.format(
self.baseline.get_architecture().replace('\n', '\n ')
)
elif self.predict_horizon_values or self.baseline_objective is not None:
architecture += '\nBaseline: policy used as baseline'
return architecture
def initialize(self):
super().initialize()
# Initial variables summaries
if self.summaries == 'all' or 'variables' in self.summaries:
with self.summarizer.as_default():
for variable in self.trainable_variables:
name = variable.name
assert name.startswith(self.name + '/') and name[-2:] == ':0'
# Add prefix self.name since otherwise different scope from later summaries
name = self.name + '/variables/' + name[len(self.name) + 1: -2]
x = tf.math.reduce_mean(input_tensor=variable)
tf.summary.scalar(name=name, data=x, step=self.updates)
def core_initialize(self):
super().core_initialize()
# Preprocessed episode reward
if self.reward_preprocessing is not None:
self.preprocessed_episode_return = self.variable(
name='preprocessed-episode-return',
spec=TensorSpec(type=self.reward_spec.type, shape=(self.parallel_interactions,)),
initializer='zeros', is_trainable=False, is_saved=False
)
# Buffer index
self.buffer_index = self.variable(
name='buffer-index', spec=TensorSpec(type='int', shape=(self.parallel_interactions,)),
initializer='zeros', is_trainable=False, is_saved=False
)
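        # Act/observe buffer capacity: a full episode when rewards span the whole
        # episode, multiple parallel interactions are used, or observes are
        # buffered per episode; otherwise buffer_observe plus the maximum reward
        # horizon (capped at max_episode_timesteps), used as a circular buffer.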
if self.reward_horizon == 'episode' or self.parallel_interactions > 1 or \
self.config.buffer_observe == 'episode':
capacity = self.max_episode_timesteps
else:
capacity = self.config.buffer_observe + self.reward_horizon.max_value()
if self.max_episode_timesteps is not None:
capacity = min(capacity, self.max_episode_timesteps)
# States/internals/auxiliaries/actions buffers
def function(name, spec):
shape = (self.parallel_interactions, capacity) + spec.shape
return self.variable(
name=(name + '-buffer'), spec=TensorSpec(type=spec.type, shape=shape),
initializer='zeros', is_trainable=False, is_saved=False
)
self.states_buffer = self.processed_states_spec.fmap(
function=function, cls=VariableDict, with_names='states'
)
self.internals_buffer = self.internals_spec.fmap(
function=function, cls=VariableDict, with_names=True
)
self.auxiliaries_buffer = self.auxiliaries_spec.fmap(
function=function, cls=VariableDict, with_names='action'
)
self.actions_buffer = self.actions_spec.fmap(
function=function, cls=VariableDict, with_names='actions'
)
# Terminal/reward buffer
if self.config.buffer_observe != 'episode':
self.terminal_buffer = function('terminal', self.terminal_spec)
self.reward_buffer = function('reward', self.reward_spec)
# Buffer start
if self.reward_horizon != 'episode' and self.parallel_interactions == 1 and \
self.config.buffer_observe != 'episode':
self.circular_buffer = True
self.buffer_capacity = capacity
self.buffer_start = self.variable(
name='buffer-start',
spec=TensorSpec(type='int', shape=(self.parallel_interactions,)),
initializer='zeros', is_trainable=False, is_saved=False
)
else:
self.circular_buffer = False
# Last update
self.last_update = self.variable(
name='last-update', spec=TensorSpec(type='int'),
initializer=-self.update_frequency.max_value(), is_trainable=False, is_saved=True
)
# Optimizer initialize given variables
if self.advantage_in_loss:
self.optimizer.initialize_given_variables(variables=self.trainable_variables)
else:
self.optimizer.initialize_given_variables(variables=self.policy.trainable_variables)
if self.baseline_optimizer is not None:
self.baseline_optimizer.initialize_given_variables(
variables=self.baseline.trainable_variables
)
# Summaries and tracking
self.register_summary(label='loss', name='losses/policy-objective-loss')
self.register_tracking(
label='loss', name='policy-objective-loss', spec=TensorSpec(type='float')
)
self.register_summary(label='loss', name='losses/policy-regularization-loss')
self.register_tracking(
label='loss', name='policy-regularization-loss', spec=TensorSpec(type='float')
)
self.register_summary(label='loss', name='losses/policy-loss')
self.register_tracking(label='loss', name='policy-loss', spec=TensorSpec(type='float'))
if self.baseline_optimizer is not None or (
self.baseline_loss_weight is not None and
not self.baseline_loss_weight.is_constant(value=0.0)
):
self.register_summary(label='loss', name='losses/baseline-loss')
            self.register_tracking(
                label='loss', name='baseline-loss', spec=TensorSpec(type='float')
            )
if self.separate_baseline:
self.register_summary(label='loss', name='losses/baseline-objective-loss')
self.register_tracking(
label='loss', name='baseline-objective-loss', spec=TensorSpec(type='float')
)
self.register_summary(label='loss', name='losses/baseline-regularization-loss')
self.register_tracking(
label='loss', name='baseline-regularization-loss',
spec=TensorSpec(type='float')
)
if self.reward_preprocessing is not None:
self.register_tracking(
label='reward', name='preprocessed-reward', spec=TensorSpec(type='float')
)
self.register_tracking(
label='reward', name='preprocessed-episode-return', spec=TensorSpec(type='float')
)
self.register_tracking(label='reward', name='update-return', spec=TensorSpec(type='float'))
if self.return_processing is not None:
self.register_tracking(
label='reward', name='update-processed-return', spec=TensorSpec(type='float')
)
if self.estimate_advantage is not False:
self.register_tracking(
label='reward', name='update-advantage', spec=TensorSpec(type='float')
)
if self.advantage_processing is not None:
self.register_tracking(
label='reward', name='update-processed-advantage',
spec=TensorSpec(type='float')
)
if not self.gae_decay.is_constant(value=0.0):
self.register_tracking(
label='reward', name='update-gae', spec=TensorSpec(type='float')
)
self.register_tracking(label='entropy', name='entropy', spec=TensorSpec(type='float'))
self.register_tracking(
label='kl-divergence', name='kl-divergence', spec=TensorSpec(type='float')
)
if len(self.actions_spec) > 1:
for name in self.actions_spec:
self.register_tracking(
label='entropy', name=('entropies/' + name), spec=TensorSpec(type='float')
)
self.register_tracking(
label='kl-divergence', name=('kl-divergences/' + name),
spec=TensorSpec(type='float')
)
def initialize_api(self):
super().initialize_api()
if 'graph' in self.summaries:
tf.summary.trace_on(graph=True, profiler=False)
self.experience(
states=self.states_spec, internals=self.internals_spec,
auxiliaries=self.auxiliaries_spec, actions=self.actions_spec,
terminal=self.terminal_spec, reward=self.reward_spec, _initialize=True
)
if 'graph' in self.summaries:
tf.summary.trace_export(name='experience', step=self.timesteps, profiler_outdir=None)
tf.summary.trace_on(graph=True, profiler=False)
self.update(_initialize=True)
if 'graph' in self.summaries:
tf.summary.trace_export(name='update', step=self.timesteps, profiler_outdir=None)
def get_savedmodel_trackables(self):
trackables = super().get_savedmodel_trackables()
for name, trackable in self.policy.get_savedmodel_trackables().items():
assert name not in trackables
trackables[name] = trackable
if self.separate_baseline and len(self.internals_spec['baseline']) > 0:
for name, trackable in self.baseline.get_savedmodel_trackables().items():
assert name not in trackables
trackables[name] = trackable
return trackables
def input_signature(self, *, function):
if function == 'baseline_loss':
if self.separate_baseline:
internals_signature = self.internals_spec['baseline'].signature(batched=True)
else:
internals_signature = self.internals_spec['policy'].signature(batched=True)
if self.advantage_in_loss:
assert False
elif self.baseline_objective is None:
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=internals_signature,
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True),
reference=self.objective.reference_spec().signature(batched=True)
)
else:
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=internals_signature,
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True),
reference=self.baseline_objective.reference_spec().signature(batched=True)
)
elif function == 'core_experience':
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
terminal=self.terminal_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True)
)
elif function == 'core_update':
return SignatureDict()
elif function == 'experience':
return SignatureDict(
states=self.states_spec.signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
terminal=self.terminal_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True)
)
elif function == 'loss':
if self.baseline_objective is not None and self.baseline_loss_weight is not None and \
not self.baseline_loss_weight.is_constant(value=0.0):
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True),
reference=SignatureDict(
policy=self.objective.reference_spec().signature(batched=True),
baseline=self.baseline_objective.reference_spec().signature(batched=True)
)
)
elif self.baseline_optimizer is None:
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec.signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True),
reference=self.objective.reference_spec().signature(batched=True)
)
else:
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec['policy'].signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True),
actions=self.actions_spec.signature(batched=True),
reward=self.reward_spec.signature(batched=True),
reference=self.objective.reference_spec().signature(batched=True)
)
elif function == 'regularize':
return SignatureDict(
states=self.processed_states_spec.signature(batched=True),
horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
internals=self.internals_spec['policy'].signature(batched=True),
auxiliaries=self.auxiliaries_spec.signature(batched=True)
)
elif function == 'update':
return SignatureDict()
else:
return super().input_signature(function=function)
def output_signature(self, *, function):
if function == 'baseline_loss':
return SignatureDict(
singleton=TensorSpec(type='float', shape=()).signature(batched=False)
)
elif function == 'core_experience':
return SignatureDict(
singleton=TensorSpec(type='bool', shape=()).signature(batched=False)
)
elif function == 'core_update':
return SignatureDict(
singleton=TensorSpec(type='bool', shape=()).signature(batched=False)
)
elif function == 'experience':
return SignatureDict(
timesteps=TensorSpec(type='int', shape=()).signature(batched=False),
episodes=TensorSpec(type='int', shape=()).signature(batched=False)
)
elif function == 'loss':
return SignatureDict(
singleton=TensorSpec(type='float', shape=()).signature(batched=False)
)
elif function == 'update':
return SignatureDict(
singleton=TensorSpec(type='int', shape=()).signature(batched=False)
)
else:
return super().output_signature(function=function)
@tf_function(num_args=0, api_function=True)
def reset(self):
operations = list()
zeros = tf_util.zeros(shape=(self.parallel_interactions,), dtype='int')
operations.append(self.buffer_index.assign(value=zeros, read_value=False))
if self.circular_buffer:
operations.append(self.buffer_start.assign(value=zeros, read_value=False))
operations.append(self.memory.reset())
# TODO: Synchronization optimizer initial sync?
with tf.control_dependencies(control_inputs=operations):
return super().reset()
@tf_function(num_args=6, api_function=True)
def experience(self, *, states, internals, auxiliaries, actions, terminal, reward):
true = tf_util.constant(value=True, dtype='bool')
one = tf_util.constant(value=1, dtype='int')
batch_size = tf_util.cast(x=tf.shape(input=terminal)[0], dtype='int')
# Input assertions
assertions = list()
if self.config.create_tf_assertions:
zero = tf_util.constant(value=0, dtype='int')
assertions.extend(self.states_spec.tf_assert(
x=states, batch_size=batch_size,
message='Agent.experience: invalid {issue} for {name} state input.'
))
assertions.extend(self.internals_spec.tf_assert(
x=internals, batch_size=batch_size,
message='Agent.experience: invalid {issue} for {name} internal input.'
))
assertions.extend(self.auxiliaries_spec.tf_assert(
x=auxiliaries, batch_size=batch_size,
message='Agent.experience: invalid {issue} for {name} input.'
))
assertions.extend(self.actions_spec.tf_assert(
x=actions, batch_size=batch_size,
message='Agent.experience: invalid {issue} for {name} action input.'
))
assertions.extend(self.terminal_spec.tf_assert(
x=terminal, batch_size=batch_size,
message='Agent.experience: invalid {issue} for terminal input.'
))
assertions.extend(self.reward_spec.tf_assert(
x=reward, batch_size=batch_size,
message='Agent.experience: invalid {issue} for reward input.'
))
# Mask assertions
if self.config.enable_int_action_masking:
for name, spec in self.actions_spec.items():
if spec.type == 'int' and spec.num_values is not None:
is_valid = tf.reduce_all(input_tensor=tf.gather(
params=auxiliaries[name]['mask'],
indices=tf.expand_dims(input=actions[name], axis=(spec.rank + 1)),
batch_dims=(spec.rank + 1)
))
assertions.append(tf.debugging.assert_equal(
x=is_valid, y=true, message="Agent.experience: invalid action / mask."
))
            # Assertion: all parallel buffer indices are zero, i.e. not mid-episode
assertions.append(tf.debugging.assert_equal(
x=tf.math.reduce_sum(input_tensor=self.buffer_index, axis=0), y=zero,
message="Agent.experience: cannot be called mid-episode."
))
# Assertion: one terminal
num_terms = tf.math.count_nonzero(input=terminal, dtype=tf_util.get_dtype(type='int'))
assertions.append(tf.debugging.assert_equal(
x=num_terms, y=one,
message="Agent.experience: input contains none or more than one terminal."
))
# Assertion: terminal is last timestep in batch
assertions.append(tf.debugging.assert_greater_equal(
x=terminal[-1], y=one,
message="Agent.experience: terminal is not the last input timestep."
))
with tf.control_dependencies(control_inputs=assertions):
# Preprocessing
for name in states:
if name in self.state_preprocessing:
states[name] = self.state_preprocessing[name].apply(
x=states[name], deterministic=true, independent=False
)
if self.reward_preprocessing is not None:
reward = self.reward_preprocessing.apply(
x=reward, deterministic=true, independent=False
)
# Core experience
experienced = self.core_experience(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=terminal, reward=reward
)
# Increment timestep and episode
with tf.control_dependencies(control_inputs=(experienced,)):
assignments = list()
assignments.append(self.timesteps.assign_add(delta=batch_size, read_value=False))
assignments.append(self.episodes.assign_add(delta=one, read_value=False))
with tf.control_dependencies(control_inputs=assignments):
timestep = tf_util.identity(input=self.timesteps)
episode = tf_util.identity(input=self.episodes)
return timestep, episode
@tf_function(num_args=0, api_function=True)
def update(self):
# Core update
updated = self.core_update()
with tf.control_dependencies(control_inputs=(updated,)):
return tf_util.identity(input=self.updates)
@tf_function(num_args=5)
def core_act(self, *, states, internals, auxiliaries, parallel, deterministic, independent):
zero_float = tf_util.constant(value=0.0, dtype='float')
# On-policy policy/baseline horizon (TODO: retrieve from buffer!)
assertions = list()
if self.config.create_tf_assertions:
zero = tf_util.constant(value=0, dtype='int')
past_horizon = tf.math.maximum(
x=self.policy.past_horizon(on_policy=True),
y=self.baseline.past_horizon(on_policy=True)
)
assertions.append(tf.debugging.assert_equal(
x=past_horizon, y=zero,
message="Policy/baseline on-policy horizon currently not supported."
))
if not independent:
false = tf_util.constant(value=False, dtype='bool')
assertions.append(tf.debugging.assert_equal(
x=deterministic, y=false,
message="Invalid combination deterministic and not independent."
))
# Variable noise
if len(self.policy.trainable_variables) > 0 and (
(not independent and not self.variable_noise.is_constant(value=0.0)) or
(independent and self.variable_noise.final_value() != 0.0)
):
if independent:
variable_noise = tf_util.constant(
value=self.variable_noise.final_value(), dtype=self.variable_noise.spec.type
)
else:
variable_noise = self.variable_noise.value()
def no_variable_noise():
return [tf.zeros_like(input=var) for var in self.policy.trainable_variables]
def apply_variable_noise():
variable_noise_tensors = list()
for variable in self.policy.trainable_variables:
noise = tf.random.normal(
shape=tf_util.shape(x=variable), mean=0.0, stddev=variable_noise,
dtype=self.variable_noise.spec.tf_type()
)
if variable.dtype != tf_util.get_dtype(type='float'):
noise = tf.cast(x=noise, dtype=variable.dtype)
assignment = variable.assign_add(delta=noise, read_value=False)
with tf.control_dependencies(control_inputs=(assignment,)):
variable_noise_tensors.append(tf_util.identity(input=noise))
return variable_noise_tensors
variable_noise_tensors = tf.cond(
pred=tf.math.logical_or(
x=deterministic, y=tf.math.equal(x=variable_noise, y=zero_float)
), true_fn=no_variable_noise, false_fn=apply_variable_noise
)
else:
variable_noise_tensors = list()
with tf.control_dependencies(control_inputs=(variable_noise_tensors + assertions)):
dependencies = list()
# State preprocessing (after variable noise)
for name in self.states_spec:
if name in self.state_preprocessing:
states[name] = self.state_preprocessing[name].apply(
x=states[name], deterministic=deterministic, independent=independent
)
# Policy act (after variable noise)
batch_size = tf_util.cast(x=tf.shape(input=states.value())[0], dtype='int')
starts = tf.range(batch_size, dtype=tf_util.get_dtype(type='int'))
lengths = tf_util.ones(shape=(batch_size,), dtype='int')
horizons = tf.stack(values=(starts, lengths), axis=1)
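            # Each horizons row is a (start, length) window into the batched
            # states; for single-timestep acting every window has length 1.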
next_internals = TensorDict()
actions, next_internals['policy'] = self.policy.act(
states=states, horizons=horizons, internals=internals['policy'],
auxiliaries=auxiliaries, deterministic=deterministic, independent=independent
)
if isinstance(actions, tf.Tensor):
dependencies.append(actions)
else:
dependencies.extend(actions.flatten())
# Baseline internals (after variable noise)
# TODO: shouldn't be required for independent-act
if self.separate_baseline and len(self.internals_spec['baseline']) > 0:
next_internals['baseline'] = self.baseline.next_internals(
states=states, horizons=horizons, internals=internals['baseline'],
actions=actions, deterministic=deterministic, independent=independent
)
else:
next_internals['baseline'] = TensorDict()
dependencies.extend(next_internals.flatten())
# Reverse variable noise (after policy act)
if len(variable_noise_tensors) > 0:
with tf.control_dependencies(control_inputs=dependencies):
dependencies = list()
def apply_variable_noise():
assignments = list()
for var, noise in zip(self.policy.trainable_variables, variable_noise_tensors):
assignments.append(var.assign_sub(delta=noise, read_value=False))
return tf.group(*assignments)
dependencies.append(tf.cond(
pred=tf.math.equal(x=variable_noise, y=zero_float),
true_fn=tf.no_op, false_fn=apply_variable_noise
))
# Exploration
if (not independent and (
isinstance(self.exploration, dict) or not self.exploration.is_constant(value=0.0)
)) or (independent and (
isinstance(self.exploration, dict) or self.exploration.final_value() != 0.0
)):
# Global exploration
if not isinstance(self.exploration, dict):
# exploration_fns = dict()
if not independent and not self.exploration.is_constant(value=0.0):
exploration = self.exploration.value()
elif independent and self.exploration.final_value() != 0.0:
exploration = tf_util.constant(
value=self.exploration.final_value(), dtype=self.exploration.spec.type
)
else:
assert False
float_dtype = tf_util.get_dtype(type='float')
for name, spec, action in self.actions_spec.zip_items(actions):
# Per-action exploration
if isinstance(self.exploration, dict):
if name not in self.exploration:
continue
elif not independent and not self.exploration[name].is_constant(value=0.0):
                    exploration = self.exploration[name].value()
elif independent and self.exploration[name].final_value() != 0.0:
exploration = tf_util.constant(
value=self.exploration[name].final_value(),
dtype=self.exploration[name].spec.type
)
else:
continue
# Apply exploration
if spec.type == 'bool':
# Bool action: if uniform[0, 1] < exploration, then uniform[True, False]
def apply_exploration():
shape = tf_util.cast(x=tf.shape(input=action), dtype='int')
half = tf_util.constant(value=0.5, dtype='float')
random_action = tf.random.uniform(shape=shape, dtype=float_dtype) < half
is_random = tf.random.uniform(shape=shape, dtype=float_dtype) < exploration
return tf.where(condition=is_random, x=random_action, y=action)
elif spec.type == 'int' and spec.num_values is not None:
if self.config.enable_int_action_masking:
# Masked action: if uniform[0, 1] < exploration, then uniform[unmasked]
# (Similar code as for RandomModel.core_act)
def apply_exploration():
shape = tf_util.cast(x=tf.shape(input=action), dtype='int')
mask = auxiliaries[name]['mask']
choices = tf_util.constant(
value=list(range(spec.num_values)), dtype=spec.type,
shape=(tuple(1 for _ in spec.shape) + (1, spec.num_values))
)
one = tf_util.constant(value=1, dtype='int', shape=(1,))
multiples = tf.concat(values=(shape, one), axis=0)
choices = tf.tile(input=choices, multiples=multiples)
choices = tf.boolean_mask(tensor=choices, mask=mask)
num_valid = tf.math.count_nonzero(input=mask, axis=(spec.rank + 1))
num_valid = tf.reshape(tensor=num_valid, shape=(-1,))
masked_offset = tf.math.cumsum(x=num_valid, axis=0, exclusive=True)
uniform = tf.random.uniform(shape=shape, dtype=float_dtype)
uniform = tf.reshape(tensor=uniform, shape=(-1,))
num_valid = tf_util.cast(x=num_valid, dtype='float')
random_offset = tf.dtypes.cast(
x=(uniform * num_valid), dtype=tf.dtypes.int64
)
random_action = tf.gather(
params=choices, indices=(masked_offset + random_offset)
)
random_action = tf.reshape(tensor=random_action, shape=shape)
is_random = tf.random.uniform(shape=shape, dtype=float_dtype)
is_random = is_random < exploration
return tf.where(condition=is_random, x=random_action, y=action)
else:
# Int action: if uniform[0, 1] < exploration, then uniform[num_values]
def apply_exploration():
shape = tf_util.cast(x=tf.shape(input=action), dtype='int')
random_action = tf.random.uniform(
shape=shape, maxval=spec.num_values, dtype=spec.tf_type()
)
is_random = tf.random.uniform(shape=shape, dtype=float_dtype)
is_random = is_random < exploration
return tf.where(condition=is_random, x=random_action, y=action)
else:
# Int/float action: action + normal[0, exploration]
def apply_exploration():
shape = tf_util.cast(x=tf.shape(input=action), dtype='int')
noise = tf.random.normal(shape=shape, dtype=spec.tf_type())
x = action + noise * exploration
# Clip action if left-/right-bounded
if spec.min_value is not None:
x = tf.math.maximum(x=x, y=spec.min_value)
if spec.max_value is not None:
x = tf.math.minimum(x=x, y=spec.max_value)
return x
# if isinstance(self.exploration, dict):
# Per-action exploration
actions[name] = tf.cond(
pred=tf.math.logical_or(
x=deterministic, y=tf.math.equal(x=exploration, y=zero_float)
), true_fn=(lambda: action), false_fn=apply_exploration
)
# else:
# exploration_fns[name] = apply_exploration
# if not isinstance(self.exploration, dict):
# # Global exploration
# def apply_exploration():
# for name in self.actions_spec:
# actions[name] = exploration_fns[name]()
# return actions
# actions = tf.cond(
# pred=tf.math.equal(x=exploration, y=zero_float),
# true_fn=(lambda: actions), false_fn=apply_exploration
# )
# Update states/internals/auxiliaries/actions buffers
if not independent:
assignments = list()
buffer_index = tf.gather(params=self.buffer_index, indices=parallel)
if self.circular_buffer:
buffer_index = tf.math.mod(x=buffer_index, y=self.buffer_capacity)
indices = tf.stack(values=(parallel, buffer_index), axis=1)
for name, buffer, state in self.states_buffer.zip_items(states):
value = tf.tensor_scatter_nd_update(tensor=buffer, indices=indices, updates=state)
assignments.append(buffer.assign(value=value))
# assignments.append(buffer.scatter_nd_update(indices=indices, updates=state))
for name, buffer, internal in self.internals_buffer.zip_items(internals): # not next_*
value = tf.tensor_scatter_nd_update(
tensor=buffer, indices=indices, updates=internal
)
assignments.append(buffer.assign(value=value))
# assignments.append(buffer.scatter_nd_update(indices=indices, updates=internal))
for name, buffer, auxiliary in self.auxiliaries_buffer.zip_items(auxiliaries):
value = tf.tensor_scatter_nd_update(
tensor=buffer, indices=indices, updates=auxiliary
)
assignments.append(buffer.assign(value=value))
# assignments.append(buffer.scatter_nd_update(indices=indices, updates=auxiliary))
for name, buffer, action in self.actions_buffer.zip_items(actions):
value = tf.tensor_scatter_nd_update(tensor=buffer, indices=indices, updates=action)
assignments.append(buffer.assign(value=value))
# assignments.append(buffer.scatter_nd_update(indices=indices, updates=action))
# Increment buffer index (after buffer assignments)
with tf.control_dependencies(control_inputs=assignments):
ones = tf_util.ones(shape=(batch_size,), dtype='int')
indices = tf.expand_dims(input=parallel, axis=1)
value = tf.tensor_scatter_nd_add(
tensor=self.buffer_index, indices=indices, updates=ones
)
dependencies.append(self.buffer_index.assign(value=value))
# sparse_delta = tf.IndexedSlices(values=ones, indices=parallel)
# dependencies.append(self.buffer_index.scatter_add(sparse_delta=sparse_delta))
with tf.control_dependencies(control_inputs=dependencies):
actions = actions.fmap(
function=(lambda name, x: tf_util.identity(input=x, name=name)), with_names=True
)
next_internals = next_internals.fmap(
function=(lambda name, x: tf_util.identity(input=x, name=name)), with_names=True
)
return actions, next_internals
@tf_function(num_args=3)
def core_observe(self, *, terminal, reward, parallel):
zero = tf_util.constant(value=0, dtype='int')
one = tf_util.constant(value=1, dtype='int')
buffer_index = tf.gather(params=self.buffer_index, indices=parallel)
batch_size = tf_util.cast(x=tf.shape(input=terminal)[0], dtype='int')
expanded_parallel = tf.expand_dims(input=tf.expand_dims(input=parallel, axis=0), axis=1)
if self.circular_buffer:
buffer_start = tf.gather(params=self.buffer_start, indices=parallel)
# Assertion: size of terminal equals number of buffered timesteps
assertions = list()
# if self.config.create_tf_assertions:
# if self.circular_buffer:
# maybe_one = tf.minimum(x=buffer_index, y=self.reward_horizon.value())
# assertions.append(tf.debugging.assert_equal(
# x=batch_size, y=(buffer_index - buffer_start - maybe_one),
# message="Agent.observe: number of observe-timesteps has to be equal to number "
# "of buffered act-timesteps."
# ))
# else:
# assertions.append(tf.debugging.assert_equal(
# x=batch_size, y=buffer_index,
# message="Agent.observe: number of observe-timesteps has to be equal to number "
# "of buffered act-timesteps."
# ))
if self.config.buffer_observe == 'episode':
# Observe inputs are always buffered in agent until episode is terminated
# --> Call core_experience directly, no need for terminal/reward buffers
def fn_nonterminal():
# Should not be called
return tf.debugging.assert_equal(x=batch_size, y=zero)
def fn_terminal():
# Gather values from buffers, and episode experience
function = (lambda x: x[parallel, :buffer_index])
states = self.states_buffer.fmap(function=function, cls=TensorDict)
internals = self.internals_buffer.fmap(function=function, cls=TensorDict)
auxiliaries = self.auxiliaries_buffer.fmap(function=function, cls=TensorDict)
actions = self.actions_buffer.fmap(function=function, cls=TensorDict)
return self.core_experience(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=terminal, reward=reward
)
elif self.reward_horizon == 'episode' or self.parallel_interactions > 1:
# Observe inputs need to be buffered until episode is terminated
# --> Call core_experience if terminal, otherwise buffer terminal/reward
batch_parallel = tf.fill(dims=(batch_size,), value=parallel)
def fn_nonterminal():
# Update terminal/reward buffers
assignments = list()
indices = tf.range(start=(buffer_index - batch_size), limit=buffer_index)
indices = tf.stack(values=(batch_parallel, indices), axis=1)
value = tf.tensor_scatter_nd_update(
tensor=self.terminal_buffer, indices=indices, updates=terminal
)
assignments.append(self.terminal_buffer.assign(value=value))
value = tf.tensor_scatter_nd_update(
tensor=self.reward_buffer, indices=indices, updates=reward
)
assignments.append(self.reward_buffer.assign(value=value))
return tf.group(assignments)
def fn_terminal():
# Gather values from buffers, and episode experience
function = (lambda x: x[parallel, :buffer_index])
states = self.states_buffer.fmap(function=function, cls=TensorDict)
internals = self.internals_buffer.fmap(function=function, cls=TensorDict)
auxiliaries = self.auxiliaries_buffer.fmap(function=function, cls=TensorDict)
actions = self.actions_buffer.fmap(function=function, cls=TensorDict)
episode_terminal = self.terminal_buffer[parallel, :buffer_index - batch_size]
episode_reward = self.reward_buffer[parallel, :buffer_index - batch_size]
episode_terminal = tf.concat(values=(episode_terminal, terminal), axis=0)
episode_reward = tf.concat(values=(episode_reward, reward), axis=0)
return self.core_experience(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=episode_terminal, reward=episode_reward
)
else:
# Observe inputs are buffered temporarily and return is computed as soon as possible
# --> Call core_experience if terminal, otherwise ???
capacity = tf_util.constant(value=self.buffer_capacity, dtype='int')
reward_horizon = self.reward_horizon.value()
reward_discount = self.reward_discount.value()
batch_parallel = tf.fill(dims=(batch_size,), value=parallel)
def fn_nonterminal():
# Update terminal/reward buffers
assignments = list()
indices = tf.range(start=(buffer_index - batch_size), limit=buffer_index)
indices = tf.math.mod(x=indices, y=capacity)
indices = tf.stack(values=(batch_parallel, indices), axis=1)
value = tf.tensor_scatter_nd_update(
tensor=self.terminal_buffer, indices=indices, updates=terminal
)
assignments.append(self.terminal_buffer.assign(value=value))
value = tf.tensor_scatter_nd_update(
tensor=self.reward_buffer, indices=indices, updates=reward
)
assignments.append(self.reward_buffer.assign(value=value))
with tf.control_dependencies(control_inputs=assignments):
# Number of completed timesteps to process
num_complete = buffer_index - buffer_start - reward_horizon
def true_fn():
return self._nonterminal_experience(
parallel=parallel, buffer_start=buffer_start, buffer_index=buffer_index,
reward_horizon=reward_horizon, num_complete=num_complete,
reward_discount=reward_discount
)
return tf.cond(pred=(num_complete > zero), true_fn=true_fn, false_fn=tf.no_op)
def fn_terminal():
# Gather values from buffers
indices = tf.range(start=buffer_start, limit=buffer_index)
indices = tf.math.mod(x=indices, y=capacity)
function = (lambda x: tf.gather(params=x[parallel], indices=indices))
states = self.states_buffer.fmap(function=function, cls=TensorDict)
internals = self.internals_buffer.fmap(function=function, cls=TensorDict)
auxiliaries = self.auxiliaries_buffer.fmap(function=function, cls=TensorDict)
actions = self.actions_buffer.fmap(function=function, cls=TensorDict)
indices = tf.range(buffer_start, buffer_index - batch_size)
indices = tf.math.mod(x=indices, y=capacity)
episode_terminal = tf.gather(params=self.terminal_buffer[parallel], indices=indices)
episode_reward = tf.gather(params=self.reward_buffer[parallel], indices=indices)
episode_terminal = tf.concat(values=(episode_terminal, terminal), axis=0)
episode_reward = tf.concat(values=(episode_reward, reward), axis=0)
# Episode experience
experienced = self.core_experience(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=episode_terminal, reward=episode_reward
)
# Increment buffer start index
with tf.control_dependencies(control_inputs=(indices,)):
zeros = tf_util.zeros(shape=(1,), dtype='int')
value = tf.tensor_scatter_nd_update(
tensor=self.buffer_start, indices=expanded_parallel, updates=zeros
)
assignment = self.buffer_start.assign(value=value)
# sparse_delta = tf.IndexedSlices(values=zero, indices=parallel)
# assignment = self.buffer_start.scatter_update(sparse_delta=sparse_delta)
return tf.group((experienced, assignment))
def fn_terminal_continuation():
            # Run the appropriate fn_terminal defined above
operations = [fn_terminal()]
# Reset buffer index
with tf.control_dependencies(control_inputs=operations):
updates = tf_util.zeros(shape=(1,), dtype='int')
indices = tf.expand_dims(input=tf.expand_dims(input=parallel, axis=0), axis=1)
value = tf.tensor_scatter_nd_update(
tensor=self.buffer_index, indices=indices, updates=updates
)
operations.append(self.buffer_index.assign(value=value))
# sparse_delta = tf.IndexedSlices(values=zero, indices=parallel)
# operations.append(self.buffer_index.scatter_update(sparse_delta=sparse_delta))
# Preprocessed episode reward summaries (before preprocessed episode reward reset)
if self.reward_preprocessing is not None:
dependencies = list()
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.gather(params=self.preprocessed_episode_return, indices=parallel)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='preprocessed-episode-return', data=x, step=self.episodes
))
dependencies.extend(self.track(
label='reward', name='preprocessed-episode-return', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
# Reset preprocessed episode reward
with tf.control_dependencies(control_inputs=dependencies):
zeros = tf_util.zeros(shape=(1,), dtype='float')
value = tf.tensor_scatter_nd_update(
tensor=self.preprocessed_episode_return, indices=expanded_parallel,
updates=zeros
)
operations.append(self.preprocessed_episode_return.assign(value=value))
# zero_float = tf_util.constant(value=0.0, dtype='float')
# sparse_delta = tf.IndexedSlices(values=zero_float, indices=parallel)
# operations.append(
# self.preprocessed_episode_return.scatter_update(sparse_delta=sparse_delta)
# )
# Reset preprocessors
for preprocessor in self.state_preprocessing.values():
operations.append(preprocessor.reset())
if self.reward_preprocessing is not None:
operations.append(self.reward_preprocessing.reset())
return tf.group(*operations)
# Reward preprocessing
dependencies = assertions
if self.reward_preprocessing is not None:
with tf.control_dependencies(control_inputs=dependencies):
dependencies = list()
true = tf_util.constant(value=True, dtype='bool')
reward = self.reward_preprocessing.apply(
x=reward, deterministic=true, independent=False
)
# Preprocessed reward summary
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='preprocessed-reward', data=x, step=self.timesteps
))
dependencies.extend(self.track(
label='reward', name='preprocessed-reward', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
# Update preprocessed episode reward
sum_reward = tf.math.reduce_sum(input_tensor=reward, keepdims=True)
value = tf.tensor_scatter_nd_add(
tensor=self.preprocessed_episode_return, indices=expanded_parallel,
updates=sum_reward
)
dependencies.append(self.preprocessed_episode_return.assign(value=value))
# sum_reward = tf.math.reduce_sum(input_tensor=reward)
# sparse_delta = tf.IndexedSlices(values=sum_reward, indices=parallel)
# dependencies.append(
# self.preprocessed_episode_return.scatter_add(sparse_delta=sparse_delta)
# )
# Handle terminal vs non-terminal (after preprocessed episode reward)
with tf.control_dependencies(control_inputs=dependencies):
is_terminal = tf.concat(values=([zero], terminal), axis=0)[-1] > zero
experienced = tf.cond(
pred=is_terminal, true_fn=fn_terminal_continuation, false_fn=fn_nonterminal
)
# Handle periodic update
with tf.control_dependencies(control_inputs=(experienced,)):
if self.update_frequency is None:
updated = tf_util.constant(value=False, dtype='bool')
else:
frequency = self.update_frequency.value()
start = self.update_start.value()
if self.update_unit == 'timesteps':
# Timestep-based batch
past_horizon = tf.math.maximum(
x=self.policy.past_horizon(on_policy=False),
y=self.baseline.past_horizon(on_policy=False)
)
unit = self.timesteps
start = tf.math.maximum(x=start, y=(frequency + past_horizon + one))
if self.reward_horizon == 'episode':
min_start = tf.where(
condition=(self.episodes > zero), x=start, y=(unit + one)
)
start = tf.math.maximum(x=start, y=min_start)
else:
start += self.reward_horizon.value()
if self.config.buffer_observe == 'episode':
min_start = tf.where(
condition=(self.episodes > zero), x=start, y=(unit + one)
)
start = tf.math.maximum(x=start, y=min_start)
else:
buffer_observe = tf_util.constant(
value=self.config.buffer_observe, dtype='int'
)
start = tf.math.maximum(x=start, y=buffer_observe)
elif self.update_unit == 'episodes':
# Episode-based batch
start = tf.math.maximum(x=start, y=frequency)
# (Episode counter is only incremented at the end of observe)
unit = self.episodes + tf.where(condition=is_terminal, x=one, y=zero)
unit = unit - start
is_frequency = tf.math.greater_equal(x=unit, y=(self.last_update + frequency))
def perform_update():
assignment = self.last_update.assign(value=unit, read_value=False)
with tf.control_dependencies(control_inputs=(assignment,)):
return self.core_update()
def no_update():
return tf_util.constant(value=False, dtype='bool')
updated = tf.cond(pred=is_frequency, true_fn=perform_update, false_fn=no_update)
with tf.control_dependencies(control_inputs=(updated,)):
return tf_util.identity(input=updated)
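    # _nonterminal_experience (below) flushes those timesteps of one parallel buffer that
    # already have a full reward horizon of successors, without a terminal having occurred.
    # For trace_decay == 1.0 this is the n-step return
    #     R_t = r_t + gamma * r_{t+1} + ... + gamma^{n-1} * r_{t+n-1} + gamma^n * V(s_{t+n})
    # (with V predicted by the baseline if predict_horizon_values == 'early', else zero),
    # computed as a reverse fold over the horizon; otherwise the lambda-return recursion
    # mixes bootstrapped values into every step. Completed transitions are enqueued into
    # memory and buffer_start is advanced by num_complete.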
def _nonterminal_experience(
self, *, parallel, buffer_start, buffer_index, reward_horizon, num_complete, reward_discount
):
# (similar to _terminal_experience_parallel)
one = tf_util.constant(value=1, dtype='int')
capacity = tf_util.constant(value=self.buffer_capacity, dtype='int')
# Whether to predict horizon values now
if self.predict_horizon_values != 'early':
assert self.trace_decay.is_constant(value=1.0)
horizon_values = tf_util.zeros(shape=(num_complete,), dtype='float')
else:
# Baseline horizon
baseline_horizon = self.baseline.past_horizon(on_policy=True)
if self.trace_decay.is_constant(value=1.0):
assertion = tf.debugging.assert_less_equal(
x=baseline_horizon, y=reward_horizon,
message="Baseline on-policy horizon greater than reward estimation horizon "
"currently not supported if prediction_horizon_values = \"early\"."
)
else:
zero = tf_util.constant(value=0, dtype='int')
assertion = tf.debugging.assert_less_equal(
x=baseline_horizon, y=zero,
message="Baseline on-policy horizon currently not supported if "
"trace_decay != 1.0."
)
with tf.control_dependencies(control_inputs=(assertion,)):
# Index range to gather from buffers
if self.trace_decay.is_constant(value=1.0):
# Only indices relevant for horizon values
indices = tf.range(
start=(buffer_start + reward_horizon - baseline_horizon), limit=buffer_index
)
ints_end = num_complete
auxs_start = baseline_horizon
horizons_start = tf.range(num_complete)
horizons_length = tf.fill(dims=(num_complete,), value=(baseline_horizon + one))
else:
# All indices
indices = tf.range(start=(buffer_start + one), limit=buffer_index)
ints_end = None
auxs_start = None
horizons_start = tf.range(buffer_index - buffer_start - one)
horizons_length = tf.ones_like(input=horizons_start)
indices = tf.math.mod(x=indices, y=capacity)
# Return-sequence per timestep, as horizons indexing tensor
horizons = tf.stack(values=(horizons_start, horizons_length), axis=1)
# Gather states
function = (lambda x: tf.gather(params=x[parallel], indices=indices))
states = self.states_buffer.fmap(function=function, cls=TensorDict)
# Gather internals, only for return-sequence start
function = (lambda x: tf.gather(params=x[parallel], indices=indices[:ints_end]))
key = ('baseline' if self.separate_baseline else 'policy')
if len(self.internals_spec[key]) > 0:
internals = self.internals_buffer[key].fmap(function=function, cls=TensorDict)
else:
internals = TensorDict()
# Gather auxiliaries (and actions), only for return-sequence end
function = (lambda x: tf.gather(params=x[parallel], indices=indices[auxs_start:]))
auxiliaries = self.auxiliaries_buffer.fmap(function=function, cls=TensorDict)
# Predict values
if self.predict_action_values:
# TODO: option to re-sample action deterministically?
actions = self.actions_buffer.fmap(function=function, cls=TensorDict)
values = self.baseline.action_value(
states=states, horizons=horizons, internals=internals,
auxiliaries=auxiliaries, actions=actions
)
else:
values = self.baseline.state_value(
states=states, horizons=horizons, internals=internals,
auxiliaries=auxiliaries
)
# Horizon values
if self.trace_decay.is_constant(value=1.0):
horizon_values = values
else:
horizon_values = values[reward_horizon - one:]
# Gather all rewards (incl return-horizon) from buffer
indices = tf.range(start=buffer_start, limit=(buffer_index - one))
indices = tf.math.mod(x=indices, y=capacity)
reward = tf.gather(params=self.reward_buffer[parallel], indices=indices)
# Recursive return
if self.trace_decay.is_constant(value=1.0):
# Discounted cumulative sum
def recursive_return(next_return, index):
return reward[index: index + num_complete] + reward_discount * next_return
else:
# TD-lambda
one_float = tf_util.constant(value=1.0, dtype='float')
trace_decay = self.trace_decay.value()
def recursive_return(next_return, index):
next_value = values[index: index + num_complete]
next_return = (one_float - trace_decay) * next_value + trace_decay * next_return
return reward[index: index + num_complete] + reward_discount * next_return
reward = tf.foldr(
fn=recursive_return, elems=tf.range(reward_horizon), initializer=horizon_values
)
# Gather other values of completed timesteps from buffers
indices = tf.range(start=buffer_start, limit=(buffer_start + num_complete))
indices = tf.math.mod(x=indices, y=capacity)
function = (lambda x: tf.gather(params=x[parallel], indices=indices))
states = self.states_buffer.fmap(function=function, cls=TensorDict)
internals = self.internals_buffer.fmap(function=function, cls=TensorDict)
auxiliaries = self.auxiliaries_buffer.fmap(function=function, cls=TensorDict)
actions = self.actions_buffer.fmap(function=function, cls=TensorDict)
terminal = function(self.terminal_buffer)
# Store completed timesteps
experienced = self.memory.enqueue(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=terminal, reward=reward
)
# Increment buffer start index
with tf.control_dependencies(control_inputs=(indices,)):
updates = tf.expand_dims(input=num_complete, axis=0)
indices = tf.expand_dims(input=tf.expand_dims(input=parallel, axis=0), axis=1)
value = tf.tensor_scatter_nd_add(
tensor=self.buffer_start, indices=indices, updates=updates
)
assignment = self.buffer_start.assign(value=value)
# sparse_delta = tf.IndexedSlices(values=num_complete, indices=parallel)
# assignment = self.buffer_start.scatter_add(sparse_delta=sparse_delta)
return tf.group((experienced, assignment))
@tf_function(num_args=6)
def core_experience(self, *, states, internals, auxiliaries, actions, terminal, reward):
episode_length = tf_util.cast(x=tf.shape(input=terminal)[0], dtype='int')
reward_discount = self.reward_discount.value()
if self.reward_horizon == 'episode':
# Reward horizon is entire episode
reward = self._terminal_experience_iterative(
episode_length=episode_length, reward_discount=reward_discount, states=states,
internals=internals, auxiliaries=auxiliaries, actions=actions, reward=reward,
terminal=terminal
)
else:
            # Optimize required loop iterations: decide whether to process the remaining timesteps
# - iteratively, if remaining episode length is at most reward horizon
# - in parallel, if reward horizon is less than remaining episode length
reward_horizon = self.reward_horizon.value()
def true_fn():
return self._terminal_experience_iterative(
episode_length=episode_length, reward_discount=reward_discount, states=states,
internals=internals, auxiliaries=auxiliaries, actions=actions, reward=reward,
terminal=terminal
)
def false_fn():
return self._terminal_experience_parallel(
episode_length=episode_length, reward_horizon=reward_horizon,
reward_discount=reward_discount, states=states, internals=internals,
auxiliaries=auxiliaries, actions=actions, reward=reward, terminal=terminal
)
reward = tf.cond(
pred=(episode_length <= reward_horizon), true_fn=true_fn, false_fn=false_fn
)
# Store episode
return self.memory.enqueue(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions,
terminal=terminal, reward=reward
)
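    # _terminal_experience_iterative (below) computes returns backwards over the whole
    # episode with tf.scan, using the terminal/bootstrap value as initializer:
    #     discounted return:  G_t = r_t + gamma * G_{t+1}
    #     lambda-return:      G_t = r_t + gamma * ((1 - lambda) * V(s_{t+1}) + lambda * G_{t+1})
    # The "(... - reward[-1]) / reward_discount" corrections compensate for the initializer
    # itself being pushed through one scan step (the scan's last output is
    # reward[-1] + reward_discount * initializer).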
def _terminal_experience_iterative(
self, *, episode_length, reward_discount,
states, internals, auxiliaries, actions, reward, terminal
):
zero = tf_util.constant(value=0, dtype='int')
one = tf_util.constant(value=1, dtype='int')
zero_float = tf_util.constant(value=0.0, dtype='float')
internals = (internals['baseline'] if self.separate_baseline else internals['policy'])
if self.trace_decay.is_constant(value=1.0):
# Whether to predict horizon/terminal values now
if self.predict_horizon_values != 'early':
# Whether to predict all or only abort-terminals
# (-reward[-1] since terminal state value will be predicted)
terminal_value = -reward[-1] / reward_discount
if not self.predict_terminal_values:
is_terminal = tf.math.equal(x=terminal[-1], y=one)
terminal_value = tf.where(condition=is_terminal, x=zero_float, y=terminal_value)
else:
def predict_terminal_value():
# Baseline horizon
baseline_horizon = self.baseline.past_horizon(on_policy=True)
baseline_horizon = tf.math.minimum(x=baseline_horizon, y=episode_length)
# Single-step horizon
horizon_start = episode_length - one - baseline_horizon
horizons = tf.expand_dims(
input=tf.stack(values=(zero, baseline_horizon + one)), axis=0
)
# Predict values
if self.predict_action_values:
# TODO: option to re-sample action deterministically?
# Use given actions since early estimate
# if self.separate_baseline:
# policy_horizon = self.policy.past_horizon(on_policy=True)
# policy_horizon = tf.math.minimum(x=policy_horizon, y=episode_length)
# policy_horizon_start = terminal_index - policy_horizon
# else:
# policy_horizon_start = past_horizon_start
# deterministic = tf_util.constant(value=True, dtype='bool')
# _actions, _ = self.policy.act(
# states=states[policy_horizon_start:], horizons=horizons[:maybe_one],
# internals=internals['policy'][policy_horizon_start: policy_horizon_start + maybe_one],
# auxiliaries=auxiliaries[terminal_index:], deterministic=deterministic,
# independent=True
# )
terminal_value = self.baseline.action_value(
states=states[horizon_start:], horizons=horizons,
internals=internals[horizon_start: horizon_start + one],
auxiliaries=auxiliaries[-1:],
actions=actions[-1:]
)
else:
terminal_value = self.baseline.state_value(
states=states[horizon_start:], horizons=horizons,
internals=internals[horizon_start: horizon_start + one],
auxiliaries=auxiliaries[-1:]
)
# Modification to correct for use as initializer in tf.scan
# (-reward[-1] since terminal state value will be predicted)
return (terminal_value[0] - reward[-1]) / reward_discount
# Whether to predict all or only abort-terminals
if self.predict_terminal_values:
terminal_value = predict_terminal_value()
else:
is_terminal = tf.math.equal(x=terminal[-1], y=one)
terminal_value = tf.cond(
pred=is_terminal, true_fn=(lambda: zero_float),
false_fn=predict_terminal_value
)
# Discounted cumulative sum return
def recursive_return(next_return, current_reward):
return current_reward + reward_discount * next_return
return tf.scan(
fn=recursive_return, elems=reward, initializer=terminal_value, reverse=True
)
else:
# Baseline horizon
baseline_horizon = self.baseline.past_horizon(on_policy=True)
assertion = tf.debugging.assert_equal(
x=baseline_horizon, y=zero,
message="Baseline cannot have on-policy horizon if trace_decay != 1.0."
)
with tf.control_dependencies(control_inputs=(assertion,)):
# Baseline-horizon-sequence per timestep, as horizons indexing tensor
horizons_start = tf.range(episode_length - one)
horizons_length = tf.fill(dims=(episode_length - one,), value=one)
horizons = tf.stack(values=(horizons_start, horizons_length), axis=1)
if self.predict_action_values:
# TODO: option to re-sample action deterministically?
values = self.baseline.action_value(
states=states[1:], horizons=horizons, internals=internals[1:],
auxiliaries=auxiliaries[1:], actions=actions[1:]
)
else:
values = self.baseline.state_value(
states=states[1:], horizons=horizons, internals=internals[1:],
auxiliaries=auxiliaries[1:]
)
# Modification to correct for use as initializer in tf.scan
# (-reward[-1] since terminal state value will be predicted)
terminal_value = (values[-1] - reward[-1]) / reward_discount
# Whether to predict all or only abort-terminals
if not self.predict_terminal_values:
is_terminal = tf.math.equal(x=terminal[-1], y=one)
terminal_value = tf.where(condition=is_terminal, x=zero_float, y=terminal_value)
values = tf.concat(values=(values, [terminal_value]), axis=0)
# TD-lambda return
one_float = tf_util.constant(value=1.0, dtype='float')
trace_decay = self.trace_decay.value()
def recursive_return(next_return, reward_value):
current_reward, next_value = reward_value
next_return = (one_float - trace_decay) * next_value + trace_decay * next_return
return current_reward + reward_discount * next_return
return tf.scan(
fn=recursive_return, elems=(reward, values), initializer=terminal_value,
reverse=True
)
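    # _terminal_experience_parallel (below) handles episodes longer than the reward horizon:
    # rewards are padded with the terminal/bootstrap value and zeros so that every timestep
    # has exactly reward_horizon successors, and tf.foldr then adds the shifted reward
    # slices for all timesteps at once instead of scanning the episode step by step.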
def _terminal_experience_parallel(
self, *, episode_length, reward_horizon, reward_discount,
states, internals, auxiliaries, actions, reward, terminal
):
# (similar to _nonterminal_experience)
one = tf_util.constant(value=1, dtype='int')
internals = (internals['baseline'] if self.separate_baseline else internals['policy'])
# Whether to predict horizon values now
if self.predict_horizon_values != 'early':
assert self.trace_decay.is_constant(value=1.0)
# Whether to predict all or only abort-terminals
terminal_value = tf_util.constant(value=0.0, dtype='float')
if not self.predict_terminal_values:
is_terminal = tf.math.equal(x=terminal[-1], y=one)
terminal_value = tf.where(condition=is_terminal, x=reward[-1], y=terminal_value)
# Horizon-expanded rewards and values
horizon_values = tf_util.zeros(shape=(episode_length,), dtype='float')
reward = tf.concat(
values=(reward[:-1], [terminal_value], horizon_values[:reward_horizon]), axis=0
)
else:
# Baseline horizon
baseline_horizon = self.baseline.past_horizon(on_policy=True)
assertions = list() # (control dependency below, before baseline call)
if not self.trace_decay.is_constant(value=1.0):
zero = tf_util.constant(value=0, dtype='int')
assertions.append(tf.debugging.assert_equal(
x=baseline_horizon, y=zero,
message="Baseline cannot have on-policy horizon if trace_decay != 1.0."
))
# Index starts/ends
if self.trace_decay.is_constant(value=1.0):
# Only indices relevant for horizon values
reward_horizon_start = reward_horizon
zero = tf_util.constant(value=0, dtype='int')
baseline_horizon_start = tf.maximum(
x=(reward_horizon_start - baseline_horizon), y=zero
)
baseline_horizon_end = episode_length - baseline_horizon
baseline_horizon_end = tf.maximum(x=baseline_horizon_end, y=baseline_horizon_start)
horizons_start = tf.range(baseline_horizon_end - baseline_horizon_start)
horizons_length = reward_horizon_start + horizons_start
horizons_length = tf.math.minimum(x=horizons_length, y=(baseline_horizon + one))
else:
# All indices
reward_horizon_start = 1
baseline_horizon_start = 1
baseline_horizon_end = None
horizons_start = tf.range(episode_length - one)
horizons_length = tf.ones_like(input=horizons_start)
# Baseline-horizon-sequence per timestep, as horizons indexing tensor
horizons = tf.stack(values=(horizons_start, horizons_length), axis=1)
# Predict values
with tf.control_dependencies(control_inputs=assertions):
if self.predict_action_values:
# TODO: option to re-sample action deterministically?
values = self.baseline.action_value(
states=states[baseline_horizon_start:],
horizons=horizons,
internals=internals[baseline_horizon_start: baseline_horizon_end],
auxiliaries=auxiliaries[reward_horizon_start:],
actions=actions[reward_horizon_start:]
)
else:
values = self.baseline.state_value(
states=states[baseline_horizon_start:],
horizons=horizons,
internals=internals[baseline_horizon_start: baseline_horizon_end],
auxiliaries=auxiliaries[reward_horizon_start:]
)
# Whether to predict all or only abort-terminals
terminal_value = values[-1]
if not self.predict_terminal_values:
is_terminal = tf.math.equal(x=terminal[-1], y=one)
terminal_value = tf.where(condition=is_terminal, x=reward[-1], y=terminal_value)
# Horizon-expanded rewards and values
zeros_reward_horizon = tf_util.zeros(shape=(reward_horizon - one,), dtype='float')
reward = tf.concat(values=(reward[:-1], [terminal_value], zeros_reward_horizon), axis=0)
zeros_reward_horizon = tf_util.zeros(shape=(reward_horizon,), dtype='float')
values = tf.concat(values=(values, zeros_reward_horizon), axis=0)
# Horizon values
if self.trace_decay.is_constant(value=1.0):
horizon_values = values
else:
horizon_values = values[reward_horizon - one:]
# Recursive return
if self.trace_decay.is_constant(value=1.0):
# Discounted cumulative sum
def recursive_return(next_return, index):
return reward[index: index + episode_length] + reward_discount * next_return
else:
# TD-lambda
one_float = tf_util.constant(value=1.0, dtype='float')
trace_decay = self.trace_decay.value()
def recursive_return(next_return, index):
next_value = values[index: index + episode_length]
next_return = (one_float - trace_decay) * next_value + trace_decay * next_return
return reward[index: index + episode_length] + reward_discount * next_return
return tf.foldr(
fn=recursive_return, elems=tf.range(reward_horizon), initializer=horizon_values
)
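    # core_update (below) performs one optimization step: sample a batch of timesteps or
    # episodes from memory, fetch the predecessor state sequences and initial internals
    # required by the policy and baseline horizons, turn stored rewards into returns
    # (optionally completing 'late' horizon values, subtracting the baseline prediction as
    # an advantage estimate and applying GAE), optionally update a separate baseline with
    # its own optimizer, run the policy optimizer, and finally emit entropy / KL-divergence
    # / variable summaries and increment the update counter.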
@tf_function(num_args=0)
def core_update(self):
zero = tf_util.constant(value=0, dtype='int')
one = tf_util.constant(value=1, dtype='int')
true = tf_util.constant(value=True, dtype='bool')
# Retrieve batch
batch_size = self.update_batch_size.value()
if self.update_unit == 'timesteps':
# Timestep-based batch
# Dependency horizon
past_horizon = tf.math.maximum(
x=self.policy.past_horizon(on_policy=False),
y=self.baseline.past_horizon(on_policy=False)
)
if self.predict_horizon_values != 'late':
future_horizon = zero
elif self.reward_horizon == 'episode':
future_horizon = tf_util.constant(value=self.max_episode_timesteps, dtype='int')
else:
future_horizon = self.reward_horizon.value()
indices = self.memory.retrieve_timesteps(
n=batch_size, past_horizon=past_horizon, future_horizon=future_horizon
)
elif self.update_unit == 'episodes':
# Episode-based batch
indices = self.memory.retrieve_episodes(n=batch_size)
# Retrieve states and internals
policy_horizon = self.policy.past_horizon(on_policy=False)
if self.separate_baseline and self.baseline_optimizer is None:
assertions = list()
if self.config.create_tf_assertions:
assertions.append(tf.debugging.assert_equal(
x=policy_horizon, y=self.baseline.past_horizon(on_policy=False),
message="Policy and baseline cannot depend on a different number of previous "
"states if baseline_optimizer is None."
))
with tf.control_dependencies(control_inputs=assertions):
policy_horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=policy_horizon, sequence_values=('states',),
initial_values=('internals',)
)
baseline_horizons = policy_horizons
baseline_states = policy_states = sequence_values['states']
internals = policy_internals = initial_values['internals']
if self.separate_baseline:
baseline_internals = policy_internals['baseline']
else:
baseline_internals = policy_internals
else:
if self.baseline_optimizer is None:
policy_horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=policy_horizon, sequence_values=('states',),
initial_values=('internals',)
)
policy_states = sequence_values['states']
internals = policy_internals = initial_values['internals']
elif len(self.internals_spec['policy']) > 0:
policy_horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=policy_horizon, sequence_values=('states',),
initial_values=('internals/policy',)
)
policy_states = sequence_values['states']
internals = initial_values['internals']
policy_internals = initial_values['internals/policy']
else:
policy_horizons, sequence_values = self.memory.predecessors(
indices=indices, horizon=policy_horizon, sequence_values=('states',),
initial_values=()
)
policy_states = sequence_values['states']
internals = policy_internals = TensorDict()
            # Retrieve baseline states and internals (TODO: optimize)
baseline_horizon = self.baseline.past_horizon(on_policy=False)
if self.separate_baseline:
if len(self.internals_spec['baseline']) > 0:
baseline_horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=baseline_horizon, sequence_values=('states',),
initial_values=('internals/baseline',)
)
baseline_states = sequence_values['states']
internals = initial_values['internals']
baseline_internals = initial_values['internals/baseline']
else:
baseline_horizons, sequence_values = self.memory.predecessors(
indices=indices, horizon=baseline_horizon, sequence_values=('states',),
initial_values=()
)
baseline_states = sequence_values['states']
internals = baseline_internals = TensorDict()
else:
if len(self.internals_spec['policy']) > 0:
baseline_horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=baseline_horizon, sequence_values=('states',),
initial_values=('internals/policy',)
)
baseline_states = sequence_values['states']
internals = initial_values['internals']
baseline_internals = initial_values['internals/policy']
else:
baseline_horizons, sequence_values = self.memory.predecessors(
indices=indices, horizon=baseline_horizon, sequence_values=('states',),
initial_values=()
)
baseline_states = sequence_values['states']
internals = baseline_internals = TensorDict()
# Retrieve auxiliaries, actions, reward
if self.gae_decay.is_constant(value=0.0):
values = self.memory.retrieve(
indices=indices, values=('auxiliaries', 'actions', 'reward')
)
else:
values = self.memory.retrieve(
indices=indices, values=('auxiliaries', 'actions', 'reward', 'terminal')
)
terminal = values['terminal']
auxiliaries = values['auxiliaries']
actions = values['actions']
reward = values['reward']
# Return estimation
if self.predict_horizon_values == 'late':
reward = self._complete_horizon_values(
indices=indices, internals=internals, reward=reward
)
dependencies = [reward]
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='update-return', data=x, step=self.updates
))
dependencies.extend(self.track(label='reward', name='update-return', data=x))
if summarizer is not None:
summarizer.__exit__(None, None, None)
if self.return_processing is not None:
with tf.control_dependencies(control_inputs=dependencies):
reward = self.return_processing.apply(
x=reward, deterministic=true, independent=False
)
dependencies = [reward]
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='update-processed-return', data=x, step=self.updates
))
dependencies.extend(self.track(
label='reward', name='update-processed-return', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
baseline_arguments = TensorDict(
states=baseline_states, horizons=baseline_horizons, internals=baseline_internals,
auxiliaries=auxiliaries, actions=actions, reward=reward
)
if self.baseline_objective is not None:
baseline_arguments['reference'] = self.baseline_objective.reference(
states=baseline_states, horizons=baseline_horizons, internals=baseline_internals,
auxiliaries=auxiliaries, actions=actions, policy=self.baseline
)
if self.baseline_optimizer is not None and self.estimate_advantage != 'early':
def fn_kl_divergence(
*, states, horizons, internals, auxiliaries, actions, reward, reference
):
reference = self.baseline.kldiv_reference(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries
)
return self.baseline.kl_divergence(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries,
reference=reference
)
variables = tuple(self.baseline.trainable_variables)
kwargs = dict()
try:
ordered_names = [variable.name for variable in variables]
kwargs['source_variables'] = tuple(sorted(
self.policy.trainable_variables,
key=(lambda x: ordered_names.index(x.name.replace('/policy/', '/baseline/')))
))
except ValueError:
pass
dependencies.extend(baseline_arguments.flatten())
# Optimization
with tf.control_dependencies(control_inputs=dependencies):
optimized = self.baseline_optimizer.update(
arguments=baseline_arguments, variables=variables, fn_loss=self.baseline_loss,
fn_kl_divergence=fn_kl_divergence, **kwargs
)
dependencies = [optimized]
with tf.control_dependencies(control_inputs=dependencies):
if self.estimate_advantage is not False and not self.advantage_in_loss:
if self.predict_action_values:
# Use past actions since advantage R(s,a) - Q(s,a)
baseline_prediction = self.baseline.action_value(
states=baseline_states, horizons=baseline_horizons,
internals=baseline_internals, auxiliaries=auxiliaries, actions=actions
)
else:
baseline_prediction = self.baseline.state_value(
states=baseline_states, horizons=baseline_horizons,
internals=baseline_internals, auxiliaries=auxiliaries
)
reward = reward - baseline_prediction
dependencies = [reward]
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='update-advantage', data=x, step=self.updates
))
dependencies.extend(self.track(
label='reward', name='update-advantage', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
if self.advantage_processing is not None:
with tf.control_dependencies(control_inputs=dependencies):
reward = self.advantage_processing.apply(
x=reward, deterministic=true, independent=False
)
dependencies = [reward]
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='update-processed-advantage', data=x,
step=self.updates
))
dependencies.extend(self.track(
label='reward', name='update-processed-advantage', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
if not self.gae_decay.is_constant(value=0.0):
with tf.control_dependencies(control_inputs=dependencies):
# Requires consistent batch!!!
zero_float = tf_util.constant(value=0.0, dtype='float')
reward_discount = self.reward_discount.value()
gae_decay = self.gae_decay.value()
# Discounted cumulative sum
def recursive_gae(next_gae, advantage_terminal):
current_advantage, current_terminal = advantage_terminal
next_gae = tf.where(
condition=(current_terminal == zero), x=next_gae, y=zero_float
)
return current_advantage + reward_discount * gae_decay * next_gae
reward = tf.scan(
fn=recursive_gae, elems=(reward, terminal), initializer=zero_float,
reverse=True
)
dependencies = [reward]
if self.summaries == 'all' or 'reward' in self.summaries or \
self.tracking == 'all' or 'reward' in self.tracking:
if self.summaries == 'all' or 'reward' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
x = tf.math.reduce_mean(input_tensor=reward, axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name='update-gae', data=x, step=self.updates
))
dependencies.extend(self.track(
label='reward', name='update-gae', data=x
))
if summarizer is not None:
summarizer.__exit__(None, None, None)
if self.baseline_optimizer is None:
policy_only_internals = policy_internals['policy']
else:
policy_only_internals = policy_internals
reference = self.objective.reference(
states=policy_states, horizons=policy_horizons, internals=policy_only_internals,
auxiliaries=auxiliaries, actions=actions, policy=self.policy
)
if self.baseline_objective is not None and self.baseline_loss_weight is not None and \
not self.baseline_loss_weight.is_constant(value=0.0):
reference = TensorDict(policy=reference, baseline=baseline_arguments['reference'])
policy_arguments = TensorDict(
states=policy_states, horizons=policy_horizons, internals=policy_internals,
auxiliaries=auxiliaries, actions=actions, reward=reward, reference=reference
)
if self.estimate_advantage is not False and self.advantage_in_loss:
variables = tuple(self.trainable_variables)
def fn_loss(*, states, horizons, internals, auxiliaries, actions, reward, reference):
assertions = list()
if self.config.create_tf_assertions:
past_horizon = self.baseline.past_horizon(on_policy=False)
# TODO: remove restriction
assertions.append(tf.debugging.assert_less_equal(
x=(horizons[:, 1] - one), y=past_horizon,
message="Baseline horizon cannot be greater than policy horizon."
))
with tf.control_dependencies(control_inputs=assertions):
if self.predict_action_values:
# Use past actions since advantage R(s,a) - Q(s,a)
baseline_prediction = self.baseline.action_value(
states=states, horizons=horizons, internals=internals['baseline'],
auxiliaries=auxiliaries, actions=actions
)
else:
baseline_prediction = self.baseline.state_value(
states=states, horizons=horizons, internals=internals['baseline'],
auxiliaries=auxiliaries
)
reward = reward - baseline_prediction
def fn_summary1():
return tf.math.reduce_mean(input_tensor=reward, axis=0)
dependencies = self.summary(
label='reward', name='update-advantage', data=fn_summary1, step='updates'
)
dependencies.extend(self.track(
label='reward', name='update-advantage', data=fn_summary1
))
if self.advantage_processing is not None:
with tf.control_dependencies(control_inputs=dependencies):
reward = self.advantage_processing.apply(
x=reward, deterministic=true, independent=False
)
def fn_summary2():
return tf.math.reduce_mean(input_tensor=reward, axis=0)
dependencies = self.summary(
label='reward', name='update-processed-advantage',
data=fn_summary2, step='updates'
)
dependencies.extend(self.track(
label='reward', name='update-processed-advantage', data=fn_summary2
))
with tf.control_dependencies(control_inputs=dependencies):
return self.loss(
states=states, horizons=horizons, internals=internals,
auxiliaries=auxiliaries, actions=actions, reward=reward, reference=reference
)
else:
variables = tuple(self.policy.trainable_variables)
fn_loss = self.loss
def fn_kl_divergence(
*, states, horizons, internals, auxiliaries, actions, reward, reference
):
if self.baseline_optimizer is None:
internals = internals['policy']
# TODO: Policy require
reference = self.policy.kldiv_reference(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries
)
return self.policy.kl_divergence(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries,
reference=reference
)
kwargs = dict()
if self.separate_baseline:
try:
ordered_names = [variable.name for variable in variables]
kwargs['source_variables'] = tuple(sorted(
self.baseline.trainable_variables,
key=(lambda x: ordered_names.index(x.name.replace('/baseline/', '/policy/')))
))
except ValueError:
pass
# if self.global_model is not None:
# assert 'global_variables' not in kwargs
# kwargs['global_variables'] = tuple(self.global_model.trainable_variables)
dependencies.extend(policy_arguments.flatten())
# Hack: KL divergence summary: reference before update
if isinstance(self.policy, StochasticPolicy) and (
self.summaries == 'all' or 'kl-divergence' in self.summaries or
self.tracking == 'all' or 'kl-divergence' in self.tracking
):
kldiv_reference = self.policy.kldiv_reference(
states=policy_states, horizons=policy_horizons, internals=policy_only_internals,
auxiliaries=auxiliaries
)
dependencies.extend(kldiv_reference.flatten())
# Optimization
with tf.control_dependencies(control_inputs=dependencies):
optimized = self.optimizer.update(
arguments=policy_arguments, variables=variables, fn_loss=fn_loss,
fn_kl_divergence=fn_kl_divergence, **kwargs
)
dependencies = [optimized]
if self.baseline_optimizer is not None and self.estimate_advantage == 'early':
def fn_kl_divergence(
*, states, horizons, internals, auxiliaries, actions, reward, reference
):
reference = self.baseline.kldiv_reference(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries
)
return self.baseline.kl_divergence(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries,
reference=reference
)
variables = tuple(self.baseline.trainable_variables)
kwargs = dict()
try:
ordered_names = [variable.name for variable in variables]
kwargs['source_variables'] = tuple(sorted(
self.policy.trainable_variables,
key=(lambda x: ordered_names.index(x.name.replace('/policy/', '/baseline/')))
))
except ValueError:
pass
dependencies.extend(baseline_arguments.flatten())
# Optimization
with tf.control_dependencies(control_inputs=dependencies):
optimized = self.baseline_optimizer.update(
arguments=baseline_arguments, variables=variables, fn_loss=self.baseline_loss,
fn_kl_divergence=fn_kl_divergence, **kwargs
)
dependencies = [optimized]
# Update summaries
with tf.control_dependencies(control_inputs=dependencies):
dependencies = list()
# Entropy summaries
if isinstance(self.policy, StochasticPolicy) and (
self.summaries == 'all' or 'entropy' in self.summaries or
self.tracking == 'all' or 'entropy' in self.tracking
):
if self.summaries == 'all' or 'entropy' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
if len(self.actions_spec) > 1:
entropies = self.policy.entropies(
states=policy_states, horizons=policy_horizons,
internals=policy_only_internals, auxiliaries=auxiliaries
)
for name, spec in self.actions_spec.items():
entropies[name] = tf.reshape(tensor=entropies[name], shape=(-1,))
entropy = tf.math.reduce_mean(input_tensor=entropies[name], axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name=('entropies/' + name), data=entropy, step=self.updates
))
dependencies.extend(self.track(
label='entropy', name=('entropies/' + name), data=entropy
))
entropy = tf.concat(values=tuple(entropies.values()), axis=0)
else:
entropy = self.policy.entropy(
states=policy_states, horizons=policy_horizons,
internals=policy_only_internals, auxiliaries=auxiliaries
)
entropy = tf.math.reduce_mean(input_tensor=entropy, axis=0)
if summarizer is not None:
dependencies.append(
tf.summary.scalar(name='entropy', data=entropy, step=self.updates)
)
dependencies.extend(self.track(label='entropy', name='entropy', data=entropy))
if summarizer is not None:
summarizer.__exit__(None, None, None)
# KL divergence summaries
if isinstance(self.policy, StochasticPolicy) and (
self.summaries == 'all' or 'kl-divergence' in self.summaries or
self.tracking == 'all' or 'kl-divergence' in self.tracking
):
if self.summaries == 'all' or 'kl-divergence' in self.summaries:
summarizer = self.summarizer.as_default()
summarizer.__enter__()
else:
summarizer = None
if len(self.actions_spec) > 1:
kl_divs = self.policy.kl_divergences(
states=policy_states, horizons=policy_horizons,
internals=policy_only_internals, auxiliaries=auxiliaries,
reference=kldiv_reference
)
for name, spec in self.actions_spec.items():
kl_divs[name] = tf.reshape(tensor=kl_divs[name], shape=(-1,))
kl_div = tf.math.reduce_mean(input_tensor=kl_divs[name], axis=0)
if summarizer is not None:
dependencies.append(tf.summary.scalar(
name=('kl-divergences/' + name), data=kl_div, step=self.updates
))
dependencies.extend(self.track(
label='kl-divergence', name=('kl-divergences/' + name), data=kl_div
))
kl_div = tf.concat(values=tuple(kl_divs.values()), axis=0)
else:
kl_div = self.policy.kl_divergence(
states=policy_states, horizons=policy_horizons,
internals=policy_only_internals, auxiliaries=auxiliaries,
reference=kldiv_reference
)
kl_div = tf.math.reduce_mean(input_tensor=kl_div, axis=0)
if summarizer is not None:
dependencies.append(
tf.summary.scalar(name='kl-divergence', data=kl_div, step=self.updates)
)
dependencies.extend(
self.track(label='kl-divergence', name='kl-divergence', data=kl_div)
)
if summarizer is not None:
summarizer.__exit__(None, None, None)
# Increment update
with tf.control_dependencies(control_inputs=dependencies):
assignment = self.updates.assign_add(delta=one, read_value=False)
with tf.control_dependencies(control_inputs=(assignment,)):
dependencies = list()
# Variables summaries
if self.summaries == 'all' or 'variables' in self.summaries:
with self.summarizer.as_default():
for variable in self.trainable_variables:
name = variable.name
assert name.startswith(self.name + '/') and name[-2:] == ':0'
name = 'variables/' + name[len(self.name) + 1: -2]
x = tf.math.reduce_mean(input_tensor=variable)
dependencies.append(tf.summary.scalar(name=name, data=x, step=self.updates))
with tf.control_dependencies(control_inputs=dependencies):
return tf_util.identity(input=optimized)
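    # _complete_horizon_values (below) is used when predict_horizon_values == 'late': for
    # each sampled index it looks up the successor reward_horizon steps ahead (or fewer if
    # the episode ends earlier), predicts that state's value (or action value) with the
    # baseline, and adds reward_discount ** offset * horizon_value to the stored reward;
    # for true terminals the bootstrap is zeroed unless predict_terminal_values is set.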
def _complete_horizon_values(self, indices, internals, reward):
zero = tf_util.constant(value=0, dtype='int')
one = tf_util.constant(value=1, dtype='int')
true = tf_util.constant(value=True, dtype='bool')
reward_horizon = self.reward_horizon.value()
reward_discount = self.reward_discount.value()
# TODO: no need for memory if update episode-based (or not random replay?)
# Internal values to retrieve, depending on different internals configurations
baseline_internals_values = 'internals/baseline'
if self.predict_action_values and self.separate_baseline:
internals_values = 'internals'
elif self.separate_baseline:
if len(self.internals_spec['baseline']) > 0:
internals_values = 'internals/baseline'
else:
internals_values = None
else:
if len(self.internals_spec['policy']) > 0:
internals_values = 'internals/policy'
baseline_internals_values = 'internals/policy'
else:
internals_values = None
if self.baseline.max_past_horizon(on_policy=False) == 0:
# Horizons indexing tensor
batch_size = tf_util.cast(x=tf.shape(input=indices)[0], dtype='int')
starts = tf.range(batch_size)
lengths = tf.ones_like(input=indices)
horizons = tf.stack(values=(starts, lengths), axis=1)
# TODO: remove restriction
if self.predict_action_values and self.separate_baseline:
assert self.policy.max_past_horizon(on_policy=False) == 0
# Retrieve horizon values from memory
values = ('states', 'auxiliaries', 'terminal')
if internals_values is not None:
values += (internals_values,)
offsets, values = self.memory.successors(
indices=indices, horizon=reward_horizon, sequence_values=(), final_values=values
)
states = values['states']
policy_internals = values.get('internals/policy')
baseline_internals = values.get(baseline_internals_values, TensorDict())
auxiliaries = values['auxiliaries']
terminal = values['terminal']
# -1 since successors length >= 1
offsets = offsets - one
else:
baseline_horizon = self.baseline.past_horizon(on_policy=False)
assertions = list()
if self.config.create_tf_assertions and self.predict_action_values:
policy_horizon = self.policy.past_horizon(on_policy=False)
# TODO: remove restriction
assertions.append(tf.debugging.assert_equal(
x=policy_horizon, y=baseline_horizon,
message="Policy and baseline cannot depend on a different number of "
"previous states if predict_action_values is True."
))
with tf.control_dependencies(control_inputs=assertions):
# (Tried to do this more efficiently by differentiating between
# reward horizon >/=/< baseline horizon, but gets too complex since
# it needs to take into account episode start/end edge cases.)
# Retrieve horizon values from memory
offsets, values = self.memory.successors(
indices=indices, horizon=reward_horizon, sequence_values=(),
final_values=('auxiliaries', 'terminal')
)
auxiliaries = values['auxiliaries']
terminal = values['terminal']
# -1 since successors length >= 1
offsets = offsets - one
# Retrieve baseline states sequence and initial internals from memory
if internals_values is None:
horizons, sequence_values = self.memory.predecessors(
indices=(indices + offsets), horizon=baseline_horizon,
sequence_values=('states',), initial_values=()
)
policy_internals = None
baseline_internals = TensorDict()
else:
horizons, sequence_values, initial_values = self.memory.predecessors(
indices=indices, horizon=(baseline_horizon - reward_horizon),
sequence_values=('states',), initial_values=(internals_values,)
)
policy_internals = initial_values.get('internals/policy')
baseline_internals = initial_values.get(baseline_internals_values, TensorDict())
states = sequence_values['states']
# Predict horizon values
if self.predict_action_values:
actions, _ = self.policy.act(
states=states, horizons=horizons, internals=policy_internals,
auxiliaries=auxiliaries, deterministic=true, independent=True
)
horizon_values = self.baseline.action_value(
states=states, horizons=horizons, internals=baseline_internals,
auxiliaries=auxiliaries, actions=actions
)
else:
horizon_values = self.baseline.state_value(
states=states, horizons=horizons, internals=baseline_internals,
auxiliaries=auxiliaries
)
# Value horizon assertions
assertions = list()
if self.config.create_tf_assertions:
assertions.append(tf.debugging.assert_greater_equal(x=offsets, y=zero))
if self.baseline.max_past_horizon(on_policy=False) == 0:
baseline_horizon = self.baseline.past_horizon(on_policy=False)
assertions.append(tf.debugging.assert_less_equal(x=offsets, y=reward_horizon))
# Add appropriately discounted horizon values to reward
with tf.control_dependencies(control_inputs=assertions):
# Pow numerically stable since 0.0 <= discount <= 1.0
discounts = tf.math.pow(x=reward_discount, y=tf_util.cast(x=offsets, dtype='float'))
if not self.predict_terminal_values:
is_terminal = tf.math.equal(x=terminal, y=one)
zeros = tf.zeros_like(input=discounts)
discounts = tf.where(condition=is_terminal, x=zeros, y=discounts)
return reward + discounts * horizon_values
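    # loss (below) combines, per update: the mean policy objective loss, the regularization
    # loss (including optional entropy regularization via regularize()), and, if
    # baseline_loss_weight is set and non-zero, baseline_loss_weight * baseline_loss.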
@tf_function(num_args=7)
def loss(self, *, states, horizons, internals, auxiliaries, actions, reward, reference):
if self.baseline_optimizer is None:
policy_internals = internals['policy']
else:
policy_internals = internals
if self.baseline_objective is not None and self.baseline_loss_weight is not None and \
not self.baseline_loss_weight.is_constant(value=0.0):
policy_reference = reference['policy']
else:
policy_reference = reference
# Loss per instance
loss = self.objective.loss(
states=states, horizons=horizons, internals=policy_internals, auxiliaries=auxiliaries,
actions=actions, reward=reward, reference=policy_reference, policy=self.policy,
baseline=(self.baseline if self.separate_baseline else None)
)
# Objective loss
loss = tf.math.reduce_mean(input_tensor=loss, axis=0)
dependencies = self.summary(
label='loss', name='losses/policy-objective-loss', data=loss, step='updates'
)
dependencies.extend(self.track(label='loss', name='policy-objective-loss', data=loss))
# Regularization losses
regularization_loss = self.regularize(
states=states, horizons=horizons, internals=policy_internals, auxiliaries=auxiliaries
)
dependencies.extend(self.summary(
label='loss', name='losses/policy-regularization-loss', data=regularization_loss,
step='updates'
))
dependencies.extend(
self.track(label='loss', name='policy-regularization-loss', data=regularization_loss)
)
loss += regularization_loss
# Baseline loss
if self.baseline_loss_weight is not None and \
not self.baseline_loss_weight.is_constant(value=0.0):
if self.separate_baseline:
baseline_internals = internals['baseline']
else:
baseline_internals = policy_internals
if self.baseline_objective is not None:
baseline_reference = reference['baseline']
else:
baseline_reference = policy_reference
zero = tf_util.constant(value=0.0, dtype='float')
baseline_loss_weight = self.baseline_loss_weight.value()
def no_baseline_loss():
return zero
def apply_baseline_loss():
baseline_loss = self.baseline_loss(
states=states, horizons=horizons, internals=baseline_internals,
auxiliaries=auxiliaries, actions=actions, reward=reward,
reference=baseline_reference
)
return baseline_loss_weight * baseline_loss
loss += tf.cond(
pred=tf.math.equal(x=baseline_loss_weight, y=zero),
true_fn=no_baseline_loss, false_fn=apply_baseline_loss
)
dependencies.extend(self.summary(
label='loss', name='losses/policy-loss', data=loss, step='updates'
))
dependencies.extend(self.track(label='loss', name='policy-loss', data=loss))
with tf.control_dependencies(control_inputs=dependencies):
return tf_util.identity(input=loss)
@tf_function(num_args=4, overwrites_signature=True)
def regularize(self, *, states, horizons, internals, auxiliaries):
regularization_loss = super().regularize()
# Entropy regularization
if not self.entropy_regularization.is_constant(value=0.0):
zero = tf_util.constant(value=0.0, dtype='float')
entropy_regularization = self.entropy_regularization.value()
def no_entropy_regularization():
return zero
def apply_entropy_regularization():
entropy = self.policy.entropy(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries
)
entropy = tf.math.reduce_mean(input_tensor=entropy, axis=0)
return -entropy_regularization * entropy
regularization_loss += tf.cond(
pred=tf.math.equal(x=entropy_regularization, y=zero),
true_fn=no_entropy_regularization, false_fn=apply_entropy_regularization
)
return regularization_loss
@tf_function(num_args=7)
def baseline_loss(
self, *, states, horizons, internals, auxiliaries, actions, reward, reference
):
# Loss per instance
loss = self.baseline_objective.loss(
states=states, horizons=horizons, internals=internals, auxiliaries=auxiliaries,
actions=actions, reward=reward, reference=reference, policy=self.baseline
)
# Objective loss
loss = tf.math.reduce_mean(input_tensor=loss, axis=0)
dependencies = list()
if self.separate_baseline:
dependencies.extend(self.summary(
label='loss', name='losses/baseline-objective-loss', data=loss, step='updates'
))
dependencies.extend(
self.track(label='loss', name='baseline-objective-loss', data=loss)
)
# Regularization losses
regularization_loss = self.baseline.regularize()
dependencies.extend(self.summary(
label='loss', name='losses/baseline-regularization-loss',
data=regularization_loss, step='updates'
))
dependencies.extend(self.track(
label='loss', name='baseline-regularization-loss', data=regularization_loss
))
loss += regularization_loss
dependencies.extend(self.summary(
label='loss', name='losses/baseline-loss', data=loss, step='updates'
))
dependencies.extend(self.track(label='loss', name='baseline-loss', data=loss))
with tf.control_dependencies(control_inputs=dependencies):
return tf_util.identity(input=loss)
|
reinforceio/tensorforce
|
tensorforce/core/models/tensorforce.py
|
Python
|
apache-2.0
| 147,854
|
from Conjugate import verb
from random import randrange as r
def limit(number, max = 1689):
t1 = number + max
t2 = abs(number - max)
t3 = t2 - t1
t4 = t3 / 2
t5 = abs(t4) - t4
t6 = t5 / 2
return t6
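# Note: the arithmetic in limit() above is a branchless clamp of `number` into the range
# [0, max]; for example limit(-5) == 0, limit(100) == 100 and limit(2000) == 1689 with the
# default max.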
def readfile(path = "sorted verbs.txt"):
with open(path, "rb") as f:
f = f.read().decode("utf-8").split("\n")
data = []
for i in range(len(f)):
f[i] = f[i].split(" ")
if len(f[i]) == 2: data.append(f[i])
for i in range(len(data)):
data[i][1] = int(data[i][1].split("\r")[0])
return data
def bigtable():
table = []
for i in range(816, 2506):
table.append([i])
return table
def addtotable():
table = bigtable()
for i in range(len(table)):
for j in readfile():
if table[i][0] == j[1]: table[i] = j
return table
def endtable():
table = addtotable()
current = 0
for i in range(len(table)):
if len(table[current]) > len(table[i]):
table[i] = [0, table[i][0]]
table[i][0] = table[current][0]
else: current = i
return table
TABLE_OF_VERBS = endtable()
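# TABLE_OF_VERBS maps every score value from 816 to 2505 to a [verb, score] pair:
# readfile() parses "sorted verbs.txt", addtotable() places each verb at its score, and
# endtable() fills the remaining slots by carrying the most recent verb forward, so that
# choose_verb() can index the table directly with a score-derived range.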
def gen_possible_moods(chance, outof):
choose = r(0, outof)
if choose > chance: f = "Indicative"
else: f = "Conditional"
return f
def gen_possible_tenses(mood):
if mood == "Indicative": possible_tenses = ["Present", "Perfect", "Past", "Future I"]
else: possible_tenses = ["Present", "Perfect"]
return possible_tenses
def possible_persons():
return ["ich", "du", "er", "wir", "ihr", "Sie"]
def choose_verb(score, previous_verb):
    # Pick a verb from a score-dependent slice of TABLE_OF_VERBS, retrying a few times
    # so that the previous verb is not asked again immediately.
    selection = [previous_verb, 0]
    for _ in range(10):
        if selection[0] == previous_verb:
            selection = TABLE_OF_VERBS[r(limit(score-1016), limit(score-616)+1)] # 616 218
    return selection
def alternitive_answers(current_verb, mood, tense, person, score):
tmptbl = list()
for i in range(6):
tmptbl.append(i)
number_of_other_verb_answers = r(2,5)
other_verb = choose_verb(score, current_verb)[0]
old_verb = []
for i in range(number_of_other_verb_answers):
old_verb.append(tmptbl.pop(r(len(tmptbl))))
ans1 = verb.Conjugate(current_verb, "Indicative", "Present", possible_persons()[r(6)])
ans2 = verb.Conjugate(current_verb, "Indicative", "Perfect", possible_persons()[r(6)])
ans3 = verb.Conjugate(current_verb, "Indicative", "Past", possible_persons()[r(6)])
ans4 = verb.Conjugate(current_verb, "Indicative", "Future I", possible_persons()[r(6)])
ans5 = verb.Conjugate(current_verb, "Conditional", "Present", possible_persons()[r(6)])
ans6 = verb.Conjugate(current_verb, "Conditional", "Perfect", possible_persons()[r(6)])
answers = [ans1, ans2, ans3, ans4, ans5, ans6]
real_answer = verb.Conjugate(current_verb, mood, tense, person)
another_table = []
for i in range(6): another_table.append(i)
for i in range(len(answers)):
if answers[i] == real_answer:
another_table.pop(i)
answers[i] = answers[another_table[r(len(another_table))]]
oans1 = verb.Conjugate(other_verb, "Indicative", "Present", possible_persons()[r(6)])
oans2 = verb.Conjugate(other_verb, "Indicative", "Perfect", possible_persons()[r(6)])
oans3 = verb.Conjugate(other_verb, "Indicative", "Past", possible_persons()[r(6)])
oans4 = verb.Conjugate(other_verb, "Indicative", "Future I", possible_persons()[r(6)])
oans5 = verb.Conjugate(other_verb, "Conditional", "Present", possible_persons()[r(6)])
oans6 = verb.Conjugate(other_verb, "Conditional", "Perfect", possible_persons()[r(6)])
oanswers = [oans1, oans2, oans3, oans4, oans5, oans6]
aanswers = oanswers
for i in tmptbl:
aanswers[i] = answers[i]
rans = []
for i in range(len(aanswers)):
rans.append(aanswers.pop(r(len(aanswers))))
rans = rans[:3]
return rans
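# alternitive_answers() above builds the distractors for a question: conjugations of the
# asked verb in other mood/tense/person combinations, a random subset of which is swapped
# for conjugations of a second verb, with any entry equal to the real answer replaced and
# the shuffled result truncated to three alternatives.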
def gen_question(score, previous_infinitive = " "):
mood = gen_possible_moods(6, 24)
tenses = gen_possible_tenses(mood)
tense = tenses[int(r(4)/3)]
people = possible_persons()
person = people[r(len(people))]
if tense == "Present": multiplier = 0.8
elif tense == "Perfect": multiplier = 1
elif tense == "Past": multiplier = 1.5
elif tense == "Future I": multiplier = 1
if mood == "Conditional": multiplier *= 1.3
if person == "ich": multiplier *= 0.9
elif person == "du": multiplier *= 1
elif person == "er": multiplier *= 1
elif person == "wir": multiplier *= 0.9
elif person == "ihr": multiplier *= 1.2
elif person == "Sie": multiplier *= 1.1
if multiplier == 1.3: multiplier = 2
score = int(0.5+(score / multiplier))
current_verb = choose_verb(score, previous_infinitive)
conjugated_verb = verb.Conjugate(current_verb[0], mood, tense, person)
other_answers = alternitive_answers(current_verb[0], mood, tense, person, score)
if person == "er":
person = ["er", "sie", "man"][r(3)]
if tense == "Past": tense = "Imperfect"
if tense == "Future I": tense = "Future"
answer_tense = tense
question_parts = ["Fill the gap: ", person, " _________ (", current_verb[0], ") in the ", tense.lower(), " tense."]
if mood == "Conditional":
question_parts.insert(6, " conditional")
answer_tense += " conditional"
question = "".join(question_parts)
answer = conjugated_verb
    infinitive = current_verb[0]
    question_value = int(0.5+(current_verb[1] * multiplier))
    question = {"answer": answer, "alt answers" : other_answers,
                "question": question, "infinitive" : infinitive, "tense" : answer_tense, "q value" : question_value}
return question
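# Example of what gen_question() returns (hypothetical values, assuming the Conjugate
# module and "sorted verbs.txt" are available):
#     q = gen_question(score=1200)
#     q["question"]    -> "Fill the gap: wir _________ (gehen) in the present tense."
#     q["answer"]      -> the correct conjugation, q["alt answers"] -> three distractors,
#     q["infinitive"], q["tense"], q["q value"] -> metadata used for scoring.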
"""
def Conjugate(verb,mood,tense,person):
verb: see verbs.txt for possible verbs
mood: Indicative or Conditional (probs indic)
tense:
indicative = ["Present", "Perfect","Past","Pluperfect", "Future I","Future II"]
conditional = ["Present", "Perfect"]
person: ich, du, er, wir, ihr, sie, Sie
er is er, sie and man
eg verb.Conjugate("tragen", "Indicative", "Present", "ich")
{"ans" : "the answer", "other answers" : ["other ans", "other ans", "other ans", "etc"],
"infinitve" : "infintive", "tense" : "tense"}
"""
|
TheGoomy42/German-Verb-Conjugation-Quiz
|
Question_Generator.py
|
Python
|
mit
| 6,305
|
import pytest
from aioredis.errors import (
ProtocolError,
ReplyError,
AuthError,
MaxClientsError,
)
from aioredis.parser import PyReader
@pytest.fixture
def reader():
return PyReader()
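# The tests below feed raw RESP (REdis Serialization Protocol) frames to the reader:
# "+<str>\r\n" simple string, "-<msg>\r\n" error, ":<int>\r\n" integer,
# "$<len>\r\n<bytes>\r\n" bulk string ("$-1\r\n" -> None), "*<n>\r\n..." array
# ("*-1\r\n" -> None); reader.gets() returns False until a complete reply has been fed.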
def test_nothing(reader):
assert reader.gets() is False
def test_error_when_feeding_non_string(reader):
with pytest.raises(TypeError):
reader.feed(1)
@pytest.mark.parametrize('data', [
b'x', b'$5\r\nHello world',
b':None\r\n', b':1.2\r\n', b':1,2\r\n',
], ids=[
'Bad control char',
'Invalid bulk length',
'Invalid int - none',
'Invalid int - dot',
'Invalid int - comma',
])
def test_protocol_error(reader, data):
reader.feed(data)
with pytest.raises(ProtocolError):
reader.gets()
# not functional any more
with pytest.raises(ProtocolError):
reader.gets()
class CustomExc(Exception):
pass
@pytest.mark.parametrize('exc,arg', [
(RuntimeError, RuntimeError),
(CustomExc, lambda e: CustomExc(e)),
], ids=['RuntimeError', 'callable'])
def test_protocol_error_with_custom_class(exc, arg):
reader = PyReader(protocolError=arg)
reader.feed(b"x")
with pytest.raises(exc):
reader.gets()
@pytest.mark.parametrize('init', [
dict(protocolError="wrong"),
dict(replyError="wrong"),
], ids=['wrong protocolError', 'wrong replyError'])
def test_fail_with_wrong_error_class(init):
with pytest.raises(TypeError):
PyReader(**init)
def test_error_string(reader):
reader.feed(b"-error\r\n")
error = reader.gets()
assert isinstance(error, ReplyError)
assert error.args == ("error",)
@pytest.mark.parametrize('error_kind,data', [
(AuthError, b"-NOAUTH auth required\r\n"),
(AuthError, b"-ERR invalid password\r\n"),
(MaxClientsError, b"-ERR max number of clients reached\r\n"),
])
def test_error_construction(reader, error_kind, data):
reader.feed(data)
error = reader.gets()
assert isinstance(error, ReplyError)
assert isinstance(error, error_kind)
@pytest.mark.parametrize('exc,arg', [
(RuntimeError, RuntimeError),
(CustomExc, lambda e: CustomExc(e)),
], ids=['RuntimeError', 'callable'])
def test_error_string_with_custom_class(exc, arg):
reader = PyReader(replyError=arg)
reader.feed(b"-error\r\n")
error = reader.gets()
assert isinstance(error, exc)
assert error.args == ("error",)
def test_errors_in_nested_multi_bulk(reader):
reader.feed(b"*2\r\n-err0\r\n-err1\r\n")
for r, error in zip(("err0", "err1"), reader.gets()):
assert isinstance(error, ReplyError)
assert error.args == (r,)
def test_integer(reader):
value = 2**63-1 # Largest 64-bit signed integer
reader.feed((":%d\r\n" % value).encode("ascii"))
assert reader.gets() == value
def test_status_string(reader):
reader.feed(b"+ok\r\n")
assert reader.gets() == b"ok"
@pytest.mark.parametrize('data,expected', [
(b'$0\r\n\r\n', b''),
(b'$-1\r\n', None),
(b'$5\r\nhello\r\n', b'hello'),
], ids=['Empty', 'null', 'hello'])
def test_bulk_string(reader, data, expected):
reader.feed(data)
assert reader.gets() == expected
def test_bulk_string_without_encoding(reader):
snowman = b"\xe2\x98\x83"
reader.feed(b"$3\r\n" + snowman + b"\r\n")
assert reader.gets() == snowman
@pytest.mark.parametrize('encoding,expected', [
('utf-8', b"\xe2\x98\x83".decode('utf-8')),
('utf-32', b"\xe2\x98\x83"),
], ids=['utf-8', 'utf-32'])
def test_bulk_string_with_encoding(encoding, expected):
snowman = b"\xe2\x98\x83"
reader = PyReader(encoding=encoding)
reader.feed(b"$3\r\n" + snowman + b"\r\n")
assert reader.gets() == expected
def test_bulk_string_with_invalid_encoding():
reader = PyReader(encoding="unknown")
reader.feed(b"$5\r\nhello\r\n")
with pytest.raises(LookupError):
reader.gets()
def test_bulk_string_wait_buffer(reader):
reader.feed(b'$5\r\nH')
assert not reader.gets()
reader.feed(b'ello')
assert not reader.gets()
reader.feed(b'\r\n')
assert reader.gets() == b'Hello'
@pytest.mark.parametrize('data,expected', [
(b"*-1\r\n", None),
(b"*0\r\n", []),
(b"*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", [b'hello', b'world']),
], ids=['Null', 'Empty list', 'hello world'])
def test_null_multi_bulk(reader, data, expected):
reader.feed(data)
assert reader.gets() == expected
@pytest.mark.parametrize('data', [
(b"*2\r\n$5\r\nhello\r\n", b':1'),
(b'*2\r\n:1\r\n*1\r\n', b'+hello'),
(b'*2\r\n+hello\r\n+world',),
(b'*2\r\n*1\r\n+hello\r\n*1\r\n+world',),
], ids=['First in bulk',
'Error in nested',
'Multiple errors',
'Multiple nested'])
def test_multi_bulk_with_invalid_encoding_and_partial_reply(data):
reader = PyReader(encoding="unknown")
for chunk in data:
reader.feed(chunk)
assert reader.gets() is False
reader.feed(b"\r\n")
with pytest.raises(LookupError):
reader.gets()
reader.feed(b':1\r\n')
assert reader.gets() == 1
def test_nested_multi_bulk(reader):
reader.feed(b"*2\r\n*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n$1\r\n!\r\n")
assert reader.gets() == [[b"hello", b"world"], b"!"]
def test_nested_multi_bulk_depth(reader):
reader.feed(b"*1\r\n*1\r\n*1\r\n*1\r\n$1\r\n!\r\n")
assert reader.gets() == [[[[b"!"]]]]
@pytest.mark.parametrize('encoding,expected', [
('utf-8', b"\xe2\x98\x83".decode('utf-8')),
('utf-32', b"\xe2\x98\x83"),
], ids=['utf-8', 'utf-32'])
def test_simple_string_with_encoding(encoding, expected):
snowman = b"\xe2\x98\x83"
reader = PyReader(encoding=encoding)
reader.feed(b"+" + snowman + b"\r\n")
assert reader.gets() == expected
def test_invalid_offset(reader):
data = b"+ok\r\n"
with pytest.raises(ValueError):
reader.feed(data, 6)
def test_invalid_length(reader):
data = b"+ok\r\n"
with pytest.raises(ValueError):
reader.feed(data, 0, 6)
def test_ok_offset(reader):
data = b"blah+ok\r\n"
reader.feed(data, 4)
assert reader.gets() == b"ok"
def test_ok_length(reader):
data = b"blah+ok\r\n"
reader.feed(data, 4, len(data)-4)
assert reader.gets() == b"ok"
@pytest.mark.xfail()
def test_maxbuf(reader):
defaultmaxbuf = reader.getmaxbuf()
reader.setmaxbuf(0)
assert 0 == reader.getmaxbuf()
reader.setmaxbuf(10000)
assert 10000 == reader.getmaxbuf()
reader.setmaxbuf(None)
assert defaultmaxbuf == reader.getmaxbuf()
with pytest.raises(ValueError):
reader.setmaxbuf(-4)
|
ymap/aioredis
|
tests/pyreader_test.py
|
Python
|
mit
| 6,582
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
import json
from elasticsearch_dsl import Q
from inspirehep.modules.search import LiteratureSearch
class AuthorAPICitations(object):
"""API endpoint for author collection returning citations."""
def serialize(self, pid, record, links_factory=None):
"""Return a list of citations for a given author recid.
:param pid:
Persistent identifier instance.
:param record:
Record instance.
:param links_factory:
Factory function for the link generation, which are added to
the response.
"""
author_pid = pid.pid_value
citations = {}
query = Q('match', authors__recid=author_pid)
search = LiteratureSearch().query('nested', path='authors', query=query)\
.params(_source=[
'authors.recid',
'control_number',
'self',
])
# For each publication co-authored by a given author...
for result in search.scan():
result_source = result.to_dict()
recid = result_source['control_number']
authors = set([i['recid'] for i in result_source['authors']])
citations[recid] = {}
nested_search = LiteratureSearch().query({
"match": {
"references.recid": recid
}
}).params(
_source=[
"authors.recid",
"collections",
"control_number",
"earliest_date",
"self",
]
)
# The source record that is being cited.
citations[recid]['citee'] = dict(
id=recid,
record=result_source['self'],
)
citations[recid]['citers'] = []
# Check all publications, which cite the parent record.
for nested_result in nested_search.scan():
nested_result_source = nested_result.to_dict()
# Not every signature has a recid (at least for demo records).
try:
nested_authors = set(
[i['recid'] for i in nested_result_source['authors']]
)
except KeyError:
nested_authors = set()
citation = dict(
citer=dict(
id=int(nested_result_source['control_number']),
record=nested_result_source['self']
),
# If at least one author is shared, it's a self-citation.
self_citation=len(authors & nested_authors) > 0,
)
# Get the earliest date of a citer.
try:
citation['date'] = nested_result_source['earliest_date']
except KeyError:
pass
# Get status if a citer is published.
# FIXME: As discussed with Sam, we should have a boolean flag
# for this type of information.
try:
citation['published_paper'] = "Published" in [
i['primary'] for i in nested_result_source[
'collections']]
except KeyError:
citation['published_paper'] = False
citations[recid]['citers'].append(citation)
return json.dumps(citations.values())
|
jacquerie/inspire-next
|
inspirehep/modules/authors/rest/citations.py
|
Python
|
gpl-3.0
| 4,632
|
import sys
def main(filepath):
    # Each input line holds "City,distance;" pairs; print the gaps between
    # consecutive stops after sorting the distances.
    with open(filepath, 'r') as f:
for line in f.readlines():
if line:
line = line.strip()
line = line.split(';')[:-1]
distances = []
for city in line:
city = city.strip()
city = city.split(',')
distances.append(int(city[1]))
distances_between = []
point = 0
distances = sorted(distances)
for distance in distances:
distances_between.append(str(distance - point))
point = distance
print ','.join(distances_between)
if __name__ == '__main__':
main(sys.argv[1])
|
tdsymonds/codeeval
|
python/easy/(124) road-trip.py
|
Python
|
mit
| 812
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import webob
from nova import compute
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get(*args, **kwargs):
inst = fakes.stub_instance(1, uuid=UUID3, task_state="kayaking",
vm_state="slightly crunchy", power_state=1)
return fake_instance.fake_instance_obj(args[1], **inst)
def fake_compute_get_all(*args, **kwargs):
db_list = [
fakes.stub_instance(1, uuid=UUID1, task_state="task-1",
vm_state="vm-1", power_state=1),
fakes.stub_instance(2, uuid=UUID2, task_state="task-2",
vm_state="vm-2", power_state=2),
]
fields = instance_obj.INSTANCE_DEFAULT_FIELDS
return instance_obj._make_instance_list(args[1],
objects.InstanceList(),
db_list, fields)
class ExtendedStatusTestV21(test.TestCase):
content_type = 'application/json'
prefix = 'OS-EXT-STS:'
fake_url = '/v2/fake'
def _set_flags(self):
pass
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app_v21(
init_only=('servers',
'os-extended-status')))
return res
def setUp(self):
super(ExtendedStatusTestV21, self).setUp()
fakes.stub_out_nw_api(self)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
self._set_flags()
return_server = fakes.fake_instance_get()
self.stub_out('nova.db.instance_get_by_uuid', return_server)
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def assertServerStates(self, server, vm_state, power_state, task_state):
self.assertEqual(server.get('%svm_state' % self.prefix), vm_state)
self.assertEqual(int(server.get('%spower_state' % self.prefix)),
power_state)
self.assertEqual(server.get('%stask_state' % self.prefix), task_state)
def test_show(self):
url = self.fake_url + '/servers/%s' % UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertServerStates(self._get_server(res.body),
vm_state='slightly crunchy',
power_state=1,
task_state='kayaking')
def test_detail(self):
url = self.fake_url + '/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
self.assertServerStates(server,
vm_state='vm-%s' % (i + 1),
power_state=(i + 1),
task_state='task-%s' % (i + 1))
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
class ExtendedStatusTestV2(ExtendedStatusTestV21):
def _set_flags(self):
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Extended_status'])
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
return res
|
zhimin711/nova
|
nova/tests/unit/api/openstack/compute/test_extended_status.py
|
Python
|
apache-2.0
| 4,900
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`MontalvaEtAl2016SInter`
:class:`MontalvaEtAl2016SSlab`
"""
from __future__ import division
import numpy as np
from openquake.hazardlib.gsim.base import CoeffsTable
from openquake.hazardlib.imt import PGA
from openquake.hazardlib.gsim.abrahamson_2015 import (AbrahamsonEtAl2015SInter,
AbrahamsonEtAl2015SSlab)
class MontalvaEtAl2016SInter(AbrahamsonEtAl2015SInter):
"""
Adaptation of the Abrahamson et al. (2015) BC Hydro subduction interface
GMPE, calibrated to Chilean strong motion data.
GMPE and related coefficients published by:
Montalva, G., Bastias, N., Rodriguez-Marek, A. (2016), 'Ground Motion
Prediction Equation for the Chilean Subduction Zone'. Submitted to
Seismological Research Letters
"""
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extract dictionaries of coefficients specific to required
# intensity measure type and for PGA
C = self.COEFFS[imt]
C_PGA = self.COEFFS[PGA()]
dc1_pga = C_PGA["DC1"]
# compute median pga on rock (vs30=1000), needed for site response
# term calculation
pga1000 = np.exp(
self._compute_pga_rock(C_PGA, dc1_pga, sites, rup, dists))
mean = (self._compute_magnitude_term(C, C["DC1"], rup.mag) +
self._compute_distance_term(C, rup.mag, dists) +
self._compute_focal_depth_term(C, rup) +
self._compute_forearc_backarc_term(C, sites, dists) +
self._compute_site_response_term(C, sites, pga1000))
stddevs = self._get_stddevs(C, stddev_types, len(sites.vs30))
return mean, stddevs
def _compute_magnitude_term(self, C, dc1, mag):
"""
Computes the magnitude scaling term given by equation (2)
"""
base = C['theta1'] + (C['theta4'] * dc1)
dmag = self.CONSTS["C1"] + dc1
if mag > dmag:
f_mag = (C['theta5'] * (mag - dmag)) +\
C['theta13'] * ((10. - mag) ** 2.)
else:
f_mag = (C['theta4'] * (mag - dmag)) +\
C['theta13'] * ((10. - mag) ** 2.)
return base + f_mag
def _compute_distance_term(self, C, mag, dists):
"""
Computes the distance scaling term, as contained within equation (1)
"""
return (C['theta2'] + C['theta3'] * (mag - 7.8)) *\
np.log(dists.rrup + self.CONSTS['c4'] * np.exp((mag - 6.) *
self.CONSTS['theta9'])) + (C['theta6'] * dists.rrup)
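    # Readable form of the distance term implemented above (transcribed from the code,
    # not quoted from the paper):
    #   f_dist = (theta2 + theta3 * (M - 7.8)) * ln(Rrup + c4 * exp(theta9 * (M - 6)))
    #            + theta6 * Rrup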
COEFFS = CoeffsTable(sa_damping=5, table="""\
imt DC1 vlin b theta1 theta2 theta3 theta4 theta5 theta6 theta7 theta8 theta10 theta11 theta12 theta13 theta14 theta15 theta16 phi tau sigma phi_s2s
pga 0.200000000 865.1 -1.186 4.935754758 -1.319716122 0.156954813 -1.038307042 -0.200134154 -0.002064757 1.0988 -1.42 4.559632568 0.004375202 0.914271114 -0.203185487 -0.694459960 0.9969 -1.00 0.676804137 0.436356919 0.805277096 0.547434071
0.010 0.200000000 865.1 -1.186 4.935754758 -1.319716122 0.156954813 -1.038307042 -0.200134154 -0.002064757 1.0988 -1.42 4.559632568 0.004375202 0.914271114 -0.203185487 -0.694459960 0.9969 -1.00 0.676804137 0.436356919 0.805277096 0.547434071
0.020 0.200000000 865.1 -1.186 4.963548267 -1.321501153 0.142973041 -0.925888135 -0.148739101 -0.002188725 1.0988 -1.42 4.654806348 0.004263109 0.934182754 -0.197899904 -0.710342148 0.9969 -1.00 0.683243196 0.430880779 0.807762039 0.554691749
0.050 0.200000000 1053.5 -1.346 7.358115618 -1.825715240 0.093914961 -0.555849306 0.129797026 -0.000431630 1.2536 -1.65 5.105622455 0.005100764 1.188485184 -0.183201116 -0.737586413 1.1030 -1.18 0.671606746 0.483036109 0.827272328 0.545045472
0.075 0.200000000 1085.7 -1.471 7.558603177 -1.810050104 0.103239851 -0.561904402 0.151637804 -0.001102941 1.4175 -1.80 4.514166186 0.005820798 1.395007124 -0.174729372 -0.615517435 1.2732 -1.36 0.697383068 0.508098624 0.862848397 0.582340972
0.100 0.200000000 1032.5 -1.624 7.027657583 -1.633492535 0.088844200 -0.525502325 0.265136034 -0.002397453 1.3997 -1.80 3.827080246 0.004236026 1.560949356 -0.176264079 -0.487079351 1.3042 -1.36 0.722361027 0.504635520 0.881171074 0.616096154
0.150 0.200000000 877.6 -1.931 6.049355161 -1.335645478 0.073754755 -0.567044631 0.294956394 -0.003942231 1.3582 -1.69 2.880273890 0.002951253 1.824536435 -0.193149712 -0.343522351 1.2600 -1.30 0.741411715 0.475224629 0.880641687 0.629331025
0.200 0.200000000 748.2 -2.188 4.179750788 -0.885470585 0.065604603 -0.659648456 0.359088006 -0.005638198 1.1648 -1.49 3.257747522 0.002516425 1.976696142 -0.214467130 -0.452888442 1.2230 -1.25 0.759426634 0.429788781 0.872609425 0.637577298
0.250 0.200000000 654.3 -2.381 3.999581211 -0.821066204 0.055367666 -0.643078011 0.352583884 -0.005484494 0.9940 -1.30 3.545595708 0.000888426 2.152539829 -0.226122818 -0.531334245 1.1600 -1.17 0.743380316 0.401651257 0.844948535 0.606641527
0.300 0.200000000 587.1 -2.518 3.343521294 -0.678019870 0.070313635 -0.816717363 0.236089761 -0.005490803 0.8821 -1.18 3.711884196 0.001562756 2.179000482 -0.238785185 -0.601073843 1.0500 -1.06 0.750620673 0.389053205 0.845454783 0.609833032
0.400 0.143682921 503.0 -2.657 3.342528747 -0.674981502 0.071624870 -1.123522692 0.103008688 -0.004346784 0.7046 -0.98 4.125701638 -0.001119565 2.225720730 -0.284536574 -0.702111182 0.8000 -0.78 0.741503989 0.383488689 0.834800419 0.589961066
0.500 0.100000000 456.6 -2.669 3.714706072 -0.770820923 0.073623537 -1.330962172 -0.019664088 -0.003028097 0.5799 -0.82 4.507163580 -0.000434645 2.265272475 -0.318116722 -0.800834677 0.6620 -0.62 0.688862082 0.384159164 0.788739014 0.513251109
0.600 0.073696559 430.3 -2.599 4.425108150 -0.939459680 0.062188731 -1.569443919 -0.014606735 -0.001675340 0.5021 -0.70 5.255072487 -0.000097416 2.200898990 -0.365330018 -0.966147926 0.5800 -0.50 0.665479640 0.394271020 0.773506812 0.486626176
0.750 0.041503750 410.5 -2.401 4.372165283 -0.933761671 0.053771754 -1.730788918 -0.031408137 -0.001524349 0.3687 -0.54 5.074522171 -0.001350443 1.918279398 -0.401223910 -0.937019824 0.4800 -0.34 0.637244299 0.414109647 0.759978352 0.443006934
1.000 0.000000000 400.0 -1.955 4.021211151 -0.924917589 0.054326150 -1.908027335 -0.138131804 -0.001101517 0.1746 -0.34 5.211831136 -0.002283504 1.509910061 -0.433435346 -0.964846571 0.3300 -0.14 0.611337571 0.442015583 0.754394725 0.421636418
1.500 -0.058496250 400.0 -1.025 3.946972058 -1.002244695 0.049918773 -2.307833569 -0.412376757 -0.000261255 -0.0820 -0.05 5.561359279 -0.000996882 0.656237153 -0.502990059 -1.057548381 0.3100 0.00 0.617840247 0.436708751 0.756598377 0.448028967
2.000 -0.100000000 400.0 -0.299 3.763370770 -1.048406811 0.049945027 -2.218316295 -0.488347011 -0.000156404 -0.2821 0.12 5.310311721 -0.000289011 -0.148288073 -0.501824964 -1.007661553 0.3000 0.00 0.586452050 0.429957558 0.727179144 0.424207890
2.500 -0.155033971 400.0 0.000 3.279573476 -0.991842986 0.095212751 -2.496506471 -0.770828569 -0.000738153 -0.4108 0.25 4.764778613 -0.001039535 -0.459995635 -0.517128864 -0.886704977 0.3000 0.00 0.567864698 0.442678828 0.720024208 0.416230786
3.000 -0.200000000 400.0 0.000 3.407135085 -1.079312405 0.092359656 -2.425045547 -0.883889211 -0.000357658 -0.4466 0.30 4.800502846 -0.000395577 -0.450645670 -0.514638813 -0.901051441 0.3000 0.00 0.559253514 0.420099114 0.699462478 0.418794658
4.000 -0.200000000 400.0 0.000 2.789669400 -1.072279505 0.148258197 -2.792416051 -1.282315047 0.000409730 -0.4344 0.30 5.011985606 -0.000308830 -0.512937685 -0.529022902 -0.939796651 0.3000 0.00 0.569097474 0.408117852 0.700308586 0.435934346
5.000 -0.200000000 400.0 0.000 2.700791140 -1.202536653 0.172625283 -2.741020801 -1.141773134 0.001833647 -0.4368 0.30 5.457710792 0.000255165 -0.503538042 -0.504799612 -1.025705989 0.3000 0.00 0.558540211 0.387890193 0.680019095 0.418174855
6.000 -0.200000000 400.0 0.000 2.630174552 -1.303101604 0.127044195 -1.863112205 -0.727779859 0.002185845 -0.4586 0.30 5.826483564 0.001637500 -0.497674025 -0.423978007 -1.110103433 0.3000 0.00 0.502062640 0.394614799 0.638582598 0.346222778
7.500 -0.200000000 400.0 0.000 2.520418211 -1.399368154 0.084904399 -0.930694380 -0.212014425 0.002325451 -0.4433 0.30 6.332273436 0.001046880 -0.481585300 -0.334701563 -1.195826518 0.3000 0.00 0.482570602 0.373377912 0.610151990 0.321745366
10.00 -0.200000000 400.0 0.000 3.266979586 -1.707902316 0.068210457 -0.967817098 0.253077379 0.004736644 -0.4828 0.30 7.382937906 0.000738462 -0.423369635 -0.347713953 -1.409670235 0.3000 0.00 0.466924628 0.376696614 0.599932452 0.300789811
""")
CONSTS = {
# Period-Independent Coefficients (Table 2)
'n': 1.18,
'c': 1.88,
'c4': 10.0,
'C1': 7.8,
'theta9': 0.4
}
class MontalvaEtAl2016SSlab(AbrahamsonEtAl2015SSlab):
"""
Adaptation of the Abrahamson et al. (2015) BC Hydro subduction in-slab
GMPE, calibrated to Chilean strong motion data
"""
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extract dictionaries of coefficients specific to required
# intensity measure type and for PGA
C = self.COEFFS[imt]
# For inslab GMPEs the correction term is fixed at -0.3
dc1 = -0.3
C_PGA = self.COEFFS[PGA()]
# compute median pga on rock (vs30=1000), needed for site response
# term calculation
pga1000 = np.exp(
self._compute_pga_rock(C_PGA, dc1, sites, rup, dists))
mean = (self._compute_magnitude_term(C, dc1, rup.mag) +
self._compute_distance_term(C, rup.mag, dists) +
self._compute_focal_depth_term(C, rup) +
self._compute_forearc_backarc_term(C, sites, dists) +
self._compute_site_response_term(C, sites, pga1000))
stddevs = self._get_stddevs(C, stddev_types, len(sites.vs30))
return mean, stddevs
def _compute_magnitude_term(self, C, dc1, mag):
"""
Computes the magnitude scaling term given by equation (2)
corrected by a local adjustment factor
"""
base = C['theta1'] + (C['theta4'] * dc1)
dmag = self.CONSTS["C1"] + dc1
if mag > dmag:
f_mag = (C['theta5'] * (mag - dmag)) +\
C['theta13'] * ((10. - mag) ** 2.)
else:
f_mag = (C['theta4'] * (mag - dmag)) +\
C['theta13'] * ((10. - mag) ** 2.)
return base + f_mag
def _compute_distance_term(self, C, mag, dists):
"""
Computes the distance scaling term, as contained within equation (1b)
"""
return ((C['theta2'] + C['theta14'] + C['theta3'] *
(mag - 7.8)) * np.log(dists.rhypo + self.CONSTS['c4'] *
np.exp((mag - 6.) * self.CONSTS['theta9'])) +
(C['theta6'] * dists.rhypo)) + C["theta10"]
COEFFS = CoeffsTable(sa_damping=5, table="""\
imt DC1 vlin b theta1 theta2 theta3 theta4 theta5 theta6 theta7 theta8 theta10 theta11 theta12 theta13 theta14 theta15 theta16 phi tau sigma phi_s2s
pga -0.300000000 865.1 -1.186 4.935754758 -1.319716122 0.156954813 -1.038307042 -0.200134154 -0.002064757 1.0988 -1.42 4.559632568 0.004375202 0.914271114 -0.203185487 -0.694459960 0.9969 -1.00 0.676804137 0.436356919 0.805277096 0.547434071
0.010 -0.300000000 865.1 -1.186 4.935754758 -1.319716122 0.156954813 -1.038307042 -0.200134154 -0.002064757 1.0988 -1.42 4.559632568 0.004375202 0.914271114 -0.203185487 -0.694459960 0.9969 -1.00 0.676804137 0.436356919 0.805277096 0.547434071
0.020 -0.300000000 865.1 -1.186 4.963548267 -1.321501153 0.142973041 -0.925888135 -0.148739101 -0.002188725 1.0988 -1.42 4.654806348 0.004263109 0.934182754 -0.197899904 -0.710342148 0.9969 -1.00 0.683243196 0.430880779 0.807762039 0.554691749
0.050 -0.300000000 1053.5 -1.346 7.358115618 -1.825715240 0.093914961 -0.555849306 0.129797026 -0.000431630 1.2536 -1.65 5.105622455 0.005100764 1.188485184 -0.183201116 -0.737586413 1.1030 -1.18 0.671606746 0.483036109 0.827272328 0.545045472
0.075 -0.300000000 1085.7 -1.471 7.558603177 -1.810050104 0.103239851 -0.561904402 0.151637804 -0.001102941 1.4175 -1.80 4.514166186 0.005820798 1.395007124 -0.174729372 -0.615517435 1.2732 -1.36 0.697383068 0.508098624 0.862848397 0.582340972
0.100 -0.300000000 1032.5 -1.624 7.027657583 -1.633492535 0.088844200 -0.525502325 0.265136034 -0.002397453 1.3997 -1.80 3.827080246 0.004236026 1.560949356 -0.176264079 -0.487079351 1.3042 -1.36 0.722361027 0.504635520 0.881171074 0.616096154
0.150 -0.300000000 877.6 -1.931 6.049355161 -1.335645478 0.073754755 -0.567044631 0.294956394 -0.003942231 1.3582 -1.69 2.880273890 0.002951253 1.824536435 -0.193149712 -0.343522351 1.2600 -1.30 0.741411715 0.475224629 0.880641687 0.629331025
0.200 -0.300000000 748.2 -2.188 4.179750788 -0.885470585 0.065604603 -0.659648456 0.359088006 -0.005638198 1.1648 -1.49 3.257747522 0.002516425 1.976696142 -0.214467130 -0.452888442 1.2230 -1.25 0.759426634 0.429788781 0.872609425 0.637577298
0.250 -0.300000000 654.3 -2.381 3.999581211 -0.821066204 0.055367666 -0.643078011 0.352583884 -0.005484494 0.9940 -1.30 3.545595708 0.000888426 2.152539829 -0.226122818 -0.531334245 1.1600 -1.17 0.743380316 0.401651257 0.844948535 0.606641527
0.300 -0.300000000 587.1 -2.518 3.343521294 -0.678019870 0.070313635 -0.816717363 0.236089761 -0.005490803 0.8821 -1.18 3.711884196 0.001562756 2.179000482 -0.238785185 -0.601073843 1.0500 -1.06 0.750620673 0.389053205 0.845454783 0.609833032
0.400 -0.300000000 503.0 -2.657 3.342528747 -0.674981502 0.071624870 -1.123522692 0.103008688 -0.004346784 0.7046 -0.98 4.125701638 -0.001119565 2.225720730 -0.284536574 -0.702111182 0.8000 -0.78 0.741503989 0.383488689 0.834800419 0.589961066
0.500 -0.300000000 456.6 -2.669 3.714706072 -0.770820923 0.073623537 -1.330962172 -0.019664088 -0.003028097 0.5799 -0.82 4.507163580 -0.000434645 2.265272475 -0.318116722 -0.800834677 0.6620 -0.62 0.688862082 0.384159164 0.788739014 0.513251109
0.600 -0.300000000 430.3 -2.599 4.425108150 -0.939459680 0.062188731 -1.569443919 -0.014606735 -0.001675340 0.5021 -0.70 5.255072487 -0.000097416 2.200898990 -0.365330018 -0.966147926 0.5800 -0.50 0.665479640 0.394271020 0.773506812 0.486626176
0.750 -0.300000000 410.5 -2.401 4.372165283 -0.933761671 0.053771754 -1.730788918 -0.031408137 -0.001524349 0.3687 -0.54 5.074522171 -0.001350443 1.918279398 -0.401223910 -0.937019824 0.4800 -0.34 0.637244299 0.414109647 0.759978352 0.443006934
1.000 -0.300000000 400.0 -1.955 4.021211151 -0.924917589 0.054326150 -1.908027335 -0.138131804 -0.001101517 0.1746 -0.34 5.211831136 -0.002283504 1.509910061 -0.433435346 -0.964846571 0.3300 -0.14 0.611337571 0.442015583 0.754394725 0.421636418
1.500 -0.300000000 400.0 -1.025 3.946972058 -1.002244695 0.049918773 -2.307833569 -0.412376757 -0.000261255 -0.0820 -0.05 5.561359279 -0.000996882 0.656237153 -0.502990059 -1.057548381 0.3100 0.00 0.617840247 0.436708751 0.756598377 0.448028967
2.000 -0.300000000 400.0 -0.299 3.763370770 -1.048406811 0.049945027 -2.218316295 -0.488347011 -0.000156404 -0.2821 0.12 5.310311721 -0.000289011 -0.148288073 -0.501824964 -1.007661553 0.3000 0.00 0.586452050 0.429957558 0.727179144 0.424207890
2.500 -0.300000000 400.0 0.000 3.279573476 -0.991842986 0.095212751 -2.496506471 -0.770828569 -0.000738153 -0.4108 0.25 4.764778613 -0.001039535 -0.459995635 -0.517128864 -0.886704977 0.3000 0.00 0.567864698 0.442678828 0.720024208 0.416230786
3.000 -0.300000000 400.0 0.000 3.407135085 -1.079312405 0.092359656 -2.425045547 -0.883889211 -0.000357658 -0.4466 0.30 4.800502846 -0.000395577 -0.450645670 -0.514638813 -0.901051441 0.3000 0.00 0.559253514 0.420099114 0.699462478 0.418794658
4.000 -0.300000000 400.0 0.000 2.789669400 -1.072279505 0.148258197 -2.792416051 -1.282315047 0.000409730 -0.4344 0.30 5.011985606 -0.000308830 -0.512937685 -0.529022902 -0.939796651 0.3000 0.00 0.569097474 0.408117852 0.700308586 0.435934346
5.000 -0.300000000 400.0 0.000 2.700791140 -1.202536653 0.172625283 -2.741020801 -1.141773134 0.001833647 -0.4368 0.30 5.457710792 0.000255165 -0.503538042 -0.504799612 -1.025705989 0.3000 0.00 0.558540211 0.387890193 0.680019095 0.418174855
6.000 -0.300000000 400.0 0.000 2.630174552 -1.303101604 0.127044195 -1.863112205 -0.727779859 0.002185845 -0.4586 0.30 5.826483564 0.001637500 -0.497674025 -0.423978007 -1.110103433 0.3000 0.00 0.502062640 0.394614799 0.638582598 0.346222778
7.500 -0.300000000 400.0 0.000 2.520418211 -1.399368154 0.084904399 -0.930694380 -0.212014425 0.002325451 -0.4433 0.30 6.332273436 0.001046880 -0.481585300 -0.334701563 -1.195826518 0.3000 0.00 0.482570602 0.373377912 0.610151990 0.321745366
10.00 -0.300000000 400.0 0.000 3.266979586 -1.707902316 0.068210457 -0.967817098 0.253077379 0.004736644 -0.4828 0.30 7.382937906 0.000738462 -0.423369635 -0.347713953 -1.409670235 0.3000 0.00 0.466924628 0.376696614 0.599932452 0.300789811
""")
|
vup1120/oq-hazardlib
|
openquake/hazardlib/gsim/montalva_2016.py
|
Python
|
agpl-3.0
| 20,875
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from test_haystack.discovery.models import Bar, Foo
from haystack import indexes
class FooIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='body')
def get_model(self):
return Foo
class BarIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
def get_model(self):
return Bar
|
streeter/django-haystack
|
test_haystack/discovery/search_indexes.py
|
Python
|
bsd-3-clause
| 498
|
import json
import subprocess
import sublime
try:
from urllib.parse import urlencode
from urllib.request import urlopen
except ImportError:
from urllib import urlencode, urlopen
BASE_URL = 'https://slack.com/api/'
def api_call(method, call_args=None, loading=None, filename=None, icon=None):
    # Use None as the default argument to avoid sharing one mutable dict between calls.
    if call_args is None:
        call_args = {}
    if icon:
        call_args['icon_url'] = icon
        print('icon', icon)
URL = BASE_URL + method + "?" + urlencode(call_args)
print('calling:', URL)
try:
if filename:
f = open(filename, 'rb')
filebody = f.read()
f.close()
data = urlencode({'content': filebody})
response = urlopen(
url=URL,
data=data.encode('utf8')
).read().decode('utf8')
else:
response = urlopen(url=URL).read().decode('utf8')
except:
# fallback for sublime bug with urlopen (on linux only)
if filename: # upload filename
proc = subprocess.Popen(
['curl', '-X', 'POST', '-F', 'file=@'+filename, URL],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(
['curl', '-s', URL],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
response = out.decode('utf8')
response = json.loads(response)
if not response['ok']:
sublime.error_message("SLACK Api error: " + response['error'])
if loading:
loading.done = True
return False
return response
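# Hedged usage sketch (added; not part of the original plugin). 'chat.postMessage' is a
# standard Slack Web API method; the token and channel values must be supplied by the
# caller.
def _example_post_message(token, channel, text):
    return api_call('chat.postMessage', {
        'token': token,
        'channel': channel,
        'text': text,
    })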
|
simion/sublime-slack-integration
|
api.py
|
Python
|
gpl-2.0
| 1,631
|
import jedi
import sys
from os.path import dirname, join
def test_namespace_package():
sys.path.insert(0, join(dirname(__file__), 'namespace_package/ns1'))
sys.path.insert(1, join(dirname(__file__), 'namespace_package/ns2'))
try:
# goto definition
assert jedi.Script('from pkg import ns1_file').goto_definitions()
assert jedi.Script('from pkg import ns2_file').goto_definitions()
assert not jedi.Script('from pkg import ns3_file').goto_definitions()
# goto assignment
tests = {
'from pkg.ns2_folder.nested import foo': 'nested!',
'from pkg.ns2_folder import foo': 'ns2_folder!',
'from pkg.ns2_file import foo': 'ns2_file!',
'from pkg.ns1_folder import foo': 'ns1_folder!',
'from pkg.ns1_file import foo': 'ns1_file!',
'from pkg import foo': 'ns1!',
}
for source, solution in tests.items():
ass = jedi.Script(source).goto_assignments()
assert len(ass) == 1
assert ass[0].description == "foo = '%s'" % solution
# completion
completions = jedi.Script('from pkg import ').completions()
names = [str(c.name) for c in completions] # str because of unicode
compare = ['foo', 'ns1_file', 'ns1_folder', 'ns2_folder', 'ns2_file',
'pkg_resources', 'pkgutil', '__name__', '__path__',
'__package__', '__file__', '__doc__']
        # the completion list must contain exactly these items
assert set(compare) == set(names)
tests = {
'from pkg import ns2_folder as x': 'ns2_folder!',
'from pkg import ns2_file as x': 'ns2_file!',
'from pkg.ns2_folder import nested as x': 'nested!',
'from pkg import ns1_folder as x': 'ns1_folder!',
'from pkg import ns1_file as x': 'ns1_file!',
'import pkg as x': 'ns1!',
}
for source, solution in tests.items():
for c in jedi.Script(source + '; x.').completions():
if c.name == 'foo':
completion = c
solution = "statement: foo = '%s'" % solution
assert completion.description == solution
finally:
sys.path.pop(0)
sys.path.pop(0)
|
alexrao/YouCompleteMe
|
third_party/ycmd/third_party/jedi/test/test_evaluate/test_namespace_package.py
|
Python
|
gpl-3.0
| 2,316
|
#Copyright (c) 2011, Peter Brunner (Lugoues)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import logging
import gobject
import pygst
pygst.require('0.10')
import gst
from rgain import rgcalc
from beets import ui
from beets.plugins import BeetsPlugin
from beets.ui import print_, Subcommand
from beets.mediafile import MediaFile, FileTypeError, UnreadableFileError
log = logging.getLogger('beets')
log.addHandler(logging.StreamHandler())
DEFAULT_REFERENCE_LOUDNESS = 89
DEFAULT_MP3_FORMAT = 'fb2k'
class ReplayGainPlugin(BeetsPlugin):
'''Provides replay gain analysis for the Beets Music Manager'''
ref_level = 0
mp3_format = ''
overwrite = False
def __init__(self):
self.register_listener('album_imported', self.album_imported)
self.register_listener('item_imported', self.item_imported)
def configure(self, config):
self.ref_level = ui.config_val(config, 'replaygain', 'reference_loundess', DEFAULT_REFERENCE_LOUDNESS, int)
self.mp3_format = ui.config_val(config, 'replaygain', 'mp3_format', DEFAULT_MP3_FORMAT)
self.overwrite = ui.config_val(config, 'replaygain', 'overwrite', False)
def album_imported(self, lib, album):
self.write_album = True
print_("Tagging Replay Gain: %s - %s" % (album.albumartist, album.album))
try:
media_files = [MediaFile(item.path) for item in album.items()]
media_files = [mf for mf in media_files if self.requires_gain( mf )]
            #calculate gain. track_data: dict of per-track gain/peak results keyed by
            #file path; album_data: the album-level gain/peak result
track_data, album_data = rgcalc.calculate([mf.path for mf in media_files], True, self.ref_level)
for mf in media_files:
self.write_gain(mf, track_data, album_data)
except (FileTypeError, UnreadableFileError, TypeError, ValueError),e:
log.error("failed to calculate replaygain: %s ", e)
def item_imported(self, lib, item):
try:
self.write_album = False
print_("Tagging Replay Gain: %s - %s" % (item.artist, item.title))
mf = MediaFile(item.path)
if self.requires_gain(mf):
track_data, album_data = rgcalc.calculate([ mf.path], True, self.ref_level)
self.write_gain(mf, track_data, None)
except (FileTypeError, UnreadableFileError, TypeError, ValueError),e:
log.error("failed to calculate replaygain: %s ", e)
def write_gain(self, mf, track_data, album_data):
try:
mf.rg_track_gain = track_data[mf.path].gain
mf.rg_track_peak= track_data[mf.path].peak
if self.write_album and album_data :
mf.rg_album_gain = album_data.gain
mf.rg_album_peak= album_data.peak
mf.save()
except (FileTypeError, UnreadableFileError, TypeError, ValueError),e:
log.error("failed to write replaygain: %s" % (mf.title))
def requires_gain(self, mf):
        return (self.overwrite or
                (not mf.rg_track_gain or not mf.rg_track_peak) or
                ((not mf.rg_album_gain or not mf.rg_album_peak) and
                 self.write_album))
|
Lugoues/beets-replaygain
|
beetsplug/replaygain/__init__.py
|
Python
|
mit
| 4,154
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 Chris Liechti <cliechti@gmx.net>
# All Rights Reserved.
# Simplified BSD License (see LICENSE.txt for full text)
"""\
X-Protocol command class for serial devices.
The X-Protocol is simple:
- line based
- a line ends with '\n'; any '\r' is ignored
- master/slave. PC always initiates communication
- a command can have many lines as a response
- the first character of the line serves as command or response indicator
- 'xOK' or 'xERR <optional message>' indicate the end of a command
This implementation provides a class that connects through the serial port.
Commands can be sent with the method ``command``. It returns all lines output
by the device as a list when the command ends with 'xOK' (the 'xOK' line itself
is excluded from the result). In case of
an 'xERR' response, an exception is raised containing the message from the
device. All lines read until the error occurred are attached to the exception.
Typical usages for commands:
- 'd' set debug mode: 'd0' / 'd1'
- 'e' set echo mode: 'e0' / 'e1'
Typical usages for answers:
- 'i' for integer. e.g. 'i123' or 'i0x123'
- 's' for a string. e.g. 'sHello'
- 'o' for output message. The message is intended to be shown to the user,
e.g. 'oHello World'
Example::
PC Device Note
====== ====== ====
oHello ---------> a command that
<--------- oHello sends the message back
<--------- xOK
m1 ---------> a command that does some measurement
<--------- i123 and returns an integer
<--------- xOK
r ---------> an invalid command
<--------- xERR unknown command yields an error
"""
import sys

import serial
import codecs
class XProtocolError(Exception):
pass
class XProtocol(object):
def __init__(self, port, baudrate=2400):
self.serial = serial.Serial()
self.serial.port = port
self.serial.baudrate = baudrate
self.serial.timeout = 3
def open(self):
self.serial.open()
def close(self):
self.serial.close()
def command(self, cmd):
self.serial.write('%s\n' % cmd)
lines = []
chars = []
while True:
c = self.serial.read(1)
if not c: raise XProtocolError('Timeout', lines)
if c == '\r':
pass
elif c == '\n':
line = ''.join(chars)
del chars[:]
if not line:
pass
elif line[0] == 'x':
if line.startswith('xOK'):
return lines
else:
raise XProtocolError(''.join(line), lines)
else:
lines.append(line)
else:
chars.append(c)
def decode(self, lines):
result = []
for line in lines:
if not line:
pass
elif line[0] == 'i':
result.append(int(line[1:], 0))
elif line[0] == 'h':
result.append(line[1:].decode('hex'))
elif line[0] == 's':
result.append(codecs.escape_decode(line[1:])[0])
elif line[0] == 'o':
sys.stdout.write(line[1:])
else:
raise ValueError('unknown line type: %r' % (line,))
return result
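# Hedged usage sketch (added; not part of the original module). The port name is a
# placeholder and the 'm1' command is only the measurement example from the docstring.
def _example_usage(port='/dev/ttyUSB0'):
    proto = XProtocol(port, baudrate=2400)
    proto.open()
    try:
        lines = proto.command('m1')   # e.g. ['i123'] for an integer measurement
        return proto.decode(lines)    # -> [123]
    finally:
        proto.close()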
if __name__ == '__main__':
import sys
import time
import unittest
x = XProtocol('/dev/ttyACM0', 2400)
#~ x = XProtocol('/dev/ttyACM0', 9600)
x.open()
class TestDecoder(unittest.TestCase):
def test_int(self):
self.failUnlessEqual(x.decode(['i123']), [123])
self.failUnlessEqual(x.decode(['i0x123']), [0x123])
self.failUnlessEqual(x.decode(['i1', 'i2', 'i3']), [1,2,3])
def test_str(self):
self.failUnlessEqual(x.decode(['sHello']), ['Hello'])
self.failUnlessEqual(x.decode(['s\\n']), ['\n'])
def test_unknown(self):
self.failUnlessRaises(ValueError, x.decode, ['r'])
class TestCommands(unittest.TestCase):
def test_echo(self):
self.failUnlessEqual(x.command('oHello'), ['oHello'])
def test_error(self):
self.failUnlessRaises(XProtocolError, x.command, 'error')
def test_load(self):
test_duration = 5.0 # [seconds]
t_end = time.time() + test_duration
n = 0
while time.time() < t_end:
self.failUnlessEqual(x.command('oHello'), ['oHello'])
n += 1
print '\n~%d echo commands/second' % (n/test_duration)
sys.argv[1:] = ['-v']
unittest.main()
#~ x.close()
|
cetic/python-msp430-tools
|
examples/asm/forth_to_asm_advanced/xprotocol.py
|
Python
|
bsd-3-clause
| 4,878
|
from nose.tools import assert_equal, assert_is_not_none
from nose_parameterized import parameterized
from util import query, commit
from time import time
import json
import os
USE_SCHEMA = True
for q in (
'DROP TABLE IF EXISTS obs_perftest_simple',
'''CREATE TABLE obs_perftest_simple (cartodb_id SERIAL PRIMARY KEY,
point GEOMETRY,
geom GEOMETRY,
offset_geom GEOMETRY,
name TEXT, measure NUMERIC, category TEXT)''',
'''INSERT INTO obs_perftest_simple (point, geom, offset_geom, name)
SELECT ST_PointOnSurface(the_geom) point,
the_geom geom,
ST_Translate(the_geom, -0.1, 0.1) offset_geom,
geom_refs AS name
FROM (SELECT * FROM {schema}OBS_GetBoundariesByGeometry(
st_makeenvelope(-74.1, 40.5,
-73.8, 40.9, 4326),
'us.census.tiger.census_tract_2015_clipped')) foo
ORDER BY ST_NPoints(the_geom) ASC
LIMIT 1000''',
'DROP TABLE IF EXISTS obs_perftest_complex',
'''CREATE TABLE obs_perftest_complex (cartodb_id SERIAL PRIMARY KEY,
point GEOMETRY,
geom GEOMETRY,
offset_geom GEOMETRY,
name TEXT, measure NUMERIC, category TEXT)''',
'''INSERT INTO obs_perftest_complex (point, geom, offset_geom, name)
SELECT ST_PointOnSurface(the_geom) point,
the_geom geom,
ST_Translate(the_geom, -0.1, 0.1) offset_geom,
geom_refs AS name
FROM (SELECT * FROM {schema}OBS_GetBoundariesByGeometry(
st_makeenvelope(-75.05437469482422,40.66319159533881,
-73.81885528564453,41.745696344339564, 4326),
'us.census.tiger.county_2015_clipped')) foo
ORDER BY ST_NPoints(the_geom) DESC
LIMIT 50;'''):
q_formatted = q.format(
schema='cdb_observatory.' if USE_SCHEMA else '',
)
start = time()
resp = query(q_formatted)
end = time()
print('{} for {}'.format(int(end - start), q_formatted))
if q.lower().startswith('insert'):
if resp.rowcount == 0:
raise Exception('''Performance fixture creation "{}" inserted 0 rows,
this will break tests. Check the query to determine
what is going wrong.'''.format(q_formatted))
commit()
ARGS = {
('OBS_GetMeasureByID', None): "name, 'us.census.acs.B01001002', '{}'",
('OBS_GetMeasure', 'predenominated'): "{}, 'us.census.acs.B01003001', null, {}",
('OBS_GetMeasure', 'area'): "{}, 'us.census.acs.B01001002', 'area', {}",
('OBS_GetMeasure', 'denominator'): "{}, 'us.census.acs.B01001002', 'denominator', {}",
('OBS_GetCategory', None): "{}, 'us.census.spielman_singleton_segments.X10', {}",
('_OBS_GetGeometryScores', None): "{}, NULL"
}
def record(params, results):
sha = os.environ['OBS_EXTENSION_SHA']
msg = os.environ.get('OBS_EXTENSION_MSG')
fpath = os.path.join(os.environ['OBS_PERFTEST_DIR'], sha + '.json')
if os.path.isfile(fpath):
tests = json.load(open(fpath, 'r'))
else:
tests = {}
with open(fpath, 'w') as fhandle:
tests[json.dumps(params)] = {
'params': params,
'results': results
}
json.dump(tests, fhandle)
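# Layout of the JSON file written by record() (derived from the code above):
#   { json.dumps(params): {"params": {...},
#                          "results": [{"rows": ..., "qps": ..., "stmt": ...}, ...]} }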
@parameterized([
('simple', '_OBS_GetGeometryScores', 'NULL', 1),
('simple', '_OBS_GetGeometryScores', 'NULL', 500),
('simple', '_OBS_GetGeometryScores', 'NULL', 3000),
('complex', '_OBS_GetGeometryScores', 'NULL', 1),
('complex', '_OBS_GetGeometryScores', 'NULL', 500),
('complex', '_OBS_GetGeometryScores', 'NULL', 3000)
])
def test_getgeometryscores_performance(geom_complexity, api_method, filters, target_geoms):
print(api_method, geom_complexity, filters, target_geoms)
rownums = (1, 5, 10, ) if 'complex' in geom_complexity else (5, 25, 50,)
results = []
for rows in rownums:
stmt = '''SELECT {schema}{api_method}(geom, {filters}, {target_geoms})
FROM obs_perftest_{complexity}
WHERE cartodb_id <= {n}'''.format(
complexity=geom_complexity,
schema='cdb_observatory.' if USE_SCHEMA else '',
api_method=api_method,
filters=filters,
target_geoms=target_geoms,
n=rows)
start = time()
query(stmt)
end = time()
qps = (rows / (end - start))
results.append({
'rows': rows,
'qps': qps,
'stmt': stmt
})
print(rows, ': ', qps, ' QPS')
if 'OBS_RECORD_TEST' in os.environ:
record({
'geom_complexity': geom_complexity,
'api_method': api_method,
'filters': filters,
'target_geoms': target_geoms
}, results)
@parameterized([
('simple', 'OBS_GetMeasureByID', None, 'us.census.tiger.census_tract', None),
('complex', 'OBS_GetMeasureByID', None, 'us.census.tiger.county', None),
('simple', 'OBS_GetMeasure', 'predenominated', 'point', 'NULL'),
('simple', 'OBS_GetMeasure', 'predenominated', 'geom', 'NULL'),
('simple', 'OBS_GetMeasure', 'predenominated', 'offset_geom', 'NULL'),
('simple', 'OBS_GetMeasure', 'area', 'point', 'NULL'),
('simple', 'OBS_GetMeasure', 'area', 'geom', 'NULL'),
('simple', 'OBS_GetMeasure', 'area', 'offset_geom', 'NULL'),
('simple', 'OBS_GetMeasure', 'denominator', 'point', 'NULL'),
('simple', 'OBS_GetMeasure', 'denominator', 'geom', 'NULL'),
('simple', 'OBS_GetMeasure', 'denominator', 'offset_geom', 'NULL'),
('simple', 'OBS_GetCategory', None, 'point', 'NULL'),
('simple', 'OBS_GetCategory', None, 'geom', 'NULL'),
('simple', 'OBS_GetCategory', None, 'offset_geom', 'NULL'),
('simple', 'OBS_GetMeasure', 'predenominated', 'point', "'us.census.tiger.census_tract'"),
('simple', 'OBS_GetMeasure', 'predenominated', 'geom', "'us.census.tiger.census_tract'"),
('simple', 'OBS_GetMeasure', 'predenominated', 'offset_geom', "'us.census.tiger.census_tract'"),
('simple', 'OBS_GetMeasure', 'area', 'point', "'us.census.tiger.census_tract'"),
('simple', 'OBS_GetMeasure', 'area', 'geom', "'us.census.tiger.census_tract'"),
('simple', 'OBS_GetMeasure', 'area', 'offset_geom', "'us.census.tiger.census_tract'"),
('simple', 'OBS_GetMeasure', 'denominator', 'point', "'us.census.tiger.census_tract'"),
('simple', 'OBS_GetMeasure', 'denominator', 'geom', "'us.census.tiger.census_tract'"),
('simple', 'OBS_GetMeasure', 'denominator', 'offset_geom', "'us.census.tiger.census_tract'"),
('simple', 'OBS_GetCategory', None, 'point', "'us.census.tiger.census_tract'"),
('simple', 'OBS_GetCategory', None, 'geom', "'us.census.tiger.census_tract'"),
('simple', 'OBS_GetCategory', None, 'offset_geom', "'us.census.tiger.census_tract'"),
('complex', 'OBS_GetMeasure', 'predenominated', 'geom', 'NULL'),
('complex', 'OBS_GetMeasure', 'predenominated', 'offset_geom', 'NULL'),
('complex', 'OBS_GetMeasure', 'area', 'geom', 'NULL'),
('complex', 'OBS_GetMeasure', 'area', 'offset_geom', 'NULL'),
('complex', 'OBS_GetMeasure', 'denominator', 'geom', 'NULL'),
('complex', 'OBS_GetMeasure', 'denominator', 'offset_geom', 'NULL'),
('complex', 'OBS_GetCategory', None, 'geom', 'NULL'),
('complex', 'OBS_GetCategory', None, 'offset_geom', 'NULL'),
('complex', 'OBS_GetMeasure', 'predenominated', 'geom', "'us.census.tiger.county'"),
('complex', 'OBS_GetMeasure', 'predenominated', 'offset_geom', "'us.census.tiger.county'"),
('complex', 'OBS_GetMeasure', 'area', 'geom', "'us.census.tiger.county'"),
('complex', 'OBS_GetMeasure', 'area', 'offset_geom', "'us.census.tiger.county'"),
('complex', 'OBS_GetMeasure', 'denominator', 'geom', "'us.census.tiger.county'"),
('complex', 'OBS_GetMeasure', 'denominator', 'offset_geom', "'us.census.tiger.county'"),
('complex', 'OBS_GetCategory', None, 'geom', "'us.census.tiger.census_tract'"),
('complex', 'OBS_GetCategory', None, 'offset_geom', "'us.census.tiger.census_tract'"),
])
def test_getmeasure_performance(geom_complexity, api_method, normalization, geom, boundary):
print(api_method, geom_complexity, normalization, geom, boundary)
col = 'measure' if 'measure' in api_method.lower() else 'category'
results = []
rownums = (1, 5, 10, ) if geom_complexity == 'complex' else (5, 25, 50, )
for rows in rownums:
stmt = '''UPDATE obs_perftest_{complexity}
SET {col} = {schema}{api_method}({args})
WHERE cartodb_id <= {n}'''.format(
col=col,
complexity=geom_complexity,
schema='cdb_observatory.' if USE_SCHEMA else '',
api_method=api_method,
args=ARGS[api_method, normalization].format(geom, boundary),
n=rows)
start = time()
query(stmt)
end = time()
qps = (rows / (end - start))
results.append({
'rows': rows,
'qps': qps,
'stmt': stmt
})
print(rows, ': ', qps, ' QPS')
if 'OBS_RECORD_TEST' in os.environ:
record({
'geom_complexity': geom_complexity,
'api_method': api_method,
'normalization': normalization,
'boundary': boundary,
'geom': geom
}, results)
@parameterized([
('simple', 'predenominated', 'point', 'null'),
('simple', 'predenominated', 'geom', 'null'),
('simple', 'predenominated', 'offset_geom', 'null'),
('simple', 'area', 'point', 'null'),
('simple', 'area', 'geom', 'null'),
('simple', 'area', 'offset_geom', 'null'),
('simple', 'denominator', 'point', 'null'),
('simple', 'denominator', 'geom', 'null'),
('simple', 'denominator', 'offset_geom', 'null'),
('simple', 'predenominated', 'point', "'us.census.tiger.census_tract'"),
('simple', 'predenominated', 'geom', "'us.census.tiger.census_tract'"),
('simple', 'predenominated', 'offset_geom', "'us.census.tiger.census_tract'"),
('simple', 'area', 'point', "'us.census.tiger.census_tract'"),
('simple', 'area', 'geom', "'us.census.tiger.census_tract'"),
('simple', 'area', 'offset_geom', "'us.census.tiger.census_tract'"),
('simple', 'denominator', 'point', "'us.census.tiger.census_tract'"),
('simple', 'denominator', 'geom', "'us.census.tiger.census_tract'"),
('simple', 'denominator', 'offset_geom', "'us.census.tiger.census_tract'"),
('complex', 'predenominated', 'geom', 'null'),
('complex', 'predenominated', 'offset_geom', 'null'),
('complex', 'area', 'geom', 'null'),
('complex', 'area', 'offset_geom', 'null'),
('complex', 'denominator', 'geom', 'null'),
('complex', 'denominator', 'offset_geom', 'null'),
('complex', 'predenominated', 'geom', "'us.census.tiger.county'"),
('complex', 'predenominated', 'offset_geom', "'us.census.tiger.county'"),
('complex', 'area', 'geom', "'us.census.tiger.county'"),
('complex', 'area', 'offset_geom', "'us.census.tiger.county'"),
('complex', 'denominator', 'geom', "'us.census.tiger.county'"),
('complex', 'denominator', 'offset_geom', "'us.census.tiger.county'"),
])
def test_getdata_performance(geom_complexity, normalization, geom, boundary):
print(geom_complexity, normalization, geom, boundary)
cols = ['us.census.acs.B01001002',
'us.census.acs.B01001003',
'us.census.acs.B01001004',
'us.census.acs.B01001005',
'us.census.acs.B01001006',
'us.census.acs.B01001007',
'us.census.acs.B01001008',
'us.census.acs.B01001009',
'us.census.acs.B01001010',
'us.census.acs.B01001011', ]
in_meta = [{"numer_id": col,
"normalization": normalization,
"geom_id": None if boundary.lower() == 'null' else boundary.replace("'", '')}
for col in cols]
rownums = (1, 5, 10, ) if geom_complexity == 'complex' else (10, 50, 100)
for num_meta in (1, 10, ):
results = []
for rows in rownums:
stmt = '''
with data as (
SELECT id, data FROM {schema}OBS_GetData(
(SELECT array_agg(({geom}, cartodb_id)::geomval)
FROM obs_perftest_{complexity}
WHERE cartodb_id <= {n}),
(SELECT {schema}OBS_GetMeta(
(SELECT st_setsrid(st_extent({geom}), 4326)
FROM obs_perftest_{complexity}
WHERE cartodb_id <= {n}),
'{in_meta}'::JSON
))
))
UPDATE obs_perftest_{complexity}
SET measure = (data->0->>'value')::Numeric
FROM data
WHERE obs_perftest_{complexity}.cartodb_id = data.id
;
'''.format(
point_or_poly='point' if geom == 'point' else 'polygon',
complexity=geom_complexity,
schema='cdb_observatory.' if USE_SCHEMA else '',
geom=geom,
in_meta=json.dumps(in_meta[0:num_meta]),
n=rows)
start = time()
query(stmt)
end = time()
qps = (rows / (end - start))
results.append({
'rows': rows,
'qps': qps,
'stmt': stmt
})
print(rows, ': ', qps, ' QPS')
if 'OBS_RECORD_TEST' in os.environ:
record({
'geom_complexity': geom_complexity,
'api_method': 'OBS_GetData',
'normalization': normalization,
'boundary': boundary,
'geom': geom,
'num_meta': str(num_meta)
}, results)
|
CartoDB/observatory-extension
|
src/python/test/perftest.py
|
Python
|
bsd-3-clause
| 13,951
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates Python source files from a mojom.Module."""
import re
from itertools import ifilter
import mojom.generate.generator as generator
import mojom.generate.data as data
import mojom.generate.module as mojom
from mojom.generate.template_expander import UseJinja
_kind_to_type = {
mojom.BOOL: '_descriptor.TYPE_BOOL',
mojom.INT8: '_descriptor.TYPE_INT8',
mojom.UINT8: '_descriptor.TYPE_UINT8',
mojom.INT16: '_descriptor.TYPE_INT16',
mojom.UINT16: '_descriptor.TYPE_UINT16',
mojom.INT32: '_descriptor.TYPE_INT32',
mojom.UINT32: '_descriptor.TYPE_UINT32',
mojom.INT64: '_descriptor.TYPE_INT64',
mojom.UINT64: '_descriptor.TYPE_UINT64',
mojom.FLOAT: '_descriptor.TYPE_FLOAT',
mojom.DOUBLE: '_descriptor.TYPE_DOUBLE',
mojom.STRING: '_descriptor.TYPE_STRING',
mojom.NULLABLE_STRING: '_descriptor.TYPE_NULLABLE_STRING',
mojom.HANDLE: '_descriptor.TYPE_HANDLE',
mojom.DCPIPE: '_descriptor.TYPE_HANDLE',
mojom.DPPIPE: '_descriptor.TYPE_HANDLE',
mojom.MSGPIPE: '_descriptor.TYPE_HANDLE',
mojom.SHAREDBUFFER: '_descriptor.TYPE_HANDLE',
mojom.NULLABLE_HANDLE: '_descriptor.TYPE_NULLABLE_HANDLE',
mojom.NULLABLE_DCPIPE: '_descriptor.TYPE_NULLABLE_HANDLE',
mojom.NULLABLE_DPPIPE: '_descriptor.TYPE_NULLABLE_HANDLE',
mojom.NULLABLE_MSGPIPE: '_descriptor.TYPE_NULLABLE_HANDLE',
mojom.NULLABLE_SHAREDBUFFER: '_descriptor.TYPE_NULLABLE_HANDLE',
}
# int64 integers are not handled by array.array. int64/uint64 arrays are
# supported, but their storage is not optimized (i.e. they are plain Python
# lists, not array.array).
_kind_to_typecode_for_native_array = {
mojom.INT8: 'b',
mojom.UINT8: 'B',
mojom.INT16: 'h',
mojom.UINT16: 'H',
mojom.INT32: 'i',
mojom.UINT32: 'I',
mojom.FLOAT: 'f',
mojom.DOUBLE: 'd',
}
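# Hedged illustration (added, not from the original source): an int32 mojom array can
# therefore be stored as array.array('i', ...), while int64/uint64 arrays fall back to
# plain Python lists as noted above.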
def NameToComponent(name):
# insert '_' between anything and a Title name (e.g, HTTPEntry2FooBar ->
# HTTP_Entry2_FooBar)
name = re.sub('([^_])([A-Z][^A-Z_]+)', r'\1_\2', name)
# insert '_' between non upper and start of upper blocks (e.g.,
# HTTP_Entry2_FooBar -> HTTP_Entry2_Foo_Bar)
name = re.sub('([^A-Z_])([A-Z])', r'\1_\2', name)
return [x.lower() for x in name.split('_')]
def UpperCamelCase(name):
return ''.join([x.capitalize() for x in NameToComponent(name)])
def CamelCase(name):
uccc = UpperCamelCase(name)
return uccc[0].lower() + uccc[1:]
def ConstantStyle(name):
components = NameToComponent(name)
if components[0] == 'k':
components = components[1:]
return '_'.join([x.upper() for x in components])
def FieldStyle(name):
components = NameToComponent(name)
return '_'.join([x.lower() for x in components])
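# Worked example (added for illustration; follows from the helpers above):
#   NameToComponent('HTTPEntry2FooBar') -> ['http', 'entry2', 'foo', 'bar']
#   UpperCamelCase('HTTPEntry2FooBar')  -> 'HttpEntry2FooBar'
#   ConstantStyle('HTTPEntry2FooBar')   -> 'HTTP_ENTRY2_FOO_BAR'
#   FieldStyle('HTTPEntry2FooBar')      -> 'http_entry2_foo_bar'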
def GetNameForElement(element):
if (mojom.IsEnumKind(element) or mojom.IsInterfaceKind(element) or
mojom.IsStructKind(element) or isinstance(element, mojom.Method)):
return UpperCamelCase(element.name)
if isinstance(element, mojom.EnumValue):
return (GetNameForElement(element.enum) + '.' +
ConstantStyle(element.name))
if isinstance(element, (mojom.NamedValue,
mojom.Constant)):
return ConstantStyle(element.name)
if isinstance(element, mojom.Field):
return FieldStyle(element.name)
raise Exception('Unexpected element: %s' % element)
def ExpressionToText(token):
if isinstance(token, (mojom.EnumValue, mojom.NamedValue)):
return str(token.computed_value)
if isinstance(token, mojom.BuiltinValue):
if token.value == 'double.INFINITY' or token.value == 'float.INFINITY':
return 'float(\'inf\')';
if (token.value == 'double.NEGATIVE_INFINITY' or
token.value == 'float.NEGATIVE_INFINITY'):
return 'float(\'-inf\')'
if token.value == 'double.NAN' or token.value == 'float.NAN':
return 'float(\'nan\')';
if token in ['true', 'false']:
return str(token == 'true')
return token
def GetFullyQualifiedName(kind):
name = []
if kind.imported_from:
name.append(kind.imported_from['python_module'])
name.append(GetNameForElement(kind))
return '.'.join(name)
def GetFieldType(kind, field=None):
if mojom.IsArrayKind(kind):
arguments = []
if kind.kind in _kind_to_typecode_for_native_array:
arguments.append('%r' % _kind_to_typecode_for_native_array[kind.kind])
elif kind.kind != mojom.BOOL:
arguments.append(GetFieldType(kind.kind))
if mojom.IsNullableKind(kind):
arguments.append('nullable=True')
if kind.length is not None:
arguments.append('length=%d' % kind.length)
array_type = 'GenericArrayType'
if kind.kind == mojom.BOOL:
array_type = 'BooleanArrayType'
elif kind.kind in _kind_to_typecode_for_native_array:
array_type = 'NativeArrayType'
return '_descriptor.%s(%s)' % (array_type, ', '.join(arguments))
if mojom.IsMapKind(kind):
arguments = [
GetFieldType(kind.key_kind),
GetFieldType(kind.value_kind),
]
if mojom.IsNullableKind(kind):
arguments.append('nullable=True')
return '_descriptor.MapType(%s)' % ', '.join(arguments)
if mojom.IsStructKind(kind):
arguments = [ 'lambda: %s' % GetFullyQualifiedName(kind) ]
if mojom.IsNullableKind(kind):
arguments.append('nullable=True')
return '_descriptor.StructType(%s)' % ', '.join(arguments)
if mojom.IsEnumKind(kind):
return GetFieldType(mojom.INT32)
if mojom.IsInterfaceKind(kind):
arguments = [ 'lambda: %s' % GetFullyQualifiedName(kind) ]
if mojom.IsNullableKind(kind):
arguments.append('nullable=True')
return '_descriptor.InterfaceType(%s)' % ', '.join(arguments)
if mojom.IsInterfaceRequestKind(kind):
arguments = []
if mojom.IsNullableKind(kind):
arguments.append('nullable=True')
return '_descriptor.InterfaceRequestType(%s)' % ', '.join(arguments)
return _kind_to_type[kind]
def GetFieldDescriptor(packed_field):
field = packed_field.field
class_name = 'SingleFieldGroup'
if field.kind == mojom.BOOL:
class_name = 'FieldDescriptor'
arguments = [ '%r' % GetNameForElement(field) ]
arguments.append(GetFieldType(field.kind, field))
arguments.append(str(packed_field.index))
arguments.append(str(packed_field.min_version))
if field.default:
if mojom.IsStructKind(field.kind):
arguments.append('default_value=True')
else:
arguments.append('default_value=%s' % ExpressionToText(field.default))
return '_descriptor.%s(%s)' % (class_name, ', '.join(arguments))
def GetFieldGroup(byte):
if byte.packed_fields[0].field.kind == mojom.BOOL:
descriptors = map(GetFieldDescriptor, byte.packed_fields)
return '_descriptor.BooleanGroup([%s])' % ', '.join(descriptors)
assert len(byte.packed_fields) == 1
return GetFieldDescriptor(byte.packed_fields[0])
def ComputeStaticValues(module):
in_progress = set()
computed = set()
def GetComputedValue(named_value):
if isinstance(named_value, mojom.EnumValue):
field = next(ifilter(lambda field: field.name == named_value.name,
named_value.enum.fields), None)
if not field:
raise RuntimeError(
'Unable to get computed value for field %s of enum %s' %
(named_value.name, named_value.enum.name))
if field not in computed:
ResolveEnum(named_value.enum)
return field.computed_value
elif isinstance(named_value, mojom.ConstantValue):
ResolveConstant(named_value.constant)
named_value.computed_value = named_value.constant.computed_value
return named_value.computed_value
else:
print named_value
def ResolveConstant(constant):
if constant in computed:
return
if constant in in_progress:
raise RuntimeError('Circular dependency for constant: %s' % constant.name)
in_progress.add(constant)
if isinstance(constant.value, (mojom.EnumValue, mojom.ConstantValue)):
computed_value = GetComputedValue(constant.value)
else:
computed_value = ExpressionToText(constant.value)
constant.computed_value = computed_value
in_progress.remove(constant)
computed.add(constant)
def ResolveEnum(enum):
def ResolveEnumField(enum, field, default_value):
if field in computed:
return
if field in in_progress:
raise RuntimeError('Circular dependency for enum: %s' % enum.name)
in_progress.add(field)
if field.value:
if isinstance(field.value, mojom.EnumValue):
computed_value = GetComputedValue(field.value)
elif isinstance(field.value, str):
computed_value = int(field.value, 0)
else:
raise RuntimeError('Unexpected value: %s' % field.value)
else:
computed_value = default_value
field.computed_value = computed_value
in_progress.remove(field)
computed.add(field)
current_value = 0
for field in enum.fields:
ResolveEnumField(enum, field, current_value)
current_value = field.computed_value + 1
for constant in module.constants:
ResolveConstant(constant)
for enum in module.enums:
ResolveEnum(enum)
for struct in module.structs:
for constant in struct.constants:
ResolveConstant(constant)
for enum in struct.enums:
ResolveEnum(enum)
for field in struct.fields:
if isinstance(field.default, (mojom.ConstantValue, mojom.EnumValue)):
field.default.computed_value = GetComputedValue(field.default)
return module
def MojomToPythonImport(mojom):
return mojom.replace('.mojom', '_mojom')
class Generator(generator.Generator):
python_filters = {
'expression_to_text': ExpressionToText,
'field_group': GetFieldGroup,
'fully_qualified_name': GetFullyQualifiedName,
'name': GetNameForElement,
'response_struct_from_method': generator.GetResponseStructFromMethod,
'struct_from_method': generator.GetStructFromMethod,
}
@UseJinja('python_templates/module.py.tmpl', filters=python_filters)
def GeneratePythonModule(self):
return {
'enums': self.module.enums,
'imports': self.GetImports(),
'interfaces': self.module.interfaces,
'module': ComputeStaticValues(self.module),
'namespace': self.module.namespace,
'structs': self.GetStructs(),
}
def GenerateFiles(self, args):
import_path = MojomToPythonImport(self.module.name)
self.Write(self.GeneratePythonModule(),
self.MatchMojomFilePath('%s.py' % import_path))
def GetImports(self):
for each in self.module.imports:
each['python_module'] = MojomToPythonImport(each['module_name'])
return self.module.imports
def GetJinjaParameters(self):
return {
'lstrip_blocks': True,
'trim_blocks': True,
}
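# A minimal self-check sketch of the naming helpers above, assuming the
# module's imports resolve when run directly; the sample identifiers are
# arbitrary and mirror the examples in the NameToComponent comments.
if __name__ == '__main__':
  # Case and underscore boundaries become lower-case components.
  assert NameToComponent('HTTP_Entry2_FooBar') == ['http', 'entry2', 'foo', 'bar']
  assert UpperCamelCase('HTTP_Entry2_FooBar') == 'HttpEntry2FooBar'
  assert CamelCase('HTTP_Entry2_FooBar') == 'httpEntry2FooBar'
  # ConstantStyle drops a leading 'k' component.
  assert ConstantStyle('kFooBar') == 'FOO_BAR'
  assert FieldStyle('FooBar') == 'foo_bar'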
|
sgraham/nope
|
third_party/mojo/src/mojo/public/tools/bindings/generators/mojom_python_generator.py
|
Python
|
bsd-3-clause
| 11,084
|
# A list of directory locations that other scripts will use to determine output locations
import os.path
# Path to the root directory of the project.
# Append to this to create absolute paths for the other locations
project_root = os.path.expanduser('~/Documents/co_processors/ParCNN/')
# Output location for the .h file used in verilog
network_params = project_root + 'Hardware/network_params.h'
# Output location for the read port mux .v file
read_port_mux = project_root + 'Hardware/v/read_port_mux.v'
# Output location for window selector .v file
window_selector = project_root + 'Hardware/v/window_selector.v'
# Directory with Kernel CSVs
kernel_path = project_root + 'Software/Verilog_Builder/kernel_base2/'
# Path to .h file that instantiates the kernel in verilog
kernel_defs = project_root + 'Hardware/kernel_defs.h'
|
alan4186/ParCNN
|
Software/Verilog_Builder/project_settings.py
|
Python
|
mit
| 834
|
import copy
from datetime import datetime, timedelta
from django.contrib.contenttypes.models import ContentType
from django.db import IntegrityError
from django.db import models, transaction
from django.db.models import Q
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.utils.timezone import now as utcnow
from django.utils.timezone import utc
TIME_CURRENT = datetime.max.replace(tzinfo=utc)
TIME_RESOLUTION = timedelta(0, 0, 1) # = 1 microsecond
class MasterObject(models.Model):
content_type = models.ForeignKey(ContentType)
def __unicode__(self):
return u'{}[{}]'.format(self.content_type, self.pk)
def get_all(self):
"""Returns BitemporalQuerySet view of this object"""
return self.content_type.model_class().objects.filter(_master=self)
def get_current(self):
return self.content_type.model_class().objects.filter(_master=self).current().get()
class BitemporalQuerySet(QuerySet):
@transaction.commit_on_success
def delete(self):
for obj in self:
obj.delete()
def during(self, valid_start, valid_end=None):
if valid_end is None:
            # A single date value searches a single point in time
valid_end = valid_start + TIME_RESOLUTION
# Any equal times are outside the period
# because of end point exclusion
return self.filter(
# obj.start before p.end OR obj.end after p.start
Q(_valid_start_date__lt=valid_end) |
Q(_valid_end_date__gt=valid_start),
).exclude(
# BUT NOT
            # obj.end before p.start OR obj.start after p.end
Q(_valid_end_date__lte=valid_start) |
Q(_valid_start_date__gte=valid_end)
)
def active_during(self, txn_start, txn_end=None):
if txn_end is None:
            # A single date value searches a single point in time
txn_end = txn_start + TIME_RESOLUTION
# Any equal times are outside the period
# because of end point exclusion
return self.filter(
# obj.start before p.end OR obj.end after p.start
Q(_txn_start_date__lt=txn_end) |
Q(_txn_end_date__gt=txn_start),
).exclude(
# BUT NOT
            # obj.end before p.start OR obj.start after p.end
Q(_txn_end_date__lte=txn_start) |
Q(_txn_start_date__gte=txn_end)
)
def active(self):
return self.filter(
# transaction active
_txn_end_date=TIME_CURRENT,
)
def current(self):
return self.active().during(utcnow())
class BitemporalManager(Manager):
def get_query_set(self):
return BitemporalQuerySet(self.model, using=self._db)
def during(self, valid_start, valid_end=None):
return self.get_query_set().during(valid_start, valid_end)
def active_during(self, txn_start, txn_end=None):
        return self.get_query_set().active_during(txn_start, txn_end)
def active(self):
        return self.get_query_set().active()
def current(self):
return self.get_query_set().current()
class BitemporalModelBase(models.Model):
objects = BitemporalManager()
    # hands off: the underscored fields below are managed internally ("nicht fur der gefingerpoken")
_valid_start_date = models.DateTimeField()
_valid_end_date = models.DateTimeField(default=TIME_CURRENT)
_txn_start_date = models.DateTimeField(auto_now_add=True)
_txn_end_date = models.DateTimeField(default=TIME_CURRENT)
_master = models.ForeignKey(MasterObject, related_name='+')
@property
def master(self):
try:
return self._master
except MasterObject.DoesNotExist:
return None
@property
def valid_start_date(self):
return self._valid_start_date
@property
def valid_end_date(self):
return self._valid_end_date
@property
def txn_start_date(self):
return self._txn_start_date
@property
def txn_end_date(self):
return self._txn_end_date
class Meta:
abstract = True
# This is true, but doesn't really help anything, doesn't imply the
# non-overlap requirement in active rows
# unique_together = [
# ('id', '_valid_start_date', '_valid_end_date', '_txn_end_date'),
# ]
ordering = ('_valid_start_date', )
def _original(self):
return self.__class__.objects.get(pk=self.pk)
def save(self, as_of=None, force_insert=False, force_update=False, using=None, update_fields=None):
""" if as_of is provided, self.valid_start_date is set to it.
if self.valid_start_date is undefined, it is set to now.
"""
now = utcnow()
if self.pk and update_fields and tuple(update_fields) != ('_txn_end_date', ):
raise IntegrityError('Attempted re-save of {} object, pk: {}'.format(
self.__class__.__name__, self.pk))
# _valid_start_date resolves in this order:
        # as_of (override), self.valid_start_date (existing value), now() (default)
if as_of is not None:
self._valid_start_date = as_of
if self.valid_start_date is None:
self._valid_start_date = now
if self.txn_end_date != TIME_CURRENT and self.txn_end_date > now:
raise IntegrityError('txn_end_date date {} may not be in the future'.format(
self.txn_end_date))
if self.valid_start_date > self.valid_end_date:
raise IntegrityError('valid_start_date date {} must precede valid_end_date {}'.format(
self.valid_start_date, self.valid_end_date))
# _txn_start_date is None before first save
if self.txn_start_date and self.txn_start_date > self.txn_end_date:
raise IntegrityError('txn_start_date date {} must precede txn_end_date {}'.format(
self.txn_start_date, self.txn_end_date))
if self.txn_start_date is None and self.txn_end_date != TIME_CURRENT:
raise IntegrityError('txn_end_date {} must be TIME_CURRENT for new transactions'.format(
self.txn_end_date))
# Create a new master object if we don't have one already
if self.master is None:
new_master = MasterObject(content_type=ContentType.objects.get_for_model(self))
new_master.save()
self._master = new_master
# TODO: why save_base and not super().save() (used to be)
super(BitemporalModelBase, self).save(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
@transaction.commit_on_success
def save_during(self, valid_start, valid_end=None, using=None):
now = utcnow()
if valid_end is None:
valid_end = TIME_CURRENT
        # Iterate over rows while invalidating them
def row_builder(rows):
for row in rows:
row._txn_end_date = now
row.save(using=using, update_fields=['_txn_end_date',])
yield row
# New objects don't have a master yet
if self.master:
# All rows with data in the time period (includes self._original() if needed)
old_rows = row_builder(self.master.get_all().active().during(valid_start, valid_end))
try:
resume_through = None
old = old_rows.next()
if old.valid_start_date < valid_start:
if old.valid_end_date > valid_end:
# update inside single larger record
# save old end_date for later
resume_through = old.valid_end_date
# Old value exists before update, set valid_end_date
old.pk = None
old._valid_end_date = valid_start
old._txn_start_date = now
old._txn_end_date = TIME_CURRENT
old.save(using=using)
if resume_through is not None:
# Old value continues after update
old.pk = None
old._valid_start_date = valid_end
old._valid_end_date = resume_through
# txn times still now/TIME_CURRENT
old.save(using=using)
old = old_rows.next()
if old.valid_start_date == valid_start:
# Old start is exactly the same, it is being updated and will have no valid period
if old.valid_end_date > valid_end:
# Old value continues after update
old.pk = None
old._valid_start_date = valid_end
old._txn_start_date = now
old._txn_end_date = TIME_CURRENT
old.save(using=using)
old = old_rows.next()
while True:
# old.valid_start_date is > valid_start (and < valid_end)
if old.valid_end_date > valid_end:
# old value exists beyond valid_end
# Old value continues after update
old.pk = None
old._valid_start_date = valid_end
old._txn_start_date = now
old._txn_end_date = TIME_CURRENT
old.save(using=using)
# This will stop the while, not hit the try/except
old = old_rows.next()
except StopIteration:
pass
# Save a new row
self.pk = None
self._valid_start_date = valid_start
self._valid_end_date = valid_end
self._txn_start_date = now
self._txn_end_date = TIME_CURRENT
self.save(using=using)
@transaction.commit_on_success
def amend(self, as_of=None, using=None):
"""
Invalidate self
Write old data with valid_end set
write new data
"""
now = utcnow()
if as_of is None:
as_of = now
if self.txn_end_date != TIME_CURRENT:
#Raise error, must change an active row
raise IntegrityError('[{}] pk: {} is not an active row'.format(
self.__class__.__name__, self.pk))
if as_of > self.valid_end_date:
raise IntegrityError('as_of date {} must precede valid_end_date {}'.format(
as_of, self.valid_end_date))
old_self = self._original()
if old_self.valid_start_date != self.valid_start_date:
raise IntegrityError('You may not change valid_start_date in an update or amend, use save_during')
if self.valid_end_date != old_self.valid_end_date:
raise IntegrityError('You may not change valid_end_date in an update or amend, use save_during')
# Optimized for replacing a single row
# invalidate previous row
old_self._txn_end_date = now
old_self.save(using=using, update_fields=['_txn_end_date',])
# If valid_start == as_of, don't save a new row that covers no time
# This was an update
if old_self.valid_start_date != as_of :
# Save new row with updated valid end date
old_self.pk = None
old_self._txn_start_date = now
old_self._txn_end_date = TIME_CURRENT
old_self._valid_end_date = as_of
# save change
old_self.save(using=using)
# Save self as new row
self.pk = None
self._txn_start_date = now
self._txn_end_date = TIME_CURRENT
self._valid_start_date = as_of
self.save(using=using)
def update(self, using=None):
"""
an amend where:
old values were never true, valid_date range will be null
"""
self.amend(as_of=self.valid_start_date, using=using)
def eradicate(self, *args, **kwargs):
return super(BitemporalModelBase, self).delete(*args, **kwargs)
@transaction.commit_on_success
def delete(self, as_of=None, using=None):
"""
Invalidate self
Write new row with valid_end_date set
"""
now = utcnow()
if as_of is None:
as_of = now
if self.valid_end_date != TIME_CURRENT:
raise IntegrityError('Cannot delete non-current object')
# Refetch data so we don't update any fields
old_self = self._original()
old_self._txn_end_date = now
# invalidate previous row
old_self.save(using=using, update_fields=['_txn_end_date',])
# Save new row with valid end date
old_self.pk = None
old_self._txn_start_date = now
old_self._txn_end_date = TIME_CURRENT
old_self._valid_end_date = as_of
# save change
old_self.save(using=using)
return old_self
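# A minimal usage sketch, assuming a hypothetical concrete model called
# "Employee"; it is kept commented out because declaring a model here would
# register it with Django at import time.
#
# class Employee(BitemporalModelBase):
#     name = models.CharField(max_length=100)
#     title = models.CharField(max_length=100)
#     objects = BitemporalManager()
#
# emp = Employee(name='Ada', title='Engineer')
# emp.save()                       # first row, valid from now, transaction open
# emp.title = 'Senior Engineer'
# emp.amend()                      # closes the old row as of now, writes a new one
# Employee.objects.current()       # rows that are both valid and active right now
# emp.delete()                     # ends the valid period while keeping history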
|
finiteloopsoftware/django-bitemporal
|
bitemporal/models.py
|
Python
|
bsd-3-clause
| 13,067
|
"""Add is_locked field to testable.
Revision ID: 344c3f86394c
Revises: 15e554bd88aa
Create Date: 2013-10-14 21:02:27.239190
"""
# revision identifiers, used by Alembic.
revision = '344c3f86394c'
down_revision = '15e554bd88aa'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('testable', sa.Column('is_locked', sa.Boolean(),
server_default=u'0', nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('testable', 'is_locked')
### end Alembic commands ###
|
ucsb-cs/submit
|
submit/migrations/versions/344c3f86394c_add_is_locked_field_.py
|
Python
|
bsd-2-clause
| 696
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import MySQLdb
import sys
def time_data_clean(time_data):
rating = [0.0]*len(time_data)
time = [0]*len(time_data)
rating = [x[0] for x in time_data]
time = [x[1] for x in time_data]
return rating, time
def main():
db = MySQLdb.connect(host="localhost", user="root", db = "home_kitchen")
cursor = db.cursor()
tablename = 'all_hk'
prod_id = sys.argv[1]
#Get time & score from table
sql = "Select RTime, RScore From " +tablename + " Where PID = " + '"' + prod_id +'";'
cursor.execute(sql)
time_data = cursor.fetchall()
time_data = sorted(time_data)
rating = zip(*time_data)[1]
time = zip(*time_data)[0]
#plot the review scores with time, raw data
fig = plt.figure(figsize=(10, 5), dpi=100)
plt.scatter(*zip(*time_data))
#plt.gca().xaxis.set_major_formatter(ticker.ScalarFormatter(useOffset = False))
plt.title("Ratings with Time")
plt.ylabel("Ratings")
plt.xlabel("Time (Unix Timestamp)")
plt.show()
avg = [0]*len(time)
avg[0] = rating[0]
for k in range(1, len(time)):
avg[k]= np.mean(rating[:k])
#plot the average review with time
fig = plt.figure(figsize=(10, 5), dpi=100)
plt.scatter(time, avg)
plt.title("Avg Rating Over Time")
plt.ylabel("Avg Rating")
plt.xlabel("Time (Unix Timestamp)")
plt.show()
if __name__ == '__main__':
main()
|
KeltyAllen/Amazon-Reviews-Project
|
app/Rating_Avg.py
|
Python
|
mit
| 1,403
|
# pyOCD debugger
# Copyright (c) 2015-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .fpb import HardwareBreakpoint
from ..core.target import Target
from .component import CoreSightComponent
import logging
LOG = logging.getLogger(__name__)
# Need a local copy to prevent circular import.
# Debug Exception and Monitor Control Register
DEMCR = 0xE000EDFC
# DWTENA in armv6 architecture reference manual
DEMCR_TRCENA = (1 << 24)
class Watchpoint(HardwareBreakpoint):
def __init__(self, comp_register_addr, provider):
super(Watchpoint, self).__init__(comp_register_addr, provider)
self.addr = 0
self.size = 0
self.func = 0
class DWT(CoreSightComponent):
"""@brief Data Watchpoint and Trace version 1.0"""
# DWT registers
#
# The addresses are offsets from the base address.
DWT_CTRL = 0x00000000
DWT_CYCCNT = 0x00000004
DWT_CPICNT = 0x00000008
DWT_EXCCNT = 0x0000000C
DWT_SLEEPCNT = 0x00000010
DWT_LSUCNT = 0x00000014
DWT_FOLDCNT = 0x00000018
DWT_PCSR = 0x0000001C
DWT_COMP_BASE = 0x00000020
DWT_MASK_OFFSET = 4
DWT_FUNCTION_OFFSET = 8
DWT_COMP_BLOCK_SIZE = 0x10
DWT_CTRL_NUM_COMP_MASK = (0xF << 28)
DWT_CTRL_NUM_COMP_SHIFT = 28
DWT_CTRL_CYCEVTENA_MASK = (1 << 22)
DWT_CTRL_FOLDEVTENA_MASK = (1 << 21)
DWT_CTRL_LSUEVTENA_MASK = (1 << 20)
DWT_CTRL_SLEEPEVTENA_MASK = (1 << 19)
DWT_CTRL_EXCEVTENA_MASK = (1 << 18)
DWT_CTRL_CPIEVTENA_MASK = (1 << 17)
DWT_CTRL_EXCTRCENA_MASK = (1 << 16)
DWT_CTRL_PCSAMPLENA_MASK = (1 << 12)
DWT_CTRL_SYNCTAP_MASK = (0x3 << 10)
DWT_CTRL_SYNCTAP_SHIFT = 10
DWT_CTRL_CYCTAP_MASK = (1 << 9)
DWT_CTRL_POSTINIT_MASK = (0xF << 5)
DWT_CTRL_POSTINIT_SHIFT = 5
DWT_CTRL_POSTRESET_MASK = (0xF << 1)
DWT_CTRL_POSTRESET_SHIFT = 1
DWT_CTRL_CYCCNTENA_MASK = (1 << 0)
WATCH_TYPE_TO_FUNCT = {
Target.WatchpointType.READ: 5,
Target.WatchpointType.WRITE: 6,
Target.WatchpointType.READ_WRITE: 7,
5: Target.WatchpointType.READ,
6: Target.WatchpointType.WRITE,
7: Target.WatchpointType.READ_WRITE,
}
    # Only sizes that are powers of 2 are supported.
    # Watchpoint size = 2**MASK
WATCH_SIZE_TO_MASK = dict((2**i, i) for i in range(0,32))
def __init__(self, ap, cmpid=None, addr=None):
super(DWT, self).__init__(ap, cmpid, addr)
self.watchpoints = []
self.watchpoint_used = 0
self.dwt_configured = False
@property
def watchpoint_count(self):
return len(self.watchpoints)
def init(self):
"""@brief Inits the DWT.
Reads the number of hardware watchpoints available on the core and makes sure that they
are all disabled and ready for future use.
"""
# Make sure trace is enabled.
demcr = self.ap.read_memory(DEMCR)
if (demcr & DEMCR_TRCENA) == 0:
demcr |= DEMCR_TRCENA
self.ap.write_memory(DEMCR, demcr)
dwt_ctrl = self.ap.read_memory(self.address + self.DWT_CTRL)
watchpoint_count = (dwt_ctrl & self.DWT_CTRL_NUM_COMP_MASK) >> self.DWT_CTRL_NUM_COMP_SHIFT
LOG.info("%d hardware watchpoints", watchpoint_count)
for i in range(watchpoint_count):
comparatorAddress = self.address + self.DWT_COMP_BASE + self.DWT_COMP_BLOCK_SIZE * i
self.watchpoints.append(Watchpoint(comparatorAddress, self))
self.ap.write_memory(comparatorAddress + self.DWT_FUNCTION_OFFSET, 0)
# Enable cycle counter.
self.ap.write32(self.address + self.DWT_CTRL, self.DWT_CTRL_CYCCNTENA_MASK)
self.dwt_configured = True
def find_watchpoint(self, addr, size, type):
for watch in self.watchpoints:
if watch.addr == addr and watch.size == size and watch.func == self.WATCH_TYPE_TO_FUNCT[type]:
return watch
return None
def set_watchpoint(self, addr, size, type):
"""@brief Set a hardware watchpoint."""
if self.dwt_configured is False:
self.init()
watch = self.find_watchpoint(addr, size, type)
if watch is not None:
return True
if type not in self.WATCH_TYPE_TO_FUNCT:
LOG.error("Invalid watchpoint type %i", type)
return False
for watch in self.watchpoints:
if watch.func == 0:
watch.addr = addr
watch.func = self.WATCH_TYPE_TO_FUNCT[type]
watch.size = size
if size not in self.WATCH_SIZE_TO_MASK:
LOG.error('Watchpoint of size %d not supported by device', size)
return False
mask = self.WATCH_SIZE_TO_MASK[size]
self.ap.write_memory(watch.comp_register_addr + self.DWT_MASK_OFFSET, mask)
if self.ap.read_memory(watch.comp_register_addr + self.DWT_MASK_OFFSET) != mask:
LOG.error('Watchpoint of size %d not supported by device', size)
return False
self.ap.write_memory(watch.comp_register_addr, addr)
self.ap.write_memory(watch.comp_register_addr + self.DWT_FUNCTION_OFFSET, watch.func)
self.watchpoint_used += 1
return True
LOG.error('No more watchpoints are available, dropped watchpoint at 0x%08x', addr)
return False
def remove_watchpoint(self, addr, size, type):
"""@brief Remove a hardware watchpoint."""
watch = self.find_watchpoint(addr, size, type)
if watch is None:
return
watch.func = 0
self.ap.write_memory(watch.comp_register_addr + self.DWT_FUNCTION_OFFSET, 0)
self.watchpoint_used -= 1
def remove_all_watchpoints(self):
for watch in self.watchpoints:
if watch.func != 0:
self.remove_watchpoint(watch.addr, watch.size, self.WATCH_TYPE_TO_FUNCT[watch.func])
def get_watchpoints(self):
return [watch for watch in self.watchpoints if watch.func != 0]
@property
def cycle_count(self):
return self.ap.read32(self.address + self.DWT_CYCCNT)
@cycle_count.setter
def cycle_count(self, value):
self.ap.write32(self.address + self.DWT_CYCCNT, value)
class DWTv2(DWT):
"""@brief Data Watchpoint and Trace version 2.x
This version is present in v8-M platforms.
- DWT 2.0 appears in v8.0-M
- DWT 2.1 appears in v8.1-M and adds the VMASKn registers.
"""
DWT_ACTION_DEBUG_EVENT = 0x00000010
## Map from watchpoint type to FUNCTIONn.MATCH field value.
WATCH_TYPE_TO_FUNCT = {
Target.WatchpointType.READ: 0b0110,
Target.WatchpointType.WRITE: 0b0101,
Target.WatchpointType.READ_WRITE: 0b0100,
0b0110: Target.WatchpointType.READ,
0b0101: Target.WatchpointType.WRITE,
0b0100: Target.WatchpointType.READ_WRITE,
}
## Map from data access size to pre-shifted DATAVSIZE field value.
DATAVSIZE_MAP = {
1: (0 << 10),
2: (1 << 10),
4: (2 << 10),
}
def set_watchpoint(self, addr, size, type):
"""@brief Set a hardware watchpoint."""
if self.dwt_configured is False:
self.init()
watch = self.find_watchpoint(addr, size, type)
if watch is not None:
return True
if type not in self.WATCH_TYPE_TO_FUNCT:
LOG.error("Invalid watchpoint type %i", type)
return False
# Only support sizes that can be handled with a single comparator.
if size not in (1, 2, 4):
LOG.error("Invalid watchpoint size %d", size)
return False
for watch in self.watchpoints:
if watch.func == 0:
watch.addr = addr
watch.func = self.WATCH_TYPE_TO_FUNCT[type]
watch.size = size
# Build FUNCTIONn register value.
value = self.DATAVSIZE_MAP[size] | self.DWT_ACTION_DEBUG_EVENT | watch.func
self.ap.write_memory(watch.comp_register_addr, addr)
self.ap.write_memory(watch.comp_register_addr + self.DWT_FUNCTION_OFFSET, value)
self.watchpoint_used += 1
return True
LOG.error('No more watchpoints are available, dropped watchpoint at 0x%08x', addr)
return False
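# A minimal usage sketch, assuming `ap` is a MEM-AP-like object providing the
# read_memory/write_memory/read32/write32 calls used above, and that `Target`
# is the class imported at the top of this module; address and size are arbitrary.
#
# dwt = DWT(ap)
# dwt.init()                # sets DEMCR.TRCENA, discovers comparators, enables CYCCNT
# dwt.set_watchpoint(0x20000400, 4, Target.WatchpointType.WRITE)
# print(dwt.cycle_count)    # current CYCCNT value
# dwt.remove_all_watchpoints()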
|
pyocd/pyOCD
|
pyocd/coresight/dwt.py
|
Python
|
apache-2.0
| 9,308
|
import praw
import obot
from steamapi import getheroes, getproplayerlist, getschema, getleaguelisting
from reddit import botinfo
from reddit import workerdeletebadcomments, workerfindcomments, workerdeleterequestedcomments
import threading
from reddit.botinfo import message
#message = True
class LoginReddit:
def __init__(self):
if message: print('[loginreddit] logging in')
r = praw.Reddit(client_id=obot.client_id,
client_secret=obot.client_secret,
user_agent=obot.user_agent,
username=obot.username,
password=obot.password)
if message: print('[loginreddit] logging successful')
if message: print('[loginreddit] updating heroDictionary')
getheroes.requestGetHeroes()
if message: print('[loginreddit] updating heroDictionary success')
if message: print('[loginreddit] updating proPlayerDictionary')
getproplayerlist.requestGetProPlayerList()
if message: print('[loginreddit] updating proPlayerDictionary success')
#if message: print('[loginreddit] updating dota 2 items_game schema')
#getschema.requestGetSchema()
#if message: print('[loginreddit] updating dota 2 items_game schema success')
if message: print('[loginreddit] starting threads')
if message: print('[loginreddit] starting deleteBadComments thread')
t = threading.Thread(target=workerdeletebadcomments.deleteBadComments , args = (r,))
t.start()
if message: print('[loginreddit] starting findComments thread')
t = threading.Thread(target=workerfindcomments.findComments, args = (r,))
t.start()
if message: print('[loginreddit] starting deleteRequestedComments thread')
t = threading.Thread(target=workerdeleterequestedcomments.deleteRequestedComments, args = (r,))
t.start()
if message: print('[loginreddit] starting threads success')
self.r = r
|
NNTin/Reply-Dota-2-Reddit
|
reddit/loginreddit.py
|
Python
|
mit
| 2,023
|
from Tools.Profile import profile
profile("LOAD:GUISkin")
from Components.GUISkin import GUISkin
profile("LOAD:Source")
from Components.Sources.Source import Source
profile("LOAD:GUIComponent")
from Components.GUIComponent import GUIComponent
profile("LOAD:eRCInput")
from enigma import eRCInput
class Screen(dict, GUISkin):
False, SUSPEND_STOPS, SUSPEND_PAUSES = range(3)
ALLOW_SUSPEND = False
global_screen = None
def __init__(self, session, parent = None):
dict.__init__(self)
self.skinName = self.__class__.__name__
self.session = session
self.parent = parent
GUISkin.__init__(self)
self.onClose = [ ]
self.onFirstExecBegin = [ ]
self.onExecBegin = [ ]
self.onExecEnd = [ ]
self.onShown = [ ]
self.onShow = [ ]
self.onHide = [ ]
self.execing = False
self.shown = True
# already shown is false until the screen is really shown (after creation)
self.already_shown = False
self.renderer = [ ]
# in order to support screens *without* a help,
# we need the list in every screen. how ironic.
self.helpList = [ ]
self.close_on_next_exec = None
# stand alone screens (for example web screens)
# don't care about having or not having focus.
self.stand_alone = False
self.keyboardMode = None
def saveKeyboardMode(self):
rcinput = eRCInput.getInstance()
self.keyboardMode = rcinput.getKeyboardMode()
def setKeyboardModeAscii(self):
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmAscii)
def setKeyboardModeNone(self):
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmNone)
def restoreKeyboardMode(self):
rcinput = eRCInput.getInstance()
if self.keyboardMode is not None:
rcinput.setKeyboardMode(self.keyboardMode)
def execBegin(self):
self.active_components = [ ]
if self.close_on_next_exec is not None:
tmp = self.close_on_next_exec
self.close_on_next_exec = None
self.execing = True
self.close(*tmp)
else:
single = self.onFirstExecBegin
self.onFirstExecBegin = []
for x in self.onExecBegin + single:
x()
if not self.stand_alone and self.session.current_dialog != self:
return
# assert self.session == None, "a screen can only exec once per time"
# self.session = session
for val in self.values() + self.renderer:
val.execBegin()
if not self.stand_alone and self.session.current_dialog != self:
return
self.active_components.append(val)
self.execing = True
for x in self.onShown:
x()
def execEnd(self):
active_components = self.active_components
# for (name, val) in self.items():
self.active_components = None
for val in active_components:
val.execEnd()
# assert self.session != None, "execEnd on non-execing screen!"
# self.session = None
self.execing = False
for x in self.onExecEnd:
x()
# never call this directly - it will be called from the session!
def doClose(self):
self.hide()
for x in self.onClose:
x()
# fixup circular references
del self.helpList
GUISkin.close(self)
# first disconnect all render from their sources.
# we might split this out into a "unskin"-call,
# but currently we destroy the screen afterwards
# anyway.
for val in self.renderer:
val.disconnectAll() # disconnected converter/sources and probably destroy them. Sources will not be destroyed.
del self.session
for (name, val) in self.items():
val.destroy()
del self[name]
self.renderer = [ ]
# really delete all elements now
self.__dict__.clear()
def close(self, *retval):
if not self.execing:
self.close_on_next_exec = retval
else:
self.session.close(self, *retval)
def setFocus(self, o):
self.instance.setFocus(o.instance)
def show(self):
# Temporarily add to ease up identification of screens
print '[SCREENNAME] ',self.skinName
if (self.shown and self.already_shown) or not self.instance:
return
self.shown = True
self.already_shown = True
self.instance.show()
for x in self.onShow:
x()
for val in self.values() + self.renderer:
if isinstance(val, GUIComponent) or isinstance(val, Source):
val.onShow()
def hide(self):
if not self.shown or not self.instance:
return
self.shown = False
self.instance.hide()
for x in self.onHide:
x()
for val in self.values() + self.renderer:
if isinstance(val, GUIComponent) or isinstance(val, Source):
val.onHide()
def setAnimationMode(self, mode):
if self.instance:
self.instance.setAnimationMode(mode)
def __repr__(self):
return str(type(self))
def getRelatedScreen(self, name):
if name == "session":
return self.session.screen
elif name == "parent":
return self.parent
elif name == "global":
return self.global_screen
else:
return None
|
devclone/enigma2-9f38fd6
|
lib/python/Screens/Screen.py
|
Python
|
gpl-2.0
| 4,739
|
"""
measurement_control.py is becoming very large;
this file is intended for small helpers that keep the main file cleaner.
"""
from collections.abc import Iterable
from scipy.spatial import ConvexHull
import numpy as np
def scale_bounds(af_pars, x_scale=None):
if x_scale is not None:
for b_name in ["bounds", "dimensions"]:
if b_name in af_pars.keys():
# ND hull compatible with adaptive learners
bounds = af_pars[b_name]
if isinstance(bounds, ConvexHull):
vertices = bounds.points[bounds.vertices]
scale = np.array(x_scale)
scaled_vertices = vertices * scale
scaled_hull = ConvexHull(scaled_vertices)
af_pars[b_name] = scaled_hull
# 1D
elif not isinstance(bounds[0], Iterable):
scaled_bounds = tuple(b * x_scale for b in bounds)
af_pars[b_name] = scaled_bounds
# ND
elif isinstance(bounds[0], Iterable):
scaled_bounds = tuple(
                        tuple(b * scale for b in bounds_dim) for
bounds_dim, scale in zip(bounds, x_scale)
)
af_pars[b_name] = scaled_bounds
return True
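# A minimal self-check sketch of scale_bounds for the 1D and ND tuple cases
# handled above; the bounds and scale factors are arbitrary.
if __name__ == "__main__":
    # 1D: a scalar x_scale multiplies both bound values.
    af_pars_1d = {"bounds": (0.0, 1.0)}
    scale_bounds(af_pars_1d, x_scale=2.0)
    assert af_pars_1d["bounds"] == (0.0, 2.0)

    # ND: one scale factor per dimension.
    af_pars_nd = {"bounds": ((0.0, 1.0), (0.0, 5.0))}
    scale_bounds(af_pars_nd, x_scale=(10.0, 1.0))
    assert af_pars_nd["bounds"] == ((0.0, 10.0), (0.0, 5.0))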
|
DiCarloLab-Delft/PycQED_py3
|
pycqed/measurement/measurement_control_helpers.py
|
Python
|
mit
| 1,347
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Mapping, Callable, Optional
from google.pubsub_v1 import PubsubMessage
from google.cloud.pubsublite.cloudpubsub.message_transforms import (
from_cps_publish_message,
)
from google.cloud.pubsublite.cloudpubsub.internal.single_publisher import (
AsyncSinglePublisher,
)
from google.cloud.pubsublite.internal.wire.publisher import Publisher
class AsyncSinglePublisherImpl(AsyncSinglePublisher):
_publisher_factory: Callable[[], Publisher]
_publisher: Optional[Publisher]
def __init__(self, publisher_factory: Callable[[], Publisher]):
"""
Accepts a factory for a Publisher instead of a Publisher because GRPC asyncio uses the current thread's event
loop.
"""
super().__init__()
self._publisher_factory = publisher_factory
self._publisher = None
async def publish(
self, data: bytes, ordering_key: str = "", **attrs: Mapping[str, str]
) -> str:
cps_message = PubsubMessage(
data=data, ordering_key=ordering_key, attributes=attrs
)
psl_message = from_cps_publish_message(cps_message)
return (await self._publisher.publish(psl_message)).encode()
async def __aenter__(self):
self._publisher = self._publisher_factory()
await self._publisher.__aenter__()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self._publisher.__aexit__(exc_type, exc_value, traceback)
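# A minimal usage sketch, assuming `make_wire_publisher` is some factory that
# returns an object implementing the wire Publisher interface; in practice the
# library wires this up when it constructs the client.
#
# async def demo(make_wire_publisher):
#     async with AsyncSinglePublisherImpl(make_wire_publisher) as publisher:
#         message_id = await publisher.publish(b"payload", ordering_key="key-1")
#         return message_id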
|
googleapis/python-pubsublite
|
google/cloud/pubsublite/cloudpubsub/internal/async_publisher_impl.py
|
Python
|
apache-2.0
| 2,060
|
# Copyright 2016 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helpers for generating documentation using Jinja2 templates.
"""
import jinja2
def filter_commandline(value, prompt="$ "):
return prompt + value.command
def filter_output(value):
return "\n ".join(value.output.split("\n"))
jinja2_filters = dict(commandline=filter_commandline,
output=filter_output)
def make_env():
env = jinja2.Environment()
for key, value in jinja2_filters.items():
env.filters[key] = value
return env
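# A minimal usage sketch of the filters above; "Run" is a throwaway stand-in
# for whatever object the real templates receive, since the filters only need
# .command and .output attributes.
if __name__ == "__main__":
    from collections import namedtuple

    Run = namedtuple("Run", ["command", "output"])
    run = Run(command="quark compile hello.q", output="compiled\nok")

    env = make_env()
    template = env.from_string("{{ run | commandline }}\n {{ run | output }}")
    print(template.render(run=run))
    # $ quark compile hello.q
    #  compiled
    #  ok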
|
bozzzzo/quark
|
quarkc/test/generate_docs.py
|
Python
|
apache-2.0
| 1,081
|
import tensorflow as tf
from odin.bay.vi.autoencoder.variational_autoencoder import \
VariationalAutoencoder
class SequentialVAE(VariationalAutoencoder):
r"""
References:
Yingzhen Li and Stephan Mandt. "Disentangled Sequential Autoencoder".
In _International Conference on Machine Learning_, 2018.
https://arxiv.org/abs/1803.02991
Fraccaro, M., Sønderby, S.K., Paquet, U., Winther, O., 2016.
"Sequential Neural Models with Stochastic Layers".
arXiv:1605.07571 [cs, stat]. (https://github.com/google/vae-seq)
Zhao, S., Song, J., Ermon, S., 2017. "Towards Deeper Understanding
of Variational Autoencoding Models". arXiv:1702.08658 [cs, stat].
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
class SequentialAttentionVAE(VariationalAutoencoder):
r"""
Reference:
Deng, Y., Kim, Y., Chiu, J., Guo, D., Rush, A.M., 2018.
"Latent Alignment and Variational Attention".
arXiv:1807.03756 [cs, stat].
Bahuleyan, H., Mou, L., Vechtomova, O., Poupart, P., 2017.
"Variational Attention for Sequence-to-Sequence Models".
arXiv:1712.08207 [cs].
https://github.com/HareeshBahuleyan/tf-var-attention
https://github.com/harvardnlp/var-attn/
"""
class VariationalRNN(VariationalAutoencoder):
r"""
Reference:
Chung, J., Kastner, K., Dinh, L., Goel, K., Courville, A.C., Bengio, Y.,
2015. "A Recurrent Latent Variable Model for Sequential Data",
Advances in Neural Information Processing Systems 28.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
|
imito/odin
|
odin/bay/vi/autoencoder/sequential_vae.py
|
Python
|
mit
| 1,596
|
# Copyright (C) 2014 Coders at Work
from keyvalue import KeyValueStore
from metadata import MetaDataFile
import os
import sys
last_component_id = 0
class InvalidComponentState(Exception):
def __init__(self, name, state):
super(InvalidComponentState, self).__init__(
u'Component "%s" is in an invalid state: %s.' % (name, state))
class ComponentState:
STOPPED = 0
STARTING = 1
RUNNING = 2
STOPPING = 3
class ComponentBase(KeyValueStore):
_habitat = None
_state = ComponentState.STOPPED
_name = None
_env = None
def __init__(self, habitat=None,
name=None,
deps=None,
env=None,
disabled=False,
**kwargs):
super(ComponentBase, self).__init__(**kwargs)
self._deps = deps or []
self._env = env
self._disabled = disabled
self._name = name
global last_component_id
last_component_id += 1
self._id = last_component_id
if habitat:
self.habitat = habitat
@property
def habitat(self):
return self._habitat
@habitat.setter
def habitat(self, value):
self.parent = value
self._habitat = value
@property
def deps(self):
if self._env:
return self._deps + [self._env]
else:
return self._deps
@property
def name(self):
if self._name is None:
return '__%s_%d' % (self.__class__.__name__, self._id)
return self._name
@name.setter
def name(self, value):
self._name = value
def is_running(self):
return self._state == ComponentState.RUNNING
def _start(self):
pass
def _stop(self):
pass
def cycle(self, force=False):
self.start(force)
self.stop()
def start(self, force=False):
if self._state == ComponentState.RUNNING:
return
if self._disabled and not force:
return
if self._state != ComponentState.STOPPED:
raise InvalidComponentState(self.name, self._state)
self._state = ComponentState.STARTING
for dep in self.deps:
dep.start()
print 'Starting component "%s"...' % (self['name'], )
self._start()
self._state = ComponentState.RUNNING
def stop(self, force=False):
if self._state == ComponentState.STOPPED:
return
if self._state != ComponentState.RUNNING and not force:
raise InvalidComponentState(self.name, self._state)
self._state = ComponentState.STOPPING
for dep in self.deps:
dep.stop()
print 'Stopping component "%s"...' % (self.name, )
self._stop()
self._state = ComponentState.STOPPED
def execute(self, **kwargs):
if self._env is None:
raise Exception('No environment.')
return self._env.execute(component=self, **kwargs)
def execute_or_die(self, **kwargs):
if self._env is None:
raise Exception('No environment.')
return self._env.execute_or_die(component=self, **kwargs)
def execute_interactive(self, **kwargs):
if self._env is None:
raise Exception('No environment.')
return self._env.execute_interactive(component=self, **kwargs)
def execute_in_thread(self, **kwargs):
if self._env is None:
raise Exception('No environment.')
return self._env.execute_in_thread(component=self, **kwargs)
# KeyValue related functions.
def env(self):
if self._env:
return self._env.build_environment()
else:
return None
# Commands.
class Commands:
pass
class NullComponent(ComponentBase):
"""Component that can be used to simplify the dependency graph. It does
not implement anything.
"""
pass
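# A minimal usage sketch, assuming a hypothetical EchoComponent; a real
# component would normally also be given an environment so that execute()
# and friends have something to run commands with.
#
# class EchoComponent(ComponentBase):
#     def _start(self):
#         print 'echo: up'
#     def _stop(self):
#         print 'echo: down'
#
# db = EchoComponent(name='database')
# web = EchoComponent(name='webserver', deps=[db])
# web.start()   # starts "database" first, then "webserver"
# web.stop()    # stops the dependency first, then "webserver"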
|
hansl/habitat
|
habitat/base.py
|
Python
|
apache-2.0
| 3,978
|
from datetime import datetime
from uuid import uuid4
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.types import DateTime, Integer, Unicode
from tornado.options import define, options, parse_command_line
define("crate-host", default="localhost:4200", help="CrateDB host", type=str)
parse_command_line()
engine = sa.create_engine(f"crate://{options.crate_host}")
Session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base = declarative_base()
# Attach a query property so the model classes can use e.g. Task.query below.
Base.query = Session.query_property()
def gen_id():
return str(uuid4())
class Story(Base):
__tablename__ = "stories"
id = sa.Column(Unicode, primary_key=True, default=gen_id)
text = sa.Column(Unicode)
link = sa.Column(Unicode)
position = sa.Column(Integer, default=1)
created = sa.Column(DateTime, default=datetime.utcnow)
def __repr__(self):
return "<Story({text})>".format(text=self.text)
def to_dict(self):
tasks = Task.query.filter(Task.story_id == self.id).order_by(Task.state).all()
return {
"id": self.id,
"text": self.text,
"tasks": [t.to_dict() for t in tasks],
"position": self.position,
"link": self.link,
}
class Task(Base):
__tablename__ = "tasks"
id = sa.Column(Unicode, primary_key=True, default=gen_id)
text = sa.Column(Unicode)
user = sa.Column(Unicode)
state = sa.Column(Integer, default=0)
story_id = sa.Column(Unicode)
created = sa.Column(DateTime, default=datetime.utcnow)
def __repr__(self):
return "<Task({text})>".format(text=self.text[:20])
def to_dict(self):
return {
"id": self.id,
"text": self.text,
"user": self.user,
"state": self.state,
"story_id": self.story_id,
}
def main():
Base.metadata.create_all(bind=engine)
if __name__ == "__main__":
main()
|
mikethebeer/scraty
|
backend/src/models.py
|
Python
|
apache-2.0
| 1,998
|
"""Helper functions for any word-vector operations.
TODO: Figure out how to log ImportError when building on travis, if gensim not available
TODO: Run Latin W2V again with WordTokenizer().
TODO: Add CLTK logging to this.
"""
import logging
import os
import sys
import time
from cltk.utils.cltk_logger import logger
# TODO: Fix this
# KJ added this to fix failing build on Travis CI. Gensim seems to load boto, which in turn causes an error.
try:
from gensim.models import Word2Vec
except AttributeError:
logger.error('Command `from gensim.models import Word2Vec` failed with AttributeError.')
from cltk.corpus.utils.formatter import phi5_plaintext_cleanup
from cltk.corpus.utils.formatter import tlg_plaintext_cleanup
from cltk.corpus.utils.formatter import assemble_phi5_author_filepaths
from cltk.corpus.utils.formatter import assemble_tlg_author_filepaths
from cltk.stem.latin.j_v import JVReplacer
from cltk.stem.lemma import LemmaReplacer # Change lemmatizer
from cltk.stop.latin import STOPS_LIST as latin_stops
from cltk.tokenize.word import nltk_tokenize_words
from cltk.tokenize.sentence import TokenizeSentence
from cltk.tokenize.word import WordTokenizer
def gen_docs(corpus, lemmatize, rm_stops):
"""Open and process files from a corpus. Return a list of sentences for an author. Each sentence
is itself a list of tokenized words.
"""
assert corpus in ['phi5', 'tlg']
if corpus == 'phi5':
language = 'latin'
filepaths = assemble_phi5_author_filepaths()
jv_replacer = JVReplacer()
text_cleaner = phi5_plaintext_cleanup
word_tokenizer = nltk_tokenize_words
if rm_stops:
stops = latin_stops
else:
stops = None
elif corpus == 'tlg':
language = 'greek'
filepaths = assemble_tlg_author_filepaths()
text_cleaner = tlg_plaintext_cleanup
word_tokenizer = nltk_tokenize_words
if rm_stops:
            # NOTE: reuses the Latin stop list; swap in a Greek stop list if one is available.
            stops = latin_stops
else:
stops = None
if lemmatize:
lemmatizer = LemmaReplacer(language)
sent_tokenizer = TokenizeSentence(language)
for filepath in filepaths:
with open(filepath) as f:
text = f.read()
# light first-pass cleanup, before sentence tokenization (which relies on punctuation)
text = text_cleaner(text, rm_punctuation=False, rm_periods=False)
sent_tokens = sent_tokenizer.tokenize_sentences(text)
# doc_sentences = []
for sentence in sent_tokens:
# a second cleanup at sentence-level, to rm all punctuation
sentence = text_cleaner(sentence, rm_punctuation=True, rm_periods=True)
sentence = word_tokenizer(sentence)
sentence = [s.lower() for s in sentence]
sentence = [w for w in sentence if w]
if language == 'latin':
sentence = [w[1:] if w.startswith('-') else w for w in sentence]
if stops:
sentence = [w for w in sentence if w not in stops]
sentence = [w for w in sentence if len(w) > 1] # rm short words
if sentence:
sentence = sentence
if lemmatize:
sentence = lemmatizer.lemmatize(sentence)
if sentence and language == 'latin':
sentence = [jv_replacer.replace(word) for word in sentence]
if sentence:
yield sentence
# doc_sentences.append(sentence)
# if doc_sentences != []:
# yield doc_sentences
def make_model(corpus, lemmatize=False, rm_stops=False, size=100, window=10, min_count=5, workers=4, sg=1,
save_path=None):
"""Train W2V model."""
# Simple training, with one large list
t0 = time.time()
sentences_stream = gen_docs(corpus, lemmatize=lemmatize, rm_stops=rm_stops)
# sentences_list = []
# for sent in sentences_stream:
# sentences_list.append(sent)
model = Word2Vec(sentences=list(sentences_stream), size=size, window=window, min_count=min_count, workers=workers,
sg=sg)
# "Trim" the model of unnecessary data. Model cannot be updated anymore.
model.init_sims(replace=True)
if save_path:
save_path = os.path.expanduser(save_path)
model.save(save_path)
print('Total training time for {0}: {1} minutes'.format(save_path, (time.time() - t0) / 60))
def get_sims(word, language, lemmatized=False, threshold=0.70):
"""Get similar Word2Vec terms from vocabulary or trained model.
TODO: Add option to install corpus if not available.
"""
# Normalize incoming word string
jv_replacer = JVReplacer()
if language == 'latin':
# Note that casefold() seemingly does not work with diacritic
# Greek, likely because of it expects single code points, not
# diacritics. Look into global string normalization to code points
# for all languages, especially Greek.
word = jv_replacer.replace(word).casefold()
model_dirs = {'greek': '~/cltk_data/greek/model/greek_word2vec_cltk',
'latin': '~/cltk_data/latin/model/latin_word2vec_cltk'}
    assert language in model_dirs.keys(), 'Languages available with Word2Vec model: {}'.format(model_dirs.keys())
if lemmatized:
lemma_str = '_lemmed'
else:
lemma_str = ''
model_name = '{0}_s100_w30_min5_sg{1}.model'.format(language, lemma_str)
model_dir_abs = os.path.expanduser(model_dirs[language])
model_path = os.path.join(model_dir_abs, model_name)
try:
model = Word2Vec.load(model_path)
except FileNotFoundError as fnf_error:
print(fnf_error)
print("CLTK's Word2Vec models cannot be found. Please import '{}_word2vec_cltk'.".format(language))
raise
try:
similars = model.most_similar(word)
except KeyError as key_err:
print(key_err)
possible_matches = []
for term in model.vocab:
if term.startswith(word[:3]):
possible_matches.append(term)
print("The following terms in the Word2Vec model you may be looking for: '{}'.".format(possible_matches))
return None
returned_sims = []
for similar in similars:
if similar[1] > threshold:
returned_sims.append(similar[0])
if not returned_sims:
print("Matches found, but below the threshold of 'threshold={}'. Lower it to see these results.".format(threshold))
return returned_sims
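# A minimal usage sketch, assuming the PHI5 corpus and a trained model are
# already installed locally; the save path and query word are arbitrary.
#
# make_model('phi5', lemmatize=False, rm_stops=True,
#            save_path='~/latin_w2v.model')
# get_sims('amicitia', 'latin', lemmatized=False, threshold=0.7)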
|
LBenzahia/cltk
|
cltk/vector/word2vec.py
|
Python
|
mit
| 6,542
|
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class UserPasswordInfo:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'old_pswd_salt': 'str',
'new_pswd_salt': 'str',
'reset_token': 'str'
}
self.old_pswd_salt = None # str
self.new_pswd_salt = None # str
self.reset_token = None # str
|
liosha2007/temporary-groupdocs-python3-sdk
|
groupdocs/models/UserPasswordInfo.py
|
Python
|
apache-2.0
| 1,072
|
import time
import datetime
from flask.ext.admin._compat import text_type
from flask.ext.admin.babel import lazy_gettext
class BaseFilter(object):
"""
Base filter class.
"""
def __init__(self, name, options=None, data_type=None):
"""
Constructor.
:param name:
Displayed name
:param options:
List of fixed options. If provided, will use drop down instead of textbox.
:param data_type:
Client-side widget type to use.
"""
self.name = name
self.options = options
self.data_type = data_type
def get_options(self, view):
"""
Return list of predefined options.
Override to customize behavior.
:param view:
Associated administrative view class.
"""
options = self.options
if options:
if callable(options):
options = options()
return [(v, text_type(n)) for v, n in options]
return None
def validate(self, value):
"""
Validate value.
If value is valid, returns `True` and `False` otherwise.
:param value:
Value to validate
"""
# useful for filters with date conversions, see if conversion in clean() raises ValueError
try:
self.clean(value)
return True
except ValueError:
return False
def clean(self, value):
"""
Parse value into python format. Occurs before .apply()
:param value:
Value to parse
"""
return value
def apply(self, query):
"""
Apply search criteria to the query and return new query.
:param query:
Query
"""
raise NotImplementedError()
def operation(self):
"""
Return readable operation name.
For example: u'equals'
"""
raise NotImplementedError()
def __unicode__(self):
return self.name
# Customized filters
class BaseBooleanFilter(BaseFilter):
"""
Base boolean filter, uses fixed list of options.
"""
def __init__(self, name, options=None, data_type=None):
super(BaseBooleanFilter, self).__init__(name,
(('1', lazy_gettext(u'Yes')),
('0', lazy_gettext(u'No'))),
data_type)
def validate(self, value):
return value in ('0', '1')
class BaseIntFilter(BaseFilter):
"""
Base Int filter. Adds validation and changes value to python int.
"""
def clean(self, value):
return int(float(value))
class BaseFloatFilter(BaseFilter):
"""
Base Float filter. Adds validation and changes value to python float.
"""
def clean(self, value):
return float(value)
class BaseIntListFilter(BaseFilter):
"""
Base Integer list filter. Adds validation for int "In List" filter.
"""
def clean(self, value):
return [int(float(v.strip())) for v in value.split(',') if v.strip()]
class BaseFloatListFilter(BaseFilter):
"""
Base Float list filter. Adds validation for float "In List" filter.
"""
def clean(self, value):
return [float(v.strip()) for v in value.split(',') if v.strip()]
class BaseDateFilter(BaseFilter):
"""
Base Date filter. Uses client-side date picker control.
"""
def __init__(self, name, options=None, data_type=None):
super(BaseDateFilter, self).__init__(name,
options,
data_type='datepicker')
def clean(self, value):
return datetime.datetime.strptime(value, '%Y-%m-%d').date()
class BaseDateBetweenFilter(BaseFilter):
"""
Base Date Between filter. Consolidates logic for validation and clean.
Apply method is different for each back-end.
"""
def clean(self, value):
return [datetime.datetime.strptime(range, '%Y-%m-%d')
for range in value.split(' to ')]
def operation(self):
return lazy_gettext('between')
def validate(self, value):
try:
value = [datetime.datetime.strptime(range, '%Y-%m-%d')
for range in value.split(' to ')]
# if " to " is missing, fail validation
# sqlalchemy's .between() will not work if end date is before start date
if (len(value) == 2) and (value[0] <= value[1]):
return True
else:
return False
except ValueError:
return False
class BaseDateTimeFilter(BaseFilter):
"""
Base DateTime filter. Uses client-side date time picker control.
"""
def __init__(self, name, options=None, data_type=None):
super(BaseDateTimeFilter, self).__init__(name,
options,
data_type='datetimepicker')
def clean(self, value):
# datetime filters will not work in SQLite + SQLAlchemy if value not converted to datetime
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
class BaseDateTimeBetweenFilter(BaseFilter):
"""
Base DateTime Between filter. Consolidates logic for validation and clean.
Apply method is different for each back-end.
"""
def clean(self, value):
return [datetime.datetime.strptime(range, '%Y-%m-%d %H:%M:%S')
for range in value.split(' to ')]
def operation(self):
return lazy_gettext('between')
def validate(self, value):
try:
value = [datetime.datetime.strptime(range, '%Y-%m-%d %H:%M:%S')
for range in value.split(' to ')]
if (len(value) == 2) and (value[0] <= value[1]):
return True
else:
return False
except ValueError:
return False
class BaseTimeFilter(BaseFilter):
"""
Base Time filter. Uses client-side time picker control.
"""
def __init__(self, name, options=None, data_type=None):
super(BaseTimeFilter, self).__init__(name,
options,
data_type='timepicker')
def clean(self, value):
# time filters will not work in SQLite + SQLAlchemy if value not converted to time
timetuple = time.strptime(value, '%H:%M:%S')
return datetime.time(timetuple.tm_hour,
timetuple.tm_min,
timetuple.tm_sec)
class BaseTimeBetweenFilter(BaseFilter):
"""
Base Time Between filter. Consolidates logic for validation and clean.
Apply method is different for each back-end.
"""
def clean(self, value):
timetuples = [time.strptime(range, '%H:%M:%S')
for range in value.split(' to ')]
return [datetime.time(timetuple.tm_hour,
timetuple.tm_min,
timetuple.tm_sec)
for timetuple in timetuples]
def operation(self):
return lazy_gettext('between')
def validate(self, value):
try:
timetuples = [time.strptime(range, '%H:%M:%S')
for range in value.split(' to ')]
if (len(timetuples) == 2) and (timetuples[0] <= timetuples[1]):
return True
else:
return False
        except ValueError:
            return False
def convert(*args):
"""
Decorator for field to filter conversion routine.
See :mod:`flask.ext.admin.contrib.sqla.filters` for usage example.
"""
def _inner(func):
func._converter_for = list(map(str.lower, args))
return func
return _inner
class BaseFilterConverter(object):
"""
Base filter converter.
Derive from this class to implement custom field to filter conversion
logic.
"""
def __init__(self):
self.converters = dict()
for p in dir(self):
attr = getattr(self, p)
if hasattr(attr, '_converter_for'):
for p in attr._converter_for:
self.converters[p] = attr
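# A minimal sketch of how a backend registers field-to-filter conversions with
# the machinery above; SomeIntFilter and SomeTextFilter are hypothetical
# backend-specific filter classes.
#
# class MyFilterConverter(BaseFilterConverter):
#     @convert('IntegerField', 'SmallIntegerField')
#     def conv_int(self, column, name, **kwargs):
#         return [SomeIntFilter(column, name, **kwargs)]
#
#     @convert('TextField', 'CharField')
#     def conv_text(self, column, name, **kwargs):
#         return [SomeTextFilter(column, name, **kwargs)]
#
# converter = MyFilterConverter()
# # converter.converters now maps 'integerfield', 'smallintegerfield', etc.
# # (lower-cased by @convert) to the bound conversion methods.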
|
ibushong/test-repo
|
flask_admin/model/filters.py
|
Python
|
bsd-3-clause
| 8,579
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def BatchMatMul(a, b):
# A numpy implementation of tf.batch_matmul().
if a.ndim < 3:
return np.dot(a, b)
# Get the number of matrices.
n = np.prod(a.shape[:-2])
assert n == np.prod(b.shape[:-2])
a_flat = np.reshape(a, tuple([n]) + a.shape[-2:])
b_flat = np.reshape(b, tuple([n]) + b.shape[-2:])
c_flat_shape = [n, a.shape[-2], b.shape[-1]]
c_flat = np.empty(c_flat_shape)
for i in range(n):
c_flat[i, :, :] = np.dot(a_flat[i, :, :], b_flat[i, :, :])
return np.reshape(c_flat, a.shape[:-1] + b_flat.shape[-1:])
def BatchRegularizedLeastSquares(matrices, rhss, l2_regularization=0.0):
# A numpy implementation of regularized least squares solver using
# the normal equations.
matrix_dims = matrices.shape
matrices_transposed = np.swapaxes(matrices, -2, -1)
rows = matrix_dims[-2]
cols = matrix_dims[-1]
if rows >= cols:
preconditioner = l2_regularization * np.identity(cols)
gramian = BatchMatMul(matrices_transposed, matrices) + preconditioner
inverse = np.linalg.inv(gramian)
left_pseudo_inverse = BatchMatMul(inverse, matrices_transposed)
return BatchMatMul(left_pseudo_inverse, rhss)
else:
preconditioner = l2_regularization * np.identity(rows)
gramian = BatchMatMul(matrices, matrices_transposed) + preconditioner
inverse = np.linalg.inv(gramian)
right_pseudo_inverse = BatchMatMul(matrices_transposed, inverse)
return BatchMatMul(right_pseudo_inverse, rhss)
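# For reference (a hedged note, not from the original file): the helper above
# implements the regularized normal equations,
#   rows >= cols:  x = (A^T A + l*I)^-1 A^T b
#   rows <  cols:  x = A^T (A A^T + l*I)^-1 b
# With l = 0 and a well-conditioned tall A this agrees with np.linalg.lstsq,
# as the small sketch below illustrates.
def _NormalEquationsSanityCheck():
  a = np.array([[1., 2.], [3., 4.], [5., 6.]])
  b = np.array([[1.], [0.], [1.]])
  x_normal_eq = BatchRegularizedLeastSquares(a, b, l2_regularization=0.0)
  x_lstsq, _, _, _ = np.linalg.lstsq(a, b)
  np.testing.assert_allclose(x_normal_eq, x_lstsq, atol=1e-8)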
class MatrixSolveLsOpTest(tf.test.TestCase):
def _verifySolve(self, x, y):
for np_type in [np.float32, np.float64]:
a = x.astype(np_type)
b = y.astype(np_type)
np_ans, _, _, _ = np.linalg.lstsq(a, b)
for fast in [True, False]:
with self.test_session():
tf_ans = tf.matrix_solve_ls(a, b, fast=fast)
ans = tf_ans.eval()
self.assertEqual(np_ans.shape, tf_ans.get_shape())
self.assertEqual(np_ans.shape, ans.shape)
# Check residual norm.
tf_r = b - BatchMatMul(a, ans)
tf_r_norm = np.sum(tf_r * tf_r)
np_r = b - BatchMatMul(a, np_ans)
np_r_norm = np.sum(np_r * np_r)
self.assertAllClose(np_r_norm, tf_r_norm)
# Check solution.
if fast or a.shape[0] >= a.shape[1]:
# We skip this test for the underdetermined case when using the
# slow path, because Eigen does not return a minimum norm solution.
# TODO(rmlarsen): Enable this check for all paths if/when we fix
# Eigen's solver.
self.assertAllClose(np_ans, ans, atol=1e-5, rtol=1e-5)
def _verifySolveBatch(self, x, y):
    # Since numpy.linalg.lstsq does not support batch solves, as opposed
    # to numpy.linalg.solve, we just perform this test for a fixed batch
    # shape of 2x3.
for np_type in [np.float32, np.float64]:
a = np.tile(x.astype(np_type), [2, 3, 1, 1])
b = np.tile(y.astype(np_type), [2, 3, 1, 1])
np_ans = np.empty([2, 3, a.shape[-1], b.shape[-1]])
for dim1 in range(2):
for dim2 in range(3):
np_ans[dim1, dim2, :, :], _, _, _ = np.linalg.lstsq(
a[dim1, dim2, :, :], b[dim1, dim2, :, :])
for fast in [True, False]:
with self.test_session():
tf_ans = tf.batch_matrix_solve_ls(a, b, fast=fast).eval()
self.assertEqual(np_ans.shape, tf_ans.shape)
# Check residual norm.
tf_r = b - BatchMatMul(a, tf_ans)
tf_r_norm = np.sum(tf_r * tf_r)
np_r = b - BatchMatMul(a, np_ans)
np_r_norm = np.sum(np_r * np_r)
self.assertAllClose(np_r_norm, tf_r_norm)
# Check solution.
if fast or a.shape[-2] >= a.shape[-1]:
# We skip this test for the underdetermined case when using the
# slow path, because Eigen does not return a minimum norm solution.
# TODO(rmlarsen): Enable this check for all paths if/when we fix
# Eigen's solver.
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
def _verifyRegularized(self, x, y, l2_regularizer):
for np_type in [np.float32, np.float64]:
# Test with a single matrix.
a = x.astype(np_type)
b = y.astype(np_type)
np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer)
with self.test_session():
# Test with the batch version of matrix_solve_ls on regular matrices
tf_ans = tf.batch_matrix_solve_ls(
a, b, l2_regularizer=l2_regularizer, fast=True).eval()
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
# Test with the simple matrix_solve_ls on regular matrices
tf_ans = tf.matrix_solve_ls(
a, b, l2_regularizer=l2_regularizer, fast=True).eval()
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
# Test with a 2x3 batch of matrices.
a = np.tile(x.astype(np_type), [2, 3, 1, 1])
b = np.tile(y.astype(np_type), [2, 3, 1, 1])
np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer)
with self.test_session():
tf_ans = tf.batch_matrix_solve_ls(
a, b, l2_regularizer=l2_regularizer, fast=True).eval()
self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5)
def testSquare(self):
# 2x2 matrices, 2x3 right-hand sides.
matrix = np.array([[1., 2.], [3., 4.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
self._verifySolve(matrix, rhs)
self._verifySolveBatch(matrix, rhs)
self._verifyRegularized(matrix, rhs, l2_regularizer=0.1)
def testOverdetermined(self):
    # 3x2 matrices, 3x3 right-hand sides.
matrix = np.array([[1., 2.], [3., 4.], [5., 6.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.], [1., 1., 0.]])
self._verifySolve(matrix, rhs)
self._verifySolveBatch(matrix, rhs)
self._verifyRegularized(matrix, rhs, l2_regularizer=0.1)
def testUnderdetermined(self):
    # 2x3 matrices, 2x3 right-hand sides.
    matrix = np.array([[1., 2., 3.], [4., 5., 6.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
self._verifySolve(matrix, rhs)
self._verifySolveBatch(matrix, rhs)
self._verifyRegularized(matrix, rhs, l2_regularizer=0.1)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.test_session():
matrix = tf.constant([[1., 0.], [0., 1.]])
rhs = tf.constant([[1., 0.]])
with self.assertRaises(ValueError):
tf.matrix_solve_ls(matrix, rhs)
with self.assertRaises(ValueError):
tf.batch_matrix_solve_ls(matrix, rhs)
def testEmpty(self):
full = np.array([[1., 2.], [3., 4.], [5., 6.]])
empty0 = np.empty([3, 0])
empty1 = np.empty([0, 2])
for fast in [True, False]:
with self.test_session():
tf_ans = tf.matrix_solve_ls(empty0, empty0, fast=fast).eval()
self.assertEqual(tf_ans.shape, (0, 0))
tf_ans = tf.matrix_solve_ls(empty0, full, fast=fast).eval()
self.assertEqual(tf_ans.shape, (0, 2))
tf_ans = tf.matrix_solve_ls(full, empty0, fast=fast).eval()
self.assertEqual(tf_ans.shape, (2, 0))
tf_ans = tf.matrix_solve_ls(empty1, empty1, fast=fast).eval()
self.assertEqual(tf_ans.shape, (2, 2))
def testBatchResultSize(self):
# 3x3x3 matrices, 3x3x1 right-hand sides.
matrix = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.] * 3).reshape(3, 3, 3)
rhs = np.array([1., 2., 3.] * 3).reshape(3, 3, 1)
answer = tf.batch_matrix_solve(matrix, rhs)
ls_answer = tf.batch_matrix_solve_ls(matrix, rhs)
self.assertEqual(ls_answer.get_shape(), [3, 3, 1])
self.assertEqual(answer.get_shape(), [3, 3, 1])
if __name__ == "__main__":
tf.test.main()
|
dhalleine/tensorflow
|
tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py
|
Python
|
apache-2.0
| 8,547
|
# Copyright 2007-2019 Red Hat, Inc. and others.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from datetime import datetime
from hashlib import sha256
from os.path import join, exists, basename
from unittest import mock
import glob
import os
import shutil
import tempfile
import createrepo_c
from bodhi.server.buildsys import (setup_buildsystem, teardown_buildsystem,
DevBuildsys)
from bodhi.server.config import config
from bodhi.server.models import Release, Update, UpdateRequest, UpdateStatus
from bodhi.server.metadata import UpdateInfoMetadata
import bodhi.server.metadata as bodhi_metadata
from bodhi.tests.server import base, create_update
class UpdateInfoMetadataTestCase(base.BasePyTestCase):
def setup_method(self, method):
"""
Initialize our temporary repo.
"""
super().setup_method(method)
setup_buildsystem({'buildsystem': 'dev'})
self.tempdir = tempfile.mkdtemp('bodhi')
self.tempcompdir = join(self.tempdir, 'f17-updates-testing')
self.temprepo = join(self.tempcompdir, 'compose', 'Everything', 'i386', 'os')
base.mkmetadatadir(join(self.temprepo, 'f17-updates-testing', 'i386'), updateinfo=False)
config['cache_dir'] = os.path.join(self.tempdir, 'cache')
os.makedirs(config['cache_dir'])
def teardown_method(self, method):
"""
Clean up the tempdir.
"""
super().teardown_method(method)
teardown_buildsystem()
shutil.rmtree(self.tempdir)
config['cache_dir'] = None
class TestAddUpdate(UpdateInfoMetadataTestCase):
"""
This class contains tests for the UpdateInfoMetadata.add_update() method.
"""
def test_build_not_in_builds(self):
"""
Test correct behavior when a build in update.builds isn't found in self.builds() and
koji.getBuild() is called instead.
"""
update = self.db.query(Update).one()
now = datetime(year=2018, month=2, day=8, hour=12, minute=41, second=4)
update.date_pushed = now
update.date_modified = now
md = UpdateInfoMetadata(update.release, update.request, self.db, self.temprepo,
close_shelf=False)
md.add_update(update)
md.shelf.close()
assert len(md.uinfo.updates) == 1
assert md.uinfo.updates[0].title == update.title
assert md.uinfo.updates[0].release == update.release.long_name
assert md.uinfo.updates[0].status == update.status.value
assert md.uinfo.updates[0].updated_date == update.date_modified
assert md.uinfo.updates[0].fromstr == config.get('bodhi_email')
assert md.uinfo.updates[0].rights == config.get('updateinfo_rights')
assert md.uinfo.updates[0].description == update.notes
assert md.uinfo.updates[0].id == update.alias
assert md.uinfo.updates[0].severity == 'Moderate'
assert len(md.uinfo.updates[0].references) == 1
bug = md.uinfo.updates[0].references[0]
assert bug.href == update.bugs[0].url
assert bug.id == '12345'
assert bug.type == 'bugzilla'
assert len(md.uinfo.updates[0].collections) == 1
col = md.uinfo.updates[0].collections[0]
assert col.name == update.release.long_name
assert col.shortname == update.release.name
assert len(col.packages) == 2
pkg = col.packages[0]
assert pkg.epoch == '0'
# It's a little goofy, but the DevBuildsys is going to return TurboGears rpms when its
# listBuildRPMs() method is called, so let's just roll with it.
assert pkg.name == 'TurboGears'
assert pkg.src == \
('https://download.fedoraproject.org/pub/fedora/linux/updates/17/SRPMS/T/'
'TurboGears-1.0.2.2-2.fc17.src.rpm')
assert pkg.version == '1.0.2.2'
assert not pkg.reboot_suggested
assert pkg.arch == 'src'
assert pkg.filename == 'TurboGears-1.0.2.2-2.fc17.src.rpm'
pkg = col.packages[1]
assert pkg.epoch == '0'
assert pkg.name == 'TurboGears'
assert pkg.src == \
('https://download.fedoraproject.org/pub/fedora/linux/updates/17/i386/T/'
'TurboGears-1.0.2.2-2.fc17.noarch.rpm')
assert pkg.version == '1.0.2.2'
assert not pkg.reboot_suggested
assert pkg.arch == 'noarch'
assert pkg.filename == 'TurboGears-1.0.2.2-2.fc17.noarch.rpm'
def test_date_modified_none(self):
"""The metadata should use date_submitted if an update's date_modified is None."""
update = self.db.query(Update).one()
update.date_modified = None
md = UpdateInfoMetadata(update.release, update.request, self.db, self.temprepo,
close_shelf=False)
md.add_update(update)
md.shelf.close()
assert len(md.uinfo.updates) == 1
assert md.uinfo.updates[0].updated_date == update.date_submitted
def test_date_pushed_none(self):
"""The metadata should use date_submitted if an update's date_pushed is None."""
update = self.db.query(Update).one()
update.date_pushed = None
md = UpdateInfoMetadata(update.release, update.request, self.db, self.temprepo,
close_shelf=False)
md.add_update(update)
md.shelf.close()
assert len(md.uinfo.updates) == 1
assert md.uinfo.updates[0].issued_date == update.date_submitted
def test_rpm_with_arch(self):
"""Ensure that an RPM with a non 386 arch gets handled correctly."""
update = self.db.query(Update).one()
md = UpdateInfoMetadata(update.release, update.request, self.db, self.temprepo,
close_shelf=False)
# Set the arch to aarch64
fake_rpms = [{
'nvr': 'TurboGears-1.0.2.2-2.fc17', 'buildtime': 1178868422, 'arch': 'aarch64',
'id': 62330, 'size': 761742, 'build_id': 6475, 'name': 'TurboGears', 'epoch': None,
'version': '1.0.2.2', 'release': '2.fc17', 'buildroot_id': 1883,
'payloadhash': '6787febe92434a9be2a8f309d0e2014e'}]
with mock.patch.object(md, 'get_rpms', mock.MagicMock(return_value=fake_rpms)):
md.add_update(update)
md.shelf.close()
col = md.uinfo.updates[0].collections[0]
assert len(col.packages) == 1
pkg = col.packages[0]
assert pkg.src == \
('https://download.fedoraproject.org/pub/fedora/linux/updates/17/aarch64/T/'
'TurboGears-1.0.2.2-2.fc17.aarch64.rpm')
def test_rpm_with_epoch(self):
"""Ensure that an RPM with an Epoch gets handled correctly."""
update = self.db.query(Update).one()
md = UpdateInfoMetadata(update.release, update.request, self.db, self.temprepo,
close_shelf=False)
# We'll fake the return of get_rpms so we can inject an epoch of 42.
fake_rpms = [{
'nvr': 'TurboGears-1.0.2.2-2.fc17', 'buildtime': 1178868422, 'arch': 'src', 'id': 62330,
'size': 761742, 'build_id': 6475, 'name': 'TurboGears', 'epoch': 42,
'version': '1.0.2.2', 'release': '2.fc17', 'buildroot_id': 1883,
'payloadhash': '6787febe92434a9be2a8f309d0e2014e'}]
with mock.patch.object(md, 'get_rpms', mock.MagicMock(return_value=fake_rpms)):
md.add_update(update)
md.shelf.close()
col = md.uinfo.updates[0].collections[0]
assert len(col.packages) == 1
pkg = col.packages[0]
assert pkg.epoch == '42'
class TestFetchUpdates(UpdateInfoMetadataTestCase):
"""Test the UpdateInfoMetadata._fetch_updates() method."""
@mock.patch('bodhi.server.metadata.log.warning')
def test_build_unassociated(self, warning):
"""A warning should be logged if the Bodhi Build object is not associated with an Update."""
update = self.db.query(Update).one()
update.date_pushed = None
u = create_update(self.db, ['TurboGears-1.0.2.2-4.fc17'])
u.builds[0].update = None
self.db.flush()
# _fetch_updates() is called as part of UpdateInfoMetadata.__init__() so we'll just
# instantiate one.
md = UpdateInfoMetadata(update.release, update.request, self.db, self.temprepo,
close_shelf=False)
warning.assert_called_once_with(
'TurboGears-1.0.2.2-4.fc17 does not have a corresponding update')
# Since the Build didn't have an Update, no Update should have been added to md.updates.
assert md.updates == set([])
class TestUpdateInfoMetadata(UpdateInfoMetadataTestCase):
def setup_method(self, method):
super().setup_method(method)
self._new_compose_stage_dir = tempfile.mkdtemp()
self._compose_stage_dir = config['compose_stage_dir']
self._compose_dir = config['compose_dir']
config['compose_stage_dir'] = self._new_compose_stage_dir
config['compose_dir'] = os.path.join(config['compose_stage_dir'], 'compose')
config['cache_dir'] = os.path.join(config['compose_stage_dir'], 'cache')
os.makedirs(config['cache_dir'])
os.makedirs(os.path.join(config['compose_dir'], 'f17-updates-testing'))
# Initialize our temporary repo
base.mkmetadatadir(self.temprepo, updateinfo=False)
base.mkmetadatadir(join(self.tempcompdir, 'compose', 'Everything', 'source', 'tree'),
updateinfo=False)
self.repodata = join(self.temprepo, 'repodata')
assert exists(join(self.repodata, 'repomd.xml'))
DevBuildsys.__rpms__ = [{
'arch': 'src',
'build_id': 6475,
'buildroot_id': 1883,
'buildtime': 1178868422,
'epoch': None,
'id': 62330,
'name': 'bodhi',
'nvr': 'bodhi-2.0-1.fc17',
'release': '1.fc17',
'size': 761742,
'version': '2.0'
}]
def teardown_method(self, method):
config['compose_stage_dir'] = self._compose_stage_dir
config['compose_dir'] = self._compose_dir
config['cache_dir'] = None
shutil.rmtree(self._new_compose_stage_dir)
super().teardown_method(method)
def _verify_updateinfos(self, repodata):
updateinfos = glob.glob(join(repodata, "*-updateinfo.xml*"))
if hasattr(createrepo_c, 'ZCK_COMPRESSION'):
assert len(updateinfos) == 2, f"We generated {len(updateinfos)} updateinfo metadata"
else:
assert len(updateinfos) == 1, f"We generated {len(updateinfos)} updateinfo metadata"
for updateinfo in updateinfos:
hash = basename(updateinfo).split("-", 1)[0]
with open(updateinfo, 'rb') as fn:
hashed = sha256(fn.read()).hexdigest()
assert hash == hashed, f"File: {basename(updateinfo)}\nHash: {hashed}"
return updateinfos
def get_notice(self, uinfo, title):
for record in uinfo.updates:
if record.title == title:
return record
def test___init___uses_bz2_for_epel(self):
"""Assert that the __init__() method sets the comp_type attribute to cr.BZ2 for EPEL."""
epel_7 = Release(id_prefix="FEDORA-EPEL", stable_tag='epel7')
md = UpdateInfoMetadata(epel_7, UpdateRequest.stable, self.db, self.tempdir)
assert md.comp_type == createrepo_c.BZ2
assert not md.zchunk
def test___init___uses_xz_for_fedora(self):
"""Assert that the __init__() method sets the comp_type attribute to cr.XZ for Fedora."""
fedora = Release.query.one()
md = UpdateInfoMetadata(fedora, UpdateRequest.stable, self.db, self.tempdir)
assert md.comp_type == createrepo_c.XZ
assert md.zchunk
def test_extended_metadata_once(self):
"""Assert that a single call to update the metadata works as expected."""
self._test_extended_metadata()
def test_extended_metadata_cache(self):
"""Asserts that when the same update is retrieved twice, the info is unshelved.
After the first run, we clear the buildsystem.__rpms__ so that there would be no way to
again retrieve the info from the buildsystem, and it'll have to be returned from the
cache.
"""
self._test_extended_metadata()
shutil.rmtree(self.temprepo)
base.mkmetadatadir(self.temprepo, updateinfo=False)
base.mkmetadatadir(join(self.tempcompdir, 'compose', 'Everything', 'source', 'tree'),
updateinfo=False)
DevBuildsys.__rpms__ = []
self._test_extended_metadata()
def _test_extended_metadata(self):
update = self.db.query(Update).one()
# Pretend it's pushed to testing
update.status = UpdateStatus.testing
update.request = None
update.date_pushed = datetime.utcnow()
DevBuildsys.__tagged__[update.title] = ['f17-updates-testing']
# Generate the XML
md = UpdateInfoMetadata(update.release, update.request, self.db, self.tempcompdir)
# Insert the updateinfo.xml into the repository
md.insert_updateinfo(self.tempcompdir)
updateinfos = self._verify_updateinfos(self.repodata)
for updateinfo in updateinfos:
            # Read and verify the updateinfo.xml.gz
uinfo = createrepo_c.UpdateInfo(updateinfo)
notice = self.get_notice(uinfo, 'mutt-1.5.14-1.fc13')
assert notice is None
assert len(uinfo.updates) == 1
notice = uinfo.updates[0]
assert notice is not None
assert notice.title == update.title
assert notice.release == update.release.long_name
assert notice.status == update.status.value
if update.date_modified:
assert notice.updated_date == update.date_modified
assert notice.fromstr == config.get('bodhi_email')
assert notice.rights == config.get('updateinfo_rights')
assert notice.description == update.notes
assert notice.id == update.alias
assert notice.severity == 'Moderate'
bug = notice.references[0]
assert bug.href == update.bugs[0].url
assert bug.id == '12345'
assert bug.type == 'bugzilla'
col = notice.collections[0]
assert col.name == update.release.long_name
assert col.shortname == update.release.name
pkg = col.packages[0]
assert pkg.epoch == '0'
assert pkg.name == 'TurboGears'
assert pkg.src == \
('https://download.fedoraproject.org/pub/fedora/linux/updates/testing/17/SRPMS/T/'
'TurboGears-1.0.2.2-2.fc17.src.rpm')
assert pkg.version == '1.0.2.2'
assert not pkg.reboot_suggested
assert pkg.arch == 'src'
assert pkg.filename == 'TurboGears-1.0.2.2-2.fc17.src.rpm'
@mock.patch('bodhi.server.metadata.cr')
def test_zchunk_metadata_coverage_xz_compression(self, mock_cr):
"""
        Test that a zchunk record is added alongside the regular record when the primary compression is XZ.
This test makes sure we reach 100% coverage by mocking createrepo.
cr.ZCK_COMPRESSION is only defined when createrepo_c supports zchunk, but createrepo_c's
zchunk support is only available in createrepo_c >= 0.12.0, and it is also a build flag,
so we can't be sure that the createrepo_c we work with has that feature.
This function is designed to *only* make sure we reach 100% coverage and isn't meant
to test whether zchunk is working correctly. _test_extended_metadata will take care
of testing both the regular and zchunked updateinfo if zchunk is enabled
"""
mock_cr.ZCK_COMPRESSION = 99
mock_repomd = mock.MagicMock()
mock_repomd.xml_dump = mock.MagicMock(return_value="test data")
mock_cr.Repomd = mock.MagicMock(return_value=mock_repomd)
bodhi_metadata.insert_in_repo(bodhi_metadata.cr.XZ_COMPRESSION, self.tempcompdir,
'garbage', 'zck', '/dev/null', True)
mock_cr.Repomd.assert_called_once_with(os.path.join(self.tempcompdir, 'repomd.xml'))
assert mock_cr.RepomdRecord.mock_calls == \
[mock.call('garbage', os.path.join(self.tempcompdir, 'garbage.zck')),
mock.call().compress_and_fill(mock_cr.SHA256, mock_cr.XZ_COMPRESSION),
mock.call().compress_and_fill().rename_file(),
mock.call('garbage_zck', os.path.join(self.tempcompdir, 'garbage.zck')),
mock.call().compress_and_fill(mock_cr.SHA256, mock_cr.ZCK_COMPRESSION),
mock.call().compress_and_fill().rename_file()]
rec = mock_cr.RepomdRecord.return_value
rec_comp = rec.compress_and_fill.return_value
# The last comp_type added is the _zck one
assert rec_comp.type == 'garbage_zck'
assert mock_cr.Repomd.return_value.set_record.mock_calls == \
[mock.call(rec_comp), mock.call(rec_comp)]
with open(os.path.join(self.tempcompdir, 'repomd.xml')) as repomd_file:
repomd_contents = repomd_file.read()
assert repomd_contents == 'test data'
assert not os.path.exists(os.path.join(self.tempcompdir, 'garbage.zck'))
@mock.patch('bodhi.server.metadata.cr')
def test_zchunk_metadata_coverage_zchunk_skipped(self, mock_cr):
"""
Let's test that we skip zchunk files, because we don't want to zchunk zchunk files.
This test makes sure we reach 100% coverage by mocking createrepo.
cr.ZCK_COMPRESSION is only defined when createrepo_c supports zchunk, but createrepo_c's
zchunk support is only available in createrepo_c >= 0.12.0, and it is also a build flag,
so we can't be sure that the createrepo_c we work with has that feature.
This function is designed to *only* make sure we reach 100% coverage and isn't meant
to test whether zchunk is working correctly. _test_extended_metadata will take care
of testing both the regular and zchunked updateinfo if zchunk is enabled
"""
mock_cr.ZCK_COMPRESSION = 99
mock_repomd = mock.MagicMock()
mock_repomd.xml_dump = mock.MagicMock(return_value="test data")
mock_cr.Repomd = mock.MagicMock(return_value=mock_repomd)
bodhi_metadata.insert_in_repo(99, self.tempcompdir, 'garbage', 'zck', '/dev/null', True)
mock_cr.Repomd.assert_called_once_with(os.path.join(self.tempcompdir, 'repomd.xml'))
mock_cr.RepomdRecord.assert_called_once_with('garbage',
os.path.join(self.tempcompdir, 'garbage.zck'))
rec = mock_cr.RepomdRecord.return_value
rec.compress_and_fill.assert_called_once_with(mock_cr.SHA256, mock_cr.ZCK_COMPRESSION)
rec_comp = rec.compress_and_fill.return_value
rec_comp.rename_file.assert_called_once_with()
assert rec_comp.type == 'garbage'
mock_cr.Repomd.return_value.set_record.assert_called_once_with(rec_comp)
with open(os.path.join(self.tempcompdir, 'repomd.xml')) as repomd_file:
repomd_contents = repomd_file.read()
assert repomd_contents == 'test data'
assert not os.path.exists(os.path.join(self.tempcompdir, 'garbage.zck'))
@mock.patch('bodhi.server.metadata.cr')
def test_zchunk_metadata_coverage_zchunk_unsupported(self, mock_cr):
"""
Let's test that we skip zchunk compression when it is unsupported by createrepo_c.
This test makes sure we reach 100% coverage by mocking createrepo.
cr.ZCK_COMPRESSION is only defined when createrepo_c supports zchunk, but createrepo_c's
zchunk support is only available in createrepo_c >= 0.12.0, and it is also a build flag,
so we can't be sure that the createrepo_c we work with has that feature.
This function is designed to *only* make sure we reach 100% coverage and isn't meant
to test whether zchunk is working correctly. _test_extended_metadata will take care
of testing both the regular and zchunked updateinfo if zchunk is enabled
"""
del mock_cr.ZCK_COMPRESSION
mock_repomd = mock.MagicMock()
mock_repomd.xml_dump = mock.MagicMock(return_value="test data")
mock_cr.Repomd = mock.MagicMock(return_value=mock_repomd)
bodhi_metadata.insert_in_repo(bodhi_metadata.cr.XZ_COMPRESSION, self.tempcompdir,
'garbage', 'xz', '/dev/null', True)
mock_cr.Repomd.assert_called_once_with(os.path.join(self.tempcompdir, 'repomd.xml'))
mock_cr.RepomdRecord.assert_called_once_with('garbage',
os.path.join(self.tempcompdir, 'garbage.xz'))
rec = mock_cr.RepomdRecord.return_value
rec.compress_and_fill.assert_called_once_with(mock_cr.SHA256, mock_cr.XZ_COMPRESSION)
rec_comp = rec.compress_and_fill.return_value
rec_comp.rename_file.assert_called_once_with()
# The last inserted type is without _zck
assert rec_comp.type == 'garbage'
mock_cr.Repomd.return_value.set_record.assert_called_once_with(rec_comp)
with open(os.path.join(self.tempcompdir, 'repomd.xml')) as repomd_file:
repomd_contents = repomd_file.read()
assert repomd_contents == 'test data'
assert not os.path.exists(os.path.join(self.tempcompdir, 'garbage.zck'))
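# Hedged illustration (not part of the original test module): the tests above
# branch on whether createrepo_c was built with zchunk support, which is only
# exposed as a ZCK_COMPRESSION attribute in createrepo_c >= 0.12.0 builds that
# enable the zchunk flag. A minimal capability check therefore looks like:
def _zchunk_supported():
    """Return True if this createrepo_c build can emit zchunk repodata."""
    return hasattr(createrepo_c, 'ZCK_COMPRESSION')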
|
Conan-Kudo/bodhi
|
bodhi/tests/server/test_metadata.py
|
Python
|
gpl-2.0
| 22,439
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db import router
from django.template.response import TemplateResponse
from django.utils.safestring import mark_safe
from django.contrib.admin import helpers
import floppyforms as forms
from froide.helper.admin_utils import NullFilterSpec, AdminTagAllMixIn
from froide.helper.widgets import TagAutocompleteTagIt
from .models import (FoiRequest, FoiMessage,
FoiAttachment, FoiEvent, PublicBodySuggestion,
DeferredMessage)
from .tasks import count_same_foirequests, convert_attachment_task
class FoiMessageInline(admin.StackedInline):
model = FoiMessage
raw_id_fields = ('request', 'sender_user', 'sender_public_body', 'recipient_public_body')
class SameAsNullFilter(NullFilterSpec):
title = _(u'Has same request')
parameter_name = u'same_as'
class RequesterFilter(admin.FieldListFilter):
template = "admin/foirequest/user_filter.html"
def __init__(self, field, request, params, model, model_admin, field_path):
super(RequesterFilter, self).__init__(
field, request, params, model, model_admin, field_path)
self.lookup_val = request.GET.get(self.field_path, None)
def expected_parameters(self):
return [self.field_path]
def choices(self, cl):
return [{
'value': self.lookup_val,
'field_path': self.field_path,
'query_string': cl.get_query_string({},
[self.field_path]),
}]
class FoiRequestAdminForm(forms.ModelForm):
class Meta:
model = FoiRequest
widgets = {
'tags': TagAutocompleteTagIt(
autocomplete_url=lambda: reverse('api_get_tags_autocomplete', kwargs={
'api_name': 'v1',
'resource_name': 'request'}
)),
}
class FoiRequestAdmin(admin.ModelAdmin, AdminTagAllMixIn):
form = FoiRequestAdminForm
prepopulated_fields = {"slug": ("title",)}
inlines = [
FoiMessageInline,
]
list_display = ('title', 'first_message', 'secret_address', 'checked',
'public_body', 'status',)
list_filter = ('jurisdiction', 'first_message', 'last_message', 'status',
'resolution', 'is_foi', 'checked', 'public', 'visibility', SameAsNullFilter,
('user', RequesterFilter))
search_fields = ['title', "description", 'secret_address']
ordering = ('-last_message',)
date_hierarchy = 'first_message'
autocomplete_resource_name = 'request'
actions = ['mark_checked', 'mark_not_foi', 'tag_all',
'mark_same_as', 'remove_from_index', 'confirm_request']
raw_id_fields = ('same_as', 'public_body', 'user',)
save_on_top = True
def mark_checked(self, request, queryset):
rows_updated = queryset.update(checked=True)
        self.message_user(request, _("%d request(s) successfully marked as checked.") % rows_updated)
mark_checked.short_description = _("Mark selected requests as checked")
def mark_not_foi(self, request, queryset):
rows_updated = queryset.update(is_foi=False)
        self.message_user(request, _("%d request(s) successfully marked as not FoI.") % rows_updated)
mark_not_foi.short_description = _("Mark selected requests as not FoI")
def mark_same_as(self, request, queryset):
"""
Mark selected requests as same as the one we are choosing now.
"""
opts = self.model._meta
# Check that the user has change permission for the actual model
if not self.has_change_permission(request):
raise PermissionDenied
# User has already chosen the other req
if request.POST.get('req_id'):
try:
req = self.model.objects.get(id=int(request.POST.get('req_id')))
except (ValueError, self.model.DoesNotExist,):
raise PermissionDenied
queryset.update(same_as=req)
count_same_foirequests.delay(req.id)
self.message_user(request, _("Successfully marked requests as identical."))
# Return None to display the change list page again.
return None
db = router.db_for_write(self.model)
context = {
'opts': opts,
'queryset': queryset,
'media': self.media,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
'req_widget': mark_safe(admin.widgets.ForeignKeyRawIdWidget(
self.model._meta.get_field(
'same_as').rel, self.admin_site, using=db).render(
'req_id', None,
{'id': 'id_req_id'})
.replace('../../..', '../..')),
'applabel': opts.app_label
}
# Display the confirmation page
return TemplateResponse(request, 'foirequest/admin_mark_same_as.html',
context, current_app=self.admin_site.name)
mark_same_as.short_description = _("Mark selected requests as identical to...")
def remove_from_index(self, request, queryset):
from haystack import connections as haystack_connections
for obj in queryset:
for using in haystack_connections.connections_info.keys():
backend = haystack_connections[using].get_backend()
backend.remove(obj)
self.message_user(request, _("Removed from search index"))
remove_from_index.short_description = _("Remove from search index")
def confirm_request(self, request, queryset):
foireq = queryset[0]
if foireq.status != 'awaiting_user_confirmation':
self.message_user(request, _("Request not in correct state!"))
return None
        self.message_user(request, _("Message sent successfully!"))
FoiRequest.confirmed_request(foireq.user, foireq.pk)
return None
confirm_request.short_description = _("Confirm request if unconfirmed")
class FoiAttachmentInline(admin.TabularInline):
model = FoiAttachment
raw_id_fields = ('redacted', 'converted')
class FoiMessageAdmin(admin.ModelAdmin):
save_on_top = True
list_display = ('subject', 'timestamp', 'sender_email', 'recipient_email',)
list_filter = ('is_postal', 'is_response', 'sent', 'status',)
search_fields = ['subject', 'sender_email', 'recipient_email']
ordering = ('-timestamp',)
date_hierarchy = 'timestamp'
exclude = ('original',)
raw_id_fields = ('request', 'sender_user', 'sender_public_body', 'recipient_public_body')
inlines = [
FoiAttachmentInline,
]
class RedactedVersionNullFilter(NullFilterSpec):
title = _(u'Has redacted version')
parameter_name = u'redacted'
class ConvertedVersionNullFilter(NullFilterSpec):
title = _(u'Has converted version')
parameter_name = u'converted'
class FoiAttachmentAdmin(admin.ModelAdmin):
raw_id_fields = ('belongs_to', 'redacted', 'converted')
ordering = ('-id',)
list_display = ('name', 'filetype', 'admin_link_message', 'approved', 'can_approve',)
list_filter = ('can_approve', 'approved', 'is_redacted', 'is_converted',
RedactedVersionNullFilter, ConvertedVersionNullFilter)
search_fields = ['name']
actions = ['approve', 'cannot_approve', 'convert']
def approve(self, request, queryset):
rows_updated = queryset.update(approved=True)
        self.message_user(request, _("%d attachment(s) successfully approved.") % rows_updated)
approve.short_description = _("Mark selected as approved")
def cannot_approve(self, request, queryset):
rows_updated = queryset.update(can_approve=False)
        self.message_user(request, _("%d attachment(s) successfully marked as not approvable.") % rows_updated)
cannot_approve.short_description = _("Mark selected as NOT approvable")
def convert(self, request, queryset):
if not queryset:
return
for instance in queryset:
if (instance.filetype in FoiAttachment.CONVERTABLE_FILETYPES or
instance.name.endswith(FoiAttachment.CONVERTABLE_FILETYPES)):
convert_attachment_task.delay(instance.pk)
self.message_user(request, _("Conversion tasks started."))
convert.short_description = _("Convert to PDF")
class FoiEventAdmin(admin.ModelAdmin):
list_display = ('event_name', 'request', 'timestamp',)
list_filter = ('event_name', 'public')
search_fields = ['request__title', "public_body__name"]
ordering = ('-timestamp',)
date_hierarchy = 'timestamp'
raw_id_fields = ('request', 'user', 'public_body')
class PublicBodySuggestionAdmin(admin.ModelAdmin):
list_display = ('request', 'public_body', 'user', 'reason',)
search_fields = ['request', 'reason']
ordering = ('-timestamp',)
date_hierarchy = 'timestamp'
raw_id_fields = ('request', 'public_body', 'user')
class RequestNullFilter(NullFilterSpec):
title = _(u'Has request')
parameter_name = u'request'
class DeferredMessageAdmin(admin.ModelAdmin):
model = DeferredMessage
list_filter = (RequestNullFilter, 'spam')
date_hierarchy = 'timestamp'
ordering = ('-timestamp',)
list_display = ('recipient', 'timestamp', 'request', 'spam')
raw_id_fields = ('request',)
actions = ['redeliver']
save_on_top = True
def redeliver(self, request, queryset):
"""
Redeliver undelivered mails
"""
opts = self.model._meta
# Check that the user has change permission for the actual model
if not self.has_change_permission(request):
raise PermissionDenied
# User has already chosen the other req
if request.POST.get('req_id'):
req_id = int(request.POST.get('req_id'))
try:
req = FoiRequest.objects.get(id=req_id)
except (ValueError, FoiRequest.DoesNotExist,):
raise PermissionDenied
for deferred in queryset:
deferred.redeliver(req)
self.message_user(request, _("Successfully triggered redelivery."))
return None
db = router.db_for_write(self.model)
context = {
'opts': opts,
'queryset': queryset,
'media': self.media,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
'req_widget': mark_safe(admin.widgets.ForeignKeyRawIdWidget(
self.model._meta.get_field(
'request').rel, self.admin_site, using=db).render(
'req_id', None,
{'id': 'id_req_id'})
.replace('../../..', '../..')),
'applabel': opts.app_label
}
# Display the confirmation page
return TemplateResponse(request, 'foirequest/admin_redeliver.html',
context, current_app=self.admin_site.name)
redeliver.short_description = _("Redeliver to...")
admin.site.register(FoiRequest, FoiRequestAdmin)
admin.site.register(FoiMessage, FoiMessageAdmin)
admin.site.register(FoiAttachment, FoiAttachmentAdmin)
admin.site.register(FoiEvent, FoiEventAdmin)
admin.site.register(PublicBodySuggestion, PublicBodySuggestionAdmin)
admin.site.register(DeferredMessage, DeferredMessageAdmin)
|
okfse/froide
|
froide/foirequest/admin.py
|
Python
|
mit
| 11,487
|
from twisted.trial.unittest import TestCase
from txssmi.builder import SSMIRequest, SSMICommandException
class SSMICommandTestCase(TestCase):
def test_valid_fields(self):
LoginRequest = SSMIRequest.create('LOGIN', {})
login = LoginRequest(username='foo', password='bar')
self.assertEqual(login.username, 'foo')
self.assertEqual(login.password, 'bar')
def test_defaults(self):
LoginRequest = SSMIRequest.create('LOGIN', {'username': 'foo'})
login = LoginRequest(password='bar')
self.assertEqual(login.username, 'foo')
self.assertEqual(login.password, 'bar')
def test_missing_fields(self):
LoginRequest = SSMIRequest.create('LOGIN', {'username': 'foo'})
self.assertRaisesRegexp(
SSMICommandException, 'Missing fields: password', LoginRequest)
def test_unsupported_field(self):
LoginRequest = SSMIRequest.create('LOGIN', {})
self.assertRaisesRegexp(
SSMICommandException, 'Unsupported fields: foo',
LoginRequest, foo='foo')
def test_parse(self):
LoginRequest = SSMIRequest.create('LOGIN')
expected_cmd = LoginRequest(username='foo', password='bar')
cmd = SSMIRequest.parse('SSMI,1,foo,bar')
self.assertEqual(cmd, expected_cmd)
def test_parse_missing_fields(self):
self.assertRaisesRegexp(
SSMICommandException,
'Too few parameters for command: LOGIN \(expected 2 got 1\)',
SSMIRequest.parse, 'SSMI,1,foo')
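# Hedged note (not part of the original tests): test_parse above implies a wire
# format of "SSMI,<command id>,<field values...>"; for LOGIN the id is 1,
# followed by username and password. A tiny round-trip sketch, assuming the
# parsed command exposes its fields as attributes (as the construction tests
# above do):
def _example_parse_login():
    cmd = SSMIRequest.parse('SSMI,1,foo,bar')
    return cmd.username, cmd.password  # expected: ('foo', 'bar')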
|
praekelt/txssmi
|
txssmi/tests/test_commands.py
|
Python
|
bsd-3-clause
| 1,544
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Created on: 01/28/15
from __future__ import print_function
__author__ = 'karas84'
import gi
gi.require_version('Unity', '7.0')
from gi.repository import Unity, GLib
import threading
import re
import subprocess
import sys
import os
import hashlib
import shutil
import Xlib
from Xlib import X, display
from Xlib.protocol.event import PropertyNotify
try:
# noinspection PyCompatibility
from queue import Queue
except ImportError:
# noinspection PyCompatibility
from Queue import Queue
badge_queue = Queue()
GLib.threads_init()
_NET_WM_NAME = display.Display().intern_atom('_NET_WM_NAME')
_NET_CLIENT_LIST = display.Display().intern_atom('_NET_CLIENT_LIST')
_NET_CLOSE_WINDOW = display.Display().intern_atom('_NET_CLOSE_WINDOW')
UTF8_STRING = display.Display().intern_atom('UTF8_STRING')
class XTools(object):
INSTANCE = None
def __init__(self):
if self.INSTANCE is not None:
raise ValueError("An instantiation already exists!")
        # open the shared X display connection and cache the root window
self.display = display.Display()
self.root = self.display.screen().root
@classmethod
def instance(cls):
if cls.INSTANCE is None:
cls.INSTANCE = XTools()
return cls.INSTANCE
def get_root(self):
return self.root
def get_display(self):
return self.display
def create_window_from_id(self, window_id):
return self.display.create_resource_object('window', window_id)
def get_client_list(self):
return self.root.get_full_property(_NET_CLIENT_LIST, Xlib.X.AnyPropertyType).value
def get_window_by_class_name(self, class_name):
window = None
for win in self.root.query_tree().children:
if win.get_wm_class() is not None:
if class_name in win.get_wm_class()[0] or class_name in win.get_wm_class()[1]:
window = self.display.create_resource_object('window', win.id)
break
return window
def get_client_by_class_name(self, class_name):
window = None
for win_id in self.get_client_list():
try:
win = self.create_window_from_id(win_id)
wclass = win.get_wm_class()
if wclass is not None and (class_name in wclass[0] or class_name in wclass[1]):
window = win
break
except:
pass
return window
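def _example_find_client(class_name='whatsapp'):
    """Hedged usage sketch, not part of the original script: XTools is a
    singleton, so every caller shares one X display connection. The default
    class name here is an arbitrary example."""
    xtools = XTools.instance()
    return xtools.get_client_by_class_name(class_name)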
class XWindow(object):
class WindowIsNone(Exception):
def __init__(self):
super(XWindow.WindowIsNone, self).__init__()
def __init__(self, window):
if window is None:
            raise XWindow.WindowIsNone()
self.XTools = XTools.instance()
self.window = window
def click(self, button=1):
self.XTools.mouse_down(self.window, button)
self.XTools.mouse_up(self.window, button)
def double_click(self, button=1):
self.click(button)
self.click(button)
def close(self):
close_message = Xlib.protocol.event.ClientMessage(window=self.window, client_type=_NET_CLOSE_WINDOW,
data=(32, [0, 0, 0, 0, 0]))
mask = (X.SubstructureRedirectMask | X.SubstructureNotifyMask)
self.XTools.instance().get_root().send_event(close_message, event_mask=mask)
self.XTools.get_display().flush()
def hide(self):
Xlib.protocol.request.UnmapWindow(display=self.XTools.get_display().display, window=self.window.id)
self.XTools.get_display().sync()
def show(self):
Xlib.protocol.request.MapWindow(display=self.XTools.get_display().display, window=self.window.id)
self.XTools.get_display().sync()
def get_title(self):
return self.window.get_full_property(_NET_WM_NAME, UTF8_STRING).value
def set_class(self, app_name, app_class):
self.window.set_wm_class(app_name, app_class)
self.XTools.get_display().sync()
    def set_app_name(self, app_name):
        class_name = app_name, str(self.window.get_wm_class()[1])
        self.window.set_wm_class(*class_name)
        self.XTools.get_display().sync()
    def set_app_class(self, app_class):
        class_name = str(self.window.get_wm_class()[0]), app_class
        self.window.set_wm_class(*class_name)
        self.XTools.get_display().sync()
def next_event(self, instance=None, atom=None):
ev = None
while ev is None:
ev = self.window.display.next_event()
if atom is not None:
ev = ev if hasattr(ev, 'atom') and ev.atom == atom else None
if instance is not None:
ev = ev if isinstance(ev, instance) else None
return ev
class LocalInstaller(object):
class RestartNeeded(Exception):
def __init__(self):
super(LocalInstaller.RestartNeeded, self).__init__()
INSTANCE = None
ICON_DATA = """iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAYAAABccqhmAAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH3wEXCzcz/JBsDwAAIABJREFUeNrsfXecHMWV//dVz8zmXe0KS0gCSQgBIiMQwYBMEthgog0cxjacAYMx6QwYMHC2MTbncGcw/pE5MCCCST4nwGCDyBkBAiEQIBAIpJVWYdPMzkzX+/0xHaq7q8Ok
lQTb+rR2pqe6u7q63qsXvu89YGQb2Ua2kW1kG9lGtpFtZBvZRraRbWQb2Ua2kW1kG9lGtpFtZBvZRrbP5kYjQ/CZfacEQFifhfJdaH7XbazsMuSzuo9sIwxgZBvmdycAGNYuAKSU3QCQtv6miMgQQhhElCIig6zNxxAYAJhZMjMDkMwspZQmMxeZ2QRg7wXrb9HaTeW79DGKkW2EAYxsVWz2ym0TdMb6nAbQaH3PAGjaeuutO4477rgtdtllly3H
jRu36ejRozdua2sbl85kWn0v26J/gJkBIoAZBAITAwwwM7ttpDkwMLBi9eo1S7q7uz98++2337n33nvfePrpp7u7u7v7AQwByFu7+rlo/TV9jGFkG2EAI1vMyp4C0KDsjQAap0yZssGPfvSjnWfOnPmljTbaaMempqZOgAyABRERCCBrQS8t6aXPFo2XXrrymUuneIV/YrB1DWIARArVMjxfSuKCKaUsrlix4r033njj2VtuueWp2bNnvw/AZg5Z
62/Okh4KitQwwhBGGMDnfoVPKQTfCKAJQMO4cePGXHXVVfvvscceh3R1dU0lorQwjLSPCO013VrFHZp3ZXv7N4vw1faBE3wThD0f9C3Uny2hAVKylCzzQ7mh/oULFz59zTXX/OX6669/w2IG6j6kMIQRCWGEAXxuVvmULboDaAbQMH369I2vueaab2y91VYHNDQ2jk6lU00uORBArCFW6zslIx0tUXvOJZAtPUQQv91Kzz/I+p8VxkAwTdMsFAr9
3d3db9911133nH/++U8AGLAYgf03r9gVRpjBCAP4zBF9o0L0TXfeeefhBxxwwLGtra0T0+l0q3eZVlbqCm5YD+qpsDua65QYwtDQ0MqFCxc+e8YZZ9z65JNPvgtg0GIGg4rKMCIZjDCA9XaM05Zo32ztnU8++eQJO2y//SHNLS0ThBAGByiqTPItl9pDV3WNZlETyvPrJEFmAAC5XK532bLut6644rc3XXHF756zJIJ+hRnYksHINsIA1ulxNQBk
iKiJmVsBtM2ZM+fEHXbY4fD29vbJRATmkQUtSjrI5XL9n3766euXXnrpDTfffPNLFjPoVewGxRGpYIQBrEubKuK3Ami44YYbDj788MNP7Orq2l4IMUL0FTKDgYGBnrfeemvOfvvtd21vb+/HRNTPzP0WMxiRCkYYwFonfGe133DDDSc88sgjZ06dOvWgxsbGjhGirx0jAIBPPvnkjbvvvvu2H/zgBw9atoJe629+hBGMMIDh3Gwxv4WZmy655JKZ
J5544mkTJkzYHcDIal9/qWD1c889d/+sWbNuBNADYLXFCHIjjGCEAdSb8BssMb/5jjvu+NqBBx74/VGjRm06QvTDzwiYGa+88sqfjz322Oveeeed932MoDgySiMMoB6E33r//fcf8+Uvf/kHzc3NG0iWoM/8cPI6PWWICPPeeOOR751yytXPPPPMAgBrLPVgRCIYYQBV6/gNAFoAtN1+++1HHH744Rc2NTePZilLuinDwdA7K5M9wNYHVghIHXgb
iu+nMQWMa53j/shwQTbk/MI2iteB+jqgYEIA7qseYwslqLZi3SSxu8DuvT2zibkECtKwwyD78Dxs6T9Sn4VcfBOpY+c7z3MJhhACL7/88t9OPfXU61988cWFAFbChSSPMIIRBlDW+GQswm+/4YYbDvzGN77x0+bm5jH2ZPNO/sRLlXWuRbrWdxVQSz4ycVGA6v2sY8rtbWbgQIJtvF60G96D6nPuq/QzSL7hEGKHIZHFWOwWPgSRGq8QwA3aHJE1
iEdtv0tBTOoQCSHwzDPP3Pe9733vf+fNm/cugFUWI8ijBCwameAjQxA6LgZKoJ32iy+6+IvnnHvOrzo6OjYJ0/HD6N8HqnW/WyNPTAoJcER3uCJEjkq6QYIOf4Dkt4qJIYg8izRyBie4RwiGmazPvqESQuCvf/3r9UcfffRtuVzuY8tGMIgSwpBHGMDI5tHziaiBmdtnzZq16fXXX3/JJptssp+Usi6Dz3V/vbzOTDSutk2Vj8PMuPzyyy/64Q9/
+CCA5RYj+FwbCkcYgF7c/8KDDz543Fe+8pUL60H469ID82fsTvHaF6Gnp2fxt4877ryHHnzwNSLqYea+z6taMMIA3FW/iZlHnXnmmTN+85vf3JFOpZr4M/nC3YxhRGErpVcU58+glCyEwJw5c+469NBDr+nr6/tQUQs+V/DizzsDsFf91vb29vHPP//8JVtsscUR66sv38oEAjBQ5CIKMo8iCijIPIZkHt1Dn2BpdgmWDy1Db3EVegtrYHIRA2Y/
TDbRbLTCIIH21Ci0ptsxKt2JDRsmYGzTBLSnOpAWGaREBhmkkRYZh5lwqO6+7m9SSvmd73znlNmzZz8PYBmAPpS8BXKEAXzGV32UMPtdF1xwwe6XXXbZXTUj/Fq5yyOuYxsTCzKPAufRPbQUT6x4GC+ufBKvrn4ROZmFQQZSlIIgA4IMi0EQhOKKDBogSfnGpX8MMEsUUYRkCVMWUeACNm2Zhp06d8MXR++D7Tp2QrPRijSlYVCqOqkh4fiVXJdU
9cALITB//vwnpk+ffnE+n/8IJVRh9vNgG/g8MgA7PLcFwJh33nnnV1OnTj3MJn57Uun84vYcY3LkZNffr9BRwOdeIyYgWSIv83ij92Xc8/EteHrFozC5CEOkYFAKggSICILIIuOSmG9/tp+eFFGfKMTFBxef4PJFdv+xDXVmSGaYXIQpTRTkECY2b4pDxh+Ng8Ydia70aJf5VDUGrPQXCuuiqniv2t40zcLhhx9+wgMPPDAXQDdKIKL8Z1kloM8h
8TcA6Dz44IO3uv/++/5qGEYTK77sEkHb/mRleik58oKZb1xmUZqr1uS0Q341iBgLO+RwC4enkJeDFDmPfy37O27+4PdYPPgeUiKDFAkQGSVCt4je7ikRRaoI9qMSlUMgHJsAhJnBkNZfWJKCCVMWYZCBWWMPxYmbnInxjRMdvz0U5AGxl6htUJUn65ACeYKDMSDFzalRRSxAgosP8GcwIgVyUGL+L7300l923XXXXwD4BCXswGcWSfh5YgC2X7/r
ySefPHP33Xc/20GtaQg6OEwc6al2m6p59kKuG2EUN8jA4uz7+N3Cn+O5nsctkV3AEAJEorTKg7SE7pFYSIc5hIdJsAMxUkFA7m+E8JwFrDJC1gv8JUmBwSwhWaLIJlhKdKQ7ceKUs3DE+GMBpoC6EJYrwZ93kAPDGQWkUtoqQg+x/lXkcrlVra2tRwFYbEkDNm5ghAGsh1uaiNqYuaunZ+WfOjrat9FOiIqGjyP5gH72wpN/zxACb/e/icvmn4f3
+98Gg2GIkt5uCAJIeIVdNYuvCrsld50cLhOGqx1w4HvwtxJDkGyiKE2wZGREBsdMPAnfnXI2WJakiGQjnPxdVDwOzOaFF1543m9+85tHACz9LKoEn3UGQAAyhmGMmjlz5rR//OMffzMMo3WdEEfIwBpzNf5z3ul4bdVLKHAeKSMNQQIGGZ4V3v7sErn7l0LINigh1C6QhwNqEGvZjguXVlQIdi0JJfuBRFGakMxoE204Y/ML8dXxR8I0150UgG++
+ebD22+//cUAllgqwWfGS0CfceJvAtB1//33f/PQQw/9r3XheQ3DwB8X34z/ff936C2uRlqkIYThIXpblHcJP9yqqFcFaNgC9wKiOqvKRXh73V+TTRTNAgxKYcv27fBf21yDroYNYMq1r3739vZ+3NXV9Q1LJViJkpfAHGEA666+3wSg6/333/9/EydOPCR6EiuxJ9CHviQSlkNOEiRgwsQFr52MF1Y+jQIXkDbSMEhAkOFd5X1MwHNRClq9I41+
RCFrc7USAGLTAquMQddelSLYIyWUvApFWUTBzGODhrE4e4ufYt+xB8I01y695fP5galTp37jk08+eUuxCxRHGMC6R/wtADp7enr+2tHRsa092cKIhX1WcddaTKGTW71W0B9tdUQY6M59inNfOwkLeuchJdJIGWlrtVcJ2HLXBa7hQ+tZK7u3GSW26NdfGlCxBREMwYc0tNUCu50qFUiWyBVzaEu145iJJ+CkTf/DZQRchjujdhKPPOuss866+uqr
/2XZBfrXZ+PgZ40BpAC0ARjT19f3aGNj43h7FQRIcb8pLiYl0M4OrXVD093QWtfwxq5/HGS5/MgNRCNGilJYkluM8189GfN7X0dzugUpkSq57BQjHpWo3vsS/Lq98rvtWiz3pZVr1PNLQHHnh/3upg5grdHQbyRUmYU97qXPDCklhswcGkUjjpl4Er632bkoFotw3y9crwWV3gucuobKM5HvyZiC+Q3I7jM5AA/1OQzDwG23zb78+OOPuxnAp4px
cIQBrMUtDaB92rRpU1599dV/GobR7g/FTT4kWnN9PPcRKXyS+xg/nncWXl71rEP4wlnlXWs9KYkvbCnAK/q7XbBr81VGvn7So+CSHU2+fvNiBPlHsAo1cYeH4NklNoUpqGqDLRUwMyQk8mYOTaIFx0/+Pv59ymkoFouhE5tDJ71PVlFxAZ7j5K4IyhUMQ+Dpp5++Z+bMmZdZxsE16yMT+KwwgAyA9uOPP376jTfe+KClBlRG8xUNosCg7MeP552F
x7ofREu6FYawkHkKwftFfK11P0avL6tfRFqMX7VXT2IDSHohP4N2mIMPW6AyAba8B0PFLNpSo3DutEtw4LgjUDSLw0o5ggTefPPNR7fbbrvziGgJM6/CeuYmpM8I8Xdccskle1100UX3DHcgT8pI4Xfv/By3LroGGdGIdCrtIPNUote79VwR08MAqiR29inmpFT2jWMGHCEHUZiKEMQMJ1IXAjAJxVfIrJMWNIxASmSLg5jWtjV+us3vMLVlCxR5
+BgBEWHRokXPb7bZZmcR0WJmXrk+MQH6DBB/++WXX37AmWeeeXvdY/cVqkgZKczp/gd+/PpZyMpBNBiNMIRwRX3fih+2+ldixHPgPuv62+OYOMEQDqTGWvgNgypT4FJAghOL0Ffow6ET/g0/3eq3w0tERFi8ePHcKVOmfF8I8aGU0sYK8AgDqDPxX3vttV89+eST/zCcLqKczOKi10/H490PoyXdCmHBdEuGPVEiTI8xj4LHIox5ek2eXHU0gb4b
VePP0z7EBRp1HlBhdh8NZDjaeKioCKxKBm6wkgs3LqkFplnEkBzCpdv+Dl8d93UUZLHiSV6OukREWLJkybxJkyadIoT4QEq5XkgC6ysDSAPouPLKKw88/fTTb1WJP/DSaugqShkpPLDkflzw2qloTjcjJdJeA58gj+swSvxPMsH0kXpVztS4FbuWYkXwZUBN4as10IZAK0IlAKuFtKQBZglTSuTMLGZ0fhG/2uF6tBntKKVtV+OslOSpqlMAbiyC
k2jU9v/Y7l6PB5M9QK0lS5a8Pnny5FOI6MP1QR2g9ZT4237961/vf+65595VcgXZL1RJoM3e2URqpBgroT2eHNpK0I+alJaArDmIM17+Nl5f8wqajEYIYXhEffKDdRTQjkP0FK9ke1B/iZZZL1AooMh7mlO0+KD0wyYAdaJrRZRQ0SJkhnnOtdKAM4ef7+ujCyCywMQaVcHeTVnEmvwq/G7HW7D/2ENQkIXI1+C/pycJsxIl6s1erLwGq91HH300
d8qUKaf6mMAIA6jBZgDouOCCC/a47LLL/mK7f8qpV+994eG5fO0ZZ5CBF1c/jeOfOQQdDaOQEmk3BFej53vEe4InXDVGhowUtSlERPdiBKAFPOnv78MI2jH3ifL5R/j4lVBnl85ZiUCMeCesb6ElVgUr4BC/wiTUKMRscRB7j/kK/mu7q2GU6SCyGSeX4Rq2DIPPbb755qejBB1eZ12EtJ4Rf/vxxx+/w0033fRofXR+H3sgwn8v+Anu/OAmNKeb
S6u+zsinAntUKSCB3lil9amKFxjPlqrTLLgsT2GA8SU82a8OqN4DW7IoBRwVMVTM4Z6Zj2JKy+aQXF+bERHhrbfe+td22213LoCPLSZQGGEAlW0CQNuMGTOmPvPMM8+hhPirEZnrOf6a4ioc/sRM5GS2pOsLUYrN9/vzrWPK19hhrZTwidZfm2017tkk5zIHocSqNCCZIbmIVbmV+MX2v8cRE46F5Pp6jYQQeOqpp/649957XwbgI5TyDa5TsQPG
ejB3iIhaAIx/5513HjEMo72+HJHwVt887PevHZA2MjBE2km1FfTrB79761T5OC35f+eEzIKGkVdr+sS1GVl7L58XJDmPnFJoUI18Hj+KgUajCY8texhv9r6Gr2x4aB1CpbxMafLkydtMmjRp4C9/+ctCuDUIeIQBJCf+JmYevXz58vtbWlo2LWP0y7ZoEwnc8/GtOOOl49DZ0Gmh+WzCF24Aj6MGWJ8Di743DVgJ8x8hivgMceTk8osxCMRcJxKl
ywnlwEjkjsaiFms0gBX4BIRaBUKehfzG3KBG5NpeFOOGfdw+lCIDHw0swp0f3oR/m3Q8UpSuaL74LSbabjNj+vTpu3/44YdvvPbaa93rGhNYl2VKO2X36EWLFl07YcKEQ8Jom33l5NSqnMxendw7N9lZOQwh8KPXTsPDn/4VGaMBQgjPyq+K4Dr3Xi1EdqqrC+6zpyJEnePPNyDtGoU2ZkBKmGwiWxzA3/Z6BuObJrmJXjXU6U9T5s/R4OaR9L5L
BSVpjh8//vCenp43UAolzq4LTGBdniJpAJ3PP//8D3fcccdzpWT4E3fG5XVz3yZ5VgM3BqZ0HUEGDn/8S1iSXYy0ES7y+xN16AjNj+tP9AKI1lGKHw4uoqkZXBYziDY26mwDpc8Ao5TivD/fh6t3uR17jt7XJ9R4J1eg2Cp8c09TwlH9XCgW+1tbWg4C8B5KqcfXOkZArKPEbwBo/eUvf/mlHXfc8VyW3vRQjt/YMfTEzC+4zEOxD1nRZSZ2/8fm
+CSG+J3XafnanOuQb8KqnF/pp5pK2yOxKDh99kBnWUMQrPzGviNeOZ01MNwgYXHA9abeCwoAptSWlTt7ewNvHF/g/k5vmX1Hg8/vgSYq56jj53bJK895DIDsTbDiZ+al9GsptGbaccaLx+PWD65zqytrJpdnxFkz9zioKamf06lU64oVK2YD2AhAO6o0Zn9WJQAC0ApgfG5oaB6VJIG6bHkMYc+HpoFIIG2UgniEEIre7nXvhaH4Yi3/fimBSHM8
QUGBCpF6uoxHtVr3/dmEQ+8f1cQ/Dj51LblLMNpiGZACrJNKoCETOTOLb046Ceds+VNwHT0Er7322t932WWXCwF8iFJCkbWW6shYB4m/EUDHmjVrHkunUqPrdaN+sw97PLQVDGEgZaRLhjdBngw99oqvJ3A1is9NOhLpXqQkj89l/A07Hwl+C8fDceR94mCKSfuadG1y7TRJz2OGTzJRzY3kSjGK14BAEDDw6qqX8NHgIsza8KC61UTccMMNN1+x
YsWCl1566dO1bRRc1ySANIDRH3300f+OGTPmoHrpoSsK3dj/kZ2QMTIwjBQECCSET0SkyNW9HN19ffbfr4PWwmSUwskSk6owZLYMg/liHnuM2Re/n3ELpCYhaTIpKtbBWGxoaDgQwNsolSrPfd5tAIYQovXGG288cOzYsQeVdDylGo9GqfLlj/G9IlXUY0f57yksx/4P74S0SMMQKcvlZiXuYFYkUXYj0nwYc1c95dDdtgGQr53nOr7v/t/850Td
R9eWNW04YVsO6W+S/nBEP8OelWX4NT39Vgk85Jr+dGPOcel7J4An0AeWFJAxMnh62b9w2gvfhCChXNunjnC40ZL8UpUyb6zrpNasWXOXZQ9oQx1V3fVFAmgBsGEul3sTpfJdgZ564nb8wqvGAuss4dYJfbIXez20HVLCQMpIWZV2KGTlJy23j1rNvXaBoL0gLnde3PHyMhZXo9cntxNUmi+wFj4DDhgx9TEQKvH5lR13bWDFZSghpYm8WcDeYw/A
b3e8AczSsxhFKVSJVDBrPj/3/PP37rXXXj9T7AHDWm9gXbEBNADoXL169cOpVGpsXXiaYMx8cGuQIKSNtAXhFZ7svHH+fTWzjt7OG53Dfy3JzDHfa3v1tbGEhT0he9pweP+JPe3UefBu79voKazAnl/Yr4KnjZ8IG2+88VZz5sx5evHixWul1sC6MFUNIUTnE0888R87z5hxEZfT84SNGzIN2PkvmyLHWaSNDAhwsP1qzr444q9e16/nerk+IH/q
1ceEgUcczgYclVPJUyClhGQTueIQzt7qIhw3+ZR6DUu+Y1THAUNDQwtQqjyU/7wwAALQ0traOrmnp+c1Zq65TaIh04C9H9gOy/PdyBiZktgvfGI/yBPC65+vFED6BKXOQBx/jMuLlYWn0rcRSU5V0FpMHg9X9K9WF/GBaMq9XiURhDah+9+lB6fAKm6jBBYaLGRx/RfvxBdH71UXQnjzzTcf2Wmnnc61VIG+4VIF1rYKkAEwesWKFY8YhtFZ6/Ul
YzTg6DkHYFH/e8gYDY6fP0rnD67oFMvCalGM8/O81U4uSBpsFOKutZN9kAqsskyDZOCeD2bjyMnHojVV+3i0cePGbTpv3rwXFyxYMKwVh9amBGAA6Pj73/9+4r777vtrDxFqlyC3u2yBZjypnEipFW8hwC6ddwHu/uBWNKQaYZDQuvrC3HlEFL78qb1J4uKLWFI9d2GPa7ruL56Hm3priUKKacfMsc+ophSDO7MswyAH3IVFWUC2kMWrhy5GChkr
sInB7OZrpMB0UWQmuxQ6uQXZHSNlqUl+9OjR+w8ODi5Aqf5g3ZnA2nIDkmX4+8L+++//azvKy4F8gj2uF2iSPDgv0HbzwUaclb4/vPRvuPP9PyBjNIIgXMCOj8KYXGBIAEjGsLC+ULLVKl4I37msQGU9O/m+s7sCSSe1FSAtgcO5joImdr4jeA9VlA5r4wfgykCfvQg59bqROyl9dNq7/Qd8zwDFtQoVLc3663kR1cHrQIEFK+cQCBJeWDArJ9vz
QH0XJVgxBdZHWxI0KIXGVBNmPTzDqSTF7L0Bk+uaJPbBl0mpeeDYHDyopczdd999IoDRKNW2pOFYhdfGlgbQtXTp0tmZTGZiVcIKBX/tKSzHkf86AM2ZZiuqTwnfDQvqcWk6dtzXPUv/Z9i+V9UqQ7F2AfKlIA/OBTsZqGtozBYH8VbvPBw0/mtlogXjB2jatGnbP/74408uXrx4OYbBKyDWynsBmk499dRtR40atUfVmrIvAEMIAzP/vh2a0k0g
ElATUTirEauf/TtF4XscoS6qzedyr9mYcE37ZUt90W0izoMrKZRURgFDpPDPJQ/h/z6+y6nuXCtrTz6fx+zZs3/GzGNQwsbUdZFeGxJAJpPJjJkzZ87jlhGwdpyFCEfP+TJWFZYjZWRKK79QUnZZkoAI09vJjQVjjxlI/V2/8EUi9i0dL0ny3LjKPICafFOJcKSgfh+bhJjdSOmotgjpt42oY4q2L5D1/GHPE2rVD3t237txcz6ECCak6PfQ502B
JtGImwHcVZLsZoYwcO+iO3DklGPRnhpVUwJpbW0d3dCQWfTYY48tqrdBcLiFNgFg1F133XXsYYcd9vtai3t/WXIPLnjpTDQYDTCEYbn7hNfoR8HoOAAhTEHNI0BWtB4nILM4nBhFTPekq0UcdjAqfU85KYSS5PiO62/YfZP2O8xsSTFsw3tenHfAo49TuFFQMpfiBmQReTOPeYd/DKqxB5uZ883NzfuhFCuwsl6qwLBKAETUOGnSpAnXXXfd3z1l
vDylnDmQfcUxxPiLRrAbitpTXI7DH9kXrek2GMIACQJIWAF73gy+waUsWqcnnaGhGt2YI5Z9xLSNo/lKeEtceq84fsMJ+GFEvxlIXmOgXKyAB5+t5hwI9oXt5T3i3mwnlYHrdXq/byEO2uhwK9Mw+STIGOAXe4uPKPPd2G677XL33nvvvHpKAcMpARgARr3wwgv/uc0225zliFhcvqPKSfLDbgDAdv+3MUjAyeDrED0RhF2oQ2f00xXioIQGv3XA
8FVNfMC6buurG24wRhTQ/c6BUuUlkJCUEv35Ply/553Y+wv7e4yCYXEp9mLG8BajcVWpUrt0Oo3JkycfuGTJkldRyiBU87Tiw2kEbJg1a9bG06dPP8vDnUPpPyKxg1JJQpDAVQt+A5MlDCpF9zlKHaswb/JlknGLdqg+MtWr42HUvkw0kKygxRDin1KuFbOz5jP73Fe6NuT7y2XcU+cei/qNrfuUc05VYyDjxyjps/jdf1HXtOcQa46BXSGBrCwC
TekWHD/na8jxoGdpYa04okSoeqwaylfrS6FQxG233XYigC6U3OY154fDxcwNAJ3z58+/YvLkyd+s5YWX5BZjj79ug1ENnY7oL2y9Xyhgn2DqXTdkM1DKBsmLbmiildXzJBhDsoCeQh8GzSHXN622sSZEq9GIsZlRaDTS0UZkijAuU8UG6EQzw9HWkmYeJs15ScYzzioKX6YhX3WxSNOEIs6Hl1kCWAbDTm3idewBUoKlREEWsPuYvXDTHvcgb9YO
yp/JZDB9+vTD5s2b9yKAFbWWAoaLATQfe+yx2958883P1bKijxAC0+7bEIYQTlYfVfwPJvVQawWWkdE3JFV+lOXclBKLct3IyiI2b98MJ005CTuP3hmNqcbASS1GCzJGBk8tfwrnvHIO8uYApjaPq0o21mUPq1ikrqUsHjGWFd83ol1s+FWCzMLRqoCbZbgvvwbX7nk7Zo09sKbE8/rrrz+w2267nYdSnMAAaogeHw4GYAAYtWDBgisnTpx4bLRl
TI3Bj24jSOCPH96G/3zxbDSkmyBCcP7kN/v7iJuVunVa4q+HgMdRAAAgAElEQVRg68734tPcKvx6x1/jh1v9sOzzD3z0QDzVPQdTmzfUAE1qQ41cj5IYVRFsWAVDXZQ/EBb7X0lnK2ECfqiwdLIJDeHdo3tg162siRSQTmObbbc9dMGCBS+jlD2oZlLAcNgAGr797W9PnTJlyrEuPNSX1J8VxZ5DjAPs9cyvyC/HmU9/F+lUg2Psg3p9J1efVy13
4LvszW7raGVEGmhrsh0gvD/Yjcb0KPC3uCLiB4AH930QE5onY1VhQAOhpfB+cUifOfgZTGEmC/3zhx3j+HsFQEKkO4e014O2HQWuj8h3o4FGK88fda4H7q3MIXV+2ahSAQOCDPzoxbNgKAChoLbPnpuyhtmon4YKBdx6662nAhhVa1tAvSUAAWDU6/Ne/9WmUzY9Kdq2n9QZQGgwMjjm8UPw4vJnrBBfUuL7vbBf/5rhlHlWA4vgK+6pWSjivFkE
wgfZ5dhh9M6Ys/+cqmrhAUDPUA++8McvYLtRExOpHGGqeBQqoRxTAiPaQ4eIeyYpSMQxEzQJugJIhjCIBGyF9cn/GyvlyVmtP2hiVbYHzxw6Hxs1TUyWuzXEHaY2S6fTaGxs/LKU8nXLFlCsFYHWc8vMnDlzo2lbbHFSpbYoXxYlAIyXVj6Hx5c8ghSlfFYhlZ2TN5hIgXx6fyM3EYTHxaME1jgv2a/7wVnSFg/2YErrFjUhfgDYoHED7DduP6zM
90d4ATgQWOK29ef3c5cwnZeBfcscR3ghvOPKyjnexBpQk2z434VnnMNFLo7wbLBG/PDDisGsDwQK+csh3gQ/VNnJTad4FggAsUB7ZhTOePoENKeakxlkmWObFQoFPD5nzsmKFLDOSwAEoOOpp5764fTp0y+smTUx3Yxt7tsY/cU+pOxCHkIp2R3i8ydFRSDNUkhE4QCZsOXK+r17aA1SqTZ8/PWPtDaFSrcFaxZgqz9the06J2q9B94ljlX/aIVh
ylVa7xIkQqnMthBSDyEpqLCMPqrFTsgX38uSA9dyFgO4pchLBsFeXD/zduy/4UG1pKliS0vLTJQqC9UEHVhPCSANoH3GjBnn1/Ki9354J5Znu2EIw3m5nsy7NqhCBle/EnIwuLS4WWJ92qJ6TM1Qq/zeW8iie6gXbx+2oCYGRHXbctSW2KJzC/QVctrsv96ly7dmJIqU4djMxmoINoecz5rrxbYNyRisPa55Hg7pP/veLaOMZwYH5lIJi8BKzL5q
AFRAAQp3ESA0p1rw45d+iOaG5tqtqESp//u//zueiDpQoyzC9WIABKDxz3/+88FSSqN8hq9f7RpSDTj76e+hMdUEYuGAM6RjWPRmhPHkEpC+dw99tJjdRnLpnDBQjmTAlIzFa1bgnSPeRovRorUOV2VUZ8YZW5yBpdmVHiCTlBxHu4FdxnznCBE4jnZQBm+JVy/ij5dzfc93WUYUoTpPJMKBQz5QkY3oEyTQPbgU18z/Xe2cLMzYfffdj2DmNpTy
BYh1lQEIAJk99tjjNLuuHysrpzbPvoqOsmMB2EvF//vONRiSuVKudmIlypdD/zm/EoMhXSAH2MkCqz/PPkf/TwB4p/8T3L7f7disbfOAuy7MX1zuftqWpyHPjIIsBp9J809aKUasp/U+T8R3jhxDru4fJzyW9Hf2P3P0d7u9pITP658X5F45dM7YyVwcozKhIdWI377+CzQ3NisCmqJksBcByDGGRwDo6OgYe/bZZ89EqZZA1bUF6xIMJIRo+slP
frLLvvvue7Ya9EMxpbOitsZ0I474xwFIGW5BDzhlvLzAH5/cpDV5xAdqxPj6c2swY4Mv4n92/h+Y0qxb9R8CYfHAYsxdMRctqYYE4zecWfrrdc11YdPlFqQQuqSg79L60F/ohwEDM0bvErQ7IcwToBajZU+9Cyklttpqq3FXXnnlAyglD81X95T1Ef9HLViw4HcbbbTRt8tz8YVvt7x3I37y4rloybRagB/hhPCScN1/HvdOBJy3UoK1ufeClZ9i
5fEr0ZHuqPtUXDK4BBPvmIitRk+oiYehWhLnGk2eKBTxusCeWFN6Wlew1M0o7E1TJyXDlEWkKY13j12OgewAapGNUQiB5ubmfQAsqNYlWA8VIA2gddKkSceW4+KL2loaWvA/r/4CDUYpy48njZoNyCBoC4VpYkoCeQDLA/sAHw704KqZV6Ej1eGgwKSUVYv7YfvGrRtj13G7YU0hW3G/q9mh+Y5qr+vFwlR/vYR9TwQSQrCQKCNYnN07Hr4y5Q7k
XGBNvhc3vnUNOCjkV2wLeOyxx75LRO3VGgPrwQAaHnjggcOk5JqpF7ctvBk9ueVOnTZ1IPxWal02Vy6jJl/cbkqJbD6PE6ae4EyIUPWjRptpmjh3m3OwdHCl8pwxDEdy3RiSDXqp6lzUt3+JxkiL7fDbp/RtA/PLWgA83hJLhWs0GvE/c3+O1nRrzYyBW2+99X7M3IIqkYG1ZgACQHr77bf/t8j66hzu7g2s/pkWXPn6b9CcbvUm9fDr+Ep0l8rZ
fUKay6FZw9HtCK+I1eOjgZW4Ze9bkDEyGM7tyE2ORFo0IS+LnhUmuDp7E16Er4YcWM09r4Y5VKIKGDwRB2eNF/qC/eRAv/39CojrgTvoowg44v7e+eJKAeH9V3AXVJJE3RJybtzKstwyPL38iZrNh7a2trG77bbb5ijlDUytKwwg3dzcvMHorq49K7E8BKLXGHi2+2m8ufItCCJPuBB7fH0q8bMTG+/4byU86cRVHy/7skOy4i9ijb8sl8/jqMlH
OSJ/Eku/2sbvIdBJM9pzTMb5256HFdneyEq8/hUtvJIwIisE27531t4L2vsHy26HVz2Oq4YMD9JR3y+9BBgv+cX3ywvxpZBnst3LumuQj100phpx+hMnemIEIrVgjj5umiZuvPHGk1FlZeFaM4CGf/7zn8cUTekdcOgHWIVqupVa3eNpkcIvX/kZ2tKNpdx+VjCIJ5AFIcEmqs8bvoy+9gslZdLAC1fV6YirhgZw1NSjkKZ0LGYhDNcQxjTizjHZ
xCnTTkFvNuf1USt9lYzocYnQx0PPCUPpyojzEZE5OMn9EXJvLuP8uGujjLHiZIFhkr0eBFcKKAUKLR74AAVRCGc6qouT9AyaFUDV+PHjdwLQCqCxUjWglgxAAMhMmTLlELbKmoUbkPTCll9oN4WJRz9+2EL9+RFXikjIQQSf6r9W2YWtF3rFSWV1gSoJeIl0ebYP1+x+TUU6m+6Yzm4QxVDGNY/DgZMPwur8oPc5nbr3GgSfOz31q7Z08RHuWNrl
SqQibfmkCPIyeJayxBWk976sB9T7kH4ckLjY8y6lNxGHlFoEok6SUJGANjrU0xdZmhMl6vXd14OIglZaKRUHsa7hQ2ko6YRAREhRCqfM+fcgFZACBFB0VNXAqFNDGhsaOw444IAtATRXqgbUkgGkAbR3dHTMqKQitW7e/+LlnyEljFJlH+m69NgX9BOs9gJNsAcpwT8RqbYke4+rPl4TaE21Bog0TKQPW/HDztX9pu6FYgHnb38eugd6vc+JkCAW
61lKz0R6dB+8YwLPGJG3am4UWk+tsqMG5Vh9kJLDEYAyWN3H+468fnYGKWJ/NAoQMfn/PaHREchHx84UuAdpYpzJjTZ144phiBQeW/IPNKWbgxiCCtxlpjRx+eWXn1CNGpCqFfUTUeaJx584yp/xp1I/bGumFfe/f6cT7stkc0IlqSJx8sQWSfExpEoBrl0iW8xj7432hgHDkxIqCRMIW+V1f3UZkdVtr3F7YVRjJ4aKBaQNI8Ezszbnffme83Ld
V6zMb1ZQcuW9m+h4oxpgIsrJUEQaIyQHsvp5y7vBjSEgEHJmDo8ufQRfHL1HTZ5h3LhxO1iGwAxKlYTKumCtJABi5symUzc9JJDOu8J38lbvW3h/zWIICA+4x1lVlEj8uCowAEVXnIF3tXKkAGXFWJUbxMXTLy6zFFS8LSBKRdB9LhaL+M2uv8bybG/tq/DI+MpI5eDpa1LZp7z4pZpUNIIs73zdvIPPHmDP1ZRI48wnTqlZNqZMJjOqpaVlLEqx
AWW73mvFAFIAjLa2tp2qRqpZFtSLn7sALemUm0nSW/lRsUSVE9WRsEaURlbMFfLYfezuVfmldd6AMFUh7LNkia9N+hqyuUINqBLlU1dVFChr1KbO10YNnh9eC6LtFTBIoHvwU2QyNQvpx3333Xe0ZQxMrS0GkL7qqqv2FEI0AvBYNVW/vN9955hd1Kg9AlobWvH80qdKab5tn6ry11VbKbwir88IKVVrf8B6ywlQZQTi4cO8RzHSzkwnTtj6BKzM
DTh9qxTtVl6bcnbWjmGScY7vT7XXTrZLJEEHalKHIRxZSCQgwbjtnT9ocBMcqHXpwXewz1TOpbk7Y8aMAyw1IL22GEDmoIMOOto0Ta+l3mEC/uRtlgDPdqCDYuFlYG7PK1ieXenow8RWGW1d1hvERd4pvyN+ZdYj1ySIUfMkmlGivz9xqbrlzTzO3u5srM4OBhCQlaDdkrcpZ0eNz5d17GvtEaPM3uIS6jwzhMBPX7hIERQsKYHJmaPk9zhANZB6
PV/pdLrL8gQ0lEvTtWAAwhb/o20aFEBihTW9+LkL0ZQynPz+2hz9HG9CqQ6ey4q1VWJM85gSx/Vh/msdAxAHCrK/b92xNTZq2xi5Yh6fj23diTiMnVesR7bZvgFDGFgztBKZdMbjAWSEoyiZQ2Y7A8IwMkcdddTWldgBasEAUgAyTU1NUytxZfh/acm04PUVc71ZVZ1V3CsuySguLSvHtEvf8ULRxBbtW2hffsVRhRocQDn2kyFzCLfteyu6B3qH
Hcuvni/XwXgDWadry5i55jDqsDgHp9goAZB4eMk/kpmU4/iNlLjwwguPqgQPUBMGMHv27P1qFaa6qrgaywa7naq+Jb3JFn+Ehym4YZkazDfpdLBglVjyGP5cLqzKHEU2sVHrRonF9yTEHOfuS3L9XTfYFWZenXw695TXphlwc3FyzL56DdLMTeZAOHyslMYh7dyw22BbjsCOsC4rGoJ/CQiUgPNa9n0YAKU/7LHuB5T1YHk5O2KV3QzUhkjjomcS
ZstLQFpTpkzZFRXEBdSCAWRmzpx5uKfabxXbL1/6L6RJJW4VWy0DUVqOPCA1eHXpx3+71/SsYNa9JPwZdi1pQDK6GroixXL7+SuNEVBLTdnXivMiGGTg2lnXYllfXzByDazNX6fmSvS0k/H6r3qNsMg5VT9NoksjpJ2D3EOwLbSRehrEYkQaYNeVrEFN+p/Xf33p+83314NkhF96td8d4d01bydL3ppsa6rEDlAtAxAAUi0tLVtXAxmxB1yQwJ/f
/xPSRsoSj8njqXKs/5wAHw49qiw035yyhHlslhbTSIt0Ir2wXJXA7wYsJ7RYssShEw/FUL7glXjCVr4kRT04BsvPEfECiCkeEnb9JPdDMk+utk+6+1CwrUzSL0QXQAl4CXweQbWYiCDGoOyvxCwVEHeEEOkddthhAkpxAcPGAAwAlE6nN/bHSqsd1ObTBzwuPAKhpbEFywaXQDiD5A8Otd6SjVVXMrUiLHNvRF47j2OJfTnwlJWMQMgWssn0xDKN
goj1YkTvnZlOx1/FupUr5NkA/YoVlo/Pf17AKefjrqzJoKy9Pgf7F3hvrFtZEzgIddmB/Th95R7EwXgA7XMGC00gPrOpT08AkCKBa9+8VjHwKUFBWregJtxdmQuXXnrpARYDMIaNAcyaNWuiYRgtugAdr/jjBXT6w3kBxpsr30SukHOq9bJSptnjRfAVhtAh/+KKSuhx46Rta5DAx/0fl20hHo70XQTyYOmTlPxGTKGMJOdVk+k3KmVPkpLolWB4
kLC/FWccjsJXKYYpTzZxErj6tat89gb213DzAOA8bnV48zDsvPPO+1qqQGI7QLWxAKmf/vSns8L0/7BCS7qsaAzgqteughB28lC2/reJqiS3MSV1JtZuMwRh4eqFia35tXFDJtt68j2A4a85P7IN1xY25sGkKd5M1XaEYE9uBSRMiJC1mEMMnLqttbVlvOIJSJR8sFoGkJ4yZcq+5a50HLKSPfTBg0iRcGNXPFZrJel6FCepCwMQWLhqYaxlfrg3
Qxi4et7VaG5Ir9V+JH3ntJ7dI/H1krhRNFIPgZAWQJ7yaOTGqvtrmtKwVIBMUuqoRgUgAEZjY+OUWgx2Y7oRPUMrQIJKaj6RJ1KyJA9QZOqosKSN1e8UusIOxyoftvUWevGLZ3+BUc0tWtjqurRjPbwH6nlPa9oIIjz68aM1WxMmTZo02mIAiWi7GgYgSsZH0VWLnptkIlvod9BSSQJ4mBEojMkRiSfKCQhRz3FgmQKJqv4krSlfjRSREin8cu4v
0dSUcvpYvyCaiJJfFe5c4+vBTspRw75qU56FpC7juGeF73079iXCda9fV7OF5Pzzz99luBiAASBjCNFZkXjlm/RzlsxxVAEZwjH16Zj9cg7FcGmKCCrhUM6eEgKL+hclstqH+fB1rr9KPQCL+hfht8//FqOaWwLBK7Jc6QbxATGyTqtrtau22j/13csaXFsnucuQ34NtyRf+QnoVgwTmfDSnZirczJkzd0MZmYKrYgCnn376VAvS5nHtgcONFmoq
bfXoXQvugiC/+zCk2CUSmLw5iQncZ2WNmDItDRlc/vLlNQ8IqmSTJDHznpkY1d7keU6ypj5FAWA44diwkmZMYRPkczPGuRM4ur539LvkiHfMbn88RV/V/qpSQcjzcoQLgn3PC819wtwDrLhmdO5M/zJW4HyEYTG+fLj9beONN97WYgCpejMA8fWvH7mzZFnylBN7ou785RTssF9i+PLwlX5/4uMnICKIyzP0UWAPGdQeJCsJMzn6GmFFMxvTGdw0
7yZIyLVK/GkjjUP/dhj6i6vRmE5rnp0iIbBAgsSfnCBBKGuuhZg2iAHaRF0P4f0FooBAFPkciAAzwZcs1k0cE5JSDEHAD2tqgbhQ4pKdyzCAPArOfUjFyQSKkcLLWB0mU6KdTCYziogah4MBpCZPnjS9RA8cWO29FXp9sczMjkhEDKQohRXZ5SDhtfJ7WIgD2Y0xxMakWIsTRREi2tkCiwlzWEJRdXtapHH+s+fjiSWPYVRrU1ANouB4xD4jh38P
M2Br33OE+Bx4RzHvMCYjthZph5DnI901NCg+/2/e9hQoN6D6373qKSd+BiKAWCBFpfJv7vpPniQigXwa2ndWosGiaYKIMoorsH4qQFtb2xbsKYFQvjOQAaTTaeRl3vJbSA1bV2dbCI6ew7P5utlfQww0MtzYo14vk0rhsSWP1UQNKNelaJCB2xfejiteuAJj2tusEuHheHvE/GUfak/3PSxnvxqbH1iNQsbQn9paF3WJmNgBrSHOj9WXHFuXgDWo
H12GYvUYa+JNWJs9ComfwR4/IuBfH/5TmVfRRcQ49CBDmial02kbDCTqxQAIgEilUhvEMPVkOi0kTC7Y6Ahvnb+waLVyreiE8BwClIxdtbU04LuPfDcc/OEz+EV5DHRW31BLMANzV87FCX8/ARt0tWiVkHImS9K2HKGOJb1P1P0JwYSfnOA96Gr/sOZdMpJJMxzjuvev/GURZqB/5Ga3Urr88IePVO8JsITqsWPHJk4PlqriVsTMzbVwX7zf+z6k
tLL7svvXQQFyqdIA+YhFJSzJnIybceUMyyBC98AKZGUWjdSYaHVP6jaMGsdPcp9gjzv2QGdnU6kicmh5rDAGGyIKV8UE6sOEkr4brvF9kpQNo6hxYo48rupMdpSjLS0QgGeXPOtku65crCz9OeSQQyZeddVVL1kLvFkvCcAAUfXwJQAvfPqCE/OvxuT7i6hHTTAqdwLFxq7rr5JpSOHWt28twZVr5LqJIv5+7se0m6dhVHsT0ikj4fzgRL9yWWMW
l9CFQy3TSVbgtYFj5DJ+pwRtw2xLAcmHfLYEImRzWRRkoSbPteeee26KUn7AutkABABBJdhhqGEo6fbSpy95RDmPcUdT/KMWqaM9HkDoztWnx+5obsS5/zwXJsxQwk2a8juuXWtDK3a+fRe0tzaWiF8JR49+Pko0Puq1UOY1g9cjzXUo0Nc4r5+/vawAwyMTHkfMNVDGfRJ6B8PThxNQMGuT3m3KlCmbwIrUracK0EJEKbdUFAF+DhdjBLMJaO6y
ucpi71YBcD6RG/VLEUUc2Kvqa497zSykOa6/hufhU8DbvW9jWtu0RCW+KmUCD3z4AJb0fYwvdLW4+QJ0eq+m/7rPYWYP3TOzo+1TvFXdN67qOWH3IdXiHWETIE3/oDmu/g177xQjMYa1RcS5qgdGfUfqM0bLCtYIEFCEiVokCx8zZsxG9TYCioMPPrhT1cMZvmWbvVl4vZgAAIq+88nAJ27aL58UIZ2VikPSfPmSd+hES/b+lWrIsOoW8od4Q+/D
7mhtwiF/OsSqWVi/7cPeD2GkNP3RuNRY45LTGlFZf1z/mfSJLXxjC3hLZYUlaoFvjNnnsw0m1whP/xX2e+j9dHPGpwayTgoNG99gNHv4dRBemNRhNwZQ5KLmgsriwPF2GgbQ1ta2YVIJoGIbwM477zyabaJUSiax5eNnYg/7dGsp+pJ2MKNnsEeD+S2V9XXVJao8MsP3gsi2jKEE6ohUjDXHDUOge6Ab7/a9W9dgoEM3PRRmNnLx0PczBs/KlWBw
k4ytzyWlzSsARGOBUeb9qzkvydghpn3c8yTBF1uUWkTRCjwLAp/IIQ3vQPpTjxEYzc3Noy0GUD834LRp08ayn11Dw05DjWy2uiBKxg+CJ5aPya3959arZV/9VcUPTLqcP95/Ur2OdW2Q7zqa++my47S1ZHDkX45CSqTqBvzpMrqw15S9kM0WYp+tnH+o6dXKu0tt7i2H5Qlq1icOGyFXSiYCBguDIPJzSnuOekUQZ4F1Ct/aQfMEIURKCFFfJGBX
V1dnLVY5IlIKb4SvdhTDRanMSA8OWZ3UCMOo8zPpFN5dtRCvr369bhJAQRZwz1fvwUBf3krmiZE9LDJsXe5TjHRi20w+HfxUb0D3LaYUWGO9Vm2WjFQqVVcJQLS3t7fX0h2jJPHV054HvllZGatwHU0b6R+bfa6trRGH3X8YMqlMpMGvmq2BG/CjPX6ENf1ZgNZjmq3wna2/z1tee2JgVW5lIpRp3AyTLGEYRn3DgZubm2vCAEgQ3OVfJcUIEYB0
5JtwJ13kGlB21TwG0ilCd7YbD330UN0c2QzGJbtdggnNEzGUL66/JEH4fIkpESIpacaCCVg5tKo2c4YZQghRVxuAYRg1KW8qSDj1/7SGlwTiO1UqDiQ0hlHE+aPaG/C1+78GKWRs1V/1Belgw2GowYHcAP502J/Q35MfEf9rsDPzOskf+4b6apbTzDJO188LIAwjU8tVLypfvYQ3dzuSil7D8J6JBNJpgZ8+99NA7QC12IefyHWEr7ZV04sDwOZt
m+OHe/4Qvb25oLrJVYqpXBsxdl0WsUMr/K6FZ5Qhz1MwCzULNFMYANVFBUgZRroWWWhVLEGYRy0ZL9MgTyhcrK5Wz1J/b21rwJXPX4n5a+Z7CzwmqPQbwrkDW1EWcclul2Bi6yYYyprhz1zW4Iecz1Vedzg2WseuU4Nb5mU+0dyr5VYxA5BSmrVOwaorWOEPzaQKcrr5Q0gpNtdg8nx29ueWjgwOue8QNGeaA8xNJfpKcAP2+bl8Dg8d+RAG1+RL
kkIN8+npMKtcr+vXKf8gD3OfWIcvrzQfIkp5MZLzJCX5TngkU92yAjMzFylxV8OrOLIV/eetgcABzAV8IlRsjjoOUTGSegsS/K62y2QElg8twwVPX+BRBZKU+tLBgckDrXbLjo1tGIubDrsJa1YNgYnqKqqubbNdVVilOon24Ij+JfR0IKS/zrxhDXGHyBHkVM+AA7hTpOrY1OAVSwDFYjHrD4Hk0K5qymBbk5qZy5B5gnh4LadI9Og1ttYz0N7e
iCueuQIv9bwUawgME/vjmIUpTRwz9RgcteVRGOgbGn75dV0qP8BrQbSn6schbB40p5s9tEG+aNjQ8ykY2WC1ic1fV7EEkM/ns87KVOaY+3ViAln4fIRKCro7eJJA0Lqh37V1ZXDQPV+FkTac1b/kkXE5s33cLx3ojukYxFBxCDfsfwO6MmMwNGSun7r3cPWFa8BMdOJfHcahs7FT3yn2phtzVUvFS2U3tOaPLFmT68YA0NfXt7o2K2eJIIQ1qGSV
THKYip0qmFw6t2qGQDi0T+7vajvrN1LlEFKITTlO8MUdAJrj+mPqb6m0gGnkcMzfjkFLpiVA3DYz8BO4jujV4wGmIYGXvv0isn0FSJO1/QvupBmf5Dviru35G3acKri33W8KeccR70yQd95o5hHZ7UjXb6udKO3Q3Z+Uc9W/FJxbnj4qbUFAZ0OnnsOR9xmh3NO5GCtz2WUAdTMCypUrV66qDQOQiqhDKoU6JcJLmVLIfYNQPpNN/NZnDv4GZ7AI
xAqxKcfdduTUbVOv4//OyjHPbyA0t2bw4LsP4u737q7rAthutOOZf38GvSuHlJkZtUMzPsl3iru2d/aHHEcF91apQPf+I94ZlHPgO9fzGb7r+rgGQuaiE9Xqu6Yyj9X+qH1kVtoysGHLWFef58qlISEETNOsqwTAixYtWlaLSDjJDANGCAbaUxwwmV5FyQeqGtmWoqRIBto7G/Hvf/8OilSsowrM2LZjW9x4yI1YvTJXRsfXtiLPw3vJiosGJoTl
UvVdZwCjVBWgumtysVhMNPEqlgDmzp3bU6t319HYYfFXdmUAJRe3V9wv/RgmgoeKW7EibPW72seUQcg0Afves6/HNVjrzWQT/7bpv+G0nU7DqtVZd8GMFOGpRmJ/fHso75TKuL/+PN/OIeK8f6favmcQ10iNUmBJEkhVXau3tGWz2VUAinWVAB588MFVtZrEY5vHWgVB3bdKQiFkEXgDeokrRMpDRLt67k2Naby+4pK7Om0AACAASURBVHX84qVf
QAgR0ONrtUtI/GrPX+GgKQdhdW8uckzKfQaqsj1VSH2JzhPD+z51fat6fOC6Fw3UJsFMf39/N0rJQOtXHXj58uWrmDlZmZwYcWabL2zj5JBzM4xYepP6fdjW8trsDKCzowmXPHUJVhRW1FWoNqWJPx74R+y64W5Y05dbL8bnM7UzKpKfHAqVLhCoWoJauXLlJxYDqJsEIEvqOydTPGP40M4TdvZ685T8gETViVxJxTLU8Bz1uCBCe3sjdrp1J2TS
mboyAckS/zzin9hh9I5Y05cdIcth3BHpGYlmBza6LU3pePLW2nC8ZyxevHhRvSUA225REyTKzhvubDFQDhhRHT9oLZh0yGe/usAJRTkKaesX89JpgUH0YZ9790FKpOrKBIqyiCePfhI7jpmB1X3ZEcpch3YnDFgnPAggY2Ti10vW8QRvLM1rr722CEChngxAApBElFUck0GmREkEFsK0rmkWYyMn9x+pHnwiCOdI5f/UawjlHlHtVLRA4B8n71db
cwOe++Q5XDv/2rrZAuy9KIt4/MjHsf/EL2Pl6kFnXNe1f9mhIlatGcTK1Vn09Q/BLPKw3RvDcQ/fHCvhdISmL8BO43f0GL29OADSRgmSDydgU/vtt9/+dr2NgAAgTdPsUUE4UAAQbpwz6TutPGBrug2GSEESJ3O1EDz4Z61oFCZ72dcUpFfbNOd5DFLq9YWGu0W4J0Z3NuOcOeegu9Bdd0db0Szi3oPuxWnTT0fPmgEnq3KdZN2y2jKAFasHscfY
vbDoux9i6alLccMBN6GNNsDKNVn0DebAnPB+SCJjB79TZS6N6sfIct+qc1gysM+kfcFSaqRTcjwPDg7BOsYOrbnHCOAlS5b0WgygriqAmcvlFqtETURuZVMFVKOCIBgqFLb0IgqFPDLUEIBaeqz8KgYEPlSbCpn2tFGYkg/XIXznkU7N0rnUdJINefumQyXa49PZ0YStbt4KDemGujMByRKX7XYZbj/4dqxcPQjTlOHPi2hEtd+lFiLsef+S/vjK
1QO45oCrcd+B96HT6EQTmnDEpCPw5rfexOCZgzh7x/OwqjeL1b1ZSOZQWlRXP4p4n4H3SPo+V3wea51UITyKvX+tAJ79Ju3n1FTwygbK0qnOOwUt65uI3N/fP1RvBgAA5tKlS+eS8JbIIltfZ++KzAoR6Far9nQbPD4FRkB09ZpVws0upDPDcFDBJwagEfNtTksWMoEi7uH/P8zLbf9LCQMNjYSx141FykjVXR0gIhw68VAsOmUR+voLGBwqakG5
pOkrxXjsw8Y66m//YB67jtsN35r6LW1fC4UCLtzxQvSd1oc7v/pH9PUV0LNmEFJCe28KIEj0Ij5HvBdEqAZIcJ5/LlIC1APZ4iMDJgNbd21dE6afSqWYmbPDwgAee+yxFwg+HDvBB+2NtwMwGNPH7OitnWZhAVz6lQEjm+5v6C6sdkJZxQV5YgN01xeK8SaALHU4MbuqAji2T40NaQxyH/a9f18M17ZBagP0ntGL0amxWNU3CNIYP/391KFm/eNC
EW381yyaJgRn8PjRj8dLL1LiKxt9BStPXYnHj3kCuUHGijX9KJim9toi5v3H/R43h1Dl8YDBWU35zYgsNlvORqA8M+ctBoB6MoDiueee+0atoqKO2vJIKyLQsmjacfHWOAnN2g9UEImoE9vIlV7CrucG5fhEQvZaNjyBTBF9GNXahBeWPo9LX7502JiAWTAx/9vzcfK230N3Tz9MKfVidYyaEDe2uraSGav6snjrO28hXyijBh4DO4zaAcu+uwyv
Hfc6Gsx2dK/qRz5frHlAHlXwWznzTufBY2agWA4GIAafs2L5Ass7Z9abAUgA/VLK/lp0/OApB3vLhjlBN7akpAlK8RMoeQMwwoJ1QAQh3O+2uK8uE2oUntrOIXL7mCArs7Ei7iUMrNmgsxW/fuHXeOSTRyCoeqSg2reoNv+9x3/j5eNeRs+aQfRn8yAhAv0OjB28wSwUEiQUFjC0YvUArtv/OnSluip+viktU/D+d97H2995Gxs2TsKyVf3I5osg
IWoU0BR3LrTPSqQZG0dv936H8Aa5MYAtvrAFDKoNCvDVV199wWIAcjgYgJRS1gQS3JUZjQaj0SkXhhDcFCm+T4I3lJZijFI6Q5EnEAxBo52/HYUUQaWAkTKZoXhMZyu+9uev4d3+d6tOCJk0OEuyxLT2aVj9/dWYtdFXsKynF0Urw5saXaodpxjju/+9CQCr+gZwyNRDcPwWx1c9T5gZE5om4NVvvIoF/74AW3Vsj2UrejGYy0MQ1dSJETyXgsZS
DpeCgq+DA7X/pGQcv83xNYmPIiJceeWVT5XDAKqVojqXL1/+t3Q6vXs1YhcDMMjA1NumotfsRsYyjgmLs3sI3U/snuQicMN+1WFWKviq7iWPrZKCfdJ9RshvYQMbFoimHjdNieVr+vHRdz9CV7rLyZGQJINQGJGEtfVfM22k8Uz3M9jnnn3QkE6hs605KKYmnCi6KswDuSGMbZiIt741H3kzH3i2sMrK/jb+Z7K/p0QKHw58iDPmnIGH338YrS0N
aG1ucJJokO8BGMkr/0ZVEVb7GqyMxxqmWyJ+aWeKNkuF6gbzBSw47j2MTo0OeLE54psFm1HUWQKD5ahRo/YDMB/A8nobAQGgsHTp0meIvH6xoOefgkkbSCnNTaWotl033BWmyT5vIAdW+bCIViK97qpbtXXuqugVjQO2A8S4nuN0R/sRDEOgq70Zm9y0CQbNwVhxPo7wo9rqQEO7bLALek/txQ+mn4ulS3sxkB0KLfGdZDWx/+YLRZDZgNe++Sry
Zt5nSwl/Fl2bsOxJJpvYqHkj/O2Qv2HBdxZg1kYHYunyXvRnc9AtwOrciXtXencnR7tJQ5iaF67CVrl7RlECnemuANjFKV6vGJYdirDRg2xjAErfhRAmgEFLAkgkU1TLAIr33nvvQ0IYrkGMvLoP+Sxtfl3dTghKRPj+Dt+HySiV7FV9m3Y2X08GFz/4yJvbQZuvAnqLsKCQEFLPTh5vggdPAE3GGYo+Bij9ANCQTqG1KYVJf5iELGfrqgLoNgMG
Ltn5Erx78rvYbcMv4dOVa5DLF1yxOizjEunCZUtW/JX9g5j7zbkJhdHqnrFgFrBxy8a4+yt3463vvIWDJh2OpT296M8NOXOFRIJcJr53FpxLFDwGTShz0BHmVUGJwSyRohQE23Yo9tkTvIulMweZHE5GxI7XzTTNXgDZpB6AWqgAaQCdvb29nzBz1VaM9qZ2NPy/FBrShJRhAEQwlHx6kfny/GI8xTkea1KCIXIIy8lDYbcdHMpDcBPeP/49ZJDR
qDkc+b0cXVorfoPRYDTg+e7ncdrjp+PVZXMxqqUJzQ0NsTUNHBWAGUt7evG3r/8N+47bN/b+lT5D1JYxMni3911c+uLPcfsbs9HS0oD2psaa3oOtOH7/Sw4r/qImh5VSIl/M4/Apx+LqmVfXpD8LFy7864wZM84B8CGARK6WaiUAE0CxWCx+XIsHyA5lsUHTGNhRxsLmps5f9nJf5feIbGGWwdAbUSgCkgEHjI2RGayc67G7+9qLqNXFt/rY/Wlp
zMCkQWw1eyuYZEYmCK1m1Q8Vv0HIm3lMHz0drxz9Mh79+qPYomN7fNKzGgNDQ0F/uv95ibB0dS8u2+syfHnClxPdv9bEDwB5M4+JLRNxy75/wHsnvocjphyFT1euwVChoJdiwrKZIbm04OBFYgzOdv0Lkxnn73x+zSSja6+99u+WBJA4U2y1DEACkENDQwtr8RAFWcBuG34RRSmh5EBVqFt4tS4Pxbv2hmDYnvDJfwJBZIbQoIYiMj4411R2RGQh
oeRZOtoaG9EvV2HrO7ZBgQvgtZTCK1fMYbcxu+G5rz+LF499EftM+DKWrFiDNdksJKs6qu3WEljWtxrHbX0cfjT9RyjIQlXW/mq9BUCp2s64pnH4w75/wNxvzYU0M8jmC+EonaRZZcL+Ig4NBAcSL5mwScsmtZFGmM0bb7xxPoAcylC6RC3oduHChQ/qst1Wsl24y4UoSFU/YSdREIj1WV0peex1qC6LkFRTa2EHgI6mRqwpLsfWd26NAhdCjWIe
vEKYASoGL6A7X/2bLWaxTec2uP/L9+PTUz7F8VuchKWr12DFQF8JmWcR3LLe1fjS+H1wy6xbkDNzofcMM/ipf9UMSmFSQ5RxVE3Fbi8uW7Rvgfnfmu8iIZHAnpHEnUjhcw2BWAa3KlBbehSkWTMDSQ7AQFLRv6YMYJ999vlrrRjATmN2QlumBdJjCEQAbuwZVV2kGzj6zUU5hAXWLgewrLrtjY3oM3swdfZmGDAHYtWA0BTiMeJ/EiZgr6YdqQ5c
vsfl4LMYF874MTbIbIQla1ajP8f4/rZn4F+H/Qu5Qi5W5E/CfPztk3gP4q47rmUcJoyaAFPKRNQeGjXodwdFzjWlBLDFLYrSxLkzzquZtLZq1aq3LQ9AHmWgCmqBPyyaprkin89/KoQYV+3F+gb7MLVjGt5eM9fRSVWPgOtQ9HqdyZEXgvn9k5vkGHrPfxKPd3229sZG9A0NYJNbN8H8b87HmIYxgQKkOl9/NXp11PlEBMkS2aEsztv+PPx4xo8B
ozQU+XweQ8XqcsRU0veyDKMS6GrswrKhPqRq/O78puUgjoCtkmWlv9/f5vuuvb6KSlZEAvfdd9/9lgRQVhpqUZPnBmQ2m50HDeH5Q3KDmUK8KwMD+MGOZ2Oo6Fb/cwxlridUCSem2AAONaafolLDq/nloySEED2CEgYrIUT11JoJALQ3NKIhw5h4y0S82/8uDOGtOuRf8YXGc1IOfNgvzenOs+87VBzC0NAQhvJDvlDvynbd88Q9i/+cqASspdcq
Kg4KUgPDKMIg6v8OslVLBkuJtGiGkMJ19TG80aRECkaFQoqC2JlA2DznnHOesNQAc20wgML8+fP/LAwj4L90CZsc0I/ju4SXiO3LHTvtWKSEYRlyFFt7ICg/OgtbXHhq5GcqPwcRl3lPivjdG+oKNGcaMKa1DdvM3gZPLH3CEzyiM5jFIQA9TDfkfJVoYl2AVfyetGR6TQyDBORkLjbMuVbWHIYK6ilRdJFNfHPL4xxUYAnco8Wiu+fZjECNM7Bm
nCnNXgB9FgPAcDMAACjOmjXrT7bPnsG+LB5+hJQ3i5AqKxARBgYHMH3Mzq4dgGUQrJOAK6vHdLpaXPhmkvBR9TlEkvNQwW/W8cZ0GuM7R2HW/bNw3YLrnPyCSUTmqDbVnl/t70lF/mrdhUQEGMCq3CoYgpK9k4R7pFTgsAJL/Adw2W6/DJS1A2IiSUl/4NNPP31FMQDyWmEAAHIDAwNvBHXvCnyLLPEfO5yDbNHKaSBEwN3nMpcYN5uSXtzxBDp1
ApXILjUNucO3oijfV7VK+JCOSQLp/TqAiE9ykBIGNh7dhdMePw3ffuzbiesNRInzUapBEi9CtRGKtT4/6hwAWJ7rdoBm3qQPEXqa9d3jmkZUMgXtKgXJEi2pDqRkbaL/hBD4+c9/PttiAObakgAkAHP58uX/qpVB5cipR6I13VySAiRbNQThyTQUWvDR/x49rkLSQzgDRR0pPLKPlOvZkyvMHRTldvTPFeghw8HnI0zuHI373rsbm9+xOcwE7z0u
jiBstQ3zIujQbuWoGOX2oxKVQbct7V9qJeekSDcgdDTM3jwQntUfwYhVB8JufWYARVnA6dv/oDSva+GCKxQG77jjjtctD8BaYwAAkLv05z+fnU7VJrFB/2A/Zk08CFJKkAgxDiVZEUJXg/iVU0T+Liq6Zrn3ESHHQIQJ7Z1YXexGy/+2ojvfHcgpkGT1jGsTZhOIMzSqv/l98nEuwXIMheVKCP9c8k9kGkTEvHDHWATmm3KOSD4vXDuYBJHARTv9
Z7lsMPSXgYGBRSih/xIHANWLAZh33nHHe4O53IeVPZc3LovB+Nmuv0B/oQDJEswyEGUZTCK8toteDu9WKijZjPEtHdj41o1x1fyrnBUyqTstzsCWBLyTxJUXRvxxhku1X7rPuv6rmHv/b3/+4M9ozzTXd6YwazP/SNPE5LbNMJDt1xIDaY/o/YNkMeE777zzNssAWFEV2lo7sdsXLFjwy/Hjx5/qe7OlZKEeIvU9WOA5Cc3pZnzxz9PxQd9CpI00
hBPWZblyhCbteJjIat3aycQKN3qRWcVpI1E9A/sc5699fUIE9qCS8CDdOcFjDGBx7wpMadsUbx3zlgdhVm4wURRxJSH6sJj/qBwAYffX3dvPrHT3Vf/aW9pIY/O7NseguRwZI10yyTGBKGKsnTnCiTwYbE0ElXlJKSFZYmBoAHO+9hI2a97cySNpY9hYQ5UEglv0mksl6bn0twQolEMdHR27A1gEYPXalgAAYOiqq666LZ1Oe0V02MkL/CnBFbEK
XhELBGSLgzhl69MwVMyVcqYrsGAiLguyK+ANAFLTgqseBmHhDeLAgMJ3roAurJiqcDAlyUnrfb7J7RugZ+gTNNzQgDfWvJFIVA7DEajH7e9RBkd/+zD/fBKjpdpOvbf9DOoxFbMQhQewZ/vC1QvRaKTd3LwUM9Zl1zb1zktYgT9Smuhq2ADbdWzn+PBZMRp71VavMdqT5EaxN/X19S20xP9cpeJvrRlA4fe///1b2Wz23aBQ71uJtKChIIDoxC1O
RmumHUzsteghuY+GUQM/z3qydza1YmJbF3a6byfs9/f9YBiGVgQPE63L9eX7Q11r7bpL0oekKMDr3rwODWmj7DENmz/h88rNbWmbq/NmHqdtdzZyxVyE0J98MwwDt9xyy80AeisV/+uhAgBA2yuvvHL+ZpttdlEtLkYg/PqNn+PqN36LhnST1iimnwxecc5J2V1n6C4qFPjrcfHl2T7kiiYe/urD+NLYL8FkE5/XLUUp7Ph/O2LZ4AdoSqerYjjh
4r+d9stVA6SUMKUJMOHT4/vQn6tJDl0Ui8W+zs7OLwH4AMCadUUCAICh75166h3pMgY5ep4zztv+4lJKKeZgWQ5d/TV/hT8Oq/pX33+intXpKL7NmKZ2jG/pwN5/3RvT7pmGIR6qSfbh9XE3ycTc5XPRlE7X5D0gpLYkfHMTKGUrOnjy12tG/ACwdOnSFy3f/xCqsH7XgwEUX3j++SU9PT2PxVk5E3dSCpy09fdhcgFqgQ4t+k+H2RFlaQzlahgh
hR8QUzSCIrEkUTEGUfED/v6nDYGpo8ZidWEp2v/QjpOfOtlRCz4vmyCBAx88EBs0tVoSpGVDomBBGHdnhKH7XJ0W2iSVahVghgSDcd2Xbq5c7PadkEqlcMIJJ/zesv7nqxqbOoy3BFB44YUXbhbCcPtPXvu/Lm2ymm/f419kExds/xP0F7IISxgeZXajChJCU9nt/bEE4Sa/KNw/YspTef9PUBLN+tyRacbktg3wx/dno+WmVlzx5hXrFSOoBkz0
+NLH8ciSRzCqsQW6mG8KNatGJQjXnefV0JgZRbOIAycehuzQoGvZJ73TT+cABNRcm6Vt9eo1Hz777LMLLQmgKkRRvVTUFICugf7+dxncpmYuJcf14lNj2f+k3qdOiRQunnsO7lx4C9KpjBeFlzBNVmRGXfXOrMvpXp7aQsNoayj3heeliSUDK9GWHoVLdroEp219GqSU+CxuWTOLzls7MaGlAxmRqp7hsFKZWnERsqXzl9xzpfTfpiwim89iyXF9
MItFQAli182zgIs2QBIMYRh46KGH/ufrX//675n502olAKrjPGt98cUXz99yyy0vqpWFbFAOYOrdY9HZ2AUBASGCDMB2rZDGkx5o5/NI+AeFy/zs9W6E552PrTMQZsn25beH7/lsnIUuN0KwPWHILGDJ4Aq0Zbrwg63/AxdPvxhg1Aym6ieoWngEyrmOhMTo2aPRkhJoSzcFJnzo+CQ0/DG7DN82ANp1AqQ0MVTM4bBJR+OKXa+FrFFqZGbOt7W1
7aYY/6q6sKgTA2AiGrr44ovDjYEVzIU2ox0/3P4iFGVeBwP0CO1AdD27qLz+YW3iPuvun6QMd+CcsAlO8c8Zls8/2J7RYKSwads4dKQFLnvtZ8jc3ICTnjoJ3blu1CrDUxLpqx7XWZ1fjY7bOtBkENrTTdr6h7rx4bL6YjNcdzrS/2/vy8OkqK6+f6eqe2Z69gUEZlgGREFxhbhBXoISFxTUqMSowQjGuPKqWTTmlcQowfB9MYk+fu7R15gIBqOT
aAQFXEhUFJFFQQZkUWSAYWaYtaeXqnu/P2q7VV3V2/Q0M9L3eYYZqqurbt26Zz/nd4gAZmWuPjTp8YwRPwBs27ZtmW77dyMDgOu9qadKAEo+//zzRwcPHnxFpi6qkorRSwagyF8CnySDSHLdGK4bhTKrmGc3qNjbpoFWp94S7kRzdycmDpmEG8fegO+P/r4mTXUTIZkswnSPpaKae2UaSiThzb1vYurSqRhSWIEyXfIn+0JjAVitt5xMHgRjHIyr
6I4EceO423HHcfNSBHX1ZkM+nw9jx449d9euXesAtCCN4p9saQAAwGRZjjz11FOP+JIpEEpyL/i4D78/4zEoLGKmXdobLnjDNnu6dRJle3k0gegxdCAlmWlGaVwXqUNX+EjCEYFSHFtRgy87P8O1//kB6E+Ea/9zLTYc3KBl1iE+nHdPjiWrAXjhA0qShDvX3omp/5qKkcUDUZ4XSGlt4ALtbnV75i6VmhwiapVmBmhpv8X+Evxq/II0EJ29z//y
yy/f37Vr165MOP+yoQEYDKZ03759rxYXF09Kl/M5h9/nx8n/OBIhNQhZ8pvVWzzRpgOS9+71JI0/KVFDGbte3JXrgUdTAqByjpZIJxq72zG0ZBh+Mu7HmHPUHJTmlfYYoSeT5sWW1i2Ytvx8NHbvwciiI8BSJTwen1S1XhUUo4mIGoCR+NMebsWj//VnXFBzsccbIhePUJz3SoDf78eNN9z4/WeeeWYl57wJPcj+yyYDgCRJRQsXLpx0yy23vM5U
1fVxSSiKMCoBtfoBsjoFE7TogV4UsbVzE857/ZsozS+HTLJrSDFtGzKZjp69bTOkcG7CU11PSO1hJCIElQgOhNvR0R3ClGFTMHfsXEwfNh15cl5WmYFhPhARgkoQv/j4F3hw44OoLqlAhb/IIXXjMFuBBl3JP04hkEbwOmPQHaecc0RZFEeVjMXK8z9AMNIlRL8cTWxhVZNxo7UnWQVAZMXMAABNTU0ba2trLwewB0AnMlT6mg0TVgZQ1tLSsio/
P3+csdSu1rggrRJtzzwpD9e+/z28u/9t+IxKwUwxgGxsYiTuINwnfQX6Jm2NBNEYbkU4ouKSIy/FDWOux+RBk5Ev5/c6MyAihJQQHq1/FD9e/WOUFxRiaKAq7QYqqeIVmh2BHZKfcYbW0EGsvngThgZGJM3IEpq9Ph/uvffe6xcsWPAq5/wAgGjG1jJLKlrhvffeO/EnP/nJ8kxujm4exHF1w1GaVwLZEeMluHP9vsIAvi6OQwA4GOlCY7gV0SjD
hSMvxC1jb8GpA05FWV5Zxu+5t3svnqh/AvesuwfF/gIMK6yETLJG/GlwUs55XC6cnONP8/qHoiH86Ni5uHPcr6FyJWPP3NzcvGn48OGXZVr6Z40B6FpAeWtr62q/3z86kxvwnZYVuOndHyDfV6BpAWI9v6sfgBIHgwHvoD9EAAAPky7VVeUebyRd1SBTKkWyz8SttW6NdKIx0oZwRMUxVcfgZ+N+hsmDJmNE0QgTxDRVadgUasLa5rWYt2Ee1uxb
g/KCQtQEKmP8Pu5r6sy2iaP2O56TMx6TuGEjft0EYJxDYQrykYdPL/sKESWSMcLx+Xz49a/vvf7++zMv/bPJAEBEhbfddtuE3yxYsIpnMOtMlmRc8OZkfBXcpTkEbV5qF/InB3Vzsa85d2wGN8rmcWiMkqCceE1HeBKvxf3aVvYhT2IeqTRJicclvecqEaFLCaMx3I72aBBgwKmDTsXsI2dj0hGTUJlfiUJfIfKlfK21NVMRZmEElSD2de/DB00f
YOmeZVi5bwUikQgK8vwYnF+OMl9hAgef17Pan4HzBJ2dOY95HxbhczPV11D928IHsfS8d3Fs6fHpOWs9RmNj47qRI0deCeAraN5/3i8ZgOELaG5uXlFQUHBywuVIuFraCQQgjDBOemUkCv2FkEg2vyp5hp+E2G5MN5fEabyxpMEF5yV5kk+iTECvJbDcovGzCuP1NUolQxFJkZA3a4ndYBrjDaphNEc60a4EtQawDC4FIkC+LKNYDqDUF0CxrtkZ
GPqUBJtK9PxM6NfnyoI91H7o3zUZgE78YSWMOWNuwC+Om5+gISrBExHL5VSf7MPcuXNnPfnkkys4582Zlv7ZZgAgosCFF144dvHixR9zxu3SVIcN4uICWThiFsSTSGD65wQJGzrXYta/v4M8Kd/KeDOy2SxcJTNxKAbTy0sCx+xw5/nkctyxiZznmztVUDHJiQ9FzkIJ9+3q9HBzj/CSOAfuoh65zd8T88xDO+A8ptuTtfb24g8b/LqxLIzrIBrc
e15e84t5NpveLnjcEzoFBOvB/j3OLN3DIH6VqZAgY9N3voLCFGuPErdZIeZcvZgAkcXo9TXbsWPHiuOPP/4mAA269Ee/ZgDQQstlO3fufGzQoEHfzbRn+Or3L8Hm1g2QSPb2ARD1irnceyOeGdFbrzCZGEW2Vi41bMSkHH4pnhNj98MC+mgLH8Tqi+oxJL86o0/t9/kw9dvfPn/VqlVroWX9Kb2xur4s72YGIHjBBRfMW79+/XcVJXPPxDnH1vbN
Zp09R5KdcZPcQxnzq4lCMqkepIkqGXowac/7U4L7Z4jxJOWaSLaiI/H+SAqM1Mj8c+QAmOnGsJhAlEXwpyl/Q3V+TdohSK/xxvLlj69atWoreoD4m6xEzvaIbt68+cCLL754ayYv2s2DCCodJuKNCPrpmspr/F9Krj2UnEaSngAAIABJREFUlGy7L3j8FkFEJeG4FCc9OMlrxutP7/o5xbk/Umh6mcwc481LSuJ5KH46dtyUagPAg3Pva5E9DVgS
9oXY8ANmd0utSY3KovivwWfivMEXZpz4ZVnGhRde+AyAg+hhuW9fZAAMQPSmm256N1OAFATCiv1L4ZcLLECROO2dXFuIGcd4Gln9lESLYO+WxAmgheL85sm2s03h/pQAish5nCeAL0rUtjkRvJHzXO5xPY/jnCN+uy8z285RL6n7YSxPFemZf5r6H2FRPD7xr1BYZv1yRIT77rvvFlVVG6DF/HsVyNGHQzPkiRMnVqmqmjSJO51atiNE+M3GuyDr
MNAxfi83X1iaXpFsWMRJX4tS4pK95NntpetTz49ran9ie99uGpDgq7NgwQzQD8YZ2iIHseE7u0FcTrAvoTNIj3OIrF4VOhtqamr6dP78+W/p0l/pbUI8FAxABlD40EMP3W7Kb3J5YbaIgJktbUp8bq0c2tVWBJUgivNKPGHG7fsk/SRcygJ95XIVM+MTihfOjR/u5ULgiKx0X87QHQ3ijfM/RJmvXLuGDe3K0URFUCaM6xCRyzvWdrY/z4+ZM2f+
DEATNLBP9nVkAP7BgweXjxgx4jwlqugNQ1wcdC64aYTYUBKB8NaBN1DgC+hxf6cGYPe2WbcgF57tFuJKRKJJYurbZ2bhPXHhXuRVJeYVP44XyXcWocQJQZlzSjQHL89h4so2+8T0c0UJSYTEGRHx3hHZCdetqahApJYQ0fNJOMXY8pbQ0RhAVI3gd6c/ijFFx1oMxNFFyjP7FF4YFdYZz/7vs/NXr15d39uOv0PNAPK/+c1vDlcUBaD0EVKNRZUk
Cfdv/J8Y9d/gAFa6kOjYJoc+IKaUkqe+4L7VycNIcdcz7GF+sksIx5zs25uEe5MtjQkgx3EgFhpMrGq3b01zfhSLMRSrI5ErvBh31a9ipaz47NzGpJ3RByd7FVkZCestrH+CymdTQhuNgozuPAZQJ7cyAJ1qf1SN4rujZuHCmpm9UuwUDocbf/jDH74ILeTXI6jvvswAZACFv/3tbzMWATgQbkSX0omSvDKXrepuCHiZBoki7m7qOaVhHlAcM5oj
MfwYuRAwPJ4+mTlSiuYIJeEC4HFNsATGFyV3f4JT9U6m5yC5r4dZ3mv4Abjp9FNUFSdUnox7Tvi/GcVLNObv9/sxefLkawDsg9bmO2sIrdlmAP6hQ4dW1NbWTotGoykvVKxDT8KqphUI+ApNvHfnF11lGKW32RMdz4QjMJnvuzMJr3I274v2ZioPJflOe1z7JOBFAPESB70cgtxElhIlvxHuU9QoBhQMwKLJ/9I6/GTST6H/fu655+avWbNmC3rY
5qs/MID8M888szYaVeJvDCEL2JUodXPVJ/nw209/qWMDOhN/uJX2C2ciiAsseQYIIlvOO0rl7nFELmWTI6ShGSWaj3sb8thrcPCYvWGk3HLBSy/iDHJwKFxBQC7CW+etR/IRq3juCzK7Wxlz6Ozs2DF79uy/AWjOpupvjGzmAcgAAr/85S/nkuD4cHZG1VqJmz3AIWLdGl1cuP55Q/dudEXbbYRus1U5F7r5xjru4iLzxvk8VaInJBctowwREbn7
mVK6GCWYN6Uw33RMkLhzjteMFHB4fLjLcRJqNYSqP8Hbr6hRFEgF+HDGVjDGbN4NErpe265sdvElITHJ7ugivdaBA5BlCdXV1Vfrqn9GUH77sgbgr6mpqRo1atR50WgUNiYQz9FHLhY9aQVA/2l+E4X+YlvDUFd1wWsTxxSopCClEnwPCWzX3mQCSUnXNK7fk2TctJmAmx2fLLqz0FDa6QgUpb0ZQTAkv6qg0FeM98/fYkl+G8EbF/YAYTfRad32
scVA7rrrrh9Fo9EvddU/ikMwsskACq644oqjU7H943IT2Y8HNt8Ln45Ua2K0k5srTNgJjnZEJJGnumkeiqk4M9i7s7rP9b9xTY1kte7ktXOrQDlGA3XYnu51fTyRVyHu3HpashNzLTEnnye4jphP4lGDbYT6nN5+0+ZnUVTkVeCd8z6Boiq9Zuxt/OSTJQ888MDb0Lz+ERyikU1EoIHbtm17etiwYdMyccG94a9w9spTUJZXrncI8ugPINr7XAMU
hYsmYHM0OkB/yEFUcNtbDo2Ap+DM7K0Xy1O8b6Lzenv+Xgwq6bBbAnBPq3MPFywArY0X4wwRJYyhhSOw7NurM4rq4xxdXV27qqqqLoBW5tuBXk737QsaQF51dXXVqFGjpmVCA5BIwqoDK1GcVwxJsnq6kFskwMj0JgInco0K2DYN6bXc3C1fIMZqtDsuk+Csvc1xeQpqdzrzoywxL0BM1U1OW3BmOTjfLxe4ihnn13H8w2oI48pPwpJvvYFwNNx7
74fzUFVV1XcBHIBW43/IiD9bDIAA5N94443He6tUqcklv+zHI/W/0yr/XLu1ul3FKyvAoQoLGJHu4o7HJOnEEEYc/0YmVP54FfJ9E2k4vbr9RJa+yChiE6Ecn3HYQFrM9F5whKJBTB1yAR4+7Vk78Yt4HT3WfghE4FdeeeUsImrgnGc95HeoGIAEIP+KK753tQDMo6nUJuKP1RHASM20Za9x4W8CtnduRXO4ERX5lZZ6L/ZpJy+cOnFj2E+zfUPo
XhxLVJSkNOTuBEqUkLCTcRRy2EM4bnY9IXEj0+zAiZC7b8CJr5pA4nsyCiGzmgSXDrchLAn2vmEKMIag0oU5o2/GHeN+rav9ljbJRTQjg70ILiBuupa48Cxk+Y1EtiEBdS/XLairq/uIc27Y/fxwYAB5gwYNGlBbO3Kaqqo2GzwWupsc8hoxdr0MCW83voFif4lu9zuAPsVogJuoJIq/yQTKIsdxch7g5H4iKC4RczfthJIHJ3FzhollD+RQdU0f
qIOjUTLePC+PYRybI8b08Ej1N9trWe2bk1N/hLIwt3tyB7irmd2nf6LZ/CpCaggLJzyCGUMuRUSNCHOy54s4HTyOrQSxxoTg/FDbj5s2bXrx8ssvfxZaoU+oLxB/NhgAAci76667xpvNJXs6YdmHP217GDLJgnwRI7JkbThy2x3cjNW6yWuNi9uz552xJNM/IGwOK++d0sxoczIaL0jLBJRroz7H3BNhDIo2UMzaueD9wQMfEW6ck9vS9ThiicdD
HUNsc06y+V1MF60Iq8jtj8KZ4QDUPP3ghNfOeh/VBcOgcHczPLZwLI3qUSLs27fvw5NPPvluAI3Q4v19o6daFhiABKBgxowZV2aqgGJbxxYcCO9FRX6laTaYElRwAjqh4J3eAC46jIggeZYRU5yNIZovyZoGyW4jnrDHQbouR9d0WY4E2TfJVgV4nSOq1KmQEXk6A201AVyEmLWr/KbnH5qzr7pgKP511uo0svtSF19tbW3bhg8f/gOd+Hsd4CMd
Au1V9b+qqmrAiBEjzsvEekokYfm+V1HkKwFBsiQlOaWbkS1owHmR3s2XzIxAydAdiEBWFYglaRyufqsRhKVW2pLJXLgE2RBoLHVQikG9gXsiEwkZkdw9JGZlUAqMQmeERGSv8NPPlSTJ6nxrnCuRoDVZqDrGGtlMK5cOveL/naDKVhcdsqv6NpOMTA3K/mV75l1cZ69jbUQwT5WrCCkhXFX7Qyw7a036qb0pjHA43Dhw4MBLdbW/o68Rf29rAAQg
//777z+de0FVu6GlkCssOwgEn+zDX3Y8CYkkXYJLMWaAkKRlk3Z2pCCKJ9LjgomSQ7v2ZGRm/ziHje52S4rfchtm9mPsXG0AFLY5Cv8SJXAzOnwv5G3Xu6+jFVePYVAxwKIuKppobtjg1L1VL+5k2OCuhM84g8IURNQI6qa8jREFR5r2vpu3P57TNf5BsUCZEFWiHaWlpdMB7IeW6dfniL+3GYAMoGDq1KkzbWEdckhoRzdgE6yB7HgZnHPUt23C
/m5L/U9EPJQEYaeikvMYCZ+6kS+q30Q9g7lOnhOTvf9ATzz83D3Fibz8EpTaNd39HxSDuGNP5RVx+5hQx68iokZwdOk4/HXiq7ZMe9KjUMTJES1wIWln114hUhUD+0UExlh09OjRMwDsBtCKQ5jpdygZgL+4uLiqtrb2XEVVY3P6CTE2tyitHOnUkCQZr+15GcW+Il2F1nRwcqoMJDjvuOOYQ7q42e2xGaRGvJiBcRUcHBLJkCEBehciD1edK0ET
eaus8eklmcyAeP4EM6c2RaZFKVptToDNxIQf/w66gBDj8Za6AVt4jjNd82JQVAUhpRtPnrEEp5RPsgGBGN5DcuwDMZnL6BBkwoGbf5PdEQxo4WzopiTATjn11Gl79+7diiyg+vZVBkAACh555JHJViVVD9UJWcaSXc9BIhmMc90McCLS2JmIaz68IGE43B2FDNxq+sijULmKy4Z9Hz8aeTuKpRJ82rkeN6y9An7ya0hEQtyQEsJ5xCccrxBhz0uI
MlVm1LPv8Hgdk9x0Jm5FWWLeFVlIvYbUV7mKqBrBmNJxeOb0Ovjgi4X6osRzpRS0S1OASRKmTp16/saNGzfjEOf49+YbTlb9H7hz587nampqvp2JC37euQXnLz8NFYEqSCTpjizRqRXrTEumMYjNboSWFhphEcgk49pR/42ZNVcjIBVBhuzYhAy/+Gwu3ml8HQVywJaV6LrQGW5LTtSbry9zI/XoD7cpKWL7bhEk1kn4jGnhvVC0Gy9PWYXawOjs
EhIRpkyZcu677767UVf7s17b35cYQICIRkaj0U2sR52ANdktQcLCT+fh+R1PId9fEEP4bkzA7XeMw95U7RlCrBslvjL899G/wNQB56NQLoIEKW7TBwmEZuUArv5wOtqirfBLeYI3OznizyRjcHMSZiUn2KsIJ01G4fW308EHQCd8BUGlE3PH3IUfjLwJMuRk3HjIYHkTmzRp0rlr1qz5VFD7+zzx95YJQAACixcvPps7XeBpSAMAkP0y/vHlCyaY
AvQIgBjL55wBQjsw0YbjAtqtFs1jYFDRpXRhWGEtbjt6HsaXnY5SX5lt09mLhGJfKQNHhW8AXpn0AT48uAq3bbgGfsqDj3wgkuKCY1qApdyevkupFP26uyrtQJ+CysDtPTqTwRm2uyq45xuP1wnZqzTabg5wezmmbdpGCNaQ+NDeIFMRjHZi4sApWHjiYyj2len7wGW1YkCOKTbi4FiQZNiDoijBE0444bzPP/98q+Dw6xfE31sagAxg4K5dXzxf
XT3kTGsxyZGo4XgXYodZM9NLO3db22c4Z9kEVAYGQJZlzQQgAhkagGTF3CWykoLEV8g4gwoF7UobJpSfjpuPuhNji49Dua8SKlRbAYpIRKaU5lx39pLg+LVnuiuI4rX9L+GeTT9Gib8MMmRPoBKbtDZDl+TtwfeiwUT9xpPsR+6ZB5Tou/F6hLt8hzsSnLiVwRObwmuYA5ybnzHGoHIV3dEgRhSNwv875a+oyR8BJrxDGxGTV8NkimU03J1EjPZi
NiRo4ujq7Ppq9OjRlzY3N+/SiT/an4i/txhAAECtqqqbmd5OOW7wLS6Wsybpf/Xx7fjbjmdR4A9AkiTzhxxwYiRJtmADB0OUR9ERbcfZQ2Zg9sibcVTRsSj1l0FlakK1NR3tWSIJQbULLzc8j4Vb/gdl/nLI5NN9BIgbhkvkeHI7bqtiTAZUz3Eo/vfjL0zC2IOHxmdqWB6deN1+G624g0oXagLD8Yfxz2BMyTirNZce4nRqGGJTTxKEDBc0MJuA
Ig731h32I7t3735v1KhRNwHYAwvRp18Rf28wAAJQXldXN2f69Om/y0T6r8/nw/i/DUeQd8Iv+0GSZDoBDSAQkowcMQZGHBEWQYSFcMmwq3DpsO/jmNITEZADUFj2qi8lkhBi3Xh5z/P4P/XzEJAL4Zf8kCBZmXUOcyBmm7l2kaEY4BFyqPgJuw672fCUiJjj1+WLWU9c1OqF5B6x6s8OpsKFrrswIbqhY/KrTEVntB3HlJ2IX417AOPKTvR+l72M
gkxE+Oijj/56+umn/4aI9veVst6+wgB8AAbs3r37hcGDB0/OxAXr2zZjSt2JqAyUQ5ZlzQTQNQBDbebE0K2G4Jd9+F7tbEyrvhQnlk+ADBkKP7TvRiIJKlewvPEV/K7+V2iPtmlRA0iOiIWocqbmNCQdKDVuUwwib0rwYBh2KG1KyqPv5rRzYyZGbgJ38fYzMNO51x49iOnVM3Hdkbfj6NJjkR6mRIZsW0nC3196af7MmTOfJaIDnPM+l9t/qBlA
AMAIRVE+c/ZI42lNjvCT967HSzue09R/WVf9JZ3olW5UBQbie6Nm45whM3BC5QRwBqhc6YMLTZAlGe+3rMKzux7Ge01vodhXBp/ki4kc2JkCJRHy8y5xJkoBASBZFBK4eP1F7L4EzMGsq3Ck7xrSvlvpQpGvGLNHzcVFNVdgQN7ArGpvroxckrBgwYIfzps3bwU0CO+sNvDoD1EAAhB49dVXL3aSfLqGgOyT8frOfwEMUNQomMrQzUI4snw0Zh55
NaYNvRhjK44HV7UEkEMpHRJr2RwKU3BK+UScMX4y9ocaUNfwPBZ/8TRao60olIsgk6RjHOjJL5wAMCHH36xzFv4WVG/uobvbHInc0xFmfezBspkH8Xu8aLsdTwBntl3BAXCm516oEXQpnThr0Pm4YsS1mDhgCpjKwMDiEr+ncMkggGEwGNw3ZcqUK9etW1cPLczXZ+r5+5IG4AMwoKGh4aWBAweeEe+dxLadFHu0W5/Ut27BCc8dj0AeMH7wOHzn
yCtwyajvYUTpkdrmEEI+Ri0BDAgopxc4jZ1BicnFEzMjORBOCbJPwsct7+MfexZj+b5/IcSCKJCLIEuSBXcOt4YmDqnPCa7tEbxmI7j9bYDH4h+UAtlxjT+QWBhkGfUOSW/l6QeVLhxfNh4zamZiRs3lKJFLU5L27k8nQPYkemnG7nMJA0iShM+3fb5szNgxP4aG4deBfhbmyyYDCAAYrijKFnufNhH3iZsFQZy7kBcJ0M9E+P1Hvwckjtnj5mBA
4QCoiuC5N0Ny3KxxESGmLAcULM9YTABc/BJPfqcZjEYAoIALI3BUkwiY9FYxlPEtn+QDJOCDplV4reHveL95FfaHGjTnoezXy3IlDdZMYAp2J6BusxsIyTEUr2kXrlGEOM4/Z5We7S9u+dhjynHBhR57WvgurIbAOMOJ5d/AlCPOw8VDr0SZv1yLyggQXFYjTy3H3oazRFx4nSIkiFgbYv2fC3vPAn7hFuy4w7kKzgGJsPS1pb+ZMWPG09DSejv7
s7OvtxkAEVH50qVLbzr77LPnG0k53sqwB0qc45AsyRnoxyZW4aXjHvbOU++1vnqkRTgagrvxz4YXsLrpHWzt3IygEkRALoAs+czUY7LV7SdwAPZg0iZTjSP8mA6SYNZSMIYICyPKIqgJjMDx5Sdj6hHTcfbgGQDg+W45T6GbkfN71LO9QiShs7Nzz0UXXTTn7bff3qyr/Ieka09/YgA+AAP379//z6qqqm+kv8OQG67eZxkgYFv7Z3ht79+xsfUj
7Al+if3hBvgkH3ySHz7ym4QuOXokkCs2QHpM1LTdbaE7BpUxqExBhIeRLxVgWGEtRhcfgzOPmIYzB02DT/KBM57R7rqZZ7yELVu2vDxu3Lg7JElqZYx97VT+3mIAAQA1iqJsy5FrFhgCyYY2j/ea38Ka5v/gk7aP0RI+gFalFe3Rg3rzVBlEMmSSIEEWAEIpJeLnuuRnULVMPKhQmYICOYByfyXK8ypRXTAc36iciMlHnI3qwDDNJ8BYj2sEskX4
4XC47f7777/1vvvueweal7/766jyu0nuTKj/BW+8/saVTpCG3OidoXLVjD6fVjEZp1VMBoHM3AgA2N+9B5vaN+CLzu3Y1bUdu7t3olvtRoSFoPAoFKaYhVC6tW4aZjL54CMZPvIjT8qDX8pHRV4VhhfWYmTRURhTehzGlByHPDlf1/31ORnzU/tXaHznzp2vjx49eq4sy+0A2tBPKvn6igbgBzCgqalpaXl5+Yn97eH5YTRXs2Q52da+RtiPM7B4
jcbS7j126N6ALvVbbrv9tuufePyJNYKtH8VhNDLBAAoBVGdc/e/J3nBg4fNM3jjOx/G+6V5Lo3ujXcJUxFMLKab8JIkmy722CU/yvo5iG/MD3RPvCMXZntZlsSiZuycE7zO7Dapr1nz0zBlnnLFQkqR2xli7buszHGbDlwEyLVixYsUs0oE6zQo5IdTnHoq1UF5co3Cu4TUSW0J45BLACr2ZFXzJk6RrLyiTIIU8AzjDfVYVoQ2fXvh/bK8p7nHc
SvwR0W8AyxPvhKITiwCIu/QvFNZY7MdhfWY9lViJ6WybyF37oVlFNkaY1xaGjVldJ0MxJqZ/ydbqy8D7FwBLxfI+IcRLIuoZCVELgob9B46DBw9uqqmpmROJRBoAtDLGQoeDre+pFWaAgQQmTJhwGedMb81EMXLADTvfSkIhe7W8CEFtlvbCAgo1I1pCT0C9oaftfKEkOAYV2ES/5bHluub/BXhsTkaei9BkVJ+3Cf8tFLxwoRxZ33xkzkNHLbKm
KyB3O4CxBfBQ4nb1VXx2ElpXGSi8MWAonOw3NODADbBtc0KitHQ2FeH26kurFYsV1hU6dpjw3WTBo5vLwq33akCJW8lN+hqTntwlfM8keuO7XLgPSM8Nsd6dMc+oEm276qqrLh44cOCMSCSyDRpOf9fhTPyZYAB5AApLSkqOsWPTCxtVrM8n2DcsmViu1mcQEfu4bSOa257I3hNAYC9ObHmTAQn48ty8pJ0hmW3Cze4yXNlaX7/sjDNOv8jn832r
sXH/f0z5TLHApiQyLgj3cKdqxPQFgPBMAoMUD7v0pbIzV3KrKSDhsg6cQtf1s6ZgMi649961sVWB14jy3bONmbgcjsxGL8R1W/cAcpxIlupoJBJxzpUHH3zwloKCgtNeeOGFdwHs1R19X+vwXjZMAAKQv2LFilmSJFHq3v9EgJmJ3RMpRbMo0d20o4yx6Jo1a/529dVX/++OHTv267HgLgBqdXX1d4cPHz5w48aNz5eUlIw73FIX0mkxTtleIIHp
LVq06O5Zs2bVcc6Nxhxh9PPqvb7kBPQDGNTW1vZmcXHxUf12AYgQjUa731j+xhNzb5n7j4aGhgORSKQNWrVXSJcUqu4gkgHkAygZP3589fLly/9UUVFxci702bfe56JFi+6+4YYbXuno6DgADawjfLir+r3BAIqgef8/IyK5X9k9koRgMNjywgsvPLhw4cJVW7du3c8573Ahei/KNhhB8cSJE4cuWbLkt9XV1Wf3DADV04196HcI7x+E//jjj99+
9913r2xubj4gSPwc4fcCAyAAlStWrLjtrLPOurs/SEBJktDS0vLFE0888cfFixev27Bhwz5oBR4G0UcTEH08RlB42mmnVT/22GPXnXTSSbf0nBEkS5d9lTqzMy8iQnd3d9PPf/7zW5566qkN3d3dRtFOJEf4vcsA/AAGdXR0/LuwsLC2J9vAHg5OBznYe3MQEfbs2bPu4YcffnTZsmVb1q9fv1e354P6JkmH6F35CxHlc84DxxxzzIC77777W5dd
etnv/Pn+Us54couR8mKlQWfx1jfOZ64Iwl4x95jEC/ciae/S8MSPJEkS6rfUv/rzu37+UF1d3ee6tO/S32eO8LPAAIoADFUUZZNN/Xe8PaP4hDtqwuGyHcSYt9W3LWEptyNOrRH91q1b33r66aefq6ur21JfX79fJ/huXdKrGSJ6r/X0AygAUHLHHXeMve66664bPXr05VbxjB39wATX5Pa8CFtKg2N94q+D3V8fs+om5fHYC3hghZtBRrEMNyYn
wCjbdZaB22P1diZitNOy40GQcJbBbCSSEAp1t7744t8XPPDAA++sX79+DxF1cc6N7D2WI+esmVtUtWLFivmMMc5UlauqyhnTfhs/TP9RVcYZUzlTGVcZ40xlnDH9mP59pqpcNb+vnaeqTPu/cF3zfsLnnHOuqipfv379y3feeec1ejViLYBBAEp1FV1Gdh32BC3CUgig6qijjhr7l7/8ZXZTU9NHnHNtHYzn19fG+s30tVLNH6YyfR2FtTTXh5k/
TFw31XGefi4TryneS3wPxnHbOcz8TBXnoxrXNM53vC+mWj+2z7W/tTmrtmsZ+4ZzzhVFUVevXv3k9ddffwGAkQAG6gLIj1z96CEZfgDDurq6vrI2hPXCzd/M2LDCj2r/v7VJYn/ifcY555FIJPTee+/95c4775wF4AQAIwAcAaAEWn5CX3FMSjoTKgUwYPr06eNfeeWV21paWj7h+jCfmXk/c2//mIw62/dWY99tNBpVNmxY/9z8+fMvB3A0gEFE
VKavo5QjwUNrAhQCGKGq6kYi8mVtokTo6upqWbt27Ut1dXVv/uEPf9gAoIOIQroaaDh++rIqKOsMNB+A/9xzzx128803T5wwYcLFgwcP/rYkSf2zmrIHCRFGslJnZ+feTz755OWVK1e+PW/evHW6My8kOGgZcok7fYJhVC1fvvw+m/ToBclgjJaWli9Wrlz58Jw5cy7RpcEwIqoS1MD+KhFk3VdQCqAKwIiHHnroknXr1v3x4MGDm5zawdflxxjh
cLhr+/bty15++eU7ZsyYMRnAMN1sK9PXJdtmW04DSFL9HxIMBtcUFBQc0RtSHgAOHDiwZd26da//8Y9/XLZ06dLPdSkghuuUr5k0kPQNnyeYL+VPPvnkKaeccsrpQ4YMOaWiouIkv98fANBvtASxxqKjo2P3vn371m7fvv2jRYsWffDnP/95m/4+FWjxejHhKifp+ygDKAQwUlXVDZlK/jE2yd69ezesW7fujZtuuumVL774okEgenFz8MPknUgA
ZCLyc84NhpB3ySWX1F5zzTXjR40adXxVVdUxpaWlRwcCgUE2kM9DwBzE+0ej0VBnZ+fOgwcPbv/qq682ffDBB+vuuOOOT6B3sTcJAAACeUlEQVT1zlN1Bh4RGHmO4PsJAyAAlcuWLbv1nHPOmdfTDcMYQ0NDw5o1H3647JJLL/0HLLz1oCAZ1NzmsGkIPl0L8wnHCq666srhF1/8nWNHjhx5dGVl5fBAIHBEIBA4Ii8vr9Ln85X5/f78TEyCMYZo
NNoRiUQOhsPh5lAodKCtra2hoaFh5/r16+vvueee+mAw2CLY7Ir+d1R4n7lwXT9lAH4ANaFQaG1eXl5lOkQfjUbD+/bt+/jNN9/85zXXXPOKQOxdwkbJEX1y7834kYUfn/5bEjQJCYBcVlZWOW3atIqxY8eWDx40uGTAwAGl+fn5+UQkFRQU5HPOiYh4MBjsZoypHR0dwZaWls4vv/yyfdWqVc1r1qxp0rUxJvxAf18GcavC/3mO2L9eDKAIwChV
VdcTkZQs0YdCofbGxsaNS5YsWfzTn/50ha7+GYk5EeQ8vL3FHOBgBIDlWCMkBgYz3gcTiNn4mzsIPPfuDoNNNWDp0qX3JuPhDQaDzdu3b1956623zoLmua+FlsBRDCsxJzdyIzf6yfAR0dBIJNLOGOOccRvRM8Z4V1fXgS2fffbKOeecMwPAGGiJOQbR5yGXwJEbudFvR355WdlIplG9kX7Lurq6Dnz88ceLampqztKJfjiAAbq5kCP63MiNr8nI
q6+vv5WpLNodCu3797///TCAMwCMhZbAUQktRJjLz86N3OhPqn2S57GioqJGSZYG6zY8g+YRDuPrmZiTG7lxWIxUYN7E1EzRI5wbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbuZEbh2r8f9MLG+nctDnXAAAAAElFTkSuQmCC"""
@classmethod
def instance(cls):
if cls.INSTANCE is None:
cls.INSTANCE = LocalInstaller()
return cls.INSTANCE
def __init__(self):
if self.INSTANCE is not None:
raise ValueError("An instantiation already exists!")
self.CUR_SCRIPT_PATH = os.path.realpath(__file__)
self.WRAPPER_DIR_PATH = os.path.expanduser("~/.whatsapp-launcher") + "/"
self.SCRIPT_FILE_PATH = self.WRAPPER_DIR_PATH + os.path.basename(__file__)
self.ICON_FILE_PATH = self.WRAPPER_DIR_PATH + "whatsapp.png"
self.DESKTOP_FILE_PATH = os.path.expanduser("~/.local/share/applications/whatsapp.desktop")
self.CHROME_DATA_DIR = self.WRAPPER_DIR_PATH + "chrome-profile/"
self.CHROME_FIRST_RUN = self.CHROME_DATA_DIR + "First Run"
self.DESKTOP_FILE_CONTENT = "[Desktop Entry]\n" + \
"Version=1.0\n" + \
"Name=WhatsApp Web\n" + \
"Comment=Chat on WhatsApp from the Web\n" + \
"Exec=" + self.SCRIPT_FILE_PATH + "\n" + \
"Terminal=false\n" + \
"Icon=" + self.ICON_FILE_PATH + "\n" + \
"Type=Application\n" + \
"Categories=Network;WebBrowser;\n" + \
"MimeType=text/html;text/xml;application/xhtml_xml;image/webp;x-scheme-handler/" + \
"http;x-scheme-handler/https;x-scheme-handler/ftp;\n" + \
"StartupWMClass=whatsapp-web-app"
@staticmethod
def sha256sum_file(file_name, block_size=65536):
sha256_hasher = hashlib.sha256()
with open(file_name, mode="rb") as f_handle:
            buf = f_handle.read(block_size)
            while buf:
                sha256_hasher.update(buf)
                buf = f_handle.read(block_size)
return sha256_hasher.hexdigest()
@staticmethod
def sha256sum_string(data, decode_type=None):
if decode_type is None:
digest = hashlib.sha256(data).hexdigest()
else:
digest = hashlib.sha256(data.decode(decode_type)).hexdigest()
return digest
def compare_hash(self, file_name, data, decode_type=None):
if not os.path.exists(file_name):
return False
return self.sha256sum_file(file_name) == self.sha256sum_string(data, decode_type)
def compare_file_hash(self, file_name_1, file_name_2):
if not (os.path.exists(file_name_1) and os.path.exists(file_name_2)):
return False
if file_name_1 == file_name_2:
return True
return self.sha256sum_file(file_name_1) == self.sha256sum_file(file_name_2)
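    # Write file_data to file_name only when the target is missing or its
    # SHA-256 digest differs from the (optionally decoded) data; returns True
    # if the file was (re)written.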
def write_file(self, file_name, file_data, decode_type=None):
file_written = False
if not os.path.exists(os.path.dirname(file_name)):
os.makedirs(os.path.dirname(file_name))
if not os.path.exists(file_name) or not self.compare_hash(file_name, file_data, decode_type):
with open(file_name, mode="wb") as f_handle:
if decode_type is None:
f_handle.write(file_data)
else:
f_handle.write(file_data.decode(decode_type))
f_handle.flush()
file_written = True
return file_written
def install(self):
need_restart = False
need_restart |= self.write_file(self.ICON_FILE_PATH, self.ICON_DATA, decode_type='base64')
need_restart |= self.write_file(self.DESKTOP_FILE_PATH, self.DESKTOP_FILE_CONTENT)
need_restart |= self.write_file(self.CHROME_FIRST_RUN, '')
if not self.compare_file_hash(self.CUR_SCRIPT_PATH, self.SCRIPT_FILE_PATH):
shutil.copyfile(self.CUR_SCRIPT_PATH, self.SCRIPT_FILE_PATH)
need_restart = True
if need_restart:
raise LocalInstaller.RestartNeeded()
class UnityNotRunning(Exception):
def __init__(self):
super(UnityNotRunning, self).__init__()
class UnityHelper(object):
INSTANCE = None
def __init__(self):
if self.INSTANCE is not None:
raise ValueError("An instantiation already exists!")
self.unity_running = False
@classmethod
def instance(cls):
if cls.INSTANCE is None:
cls.INSTANCE = UnityHelper()
return cls.INSTANCE
def check_unity(self):
if not self.unity_running:
try:
ins = Unity.Inspector.get_default()
self.unity_running = ins.get_property('unity-running')
except:
pass
if not self.unity_running:
raise UnityNotRunning()
return True
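# Thread that runs Chrome in "app" mode pointed at WhatsApp Web, using the
# installer's dedicated profile directory, and quits the GLib main loop once
# the browser process exits.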
class WALauncher(threading.Thread):
def __init__(self, chrome_path="/usr/bin/google-chrome-stable"):
super(WALauncher, self).__init__()
self.chrome_path = chrome_path
def run(self):
sp_whatsapp = subprocess.Popen([self.chrome_path,
"--app=https://web.whatsapp.com",
"--user-data-dir=" + LocalInstaller.instance().CHROME_DATA_DIR,
"--no-default-browser-check"], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
sp_whatsapp.wait()
loop.quit()
class CompizNotFound(Exception):
def __init__(self):
super(CompizNotFound, self).__init__()
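# Wraps the WhatsApp Chrome window: it listens for substructure events on the
# compositor (compiz or gnome-shell) window, launches Chrome, waits until a
# client with class 'whatsapp' appears, and rewrites its WM class to
# 'whatsapp-web-app' so it matches StartupWMClass in the .desktop entry.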
class WAWindow(XWindow):
class CompizNotFound(Exception):
def __init__(self):
super(WAWindow.CompizNotFound, self).__init__()
def __init__(self):
self.whatsapp_window = None
try:
self.w_compiz = XWindow(XTools.instance().get_window_by_class_name('compiz'))
except XWindow.WindowIsNone:
try:
self.w_compiz = XWindow(XTools.instance().get_window_by_class_name('gnome-shell'))
except XWindow.WindowIsNone:
raise WAWindow.CompizNotFound()
XTools.instance().get_root().change_attributes(event_mask=X.SubstructureNotifyMask)
self.w_compiz.window.change_attributes(event_mask=X.SubstructureNotifyMask)
self.thread = threading.Thread(target=self.find_whatsapp)
self.thread.start()
self.wa_launcher = WALauncher()
self.wa_launcher.start()
self.thread.join()
super(WAWindow, self).__init__(self.whatsapp_window)
self.set_app_class('whatsapp-web-app')
self.window.change_attributes(event_mask=X.PropertyChangeMask)
def find_whatsapp(self):
while self.whatsapp_window is None:
if self.w_compiz.next_event():
self.whatsapp_window = XTools.instance().get_client_by_class_name('whatsapp')
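# Watches the WhatsApp window title via _NET_WM_NAME property changes for an
# unread count of the form "(N) ...": zero clears the Unity badge, anything
# else is pushed onto badge_queue for the launcher updater.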
class WACountUpdater(threading.Thread):
def __init__(self, wa_window):
super(WACountUpdater, self).__init__()
self.wa_window = wa_window
self.u_launcher = Unity.LauncherEntry.get_for_desktop_id("whatsapp.desktop")
        self.re_w = re.compile(r'^\((\d+)\)(?:.+)?$')
self.setDaemon(True)
self.start()
def update_count(self, count):
if count == 0:
self.u_launcher.props.count_visible = False
self.u_launcher.props.count = 0
self.u_launcher.props.urgent = False
else:
badge_queue.put_nowait(count)
def parse_title(self):
try:
notif_count = int(self.re_w.match(self.wa_window.get_title()).group(1))
except:
notif_count = 0
return notif_count
def run(self):
while True:
self.wa_window.next_event(instance=PropertyNotify, atom=_NET_WM_NAME)
GLib.idle_add(self.update_count, self.parse_title())
class GLibMainLoopRunner(threading.Thread):
def __init__(self):
super(GLibMainLoopRunner, self).__init__()
self.loop = GLib.MainLoop()
self.setDaemon(True)
self.start()
def run(self):
self.loop.run()
def quit(self):
self.loop.quit()
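# Daemon thread that consumes unread counts from badge_queue and displays them
# on the Unity launcher entry for whatsapp.desktop.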
class UnityCountUpdater(threading.Thread):
def __init__(self):
super(UnityCountUpdater, self).__init__()
self.launcher_entry = Unity.LauncherEntry.get_for_desktop_id("whatsapp.desktop")
[self.update(0) for _ in range(1000)] # Hack-Fix: Force GLib warning
self.setDaemon(True)
self.count = 0
self.start()
def update(self, count):
self.launcher_entry.props.count = count
self.launcher_entry.props.count_visible = count > 0
self.launcher_entry.props.urgent = count > 0
def run(self):
while True:
count = badge_queue.get()
if count > 0:
self.update(count)
if __name__ == "__main__":
try:
# UnityHelper.instance().check_unity()
        LocalInstaller.instance().install()
loop = GLibMainLoopRunner()
UnityCountUpdater()
WACountUpdater(WAWindow())
except UnityNotRunning:
print("Unity not found!")
sys.exit(-1)
    except (CompizNotFound, WAWindow.CompizNotFound):
print("Compiz not found!")
sys.exit(-1)
except LocalInstaller.RestartNeeded:
os.chmod(LocalInstaller.instance().SCRIPT_FILE_PATH, 0o755)
os.chmod(LocalInstaller.instance().DESKTOP_FILE_PATH, 0o755)
subprocess.Popen([LocalInstaller.instance().SCRIPT_FILE_PATH])
| karas84/whatsapp-launcher | whatsapp-launcher.py | Python | gpl-2.0 | 62,338 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import time
import datetime
from util import download
if __name__ == '__main__':
data = download('http://news.at.zhihu.com/api/2/news/latest', check_exists=False, upload=False)
jd = json.loads(data)
date = datetime.date(int(jd['date'][:4]), int(jd['date'][4:6]), int(jd['date'][6:]))
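    # Walk backwards one day at a time from the latest issue down to 2013-05-19,
    # fetching each day's news listing and images unless it is already cached.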
while date >= datetime.date(2013, 5, 19):
url = 'http://news.at.zhihu.com/api/2/news/before/' + date.strftime("%Y%m%d")
print url
if not os.path.exists('api/2/news/before/' + date.strftime("%Y%m%d") + '.json'):
data = download(url)
jd = json.loads(data)
for item in jd['news']:
download(item['image'], prefix=(jd['date']+'/'))
time.sleep(60*30);
else:
print 'Exists:', url
date = date + datetime.timedelta(-1)
| isayme/zhihudaily_sync | sync.py | Python | mit | 932 |
#!/usr/bin/env python
import os
import sys
import simdna
import simdna.simdnautil.util as util
import simdna.synthetic as synthetic
import argparse
def do(options):
if (options.seed is not None):
import numpy as np
np.random.seed(options.seed)
from simdna import random
random.seed(options.seed)
outputFileName_core = util.addArguments("DensityEmbedding",
[util.ArgumentToAdd(options.prefix, "prefix"),
util.BooleanArgument(options.bestHit, "bestHit"),
util.ArrArgument(options.motifNames, "motifs"),
util.ArgumentToAdd(options.min_motifs, "min"),
util.ArgumentToAdd(options.max_motifs, "max"),
util.ArgumentToAdd(options.mean_motifs, "mean"),
util.FloatArgument(options.zero_prob, "zeroProb"),
util.ArgumentToAdd(options.seqLength, "seqLength"),
util.ArgumentToAdd(options.numSeqs, "numSeqs")])
loadedMotifs = synthetic.LoadedEncodeMotifs(options.pathToMotifs, pseudocountProb=0.001)
Constructor = synthetic.BestHitPwmFromLoadedMotifs if options.bestHit else synthetic.PwmSamplerFromLoadedMotifs
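    # Compose the generator: a zero-order background sequence into which each
    # motif is embedded a Poisson(mean)-distributed number of times, clipped to
    # [min, max] and zero-inflated with zero_prob, at uniformly sampled
    # positions and optionally reverse-complemented.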
embedInBackground = synthetic.EmbedInABackground(
backgroundGenerator=synthetic.ZeroOrderBackgroundGenerator(seqLength=options.seqLength),
embedders=[
synthetic.RepeatedEmbedder(
synthetic.SubstringEmbedder(
synthetic.ReverseComplementWrapper(
substringGenerator=Constructor(
loadedMotifs=loadedMotifs,motifName=motifName),
reverseComplementProb=options.rc_prob
),
positionGenerator=synthetic.UniformPositionGenerator()),
quantityGenerator=synthetic.ZeroInflater(synthetic.MinMaxWrapper(
synthetic.PoissonQuantityGenerator(options.mean_motifs),
theMax=options.max_motifs, theMin=options.min_motifs), zeroProb=options.zero_prob)
)
for motifName in options.motifNames
]
)
sequenceSet = synthetic.GenerateSequenceNTimes(embedInBackground, options.numSeqs)
synthetic.printSequences(outputFileName_core+".simdata", sequenceSet,
includeFasta=True, includeEmbeddings=True,
prefix=options.prefix)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--prefix")
parser.add_argument("--pathToMotifs",
default=simdna.ENCODE_MOTIFS_PATH)
parser.add_argument("--bestHit", action="store_true")
parser.add_argument("--motifNames", type=str, nargs='+', required=True)
parser.add_argument("--max-motifs",type=int, required=True)
parser.add_argument("--min-motifs",type=int, default=0)
parser.add_argument("--mean-motifs",type=int, required=True)
parser.add_argument("--zero-prob",type=float, required=False, default=0)
parser.add_argument("--rc-prob",type=float, required=False, default=0)
parser.add_argument("--seqLength", type=int, required=True)
parser.add_argument("--numSeqs", type=int, required=True)
parser.add_argument("--seed", type=int, default=None)
options = parser.parse_args()
do(options)
| kundajelab/simdna | scripts/densityMotifSimulation.py | Python | mit | 3,374 |
#!/usr/bin/python
"""
:copyright:
EQcorrscan developers.
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
import importlib
import sys
import warnings
from eqcorrscan.core.match_filter.party import Party # NOQA
from eqcorrscan.core.match_filter.family import Family # NOQA
from eqcorrscan.core.match_filter.detection import Detection # NOQA
from eqcorrscan.core.match_filter.tribe import Tribe # NOQA
from eqcorrscan.core.match_filter.template import Template # NOQA
from eqcorrscan.core.subspace import Detector, read_detector # NOQA
from eqcorrscan.core.lag_calc import lag_calc # NOQA
from eqcorrscan.utils.correlate import ( # NOQA
get_stream_xcorr, get_array_xcorr, register_array_xcorr)
__all__ = ['core', 'utils', 'tutorials', 'tests']
__version__ = '0.4.3'
# Cope with changes to name-space to remove most of the camel-case
_import_map = {}
class EQcorrscanDeprecationWarning(UserWarning):
"""
Force pop-up of warnings.
"""
pass
if sys.version_info.major < 3:
raise NotImplementedError(
"EQcorrscan no longer supports Python 2.x"
" See https://github.com/eqcorrscan/EQcorrscan/issues/242 to read "
"more.")
class EQcorrscanRestructureAndLoad(object):
"""
Path finder and module loader for transitioning
"""
def find_module(self, fullname, path=None):
# Compatibility with namespace paths.
if hasattr(path, "_path"):
path = path._path
if not path or not path[0].startswith(__path__[0]):
return None
for key in _import_map.keys():
if fullname.startswith(key):
break
else:
return None
return self
def load_module(self, name):
# Use cached modules.
if name in sys.modules:
return sys.modules[name]
# Otherwise check if the name is part of the import map.
elif name in _import_map:
new_name = _import_map[name]
else:
new_name = name
for old, new in _import_map.items():
if not new_name.startswith(old):
continue
new_name = new_name.replace(old, new)
break
else:
return None
# Don't load again if already loaded.
if new_name in sys.modules:
module = sys.modules[new_name]
else:
module = importlib.import_module(new_name)
# Warn here as at this point the module has already been imported.
warnings.warn("Module '%s' is deprecated and will stop working "
"with the next EQcorrscan version. Please import module "
"'%s' instead." % (name, new_name),
EQcorrscanDeprecationWarning)
sys.modules[new_name] = module
sys.modules[name] = module
return module
sys.meta_path.append(EQcorrscanRestructureAndLoad())
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| eqcorrscan/EQcorrscan | eqcorrscan/__init__.py | Python | gpl-3.0 | 3,103 |
import os
import time
import calculate
from github import Github
from django.conf import settings
from calaccess_raw import get_model_list
from calaccess_raw.management.commands import CalAccessCommand
from django.contrib.humanize.templatetags.humanize import intcomma
class Command(CalAccessCommand):
help = 'Create GitHub issues for model fields without documentation'
def set_options(self, *args, **kwargs):
"""
Hook up with the GitHub API and prepare to create issues.
"""
self.gh = Github(os.getenv('GITHUB_TOKEN'))
self.org = self.gh.get_organization("california-civic-data-coalition")
self.repo = self.org.get_repo("django-calaccess-raw-data")
self.labels = [
self.repo.get_label("small"),
self.repo.get_label("documentation"),
self.repo.get_label("enhancement"),
]
self.milestone = self.repo.get_milestone(3)
def handle(self, *args, **kwargs):
"""
Make it happen.
"""
self.set_options()
self.header(
"Creating GitHub issues for model fields without documentation"
)
# Loop through all the models and find any fields without docs
field_count = 0
missing_list = []
for m in get_model_list():
field_list = m().get_field_list()
field_count += len(field_list)
for f in field_list:
if not self.has_docs(f):
missing_list.append((m, f))
# If everything is done, declare victory
if not missing_list:
self.success("All %s fields documented!" % field_count)
return False
# If not, loop through the missing and create issues
missing_count = len(missing_list)
self.log(
"- %s/%s (%d%%) of fields lack documentation" % (
intcomma(missing_count),
intcomma(field_count),
calculate.percentage(missing_count, field_count)
)
)
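        # Only a fixed slice of the missing fields is turned into issues per run.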
for model, field in missing_list[50:101]:
# For now we are excluding the 'other' model module to
# avoid overkill
if model().klass_group != 'other':
self.create_issue(model, field)
def has_docs(self, field):
"""
Test if a Django field has some kind of documentation already.
Returns True or False
"""
if field.name == 'id':
return True
if field.help_text:
return True
if field.__dict__['_verbose_name']:
return True
return False
def create_issue(self, model, field):
"""
Create a GitHub issue for the provided model and field.
"""
title = TITLE_TEMPLATE % (field.name, model().klass_name)
body = BODY_TEMPLATE % (
field.name,
model().klass_name,
model().klass_group,
model().klass_group,
)
self.log("-- Creating issue for %s.%s" % (
model().klass_name,
field.name
)
)
self.repo.create_issue(
title,
body=body,
labels=self.labels,
milestone=self.milestone
)
time.sleep(2)
TITLE_TEMPLATE = """
Add documentation for the ``%s`` field on the ``%s`` database model
""".replace("\n", "")
BODY_TEMPLATE = """
## Your mission
Add documentation for the ``%s`` field on the ``%s`` database model.
## Here's how
**Step 1**: Claim this ticket by leaving a comment below. Tell everyone you're ON IT!
**Step 2**: Open up the file that contains this model. It should be in <a href="https://github.com/california-civic-data-coalition/django-calaccess-raw-data/blob/master/calaccess_raw/models/%s.py">calaccess_raw.models.%s.py</a>.
**Step 3**: Hit the little pencil button in the upper-right corner of the code box to begin editing the file.

**Step 4**: Find this model and field in the file. (Clicking into the box and searching with CTRL-F can help you here.) Once you find it, we expect the field to lack the ``help_text`` field typically used in Django to explain what a field contains.
```python
effect_dt = fields.DateField(
null=True,
db_column="EFFECT_DT"
)
```
**Step 5**: In a separate tab, open up the <a href="Quilmes">official state documentation</a> and find the page that defines all the fields in this model.

**Step 6**: Find the row in that table's definition table that spells out what this field contains. If it lacks documentation. Note that in the ticket and close it now.

**Step 7**: Return to the GitHub tab.
**Step 8**: Add the state's label explaining what's in the field, to our field definition by inserting it a ``help_text`` argument. That should look something like this:
```python
effect_dt = fields.DateField(
null=True,
db_column="EFFECT_DT",
# Add a help_text argument like the one here, but put your string in instead.
help_text="The other values in record were effective as of this date"
)
```
**Step 9**: Scroll down below the code box and describe the change you've made in the commit message. Press the button below.

**Step 10**: Review your changes and create a pull request submitting them to the core team for inclusion.

That's it! Mission accomplished!
"""
| dwillis/django-calaccess-raw-data | example/toolbox/management/commands/createfielddocissues.py | Python | mit | 5,833 |
# coding=utf-8
from typing import List
import networkx as nx
import pyisemail
from fuzzywuzzy import fuzz
from recordclass import recordclass
import pandas as pd
import saapy.util as su
from .lexeme import cleanup_proper_name
def connect_actors(actor_frame, connectivity_sets, connectivity_column):
"""
:param actor_frame:
:param connectivity_sets:
:param connectivity_column:
:return:
Examples:
same_actors = {
'ccason': [3, 14, 15], 'clipka': [4, 5, 13],
'wfpokorny': [11, 17], 'anshuarya': [0],
'bentsm': [1], 'cbarton': [2], 'dbodor': [6],
'jlecher': [7], 'jgrimbert': [8], 'nalvarez': [9],
'selvik': [10], 'wverhelst': [12], 'gryken': [16],
'github': [18]}
actor_frame = connect_actors(actor_frame, same_actors, 'actor_id')
"""
connectivity = {}
for actor_id, connectivity_set in connectivity_sets.items():
for actor in connectivity_set:
connectivity[actor] = actor_id
actor_frame[connectivity_column] = su.categorize(pd.Series(connectivity))
return actor_frame
def combine_actors(actor_frame, connectivity_column):
"""
:param actor_frame:
:param connectivity_column:
:return:
Examples:
combine_actors(actor_frame, 'actor_id')
"""
aggregator = {'name': 'first', 'email': 'first',
'author_commits': 'sum',
'committer_commits': 'sum'}
return actor_frame.groupby(connectivity_column).agg(
aggregator).reset_index()
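# Merge the author and committer name/email pairs of the commit frame against
# the actor frame and insert the matching actor_id values as 'author' and
# 'committer' columns, optionally dropping the raw name/email columns.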
def insert_actor_ids(commit_frame, actor_frame, drop_name_email=True):
actor_columns = ['author_name', 'author_email',
'committer_name', 'committer_email']
cf = commit_frame[actor_columns]
af = actor_frame[['name', 'email', 'actor_id']]
author = pd.merge(
cf, af, left_on=actor_columns[:2],
right_on=('name', 'email'),
how='left')['actor_id']
committer = pd.merge(
cf, af, left_on=actor_columns[2:],
right_on=('name', 'email'),
how='left')['actor_id']
commit_frame.insert(3, 'author', author)
commit_frame.insert(4, 'committer', committer)
if drop_name_email:
commit_frame.drop(actor_columns, axis=1, inplace=True)
return commit_frame
PARSED_EMAIL_FIELDS = ['email', 'valid', 'name', 'domain', 'parsed_name']
ParsedEmail = recordclass('ParsedEmail', PARSED_EMAIL_FIELDS)
PARSED_NAME_FIELDS = ['name', 'name_type']
ParsedName = recordclass('ParsedName', PARSED_NAME_FIELDS)
def proper(name: ParsedName):
return name.name_type == 'proper' or name.name_type == 'personal'
class Actor:
name: str
email: str
actor_id: str
parsed_email: ParsedEmail
parsed_name: ParsedName
def __init__(self, name: str, email: str):
self.name = name
self.email = email
self.actor_id = '{} <{}>'.format(name, email).lower()
self.parsed_email = None
self.parsed_name = None
def __repr__(self):
return "Actor('{}')".format(self.actor_id)
class ActorParser:
role_names = None
def __init__(self):
self.role_names = dict()
def add_role_names(self, name_roles):
for name, role in name_roles:
self.role_names[name] = role
    def parse_name(self, name: str) -> ParsedName:
        """
        Classify a raw name as either a known role name or a proper
        (human) name, normalizing proper names via cleanup_proper_name.
        :param name: potentially human name
        :return: ParsedName with the normalized name and its name_type
        """
parsed_name = ParsedName(**su.empty_dict(PARSED_NAME_FIELDS))
lower_name = name.lower()
if lower_name in self.role_names:
parsed_name.name_type = self.role_names[lower_name]
parsed_name.name = lower_name
else:
parsed_name.name_type = 'proper'
parsed_name.name = cleanup_proper_name(name)
return parsed_name
def parse_email(self, email: str) -> ParsedEmail:
lower_email = email.lower()
parsed_email = ParsedEmail(**su.empty_dict(PARSED_EMAIL_FIELDS))
parsed_email.email = lower_email
parsed_email.valid = pyisemail.is_email(lower_email)
email_parts = lower_email.split('@')
parsed_email.name = email_parts[0]
if len(email_parts) == 2:
parsed_email.domain = email_parts[1]
else:
parsed_email.domain = ''
parsed_email.parsed_name = self.parse_name(parsed_email.name)
return parsed_email
def parse_actor(self, name: str, email: str, name_from_email=True) -> Actor:
parsed_email = self.parse_email(email)
if not name and name_from_email:
name = parsed_email.parsed_name.name
actor = Actor(name, email)
actor.parsed_name = self.parse_name(name)
actor.parsed_email = parsed_email
return actor
ACTOR_SIMILARITY_FIELDS = ['possible',
'identical',
'same_name',
'same_email',
'same_email_name',
'name_ratio',
'email_name_ratio',
'email_domain_ratio',
'name1_email_ratio',
'name2_email_ratio',
'proper_name1',
'proper_name2',
'proper_email_name1',
'proper_email_name2',
'explicit']
ActorSimilarity = recordclass('ActorSimilarity', ACTOR_SIMILARITY_FIELDS)
ACTOR_SIMILARITY_SETTINGS_FIELDS = ['min_name_ratio',
'min_email_domain_ratio',
'min_email_name_ratio',
'min_name_email_ratio']
ActorSimilaritySettings = recordclass('ActorSimilaritySettings',
ACTOR_SIMILARITY_SETTINGS_FIELDS)
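# Undirected graph of actors in which edges mark likely-identical identities;
# connected components of the graph then give the groups of actors to merge.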
class ActorSimilarityGraph:
actor_graph: nx.Graph
settings: ActorSimilaritySettings
def __init__(self, settings=None):
self.actor_graph = nx.Graph()
self.similarity_checks = [self.identical_actors,
self.similar_emails,
self.similar_proper_names]
if settings is None:
settings = ActorSimilaritySettings(min_name_ratio=55,
min_email_domain_ratio=55,
min_email_name_ratio=55,
min_name_email_ratio=55)
self.settings = settings
def add_actor(self, actor: Actor, link_similar=True):
if self.actor_graph.has_node(actor.actor_id):
return
self.actor_graph.add_node(actor.actor_id, actor=actor)
for actor_id, actor_attrs in self.actor_graph.nodes_iter(data=True):
if actor.actor_id == actor_id:
continue
other_actor = actor_attrs['actor']
if link_similar:
similarity = self.evaluate_similarity(actor, other_actor)
if similarity.possible:
self.actor_graph.add_edge(actor.actor_id,
other_actor.actor_id,
similarity=similarity,
confidence=None)
def link_actors(self, actor1_id: str, actor2_id: str,
confidence: float = 1):
self.actor_graph.add_edge(actor1_id, actor2_id, confidence=confidence)
if 'similarity' not in self.actor_graph[actor1_id][actor2_id]:
self.actor_graph[actor1_id][actor2_id]['similarity'] = None
def unlink_actors(self, actor1_id: str, actor2_id: str):
self.actor_graph.remove_edge(actor1_id, actor2_id)
def evaluate_similarity(self, actor: Actor,
other_actor: Actor) -> ActorSimilarity:
similarity = self.build_similarity(actor, other_actor)
checks = list(self.similarity_checks)
while not similarity.possible and len(checks):
check = checks.pop()
similarity.possible = check(similarity)
return similarity
def build_similarity(self, actor, other_actor):
similarity = ActorSimilarity(**su.empty_dict(ACTOR_SIMILARITY_FIELDS))
# run comparisons for similarity
similarity.identical = (actor.actor_id == other_actor.actor_id)
similarity.proper_name1 = proper(actor.parsed_name)
similarity.proper_name2 = proper(other_actor.parsed_name)
similarity.proper_email_name1 = proper(actor.parsed_email.parsed_name)
similarity.proper_email_name2 = proper(
other_actor.parsed_email.parsed_name)
similarity.same_name = (actor.parsed_name.name ==
other_actor.parsed_name.name)
similarity.name_ratio = self.compare_names(actor.parsed_name,
other_actor.parsed_name)
similarity.same_email = (actor.parsed_email.email ==
other_actor.parsed_email.email)
similarity.email_domain_ratio = fuzz.ratio(
actor.parsed_email.domain,
other_actor.parsed_email.domain)
similarity.same_email_name = (actor.parsed_email.parsed_name.name ==
other_actor.parsed_email.parsed_name.name)
similarity.email_name_ratio = self.compare_names(
actor.parsed_email.parsed_name,
other_actor.parsed_email.parsed_name)
similarity.name1_email_ratio = self.compare_names(
actor.parsed_name,
other_actor.parsed_email.parsed_name)
similarity.name2_email_ratio = self.compare_names(
actor.parsed_email.parsed_name,
other_actor.parsed_name)
return similarity
@staticmethod
def compare_names(name1: ParsedName, name2: ParsedName):
if proper(name1) and proper(name2):
compare = fuzz.token_set_ratio
else:
compare = fuzz.ratio
return compare(name1.name, name2.name)
def similar_emails(self, s: ActorSimilarity):
return (s.same_email or
(s.email_domain_ratio >= self.settings.min_email_domain_ratio
and
s.email_name_ratio >= self.settings.min_email_name_ratio))
def similar_proper_names(self, s: ActorSimilarity):
return (s.proper_name1 and s.proper_name2 and
(s.same_name or s.name_ratio >= self.settings.min_name_ratio))
def similar_name_to_email(self, s: ActorSimilarity):
return (s.name1_email_ratio >= self.settings.min_name_email_ratio or
s.name2_email_ratio >= self.settings.min_name_email_ratio)
@staticmethod
def identical_actors(s: ActorSimilarity):
return s.identical
def group_similar_actors(self):
similar_actor_groups = [list(g) for g in
nx.connected_components(self.actor_graph)]
return similar_actor_groups
def print_similarity_groups(self):
similar_groups = self.group_similar_actors()
for i, group in enumerate(similar_groups):
if len(group) < 2:
continue
print('=== group', i, '===')
for actor1_id, actor2_id, data in self.actor_graph.edges_iter(
nbunch=group, data=True):
print(actor1_id, '->', actor2_id, data)
| ashapochka/saapy | saapy/analysis/actor.py | Python | apache-2.0 | 11,608 |
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys
import ipp_macro_series_parser.demographie.build_parameters as constructeur
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-d',
'--download',
action = 'store_true',
help = "download all input files from their web sources"
)
parser.add_argument(
'-v',
'--verbose',
action = 'store_true',
default = False,
help = "increase output verbosity"
)
parser.add_argument(
'-o',
'--output',
type = str,
default = None,
help = "output directory"
)
parser.add_argument(
'-p',
'--pop_input',
type = str,
default = None,
help = "input directory for population files"
)
parser.add_argument(
'-w',
'--weight',
default = 200,
help = "weight used for TIL-France"
) # TODO remove weight from here
parser.add_argument(
'-t',
'--til_input',
default = None,
help = "input directory for til-specific files (dependance)"
)
args = parser.parse_args()
if not args.download and not (args.til_input and args.pop_input):
print("Error: no source given for input files ")
print("You must:")
print(" - give directories containing the input files using *both* -t and -p")
print(" - or download them from the web with -d")
sys.exit(-1)
if args.output is None:
default_output = "../param/demo2"
print("No output directory given. Default output directory used: " + default_output)
sys.argv.append("-o" + default_output)
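    # Delegate the actual construction to the demographie build_parameters module.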
constructeur.main()
if __name__ == '__main__':
sys.exit(main())
| TaxIPP-Life/til-france | til_france/scripts/population_and_dependance_builder.py | Python | gpl-3.0 | 1,858 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import os
import unittest
import uuid
from contextlib import closing
from unittest import mock
import MySQLdb.cursors
import pytest
from parameterized import parameterized
from airflow.models import Connection
from airflow.models.dag import DAG
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.utils import timezone
SSL_DICT = {'cert': '/tmp/client-cert.pem', 'ca': '/tmp/server-ca.pem', 'key': '/tmp/client-key.pem'}
class TestMySqlHookConn(unittest.TestCase):
def setUp(self):
super().setUp()
self.connection = Connection(
conn_type='mysql',
login='login',
password='password',
host='host',
schema='schema',
)
self.db_hook = MySqlHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@mock.patch('MySQLdb.connect')
def test_get_conn(self, mock_connect):
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs['user'] == 'login'
assert kwargs['passwd'] == 'password'
assert kwargs['host'] == 'host'
assert kwargs['db'] == 'schema'
@mock.patch('MySQLdb.connect')
def test_get_uri(self, mock_connect):
self.connection.extra = json.dumps({'charset': 'utf-8'})
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert self.db_hook.get_uri() == "mysql://login:password@host/schema?charset=utf-8"
@mock.patch('MySQLdb.connect')
def test_get_conn_from_connection(self, mock_connect):
conn = Connection(login='login-conn', password='password-conn', host='host', schema='schema')
hook = MySqlHook(connection=conn)
hook.get_conn()
mock_connect.assert_called_once_with(
user='login-conn', passwd='password-conn', host='host', db='schema', port=3306
)
@mock.patch('MySQLdb.connect')
def test_get_conn_from_connection_with_schema(self, mock_connect):
conn = Connection(login='login-conn', password='password-conn', host='host', schema='schema')
hook = MySqlHook(connection=conn, schema='schema-override')
hook.get_conn()
mock_connect.assert_called_once_with(
user='login-conn', passwd='password-conn', host='host', db='schema-override', port=3306
)
@mock.patch('MySQLdb.connect')
def test_get_conn_port(self, mock_connect):
self.connection.port = 3307
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs['port'] == 3307
@mock.patch('MySQLdb.connect')
def test_get_conn_charset(self, mock_connect):
self.connection.extra = json.dumps({'charset': 'utf-8'})
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs['charset'] == 'utf-8'
assert kwargs['use_unicode'] is True
@mock.patch('MySQLdb.connect')
def test_get_conn_cursor(self, mock_connect):
self.connection.extra = json.dumps({'cursor': 'sscursor'})
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs['cursorclass'] == MySQLdb.cursors.SSCursor
@mock.patch('MySQLdb.connect')
def test_get_conn_local_infile(self, mock_connect):
self.connection.extra = json.dumps({'local_infile': True})
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs['local_infile'] == 1
@mock.patch('MySQLdb.connect')
    def test_get_conn_unix_socket(self, mock_connect):
self.connection.extra = json.dumps({'unix_socket': "/tmp/socket"})
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs['unix_socket'] == '/tmp/socket'
@mock.patch('MySQLdb.connect')
def test_get_conn_ssl_as_dictionary(self, mock_connect):
self.connection.extra = json.dumps({'ssl': SSL_DICT})
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs['ssl'] == SSL_DICT
@mock.patch('MySQLdb.connect')
def test_get_conn_ssl_as_string(self, mock_connect):
self.connection.extra = json.dumps({'ssl': json.dumps(SSL_DICT)})
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs['ssl'] == SSL_DICT
@mock.patch('MySQLdb.connect')
@mock.patch('airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.get_client_type')
def test_get_conn_rds_iam(self, mock_client, mock_connect):
self.connection.extra = '{"iam":true}'
mock_client.return_value.generate_db_auth_token.return_value = 'aws_token'
self.db_hook.get_conn()
mock_connect.assert_called_once_with(
user='login',
passwd='aws_token',
host='host',
db='schema',
port=3306,
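            # the generated IAM auth token has to be sent via the cleartext client plugin,
            # hence this option group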
read_default_group='enable-cleartext-plugin',
)
class TestMySqlHookConnMySqlConnectorPython(unittest.TestCase):
def setUp(self):
super().setUp()
self.connection = Connection(
login='login',
password='password',
host='host',
schema='schema',
extra='{"client": "mysql-connector-python"}',
)
self.db_hook = MySqlHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@mock.patch('mysql.connector.connect')
def test_get_conn(self, mock_connect):
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs['user'] == 'login'
assert kwargs['password'] == 'password'
assert kwargs['host'] == 'host'
assert kwargs['database'] == 'schema'
@mock.patch('mysql.connector.connect')
def test_get_conn_port(self, mock_connect):
self.connection.port = 3307
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs['port'] == 3307
@mock.patch('mysql.connector.connect')
def test_get_conn_allow_local_infile(self, mock_connect):
extra_dict = self.connection.extra_dejson
extra_dict.update(allow_local_infile=True)
self.connection.extra = json.dumps(extra_dict)
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs['allow_local_infile'] == 1
class MockMySQLConnectorConnection:
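    # minimal stand-in for a mysql-connector-python connection, which exposes autocommit as a property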
DEFAULT_AUTOCOMMIT = 'default'
def __init__(self):
self._autocommit = self.DEFAULT_AUTOCOMMIT
@property
def autocommit(self):
return self._autocommit
@autocommit.setter
def autocommit(self, autocommit):
self._autocommit = autocommit
class TestMySqlHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.cur = mock.MagicMock(rowcount=0)
self.conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
conn = self.conn
class SubMySqlHook(MySqlHook):
conn_name_attr = 'test_conn_id'
def get_conn(self):
return conn
self.db_hook = SubMySqlHook()
@parameterized.expand([(True,), (False,)])
def test_set_autocommit_mysql_connector(self, autocommit):
conn = MockMySQLConnectorConnection()
self.db_hook.set_autocommit(conn, autocommit)
assert conn.autocommit is autocommit
def test_get_autocommit_mysql_connector(self):
conn = MockMySQLConnectorConnection()
assert self.db_hook.get_autocommit(conn) == MockMySQLConnectorConnection.DEFAULT_AUTOCOMMIT
def test_set_autocommit_mysqldb(self):
autocommit = False
self.db_hook.set_autocommit(self.conn, autocommit)
self.conn.autocommit.assert_called_once_with(autocommit)
def test_get_autocommit_mysqldb(self):
self.db_hook.get_autocommit(self.conn)
self.conn.get_autocommit.assert_called_once()
def test_run_without_autocommit(self):
sql = 'SQL'
self.conn.get_autocommit.return_value = False
# Default autocommit setting should be False.
# Testing default autocommit value as well as run() behavior.
self.db_hook.run(sql, autocommit=False)
self.conn.autocommit.assert_called_once_with(False)
self.cur.execute.assert_called_once_with(sql)
assert self.conn.commit.call_count == 1
def test_run_with_autocommit(self):
sql = 'SQL'
self.db_hook.run(sql, autocommit=True)
self.conn.autocommit.assert_called_once_with(True)
self.cur.execute.assert_called_once_with(sql)
self.conn.commit.assert_not_called()
def test_run_with_parameters(self):
sql = 'SQL'
parameters = ('param1', 'param2')
self.db_hook.run(sql, autocommit=True, parameters=parameters)
self.conn.autocommit.assert_called_once_with(True)
self.cur.execute.assert_called_once_with(sql, parameters)
self.conn.commit.assert_not_called()
def test_run_multi_queries(self):
sql = ['SQL1', 'SQL2']
self.db_hook.run(sql, autocommit=True)
self.conn.autocommit.assert_called_once_with(True)
for i in range(len(self.cur.execute.call_args_list)):
args, kwargs = self.cur.execute.call_args_list[i]
assert len(args) == 1
assert args[0] == sql[i]
assert kwargs == {}
calls = [mock.call(sql[0]), mock.call(sql[1])]
self.cur.execute.assert_has_calls(calls, any_order=True)
self.conn.commit.assert_not_called()
def test_bulk_load(self):
self.db_hook.bulk_load('table', '/tmp/file')
self.cur.execute.assert_called_once_with(
"""
LOAD DATA LOCAL INFILE '/tmp/file'
INTO TABLE table
"""
)
def test_bulk_dump(self):
self.db_hook.bulk_dump('table', '/tmp/file')
self.cur.execute.assert_called_once_with(
"""
SELECT * INTO OUTFILE '/tmp/file'
FROM table
"""
)
def test_serialize_cell(self):
assert 'foo' == self.db_hook._serialize_cell('foo', None)
def test_bulk_load_custom(self):
self.db_hook.bulk_load_custom(
'table',
'/tmp/file',
'IGNORE',
"""FIELDS TERMINATED BY ';'
OPTIONALLY ENCLOSED BY '"'
IGNORE 1 LINES""",
)
self.cur.execute.assert_called_once_with(
"""
LOAD DATA LOCAL INFILE '/tmp/file'
IGNORE
INTO TABLE table
FIELDS TERMINATED BY ';'
OPTIONALLY ENCLOSED BY '"'
IGNORE 1 LINES
"""
)
DEFAULT_DATE = timezone.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_test_dag'
class MySqlContext:
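    # temporarily switches the 'client' extra on the default MySQL connection, restoring it on exit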
def __init__(self, client):
self.client = client
self.connection = MySqlHook.get_connection(MySqlHook.default_conn_name)
self.init_client = self.connection.extra_dejson.get('client', 'mysqlclient')
def __enter__(self):
self.connection.set_extra(f'{{"client": "{self.client}"}}')
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.set_extra(f'{{"client": "{self.init_client}"}}')
@pytest.mark.backend("mysql")
class TestMySql(unittest.TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG(TEST_DAG_ID, default_args=args)
self.dag = dag
def tearDown(self):
drop_tables = {'test_mysql_to_mysql', 'test_airflow'}
with closing(MySqlHook().get_conn()) as conn:
with closing(conn.cursor()) as cursor:
for table in drop_tables:
cursor.execute(f"DROP TABLE IF EXISTS {table}")
@parameterized.expand(
[
("mysqlclient",),
("mysql-connector-python",),
]
)
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_AIRFLOW_DB': 'mysql://root@mysql/airflow?charset=utf8mb4&local_infile=1',
},
)
def test_mysql_hook_test_bulk_load(self, client):
with MySqlContext(client):
records = ("foo", "bar", "baz")
import tempfile
with tempfile.NamedTemporaryFile() as f:
f.write("\n".join(records).encode('utf8'))
f.flush()
hook = MySqlHook('airflow_db')
with closing(hook.get_conn()) as conn:
with closing(conn.cursor()) as cursor:
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS test_airflow (
dummy VARCHAR(50)
)
"""
)
cursor.execute("TRUNCATE TABLE test_airflow")
hook.bulk_load("test_airflow", f.name)
cursor.execute("SELECT dummy FROM test_airflow")
results = tuple(result[0] for result in cursor.fetchall())
assert sorted(results) == sorted(records)
@parameterized.expand(
[
("mysqlclient",),
("mysql-connector-python",),
]
)
def test_mysql_hook_test_bulk_dump(self, client):
with MySqlContext(client):
hook = MySqlHook('airflow_db')
priv = hook.get_first("SELECT @@global.secure_file_priv")
# Use random names to allow re-running
if priv and priv[0]:
# Confirm that no error occurs
hook.bulk_dump(
"INFORMATION_SCHEMA.TABLES",
os.path.join(priv[0], f"TABLES_{client}-{uuid.uuid1()}"),
)
elif priv == ("",):
hook.bulk_dump("INFORMATION_SCHEMA.TABLES", f"TABLES_{client}_{uuid.uuid1()}")
else:
self.skipTest("Skip test_mysql_hook_test_bulk_load since file output is not permitted")
@parameterized.expand(
[
("mysqlclient",),
("mysql-connector-python",),
]
)
@mock.patch('airflow.providers.mysql.hooks.mysql.MySqlHook.get_conn')
def test_mysql_hook_test_bulk_dump_mock(self, client, mock_get_conn):
with MySqlContext(client):
mock_execute = mock.MagicMock()
mock_get_conn.return_value.cursor.return_value.execute = mock_execute
hook = MySqlHook('airflow_db')
table = "INFORMATION_SCHEMA.TABLES"
tmp_file = "/path/to/output/file"
hook.bulk_dump(table, tmp_file)
from tests.test_utils.asserts import assert_equal_ignore_multiple_spaces
assert mock_execute.call_count == 1
query = f"""
SELECT * INTO OUTFILE '{tmp_file}'
FROM {table}
"""
assert_equal_ignore_multiple_spaces(self, mock_execute.call_args[0][0], query)
| nathanielvarona/airflow | tests/providers/mysql/hooks/test_mysql.py | Python | apache-2.0 | 16,771 |
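A minimal sketch of the behaviour these tests exercise: JSON stored in the connection's extra field is translated by MySqlHook.get_conn() into MySQLdb.connect keyword arguments. It assumes apache-airflow-providers-mysql and mysqlclient are installed; the connection values are illustrative.

import json
from unittest import mock

from airflow.models import Connection
from airflow.providers.mysql.hooks.mysql import MySqlHook

conn = Connection(conn_type='mysql', login='login', password='password',
                  host='host', schema='schema')
conn.extra = json.dumps({'charset': 'utf-8', 'local_infile': True})

hook = MySqlHook()
hook.get_connection = mock.Mock(return_value=conn)  # bypass the metadata DB lookup

with mock.patch('MySQLdb.connect') as mock_connect:
    hook.get_conn()
    _, kwargs = mock_connect.call_args
    print(kwargs['charset'], kwargs['local_infile'])  # utf-8 1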