repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
sunils34/buffer-django-nonrel | django/utils/crypto.py | 245 | 1443 | """
Django's standard crypto functions and utilities.
"""
import hmac
from django.conf import settings
from django.utils.hashcompat import sha_constructor, sha_hmac
def salted_hmac(key_salt, value, secret=None):
    """
    Return the HMAC-SHA1 of 'value', keyed by a derivation of key_salt and a
    secret (settings.SECRET_KEY when no explicit secret is given).

    A different key_salt should be passed in for every application of HMAC.
    """
    if secret is None:
        secret = settings.SECRET_KEY
    # Derive a per-use key by hashing the salt together with the base secret;
    # SHA1 serves as the pseudo-random function for the derivation.
    derived_key = sha_constructor(key_salt + secret).digest()
    # If len(key_salt + secret) exceeded the SHA1 block size, the hmac module
    # would hash the key itself anyway, making the derivation above redundant.
    # Deriving explicitly guarantees that behaviour in *all* cases.
    return hmac.new(derived_key, msg=value, digestmod=sha_hmac)
def constant_time_compare(val1, val2):
    """
    Compare two strings for equality, returning True when they match.

    The comparison time does not depend on how many leading characters
    match, which defends against timing attacks on secret comparisons.
    """
    if len(val1) != len(val2):
        return False
    # OR together the XOR of every character pair; any difference leaves a
    # non-zero bit, and every pair is always examined.
    mismatch = 0
    for ch1, ch2 in zip(val1, val2):
        mismatch |= ord(ch1) ^ ord(ch2)
    return not mismatch
| bsd-3-clause |
shwinpiocess/mongo | requests/packages/__init__.py | 838 | 1384 | '''
Debian and other distributions "unbundle" requests' vendored dependencies, and
rewrite all imports to use the global versions of ``urllib3`` and ``chardet``.
The problem with this is that not only requests itself imports those
dependencies, but third-party code outside of the distros' control too.
In reaction to these problems, the distro maintainers replaced
``requests.packages`` with a magical "stub module" that imports the correct
modules. The implementations were varying in quality and all had severe
problems. For example, a symlink (or hardlink) that links the correct modules
into place introduces problems regarding object identity, since you now have
two modules in `sys.modules` with the same API, but different identities::
requests.packages.urllib3 is not urllib3
With version ``2.5.2``, requests started to maintain its own stub, so that
distro-specific breakage would be reduced to a minimum, even though the whole
issue is not requests' fault in the first place. See
https://github.com/kennethreitz/requests/pull/2375 for the corresponding pull
request.
'''
from __future__ import absolute_import
import sys

# Prefer the vendored copy of urllib3 shipped inside this package; fall back
# to the system-wide installation when a distro has unbundled the dependency.
try:
    from . import urllib3
except ImportError:
    import urllib3
    # Register the global module under the vendored dotted name so that
    # 'requests.packages.urllib3' resolves to the very same module object,
    # preserving object identity (isinstance checks, module-level state).
    sys.modules['%s.urllib3' % __name__] = urllib3
# Same dance for chardet.
try:
    from . import chardet
except ImportError:
    import chardet
    sys.modules['%s.chardet' % __name__] = chardet
| apache-2.0 |
dgoedkoop/QGIS | tests/src/python/test_db_manager_gpkg.py | 17 | 24869 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the DBManager GPKG plugin
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Even Rouault'
__date__ = '2016-10-17'
__copyright__ = 'Copyright 2016, Even Rouault'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
import tempfile
import shutil
from osgeo import gdal, ogr, osr
from qgis.core import QgsDataSourceUri, QgsSettings
from qgis.PyQt.QtCore import QCoreApplication
from qgis.testing import start_app, unittest
from plugins.db_manager.db_plugins import supportedDbTypes, createDbPlugin
from plugins.db_manager.db_plugins.plugin import TableField
def GDAL_COMPUTE_VERSION(maj, min, rev):
    """Pack a (major, minor, revision) triple into the single-integer
    encoding returned by gdal.VersionInfo('VERSION_NUM'), mirroring the
    GDAL_COMPUTE_VERSION C macro."""
    return maj * 1000000 + min * 10000 + rev * 100
class TestPyQgsDBManagerGpkg(unittest.TestCase):
    """Unit tests for the DB Manager GeoPackage plugin: connection handling,
    table/field CRUD, raster tables and table metadata rendering, all run
    against scratch .gpkg files built with OGR in a temp directory."""

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        QCoreApplication.setOrganizationName("QGIS_Test")
        QCoreApplication.setOrganizationDomain("TestPyQgsDBManagerGpkg.com")
        QCoreApplication.setApplicationName("TestPyQgsDBManagerGpkg")
        QgsSettings().clear()
        start_app()
        # Build a scratch GeoPackage with a single line layer and one feature.
        # SPATIAL_INDEX=NO so the tests can exercise spatial-index creation.
        cls.basetestpath = tempfile.mkdtemp()
        cls.test_gpkg = os.path.join(cls.basetestpath, 'TestPyQgsDBManagerGpkg.gpkg')
        ds = ogr.GetDriverByName('GPKG').CreateDataSource(cls.test_gpkg)
        lyr = ds.CreateLayer('testLayer', geom_type=ogr.wkbLineString, options=['SPATIAL_INDEX=NO'])
        # Remember whether this GDAL build supports altering field definitions
        # (needed by testCreateRenameDeleteFields).
        cls.supportsAlterFieldDefn = lyr.TestCapability(ogr.OLCAlterFieldDefn) == 1
        lyr.CreateField(ogr.FieldDefn('text_field', ogr.OFTString))
        f = ogr.Feature(lyr.GetLayerDefn())
        f['text_field'] = 'foo'
        f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(1 2,3 4)'))
        lyr.CreateFeature(f)
        f = None
        # Dropping the dataset reference closes it and flushes to disk.
        ds = None

    @classmethod
    def tearDownClass(cls):
        """Run after all tests"""
        QgsSettings().clear()
        # ignore_errors=True: best-effort cleanup of the temp directory.
        shutil.rmtree(cls.basetestpath, True)

    def testSupportedDbTypes(self):
        # The GPKG driver must be among the advertised DB Manager backends.
        self.assertIn('gpkg', supportedDbTypes())

    def testCreateDbPlugin(self):
        plugin = createDbPlugin('gpkg')
        self.assertIsNotNone(plugin)

    def testConnect(self):
        """Connecting to existing and non-existing named connections."""
        connection_name = 'testConnect'
        plugin = createDbPlugin('gpkg')
        uri = QgsDataSourceUri()
        uri.setDatabase(self.test_gpkg)
        self.assertTrue(plugin.addConnection(connection_name, uri))
        connections = plugin.connections()
        self.assertEqual(len(connections), 1)
        # Connecting to an unknown connection name must raise.
        connection = createDbPlugin('gpkg', connection_name + '_does_not_exist')
        connection_succeeded = False
        try:
            connection.connect()
            connection_succeeded = True
        except:
            # Deliberately broad: any exception counts as the expected failure.
            pass
        self.assertFalse(connection_succeeded, 'exception should have been raised')
        connection = connections[0]
        connection.connect()
        connection.reconnect()
        connection.remove()
        self.assertEqual(len(plugin.connections()), 0)
        # After removal, the name must no longer be connectable either.
        connection = createDbPlugin('gpkg', connection_name)
        connection_succeeded = False
        try:
            connection.connect()
            connection_succeeded = True
        except:
            pass
        self.assertFalse(connection_succeeded, 'exception should have been raised')

    def testListLayer(self):
        """Table listing and the HTML metadata report of a vector table."""
        connection_name = 'testListLayer'
        plugin = createDbPlugin('gpkg')
        uri = QgsDataSourceUri()
        uri.setDatabase(self.test_gpkg)
        self.assertTrue(plugin.addConnection(connection_name, uri))
        connection = createDbPlugin('gpkg', connection_name)
        connection.connect()
        db = connection.database()
        self.assertIsNotNone(db)
        tables = db.tables()
        self.assertEqual(len(tables), 1)
        table = tables[0]
        self.assertEqual(table.name, 'testLayer')
        info = table.info()
        # Expected report for GDAL < 2.2 (no feature-count triggers).
        expected_html = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td>geom </td></tr><tr><td>Geometry: </td><td>LINESTRING </td></tr><tr><td>Dimension: </td><td>XY </td></tr><tr><td>Spatial ref: </td><td>Undefined (-1) </td></tr><tr><td>Extent: </td><td>1.00000, 2.00000 - 3.00000, 4.00000 </td></tr></table><p><warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)</p></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>Y </td><td> </td></tr><tr><td>1 </td><td>geom </td><td>LINESTRING </td><td>Y </td><td> </td></tr><tr><td>2 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div>"""
        # GDAL 2.2.0
        expected_html_2 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td>geom </td></tr><tr><td>Geometry: </td><td>LINESTRING </td></tr><tr><td>Dimension: </td><td>XY </td></tr><tr><td>Spatial ref: </td><td>Undefined (-1) </td></tr><tr><td>Extent: </td><td>1.00000, 2.00000 - 3.00000, 4.00000 </td></tr></table><p><warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)</p></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>geom </td><td>LINESTRING </td><td>Y </td><td> </td></tr><tr><td>2 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testLayer (<a href="action:trigger/trigger_insert_feature_count_testLayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testLayer" AFTER INSERT ON "testLayer" BEGIN UPDATE gpkg_ogr_contents SET feature_count = feature_count + 1 WHERE table_name = 'testLayer'; END </td></tr><tr><td>trigger_delete_feature_count_testLayer (<a href="action:trigger/trigger_delete_feature_count_testLayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testLayer" AFTER DELETE ON "testLayer" BEGIN UPDATE gpkg_ogr_contents SET feature_count = feature_count - 1 WHERE table_name = 'testLayer'; END </td></tr></table></div></div>"""
        # GDAL 2.3.0 (case-insensitive table_name comparison in triggers)
        expected_html_3 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td>geom </td></tr><tr><td>Geometry: </td><td>LINESTRING </td></tr><tr><td>Dimension: </td><td>XY </td></tr><tr><td>Spatial ref: </td><td>Undefined (-1) </td></tr><tr><td>Extent: </td><td>1.00000, 2.00000 - 3.00000, 4.00000 </td></tr></table><p><warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)</p></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>geom </td><td>LINESTRING </td><td>Y </td><td> </td></tr><tr><td>2 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testLayer (<a href="action:trigger/trigger_insert_feature_count_testLayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testLayer" AFTER INSERT ON "testLayer" BEGIN UPDATE gpkg_ogr_contents SET feature_count = feature_count + 1 WHERE lower(table_name) = lower('testLayer'); END </td></tr><tr><td>trigger_delete_feature_count_testLayer (<a href="action:trigger/trigger_delete_feature_count_testLayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testLayer" AFTER DELETE ON "testLayer" BEGIN UPDATE gpkg_ogr_contents SET feature_count = feature_count - 1 WHERE lower(table_name) = lower('testLayer'); END </td></tr></table></div></div>"""
        self.assertIn(info.toHtml(), [expected_html, expected_html_2, expected_html_3])
        connection.remove()

    def testCreateRenameDeleteTable(self):
        """Rename an existing table, then create and drop a new vector table."""
        connection_name = 'testCreateRenameDeleteTable'
        plugin = createDbPlugin('gpkg')
        uri = QgsDataSourceUri()
        # Work on a copy so the shared fixture GeoPackage is left untouched.
        test_gpkg_new = os.path.join(self.basetestpath, 'testCreateRenameDeleteTable.gpkg')
        shutil.copy(self.test_gpkg, test_gpkg_new)
        uri.setDatabase(test_gpkg_new)
        self.assertTrue(plugin.addConnection(connection_name, uri))
        connection = createDbPlugin('gpkg', connection_name)
        connection.connect()
        db = connection.database()
        self.assertIsNotNone(db)
        tables = db.tables()
        self.assertEqual(len(tables), 1)
        table = tables[0]
        self.assertTrue(table.rename('newName'))
        self.assertEqual(table.name, 'newName')
        # Reconnect to verify the rename was persisted, not just in-memory.
        connection.reconnect()
        db = connection.database()
        tables = db.tables()
        self.assertEqual(len(tables), 1)
        table = tables[0]
        self.assertEqual(table.name, 'newName')
        fields = []
        # geom spec: [column name, geometry type, srid, dimensions]
        geom = ['geometry', 'POINT', 4326, 3]
        field1 = TableField(table)
        field1.name = 'fid'
        field1.dataType = 'INTEGER'
        field1.notNull = True
        field1.primaryKey = True
        field2 = TableField(table)
        field2.name = 'str_field'
        field2.dataType = 'TEXT'
        field2.modifier = 20
        fields = [field1, field2]
        self.assertTrue(db.createVectorTable('newName2', fields, geom))
        tables = db.tables()
        self.assertEqual(len(tables), 2)
        new_table = tables[1]
        self.assertEqual(new_table.name, 'newName2')
        fields = new_table.fields()
        # fid + geometry + str_field
        self.assertEqual(len(fields), 3)
        self.assertFalse(new_table.hasSpatialIndex())
        self.assertTrue(new_table.createSpatialIndex())
        self.assertTrue(new_table.hasSpatialIndex())
        self.assertTrue(new_table.delete())
        tables = db.tables()
        self.assertEqual(len(tables), 1)
        connection.remove()

    def testCreateRenameDeleteFields(self):
        """Add, alter and drop a column (requires OGR AlterFieldDefn support)."""
        if not self.supportsAlterFieldDefn:
            return
        connection_name = 'testCreateRenameDeleteFields'
        plugin = createDbPlugin('gpkg')
        uri = QgsDataSourceUri()
        test_gpkg_new = os.path.join(self.basetestpath, 'testCreateRenameDeleteFields.gpkg')
        shutil.copy(self.test_gpkg, test_gpkg_new)
        uri.setDatabase(test_gpkg_new)
        self.assertTrue(plugin.addConnection(connection_name, uri))
        connection = createDbPlugin('gpkg', connection_name)
        connection.connect()
        db = connection.database()
        self.assertIsNotNone(db)
        tables = db.tables()
        self.assertEqual(len(tables), 1)
        table = tables[0]
        field_before_count = len(table.fields())
        field = TableField(table)
        field.name = 'real_field'
        field.dataType = 'DOUBLE'
        self.assertTrue(table.addField(field))
        self.assertEqual(len(table.fields()), field_before_count + 1)
        # Rename + retype + constraint + default, all in a single update.
        self.assertTrue(field.update('real_field2', new_type_str='TEXT (30)', new_not_null=True, new_default_str='foo'))
        field = table.fields()[field_before_count]
        self.assertEqual(field.name, 'real_field2')
        self.assertEqual(field.dataType, 'TEXT(30)')
        self.assertEqual(field.notNull, 1)
        self.assertEqual(field.default, "'foo'")
        self.assertTrue(table.deleteField(field))
        self.assertEqual(len(table.fields()), field_before_count)
        connection.remove()

    def testTableDataModel(self):
        """Row/column access through the table's Qt data model."""
        connection_name = 'testTableDataModel'
        plugin = createDbPlugin('gpkg')
        uri = QgsDataSourceUri()
        uri.setDatabase(self.test_gpkg)
        self.assertTrue(plugin.addConnection(connection_name, uri))
        connection = createDbPlugin('gpkg', connection_name)
        connection.connect()
        db = connection.database()
        self.assertIsNotNone(db)
        tables = db.tables()
        self.assertEqual(len(tables), 1)
        table = tables[0]
        self.assertEqual(table.name, 'testLayer')
        model = table.tableDataModel(None)
        self.assertEqual(model.rowCount(), 1)
        self.assertEqual(model.getData(0, 0), 1)  # fid
        self.assertEqual(model.getData(0, 1), 'LINESTRING (1 2,3 4)')
        self.assertEqual(model.getData(0, 2), 'foo')
        connection.remove()

    def testRaster(self):
        """Raster table listing and HTML report (GDAL >= 2.0.2 only)."""
        if int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 0, 2):
            return
        connection_name = 'testRaster'
        plugin = createDbPlugin('gpkg')
        uri = QgsDataSourceUri()
        test_gpkg_new = os.path.join(self.basetestpath, 'testRaster.gpkg')
        shutil.copy(self.test_gpkg, test_gpkg_new)
        # Append a small 20x20 EPSG:4326 raster to the copied GeoPackage.
        mem_ds = gdal.GetDriverByName('MEM').Create('', 20, 20)
        mem_ds.SetGeoTransform([2, 0.01, 0, 49, 0, -0.01])
        sr = osr.SpatialReference()
        sr.ImportFromEPSG(4326)
        mem_ds.SetProjection(sr.ExportToWkt())
        mem_ds.GetRasterBand(1).Fill(255)
        gdal.GetDriverByName('GPKG').CreateCopy(test_gpkg_new, mem_ds, options=['APPEND_SUBDATASET=YES', 'RASTER_TABLE=raster_table'])
        mem_ds = None
        uri.setDatabase(test_gpkg_new)
        self.assertTrue(plugin.addConnection(connection_name, uri))
        connection = createDbPlugin('gpkg', connection_name)
        connection.connect()
        db = connection.database()
        self.assertIsNotNone(db)
        tables = db.tables()
        self.assertEqual(len(tables), 2)
        table = None
        for i in range(2):
            if tables[i].name == 'raster_table':
                table = tables[i]
                break
        self.assertIsNotNone(table)
        info = table.info()
        expected_html = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>Unknown (<a href="action:rows/count">find out</a>) </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td> </td></tr><tr><td>Geometry: </td><td>RASTER </td></tr><tr><td>Spatial ref: </td><td>WGS 84 geodetic (4326) </td></tr><tr><td>Extent: </td><td>2.00000, 48.80000 - 2.20000, 49.00000 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">id </td><td>INTEGER </td><td>Y </td><td> </td></tr><tr><td>1 </td><td>zoom_level </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>2 </td><td>tile_column </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>3 </td><td>tile_row </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>4 </td><td>tile_data </td><td>BLOB </td><td>N </td><td> </td></tr></table></div></div><div class="section"><h2>Indexes</h2><div><table class="header"><tr><th>Name </th><th>Column(s) </th></tr><tr><td>sqlite_autoindex_raster_table_1 </td><td>zoom_level<br>tile_column<br>tile_row </td></tr></table></div></div>"""
        self.assertEqual(info.toHtml(), expected_html)
        connection.remove()

    def testTwoRaster(self):
        """Two raster tables in one GeoPackage must both render without error."""
        if int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 0, 2):
            return
        connection_name = 'testTwoRaster'
        plugin = createDbPlugin('gpkg')
        uri = QgsDataSourceUri()
        test_gpkg_new = os.path.join(self.basetestpath, 'testTwoRaster.gpkg')
        shutil.copy(self.test_gpkg, test_gpkg_new)
        mem_ds = gdal.GetDriverByName('MEM').Create('', 20, 20)
        mem_ds.SetGeoTransform([2, 0.01, 0, 49, 0, -0.01])
        sr = osr.SpatialReference()
        sr.ImportFromEPSG(4326)
        mem_ds.SetProjection(sr.ExportToWkt())
        mem_ds.GetRasterBand(1).Fill(255)
        for i in range(2):
            gdal.GetDriverByName('GPKG').CreateCopy(test_gpkg_new, mem_ds, options=['APPEND_SUBDATASET=YES', 'RASTER_TABLE=raster_table%d' % (i + 1)])
        mem_ds = None
        uri.setDatabase(test_gpkg_new)
        self.assertTrue(plugin.addConnection(connection_name, uri))
        connection = createDbPlugin('gpkg', connection_name)
        connection.connect()
        db = connection.database()
        self.assertIsNotNone(db)
        tables = db.tables()
        self.assertEqual(len(tables), 3)
        table = None
        for i in range(2):
            if tables[i].name.startswith('raster_table'):
                table = tables[i]
                # Only checks that report generation does not raise.
                info = table.info()
                info.toHtml()
        connection.remove()

    def testNonSpatial(self):
        """HTML report of a geometry-less (attribute-only) table."""
        connection_name = 'testNonSpatial'
        plugin = createDbPlugin('gpkg')
        uri = QgsDataSourceUri()
        test_gpkg = os.path.join(self.basetestpath, 'testNonSpatial.gpkg')
        ds = ogr.GetDriverByName('GPKG').CreateDataSource(test_gpkg)
        lyr = ds.CreateLayer('testNonSpatial', geom_type=ogr.wkbNone)
        lyr.CreateField(ogr.FieldDefn('text_field', ogr.OFTString))
        f = ogr.Feature(lyr.GetLayerDefn())
        f['text_field'] = 'foo'
        lyr.CreateFeature(f)
        f = None
        ds = None
        uri.setDatabase(test_gpkg)
        self.assertTrue(plugin.addConnection(connection_name, uri))
        connection = createDbPlugin('gpkg', connection_name)
        connection.connect()
        db = connection.database()
        self.assertIsNotNone(db)
        tables = db.tables()
        self.assertEqual(len(tables), 1)
        table = tables[0]
        self.assertEqual(table.name, 'testNonSpatial')
        info = table.info()
        # Expected report for GDAL < 2.2 (no feature-count triggers).
        expected_html = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>Y </td><td> </td></tr><tr><td>1 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div>"""
        # GDAL 2.2.0
        expected_html_2 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testNonSpatial (<a href="action:trigger/trigger_insert_feature_count_testNonSpatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testNonSpatial" AFTER INSERT ON "testNonSpatial" BEGIN UPDATE gpkg_ogr_contents SET feature_count = feature_count + 1 WHERE table_name = 'testNonSpatial'; END </td></tr><tr><td>trigger_delete_feature_count_testNonSpatial (<a href="action:trigger/trigger_delete_feature_count_testNonSpatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testNonSpatial" AFTER DELETE ON "testNonSpatial" BEGIN UPDATE gpkg_ogr_contents SET feature_count = feature_count - 1 WHERE table_name = 'testNonSpatial'; END </td></tr></table></div></div>"""
        # GDAL 2.3.0 (case-insensitive table_name comparison in triggers)
        expected_html_3 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testNonSpatial (<a href="action:trigger/trigger_insert_feature_count_testNonSpatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testNonSpatial" AFTER INSERT ON "testNonSpatial" BEGIN UPDATE gpkg_ogr_contents SET feature_count = feature_count + 1 WHERE lower(table_name) = lower('testNonSpatial'); END </td></tr><tr><td>trigger_delete_feature_count_testNonSpatial (<a href="action:trigger/trigger_delete_feature_count_testNonSpatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testNonSpatial" AFTER DELETE ON "testNonSpatial" BEGIN UPDATE gpkg_ogr_contents SET feature_count = feature_count - 1 WHERE lower(table_name) = lower('testNonSpatial'); END </td></tr></table></div></div>"""
        self.assertIn(info.toHtml(), [expected_html, expected_html_2, expected_html_3], info.toHtml())
        connection.remove()

    def testAllGeometryTypes(self):
        """Smoke test: a GeoPackage with one layer per OGR geometry type."""
        connection_name = 'testAllGeometryTypes'
        plugin = createDbPlugin('gpkg')
        uri = QgsDataSourceUri()
        test_gpkg = os.path.join(self.basetestpath, 'testAllGeometryTypes.gpkg')
        ds = ogr.GetDriverByName('GPKG').CreateDataSource(test_gpkg)
        ds.CreateLayer('testPoint', geom_type=ogr.wkbPoint)
        ds.CreateLayer('testLineString', geom_type=ogr.wkbLineString)
        ds.CreateLayer('testPolygon', geom_type=ogr.wkbPolygon)
        ds.CreateLayer('testMultiPoint', geom_type=ogr.wkbMultiPoint)
        ds.CreateLayer('testMultiLineString', geom_type=ogr.wkbMultiLineString)
        ds.CreateLayer('testMultiPolygon', geom_type=ogr.wkbMultiPolygon)
        ds.CreateLayer('testGeometryCollection', geom_type=ogr.wkbGeometryCollection)
        ds.CreateLayer('testCircularString', geom_type=ogr.wkbCircularString)
        ds.CreateLayer('testCompoundCurve', geom_type=ogr.wkbCompoundCurve)
        ds.CreateLayer('testCurvePolygon', geom_type=ogr.wkbCurvePolygon)
        ds.CreateLayer('testMultiCurve', geom_type=ogr.wkbMultiCurve)
        ds.CreateLayer('testMultiSurface', geom_type=ogr.wkbMultiSurface)
        ds = None
        uri.setDatabase(test_gpkg)
        self.assertTrue(plugin.addConnection(connection_name, uri))
        connection = createDbPlugin('gpkg', connection_name)
        connection.connect()
        db = connection.database()
        self.assertIsNotNone(db)
        # NOTE(review): the per-table info() check below was left disabled;
        # only the connection itself is exercised here.
        # tables = db.tables()
        # for i in range(len(tables)):
        #     table = tables[i]
        #     info = table.info()
        connection.remove()
if __name__ == '__main__':
    # Allow running this test module directly (outside the QGIS test runner).
    unittest.main()
| gpl-2.0 |
Spinkelben/remote-radio | radio_api/main.py | 1 | 3419 | from flask import Flask, abort, request, jsonify
from redis import Redis, RedisError
from message_pb2 import Request, Response
import os
import socket
import logging
import json
redis = Redis(host="redis", port=6379, db=0, socket_connect_timeout=2, socket_timeout=2)
app = Flask(__name__)
def play_station(station):
    """Ask the radio backend to switch to the given station.

    :param station: dict with at least 'name' and 'stream_url' keys.
    :return: True when the backend reports the channel change succeeded.
    """
    logging.debug("Sending change station request")
    request_message = Request()
    request_message.type = Request.SET_CHANNEL
    request_message.channel = station['stream_url']
    radio_answer = _send_message_to_radio(request_message)
    # Bug fix: logging.debug() takes a %-style format string plus lazy
    # arguments, not print()-style positional strings -- the old call made
    # the logging machinery raise a formatting error instead of logging.
    logging.debug("Playing %s from url:\n%s", station['name'], station['stream_url'])
    return radio_answer.success
def get_info():
    """Query the radio backend for details about the current stream."""
    logging.debug("Sending get info request")
    info_request = Request()
    info_request.type = Request.INFO
    answer = _send_message_to_radio(info_request)
    # 'extra' arrives as a JSON string (or empty); decode it when present.
    extra = json.loads(answer.extra) if answer.extra else None
    station_info = {
        "name": answer.name,
        "stream_url": None,
        "bitrate": answer.bitrate,
        "codec": answer.codec,
        "title": answer.title,
        "location": answer.location,
        "extra": extra,
        "stereo": answer.stereo,
    }
    return {"success": True, "station_info": station_info}
def _send_message_to_radio(request_message):
    """Send a serialized protobuf Request over the radio daemon's UNIX
    socket and return the parsed Response.

    :param request_message: a message_pb2.Request instance.
    :return: the daemon's message_pb2.Response.
    :raises OSError: when the daemon socket is unreachable.
    """
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        s.connect('/var/sockets/radio-sock')
        s.send(request_message.SerializeToString())
        # Half-close the write side so the server sees EOF and replies.
        s.shutdown(socket.SHUT_WR)
        chunks = []
        while True:
            data = s.recv(4096)
            if not data:
                break
            chunks.append(data)
    finally:
        # Fix: always release the socket, even when connect/send/recv raises;
        # the old code leaked the descriptor on any error.
        s.close()
    response = Response()
    response.ParseFromString(b''.join(chunks))
    return response
# Used for changing station, or pausing
@app.route('/api/station/', methods=['GET', 'POST'])
def current_station():
    """GET: return info about the currently playing station.
    POST: switch to the station named by the 'name'/'stream_url' form fields.
    """
    if request.method == 'GET':
        return jsonify(get_info())
    elif request.method == 'POST':
        # Fix: "Recieved" typo in the log message.
        logging.debug("Received POST request")
        stream_url = request.form['stream_url']
        name = request.form['name']
        new_station = {'name': name, 'stream_url': stream_url}
        success = play_station(new_station)
        # Lazy %-style logging args instead of eager str.format().
        logging.info("Station changed successfully = %s", success)
        # These fields are unknown until the backend reports them via GET.
        new_station['bitrate'] = None
        new_station['codec'] = None
        response = {
            "success": success,
            "station_info": new_station
        }
        return jsonify(response)
@app.route('/api/stop', methods=["GET"])
def stop_playing():
    """Tell the backend to stop playback; returns its success flag as JSON."""
    message = Request()
    message.type = Request.STOP
    answer = _send_message_to_radio(message)
    logging.debug("Stop request sent")
    return jsonify(answer.success)
@app.route('/api/start', methods=["GET"])
def start_playing():
    """Tell the backend to start playback; returns its success flag as JSON."""
    message = Request()
    message.type = Request.PLAY
    answer = _send_message_to_radio(message)
    logging.debug("Start request sent")
    return jsonify(answer.success)
@app.route('/api/pause', methods=["GET"])
def pause_playing():
    # NOTE(review): this sends Request.STOP, making /api/pause behave exactly
    # like /api/stop -- confirm whether the protocol defines a distinct PAUSE
    # request type that should be used here instead.
    pause_request = Request()
    pause_request.type = Request.STOP
    answer = _send_message_to_radio(pause_request)
    logging.debug("Pause request sent")
    return jsonify(answer.success)
if __name__ == "__main__":
    # Dev entry point: verbose logging, Flask debug server on all interfaces.
    logging.basicConfig(level=logging.DEBUG)
    app.run(host="0.0.0.0", port=80, debug=True)
| mit |
UrusTeam/android_ndk_toolchain_cross | lib/python2.7/ctypes/macholib/dylib.py | 268 | 2041 | ######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
"""
Generic dylib path manipulation
"""
import re
__all__ = ['dylib_info']
# Verbose-mode pattern splitting a dylib path into location, full name,
# short name, optional version and optional suffix.
DYLIB_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
    (?P<shortname>\w+?)
    (?:\.(?P<version>[^._]+))?
    (?:_(?P<suffix>[^._]+))?
    \.dylib$
)
""")


def dylib_info(filename):
    """
    Split a dylib path into its components.

    A dylib name can take one of the following four forms:
        Location/Name.SomeVersion_Suffix.dylib
        Location/Name.SomeVersion.dylib
        Location/Name_Suffix.dylib
        Location/Name.dylib

    Returns None when *filename* is not a dylib path, otherwise a dict with
    keys 'location', 'name', 'shortname', 'version' and 'suffix' (the last
    two are None when absent).
    """
    match = DYLIB_RE.match(filename)
    return match.groupdict() if match else None
def test_dylib_info():
    """Self-test covering all four dylib name forms plus two non-matches."""
    def expected(location=None, name=None, shortname=None, version=None, suffix=None):
        # Build the mapping dylib_info() is documented to return.
        return dict(location=location, name=name, shortname=shortname,
                    version=version, suffix=suffix)
    assert dylib_info('completely/invalid') is None
    assert dylib_info('completely/invalide_debug') is None
    assert dylib_info('P/Foo.dylib') == expected('P', 'Foo.dylib', 'Foo')
    assert dylib_info('P/Foo_debug.dylib') == expected('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
    assert dylib_info('P/Foo.A.dylib') == expected('P', 'Foo.A.dylib', 'Foo', 'A')
    assert dylib_info('P/Foo_debug.A.dylib') == expected('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
    assert dylib_info('P/Foo.A_debug.dylib') == expected('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')
if __name__ == '__main__':
    # Run the module's self-test when executed directly.
    test_dylib_info()
| gpl-2.0 |
rezoo/chainer | chainer/functions/pooling/average_pooling_nd_kernel.py | 13 | 1310 | from chainer.functions.pooling import pooling_nd_kernel
class AveragePoolingNDKernelForward(pooling_nd_kernel.PoolingNDKernelForward):
    """Generates the elementwise CUDA kernel source for the forward pass of
    N-dimensional average pooling (kernels named avg_pool_{N}d_fwd)."""

    def name(self):
        """Short kernel-family name used in the generated kernel name."""
        return 'avg'

    def in_params(self):
        """Extra input parameters appended after the standard pooling ones
        (raw input, dims, output dims, kernel/stride/pad per axis):
        the averaging coefficient."""
        return ['T coeff']

    def before(self):
        """C snippet emitted before the pooling loop: zero the accumulator."""
        return 'T val = 0;'

    def main(self, offset, xs):
        """C snippet emitted per pooled element: accumulate the input value
        at the given flat offset expression."""
        return 'val = val + in[{}];'.format(offset)

    def after(self, out_xs):
        """C snippet emitted after the loop: scale the sum into an average."""
        return 'out = val * coeff;'
class AveragePoolingNDKernelBackward(
        pooling_nd_kernel.PoolingNDKernelBackward):
    """Generates the elementwise CUDA kernel source for the backward pass of
    N-dimensional average pooling (kernels named avg_pool_{N}d_bwd)."""

    def name(self):
        """Short kernel-family name used in the generated kernel name."""
        return 'avg'

    def in_params(self):
        """Extra input parameters appended after the standard pooling ones
        (raw upstream gradient, dims, output dims, kernel/stride/pad per
        axis): the averaging coefficient."""
        return ['T coeff']

    def before(self):
        """C snippet emitted before the gradient loop: zero the accumulator."""
        return 'T val = 0;'

    def main(self, offset, xs, out_xs):
        """C snippet emitted per contributing output element: accumulate the
        upstream gradient at the given flat offset expression."""
        return 'val = val + gy[{}];'.format(offset)

    def after(self, xs):
        """C snippet emitted after the loop: scale the gradient sum."""
        return 'gx = val * coeff;'
| mit |
sergey-shandar/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/StorageManagementClient/fixtures/acceptancetestsstoragemanagementclient/storage_management_client.py | 6 | 3926 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.storage_accounts_operations import StorageAccountsOperations
from .operations.usage_operations import UsageOperations
from . import models
class StorageManagementClientConfiguration(AzureConfiguration):
    """Configuration for StorageManagementClient.

    Note that all parameters used to create this instance are saved as
    instance attributes.

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: Gets subscription credentials which uniquely
     identify Microsoft Azure subscription. The subscription ID forms part of
     the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):
        # Fail fast on the two mandatory parameters before touching the base
        # class, so callers get a clear error rather than a late failure.
        if credentials is None:
            raise ValueError("Parameter 'credentials' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        if not isinstance(subscription_id, str):
            raise TypeError("Parameter 'subscription_id' must be str.")
        if not base_url:
            # Default to the public Azure Resource Manager endpoint.
            base_url = 'https://management.azure.com'
        super(StorageManagementClientConfiguration, self).__init__(base_url)
        # Identify this generated client and the Python SDK in User-Agent.
        self.add_user_agent('storagemanagementclient/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')
        self.credentials = credentials
        self.subscription_id = subscription_id
class StorageManagementClient(object):
    """Entry point for the storage management service.

    :ivar config: Configuration for client.
    :vartype config: StorageManagementClientConfiguration
    :ivar storage_accounts: StorageAccounts operations
    :vartype storage_accounts: fixtures.acceptancetestsstoragemanagementclient.operations.StorageAccountsOperations
    :ivar usage: Usage operations
    :vartype usage: fixtures.acceptancetestsstoragemanagementclient.operations.UsageOperations

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: Gets subscription credentials which uniquely
     identify Microsoft Azure subscription. The subscription ID forms part of
     the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):

        # All connection settings live on the configuration object.
        self.config = StorageManagementClientConfiguration(
            credentials, subscription_id, base_url)
        self._client = ServiceClient(self.config.credentials, self.config)

        self.api_version = '2015-05-01-preview'

        # Register every model class with the (de)serializers.
        client_models = {
            name: cls for name, cls in models.__dict__.items()
            if isinstance(cls, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # Operation groups exposed by this client.
        operation_args = (
            self._client, self.config, self._serialize, self._deserialize)
        self.storage_accounts = StorageAccountsOperations(*operation_args)
        self.usage = UsageOperations(*operation_args)
| mit |
smnitro555/ESWegarden | raspibot/lib/python2.7/site-packages/pip/commands/uninstall.py | 798 | 2884 | from __future__ import absolute_import
import pip
from pip.wheel import WheelCache
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
    """
    Uninstall packages.
    pip is able to uninstall most installed packages. Known exceptions are:
    - Pure distutils packages installed with ``python setup.py install``, which
    leave behind no metadata to determine what files were installed.
    - Script wrappers installed by ``python setup.py develop``.
    """
    # Command metadata consumed by pip's command registry / help output.
    name = 'uninstall'
    usage = """
%prog [options] <package> ...
%prog [options] -r <requirements file> ..."""
    summary = 'Uninstall packages.'

    def __init__(self, *args, **kw):
        # Register the command-line options specific to `pip uninstall`.
        super(UninstallCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '-r', '--requirement',
            dest='requirements',
            action='append',
            default=[],
            metavar='file',
            help='Uninstall all the packages listed in the given requirements '
            'file. This option can be used multiple times.',
        )
        self.cmd_opts.add_option(
            '-y', '--yes',
            dest='yes',
            action='store_true',
            help="Don't ask for confirmation of uninstall deletions.")
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        """Collect the requested requirements and uninstall them.

        Raises InstallationError when neither positional packages nor
        -r/--requirement files provided any requirement.
        """
        with self._build_session(options) as session:
            # No binary/source format preference is relevant when removing.
            format_control = pip.index.FormatControl(set(), set())
            wheel_cache = WheelCache(options.cache_dir, format_control)
            requirement_set = RequirementSet(
                build_dir=None,
                src_dir=None,
                download_dir=None,
                isolated=options.isolated_mode,
                session=session,
                wheel_cache=wheel_cache,
            )
            # Positional arguments: one requirement specifier per name.
            for name in args:
                requirement_set.add_requirement(
                    InstallRequirement.from_line(
                        name, isolated=options.isolated_mode,
                        wheel_cache=wheel_cache
                    )
                )
            # -r/--requirement files: add every entry they list.
            for filename in options.requirements:
                for req in parse_requirements(
                        filename,
                        options=options,
                        session=session,
                        wheel_cache=wheel_cache):
                    requirement_set.add_requirement(req)
            if not requirement_set.has_requirements:
                raise InstallationError(
                    'You must give at least one requirement to %(name)s (see '
                    '"pip help %(name)s")' % dict(name=self.name)
                )
            requirement_set.uninstall(auto_confirm=options.yes)
| gpl-3.0 |
nott/next.filmfest.by | cpm_data/migrations/0009_film.py | 2 | 9214 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-12-10 22:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the Film model with
    # trilingual (en/be/ru) text fields, a country choice list, and links
    # to a preview image (wagtailimages) and the originating submission.

    dependencies = [
        ('submissions', '0001_initial'),
        ('wagtailimages', '0013_make_rendition_upload_callable'),
        ('cpm_data', '0008_add_partners_data_2012_2013_2015'),
    ]

    operations = [
        migrations.CreateModel(
            name='Film',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), # noqa:E501
                ('title_en', models.CharField(blank=True, default='', max_length=1000)), # noqa:E501
                ('title_be', models.CharField(blank=True, default='', max_length=1000)), # noqa:E501
                ('title_ru', models.CharField(blank=True, default='', max_length=1000)), # noqa:E501
                ('director_en', models.CharField(blank=True, default='', max_length=1000)), # noqa:E501
                ('director_be', models.CharField(blank=True, default='', max_length=1000)), # noqa:E501
                ('director_ru', models.CharField(blank=True, default='', max_length=1000)), # noqa:E501
                # Two-letter country codes with human-readable labels.
                ('country', models.CharField(blank=True, choices=[(b'AD', 'Andorra'), (b'AE', 'United Arab Emirates'), (b'AF', 'Afghanistan'), (b'AG', 'Antigua & Barbuda'), (b'AI', 'Anguilla'), (b'AL', 'Albania'), (b'AM', 'Armenia'), (b'AN', 'Netherlands Antilles'), (b'AO', 'Angola'), (b'AQ', 'Antarctica'), (b'AR', 'Argentina'), (b'AS', 'American Samoa'), (b'AT', 'Austria'), (b'AU', 'Australia'), (b'AW', 'Aruba'), (b'AZ', 'Azerbaijan'), (b'BA', 'Bosnia and Herzegovina'), (b'BB', 'Barbados'), (b'BD', 'Bangladesh'), (b'BE', 'Belgium'), (b'BF', 'Burkina Faso'), (b'BG', 'Bulgaria'), (b'BH', 'Bahrain'), (b'BI', 'Burundi'), (b'BJ', 'Benin'), (b'BM', 'Bermuda'), (b'BN', 'Brunei Darussalam'), (b'BO', 'Bolivia'), (b'BR', 'Brazil'), (b'BS', 'Bahama'), (b'BT', 'Bhutan'), (b'BV', 'Bouvet Island'), (b'BW', 'Botswana'), (b'BY', 'Belarus'), (b'BZ', 'Belize'), (b'CA', 'Canada'), (b'CC', 'Cocos (Keeling) Islands'), (b'CF', 'Central African Republic'), (b'CG', 'Congo'), (b'CH', 'Switzerland'), (b'CI', 'Ivory Coast'), (b'CK', 'Cook Iislands'), (b'CL', 'Chile'), (b'CM', 'Cameroon'), (b'CN', 'China'), (b'CO', 'Colombia'), (b'CR', 'Costa Rica'), (b'CU', 'Cuba'), (b'CV', 'Cape Verde'), (b'CX', 'Christmas Island'), (b'CY', 'Cyprus'), (b'CZ', 'Czech Republic'), (b'DE', 'Germany'), (b'DJ', 'Djibouti'), (b'DK', 'Denmark'), (b'DM', 'Dominica'), (b'DO', 'Dominican Republic'), (b'DZ', 'Algeria'), (b'EC', 'Ecuador'), (b'EE', 'Estonia'), (b'EG', 'Egypt'), (b'EH', 'Western Sahara'), (b'ER', 'Eritrea'), (b'ES', 'Spain'), (b'ET', 'Ethiopia'), (b'FI', 'Finland'), (b'FJ', 'Fiji'), (b'FK', 'Falkland Islands (Malvinas)'), (b'FM', 'Micronesia'), (b'FO', 'Faroe Islands'), (b'FR', 'France'), (b'FX', 'France, Metropolitan'), (b'GA', 'Gabon'), (b'GB', 'United Kingdom (Great Britain)'), (b'GD', 'Grenada'), (b'GE', 'Georgia'), (b'GF', 'French Guiana'), (b'GH', 'Ghana'), (b'GI', 'Gibraltar'), (b'GL', 'Greenland'), (b'GM', 'Gambia'), (b'GN', 'Guinea'), (b'GP', 'Guadeloupe'), (b'GQ', 'Equatorial Guinea'), (b'GR', 'Greece'), 
                (b'GS', 'South Georgia and the South Sandwich Islands'), (b'GT', 'Guatemala'), (b'GU', 'Guam'), (b'GW', 'Guinea-Bissau'), (b'GY', 'Guyana'), (b'HK', 'Hong Kong'), (b'HM', 'Heard & McDonald Islands'), (b'HN', 'Honduras'), (b'HR', 'Croatia'), (b'HT', 'Haiti'), (b'HU', 'Hungary'), (b'ID', 'Indonesia'), (b'IE', 'Ireland'), (b'IL', 'Israel'), (b'IN', 'India'), (b'IO', 'British Indian Ocean Territory'), (b'IQ', 'Iraq'), (b'IR', 'Islamic Republic of Iran'), (b'IS', 'Iceland'), (b'IT', 'Italy'), (b'JM', 'Jamaica'), (b'JO', 'Jordan'), (b'JP', 'Japan'), (b'KE', 'Kenya'), (b'KG', 'Kyrgyzstan'), (b'KH', 'Cambodia'), (b'KI', 'Kiribati'), (b'KM', 'Comoros'), (b'KN', 'St. Kitts and Nevis'), (b'KP', "Korea, Democratic People's Republic of"), (b'KR', 'Korea, Republic of'), (b'KW', 'Kuwait'), (b'KY', 'Cayman Islands'), (b'KZ', 'Kazakhstan'), (b'LA', "Lao People's Democratic Republic"), (b'LB', 'Lebanon'), (b'LC', 'Saint Lucia'), (b'LI', 'Liechtenstein'), (b'LK', 'Sri Lanka'), (b'LR', 'Liberia'), (b'LS', 'Lesotho'), (b'LT', 'Lithuania'), (b'LU', 'Luxembourg'), (b'LV', 'Latvia'), (b'LY', 'Libyan Arab Jamahiriya'), (b'MA', 'Morocco'), (b'MC', 'Monaco'), (b'MD', 'Moldova, Republic of'), (b'MG', 'Madagascar'), (b'MH', 'Marshall Islands'), (b'ML', 'Mali'), (b'MN', 'Mongolia'), (b'MM', 'Myanmar'), (b'MO', 'Macau'), (b'MP', 'Northern Mariana Islands'), (b'MQ', 'Martinique'), (b'MR', 'Mauritania'), (b'MS', 'Monserrat'), (b'MT', 'Malta'), (b'MU', 'Mauritius'), (b'MV', 'Maldives'), (b'MW', 'Malawi'), (b'MX', 'Mexico'), (b'MY', 'Malaysia'), (b'MZ', 'Mozambique'), (b'NA', 'Namibia'), (b'NC', 'New Caledonia'), (b'NE', 'Niger'), (b'NF', 'Norfolk Island'), (b'NG', 'Nigeria'), (b'NI', 'Nicaragua'), (b'NL', 'Netherlands'), (b'NO', 'Norway'), (b'NP', 'Nepal'), (b'NR', 'Nauru'), (b'NU', 'Niue'), (b'NZ', 'New Zealand'), (b'OM', 'Oman'), (b'PA', 'Panama'), (b'PE', 'Peru'), (b'PF', 'French Polynesia'), (b'PG', 'Papua New Guinea'), (b'PH', 'Philippines'), (b'PK', 'Pakistan'), (b'PL', 'Poland'), (b'PM', 
                'St. Pierre & Miquelon'), (b'PN', 'Pitcairn'), (b'PR', 'Puerto Rico'), (b'PT', 'Portugal'), (b'PW', 'Palau'), (b'PY', 'Paraguay'), (b'QA', 'Qatar'), (b'RE', 'Reunion'), (b'RO', 'Romania'), (b'RU', 'Russian Federation'), (b'RW', 'Rwanda'), (b'SA', 'Saudi Arabia'), (b'SB', 'Solomon Islands'), (b'SC', 'Seychelles'), (b'SD', 'Sudan'), (b'SE', 'Sweden'), (b'SG', 'Singapore'), (b'SH', 'St. Helena'), (b'SI', 'Slovenia'), (b'SJ', 'Svalbard & Jan Mayen Islands'), (b'SK', 'Slovakia'), (b'SL', 'Sierra Leone'), (b'SM', 'San Marino'), (b'SN', 'Senegal'), (b'SO', 'Somalia'), (b'SR', 'Suriname'), (b'ST', 'Sao Tome & Principe'), (b'SV', 'El Salvador'), (b'SY', 'Syrian Arab Republic'), (b'SZ', 'Swaziland'), (b'TC', 'Turks & Caicos Islands'), (b'TD', 'Chad'), (b'TF', 'French Southern Territories'), (b'TG', 'Togo'), (b'TH', 'Thailand'), (b'TJ', 'Tajikistan'), (b'TK', 'Tokelau'), (b'TM', 'Turkmenistan'), (b'TN', 'Tunisia'), (b'TO', 'Tonga'), (b'TP', 'East Timor'), (b'TR', 'Turkey'), (b'TT', 'Trinidad & Tobago'), (b'TV', 'Tuvalu'), (b'TW', 'Taiwan, Province of China'), (b'TZ', 'Tanzania, United Republic of'), (b'UA', 'Ukraine'), (b'UG', 'Uganda'), (b'UM', 'United States Minor Outlying Islands'), (b'US', 'United States of America'), (b'UY', 'Uruguay'), (b'UZ', 'Uzbekistan'), (b'VA', 'Vatican City State (Holy See)'), (b'VC', 'St. Vincent & the Grenadines'), (b'VE', 'Venezuela'), (b'VG', 'British Virgin Islands'), (b'VI', 'United States Virgin Islands'), (b'VN', 'Viet Nam'), (b'VU', 'Vanuatu'), (b'WF', 'Wallis & Futuna Islands'), (b'WS', 'Samoa'), (b'YE', 'Yemen'), (b'YT', 'Mayotte'), (b'YU', 'Yugoslavia'), (b'ZA', 'South Africa'), (b'ZM', 'Zambia'), (b'ZR', 'Zaire'), (b'ZW', 'Zimbabwe'), (b'ZZ', 'Other')], max_length=2, null=True)), # noqa:E501
                ('city_en', models.CharField(blank=True, default='', max_length=100)), # noqa:E501
                ('city_be', models.CharField(blank=True, default='', max_length=100)), # noqa:E501
                ('city_ru', models.CharField(blank=True, default='', max_length=100)), # noqa:E501
                ('year', models.IntegerField(blank=True, null=True)), # noqa:E501
                ('duration_en', models.CharField(blank=True, default='', max_length=100)), # noqa:E501
                ('duration_be', models.CharField(blank=True, default='', max_length=100)), # noqa:E501
                ('duration_ru', models.CharField(blank=True, default='', max_length=100)), # noqa:E501
                ('genre_en', models.CharField(blank=True, default='', max_length=1000)), # noqa:E501
                ('genre_be', models.CharField(blank=True, default='', max_length=1000)), # noqa:E501
                ('genre_ru', models.CharField(blank=True, default='', max_length=1000)), # noqa:E501
                # Rich-text synopses, short and full, per language.
                ('synopsis_short_en', wagtail.wagtailcore.fields.RichTextField(blank=True, default='')), # noqa:E501
                ('synopsis_short_be', wagtail.wagtailcore.fields.RichTextField(blank=True, default='')), # noqa:E501
                ('synopsis_short_ru', wagtail.wagtailcore.fields.RichTextField(blank=True, default='')), # noqa:E501
                ('synopsis_en', wagtail.wagtailcore.fields.RichTextField(blank=True, default='')), # noqa:E501
                ('synopsis_be', wagtail.wagtailcore.fields.RichTextField(blank=True, default='')), # noqa:E501
                ('synopsis_ru', wagtail.wagtailcore.fields.RichTextField(blank=True, default='')), # noqa:E501
                # SET_NULL keeps the Film row if the image/submission is deleted.
                ('frame', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')), # noqa:E501
                ('submission', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='submissions.Submission')), # noqa:E501
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| unlicense |
satishgoda/pipe2py | tests/pypipelines/pipe_125e9fe8bb5f84526d21bebfec3ad116.py | 6 | 3594 | # Pipe pipe_125e9fe8bb5f84526d21bebfec3ad116 generated by pipe2py
from pipe2py import Context
from pipe2py.modules.pipeforever import pipe_forever
from pipe2py.modules.pipetextinput import pipe_textinput
from pipe2py.modules.pipeitembuilder import pipe_itembuilder
from pipe2py.modules.pipeurlbuilder import pipe_urlbuilder
from pipe2py.modules.pipeloop import pipe_loop
from pipe2py.modules.pipefetchdata import pipe_fetchdata
from pipe2py.modules.pipeloop import pipe_loop
from pipe2py.modules.piperename import pipe_rename
from pipe2py.modules.pipeoutput import pipe_output
def pipe_125e9fe8bb5f84526d21bebfec3ad116(context=None, _INPUT=None, conf=None, **kwargs):
    """Auto-generated pipe2py pipeline.

    Builds a GitHub user-search URL from the 'textinput1' prompt, fetches
    the matching users, and (in a rename step) copies each user's first
    language entry into 'description'.
    """
    conf = conf or {}

    # Introspection shortcuts used by the pipe2py runtime.
    if context and context.describe_input:
        return [(u'', u'textinput1', u'Enter Text', u'text', u'defunkt')]

    if context and context.describe_dependencies:
        return [u'pipefetchdata', u'pipeitembuilder', u'pipeloop', u'pipeoutput', u'piperename', u'pipetextinput', u'pipeurlbuilder']

    forever = pipe_forever()

    # We need to wrap submodules (used by loops) so we can pass the
    # input at runtime (as we can to subpipelines)
    def pipe_sw_72(context=None, _INPUT=None, conf=None, **kwargs):
        # Builds the search URL for one input item.
        return pipe_urlbuilder(context, _INPUT, conf=conf)

    # We need to wrap submodules (used by loops) so we can pass the
    # input at runtime (as we can to subpipelines)
    def pipe_sw_150(context=None, _INPUT=None, conf=None, **kwargs):
        # Fetches the JSON payload for one built URL.
        return pipe_fetchdata(context, _INPUT, conf=conf)

    # Text prompt -> item -> search URL -> fetched data -> renamed field.
    sw_479 = pipe_textinput(
        context, forever, conf={'debug': {'type': 'text', 'value': 'defunkt'}, 'default': {'type': 'text', 'value': 'defunkt'}, 'prompt': {'type': 'text', 'value': 'Enter Text'}, 'name': {'type': 'text', 'value': 'textinput1'}, 'position': {'type': 'number', 'value': ''}})

    sw_467 = pipe_itembuilder(
        context, forever, conf={'attrs': [{'value': {'terminal': 'attrs_1_value', 'type': 'text'}, 'key': {'type': 'text', 'value': 'title'}}]}, attrs_1_value=sw_479)

    sw_61 = pipe_loop(
        context, sw_467, embed=pipe_sw_72, conf={'assign_part': {'type': 'text', 'value': 'all'}, 'assign_to': {'type': 'text', 'value': 'api'}, 'emit_part': {'type': 'text', 'value': 'all'}, 'mode': {'type': 'text', 'value': 'assign'}, 'embed': {'type': 'module', 'value': {'type': 'urlbuilder', 'id': 'sw-72', 'conf': {'BASE': {'type': 'text', 'value': 'https://api.github.com/search/users'}, 'PARAM': [{'value': {'type': 'text', 'subkey': 'title'}, 'key': {'type': 'text', 'value': 'q'}}]}}}, 'with': {'type': 'text', 'value': ''}})

    sw_142 = pipe_loop(
        context, sw_61, embed=pipe_sw_150, conf={'assign_part': {'type': 'text', 'value': 'first'}, 'assign_to': {'type': 'text', 'value': 'info'}, 'mode': {'type': 'text', 'value': 'assign'}, 'embed': {'type': 'module', 'value': {'type': 'fetchdata', 'id': 'sw-150', 'conf': {'URL': {'type': 'url', 'subkey': 'api'}, 'path': {'type': 'text', 'value': 'items'}}}}, 'with': {'type': 'text', 'value': ''}})

    sw_351 = pipe_rename(
        context, sw_142, conf={'RULE': [{'field': {'type': 'text', 'value': 'language.0.content'}, 'op': {'type': 'text', 'value': 'copy'}, 'newval': {'type': 'text', 'value': 'description'}}]})

    _OUTPUT = pipe_output(
        context, sw_351, conf={})

    # NOTE(review): `_OUTPUT` (and the rename step `sw_351` feeding it) is
    # computed but never returned -- the function returns the pre-rename
    # stream `sw_142` instead.  Possibly a generator bug; confirm before
    # relying on the 'description' field downstream.
    return sw_142
if __name__ == "__main__":
pipeline = pipe_125e9fe8bb5f84526d21bebfec3ad116(Context())
for i in pipeline:
print i
| gpl-2.0 |
bioinformatics-ua/catalogue | emif/questionnaire/imports.py | 2 | 26425 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from questionnaire.models import Questionnaire, Choice, Question, QuestionSet
import os
import re
import json
from openpyxl import Workbook, load_workbook
from openpyxl.styles import Style, PatternFill, Alignment, Font, Border, Side
from openpyxl.cell import Cell
from openpyxl.worksheet.datavalidation import DataValidation, ValidationType
from searchengine.search_indexes import convert_text_to_slug
from searchengine.models import Slugs
from questionnaire.utils import *
from fingerprint.models import Answer, AnswerChange
import datetime
from django.db import transaction
from Levenshtein import ratio
from qprocessors.choice import choice_list, serialize_list
"""This class is used to import the fingerprint template
"""
class ImportQuestionnaire(object):
    """Base class for fingerprint-template importers.

    Concrete subclasses implement :meth:`import_questionnaire` for a
    specific on-disk format; use :meth:`factory` to obtain the right
    importer for a format name.
    """

    def __init__(self, file_path):
        # Path of the template file to import.
        self.file_path = file_path

    def import_questionnaire(self, merge=None):
        """Run the import; must be provided by subclasses."""
        raise NotImplementedError("Please Implement this method")

    def writeLog(self, log):
        # File logging is currently disabled; kept as a no-op hook so the
        # import code can call it unconditionally.
        pass

    def get_slug(self, slug, questionnaire):
        """Return a slug guaranteed to be free within *questionnaire*."""
        return next_free_slug(slug, create=False, scope=questionnaire)

    def format_number(self, number):
        """Zero-pad every dotted component after the first to two digits.

        E.g. ``"1.2.10"`` becomes ``"1.02.10"``.
        """
        parts = number.split(".")
        padded = []
        for part in parts[1:]:
            value = int(part)
            padded.append("0%d" % value if value < 10 else str(value))
        return parts[0] + "." + ".".join(padded)

    @staticmethod
    def factory(t_type, file_path):
        """Instantiate the importer matching the format name *t_type*."""
        if t_type == "excel":
            return ImportQuestionnaireExcel(file_path)
        raise Exception("The supplied format is not supported")
class CommentPlaceholder:
    """Stand-in for a spreadsheet cell: category rows have no data-type
    cell, so this supplies the fixed question type ``'comment'`` through
    the same ``.value`` attribute interface."""
    value = 'comment'
class ImportQuestionnaireExcel(ImportQuestionnaire):
    # Row kinds found in the spreadsheet.
    QUESTION=0
    CATEGORY=1
    # choice match mode: how existing database choices are matched against
    # the incoming value list (exact equality vs Levenshtein similarity).
    EXACT_MATCH=0
    SIMILARITY_MODE=1
def __init__(self, file_path):
    # Delegate to the base importer, which just records file_path.
    super(ImportQuestionnaireExcel, self).__init__(file_path)
# this function implements the similarity algorithm, baed on a levenstein similarity algorithm
# the idea is this algorithm be dynamically defined, for now its static
def __isSimilar(self, comparing_option, options, percentage):
    """Find the member of *options* most similar to *comparing_option*.

    Similarity is the Levenshtein ratio in [0, 1].  Returns a tuple
    ``(best_ratio, best_match)`` where ``best_match`` is None when no
    option scores strictly above *percentage*.
    """
    closest = 0
    match = None
    for option in options:
        this_ratio = ratio(comparing_option,option)
        # Track the best candidate above the acceptance threshold.
        if this_ratio > closest and this_ratio > percentage:
            closest = this_ratio
            match = option
    return (closest, match)
def __handleAnswerChanges(self, question, change_map, debug=False):
    """Propagate choice renames to stored answers.

    *change_map* maps old choice value -> new choice value.  Every
    Answer of *question* and every AnswerChange history row is
    re-serialized with the renamed keys.  With ``debug=True`` nothing
    is persisted.
    """
    if len(change_map) > 0:
        def __answerChange(data, change_map):
            # Remap renamed keys; keys absent from change_map are kept.
            response = choice_list(data).values()
            for res in response:
                try:
                    res['key'] = change_map[res['key']]
                except KeyError:
                    pass
            return serialize_list(response)
        # Handle answer changes modifications
        answers = Answer.objects.filter(question=question)
        for ans in answers:
            ans.data = __answerChange(ans.data, change_map)
            if not debug:
                ans.save()
            # Update answer history: both the old and the new values.
            ans_history = AnswerChange.objects.filter(answer=ans)
            for hist in ans_history:
                hist.old_value = __answerChange(hist.old_value, change_map)
                hist.new_value = __answerChange(hist.new_value, change_map)
                if not debug:
                    hist.save()
    # NOTE(review): dead triple-quoted string left by the author -- an
    # unfinished sketch for updating dependent questions.  It is a bare
    # string statement, so it has no runtime effect.
    '''print "UPDATE DEPENDENCIES FOR: %s" %(question.number)
    for key, value in change_map:
        print key
        print value
        print "--"
    dependent_questions = Question.objects.filter(checks__contains='dependent="%s,%s"' % (question.number, key))
    print "QUESTIONS DEPENDENT"
    print dependent_questions
    '''
    #raise Exception("CRAZY")
def __processChoices(self, row, question, list_aux, log, mode=EXACT_MATCH, match_percentage=0.75, debug=False, infer_function=None):
    """Synchronize the database choices of *question* with *list_aux*.

    Three passes: (1) keep exact matches (updating their sort order),
    (2) in SIMILARITY_MODE, rename existing choices that are
    Levenshtein-similar to an incoming value, (3) optionally ask
    *infer_function* about the remainder; anything still unmatched is
    created, and old choices absent from *list_aux* are deleted.
    Returns *list_aux*.  With ``debug=True`` nothing is persisted.
    """
    i = 0
    # get current questions if any
    old_choices = list(Choice.objects.filter(question=question).values_list('value', flat=True))
    change_map={} # maps the changes to be made over the question
    indexes_map = {} # since ordering is absolute, on first passthrough i generate the sortid numbers
    look_similar = [] # potential matches for 2nd pass similarity lookup
    maybe_new = [] # for 3rd pass we may infer
    # 1st pass: we do a first pass through to remove exact matches (is the same on both approaches)
    for ch in list_aux:
        i+=1
        indexes_map[ch] = i
        if ch in old_choices:
            choice = Choice.objects.get(question=question, value=ch)
            choice.sortid = i
            if not debug:
                choice.save()
            old_choices.remove(ch)
        else:
            look_similar.append(ch)
    def __similarMap(question, similar, ch):
        # Rename the existing choice *similar* to the incoming value *ch*.
        change_map[similar] = ch
        choice=Choice.objects.get(question=question, value=similar)
        choice.text_en = ch
        choice.value = ch
        choice.sortid=indexes_map[ch]
        if not debug:
            choice.save()
        old_choices.remove(similar)
    # 2nd pass: lets analyse the rest that are not exact matches
    for ch in look_similar:
        if mode==self.SIMILARITY_MODE:
            (closest, similar) = self.__isSimilar(ch, old_choices, match_percentage)
            # if considered similar
            if similar != None:
                if closest < 1:
                    print "Replacing '%r' which is %r similar to '%r' on question %r" % (similar, closest, ch, question.number)
                __similarMap(question, similar, ch)
            else:
                # maybe be new, to check on 3rd pass
                maybe_new.append(ch)
        # if this is exact match mode, we skip this step
        else:
            maybe_new.append(ch)
    # 3rd pass: if there's an boolean lambda infer function to non obvious cases dealing, run it
    run = list(maybe_new)
    if infer_function != None and len(maybe_new) > 0 and len(old_choices) > 0:
        for new in run:
            print "RUN for " + str(new)
            # NOTE(review): compares a list with an int -- always True on
            # Python 2; presumably len(old_choices) > 0 was intended.
            # Confirm before porting to Python 3 (TypeError there).
            if old_choices > 0:
                for old in old_choices:
                    if infer_function(question.number, new, old) == True:
                        print "Replacing '%r' which is user indicated similar to '%r' on question %r" % (old, new, question.number)
                        __similarMap(question, old, new)
                        maybe_new.remove(new)
                        #if we find a hit its done
                        break
            else:
                print "No more old choices, others must be new"
    for ch in maybe_new:
        # otherwise we create a new entry
        print "Create new '%s'" %(ch)
        try:
            choice = Choice(question=question, sortid=indexes_map[ch], text_en=ch, value=ch)
            log += '\n%s - Choice created %s ' % (row, choice)
            if not debug:
                choice.save()
            log += '\n%s - Choice saved %s ' % (row, choice)
        except:
            log += "\n%s - Error to save Choice %s" % (row, choice)
            self.writeLog(log)
            raise
    if len(old_choices)> 0:
        print "REMOVED:"
        print old_choices
    # at last, we must remove the choices that dont appear in the new listing (considered removed)
    Choice.objects.filter(question=question, value__in=old_choices).delete()
    if mode==self.SIMILARITY_MODE:
        self.__handleAnswerChanges(question, change_map, debug=debug)
    return list_aux
def __processDisposition(self, disposition):
    """Translate a layout label into its numeric disposition code:
    'horizontal' -> 1, 'dropdown' -> 2, anything else -> 0."""
    codes = {'horizontal': 1, 'dropdown': 2}
    return codes.get(disposition, 0)
def __handleQuestionNumber(self, level, qNumber, questionset):
    """Compute the number for a question being imported.

    Levels written as 'hN' are generated sequentially via *qNumber*
    and zero-padded; explicit dotted numbers (e.g. '2.03') are used
    as-is, shifting up by one every existing question at the same depth
    whose last component is >= the requested one, to make room.
    """
    questionNumber = None
    if level.startswith('h'):
        questionNumber = qNumber.getNumber(level)
        questionNumber = self.format_number(str(questionNumber))
    else:
        questionNumber = level
        pos = level.split('.')
        poslen = len(pos)
        for question in questionset.questions():
            this_q = question.number.split('.')
            if poslen == len(this_q):
                # NOTE(review): lexicographic (string) comparison of the
                # last components; relies on two-digit zero padding --
                # confirm for components >= 100.
                if pos[poslen-1] <= this_q[poslen-1]:
                    n = int(this_q[poslen-1])+1
                    if n < 10:
                        this_q[poslen-1] = '0'+str(n)
                    else:
                        this_q[poslen-1] = str(n)
                    question.number = ".".join(this_q)
                    question.save()
    #raise Exception('STOP THERE')
    return questionNumber
def __getChoices(self, question):
    ''' Gets the choices_array from a question back into an choices_array.
    Useful on merge operation that point dependencies to questions already on the database

    Returns a list of choice values; empty for non-choice question types.
    '''
    if question.type in ['choice', 'choice-freeform', 'choice-multiple', 'choice-multiple-freeform']:
        # BUG FIX: the original called Choice.objects.get(...) -- which
        # returns a single model instance with no values_list() method --
        # and passed the undefined name `true` (NameError).  Query the
        # queryset instead and materialize it as a plain list.
        choices = list(Choice.objects.filter(question=question).values_list('value', flat=True))
        return choices
    elif question.type in ['choice-yesno', 'choice-yesnodontknow']:
        # yes/no questions have an implicit, fixed set of choices.
        return ['yes', 'no', 'dontknow']
    return []
def __handleQuestion(self, type, row,type_Column, level_number_column, text_question_Column, _questions_rows,
        _choices_array, qNumber, questionset, log, _checks, _debug, questionnaire, mode=EXACT_MATCH, percentage=0.75, infer_function=None):
    """Create or update one Question (type=QUESTION) or category header
    (type=CATEGORY) from a spreadsheet row.

    Resolves the display number, slug, help text, tooltip flag,
    dependency check string (column 8, 'slug|choice-index'), validation
    metadata for 'open-validated' questions, and -- for choice types --
    synchronizes the choice list via __processChoices.  With
    ``_debug=True`` nothing is persisted.
    """
    try:
        slug = None
        text_en = None
        # Heading markup: either the level is given explicitly ('hN') or
        # derived from the depth of the dotted number.
        if level_number_column.value.startswith('h'):
            text_en = str(level_number_column.value) + '. ' + str(text_question_Column.value)
        else:
            level = len(level_number_column.value.split('.'))-1
            text_en = 'h%s. %s' % (str(level),str(text_question_Column.value))
        dataType_column = None
        if type == self.CATEGORY:
            # Categories have no data-type cell; use the placeholder.
            dataType_column = CommentPlaceholder()
        else:
            dataType_column = row[3]
        # Slug: explicit (column 7) or derived from the question text.
        if row[7].value:
            slug = row[7].value
        else:
            slug = convert_text_to_slug(str(row[1].value)[:50])
        slug = self.get_slug(slug, questionnaire)
        if row[5].value:
            helpText = row[5].value
        else:
            helpText = ''
        _tooltip = False
        if row[6].value:
            if str(row[6].value).lower() == 'yes':
                _tooltip = True
        #If has dependencies
        if row[8].value:
            try:
                dependencies_list = row[8]
                list_dep_aux = dependencies_list.value.split('|')
                question_num_parent = None
                try:
                    question_num_parent = _questions_rows.get(list_dep_aux[0]).number
                except AttributeError:
                    ''' If this is a merge, the dependant question can already be on the questionset,
                    lets try looking for it
                    '''
                    try:
                        question = Question.objects.get(slug_fk__slug1=list_dep_aux[0],
                            questionset=questionset)
                        # NOTE(review): stores question_num_parent while it
                        # is still None here (it is only set on the next
                        # line) -- looks unintended; confirm.
                        _questions_rows[list_dep_aux[0]] = question_num_parent
                        question_num_parent = question.number
                        _choices_array[list_dep_aux[0]] = self.__getChoices(question)
                    except Question.DoesNotExist:
                        raise Exception('The dependant with slug %s does not exist.' %(list_dep_aux[0]))
                # Dependency syntax: 'parent-slug|1-based-choice-index'.
                index_aux = int(str(list_dep_aux[1]))-1
                choice_parent_list = _choices_array.get(list_dep_aux[0])
                choice_parent = choice_parent_list[index_aux]
                _checks = 'dependent=\"%s,%s\"' % (str(question_num_parent), str(choice_parent))
            except:
                raise
        try:
            questionNumber = self.__handleQuestionNumber(level_number_column.value, qNumber, questionset)
        except:
            if type==self.QUESTION:
                log += "\n%s - Error to create question number %s" % (type_Column.row, text_en)
            elif type==self.CATEGORY:
                log += "\n%s - Error to create Category number %s" % (type_Column.row, text_en)
            self.writeLog(log)
            raise
        #Create or load slug
        slugs = Slugs.objects.filter(slug1=slug, description=text_en)
        if len(slugs) <= 0:
            slug_db = Slugs(slug1=slug, description=text_en)
            slug_db.save()
        else:
            slug_db = slugs[0]
        visible_default = False
        if row[10].value:
            if str(row[10].value).lower() == 'visible':
                visible_default = True
        # Questions count for statistics; categories do not.
        is_category=None
        is_stats=None
        if type==self.QUESTION:
            is_stats=True
            is_category=False
        elif type==self.CATEGORY:
            is_stats=False
            is_category=True
        try:
            # Merge path: update the existing question in place.
            question = Question.objects.get(slug_fk__slug1=slug_db.slug1, questionset=questionset)
            question.text_en=text_en
            question.number=str(questionNumber)
            question.type=dataType_column.value
            question.help_text=helpText
            question.stats=is_stats
            question.category=is_category
            question.tooltip=_tooltip
            question.checks=_checks
            question.visible_default=visible_default
        except Question.DoesNotExist:
            question = Question(questionset=questionset, text_en=text_en, number=str(questionNumber),
                type=dataType_column.value, help_text=helpText, slug=slug, slug_fk=slug_db, stats=is_stats,
                category=is_category, tooltip=_tooltip,
                checks=_checks, visible_default=visible_default,
                disposition=self.__processDisposition(row[11].value.lower()))
        if dataType_column.value in ['open-validated']:
            # Validation metadata: column 4 is either a named base type or
            # a raw regex; column 5 carries unit|unit_desc|help_text.
            ardict = {}
            if row[4].value:
                # some basic types dont need regex
                known_validations = {
                    "integer": "[+-]?\d+",
                    "decimal": "[+-]?\d*([.]\d*)?",
                    "scientific": "[+-]?\d*([.]\d*)?e[+-]?\d*([.]\d*)?",
                    "range": "[+\-]?\d*([.]\d*);[+\-]?\d*([.]\d*)",
                    "date": "\d{2}/\d{2}/\d{4}",
                    "time": "\d{2}:\d{2}:\d{2}",
                    "datetime": "\d{2}/\d{2}/\d{4} \d{2}:\d{2}:\d{2}",
                    "text": ".*"
                }
                try:
                    ardict['regex'] = known_validations[row[4].value]
                    ardict['base'] = row[4].value
                except KeyError:
                    # If this is not known, try to validate it as a regex
                    try:
                        re.compile(row[4].value)
                        ardict['regex'] = row[4].value
                    except re.error:
                        raise Exception("--ERROR: The regex on row %d, column 4 is not valid" % (type_Column.row))
            if row[5].value:
                split = row[5].value.split('|')
                lensplit = len(split)
                if lensplit == 1:
                    ardict['unit'] = split[0]
                    question.help_text=""
                elif lensplit == 2:
                    ardict['unit'] = split[0]
                    ardict['unit_desc'] = split[1]
                    question.help_text=""
                elif lensplit == 3:
                    ardict['unit'] = split[0]
                    ardict['unit_desc'] = split[1]
                    question.help_text = split[2]
                else:
                    raise Exception("-- ERROR: Invalid number of segments on help text row %d, column 5. Max syntax is unit|desc|help_text" % (type_Column.row))
            question.metadata = json.dumps(ardict)
        if not _debug:
            question.save()
        if type==self.QUESTION:
            log += '\n%s - Question created %s ' % (type_Column.row, question)
        elif type==self.CATEGORY:
            log += '\n%s - Category created %s ' % (type_Column.row, question)
        _questions_rows[slug] = question
        if type == self.QUESTION:
            if dataType_column.value in ['choice', 'choice-freeform', 'choice-multiple', 'choice-multiple-freeform']:
                _choices_array_aux = []
                # Parse of values list
                values_list = row[4]
                if (values_list!=None and values_list.value!=None):
                    list_aux = values_list.value.split('|')
                    _choices_array[slug] = self.__processChoices(type_Column.row, question, list_aux, log, debug=_debug,
                        mode=mode, match_percentage=percentage, infer_function=infer_function)
            if dataType_column.value in ['choice-yesno',
                    'choice-yesnodontknow']:
                # Implicit, fixed choice set for yes/no questions.
                _choices_array[slug] = ['yes', 'no', 'dontknow']
    except:
        log += "\n%s - Error to save question %s" % (type_Column.row, text_en)
        self.writeLog(log)
        raise
    @transaction.commit_on_success
    def import_questionnaire(self, merge=None, mode=EXACT_MATCH, percentage=0.75, infer_function=None):
        """Import a questionnaire definition from the workbook at ``self.file_path``.

        :param merge: id of an existing Questionnaire to merge into, or None
            to create a new one from cell B1 of the active sheet.
        :param mode / percentage / infer_function: matching options forwarded
            unchanged to the per-row handlers (presumably for fuzzy-matching
            choice values -- TODO confirm against __processChoices).
        :returns: True on success.
        :raises: re-raises any row-level error after appending it to the log.

        The whole run is one transaction (``commit_on_success``), so a failure
        rolls back every object created so far.
        """
        _debug = False  # when True, nothing is persisted to the database
        qNumber = QuestionNumber()
        slugs = []  # NOTE(review): appears unused in this method
        wb = load_workbook(filename = self.file_path, data_only=True)
        ws = wb.get_active_sheet()
        log = ''
        # Cell B1: Name of questionnaire
        name = ws.cell('B1').value
        slugQ = convert_text_to_slug(ws.cell('B1').value)
        disable = False
        questionnaire = None
        if merge != None:
            # Merge mode: attach imported rows to an existing questionnaire.
            try:
                questionnaire = Questionnaire.objects.get(id=merge)
            except Questionnaire.DoesNotExist:
                raise Exception('Questionnaire does not exist, so cant merge against it.')
        else:
            # Create mode: build a fresh questionnaire named after cell B1.
            questionnaire = Questionnaire(name=name, disable=disable, slug=slugQ, redirect_url='/')
            log += '\nQuestionnaire created %s ' % questionnaire
            if not _debug:
                questionnaire.save()
                log += '\nQuestionnaire saved %s ' % questionnaire
        try:
            _choices_array = {}   # question slug -> parsed choice list
            _questions_rows = {}  # question slug -> created Question/Category
            #############################
            # TIPS:
            # Type of Row: QuestionSet, Category, Question
            # Columns: Type, Text/Question, Level/Number, Data type, Value list, Help text/Description, Tooltip, Slug, Stats
            #############################
            # Data rows start at the third row of the sheet (rows[2:]).
            for row in ws.rows[2:]:
                if len(row) > 0 and row[0].value != None:
                    type_Column = row[0]
                    text_question_Column = row[1]
                    if (text_question_Column.value!=None):
                        # Drop any non-ASCII characters from the question text.
                        text_question_Column.value = text_question_Column.value.encode('ascii', 'ignore')
                    level_number_column = row[2]
                    _checks = ''
                    # Type = QUESTIONSET
                    # Columns required: Type, Text/Question
                    # Columns optional: Help text/Description, Tooltip
                    if str(type_Column.value) == "QuestionSet":
                        sortid = str(level_number_column.value)
                        try:
                            qNumber.getNumber('h0', sortid)
                        except:
                            self.writeLog(log)
                            raise
                        text_en = 'h1. %s' % text_question_Column.value
                        slug_qs = None
                        if row[7].value:
                            # Column 7 may carry an explicit slug override.
                            slug_qs = row[7].value
                        else:
                            slug_qs = str(slugQ) + "_" + convert_text_to_slug(str(text_question_Column.value))
                        if row[5].value:
                            helpText = row[5].value
                        else:
                            helpText = ""
                        tooltip = False
                        if row[6].value:
                            if str(row[6].value).lower() == 'yes':
                                tooltip = True
                        questionset = None
                        created = False
                        # Get-or-create the QuestionSet for this heading.
                        try:
                            questionset = QuestionSet.objects.get(questionnaire=questionnaire, sortid=sortid, heading=slug_qs)
                        except QuestionSet.DoesNotExist:
                            questionset = QuestionSet(questionnaire=questionnaire, sortid=sortid, heading=slug_qs, checks='required', text_en=text_en, help_text=helpText, tooltip=tooltip)
                            created=True
                        if created:
                            log += '\n%s - QuestionSet created %s - %s ' % (type_Column.row, sortid, text_en)
                        else:
                            log += '\n%s - QuestionSet retrieved %s - %s ' % (type_Column.row, sortid, text_en)
                        try:
                            if not _debug:
                                questionset.save()
                                log += '\n%s - QuestionSet saved %s - %s ' % (type_Column.row, sortid, text_en)
                        except:
                            log += "\n%s - Error to save questionset %s - %s" % (type_Column.row, sortid, text_en)
                            self.writeLog(log)
                            raise
                        #if not created:
                        #    last_question = Question.objects.filter(questionset=questionset).order_by('-id')[0]
                        #    qNumber.setState(last_question.number)
                    # Type = CATEGORY
                    # Columns required: Type, Text/Question, Level/Number, Category
                    # Columns optional: Help text/Description, Slug, Tooltip, Dependencies
                    elif str(type_Column.value) == "Category":
                        # NOTE(review): 'questionset' is only bound by a prior
                        # QuestionSet row; a Category/Question row appearing
                        # first would raise NameError -- verify sheet ordering.
                        self.__handleQuestion(self.CATEGORY, row, type_Column, level_number_column, text_question_Column,
                                              _questions_rows, _choices_array, qNumber, questionset, log, _checks, _debug,
                                              questionnaire, mode=mode, percentage=percentage, infer_function=infer_function)
                    # Type = QUESTION
                    # Columns required: Type, Text/Question, Level/Number, Data Type, Category, Stats
                    # Columns optional: Value List, Help text/Description, Tooltip, Dependencies
                    else:
                        self.__handleQuestion(self.QUESTION, row, type_Column, level_number_column, text_question_Column,
                                              _questions_rows, _choices_array, qNumber, questionset, log, _checks, _debug,
                                              questionnaire, mode=mode, percentage=percentage, infer_function=infer_function)
        except:
            log += '\nError to save questionsets and questions of the questionnaire %s ' % questionnaire
            self.writeLog(log)
            raise
        log += '\nQuestionnaire %s, questionsets, questions and choices created with success!! ' % questionnaire
        self.writeLog(log)
        #raise Exception('Dont commit me dude')
        return True
| gpl-3.0 |
indigo-dc/im | IM/UnixHTTPAdapter.py | 2 | 2982 | # IM - Infrastructure Manager
# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The following file has been taken from requests-unixsocket
# https://github.com/msabramo/requests-unixsocket/blob/master/requests_unixsocket/adapters.py
import socket
from requests.adapters import HTTPAdapter
from requests.compat import urlparse, unquote
try:
from requests.packages.urllib3.connection import HTTPConnection
from requests.packages.urllib3.connectionpool import HTTPConnectionPool
except ImportError:
from urllib3.connection import HTTPConnection
from urllib3.connectionpool import HTTPConnectionPool
class UnixHTTPConnection(HTTPConnection):
    """HTTP connection whose transport is a unix domain socket."""

    def __init__(self, unix_socket_url, timeout=60):
        """Create an HTTP connection to a unix domain socket.

        :param unix_socket_url: A URL with a scheme of 'http+unix' whose
            netloc is a percent-encoded path to a unix domain socket, e.g.
            'http+unix://%2Ftmp%2Fprofilesvc.sock/status/pid'
        """
        # The host name is irrelevant for unix sockets; 'localhost' is a stub.
        HTTPConnection.__init__(self, 'localhost', timeout=timeout)
        self.unix_socket_url = unix_socket_url
        self.timeout = timeout

    def connect(self):
        """Open the AF_UNIX socket named by the URL netloc and store it."""
        path = unquote(urlparse(self.unix_socket_url).netloc)
        conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        conn.settimeout(self.timeout)
        conn.connect(path)
        self.sock = conn
class UnixHTTPConnectionPool(HTTPConnectionPool):
    """Connection pool that hands out :class:`UnixHTTPConnection` objects."""

    def __init__(self, socket_path, timeout=60):
        # Host is a placeholder; the socket path determines the endpoint.
        HTTPConnectionPool.__init__(self, 'localhost', timeout=timeout)
        self.socket_path = socket_path
        self.timeout = timeout

    def _new_conn(self):
        # urllib3 calls this whenever the pool needs a fresh connection.
        return UnixHTTPConnection(self.socket_path, self.timeout)
class UnixHTTPAdapter(HTTPAdapter):
    """Requests transport adapter that speaks HTTP over unix domain sockets."""

    def __init__(self, timeout=60):
        super(UnixHTTPAdapter, self).__init__()
        self.timeout = timeout

    def get_connection(self, socket_path, proxies=None):
        """Return a connection pool for *socket_path*.

        :raises ValueError: if a proxy is configured for the URL's scheme,
            since proxying over a unix socket is not supported.
        """
        scheme = urlparse(socket_path.lower()).scheme
        if (proxies or {}).get(scheme):
            raise ValueError('%s does not support specifying proxies'
                             % self.__class__.__name__)
        return UnixHTTPConnectionPool(socket_path, self.timeout)

    def request_url(self, request, proxies):
        # The path-style URL is used verbatim; no host rewriting is needed.
        return request.path_url
| gpl-3.0 |
pritamsamadder048/libforensics | unittests/tests/dec/splitraw.py | 13 | 4378 | # Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the lf.dec.splitraw module."""
# stdlib imports
import os.path
from unittest import TestCase
# local imports
from lf.dec.consts import SEEK_SET, SEEK_CUR, SEEK_END
from lf.dec.base import StreamInfo
from lf.dec.splitraw import SplitRaw, SplitRawIStream
__docformat__ = "restructuredtext en"
__all__ = [
"SplitRawTestCase", "SplitRawIStreamTestCase"
]
class SplitRawTestCase(TestCase):
    """Tests for the SplitRaw evidence container class."""

    def setUp(self):
        paths = [
            os.path.join("data", "txt", "alpha{0}.txt".format(index))
            for index in range(1, 7)
        ]
        self.splitraw = SplitRaw(paths)
    # end def setUp

    def test_list(self):
        self.assertEqual(self.splitraw.list(), [StreamInfo(0)])
    # end def test_list

    def test_open(self):
        self.assertEqual(self.splitraw.open(), self.splitraw.stream)
    # end def test_open
# end class SplitRawTestCase
class SplitRawIStreamTestCase(TestCase):
    """Tests for SplitRawIStream over six split files that together hold the
    26 bytes b"abcdefghijklmnopqrstuvwxyz"."""

    def setUp(self):
        names = ["alpha{0}.txt".format(x) for x in range(1,7)]
        names = [os.path.join("data", "txt", name) for name in names]
        self.names = names
        self.sris = SplitRawIStream(names)
    # end def setUp

    def test__init__(self):
        ae = self.assertEqual
        ae(self.sris.size, 26)
        ae(self.sris._names, self.names)
    # end def test__init__

    def test_seek(self):
        ae = self.assertEqual
        ar = self.assertRaises
        sris = self.sris
        ae(sris.seek(10, SEEK_SET), 10)
        ae(sris._position, 10)
        # Absolute seeks before the start are rejected.
        ar(ValueError, sris.seek, -10, SEEK_SET)
        sris.seek(3, SEEK_SET)
        ae(sris.seek(5, SEEK_CUR), 8)
        ae(sris._position, 8)
        ae(sris.seek(-2, SEEK_CUR), 6)
        ae(sris._position, 6)
        ae(sris.seek(-3, SEEK_END), 23)
        ae(sris._position, 23)
        # Seeking past the end is allowed; reads there return nothing.
        ae(sris.seek(3, SEEK_END), 29)
        ae(sris._position, 29)
    # end def test_seek

    def test_tell(self):
        ae = self.assertEqual
        sris = self.sris
        sris._position = 0
        ae(sris.tell(), 0)
        sris._position = 2
        ae(sris.tell(), 2)
    # end def test_tell

    def test_read(self):
        ae = self.assertEqual
        self.sris.seek(0, SEEK_SET)
        ae(self.sris.read(0), b"")
        ae(self.sris.read(1), b"a")
        ae(self.sris.read(2), b"bc")
        ae(self.sris.read(), b"defghijklmnopqrstuvwxyz")
        # A read that crosses the end is truncated to the available bytes.
        self.sris.seek(-3, SEEK_END)
        ae(self.sris.read(5), b"xyz")
        self.sris.seek(30, SEEK_SET)
        ae(self.sris.read(), b"")
    # end def test_read

    def test_readall(self):
        ae = self.assertEqual
        self.sris.seek(0, SEEK_SET)
        ae(self.sris.readall(), b"abcdefghijklmnopqrstuvwxyz")
        self.sris.seek(3, SEEK_SET)
        ae(self.sris.readall(), b"defghijklmnopqrstuvwxyz")
    # end def test_readall

    def test_readinto(self):
        ae = self.assertEqual
        sris = self.sris
        barray0 = bytearray(5)
        barray1 = bytearray(10)
        barray2 = bytearray(26)
        barray3 = bytearray(1)
        # Two back-to-back reads starting 12 bytes from the end: the second
        # one is short (only 7 bytes remain) and leaves the tail untouched.
        sris.seek(-12, SEEK_END)
        retval0 = sris.readinto(barray0)
        retval1 = sris.readinto(barray1)
        sris.seek(0, SEEK_SET)
        retval2 = sris.readinto(barray2)
        sris.seek(30, SEEK_SET)
        retval3 = sris.readinto(barray3)
        ae(retval0, 5)
        ae(retval1, 7)
        ae(retval2, 26)
        ae(retval3, 0)
        ae(barray0, b"opqrs")
        ae(barray1, b"tuvwxyz\x00\x00\x00")
        ae(barray2, b"abcdefghijklmnopqrstuvwxyz")
        ae(barray3, b"\x00")
    # end def test_readinto
# end class SplitRawIStreamTestCase
| lgpl-3.0 |
awangga/csvfromdbf | dbfread/codepages.py | 8 | 3069 | # Table from dbf.py by Ethan Furman
# Map of DBF "language driver" byte -> (Python codec name, human description).
codepages = {
    0x00: ('ascii', "plain ol' ascii"),
    0x01: ('cp437', 'U.S. MS-DOS'),
    0x02: ('cp850', 'International MS-DOS'),
    0x03: ('cp1252', 'Windows ANSI'),
    0x04: ('mac_roman', 'Standard Macintosh'),
    0x08: ('cp865', 'Danish OEM'),
    0x09: ('cp437', 'Dutch OEM'),
    0x0A: ('cp850', 'Dutch OEM (secondary)'),
    0x0B: ('cp437', 'Finnish OEM'),
    0x0D: ('cp437', 'French OEM'),
    0x0E: ('cp850', 'French OEM (secondary)'),
    0x0F: ('cp437', 'German OEM'),
    0x10: ('cp850', 'German OEM (secondary)'),
    0x11: ('cp437', 'Italian OEM'),
    0x12: ('cp850', 'Italian OEM (secondary)'),
    0x13: ('cp932', 'Japanese Shift-JIS'),
    0x14: ('cp850', 'Spanish OEM (secondary)'),
    0x15: ('cp437', 'Swedish OEM'),
    0x16: ('cp850', 'Swedish OEM (secondary)'),
    0x17: ('cp865', 'Norwegian OEM'),
    0x18: ('cp437', 'Spanish OEM'),
    0x19: ('cp437', 'English OEM (Britain)'),
    0x1A: ('cp850', 'English OEM (Britain) (secondary)'),
    0x1B: ('cp437', 'English OEM (U.S.)'),
    0x1C: ('cp863', 'French OEM (Canada)'),
    0x1D: ('cp850', 'French OEM (secondary)'),
    0x1F: ('cp852', 'Czech OEM'),
    0x22: ('cp852', 'Hungarian OEM'),
    0x23: ('cp852', 'Polish OEM'),
    0x24: ('cp860', 'Portuguese OEM'),
    0x25: ('cp850', 'Portuguese OEM (secondary)'),
    0x26: ('cp866', 'Russian OEM'),
    0x37: ('cp850', 'English OEM (U.S.) (secondary)'),
    0x40: ('cp852', 'Romanian OEM'),
    0x4D: ('cp936', 'Chinese GBK (PRC)'),
    0x4E: ('cp949', 'Korean (ANSI/OEM)'),
    0x4F: ('cp950', 'Chinese Big 5 (Taiwan)'),
    0x50: ('cp874', 'Thai (ANSI/OEM)'),
    0x57: ('cp1252', 'ANSI'),
    0x58: ('cp1252', 'Western European ANSI'),
    0x59: ('cp1252', 'Spanish ANSI'),
    0x64: ('cp852', 'Eastern European MS-DOS'),
    0x65: ('cp866', 'Russian MS-DOS'),
    0x66: ('cp865', 'Nordic MS-DOS'),
    0x67: ('cp861', 'Icelandic MS-DOS'),
    # 0x68: (None, 'Kamenicky (Czech) MS-DOS'),
    # 0x69: (None, 'Mazovia (Polish) MS-DOS'),
    0x6a: ('cp737', 'Greek MS-DOS (437G)'),
    0x6b: ('cp857', 'Turkish MS-DOS'),
    0x78: ('cp950', 'Traditional Chinese '
                    '(Hong Kong SAR, Taiwan) Windows'),
    0x79: ('cp949', 'Korean Windows'),
    0x7a: ('cp936', 'Chinese Simplified (PRC, Singapore) Windows'),
    0x7b: ('cp932', 'Japanese Windows'),
    0x7c: ('cp874', 'Thai Windows'),
    0x7d: ('cp1255', 'Hebrew Windows'),
    0x7e: ('cp1256', 'Arabic Windows'),
    0xc8: ('cp1250', 'Eastern European Windows'),
    0xc9: ('cp1251', 'Russian Windows'),
    0xca: ('cp1254', 'Turkish Windows'),
    0xcb: ('cp1253', 'Greek Windows'),
    0x96: ('mac_cyrillic', 'Russian Macintosh'),
    0x97: ('mac_latin2', 'Macintosh EE'),
    0x98: ('mac_greek', 'Greek Macintosh'),
    }


def guess_encoding(language_driver):
    """Return the Python codec name for a DBF language driver byte.

    :param language_driver: the language driver byte from the DBF header.
    :raises LookupError: if the byte does not map to a known code page.
    """
    # EAFP: a single dict lookup instead of the membership-test-then-index
    # pair used previously.
    try:
        return codepages[language_driver][0]
    except KeyError:
        # Fixed typo in the original message ("languager driver").
        raise LookupError('Unable to guess encoding '
                          'for language driver byte '
                          '0x{:x}'.format(language_driver))
| agpl-3.0 |
songfj/calibre | src/cherrypy/process/plugins.py | 81 | 25628 | """Site services for use with a Web Site Process Bus."""
import os
import re
import signal as _signal
import sys
import time
import threading
from cherrypy._cpcompat import basestring, get_daemon, get_thread_ident, ntob, set
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file has
# "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
# Snapshot of the working directory taken at import time; see the
# explanation in the comment block above.
_module__file__base = os.getcwd()
class SimplePlugin(object):
    """Plugin base class which auto-subscribes methods for known channels."""

    bus = None
    """A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine."""

    def __init__(self, bus):
        self.bus = bus

    def subscribe(self):
        """Register this object as a (multi-channel) listener on the bus."""
        # A method named after a channel (start, stop, exit, ...) becomes
        # that channel's listener; channels without a matching method are
        # simply skipped.
        for channel in self.bus.listeners:
            handler = getattr(self, channel, None)
            if handler is None:
                continue
            self.bus.subscribe(channel, handler)

    def unsubscribe(self):
        """Unregister this object as a listener on the bus."""
        for channel in self.bus.listeners:
            handler = getattr(self, channel, None)
            if handler is None:
                continue
            self.bus.unsubscribe(channel, handler)
class SignalHandler(object):
    """Register bus channels (and listeners) for system signals.

    You can modify what signals your application listens for, and what it does
    when it receives signals, by modifying :attr:`SignalHandler.handlers`,
    a dict of {signal name: callback} pairs. The default set is::

        handlers = {'SIGTERM': self.bus.exit,
                    'SIGHUP': self.handle_SIGHUP,
                    'SIGUSR1': self.bus.graceful,
                   }

    The :func:`SignalHandler.handle_SIGHUP`` method calls
    :func:`bus.restart()<cherrypy.process.wspbus.Bus.restart>`
    if the process is daemonized, but
    :func:`bus.exit()<cherrypy.process.wspbus.Bus.exit>`
    if the process is attached to a TTY. This is because Unix window
    managers tend to send SIGHUP to terminal windows when the user closes them.

    Feel free to add signals which are not available on every platform. The
    :class:`SignalHandler` will ignore errors raised from attempting to register
    handlers for unknown signals.
    """

    handlers = {}
    """A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit)."""

    signals = {}
    """A map from signal numbers to names."""

    # Build the number -> name map once, at class-definition time, from the
    # SIG* constants exported by the signal module (the SIG_* pseudo-handlers
    # like SIG_DFL/SIG_IGN are excluded).
    for k, v in vars(_signal).items():
        if k.startswith('SIG') and not k.startswith('SIG_'):
            signals[v] = k
    del k, v

    def __init__(self, bus):
        self.bus = bus
        # Set default handlers
        self.handlers = {'SIGTERM': self.bus.exit,
                         'SIGHUP': self.handle_SIGHUP,
                         'SIGUSR1': self.bus.graceful,
                        }

        if sys.platform[:4] == 'java':
            # Jython cannot register SIGUSR1 and needs an explicit SIGINT
            # handler; see http://bugs.jython.org/issue1313.
            del self.handlers['SIGUSR1']
            self.handlers['SIGUSR2'] = self.bus.graceful
            self.bus.log("SIGUSR1 cannot be set on the JVM platform. "
                         "Using SIGUSR2 instead.")
            self.handlers['SIGINT'] = self._jython_SIGINT_handler

        # Maps signal number -> the handler that was installed before ours,
        # so unsubscribe() can restore it.
        self._previous_handlers = {}

    def _jython_SIGINT_handler(self, signum=None, frame=None):
        # See http://bugs.jython.org/issue1313
        self.bus.log('Keyboard Interrupt: shutting down bus')
        self.bus.exit()

    def subscribe(self):
        """Subscribe self.handlers to signals."""
        for sig, func in self.handlers.items():
            try:
                self.set_handler(sig, func)
            except ValueError:
                # Signal not available on this platform; skip it silently,
                # as promised in the class docstring.
                pass

    def unsubscribe(self):
        """Unsubscribe self.handlers from signals."""
        for signum, handler in self._previous_handlers.items():
            signame = self.signals[signum]

            if handler is None:
                # No Python-level handler was registered before ours.
                self.bus.log("Restoring %s handler to SIG_DFL." % signame)
                handler = _signal.SIG_DFL
            else:
                self.bus.log("Restoring %s handler %r." % (signame, handler))

            try:
                our_handler = _signal.signal(signum, handler)
                if our_handler is None:
                    self.bus.log("Restored old %s handler %r, but our "
                                 "handler was not registered." %
                                 (signame, handler), level=30)
            except ValueError:
                self.bus.log("Unable to restore %s handler %r." %
                             (signame, handler), level=40, traceback=True)

    def set_handler(self, signal, listener=None):
        """Subscribe a handler for the given signal (number or name).

        If the optional 'listener' argument is provided, it will be
        subscribed as a listener for the given signal's channel.

        If the given signal name or number is not available on the current
        platform, ValueError is raised.
        """
        if isinstance(signal, basestring):
            # Name given: resolve it to a number via the signal module.
            signum = getattr(_signal, signal, None)
            if signum is None:
                raise ValueError("No such signal: %r" % signal)
            signame = signal
        else:
            # Number given: resolve it to a name via our class-level map.
            try:
                signame = self.signals[signal]
            except KeyError:
                raise ValueError("No such signal: %r" % signal)
            signum = signal

        prev = _signal.signal(signum, self._handle_signal)
        self._previous_handlers[signum] = prev

        if listener is not None:
            self.bus.log("Listening for %s." % signame)
            self.bus.subscribe(signame, listener)

    def _handle_signal(self, signum=None, frame=None):
        """Python signal handler (self.set_handler subscribes it for you)."""
        signame = self.signals[signum]
        self.bus.log("Caught signal %s." % signame)
        # Dispatch to whatever listeners subscribed to the signal's channel.
        self.bus.publish(signame)

    def handle_SIGHUP(self):
        """Restart if daemonized, else exit."""
        if os.isatty(sys.stdin.fileno()):
            # not daemonized (may be foreground or background)
            self.bus.log("SIGHUP caught but not daemonized. Exiting.")
            self.bus.exit()
        else:
            self.bus.log("SIGHUP caught while daemonized. Restarting.")
            self.bus.restart()
try:
import pwd, grp
except ImportError:
pwd, grp = None, None
class DropPrivileges(SimplePlugin):
    """Drop privileges. uid/gid arguments not available on Windows.

    Special thanks to Gavin Baker: http://antonym.org/node/100.
    """

    def __init__(self, bus, umask=None, uid=None, gid=None):
        SimplePlugin.__init__(self, bus)
        # Set once start() has applied uid/gid/umask, so a bus restart does
        # not try to drop privileges a second time.
        self.finalized = False
        self.uid = uid
        self.gid = gid
        self.umask = umask

    def _get_uid(self):
        return self._uid
    def _set_uid(self, val):
        if val is not None:
            if pwd is None:
                # Not a Unix platform; uid cannot be applied.
                self.bus.log("pwd module not available; ignoring uid.",
                             level=30)
                val = None
            elif isinstance(val, basestring):
                # Accept a user name and resolve it to a numeric uid.
                val = pwd.getpwnam(val)[2]
        self._uid = val
    uid = property(_get_uid, _set_uid,
                   doc="The uid under which to run. Availability: Unix.")

    def _get_gid(self):
        return self._gid
    def _set_gid(self, val):
        if val is not None:
            if grp is None:
                self.bus.log("grp module not available; ignoring gid.",
                             level=30)
                val = None
            elif isinstance(val, basestring):
                # Accept a group name and resolve it to a numeric gid.
                val = grp.getgrnam(val)[2]
        self._gid = val
    gid = property(_get_gid, _set_gid,
                   doc="The gid under which to run. Availability: Unix.")

    def _get_umask(self):
        return self._umask
    def _set_umask(self, val):
        if val is not None:
            try:
                os.umask
            except AttributeError:
                self.bus.log("umask function not available; ignoring umask.",
                             level=30)
                val = None
        self._umask = val
    umask = property(_get_umask, _set_umask,
                     doc="""The default permission mode for newly created files and directories.

        Usually expressed in octal format, for example, ``0644``.

        Availability: Unix, Windows.
        """)

    def start(self):
        # uid/gid
        def current_ids():
            """Return the current (uid, gid) if available."""
            name, group = None, None
            if pwd:
                name = pwd.getpwuid(os.getuid())[0]
            if grp:
                group = grp.getgrgid(os.getgid())[0]
            return name, group

        if self.finalized:
            if not (self.uid is None and self.gid is None):
                self.bus.log('Already running as uid: %r gid: %r' %
                             current_ids())
        else:
            if self.uid is None and self.gid is None:
                if pwd or grp:
                    self.bus.log('uid/gid not set', level=30)
            else:
                self.bus.log('Started as uid: %r gid: %r' % current_ids())
                # Drop the group first: once the uid is dropped we may no
                # longer have permission to call setgid/setgroups.
                if self.gid is not None:
                    os.setgid(self.gid)
                    os.setgroups([])
                if self.uid is not None:
                    os.setuid(self.uid)
                self.bus.log('Running as uid: %r gid: %r' % current_ids())

        # umask
        if self.finalized:
            if self.umask is not None:
                self.bus.log('umask already set to: %03o' % self.umask)
        else:
            if self.umask is None:
                self.bus.log('umask not set', level=30)
            else:
                old_umask = os.umask(self.umask)
                self.bus.log('umask old: %03o, new: %03o' %
                             (old_umask, self.umask))

        self.finalized = True

    # This is slightly higher than the priority for server.start
    # in order to facilitate the most common use: starting on a low
    # port (which requires root) and then dropping to another user.
    start.priority = 77
class Daemonizer(SimplePlugin):
    """Daemonize the running script.

    Use this with a Web Site Process Bus via::

        Daemonizer(bus).subscribe()

    When this component finishes, the process is completely decoupled from
    the parent environment. Please note that when this component is used,
    the return code from the parent process will still be 0 if a startup
    error occurs in the forked children. Errors in the initial daemonizing
    process still return proper exit codes. Therefore, if you use this
    plugin to daemonize, don't use the return code as an accurate indicator
    of whether the process fully started. In fact, that return code only
    indicates if the process succesfully finished the first fork.
    """

    def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
                 stderr='/dev/null'):
        SimplePlugin.__init__(self, bus)
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.finalized = False

    def start(self):
        if self.finalized:
            # NOTE(review): original message spelling 'deamonized' kept
            # as-is (runtime string).
            self.bus.log('Already deamonized.')

        # forking has issues with threads:
        # http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
        # "The general problem with making fork() work in a multi-threaded
        #  world is what to do with all of the threads..."
        # So we check for active threads:
        if threading.activeCount() != 1:
            self.bus.log('There are %r active threads. '
                         'Daemonizing now may cause strange failures.' %
                         threading.enumerate(), level=30)

        # See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        # (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
        # and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012

        # Finish up with the current stdout/stderr
        sys.stdout.flush()
        sys.stderr.flush()

        # Do first fork.
        try:
            pid = os.fork()
            if pid == 0:
                # This is the child process. Continue.
                pass
            else:
                # This is the first parent. Exit, now that we've forked.
                self.bus.log('Forking once.')
                os._exit(0)
        except OSError:
            # Python raises OSError rather than returning negative numbers.
            exc = sys.exc_info()[1]
            sys.exit("%s: fork #1 failed: (%d) %s\n"
                     % (sys.argv[0], exc.errno, exc.strerror))

        # Become the leader of a new session, detaching from the
        # controlling terminal.
        os.setsid()

        # Do second fork
        try:
            pid = os.fork()
            if pid > 0:
                self.bus.log('Forking twice.')
                os._exit(0) # Exit second parent
        except OSError:
            exc = sys.exc_info()[1]
            sys.exit("%s: fork #2 failed: (%d) %s\n"
                     % (sys.argv[0], exc.errno, exc.strerror))

        os.chdir("/")
        os.umask(0)

        # Redirect the standard streams to the configured files.
        si = open(self.stdin, "r")
        so = open(self.stdout, "a+")
        se = open(self.stderr, "a+")

        # os.dup2(fd, fd2) will close fd2 if necessary,
        # so we don't explicitly close stdin/out/err.
        # See http://docs.python.org/lib/os-fd-ops.html
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())

        self.bus.log('Daemonized to PID: %s' % os.getpid())
        self.finalized = True
    start.priority = 65
class PIDFile(SimplePlugin):
    """Maintain a PID file via a WSPBus."""

    def __init__(self, bus, pidfile):
        SimplePlugin.__init__(self, bus)
        self.pidfile = pidfile
        self.finalized = False

    def start(self):
        """Write the current PID to the file (only once per process)."""
        pid = os.getpid()
        if not self.finalized:
            open(self.pidfile, "wb").write(ntob("%s" % pid, 'utf8'))
            self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
            self.finalized = True
        else:
            self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
    start.priority = 70

    def exit(self):
        """Best-effort removal of the PID file on bus exit."""
        try:
            os.remove(self.pidfile)
            self.bus.log('PID file removed: %r.' % self.pidfile)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Deliberately best-effort: a missing/locked file is not fatal.
            pass
class PerpetualTimer(threading._Timer):
    """A responsive subclass of threading._Timer whose run() method repeats.

    Use this timer only when you really need a very interruptible timer;
    this checks its 'finished' condition up to 20 times a second, which can
    results in pretty high CPU usage
    """

    def __init__(self, *args, **kwargs):
        """Override parent constructor to allow an optional 'bus' keyword."""
        # BUG FIX: run() logs errors via self.bus, but threading._Timer never
        # sets that attribute, so any callback error raised a second,
        # misleading AttributeError.  Accept an optional 'bus' kwarg (as the
        # sibling BackgroundTask does) and guard the log call below.
        self.bus = kwargs.pop('bus', None)
        threading._Timer.__init__(self, *args, **kwargs)

    def run(self):
        while True:
            self.finished.wait(self.interval)
            if self.finished.isSet():
                return
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                if self.bus:
                    self.bus.log(
                        "Error in perpetual timer thread function %r." %
                        self.function, level=40, traceback=True)
                # Quit on first error to avoid massive logs.
                raise
class BackgroundTask(threading.Thread):
    """A subclass of threading.Thread whose run() method repeats.

    Use this class for most repeating tasks. It uses time.sleep() to wait
    for each interval, which isn't very responsive; that is, even if you call
    self.cancel(), you'll have to wait until the sleep() call finishes before
    the thread stops. To compensate, it defaults to being daemonic, which means
    it won't delay stopping the whole process.
    """

    def __init__(self, interval, function, args=[], kwargs={}, bus=None):
        threading.Thread.__init__(self)
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.running = False
        self.bus = bus

    def cancel(self):
        """Ask the loop in run() to stop at its next wakeup."""
        self.running = False

    def run(self):
        self.running = True
        while self.running:
            time.sleep(self.interval)
            if not self.running:
                break
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                if self.bus:
                    self.bus.log("Error in background task thread function %r."
                                 % self.function, level=40, traceback=True)
                # Quit on first error to avoid massive logs.
                raise

    def _set_daemon(self):
        # Hook consulted by Python 2's threading.Thread to decide daemonism.
        return True
class Monitor(SimplePlugin):
    """WSPBus listener to periodically run a callback in its own thread."""

    callback = None
    """The function to call at intervals."""

    frequency = 60
    """The time in seconds between callback runs."""

    thread = None
    """A :class:`BackgroundTask<cherrypy.process.plugins.BackgroundTask>` thread."""

    def __init__(self, bus, callback, frequency=60, name=None):
        SimplePlugin.__init__(self, bus)
        self.callback = callback
        self.frequency = frequency
        self.thread = None
        self.name = name

    def start(self):
        """Start our callback in its own background thread."""
        if self.frequency > 0:
            threadname = self.name or self.__class__.__name__
            if self.thread is None:
                self.thread = BackgroundTask(self.frequency, self.callback,
                                             bus = self.bus)
                self.thread.setName(threadname)
                self.thread.start()
                self.bus.log("Started monitor thread %r." % threadname)
            else:
                self.bus.log("Monitor thread %r already started." % threadname)
    start.priority = 70

    def stop(self):
        """Stop our callback's background task thread."""
        if self.thread is None:
            # BUG FIX: '%' binds tighter than 'or', so the original
            # expression ("..." % self.name or self.__class__.__name__)
            # always formatted self.name (logging "... for None" when no
            # name was given) and never fell back to the class name.
            self.bus.log("No thread running for %s." %
                         (self.name or self.__class__.__name__))
        else:
            if self.thread is not threading.currentThread():
                name = self.thread.getName()
                self.thread.cancel()
                if not get_daemon(self.thread):
                    self.bus.log("Joining %r" % name)
                    self.thread.join()
                self.bus.log("Stopped thread %r." % name)
            self.thread = None

    def graceful(self):
        """Stop the callback's background task thread and restart it."""
        self.stop()
        self.start()
class Autoreloader(Monitor):
    """Monitor which re-executes the process when files change.

    This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`)
    if any of the files it monitors change (or is deleted). By default, the
    autoreloader monitors all imported modules; you can add to the
    set by adding to ``autoreload.files``::

        cherrypy.engine.autoreload.files.add(myFile)

    If there are imported files you do *not* wish to monitor, you can adjust the
    ``match`` attribute, a regular expression. For example, to stop monitoring
    cherrypy itself::

        cherrypy.engine.autoreload.match = r'^(?!cherrypy).+'

    Like all :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins,
    the autoreload plugin takes a ``frequency`` argument. The default is
    1 second; that is, the autoreloader will examine files once each second.
    """

    files = None
    """The set of files to poll for modifications."""

    frequency = 1
    """The interval in seconds at which to poll for modified files."""

    match = '.*'
    """A regular expression by which to match filenames."""

    def __init__(self, bus, frequency=1, match='.*'):
        # filename -> last seen mtime (or None for modules with no .py file)
        self.mtimes = {}
        self.files = set()
        self.match = match
        # self.run is the periodic callback executed by Monitor's thread.
        Monitor.__init__(self, bus, self.run, frequency)

    def start(self):
        """Start our own background task thread for self.run."""
        if self.thread is None:
            # Reset the mtime cache so a restart re-baselines every file.
            self.mtimes = {}
        Monitor.start(self)
    start.priority = 70

    def sysfiles(self):
        """Return a Set of sys.modules filenames to monitor."""
        files = set()
        for k, m in sys.modules.items():
            if re.match(self.match, k):
                if hasattr(m, '__loader__') and hasattr(m.__loader__, 'archive'):
                    # zipimport'ed module: watch the archive itself.
                    f = m.__loader__.archive
                else:
                    f = getattr(m, '__file__', None)
                    if f is not None and not os.path.isabs(f):
                        # ensure absolute paths so a os.chdir() in the app doesn't break me
                        f = os.path.normpath(os.path.join(_module__file__base, f))
                files.add(f)
        return files

    def run(self):
        """Reload the process if registered files have been modified."""
        for filename in self.sysfiles() | self.files:
            if filename:
                if filename.endswith('.pyc'):
                    # Watch the .py source, not the compiled artifact.
                    filename = filename[:-1]

                oldtime = self.mtimes.get(filename, 0)
                if oldtime is None:
                    # Module with no .py file. Skip it.
                    continue

                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    # Either a module with no .py file, or it's been deleted.
                    mtime = None

                if filename not in self.mtimes:
                    # If a module has no .py file, this will be None.
                    self.mtimes[filename] = mtime
                else:
                    if mtime is None or mtime > oldtime:
                        # The file has been deleted or modified.
                        self.bus.log("Restarting because %s changed." % filename)
                        self.thread.cancel()
                        self.bus.log("Stopped thread %r." % self.thread.getName())
                        self.bus.restart()
                        return
class ThreadManager(SimplePlugin):
    """Manager for HTTP request threads.

    If you have control over thread creation and destruction, publish to
    the 'acquire_thread' and 'release_thread' channels (for each thread).
    This will register/unregister the current thread and publish to
    'start_thread' and 'stop_thread' listeners in the bus as needed.

    If threads are created and destroyed by code you do not control
    (e.g., Apache), then, at the beginning of every HTTP request,
    publish to 'acquire_thread' only. You should not publish to
    'release_thread' in this case, since you do not know whether
    the thread will be re-used or not. The bus will call
    'stop_thread' listeners for you when it stops.
    """

    threads = None
    """A map of {thread ident: index number} pairs."""

    def __init__(self, bus):
        self.threads = {}
        SimplePlugin.__init__(self, bus)
        # Make sure every channel we publish or respond to exists on the bus,
        # even before any listener subscribes.
        for channel in ('acquire_thread', 'start_thread',
                        'release_thread', 'stop_thread'):
            self.bus.listeners.setdefault(channel, set())

    def acquire_thread(self):
        """Run 'start_thread' listeners for the current thread.

        If the current thread has already been seen, any 'start_thread'
        listeners will not be run again.
        """
        ident = get_thread_ident()
        if ident not in self.threads:
            # We can't just use get_ident as the thread ID
            # because some platforms reuse thread ID's.
            index = len(self.threads) + 1
            self.threads[ident] = index
            self.bus.publish('start_thread', index)

    def release_thread(self):
        """Release the current thread and run 'stop_thread' listeners."""
        index = self.threads.pop(get_thread_ident(), None)
        if index is not None:
            self.bus.publish('stop_thread', index)

    def stop(self):
        """Release all threads and run all 'stop_thread' listeners."""
        for ident, index in self.threads.items():
            self.bus.publish('stop_thread', index)
        self.threads.clear()
    graceful = stop
| gpl-3.0 |
BigBrother1984/android_external_chromium_org | tools/find_runtime_symbols/proc_maps.py | 32 | 3642 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
# Parses one line of /proc/<pid>/maps, e.g.:
#   08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm
# Groups: begin, end, r, w, x, private, offset, major, minor, inode, name.
_MAPS_PATTERN = re.compile(
    r'^([a-f0-9]+)-([a-f0-9]+)\s+(.)(.)(.)(.)\s+([a-f0-9]+)\s+(\S+):(\S+)\s+'
    r'(\d+)\s*(.*)$', re.IGNORECASE)
class ProcMapsEntry(object):
  """A class representing one line in /proc/.../maps."""

  # Attribute names, in the positional order of __init__.
  _FIELDS = ('begin', 'end', 'readable', 'writable', 'executable',
             'private', 'offset', 'major', 'minor', 'inode', 'name')

  def __init__(
      self, begin, end, readable, writable, executable, private, offset,
      major, minor, inode, name):
    values = (begin, end, readable, writable, executable, private, offset,
              major, minor, inode, name)
    for field, value in zip(self._FIELDS, values):
      setattr(self, field, value)

  def as_dict(self):
    """Return all fields of this entry as a plain dict."""
    return {field: getattr(self, field) for field in self._FIELDS}
class ProcMaps(object):
  """A class representing contents in /proc/.../maps."""

  # Matches mapped shared libraries and Chrome binaries (optionally with
  # trailing version components, e.g. "libfoo.so.1.2").  This pattern was
  # previously duplicated three times below as a *non-raw* string literal
  # (invalid '\S'/'\.' escapes under Python 3) and recompiled on every
  # call; it is now compiled once, as a raw string.
  _LIB_PATTERN = re.compile(
      r'\S+(\.(so|dll|dylib|bundle)|chrome)((\.\d+)+\w*(\.\d+){0,3})?')

  def __init__(self):
    self._sorted_indexes = []
    self._dictionary = {}
    self._sorted = True

  def iter(self, condition):
    """Iterate entries in address order; yield only those passing condition."""
    if not self._sorted:
      self._sorted_indexes.sort()
      self._sorted = True
    for index in self._sorted_indexes:
      if not condition or condition(self._dictionary[index]):
        yield self._dictionary[index]

  def __iter__(self):
    if not self._sorted:
      self._sorted_indexes.sort()
      self._sorted = True
    for index in self._sorted_indexes:
      yield self._dictionary[index]

  @staticmethod
  def load(f):
    """Build a ProcMaps from a file-like object yielding maps lines."""
    table = ProcMaps()
    for line in f:
      table.append_line(line)
    return table

  def append_line(self, line):
    """Parse one maps line and add it to the table (ignore non-matching lines)."""
    entry = self.parse_line(line)
    if entry:
      self._append_entry(entry)

  @staticmethod
  def parse_line(line):
    """Parse one maps line into a ProcMapsEntry, or return None on mismatch."""
    matched = _MAPS_PATTERN.match(line)
    if matched:
      return ProcMapsEntry(  # pylint: disable=W0212
          int(matched.group(1), 16),  # begin
          int(matched.group(2), 16),  # end
          matched.group(3),  # readable
          matched.group(4),  # writable
          matched.group(5),  # executable
          matched.group(6),  # private
          int(matched.group(7), 16),  # offset
          matched.group(8),  # major
          matched.group(9),  # minor
          int(matched.group(10), 10),  # inode
          matched.group(11)  # name
          )
    else:
      return None

  @staticmethod
  def constants(entry):
    """True for non-writable, non-executable mappings of known binaries."""
    return (entry.writable == '-' and entry.executable == '-' and
            ProcMaps._LIB_PATTERN.match(entry.name))

  @staticmethod
  def executable(entry):
    """True for executable mappings of known binaries."""
    return (entry.executable == 'x' and
            ProcMaps._LIB_PATTERN.match(entry.name))

  @staticmethod
  def executable_and_constants(entry):
    """Union of executable() and constants()."""
    return (((entry.writable == '-' and entry.executable == '-') or
             entry.executable == 'x') and
            ProcMaps._LIB_PATTERN.match(entry.name))

  def _append_entry(self, entry):
    # Defer sorting until iteration if entries arrive out of order.
    if self._sorted_indexes and self._sorted_indexes[-1] > entry.begin:
      self._sorted = False
    self._sorted_indexes.append(entry.begin)
    self._dictionary[entry.begin] = entry
| bsd-3-clause |
yigitguler/django | django/core/management/commands/runfcgi.py | 120 | 1073 | import argparse
import warnings
from django.core.management.base import BaseCommand
from django.utils.deprecation import RemovedInDjango19Warning
class Command(BaseCommand):
    """Management command running the project as a FastCGI app via flup.

    Deprecated: emits RemovedInDjango19Warning on every invocation.
    """
    help = "Runs this project as a FastCGI application. Requires flup."

    def add_arguments(self, parser):
        # All remaining CLI tokens are forwarded verbatim to runfastcgi().
        parser.add_argument('args', nargs=argparse.REMAINDER,
            help='Various KEY=val options.')

    def handle(self, *args, **options):
        warnings.warn(
            "FastCGI support has been deprecated and will be removed in Django 1.9.",
            RemovedInDjango19Warning)

        # Imported lazily so settings are only touched when the command runs.
        from django.conf import settings
        from django.utils import translation
        # Activate the current language, because it won't get activated later.
        try:
            translation.activate(settings.LANGUAGE_CODE)
        except AttributeError:
            # Settings without LANGUAGE_CODE: skip activation.
            pass
        from django.core.servers.fastcgi import runfastcgi
        runfastcgi(args)

    def usage(self, subcommand):
        # Show flup/FastCGI-specific help instead of the generic usage text.
        from django.core.servers.fastcgi import FASTCGI_HELP
        return FASTCGI_HELP
| bsd-3-clause |
eugeneai/icc.studprogs | src/icc/studprogs/uctotokenizer.py | 1 | 6739 | #!/usr/bin/env python3
import ucto
from pkg_resources import resource_filename
settingsfile = resource_filename("icc.studprogs", "etc/tokconfig-generic")
def join(lexems,
         only=None,
         filter=None,
         decor=("", ""),
         with_type=False,
         no_symbols=False,
         subst=None):
    """Joins sentence tokens into a string.

    Arguments:
    - `lexems`: List or generator of tokens or tuples with the first item
      being a token. The result will include str() of the token.
    - `only`: Include only tokens of a type mentioned in this list
      (an empty list means "no restriction").
    - `filter`: Filter out tokens of a type mentioned in this list.
      (The name shadows the builtin but is kept for compatibility.)
    - `decor`: Decorate tokens with symbols, e.g. decor=("[", "]")
      produces "[<token>]".
    - `with_type`: Print each token as <token>/<token_type> if possible.
    - `no_symbols`: Remove all plain (non-tuple) symbols if True.
    - `subst`: A dictionary to substitute a token of a type with a
      (text, type) pair. Useful to substitute unwanted ellipsis to,
      e.g., (".", "PUNCTUATION").

    Returns the joined string, or `lexems` unchanged when it is not
    iterable.
    """
    # Bug fix: the defaults were mutable ([], {}), a classic Python
    # pitfall; they are now None sentinels. Observable behavior is
    # unchanged because the defaults were never mutated.
    if only is None:
        only = []
    if filter is None:
        filter = []
    if subst is None:
        subst = {}
    s = []
    only_rules = len(only) >= 1
    try:
        for lexem in lexems:
            space = " "
            if type(lexem) == tuple:
                token = lexem[0]
                tt = token.type()
                if tt in filter:
                    continue
                if only_rules and not tt in only:
                    continue
                if token.nospace():
                    space = ""
                # Keep the original token object for boundary queries below.
                t = token
                if tt in subst:
                    token, tt = subst[tt]
                else:
                    token = str(token)
                if with_type:
                    token += "/" + tt
                # Mark sentence boundaries with '<' / '>'.
                if t.isendofsentence():
                    token += "<"
                if t.isbeginofsentence():
                    token = ">" + token
            else:
                if only_rules:
                    continue
                if no_symbols:
                    continue
                token = str(lexem)
            s.append(decor[0] + token + decor[1] + space)
        answer = "".join(s).strip()
        return answer
    except TypeError:
        # `lexems` was not iterable: return it unchanged (best effort).
        return lexems
def clean_join(sent, with_type=False, decor=("", "")):
    """Joins sentence tokens into a *clean* string.

    Same as :func:`join`, but with some arguments fixed to reasonable
    values: only words and punctuation are kept, plain symbols are
    dropped, and multi-punctuation (e.g. ellipsis) is normalized to a
    single dot.

    Arguments:
    - `sent`: List or generator of tokens or tuples whose first item is
      a token; the result includes str() of each token.
    - `with_type`: Print each token as <token>/<token_type> if possible.
    - `decor`: Pair of strings wrapped around every token, e.g.
      decor=("[", "]") produces "[<token>]".
    """
    kept_types = [
        "WORD",
        "PUNCTUATION-MULTI",
        "PUNCTUATION",
        # "ABBREVIATION",
    ]
    replacements = {
        "PUNCTUATION-MULTI": (".", "PUNCTUATION"),
    }
    return join(sent,
                only=kept_types,
                with_type=with_type,
                decor=decor,
                subst=replacements,
                no_symbols=True)
class Tokenizer(object):
    """Utilization of the ucto tokenizer as a tokenizer class.

    Wraps ucto.Tokenizer and exposes token / sentence generators over an
    iterable text source.
    """

    def __init__(self, textiter=None, **kwargs):
        """Initializes the tokenizer. A text generator is needed as
        input source.

        Arguments:
        - `textiter`: Input source generator (optional).
        - `kwargs`: overrides for the default ucto options below.
        """
        self.textiter = textiter
        # Initialise the tokeniser; options are passed as keyword
        # arguments with these defaults:
        defaults = {
            'lowercase': False,
            'uppercase': False,
            'sentencedetection': False,
            'paragraphdetection': False,
            'quotedetection': False,
            'sentenceperlineinput': False,
            'sentenceperlineoutput': False,
            'debug': False
        }
        defaults.update(kwargs)
        tokenizer = ucto.Tokenizer(settingsfile, **defaults)
        self.tokenizer = tokenizer

    def tokens(self, textiter=None):
        """Generate tokens from the source text.

        Bug fix: `textiter` is now optional. The body already handled
        None (falling back to self.textiter), and test() calls
        ``t.tokens()`` with no argument, which used to raise TypeError.
        """
        if textiter is None:
            textiter = self.textiter
        for text in textiter:
            self.tokenizer.process(text)
            yield from self.tokenizer

    def sentences(self):
        """Generate sentences from the source text.

        Bug fix: both branches previously called ``self.sentences()``
        recursively, causing infinite recursion (and endless
        reprocessing of ``self.textiter``). Delegate to the underlying
        ucto tokenizer's high-level sentence interface instead (see the
        demo snippet below this class).
        """
        if self.textiter is not None:
            for text in self.textiter:
                self.tokenizer.process(text)
        yield from self.tokenizer.sentences()

    def process(self, text):
        """Add text for further processing (may be called multiple times)."""
        self.tokenizer.process(text)
'''
#pass the text (may be called multiple times),
tokenizer.process(text)
#we can continue with more text:
tokenizer.process("This was not enough. We want more text. More sentences are better!!!")
#there is a high-levelinterface to iterate over sentences as string, with all tokens space-separated:
for sentence in tokenizer.sentences():
print(sentence)
'''
def test():
    """Demo: tokenize a mixed English/Russian sample, then print its
    sentences.  Runs two passes over the same text generator."""
    text = """To be or not to be, that's the question. This is a test to tokenise. We can span
    multiple lines!!! The number 6 is Mr Li's favourite. We can't stop yet.
    This is the next paragraph. And so it ends.
    А теперь руссие идут... бутявки.
    1.1 Linux для гиков.
    1.1.2 Для продвинутых разработчиков
    1.2. Другой формат
    Технический текст <123.234>.
    <<Тест кавычек>>
    <<Тест кавычек>>
    """

    def textgen():
        # Yield the text paragraph by paragraph.
        for line in text.split("\n\n"):
            yield line

    print("\n\nThe first demo, TOKEN recognition. -------")
    t = Tokenizer(textgen())
    # NOTE(review): tokens() is called without arguments here, which
    # requires Tokenizer.tokens to default textiter to None — confirm
    # the signature.
    #read the tokenised data
    for token in t.tokens():
        if token.isnewparagraph():
            print("\t", end="")
        #token is an instance of ucto.Token, serialise to string using str()
        print("[" + str(token) + "]", end="")
        #tokens remember whether they are followed by a space
        if token.isendofsentence():
            print(r"\\")
        elif not token.nospace():
            print(" ", end="")
    #the type of the token (i.e. the rule that build it) is available as token.type
    print("\n\nThe Second demo, sentence recognition. -------")
    t = Tokenizer(textgen())
    for sentence in t.sentences():
        print(sentence)
if __name__ == "__main__":
    # Run the demo. The former trailing quit() was removed: quit() is a
    # site builtin meant for interactive sessions, and the script
    # terminates here anyway.
    test()
| gpl-3.0 |
glogiotatidis/snippets-service | snippets/base/admin/fields.py | 2 | 9200 | from django.core.exceptions import ValidationError
from django.forms import (ChoiceField, ModelChoiceField,
ModelMultipleChoiceField, MultipleChoiceField,
MultiValueField)
from snippets.base.models import Addon, TargetedCountry
from .widgets import JEXLMultiWidget
class MultipleChoiceFieldCSV(MultipleChoiceField):
    # To be used with in snippets.base.forms.SnippetAdminForm and in
    # combination with DynamicField. We don't directly save() this field in the
    # database so get_prep_value has not been implemented.

    def prepare_value(self, value):
        """Split a ';'-separated string into the list the widget expects."""
        prepared = super().prepare_value(value)
        if prepared and not isinstance(prepared, list):
            prepared = prepared.split(';')
        return prepared

    def clean(self, value):
        """Serialize the cleaned list back into a ';'-separated string."""
        cleaned = super().clean(value)
        return ';'.join(cleaned)
class JEXLBaseField():
    """Mixin providing JEXL expression generation for form fields.

    Subclasses must set ``self.attr_name`` and ``self.jexl`` (a format
    string with ``{attr_name}`` and ``{value}`` placeholders).
    """
    def to_jexl(self, value):
        # Empty/falsy values produce no expression.
        if value:
            return self.jexl.format(attr_name=self.attr_name, value=value)
        return None
class JEXLMultipleChoiceField(JEXLBaseField, MultipleChoiceFieldCSV):
    """Multiple-choice field serialized as CSV that renders a JEXL
    ``attr in [...]`` membership expression."""

    def __init__(self, attr_name, *args, **kwargs):
        self.attr_name = attr_name
        super().__init__(*args, **kwargs)

    def to_jexl(self, value):
        """Return e.g. ``channel in ['beta', 'release']`` or None."""
        if value:
            # split() already returns a list; the former identity
            # comprehension ([x for x in ...]) was redundant and produced
            # the exact same repr in the f-string.
            return f'{self.attr_name} in {value.split(";")}'
        return None
class JEXLChannelField(JEXLMultipleChoiceField):
    """Channel selector whose JEXL expression treats `release` as also
    covering the `default` update channel."""

    def to_jexl(self, value):
        if not value:
            return None
        channels = value.split(';')
        if 'release' in channels:
            # When `release` is selected, also match `default`.
            channels.append('default')
        return f'{self.attr_name} in {channels}'
class JEXLChoiceField(JEXLBaseField, ChoiceField):
    """Single-choice field rendering a JEXL equality expression."""

    def __init__(self, attr_name, *args, **kwargs):
        self.attr_name = attr_name
        # Callers may override the expression template via `jexl=`.
        self.jexl = kwargs.pop('jexl', '{attr_name} == {value}')
        return super().__init__(*args, **kwargs)

    def to_jexl(self, value):
        if value:
            return self.jexl.format(attr_name=self.attr_name, value=value)
        return None
class JEXLModelMultipleChoiceField(JEXLBaseField, ModelMultipleChoiceField):
    """Model multi-select serialized as ';'-joined ids, rendering a JEXL
    membership expression."""

    def __init__(self, attr_name, *args, **kwargs):
        self.attr_name = attr_name
        # Callers may override the expression template via `jexl=`.
        self.jexl = kwargs.pop('jexl', '{attr_name} in {value}')
        return super().__init__(*args, **kwargs)

    def prepare_value(self, value):
        # Stored form is a ';'-joined id string; widgets want a list.
        if isinstance(value, str):
            value = value.split(';')
        return super().prepare_value(value)

    def clean(self, value):
        cleaned = super().clean(value)
        return ';'.join(str(obj.id) for obj in cleaned)
class JEXLCountryField(JEXLModelMultipleChoiceField):
    """Country multi-select producing a `region in [...]` JEXL
    expression over ISO country codes."""

    def to_jexl(self, value):
        if not value:
            return None
        countries = TargetedCountry.objects.filter(id__in=value.split(";"))
        return f'region in {[country.code for country in countries]}'
class JEXLRangeField(JEXLBaseField, MultiValueField):
    """Two-choice (min, max) range field compressed to "min,max",
    rendering a JEXL expression like ``min <= attr && attr < max``."""

    def __init__(self, attr_name, choices, **kwargs):
        self.attr_name = attr_name
        # Per-endpoint expression templates; note the range is
        # inclusive on the minimum and exclusive on the maximum.
        self.jexl = {
            'minimum': '{value} <= {attr_name}',
            'maximum': '{attr_name} < {value}'
        }
        self.jexl = kwargs.pop('jexl', self.jexl)
        fields = (
            ChoiceField(choices=choices),
            ChoiceField(choices=choices),
        )
        super().__init__(fields, **kwargs)
        self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields],
                                      template_name='widgets/jexlrange.html')

    def compress(self, data_list):
        # Serialize the (min, max) pair as "min,max".
        return ','.join(data_list)

    def to_jexl(self, value):
        """Build the combined expression; empty endpoints are skipped."""
        final_jexl = []
        if value:
            minimum, maximum = value.split(',')
            if minimum:
                final_jexl.append(
                    self.jexl['minimum'].format(attr_name=self.attr_name, value=minimum)
                )
            if maximum:
                final_jexl.append(
                    self.jexl['maximum'].format(attr_name=self.attr_name, value=maximum)
                )
        return ' && '.join(final_jexl)

    def validate(self, value):
        """Validate both endpoints and their numeric ordering."""
        minimum, maximum = value.split(',')
        self.fields[0].validate(minimum)
        self.fields[1].validate(maximum)
        # Numeric comparison (endpoints arrive as strings).
        if minimum and maximum and int(minimum) > int(maximum):
            raise ValidationError('Minimum value must be lower or equal to maximum value.')
        return value
class JEXLFirefoxRangeField(JEXLRangeField):
    """Range field over Firefox major versions, targeting the
    ``firefoxVersion`` JEXL attribute."""

    def __init__(self, **kwargs):
        # Include only versions greater than 63, where ASRSnippets exist.
        min_version = 64
        # Need to be able to dynamically change this, probably using
        # product_details. Issue #855
        max_version = 84
        choices = (
            [(None, 'No limit')] +
            [(x, x) for x in reversed(range(min_version, max_version + 1))]
        )
        super().__init__('firefoxVersion', choices, **kwargs)

    def validate(self, value):
        """Validate the "min,max" pair.

        Bug fix: this override used to compare the endpoints as
        *strings* (so e.g. '9' > '10' evaluated True), unlike
        JEXLRangeField.validate which compares them numerically with
        int(). The parent implementation is otherwise identical, so
        delegate to it.
        """
        return super().validate(value)
class JEXLAddonField(MultiValueField):
    """Composite (check, addon) field compressed to "check,addon_id",
    rendering a JEXL test on ``addonsInfo.addons``."""

    def __init__(self, **kwargs):
        choices = (
            (None, "I don't care"),
            ('not_installed', 'Not Installed'),
            ('installed', 'Installed'),
        )
        fields = (
            ChoiceField(choices=choices),
            ModelChoiceField(queryset=Addon.objects.all(), required=False),
        )
        super().__init__(fields, **kwargs)
        self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])

    def compress(self, data_list):
        # Serialize as "check,addon_id"; missing addon becomes ''.
        if data_list:
            return '{},{}'.format(data_list[0], getattr(data_list[1], 'id', ''))
        return ''

    def to_jexl(self, value):
        check, addon_id = value.split(',')
        if not check or not addon_id:
            return ''
        addon = Addon.objects.get(id=addon_id)
        # NOTE(review): if `check` is neither value below, `jexl` is
        # unbound and the return raises UnboundLocalError; validate()
        # restricts check to these choices — confirm all call paths
        # go through validation.
        if check == 'not_installed':
            jexl = '("{}" in addonsInfo.addons|keys) == false'.format(addon.guid)
        elif check == 'installed':
            jexl = '("{}" in addonsInfo.addons|keys) == true'.format(addon.guid)
        return jexl

    def validate(self, value):
        """Require check and addon to be selected together (or neither)."""
        check, addon_id = value.split(',')
        self.fields[0].validate(check)
        self.fields[1].validate(addon_id)
        if check and not addon_id:
            raise ValidationError('You must select an add-on')
        if not check and addon_id:
            raise ValidationError('You must select a check')
        return value
class JEXLFirefoxServicesField(MultiValueField):
    """Composite (check, service) field compressed to "check,ids",
    rendering a JEXL test on the user's attached FxA OAuth clients.

    A service may map to several OAuth client IDs, separated by '|'.
    """

    def __init__(self, **kwargs):
        check_choices = (
            (None, "I don't care"),
            ('no_account', "User hasn't signed up for"),
            ('has_account', 'User has signed up for'),
        )
        # Verify IDs using
        # curl -s https://oauth.stage.mozaws.net/v1/client/<ID> | jq .
        # Incomplete list of IDs
        # https://docs.telemetry.mozilla.org/datasets/fxa_metrics/attribution.html#service-attribution # noqa
        service_choices = (
            (None, '---------'),
            ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'),
            ('802d56ef2a9af9fa', 'Firefox Monitor'),
            ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'),
            ('a8c528140153d1c6|565585c1745a144d', 'Firefox Private Network'),
            ('e6eb0d1e856335fc', 'Firefox VPN'),
            ('9ebfe2c2f9ea3c58', 'Firefox Private Relay'),
            ('7ad9917f6c55fb77', 'Firefox Reality'),
            ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),
        )
        fields = (
            ChoiceField(choices=check_choices),
            ChoiceField(choices=service_choices),
        )
        super().__init__(fields, **kwargs)
        self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])

    def compress(self, data_list):
        # Serialize as "check,ids" ('' when nothing selected).
        if data_list:
            return f'{data_list[0]},{data_list[1]}'
        return ''

    def to_jexl(self, value):
        check, ids = value.split(',')
        ids = ids.split('|') if ids else ''
        if not check or not ids:
            return ''
        # OR together one membership test per OAuth client ID.
        jexl = '('
        for id in ids:
            jexl += f'("{id}" in attachedFxAOAuthClients|mapToProperty("id")) || '
        # Strip the trailing ' || ' (4 characters).
        jexl = jexl[:-4]
        if check == 'no_account':
            jexl += ') == false'
        elif check == 'has_account':
            jexl += ') == true'
        return jexl

    def validate(self, value):
        """Require check and service to be selected together (or neither)."""
        check, service_name = value.split(',')
        self.fields[0].validate(check)
        self.fields[1].validate(service_name)
        if check and not service_name:
            raise ValidationError('You must select an Service.')
        if not check and service_name:
            raise ValidationError('You must select a check.')
        return value
| mpl-2.0 |
domesticduck/MenuConciergeServer | vendor/bundle/ruby/2.0.0/gems/libv8-3.16.14.3/vendor/gyp/test/win/gyptest-cl-function-level-linking.py | 332 | 1595 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure function-level linking setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
  # Build the test project, then inspect the object files to verify that
  # /Gy (function-level linking) was applied where configured.
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
  CHDIR = 'compiler-flags'
  test.run_gyp('function-level-linking.gyp', chdir=CHDIR)
  test.build('function-level-linking.gyp', test.ALL, chdir=CHDIR)
  def CheckForSectionString(binary, search_for, should_exist):
    # Fail the test when dumpbin output does (not) contain the marker,
    # depending on `should_exist`.
    output = test.run_dumpbin('/headers', binary)
    if should_exist and search_for not in output:
      print 'Did not find "%s" in %s' % (search_for, binary)
      test.fail_test()
    elif not should_exist and search_for in output:
      print 'Found "%s" in %s (and shouldn\'t have)' % (search_for, binary)
      test.fail_test()
  def Object(proj, obj):
    # Object path layout differs between ninja ('.') and msvs ('\\').
    sep = '.' if test.format == 'ninja' else '\\'
    return 'obj\\%s%s%s' % (proj, sep, obj)
  look_for = '''COMDAT; sym= "int __cdecl comdat_function'''
  # When function level linking is on, the functions should be listed as
  # separate comdat entries.
  CheckForSectionString(
      test.built_file_path(Object('test_fll_on', 'function-level-linking.obj'),
                           chdir=CHDIR),
      look_for,
      should_exist=True)
  CheckForSectionString(
      test.built_file_path(Object('test_fll_off', 'function-level-linking.obj'),
                           chdir=CHDIR),
      look_for,
      should_exist=False)
  test.pass_test()
| apache-2.0 |
rynomster/django | tests/null_fk/tests.py | 352 | 2982 | from __future__ import unicode_literals
from django.db.models import Q
from django.test import TestCase
from .models import (
Comment, Forum, Item, Post, PropertyValue, SystemDetails, SystemInfo,
)
class NullFkTests(TestCase):
    """Regression tests for queries across nullable foreign keys."""

    def test_null_fk(self):
        """select_related() must LEFT JOIN through NULL FKs (refs #7369,
        #7530, #7716)."""
        d = SystemDetails.objects.create(details='First details')
        s = SystemInfo.objects.create(system_name='First forum', system_details=d)
        f = Forum.objects.create(system_info=s, forum_name='First forum')
        p = Post.objects.create(forum=f, title='First Post')
        c1 = Comment.objects.create(post=p, comment_text='My first comment')
        c2 = Comment.objects.create(comment_text='My second comment')

        # Starting from comment, make sure that a .select_related(...) with a specified
        # set of fields will properly LEFT JOIN multiple levels of NULLs (and the things
        # that come after the NULLs, or else data that should exist won't). Regression
        # test for #7369.
        c = Comment.objects.select_related().get(id=c1.id)
        self.assertEqual(c.post, p)
        self.assertEqual(Comment.objects.select_related().get(id=c2.id).post, None)

        self.assertQuerysetEqual(
            Comment.objects.select_related('post__forum__system_info').all(),
            [
                (c1.id, 'My first comment', '<Post: First Post>'),
                (c2.id, 'My second comment', 'None')
            ],
            transform=lambda c: (c.id, c.comment_text, repr(c.post))
        )

        # Regression test for #7530, #7716.
        self.assertIsNone(Comment.objects.select_related('post').filter(post__isnull=True)[0].post)

        self.assertQuerysetEqual(
            Comment.objects.select_related('post__forum__system_info__system_details'),
            [
                (c1.id, 'My first comment', '<Post: First Post>'),
                (c2.id, 'My second comment', 'None')
            ],
            transform=lambda c: (c.id, c.comment_text, repr(c.post))
        )

    def test_combine_isnull(self):
        """Combining Q objects over nullable relations with & and | must
        be symmetric (refs #15823)."""
        item = Item.objects.create(title='Some Item')
        pv = PropertyValue.objects.create(label='Some Value')
        item.props.create(key='a', value=pv)
        item.props.create(key='b')  # value=NULL
        q1 = Q(props__key='a', props__value=pv)
        q2 = Q(props__key='b', props__value__isnull=True)

        # Each of these individually should return the item.
        self.assertEqual(Item.objects.get(q1), item)
        self.assertEqual(Item.objects.get(q2), item)

        # Logically, qs1 and qs2, and qs3 and qs4 should be the same.
        qs1 = Item.objects.filter(q1) & Item.objects.filter(q2)
        qs2 = Item.objects.filter(q2) & Item.objects.filter(q1)
        qs3 = Item.objects.filter(q1) | Item.objects.filter(q2)
        qs4 = Item.objects.filter(q2) | Item.objects.filter(q1)

        # Regression test for #15823.
        self.assertEqual(list(qs1), list(qs2))
        self.assertEqual(list(qs3), list(qs4))
| bsd-3-clause |
anaderi/lhcb_trigger_ml | hep_ml/experiments/gradient_boosting.py | 1 | 20009 | from __future__ import division, print_function
import copy
import numbers
import numpy
import pandas
from scipy.special import expit, logit
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.ensemble._gradient_boosting import _random_sample_mask
from sklearn.ensemble.gradient_boosting import LossFunction
from sklearn.tree.tree import DecisionTreeRegressor, DTYPE
from sklearn.utils.random import check_random_state
from sklearn.utils.validation import check_arrays, column_or_1d, array2d
from hep_ml.commonutils import check_sample_weight, generate_sample, map_on_cluster, indices_of_values
from hep_ml.losses import AbstractLossFunction
from transformations import enhance_data, Shuffler
# Total signal / background weights used by the AMS functions below to
# rescale weights computed on a subsample to full-dataset scale.
real_s = 691.988607712
real_b = 410999.847322
#region Functions for measurements
def get_higgs_data(train_file = '/Users/axelr/ipython/datasets/higgs/training.csv'):
    """Load the Higgs challenge training CSV.

    Returns (data, answers, weights) where `answers` is 1 for signal
    ('s' label) and 0 for background, and `data` has the Label and
    Weight columns removed.
    """
    frame = pandas.read_csv(train_file, index_col='EventId')
    labels = numpy.ravel(frame.Label)
    weights = numpy.ravel(frame.Weight)
    frame = frame.drop(['Label', 'Weight'], axis=1)
    answers = numpy.zeros(len(labels), dtype=numpy.int)
    answers[labels == 's'] = 1
    return frame, answers, weights
def AMS(answers, predictions, sample_weight):
    """Approximate Median Significance for hard class predictions (0/1).

    Weights are rescaled so totals match the full dataset (real_s,
    real_b). Raises ValueError when the radicand is negative.
    """
    assert len(answers) == len(predictions) == len(sample_weight)
    predictions = column_or_1d(predictions)
    total_s = numpy.sum(sample_weight[answers > 0.5])
    total_b = numpy.sum(sample_weight[answers < 0.5])
    # True positives / false positives, rescaled to full-dataset totals.
    s = numpy.sum(sample_weight[answers * predictions > 0.5]) * real_s / total_s
    b = numpy.sum(sample_weight[(1 - answers) * predictions > 0.5]) * real_b / total_b
    br = 10.
    radicand = 2 * ((s + b + br) * numpy.log(1.0 + s / (b + br)) - s)
    if radicand < 0:
        raise ValueError('Radicand is negative')
    return numpy.sqrt(radicand)
def compute_ams_on_cuts(answers, predictions, sample_weight):
    """Compute the AMS radicand for every possible probability cut.

    `predictions` are probabilities. Returns (sorted_predictions,
    radicands): predictions sorted in decreasing order and, for each
    position, the radicand of AMS when cutting right after it.
    """
    assert len(answers) == len(predictions) == len(sample_weight)
    answers = column_or_1d(answers)
    predictions = column_or_1d(predictions)
    sample_weight = column_or_1d(sample_weight)
    # Process events from the most signal-like downwards.
    order = numpy.argsort(predictions)[::-1]
    reordered_answers = answers[order]
    reordered_weights = sample_weight[order]
    s_cumulative = numpy.cumsum(reordered_answers * reordered_weights)
    b_cumulative = numpy.cumsum((1 - reordered_answers) * reordered_weights)
    # Rescale so the totals match the full dataset.
    b_cumulative *= real_b / b_cumulative[-1]
    s_cumulative *= real_s / s_cumulative[-1]
    br = 10.
    s = s_cumulative
    b = b_cumulative
    radicands = 2 * ((s + b + br) * numpy.log(1.0 + s/(b + br)) - s)
    return predictions[order], radicands
def optimal_AMS(answers, predictions, sample_weight):
    """Best achievable AMS over all probability cuts.

    `predictions` are probabilities.
    """
    _, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
    return numpy.sqrt(numpy.max(radicands))
def plot_ams_report(answers, predictions, sample_weight=None):
    """Plot AMS as a function of the cut, of signal order, and of the
    common order (three subplots).

    NOTE(review): the sample_weight=None default is passed straight into
    compute_ams_on_cuts, which asserts on len(sample_weight) — confirm
    callers always supply weights.
    """
    import pylab
    cuts, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
    pylab.figure(figsize=(18, 9))
    pylab.subplot(131)
    pylab.title('On cuts')
    # Clip to avoid sqrt of negative radicands.
    pylab.plot(cuts, numpy.sqrt(numpy.clip(radicands, 0, 100)))
    pylab.subplot(132)
    pylab.title('On signal order')
    order = numpy.argsort(predictions)[::-1]
    pylab.plot( numpy.sqrt(numpy.clip(radicands[answers[order] > 0.5], 0, 100)) )
    pylab.subplot(133)
    pylab.title('On common order')
    pylab.plot( numpy.sqrt(radicands) )
def plot_AMS_on_cuts(answers, predictions, sample_weight):
    """Plot AMS against the probability cut. `predictions` are probabilities."""
    import pylab
    cuts, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
    # Clip to avoid sqrt of negative radicands.
    pylab.plot(cuts, numpy.sqrt(numpy.clip(radicands, 0, 100)))
def plot_AMS_on_signal_order(answers, predictions, sample_weight):
    """Plot AMS against the rank of signal events (most signal-like first).

    `predictions` are probabilities.
    """
    import pylab
    cuts, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
    order = numpy.argsort(predictions)[::-1]
    # Keep only positions corresponding to true signal events.
    pylab.plot( numpy.sqrt(numpy.clip(radicands[answers[order] > 0.5], 0, 100)) )
#endregion
#region Losses
class MyLossFunction(BaseEstimator):
    """Base class for custom gradient-boosting loss functions.

    Subclasses implement negative_gradient() and (optionally)
    _update_terminal_region() to refine leaf values after each tree fit.
    """

    def fit(self, X, y, sample_weight=None):
        # Default: stateless loss; subclasses may precompute here.
        pass

    def negative_gradient(self, y, y_pred, sample_weight=None):
        raise NotImplementedError()

    def update_terminal_regions(self, tree, X, y, residual, pred, sample_mask, sample_weight):
        """Refine leaf values of a freshly fitted tree.

        Only samples selected by `sample_mask` participate; `residual`
        is the negative gradient the tree was fitted against.
        """
        assert y.ndim == 1 and residual.ndim == 1 and \
            pred.ndim == 1 and sample_mask.ndim == 1 and sample_weight.ndim == 1
        # residual is negative gradient
        # compute leaf for each sample in ``X``.
        terminal_regions = tree.apply(X)
        # mask all which are not in sample mask.
        masked_terminal_regions = terminal_regions.copy()
        masked_terminal_regions[~sample_mask] = -1
        for leaf, leaf_indices in indices_of_values(masked_terminal_regions):
            if leaf == -1:
                # Masked-out samples; not a real leaf.
                continue
            self._update_terminal_region(tree, terminal_regions=masked_terminal_regions,
                                         leaf=leaf, X=X, y=y, residual=residual, pred=pred,
                                         sample_weight=sample_weight, leaf_indices=leaf_indices)

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight, leaf_indices):
        """This function should select a better values for leaves"""
        pass
class LogitLossFunction(MyLossFunction):
    """Shifted logistic loss: sum_i w_i * log(1 + exp(-y_i * F(x_i) - shift))
    with y_i in {-1, +1} (labels arrive as 0/1 and are mapped)."""

    def __init__(self, shift=0.):
        MyLossFunction.__init__(self)
        self.shift = shift

    def __call__(self, y, y_pred, sample_weight=None):
        # Map 0/1 labels to -1/+1.
        y_signed = 2. * y - 1
        sample_weight = check_sample_weight(y, sample_weight=sample_weight)
        return numpy.sum(sample_weight * numpy.log(1 + numpy.exp(- y_signed * y_pred - self.shift)))

    def negative_gradient(self, y, y_pred, sample_weight=None):
        y_signed = 2. * y - 1
        sample_weight = check_sample_weight(y, sample_weight=sample_weight)
        # d/dF of the loss, negated: w * y * sigmoid(-y*F - shift).
        return sample_weight * y_signed * expit(-y_signed * y_pred - self.shift)

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight, leaf_indices):
        """Making one Newton step"""
        # terminal_region = numpy.where(terminal_regions == leaf)[0]
        terminal_region = leaf_indices
        y = y.take(terminal_region, axis=0)
        y_signed = 2. * y - 1
        pred = pred.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region)
        argument = -y_signed * pred - self.shift
        n_gradient = numpy.sum(sample_weight * y_signed * expit(argument))
        # Second derivative: w * sigmoid(arg) * sigmoid(-arg), written via
        # logaddexp for numerical stability.
        laplacian = numpy.sum(sample_weight / numpy.logaddexp(0., argument) / numpy.logaddexp(0., -argument))
        tree.value[leaf, 0, 0] = n_gradient / laplacian
class AdaLossFunction(MyLossFunction):
    """Exponential (AdaBoost-like) loss with an adjustable curvature on
    the signal class:
    sum w_i/curv * exp(-curv * F) over signal + sum w_i * exp(F) over
    background."""

    def __init__(self, signal_curvature=1.):
        self.signal_curvature = signal_curvature
        # we need only one variable
        MyLossFunction.__init__(self)

    def fit(self, X, y, sample_weight=None):
        # Stateless loss; nothing to precompute.
        pass

    def _signed_multiplier(self, y):
        # Exponent multiplier: -curvature for signal, +1 for background.
        result = numpy.ones(len(y), dtype=float)
        result[y > 0.5] = - self.signal_curvature
        return result

    def _weight_multiplier(self, y):
        # Loss-term weight: 1/curvature for signal, 1 for background.
        result = numpy.ones(len(y), dtype=float)
        result[y > 0.5] = 1 / self.signal_curvature
        return result

    def __call__(self, y, y_pred, sample_weight=None):
        signed_multiplier = self._signed_multiplier(y)
        weight_multiplier = self._weight_multiplier(y)
        sample_weight = check_sample_weight(y, sample_weight=sample_weight)
        return numpy.sum(sample_weight * weight_multiplier * numpy.exp(y_pred * signed_multiplier))

    def negative_gradient(self, y, y_pred, sample_weight=None, **kargs):
        multiplier = self._signed_multiplier(y)
        y_signed = 2. * y - 1
        sample_weight = check_sample_weight(y, sample_weight=sample_weight)
        return sample_weight * y_signed * numpy.exp(y_pred * multiplier)

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight, leaf_indices):
        """Set the leaf value to the analytic minimizer of the leaf loss."""
        terminal_region = leaf_indices
        curv = self.signal_curvature
        y = y.take(terminal_region, axis=0)
        pred = pred.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region)
        w_sig = numpy.sum(sample_weight[y > 0.5] * numpy.exp(- curv * pred[y > 0.5]))
        w_bck = numpy.sum(sample_weight[y < 0.5] * numpy.exp(pred[y < 0.5]))
        # minimizing w_sig * exp(-curv * x) / curv + w_bck * exp(x)
        # Regularize both terms to avoid log(0) / division by zero.
        w_sum = w_sig + w_bck
        w_sig += 1e-4 * w_sum
        w_bck += 1e-4 * w_sum
        tree.value[leaf, 0, 0] = 1 / (1. + curv) * numpy.log(w_sig / w_bck)
#endregion
#region Interpolation
def interpolate(vals, step, steps, use_log=False):
    """Linearly interpolate a parameter along the boosting schedule.

    A scalar `vals` is returned unchanged; a pair [start, end] is
    interpolated at fraction step/steps (clipped to [0, 1]), optionally
    in log space.
    """
    if isinstance(vals, numbers.Number):
        return vals
    fraction = numpy.clip(step / float(steps), 0, 1)
    assert len(vals) == 2, 'Not two values'
    if use_log:
        # Geometric interpolation between the two endpoints.
        return numpy.exp(numpy.interp(fraction, [0., 1.], numpy.log(vals)))
    return numpy.interp(fraction, [0., 1.], vals)
#endregion
#region GradientBoosting
class GradientBoosting(BaseEstimator, ClassifierMixin):
    def __init__(self, loss,
                 n_estimators=10,
                 learning_rate=1.,
                 max_depth=15,
                 min_samples_leaf=5,
                 min_samples_split=2,
                 max_features='auto',
                 subsample=1.,
                 criterion='mse',
                 splitter='best',
                 weights_in_loss=True,
                 update_tree=True,
                 update_on='all',
                 smearing=0.0,
                 recount_step=1000,
                 random_state=None):
        """Gradient boosting classifier over regression trees.

        Supports only two classes.  Several numeric parameters
        (max_depth, min_samples_leaf, smearing) may also be given as
        two-element sequences, interpolated over the boosting
        iterations (see interpolate()).

        :param loss: loss object providing negative_gradient() and
            update_terminal_regions() (see MyLossFunction).
        :param n_estimators: number of boosting iterations.
        :param learning_rate: shrinkage applied to each tree.
        :param max_depth: maximum tree depth (int or None).
        :param min_samples_leaf: minimum samples per leaf.
        :param min_samples_split: minimum samples to split a node.
        :param max_features: features considered per split (int or 'auto').
        :param subsample: fraction of samples used to fit each tree.
        :param criterion: split quality criterion for the trees.
        :param splitter: tree splitting strategy ('best'/'random').
        :param weights_in_loss: if True sample weights go into the loss,
            otherwise into tree fitting.
        :param update_tree: whether leaf values are refined by the loss.
        :param update_on: one of 'all', 'same', 'other', 'random'.
        :param smearing: amount of feature smearing (see Shuffler).
        :param recount_step: iterations between full prediction recounts.
        :param random_state: seed or RandomState for reproducibility.
        """
        self.loss = loss
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
        self.max_depth = max_depth
        self.min_samples_leaf = min_samples_leaf
        self.min_samples_split = min_samples_split
        self.max_features = max_features
        self.subsample = subsample
        self.splitter = splitter
        self.criterion = criterion
        self.weights_in_loss = weights_in_loss
        self.random_state = random_state
        self.update_tree = update_tree
        self.update_on = update_on
        self.smearing = smearing
        self.recount_step = recount_step
def fit(self, X, y, sample_weight=None):
shuffler = Shuffler(X, random_state=self.random_state)
X, y = check_arrays(X, y, dtype=DTYPE, sparse_format="dense", check_ccontiguous=True)
y = column_or_1d(y, warn=True)
n_samples = len(X)
n_inbag = int(self.subsample * n_samples)
sample_weight = check_sample_weight(y, sample_weight=sample_weight).copy()
self.random_state = check_random_state(self.random_state)
# skipping all checks
assert self.update_on in ['all', 'same', 'other', 'random']
y_pred = numpy.zeros(len(y), dtype=float)
self.classifiers = []
self.learning_rates = []
self.loss_values = []
self.loss = copy.copy(self.loss)
self.loss.fit(X, y, sample_weight=sample_weight)
iter_X = shuffler.generate(0.)
prev_smearing = 1
for iteration in range(self.n_estimators):
if iteration % self.recount_step == 0:
if prev_smearing > 0:
iter_smearing = interpolate(self.smearing, iteration, self.n_estimators)
prev_smearing = iter_smearing
iter_X = shuffler.generate(iter_smearing)
iter_X, = check_arrays(iter_X, dtype=DTYPE, sparse_format="dense", check_ccontiguous=True)
y_pred = numpy.zeros(len(y))
y_pred += sum(cl.predict(X) * rate for rate, cl in zip(self.learning_rates, self.classifiers))
self.loss_values.append(self.loss(y, y_pred, sample_weight=sample_weight))
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter=self.splitter,
max_depth=interpolate(self.max_depth, iteration, self.n_estimators),
min_samples_split=self.min_samples_split,
min_samples_leaf=interpolate(self.min_samples_leaf, iteration, self.n_estimators, use_log=True),
max_features=self.max_features,
random_state=self.random_state)
sample_mask = _random_sample_mask(n_samples, n_inbag, self.random_state)
loss_weight = sample_weight if self.weights_in_loss else numpy.ones(len(sample_weight))
tree_weight = sample_weight if not self.weights_in_loss else numpy.ones(len(sample_weight))
residual = self.loss.negative_gradient(y, y_pred, sample_weight=loss_weight)
tree.fit(numpy.array(iter_X)[sample_mask, :],
residual[sample_mask],
sample_weight=tree_weight[sample_mask], check_input=False)
# update tree leaves
if self.update_tree:
if self.update_on == 'all':
update_mask = numpy.ones(len(sample_mask), dtype=bool)
elif self.update_on == 'same':
update_mask = sample_mask
elif self.update_on == 'other':
update_mask = ~sample_mask
else: # random
update_mask = _random_sample_mask(n_samples, n_inbag, self.random_state)
self.loss.update_terminal_regions(tree.tree_, X=iter_X, y=y, residual=residual, pred=y_pred,
sample_mask=update_mask, sample_weight=sample_weight)
iter_learning_rate = interpolate(self.learning_rate, iteration, self.n_estimators, use_log=True)
y_pred += iter_learning_rate * tree.predict(X)
self.classifiers.append(tree)
self.learning_rates.append(iter_learning_rate)
return self
def decision_function(self, X):
X = array2d(X, dtype=DTYPE)
result = numpy.zeros(len(X))
for rate, estimator in zip(self.learning_rates, self.classifiers):
result += rate * estimator.predict(X)
return result
def staged_decision_function(self, X):
X = array2d(X, dtype=DTYPE)
result = numpy.zeros(len(X))
for rate, classifier in zip(self.learning_rates, self.classifiers):
result += rate * classifier.predict(X)
yield result
@staticmethod
def _score_to_proba(score):
result = numpy.zeros([len(score), 2], dtype=float)
result[:, 1] = expit(score / 100.)
result[:, 0] = 1. - result[:, 1]
return result
def _proba_to_score(self, proba):
# for init_estimator
return numpy.clip(logit(proba[:, 1]), -5., 5.)
def predict(self, X):
return numpy.argmax(self.predict_proba(X), axis=1)
def predict_proba(self, X):
return self._score_to_proba(self.decision_function(X))
def staged_predict_proba(self, X):
for score in self.staged_decision_function(X):
yield self._score_to_proba(score)
def test_gradient_boosting(size=100, n_features=10):
    """Smoke-test GradientBoosting: train/score once for every update_on mode."""
    trainX, trainY = generate_sample(size, n_features)
    testX, testY = generate_sample(size, n_features)
    for loss_function in [AdaLossFunction()]:
        for update_mode in ['all', 'same', 'other', 'random']:
            booster = GradientBoosting(loss=loss_function, update_on=update_mode, smearing=[0.1, -0.1])
            quality = booster.fit(trainX, trainY).score(testX, testY)
            print(update_mode, quality)
# Run the smoke test at import time (exercises every update_on mode).
test_gradient_boosting()
#endregion
#region Reweighters
def normalize_weight(y, weights, sig_weight=1., pow_sig=1., pow_bg=1.):
    """Return a rebalanced copy of `weights` for a two-class problem.

    Signal (y == 1) and background (y == 0) weights are raised to their
    respective powers, then each class is rescaled so its mean weight is 1
    (signal: `sig_weight`). The input array is left untouched.
    """
    assert numpy.all((y == 0) | (y == 1)), 'Supports only two classes'
    rebalanced = numpy.copy(weights)
    signal = y == 1
    background = y == 0
    rebalanced[signal] **= pow_sig
    rebalanced[background] **= pow_bg
    rebalanced[signal] /= numpy.mean(rebalanced[signal]) / sig_weight
    rebalanced[background] /= numpy.mean(rebalanced[background])
    return rebalanced
class ReweightingGB(GradientBoosting):
    """GradientBoosting variant that renormalizes class weights before fitting.

    Signal/background sample weights are passed through `normalize_weight`
    (controlled by sig_weight / pow_sig / pow_bg) and then training proceeds
    exactly as in the base class.
    """
    def __init__(self, loss,
                 sig_weight=1., pow_sig=1., pow_bg=1.,
                 n_estimators=10, learning_rate=1., max_depth=None, min_samples_leaf=5, min_samples_split=2,
                 max_features='auto', criterion='mse',
                 subsample=1., splitter='best', weights_in_loss=True, update_tree=True,
                 update_on='all', smearing=0.01,
                 init_estimator=None, init_smearing=0.05, recount_step=1000, random_state=None):
        super(ReweightingGB, self).__init__(
            loss=loss, n_estimators=n_estimators, learning_rate=learning_rate,
            max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_samples_split=min_samples_split,
            max_features=max_features, criterion=criterion, subsample=subsample, splitter=splitter,
            weights_in_loss=weights_in_loss, update_on=update_on, update_tree=update_tree,
            random_state=random_state, recount_step=recount_step, smearing=smearing)
        # Reweighting knobs; everything should be set via set_params.
        self.sig_weight = sig_weight
        self.pow_bg = pow_bg
        self.pow_sig = pow_sig
    def fit(self, X, y, sample_weight=None):
        """Rebalance the sample weights, then delegate to GradientBoosting.fit."""
        balanced_weight = normalize_weight(y, sample_weight, sig_weight=self.sig_weight,
                                           pow_sig=self.pow_sig, pow_bg=self.pow_bg)
        return super(ReweightingGB, self).fit(X, y, sample_weight=balanced_weight)
# Pre-tuned classifier instances (hyper-parameters found empirically);
# `loss__signal_curvature` is forwarded into the AdaLossFunction.
base_gb = ReweightingGB(loss=AdaLossFunction())
base_gb.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=125, n_estimators=400,
                   smearing=0.01, max_features=13, update_tree=True, max_depth=16, subsample=0.5,
                   sig_weight=0.1, weights_in_loss=False, update_on='all')
# Variant with larger leaves / more trees and smearing disabled.
base_gb_short = ReweightingGB(loss=AdaLossFunction())
base_gb_short.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=150, n_estimators=500,
                         smearing=0.0, max_features=16, update_tree=True, max_depth=14, subsample=0.4,
                         sig_weight=0.1, weights_in_loss=False, update_on='all')
# Variant without smearing (no shuffled regeneration of the training matrix).
base_gb_no_shuffle = ReweightingGB(loss=AdaLossFunction())
base_gb_no_shuffle.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=125, n_estimators=250,
                              smearing=0., max_features=13, update_tree=True, max_depth=16, subsample=0.5,
                              sig_weight=0.1, weights_in_loss=False, update_on='all')
# Single-estimator configuration for quick smoke tests.
base_gb_test = ReweightingGB(loss=AdaLossFunction())
base_gb_test.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=125, n_estimators=1,
                        smearing=0.01, max_features=15, update_tree=True, max_depth=16, subsample=0.5,
                        sig_weight=0.1, weights_in_loss=False, update_on='all')
#endregion
"""
import gradient_boosting as gb
data, y, w = gb.get_higgs_data()
voter = gb.base_gb
voter.set_params(n_estimators=10)
voter.fit(gb.enhance_data(data), y, w)
"""
| mit |
cloudera/Impala | tests/query_test/test_spilling.py | 1 | 5425 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from copy import deepcopy
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfNotHdfsMinicluster
from tests.common.test_dimensions import (create_exec_option_dimension_from_dict,
create_parquet_dimension)
# Test with denial of reservations at varying frequency.
# Always test with the minimal amount of spilling and running with the absolute minimum
# memory requirement. (None means no debug action at all.)
CORE_DEBUG_ACTION_DIMS = [None,
                          '-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@1.0']
# Test with different frequency of denial on exhaustive to try and exercise more
# interesting code paths.
EXHAUSTIVE_DEBUG_ACTION_DIMS = [
    '-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@0.1',
    '-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@0.5',
    '-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@0.9']
@pytest.mark.xfail(pytest.config.option.testing_remote_cluster,
                   reason='Queries may not spill on larger clusters')
class TestSpillingDebugActionDimensions(ImpalaTestSuite):
  """Spilling tests run across the debug_action dimension (reservation denial)."""
  @classmethod
  def get_workload(self):
    # NOTE: declared @classmethod but named 'self' — kept as-is for the framework.
    return 'functional-query'
  @classmethod
  def add_test_dimensions(cls):
    super(TestSpillingDebugActionDimensions, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.clear_constraints()
    cls.ImpalaTestMatrix.add_dimension(create_parquet_dimension('tpch'))
    debug_action_dims = CORE_DEBUG_ACTION_DIMS
    if cls.exploration_strategy() == 'exhaustive':
      debug_action_dims = CORE_DEBUG_ACTION_DIMS + EXHAUSTIVE_DEBUG_ACTION_DIMS
    # Tests are calibrated so that they can execute and spill with this page size.
    cls.ImpalaTestMatrix.add_dimension(
        create_exec_option_dimension_from_dict({'default_spillable_buffer_size' : ['256k'],
          'debug_action' : debug_action_dims}))
  def test_spilling(self, vector):
    self.run_test_case('QueryTest/spilling', vector)
  def test_spilling_aggs(self, vector):
    self.run_test_case('QueryTest/spilling-aggs', vector)
  def test_spilling_large_rows(self, vector, unique_database):
    """Test that we can process large rows in spilling operators, with or without
    spilling to disk"""
    self.run_test_case('QueryTest/spilling-large-rows', vector, unique_database)
  def test_spilling_naaj(self, vector):
    """Test spilling null-aware anti-joins"""
    self.run_test_case('QueryTest/spilling-naaj', vector)
  @SkipIfNotHdfsMinicluster.tuned_for_minicluster
  def test_spilling_regression_exhaustive(self, vector):
    """Regression tests for spilling. mem_limits tuned for 3-node minicluster."""
    if self.exploration_strategy() != 'exhaustive':
      pytest.skip("only run large sorts on exhaustive")
    self.run_test_case('QueryTest/spilling-regression-exhaustive', vector)
    # re-run without the buffer-size option to exercise the default page size
    new_vector = deepcopy(vector)
    del new_vector.get_value('exec_option')['default_spillable_buffer_size']
    self.run_test_case(
        'QueryTest/spilling-regression-exhaustive-no-default-buffer-size', new_vector)
@pytest.mark.xfail(pytest.config.option.testing_remote_cluster,
                   reason='Queries may not spill on larger clusters')
class TestSpillingNoDebugActionDimensions(ImpalaTestSuite):
  """Spilling tests to which we don't want to apply the debug_action dimension."""
  @classmethod
  def get_workload(self):
    # NOTE: declared @classmethod but named 'self' — kept as-is for the framework.
    return 'functional-query'
  @classmethod
  def add_test_dimensions(cls):
    super(TestSpillingNoDebugActionDimensions, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.clear_constraints()
    cls.ImpalaTestMatrix.add_dimension(create_parquet_dimension('tpch'))
    # Tests are calibrated so that they can execute and spill with this page size.
    cls.ImpalaTestMatrix.add_dimension(
        create_exec_option_dimension_from_dict({'default_spillable_buffer_size' : ['256k']}))
  def test_spilling_naaj_no_deny_reservation(self, vector):
    """
    Null-aware anti-join tests that depend on getting more than the minimum reservation
    and therefore will not reliably pass with the deny reservation debug action enabled.
    """
    self.run_test_case('QueryTest/spilling-naaj-no-deny-reservation', vector)
  def test_spilling_query_options(self, vector):
    """Test that spilling-related query options work end-to-end. These tests rely on
    setting debug_action to alternative values via query options."""
    self.run_test_case('QueryTest/spilling-query-options', vector)
  def test_spilling_no_debug_action(self, vector):
    """Spilling tests that will not succeed if run with an arbitrary debug action.
    These tests either run with no debug action set or set their own debug action."""
    self.run_test_case('QueryTest/spilling-no-debug-action', vector)
| apache-2.0 |
PeterPetrik/QGIS | tests/src/python/test_qgsprojectrelationmanager.py | 30 | 2685 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRelationManager
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'David Marteau'
__date__ = '19/12/2019'
__copyright__ = 'Copyright 2019, The QGIS Project'
import os
import qgis # NOQA
from qgis.core import (QgsVectorLayer,
QgsRelation,
QgsRelationManager,
QgsProject
)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
# Spin up the QGIS application once for the whole test module.
start_app()
#
# Check consistency of relations when getting manager from project
#
# We want to make sure that updated relation from project which is not the global project
# instance are valid
#
def createReferencingLayer():
    """Build the in-memory point layer acting as the referencing (child) table."""
    return QgsVectorLayer(
        "Point?field=fldtxt:string&field=foreignkey:integer",
        "referencinglayer", "memory")
def createReferencedLayer():
    """Build the in-memory point layer acting as the referenced (parent) table."""
    return QgsVectorLayer(
        "Point?field=x:string&field=y:integer&field=z:integer",
        "referencedlayer", "memory")
class TestQgsProjectRelationManager(unittest.TestCase):
    """Checks relation consistency for managers obtained from a non-global QgsProject."""
    def setUp(self):
        # Fresh project + layers per test; the manager comes from this project,
        # not from QgsProject.instance().
        self.referencedLayer = createReferencedLayer()
        self.referencingLayer = createReferencingLayer()
        self.project = QgsProject()
        self.project.addMapLayers([self.referencedLayer, self.referencingLayer])
    def test_addRelation(self):
        """ test adding relations to a manager
        """
        manager = self.project.relationManager()
        relations = manager.relations()
        self.assertEqual(len(relations), 0)
        # build the relation against the project-local context
        rel = QgsRelation(manager.context())
        rel.setReferencingLayer(self.referencingLayer.id())
        rel.setReferencedLayer(self.referencedLayer.id())
        rel.addFieldPair('foreignkey', 'y')
        rel.setId('rel1')
        rel.setName('Relation Number One')
        assert rel.isValid()
        manager.addRelation(rel)
        relations = manager.relations()
        self.assertEqual(len(relations), 1)
        self.assertEqual(relations['rel1'].id(), 'rel1')
    def test_loadRelation(self):
        """ Test loading relation with project """
        project = QgsProject()
        project.read(os.path.join(unitTestDataPath(), 'projects', 'test-project-with-relations.qgs'))
        manager = project.relationManager()
        relations = manager.relations()
        assert len(relations) > 0
# Standard unittest entry point when executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
juniwang/open-hackathon | open-hackathon-server/src/hackathon/__init__.py | 2 | 9919 | # -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
from werkzeug.exceptions import HTTPException
__author__ = 'Junbo Wang'
__version__ = '2.0'
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
from datetime import timedelta
from hackathon.util import safe_get_config, get_class, Utility, Email, DisabledVoiceVerify, RonglianVoiceVerify, \
DisabledSms, \
ChinaTelecomSms
from hackathon.hackathon_factory import factory, RequiredFeature
from hackathon.hackathon_scheduler import HackathonScheduler
from hackathon.hackathon_response import *
from hackathon.hackathon_exception import *
from hackathon.log import log
from hackathon.context import Context
from hackathon.config import Config
# Public API of the hackathon package.
__all__ = [
    "app",
    "Context",
    "RequiredFeature",
    "Component",
]
# initialize flask and flask restful
app = Flask(__name__)
app.config['SECRET_KEY'] = safe_get_config("app.secret_key", "secret_key")
class HackathonApi(Api):
    """Flask-RESTful Api subclass with framework-level error handling.

    All REST responses carry HTTP status 200; errors are reported in the body.
    Uncaught HTTPExceptions (raised directly in components or returned via the
    helper functions such as bad_request(...)) are translated here, so both

        return bad_request("some message")
        raise BadRequest("some message")

    behave identically.
    """
    # HTTP status code -> hackathon_response helper producing the error body.
    _ERROR_RESPONSES = {
        400: bad_request,
        401: unauthorized,
        403: forbidden,
        404: not_found,
        409: conflict,
        412: precondition_failed,
        415: unsupported_mediatype,
        500: internal_server_error,
    }

    def handle_error(self, e):
        log.error(e)
        if isinstance(e, HTTPException):
            # prefer the message attached by flask-restful over the description
            message = e.description
            if hasattr(e, "data") and "message" in e.data:
                message = e.data["message"]
            responder = self._ERROR_RESPONSES.get(e.code)
            if responder is not None:
                return self.make_response(responder(message), 200)
        # if exception cannot be handled, return error 500
        return self.make_response(internal_server_error(e.message), 200)
# init restful API (module-level singletons shared by the whole package)
api = HackathonApi(app)
# Enable CORS support. Currently requests of all methods from all domains are allowed
app.config['CORS_HEADERS'] = 'Content-Type, token, hackathon_name, Authorization'
cors = CORS(app)
# initialize hackathon scheduler
scheduler = HackathonScheduler(app)
@app.errorhandler(400)
def bad_request_handler(error):
    # Flask-level 400 handler for non-REST routes: log and wrap in an error body.
    log.error(error)
    return bad_request(error.message)
@app.errorhandler(412)
def precondition_failed_handler(error):
    # Flask-level 412 handler: log and wrap in an error body.
    log.error(error)
    return precondition_failed(error.message)
@app.errorhandler(Exception)
def exception_handler(error):
    # Catch-all handler: any unexpected exception becomes a 500-style body.
    log.error(error)
    return internal_server_error(error.message)
class Component(object):
    """Base class of business object

    inheritance classes can make use of self.log, self.db and self.util directly
    without import or instantiating; the attributes below are lazily resolved
    from the hackathon factory on first access.
    """
    log = RequiredFeature("log")
    db = RequiredFeature("db")
    util = RequiredFeature("util")
    scheduler = RequiredFeature("scheduler")
    cache = RequiredFeature("cache")
def init_components():
    """Init hackathon factory.

    Registration order matters: util/log/db are required by almost every other
    component and MUST be provided first.
    """
    from hackathon.user import UserManager, UserProfileManager, OAuthLoginManager
    from hackathon.hack import HackathonManager, AdminManager, TeamManager, DockerHostManager, \
        RegisterManager, HackathonTemplateManager, Cryptor
    from hackathon.template import TemplateLibrary
    from hackathon.remote.guacamole import GuacamoleInfo
    from hackathon.cache.cache_mgr import CacheManagerExt
    # dependencies MUST be provided in advance
    factory.provide("util", Utility)
    factory.provide("log", log)
    init_db()
    # utils
    init_voice_verify()
    init_sms()
    factory.provide("email", Email)
    # cache
    factory.provide("cache", CacheManagerExt)
    # scheduler
    factory.provide("scheduler", scheduler)
    # business components
    factory.provide("user_manager", UserManager)
    factory.provide("user_profile_manager", UserProfileManager)
    factory.provide("oauth_login_manager", OAuthLoginManager)
    factory.provide("hackathon_manager", HackathonManager)
    factory.provide("register_manager", RegisterManager)
    factory.provide("cryptor", Cryptor)
    factory.provide("docker_host_manager", DockerHostManager)
    factory.provide("hackathon_template_manager", HackathonTemplateManager)
    factory.provide("template_library", TemplateLibrary)
    factory.provide("admin_manager", AdminManager)
    factory.provide("team_manager", TeamManager)
    factory.provide("guacamole", GuacamoleInfo)
    # experiment starter
    init_expr_components()
    # health check items
    factory.provide("health_check_hosted_docker", get_class("hackathon.health.health_check.HostedDockerHealthCheck"))
    factory.provide("health_check_guacamole", get_class("hackathon.health.health_check.GuacamoleHealthCheck"))
    factory.provide("health_check_mongodb", get_class("hackathon.health.health_check.MongoDBHealthCheck"))
    # docker
    factory.provide("hosted_docker_proxy", get_class("hackathon.docker.hosted_docker.HostedDockerFormation"))
    # storage
    init_hackathon_storage()
def init_db():
    """Register the mongo db handle; suspended so it connects on first use."""
    from .hmongo import db
    factory.provide("db", db, suspend_callable=True)
def init_expr_components():
    """Register the experiment-starter components."""
    from .expr import ExprManager, K8SExprStarter
    factory.provide("expr_manager", ExprManager)
    factory.provide("k8s_service", K8SExprStarter)
def init_voice_verify():
    """ initial voice verify service

    Reads the provider/enabled flags via safe_get_config (consistent with
    init_sms) so a missing "voice_verify" section disables the service
    instead of raising at startup.

    Example for config.py:
        "voice_verify": {
            "enabled": True,
            "provider": "rong_lian",
            "rong_lian": {
                ... ...
            }
        }
    """
    provider_name = safe_get_config("voice_verify.provider", None)
    enabled = safe_get_config("voice_verify.enabled", False)
    if not enabled:
        log.warn("voice verify disabled")
        factory.provide("voice_verify", DisabledVoiceVerify)
    elif provider_name and safe_get_config("voice_verify." + provider_name, None):
        log.warn("Voice verify initialized to:" + provider_name)
        # if provider other than Ronglian is supported, update following lines
        factory.provide("voice_verify", RonglianVoiceVerify)
    else:
        log.warn("either voice verify provider name or provider config is missing, Please check your configuration")
        raise ConfigurationException("voice_verify.provider")
def init_sms():
    """ initial SMS service """
    provider_name = safe_get_config("sms.provider", None)
    enabled = safe_get_config("sms.enabled", False)
    if not enabled:
        log.warn("SMS service disabled")
        factory.provide("sms", DisabledSms)
    elif provider_name and safe_get_config("sms." + provider_name, None):
        log.warn("SMS initialized to:" + provider_name)
        # if provider other than ChinaTelecom is supported, update following lines
        factory.provide("sms", ChinaTelecomSms)
    else:
        log.warn("Either SMS provider name or provider config is missing, Please check your configuration")
        raise ConfigurationException("sms.provider")
def init_hackathon_storage():
    """Register the storage backend (local filesystem only for now)."""
    from hackathon.storage import LocalStorage
    factory.provide("storage", LocalStorage)
def init_schedule_jobs():
    """Init scheduled jobs

    Note that scheduler job will NOT be enabled in main thread. So the real initialization work are completed in a
    separated thread. Otherwise there might be dead lock in main thread.
    """
    import threading
    t = threading.Thread(target=__init_schedule_jobs)
    t.start()
def __init_schedule_jobs():
    """Init scheduled jobs in fact (runs on a worker thread, see init_schedule_jobs)."""
    log.debug("init scheduled jobs......")
    util = RequiredFeature("util")
    sche = RequiredFeature("scheduler")
    if not util.is_local():
        hackathon_manager = RequiredFeature("hackathon_manager")
        # schedule job to check recycle operation
        next_run_time = util.get_now() + timedelta(seconds=10)
        sche.add_interval(feature="expr_manager",
                          method="scheduler_recycle_expr",
                          id="scheduler_recycle_expr",
                          next_run_time=next_run_time,
                          minutes=10)
        # schedule job to pre-allocate environment
        hackathon_manager.schedule_pre_allocate_expr_job()
        # schedule job to pre-create a docker host server VM
        # host_server_manager.schedule_pre_allocate_host_server_job()
    # init the overtime-sessions detection to update users' online status
    sche.add_interval(feature="user_manager",
                      method="check_user_online_status",
                      id="check_user_online_status",
                      minutes=10)
def init_app():
    """Initialize the application.

    Works including :
     - setting up hackathon factory,
     - register restful API routes
     - initialize scheduled jobs
    """
    init_components()
    from .views import init_routes
    init_routes()
    init_schedule_jobs()
    health_check_guacamole = RequiredFeature("health_check_guacamole")
    u = RequiredFeature("util")
    if u.is_local():
        # only probe guacamole eagerly when running locally
        log.debug("guacamole status: %s" % health_check_guacamole.report_health())
# Bootstrap the whole application at package import time.
init_app()
| mit |
nickvandewiele/RMG-Py | rmgpy/qm/main.py | 4 | 11154 | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2012 Prof. Richard H. West (r.west@neu.edu),
# Prof. William H. Green (whgreen@mit.edu)
# and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import os
import logging
import rmgpy.qm.mopac
import rmgpy.qm.gaussian
from rmgpy.data.thermo import ThermoLibrary
class QMSettings():
    """
    A minimal class to store settings related to quantum mechanics calculations.

    =================== ======================= ====================================
    Attribute           Type                    Description
    =================== ======================= ====================================
    `software`          ``str``                 Quantum chemical package name in common letters
    `method`            ``str``                 Semi-empirical method
    `fileStore`         ``str``                 The path to the QMfiles directory
    `scratchDirectory`  ``str``                 The path to the scratch directory
    `onlyCyclics`       ``bool``                ``True`` if to run QM only on ringed species
    `maxRadicalNumber`  ``int``                 Radicals larger than this are saturated before applying HBI
    =================== ======================= ====================================
    """
    def __init__(self,
                 software = None,
                 method = 'pm3',
                 fileStore = None,
                 scratchDirectory = None,
                 onlyCyclics = True,
                 maxRadicalNumber = 0,
                 ):
        self.software = software
        self.method = method
        # the method name is appended so different methods never share files
        if fileStore:
            self.fileStore = os.path.join(fileStore, method)
        else:
            self.fileStore = None
        if scratchDirectory:
            self.scratchDirectory = os.path.join(scratchDirectory, method)
        else:
            self.scratchDirectory = None
        self.onlyCyclics = onlyCyclics
        self.maxRadicalNumber = maxRadicalNumber
        # locate the bundled 'symmetry' executable; fall back to relying on PATH
        if os.sys.platform == 'win32':
            symmetryPath = os.path.join(rmgpy.getPath(),'..', 'bin', 'symmetry.exe')
            # If symmetry is not installed in the bin folder, assume it is available on the path somewhere
            if not os.path.exists(symmetryPath):
                symmetryPath = 'symmetry.exe'
        else:
            symmetryPath = os.path.join(rmgpy.getPath(),'..', 'bin', 'symmetry')
            if not os.path.exists(symmetryPath):
                symmetryPath = 'symmetry'
        self.symmetryPath = symmetryPath
    def __reduce__(self):
        """
        A helper function used when pickling an object.

        The previous implementation passed 7 positional arguments to the
        6-parameter ``__init__`` (TypeError on unpickle), and re-calling
        ``__init__`` with the stored fileStore would join the method name a
        second time. Instead, reconstruct with the two cheap arguments and
        restore the exact attribute dictionary as pickle state.
        """
        return (QMSettings, (self.software, self.method), self.__dict__.copy())
    def checkAllSet(self):
        """
        Check that all the required settings are set.

        Raises AssertionError if any required attribute is missing or of the
        wrong type.
        """
        assert self.fileStore
        #assert self.scratchDirectory
        assert self.software
        assert self.method
        assert self.onlyCyclics is not None  # but it can be False
        # use builtin bool/int: the old `types.BooleanType/IntType` aliases do
        # not exist on Python 3; semantics are identical (exact-type check).
        assert type(self.onlyCyclics) is bool
        assert self.maxRadicalNumber is not None  # but it can be 0
        assert type(self.maxRadicalNumber) is int
class QMCalculator():
    """
    A Quantum Mechanics calculator object, to store settings.

    The attributes are:

    =================== ======================= ====================================
    Attribute           Type                    Description
    =================== ======================= ====================================
    `settings`          :class:`QMSettings`     Settings for QM calculations
    `database`          :class:`ThermoLibrary`  Database containing QM calculations
    =================== ======================= ====================================
    """
    def __init__(self,
                 software = None,
                 method = 'pm3',
                 fileStore = None,
                 scratchDirectory = None,
                 onlyCyclics = True,
                 maxRadicalNumber = 0,
                 ):
        self.settings = QMSettings(software = software,
                                   method = method,
                                   fileStore = fileStore,
                                   scratchDirectory = scratchDirectory,
                                   onlyCyclics = onlyCyclics,
                                   maxRadicalNumber = maxRadicalNumber,
                                   )
        self.database = ThermoLibrary(name='QM Thermo Library')
    def __reduce__(self):
        """
        A helper function used when pickling an object.

        The previous implementation returned ``(QMCalculator, (settings, database))``,
        which fed the settings object into the ``software`` parameter and the
        database into ``method``, corrupting the restored object. Reconstruct
        with default arguments and restore the real attributes as pickle state.
        """
        return (QMCalculator, (), {'settings': self.settings, 'database': self.database})
    def setDefaultOutputDirectory(self, outputDirectory):
        """
        IF the fileStore or scratchDirectory are not already set, put them in here.
        """
        if not self.settings.fileStore:
            self.settings.fileStore = os.path.abspath(os.path.join(outputDirectory, 'QMfiles', self.settings.method))
            logging.info("Setting the quantum mechanics fileStore to {0}".format(self.settings.fileStore))
        if not self.settings.scratchDirectory:
            self.settings.scratchDirectory = os.path.abspath(os.path.join(outputDirectory, 'QMscratch', self.settings.method))
            logging.info("Setting the quantum mechanics scratchDirectory to {0}".format(self.settings.scratchDirectory))
    def initialize(self):
        """
        Do any startup tasks.
        """
        self.checkReady()
    def checkReady(self):
        """
        Check that it's ready to run calculations.
        """
        self.settings.checkAllSet()
        self.checkPaths()
    def checkPaths(self):
        """
        Check the paths in the settings are OK. Make folders as necessary.
        """
        self.settings.fileStore = os.path.expandvars(self.settings.fileStore)  # to allow things like $HOME or $RMGpy
        self.settings.scratchDirectory = os.path.expandvars(self.settings.scratchDirectory)
        for path in [self.settings.fileStore, self.settings.scratchDirectory]:
            if not os.path.exists(path):
                logging.info("Creating directory %s for QM files."%os.path.abspath(path))
                # This try/except should be redundant, but some networked file systems
                # seem to be slow or buggy or respond strangely causing problems
                # between checking the path exists and trying to create it.
                try:
                    os.makedirs(path)
                except OSError as e:
                    logging.warning("Error creating directory {0}: {1!r}".format(path, e))
                    logging.warning("Checking it already exists...")
                    assert os.path.exists(path), "Path {0} still doesn't exist?".format(path)
    def getThermoData(self, molecule):
        """
        Generate thermo data for the given :class:`Molecule` via a quantum mechanics calculation.

        Ignores the settings onlyCyclics and maxRadicalNumber and does the calculation anyway if asked.
        (I.e. the code that chooses whether to call this method should consider those settings).
        """
        self.initialize()
        if self.settings.software == 'mopac':
            if self.settings.method == 'pm3':
                qm_molecule_calculator = rmgpy.qm.mopac.MopacMolPM3(molecule, self.settings)
            elif self.settings.method == 'pm6':
                qm_molecule_calculator = rmgpy.qm.mopac.MopacMolPM6(molecule, self.settings)
            elif self.settings.method == 'pm7':
                qm_molecule_calculator = rmgpy.qm.mopac.MopacMolPM7(molecule, self.settings)
            else:
                raise Exception("Unknown QM method '{0}' for mopac".format(self.settings.method))
            thermo0 = qm_molecule_calculator.generateThermoData()
        elif self.settings.software == 'gaussian':
            if self.settings.method == 'pm3':
                qm_molecule_calculator = rmgpy.qm.gaussian.GaussianMolPM3(molecule, self.settings)
            elif self.settings.method == 'pm6':
                qm_molecule_calculator = rmgpy.qm.gaussian.GaussianMolPM6(molecule, self.settings)
            else:
                raise Exception("Unknown QM method '{0}' for gaussian".format(self.settings.method))
            thermo0 = qm_molecule_calculator.generateThermoData()
        else:
            raise Exception("Unknown QM software '{0}'".format(self.settings.software))
        return thermo0
def save(rmg):
    # Save the QM thermo to a library if QM was turned on
    if rmg.quantumMechanics:
        logging.info('Saving the QM generated thermo to qmThermoLibrary.py ...')
        rmg.quantumMechanics.database.save(os.path.join(rmg.outputDirectory,'qmThermoLibrary.py'))
class QMDatabaseWriter(object):
    """
    This class listens to a RMG subject
    and saves the thermochemistry of species computed via the
    QMTPmethods.


    A new instance of the class can be appended to a subject as follows:

    rmg = ...
    listener = QMDatabaseWriter()
    rmg.attach(listener)

    Whenever the subject calls the .notify() method, the
    .update() method of the listener will be called.

    To stop listening to the subject, the class can be detached
    from its subject:

    rmg.detach(listener)

    """
    def __init__(self):
        super(QMDatabaseWriter, self).__init__()
    def update(self, rmg):
        # observer callback: delegate to the module-level save() helper
        save(rmg)
| mit |
tsdmgz/ansible | contrib/inventory/collins.py | 46 | 18031 | #!/usr/bin/env python
"""
Collins external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
Collins is a hardware asset management system originally developed by
Tumblr for tracking new hardware as it built out its own datacenters. It
exposes a rich API for manipulating and querying one's hardware inventory,
which makes it an ideal 'single point of truth' for driving systems
automation like Ansible. Extensive documentation on Collins, including a quickstart,
API docs, and a full reference manual, can be found here:
http://tumblr.github.io/collins
This script adds support to Ansible for obtaining a dynamic inventory of
assets in your infrastructure, grouping them in Ansible by their useful attributes,
and binding all facts provided by Collins to each host so that they can be used to
drive automation. Some parts of this script were cribbed shamelessly from mdehaan's
Cobbler inventory script.
To use it, copy it to your repo and pass -i <collins script> to the ansible or
ansible-playbook command; if you'd like to use it by default, simply copy collins.ini
to /etc/ansible and this script to /etc/ansible/hosts.
Alongside the options set in collins.ini, there are several environment variables
that will be used instead of the configured values if they are set:
- COLLINS_USERNAME - specifies a username to use for Collins authentication
- COLLINS_PASSWORD - specifies a password to use for Collins authentication
- COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying;
this can be used to run Ansible automation against different asset classes than
server nodes, such as network switches and PDUs
- COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to
<location of collins.py>/collins.ini
If errors are encountered during operation, this script will return an exit code of
255; otherwise, it will return an exit code of 0.
Collins attributes are accessible as variables in ansible via the COLLINS['attribute_name'].
Tested against Ansible 1.8.2 and Collins 1.3.0.
"""
# (c) 2014, Steve Salevan <steve.salevan@gmail.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import argparse
import ConfigParser
import logging
import os
import re
import sys
from time import time
import traceback
try:
import json
except ImportError:
import simplejson as json
from six import iteritems
from six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import open_url
class CollinsDefaults(object):
    """Constants shared across the Collins inventory script."""

    # Endpoint template; '%s' is the Collins host base URL.
    ASSETS_API_ENDPOINT = '%s/api/assets'
    # Built-in Collins attributes that must not become inventory groups.
    # Set literal instead of set([...]) — same value, idiomatic form.
    SPECIAL_ATTRIBUTES = {
        'CREATED',
        'DELETED',
        'UPDATED',
        'STATE',
    }
    LOG_FORMAT = '%(asctime)-15s %(message)s'
class Error(Exception):
    """Base exception for errors raised by this inventory script."""
    pass
class MaxRetriesError(Error):
    """Raised when Collins queries keep failing past the retry limit."""
    pass
class CollinsInventory(object):
    """Builds an Ansible dynamic inventory from assets tracked in Collins.

    Queries the Collins asset API, groups assets by their attributes, and
    caches both the inventory and per-host details on disk.
    """

    def __init__(self):
        """ Constructs CollinsInventory object and reads all configuration. """
        self.inventory = dict()  # A list of groups and the hosts in that group
        self.cache = dict()  # Details about hosts in the inventory

        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()

        logging.basicConfig(format=CollinsDefaults.LOG_FORMAT,
                            filename=self.log_location)
        self.log = logging.getLogger('CollinsInventory')

    def _asset_get_attribute(self, asset, attrib):
        """ Returns a user-defined attribute from an asset if it exists; otherwise,
            returns None. """
        if 'ATTRIBS' in asset:
            for attrib_block in asset['ATTRIBS'].keys():
                if attrib in asset['ATTRIBS'][attrib_block]:
                    return asset['ATTRIBS'][attrib_block][attrib]
        return None

    def _asset_has_attribute(self, asset, attrib):
        """ Returns whether a user-defined attribute is present on an asset. """
        if 'ATTRIBS' in asset:
            for attrib_block in asset['ATTRIBS'].keys():
                if attrib in asset['ATTRIBS'][attrib_block]:
                    return True
        return False

    def run(self):
        """ Main execution path """
        # Updates cache if cache is not present or has expired.
        successful = True
        if self.args.refresh_cache:
            successful = self.update_cache()
        elif not self.is_cache_valid():
            successful = self.update_cache()
        else:
            successful = self.load_inventory_from_cache()
            successful &= self.load_cache_from_cache()

        data_to_print = ""

        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()
        elif self.args.list:
            # Display list of instances for inventory
            data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
        else:  # default action with no options
            data_to_print = self.json_format_dict(self.inventory, self.args.pretty)

        print(data_to_print)
        return successful

    def find_assets(self, attributes=None, operation='AND'):
        """ Obtains Collins assets matching the provided attributes. """
        attributes = {} if attributes is None else attributes

        # Formats asset search query to locate assets matching attributes, using
        # the CQL search feature as described here:
        # http://tumblr.github.io/collins/recipes.html
        attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)]
        query_parameters = {
            'details': ['True'],
            'operation': [operation],
            'query': attributes_query,
            'remoteLookup': [str(self.query_remote_dcs)],
            'size': [self.results_per_query],
            'type': [self.collins_asset_type],
        }
        assets = []
        cur_page = 0
        num_retries = 0
        # Locates all assets matching the provided query, exhausting pagination.
        while True:
            if num_retries == self.collins_max_retries:
                raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries)
            query_parameters['page'] = cur_page
            query_url = "%s?%s" % (
                (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host),
                urlencode(query_parameters, doseq=True)
            )
            try:
                response = open_url(query_url,
                                    timeout=self.collins_timeout_secs,
                                    url_username=self.collins_username,
                                    url_password=self.collins_password,
                                    force_basic_auth=True)
                json_response = json.loads(response.read())
                # Adds any assets found to the array of assets.
                assets += json_response['data']['Data']
                # If we've retrieved all of our assets, breaks out of the loop.
                if len(json_response['data']['Data']) == 0:
                    break
                cur_page += 1
                num_retries = 0
            except Exception:
                # Narrowed from a bare 'except:' so KeyboardInterrupt and
                # SystemExit are no longer swallowed by the retry loop.
                self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
                num_retries += 1
        return assets

    def is_cache_valid(self):
        """ Determines if the cache files have expired, or if it is still valid """
        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_inventory):
                    return True
        return False

    def read_settings(self):
        """ Reads the settings from the collins.ini file """
        config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')

        config = ConfigParser.SafeConfigParser()
        # Fix: read the resolved location so the COLLINS_CONFIG environment
        # variable is honored (previously config_loc was computed but a
        # hard-coded path next to this script was always read instead).
        config.read(config_loc)

        self.collins_host = config.get('collins', 'host')
        self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username'))
        self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password'))
        self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type'))
        self.collins_timeout_secs = config.getint('collins', 'timeout_secs')
        self.collins_max_retries = config.getint('collins', 'max_retries')

        self.results_per_query = config.getint('collins', 'results_per_query')
        self.ip_address_index = config.getint('collins', 'ip_address_index')
        self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs')
        self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames')

        cache_path = config.get('collins', 'cache_path')
        self.cache_path_cache = cache_path + \
            '/ansible-collins-%s.cache' % self.collins_asset_type
        self.cache_path_inventory = cache_path + \
            '/ansible-collins-%s.index' % self.collins_asset_type
        self.cache_max_age = config.getint('collins', 'cache_max_age')

        log_path = config.get('collins', 'log_path')
        self.log_location = log_path + '/ansible-collins.log'

    def parse_cli_args(self):
        """ Command line argument processing """
        parser = argparse.ArgumentParser(
            description='Produces an Ansible Inventory file based on Collins')
        parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
        parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to Collins '
                                 '(default: False - use cache files)')
        parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output')
        self.args = parser.parse_args()

    def update_cache(self):
        """ Make calls to Collins and saves the output in a cache """
        self.cache = dict()
        self.inventory = dict()

        # Locates all server assets from Collins.
        try:
            server_assets = self.find_assets()
        except Exception:
            self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
            return False

        for asset in server_assets:
            # Determines the index to retrieve the asset's IP address either by an
            # attribute set on the Collins asset or the pre-configured value.
            if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'):
                ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX')
                try:
                    ip_index = int(ip_index)
                except (TypeError, ValueError):
                    self.log.error(
                        "ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset,
                        ip_index)
            else:
                ip_index = self.ip_address_index

            asset['COLLINS'] = {}

            # Attempts to locate the asset's primary identifier (hostname or IP address),
            # which will be used to index the asset throughout the Ansible inventory.
            if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
                asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
            elif 'ADDRESSES' not in asset:
                self.log.warning("No IP addresses found for asset '%s', skipping", asset)
                continue
            elif len(asset['ADDRESSES']) < ip_index + 1:
                self.log.warning(
                    "No IP address found at index %s for asset '%s', skipping",
                    ip_index, asset)
                continue
            else:
                asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS']

            # Adds an asset index to the Ansible inventory based upon unpacking
            # the name of the asset's current STATE from its dictionary.
            if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']:
                state_inventory_key = self.to_safe(
                    'STATE-%s' % asset['ASSET']['STATE']['NAME'])
                self.push(self.inventory, state_inventory_key, asset_identifier)

            # Indexes asset by all user-defined Collins attributes.
            if 'ATTRIBS' in asset:
                for attrib_block in asset['ATTRIBS'].keys():
                    for attrib in asset['ATTRIBS'][attrib_block].keys():
                        asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib]
                        attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib]))
                        self.push(self.inventory, attrib_key, asset_identifier)

            # Indexes asset by all built-in Collins attributes.
            for attribute in asset['ASSET'].keys():
                if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES:
                    attribute_val = asset['ASSET'][attribute]
                    if attribute_val is not None:
                        attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val))
                        self.push(self.inventory, attrib_key, asset_identifier)

            # Indexes asset by hardware product information.
            if 'HARDWARE' in asset:
                if 'PRODUCT' in asset['HARDWARE']['BASE']:
                    product = asset['HARDWARE']['BASE']['PRODUCT']
                    if product:
                        product_key = self.to_safe(
                            'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT'])
                        self.push(self.inventory, product_key, asset_identifier)

            # Indexing now complete, adds the host details to the asset cache.
            self.cache[asset_identifier] = asset

        try:
            self.write_to_cache(self.cache, self.cache_path_cache)
            self.write_to_cache(self.inventory, self.cache_path_inventory)
        except Exception:
            self.log.error("Error while writing to cache:\n%s", traceback.format_exc())
            return False
        return True

    def push(self, dictionary, key, value):
        """ Adds a value to a list at a dictionary key, creating the list if it doesn't
            exist. """
        if key not in dictionary:
            dictionary[key] = []
        dictionary[key].append(value)

    def get_host_info(self):
        """ Get variables about a specific host. """
        if not self.cache or len(self.cache) == 0:
            # Need to load index from cache
            self.load_cache_from_cache()

        if self.args.host not in self.cache:
            # try updating the cache
            self.update_cache()

            if self.args.host not in self.cache:
                # host might not exist anymore
                return self.json_format_dict({}, self.args.pretty)

        return self.json_format_dict(self.cache[self.args.host], self.args.pretty)

    def load_inventory_from_cache(self):
        """ Reads the index from the cache file sets self.index """
        try:
            # 'with' guarantees the file handle is closed (it leaked before).
            with open(self.cache_path_inventory, 'r') as cache:
                json_inventory = cache.read()
            self.inventory = json.loads(json_inventory)
            return True
        except Exception:
            self.log.error("Error while loading inventory:\n%s",
                           traceback.format_exc())
            self.inventory = {}
            return False

    def load_cache_from_cache(self):
        """ Reads the cache from the cache file sets self.cache """
        try:
            with open(self.cache_path_cache, 'r') as cache:
                json_cache = cache.read()
            self.cache = json.loads(json_cache)
            return True
        except Exception:
            self.log.error("Error while loading host cache:\n%s",
                           traceback.format_exc())
            self.cache = {}
            return False

    def write_to_cache(self, data, filename):
        """ Writes data in JSON format to a specified file. """
        json_data = self.json_format_dict(data, self.args.pretty)
        with open(filename, 'w') as cache:
            cache.write(json_data)

    def to_safe(self, word):
        """ Converts 'bad' characters in a string to underscores so they
            can be used as Ansible groups """
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        """ Converts a dict to a JSON object and dumps it as a formatted string """
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
if __name__ == '__main__':
    # Fix: was "if __name__ in '__main__':", a substring test that would
    # also be true for e.g. __name__ == 'main' or '' — use equality.
    inventory = CollinsInventory()
    if inventory.run():
        sys.exit(0)
    else:
        sys.exit(-1)
| gpl-3.0 |
sonofatailor/django-oscar | tests/_site/myauth/models.py | 5 | 1632 | # -*- coding: utf-8 -*-
import re
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.core import validators
from django.contrib.auth.models import BaseUserManager
from oscar.apps.customer.abstract_models import AbstractUser
class CustomUserManager(BaseUserManager):
    """Manager that creates users keyed on username/email for tests."""

    def create_user(self, username, email, password):
        """
        Creates and saves a User with the given email and password.
        """
        if not email:
            raise ValueError('Users must have an email address')

        new_user = self.model(
            email=CustomUserManager.normalize_email(email),
            username=username,
            is_active=True,
        )
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, username, email, password):
        """Create a regular user, then promote it to admin/staff."""
        superuser = self.create_user(username, email, password=password)
        superuser.is_admin = True
        superuser.is_staff = True
        superuser.save(using=self._db)
        return superuser
class User(AbstractUser):
    """
    Custom user based on Oscar's AbstractUser
    """
    username = models.CharField(
        _('username'), max_length=30, unique=True,
        help_text=_('Required. 30 characters or fewer. Letters, numbers and '
                    '@/./+/-/_ characters'),
        validators=[
            # Raw string: '\w' inside a plain literal is an invalid escape
            # sequence (DeprecationWarning since Python 3.6); the compiled
            # pattern is byte-identical.
            validators.RegexValidator(re.compile(r'^[\w.@+-]+$'),
                                      _('Enter a valid username.'), 'invalid')
        ])
    # Demo-only field; kept so the test schema stays unchanged.
    extra_field = models.CharField(
        _('Nobody needs me'), max_length=5, blank=True)

    objects = CustomUserManager()

    class Meta:
        app_label = 'myauth'
| bsd-3-clause |
nebril/fuel-web | fuel_upgrade_system/fuel_package_updates/fuel_package_updates/fuel_package_updates.py | 2 | 26310 | #!/usr/bin/env python
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
from copy import deepcopy
import functools
import json
import logging
import os
import re
import string
import subprocess
import sys
import traceback
import urllib2
import yaml
import zlib
try:
from collections import OrderedDict
except Exception:
# python 2.6 or earlier use backport
from ordereddict import OrderedDict
from keystoneclient import exceptions
from keystoneclient.v2_0 import Client as keystoneclient
from optparse import OptionParser
from urllib2 import urlopen
from urlparse import urlparse
from xml.dom.minidom import parseString
logger = logging.getLogger(__name__)

# Internal distro keys mapped to the repo-name form used for destinations.
distros_dict = {
    'ubuntu': 'ubuntu',
    'centos': 'centos',
    'centos_security': 'centos-security',
    'ubuntu_baseos': 'ubuntu-baseos',
}
# Namedtuple instance so distros are addressable as DISTROS.ubuntu etc.
DISTROS = namedtuple('Distros', distros_dict.keys())(**distros_dict)

# Keystone credentials; overridable via environment variables.
KEYSTONE_CREDS = {'username': os.environ.get('KEYSTONE_USERNAME', 'admin'),
                  'password': os.environ.get('KEYSTONE_PASSWORD', 'admin'),
                  'tenant_name': os.environ.get('KEYSTONE_TENANT', 'admin')}

# TODO(mattymo): parse from Fuel API
FUEL_VER = "6.1"
OPENSTACK_RELEASE = "2014.2.2-6.1"
UBUNTU_CODENAME = 'trusty'
CENTOS_VERSION = 'centos-6'
class Settings(object):
    """Static configuration: supported distros/releases, local destinations
    on the Fuel master, and default upstream mirrors."""

    supported_distros = DISTROS
    supported_releases = (OPENSTACK_RELEASE, )

    # Local destinations keyed by distro; '{0}' is filled with the release.
    updates_destinations = {
        DISTROS.centos: r'/var/www/nailgun/{0}/centos/updates',
        DISTROS.centos_security: r'/var/www/nailgun/{0}/centos/security',
        DISTROS.ubuntu: r'/var/www/nailgun/{0}/ubuntu/updates',
        DISTROS.ubuntu_baseos: os.path.join(r'/var/www/nailgun/{0}/ubuntu/',
                                            UBUNTU_CODENAME),
    }
    mirror_base = "http://mirror.fuel-infra.org/mos"
    # Default upstream mirrors used when no --url is supplied.
    default_mirrors = {
        DISTROS.centos_security: '{0}/{1}/mos{2}/security/'.format(
            mirror_base,
            CENTOS_VERSION,
            FUEL_VER),
        DISTROS.centos: '{0}/{1}/mos{2}/updates/'.format(mirror_base,
                                                         CENTOS_VERSION,
                                                         FUEL_VER),
        DISTROS.ubuntu: '{0}/ubuntu/'.format(mirror_base),
    }
    # Directories never mirrored from upstream.
    exclude_dirs = ('repodata/', 'mos?.?/')
    # Document root and port of the Fuel master's nginx serving the repos.
    httproot = "/var/www/nailgun"
    port = 8080
class HTTPClient(object):
    """urllib2 wrapper that authenticates against Keystone and attaches an
    X-Auth-Token header to every request, retrying once on HTTP 401."""

    def __init__(self, url, keystone_url, credentials, **kwargs):
        logger.debug('Initiate HTTPClient with url %s', url)
        self.url = url
        self.keystone_url = keystone_url
        self.creds = dict(credentials, **kwargs)
        self.keystone = None
        self.opener = urllib2.build_opener(urllib2.HTTPHandler)

    def authenticate(self):
        """(Re)create the keystone client and obtain a token; failures are
        logged, not raised, so unauthenticated use can proceed."""
        try:
            logger.debug('Initialize keystoneclient with url %s',
                         self.keystone_url)
            self.keystone = keystoneclient(
                auth_url=self.keystone_url, **self.creds)
            # it depends on keystone version, some versions doing auth
            # explicitly some dont, but we are making it explicitly always
            self.keystone.authenticate()
            logger.debug('Authorization token is successfully updated')
        except exceptions.AuthorizationFailure:
            logger.warning(
                'Cant establish connection to keystone with url %s',
                self.keystone_url)

    @property
    def token(self):
        # Returns the current token, re-authenticating once on Unauthorized;
        # None when keystone is unavailable/not yet initialized.
        if self.keystone is not None:
            try:
                return self.keystone.auth_token
            except exceptions.AuthorizationFailure:
                logger.warning(
                    'Cant establish connection to keystone with url %s',
                    self.keystone_url)
            except exceptions.Unauthorized:
                logger.warning("Keystone returned unauthorized error, trying "
                               "to pass authentication.")
                self.authenticate()
                return self.keystone.auth_token
        return None

    def get(self, endpoint):
        req = urllib2.Request(self.url + endpoint)
        return self._open(req)

    def post(self, endpoint, data=None, content_type="application/json"):
        if not data:
            data = {}
        logger.info('self url is %s' % self.url)
        req = urllib2.Request(self.url + endpoint, data=json.dumps(data))
        req.add_header('Content-Type', content_type)
        return self._open(req)

    def put(self, endpoint, data=None, content_type="application/json"):
        if not data:
            data = {}
        req = urllib2.Request(self.url + endpoint, data=json.dumps(data))
        req.add_header('Content-Type', content_type)
        # urllib2 has no native PUT support; override the method hook.
        req.get_method = lambda: 'PUT'
        return self._open(req)

    def delete(self, endpoint):
        req = urllib2.Request(self.url + endpoint)
        req.get_method = lambda: 'DELETE'
        return self._open(req)

    def _open(self, req):
        # Retry exactly once after re-authenticating on HTTP 401.
        try:
            return self._get_response(req)
        except urllib2.HTTPError as e:
            if e.code == 401:
                logger.warning('Authorization failure: {0}'.format(e.read()))
                self.authenticate()
                return self._get_response(req)
            else:
                raise

    def _get_response(self, req):
        if self.token is not None:
            try:
                logger.debug('Set X-Auth-Token to {0}'.format(self.token))
                req.add_header("X-Auth-Token", self.token)
            except exceptions.AuthorizationFailure:
                logger.warning('Failed with auth in http _get_response')
                logger.warning(traceback.format_exc())
        return self.opener.open(req)
def repo_merge(list_a, list_b):
    "merges two lists of repositories. list_b replaces records from list_a"
    # A non-list replacement value replaces list_a wholesale.
    if not isinstance(list_b, list):
        return deepcopy(list_b)
    to_merge = list_a + list_b
    # Seed the result with the "primary" Ubuntu repos first so they keep
    # their position at the front of the merged list.
    # NOTE(review): sorted() over a list of dicts relies on Python 2
    # ordering semantics; this raises TypeError on Python 3 — confirm
    # before porting.
    primary_repos = sorted(filter(
        lambda x:
        x['name'].startswith(DISTROS.ubuntu)
        or x['name'].startswith(UBUNTU_CODENAME),
        to_merge))
    result = OrderedDict()
    for repo in primary_repos:
        result[repo['name']] = None
    # Later entries (from list_b) overwrite earlier entries with the same
    # name; entries flagged with 'delete': True are removed entirely.
    for repo in to_merge:
        name = repo['name']
        if repo.get('delete') is True:
            result.pop(name, None)
        else:
            result[name] = repo
    return result.values()
class FuelWebClient(object):
    """High-level helper over NailgunClient for updating repo settings."""

    def __init__(self, admin_node_ip):
        self.admin_node_ip = admin_node_ip
        self.client = NailgunClient(admin_node_ip)
        super(FuelWebClient, self).__init__()

    def environment(self):
        """Environment Model

        :rtype: EnvironmentModel
        """
        # NOTE(review): self._environment is never assigned in this class;
        # calling this without external setup raises AttributeError — confirm.
        return self._environment

    def update_cluster_repos(self,
                             cluster_id,
                             settings=None):
        """Merges ``settings`` into a cluster's repo_setup attribute.

        :param cluster_id: ID of the cluster to update
        :param settings: list of repo dicts to merge in (see repo_merge)
        """
        logger.info("Updating default repositories...")

        if not settings:
            settings = {}

        attributes = self.client.get_cluster_attributes(cluster_id)

        if 'repo_setup' in attributes['editable']:
            repos_attr = attributes['editable']['repo_setup']['repos']
            repos_attr['value'] = repo_merge(repos_attr['value'], settings)

        logger.debug("Try to update cluster "
                     "with next attributes {0}".format(attributes))
        self.client.update_cluster_attributes(cluster_id, attributes)

    def update_default_repos(self,
                             release_id,
                             settings=None):
        """Merges ``settings`` into a release's default repo_setup.

        :param release_id: ID of the release to update
        :param settings: list of repo dicts to merge in (see repo_merge)
        """
        if settings is None:
            settings = {}

        attributes = self.client.get_release_attributes(release_id)

        if 'repo_setup' in attributes['attributes_metadata']['editable']:
            repos_attr = \
                attributes['attributes_metadata']['editable']['repo_setup'][
                    'repos']
            repos_attr['value'] = repo_merge(repos_attr['value'], settings)

        logger.debug("Try to update release "
                     "with next attributes {0}".format(attributes))
        self.client.update_release_attributes(release_id, attributes)
class NailgunClient(object):
    """Thin wrapper over the Nailgun REST API (port 8000) returning
    parsed JSON."""

    def __init__(self, admin_node_ip, **kwargs):
        url = "http://{0}:8000".format(admin_node_ip)
        logger.debug('Initiate Nailgun client with url %s', url)
        self.keystone_url = "http://{0}:5000/v2.0".format(admin_node_ip)
        self._client = HTTPClient(url=url, keystone_url=self.keystone_url,
                                  credentials=KEYSTONE_CREDS,
                                  **kwargs)
        super(NailgunClient, self).__init__()

    # Decorator (defined inside the class body, applied below) that parses
    # the HTTP response body as JSON.
    def json_parse(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            response = func(*args, **kwargs)
            return json.loads(response.read())
        return wrapped

    @property
    def client(self):
        return self._client

    @json_parse
    def get_cluster_attributes(self, cluster_id):
        return self.client.get(
            "/api/clusters/{0}/attributes/".format(cluster_id)
        )

    @json_parse
    def get_release_attributes(self, release_id):
        return self.client.get(
            "/api/releases/{0}/".format(release_id)
        )

    @json_parse
    def update_cluster_attributes(self, cluster_id, attrs):
        return self.client.put(
            "/api/clusters/{0}/attributes/".format(cluster_id),
            attrs
        )

    @json_parse
    def update_release_attributes(self, release_id, attrs):
        return self.client.put(
            "/api/releases/{0}/".format(release_id), attrs)

    @json_parse
    def get_releases(self):
        return self.client.get("/api/releases/")

    def get_release_id(self, operating_system, release_version):
        # Returns the matching release id, or implicitly None (after
        # logging an error) when no release matches.
        for release in self.get_releases():
            if release["version"] == release_version:
                if release["operating_system"].lower() == \
                        operating_system.lower():
                    return release["id"]
        logger.error("Release not found for {0} - {1}".format(operating_system,
                                                              release_version))
class UpdatePackagesException(Exception):
    """Raised when mirroring or updating package repositories fails."""
def exec_cmd(cmd):
    """Run *cmd* through the shell, log its combined output line by line,
    and return the process exit code."""
    logger.debug('Execute command "%s"', cmd)
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=True)

    logger.debug('Stdout and stderr of command "%s":', cmd)
    for output_line in proc.stdout:
        logger.debug(output_line.rstrip())

    return _wait_and_check_exit_code(cmd, proc)
def _wait_and_check_exit_code(cmd, child):
    """Block until *child* terminates, log completion, and return its
    exit status."""
    child.wait()
    logger.debug('Command "%s" was executed', cmd)
    return child.returncode
def get_repository_packages(remote_repo_url, distro):
    """Return the package names published in a remote repository.

    Supports Ubuntu (flat ``Packages`` index) and CentOS (gzipped
    ``repodata/primary.xml``); raises for the ubuntu-baseos pseudo-distro.
    """
    repo_url = urlparse(remote_repo_url)
    packages = []
    if distro == DISTROS.ubuntu_baseos:
        raise UpdatePackagesException(
            "Use fuel-createmirror to mirror base Ubuntu OS.")
    if distro == DISTROS.ubuntu:
        packages_url = '{0}/Packages'.format(repo_url.geturl())
        pkgs_raw = urlopen(packages_url).read()
        for pkg in pkgs_raw.split('\n'):
            match = re.search(r'^Package: (\S+)\s*$', pkg)
            if match:
                packages.append(match.group(1))
    elif distro == DISTROS.centos:
        packages_url = '{0}/repodata/primary.xml.gz'.format(repo_url.geturl())
        # 'zlib.MAX_WBITS | 32' makes zlib auto-detect the gzip header.
        pkgs_xml = parseString(zlib.decompressobj(zlib.MAX_WBITS | 32).
                               decompress(urlopen(packages_url).read()))
        for pkg in pkgs_xml.getElementsByTagName('package'):
            packages.append(
                pkg.getElementsByTagName('name')[0].firstChild.nodeValue)
    return packages
def get_ubuntu_baseos_repos(repopath, ip, httproot, port,
                            baseurl=None, clear=False):
    """Build repo entries re-pointing the Ubuntu base OS repos at a mirror.

    With clear=True the three upstream repos are deleted and replaced by a
    single local codename repo; otherwise only the codename entry is removed
    and base/updates/security entries are added pointing at the mirror URL.
    """
    # TODO(mattymo): parse all repo metadata
    repolist = ['base', 'updates', 'security']
    reponames = {
        'base': 'ubuntu',
        'updates': 'ubuntu-updates',
        'security': 'ubuntu-security'}
    repourl = baseurl or "http://{ip}:{port}{repopath}".format(
        ip=ip,
        port=port,
        repopath=repopath.replace(httproot, ''))
    if clear:
        repos = [
            {"name": "ubuntu", "delete": True},
            {"name": "ubuntu-security", "delete": True},
            {"name": "ubuntu-updates", "delete": True},
            {
                "type": "deb",
                "name": UBUNTU_CODENAME,
                "uri": repourl,
                "suite": UBUNTU_CODENAME,
                "section": "main",
                "priority": None,
            },
        ]
    else:
        repos = [
            {"name": UBUNTU_CODENAME, "delete": True},
        ]
    for repo in repolist:
        name = reponames[repo]
        repoentry = {
            "type": "deb",
            "name": name,
            "uri": repourl,
            "suite": repo,
            "section": "main universe multiverse",
            "priority": None}
        # NOTE(review): no entry in repolist contains "holdback", so this
        # branch looks unreachable here — confirm before removing.
        if "holdback" in repo:
            repoentry['priority'] = 1100
        repos.append(repoentry)
    return repos
def get_ubuntu_repos(repopath, ip, httproot, port, baseurl=None):
    """Build repo entries for the MOS Ubuntu updates/holdback/security
    suites, pointing at *baseurl* or the local mirror URL."""
    # TODO(mattymo): parse all repo metadata
    repolist = [
        'mos{0}-updates'.format(FUEL_VER),
        'mos{0}-holdback'.format(FUEL_VER),
        'mos{0}-security'.format(FUEL_VER),
    ]
    repourl = baseurl or "http://{ip}:{port}{repopath}".format(
        ip=ip,
        port=port,
        repopath=repopath.replace(httproot, ''))
    repos = []
    for repo in repolist:
        # FIXME(mattymo): repositories cannot have a period in their name
        name = repo.replace(FUEL_VER, '')
        repoentry = {
            "type": "deb",
            "name": name,
            "uri": repourl,
            "suite": repo,
            "section": "main restricted",
            "priority": 1050}
        # Holdback suite pins above updates/security (1100 vs 1050).
        if "holdback" in repo:
            repoentry['priority'] = 1100
        repos.append(repoentry)
    return repos
def get_centos_repos(repopath, ip, httproot, port, baseurl=None):
    """Build the single repo entry for the MOS CentOS updates repository.

    Uses *baseurl* when given, otherwise derives the URL served by the
    Fuel master from *repopath* with the document root stripped.
    """
    if baseurl:
        uri = baseurl
    else:
        uri = "http://{ip}:{port}{repopath}".format(
            ip=ip,
            port=port,
            repopath=repopath.replace(httproot, ''))
    return [{
        "type": "rpm",
        "name": "mos-updates",
        "uri": uri,
        "priority": 20,
    }]
def get_centos_security_repos(repopath, ip, httproot, port, baseurl=None):
    """Build the single repo entry for the MOS CentOS security repository.

    Uses *baseurl* when given, otherwise derives the URL served by the
    Fuel master from *repopath* with the document root stripped.
    """
    if baseurl:
        uri = baseurl
    else:
        uri = "http://{ip}:{port}{repopath}".format(
            ip=ip,
            port=port,
            repopath=repopath.replace(httproot, ''))
    return [{
        "type": "rpm",
        "name": "mos-security",
        "uri": uri,
        "priority": 20,
    }]
def reindent(s, numSpaces):
    """Return *s* with every line prefixed by *numSpaces* spaces."""
    # str methods instead of the legacy ``string`` module functions:
    # string.split/string.join were removed in Python 3, and the str
    # equivalents are idiomatic on Python 2 as well.  Output is identical.
    prefix = numSpaces * ' '
    return '\n'.join(prefix + line for line in s.split('\n'))
def show_env_conf(repos, showuri=False, ip="10.20.0.2"):
    """Print instructions for wiring *repos* into a Fuel environment.

    With showuri=True the raw repository URIs are printed (useful for the
    web UI); otherwise a YAML snippet for ``fuel env --attributes`` is shown.
    """
    print("Your repositories are now ready for use. You will need to update "
          "your Fuel environment configuration to use these repositories.")
    print("Note: Be sure to replace ONLY the repositories listed below.\n")
    if not showuri:
        print("Replace the entire repos section of your environment using "
              "the following commands:\n fuel --env 1 env --attributes "
              "--download\n vim cluster_1/attributes.yaml\n fuel --env "
              "1 env --attributes --upload")
    if showuri:
        for repo in repos:
            if repo['type'] == "deb":
                print("{name}:\ndeb {uri} {suite} {section}".format(
                    name=repo['name'],
                    uri=repo['uri'],
                    suite=repo['suite'],
                    section=repo['section']))
            else:
                print("{name}:\n{uri}".format(
                    name=repo['name'],
                    uri=repo['uri']))
    else:
        # Indent the YAML dump so it can be pasted under 'repos:' in the
        # downloaded attributes file.
        spaces = 10
        yamldata = {"repos": repos}
        print(reindent(yaml.dump(yamldata, default_flow_style=False), spaces))
def update_env_conf(ip, distro, release, repos, env_id=None,
                    makedefault=False):
    """Push *repos* into a Fuel environment and/or a release's defaults."""
    web_client = FuelWebClient(ip)
    if env_id is not None:
        logger.info("Updating environment repositories...")
        web_client.update_cluster_repos(env_id, repos)
    if makedefault:
        # ubuntu-baseos updates the ubuntu release
        if DISTROS.ubuntu in distro:
            distro = DISTROS.ubuntu
        release_id = web_client.client.get_release_id(distro, release)
        logger.info("Updating release ID {0}".format(release_id))
        if release_id is not None:
            web_client.update_default_repos(release_id, repos)
def mirror_remote_repository(remote_repo_url, local_repo_path, exclude_dirs,
                             distro):
    """Mirror a remote repository into *local_repo_path* via rsync or wget.

    Raises UpdatePackagesException when the download command exits non-zero.
    NOTE(review): the 'exclude_dirs' and 'distro' parameters and the 'pwd'
    format argument are not referenced by either command template — confirm
    before relying on them.
    """
    repo_url = urlparse(remote_repo_url)
    # Number of leading path components to strip on the local side.
    cut_dirs = len(repo_url.path.strip('/').split('/'))
    if "rsync://" in remote_repo_url:
        excl_dirs = "ubuntu/dists/mos?.?/,repodata/"
        download_cmd = ('rsync --exclude="*.key","*.gpg",{excl_dirs} -vPr '
                        '{url} {path}').format(pwd=repo_url.path.rstrip('/'),
                                               path=local_repo_path,
                                               excl_dirs=excl_dirs,
                                               url=repo_url.geturl())
    else:
        excl_dirs = "--exclude-directories='ubuntu/dists/mos?.?/,repodata'"
        download_cmd = (
            'wget -N --recursive --no-parent --no-verbose -R "*.html" -R '
            '"*.gif" -R "*.key" -R "*.gpg" -R "*.dsc" -R "*.tar.gz" '
            '{excl_dirs} --directory-prefix {path} -nH '
            '--cut-dirs={cutd} '
            '{url}').format(pwd=repo_url.path.rstrip('/'),
                            excl_dirs=excl_dirs,
                            path=local_repo_path,
                            cutd=cut_dirs,
                            url=repo_url.geturl())
    logger.debug('Execute command "%s"', download_cmd)
    if exec_cmd(download_cmd) != 0:
        raise UpdatePackagesException('Mirroring of remote packages'
                                      ' repository failed!')
def main():
    """Command-line entry point.

    Parses options, validates the requested distro/release, optionally
    mirrors the remote repository to a local path, builds the repository
    configuration for the chosen distro and either applies it to a Fuel
    environment/release or prints it.
    """
    settings = Settings()
    # Route log output to the console with timestamps.
    sh = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    logger.setLevel(logging.INFO)
    # Command-line interface definition.
    parser = OptionParser(
        description="Pull updates for a given release of Fuel based on "
                    "the provided URL."
    )
    parser.add_option('-l', '--list-distros', dest='list_distros',
                      default=None, action="store_true",
                      help='List available distributions.')
    parser.add_option('-d', '--distro', dest='distro', default=None,
                      help='Distribution name (required)')
    parser.add_option('-c', '--clear-upstream-repos',
                      action="store_true",
                      dest='clear_upstream_repos',
                      help="Clears upstream repos if {0} distro is "
                      "chosen. By default just replacing their's URIs".format(
                          DISTROS.ubuntu_baseos))
    parser.add_option('-r', '--release', dest='release', default=None,
                      help='Fuel release name (required)')
    parser.add_option("-u", "--url", dest="url", default="",
                      help="Remote repository URL")
    parser.add_option("-N", "--no-download",
                      action="store_true", dest="nodownload", default=False,
                      help="Skip downloading repository (conflicts with"
                           " --url)")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Enable debug output")
    parser.add_option("-i", "--show-uris", dest="showuri", default=False,
                      action="store_true",
                      help="Show URIs for new repositories (optional). "
                      "Useful for WebUI.")
    parser.add_option("-m", "--make-default", dest="makedefault",
                      default=False, action="store_true",
                      help="Make default for new environments (optional).")
    parser.add_option("-a", "--apply", dest="apply", default=False,
                      action="store_true",
                      help="Apply changes to Fuel environment (optional)")
    parser.add_option("-e", "--env", dest="env", default=None,
                      help="Fuel environment ID to update")
    parser.add_option("-s", "--fuel-server", dest="ip", default="10.20.0.2",
                      help="Address of Fuel Master public address (defaults "
                      "to 10.20.0.2)")
    parser.add_option("-b", "--baseurl", dest="baseurl", default=None,
                      help="Full URL of repo to set, such as http://myserver."
                      "company.com/mos-ubuntu/ (optional)")
    parser.add_option("-p", "--password", dest="admin_pass", default=None,
                      help="Fuel Master admin password (defaults to admin)."
                      " Alternatively, use env var KEYSTONE_PASSWORD).")

    (options, args) = parser.parse_args()

    if options.verbose:
        logger.setLevel(logging.DEBUG)
    # --list-distros is informational only: print and exit.
    if options.list_distros:
        logger.info("Available distributions:\n {0}".format(
            "\n ".join(settings.supported_distros)))
        sys.exit(0)
    # Validate the requested distro and release against the known sets.
    if options.distro not in settings.supported_distros:
        raise UpdatePackagesException(
            'Distro "{0}" is not supported. Please specify one of the '
            'following: "{1}". See help (--help) for details.'.format(
                options.distro, ', '.join(settings.supported_distros)))
    if options.release not in settings.supported_releases:
        raise UpdatePackagesException(
            'Fuel release "{0}" is not supported. Please specify one of the '
            'following: "{1}". See help (--help) for details.'.format(
                options.release, ', '.join(settings.supported_releases)))
    # Fall back to the built-in default mirror when no URL was given.
    if not options.url and options.distro != DISTROS.ubuntu_baseos:
        options.url = settings.default_mirrors[options.distro]
        logger.debug("Using {0} as mirror URL.".format(options.url))
    # NOTE(review): 'x in urlparse(url)' tests whole ParseResult components
    # for equality, so an 'https' scheme would NOT match 'http' and would be
    # rejected here; urlparse(...).scheme.startswith(...) was likely
    # intended -- TODO confirm.
    if 'http' not in urlparse(options.url) and 'rsync' not in \
            urlparse(options.url) and not options.nodownload:
        raise UpdatePackagesException(
            'Repository url "{0}" does not look like a valid URL. '
            'See help (--help) for details.'.format(options.url))
    if options.apply and (not options.env and not options.makedefault):
        raise UpdatePackagesException(
            '--apply option requires --env or --makedefault to be specified. '
            'See help (--help) for details.')
    # Ensure the local destination directory for the mirror exists.
    updates_path = settings.updates_destinations[options.distro].format(
        options.release)
    if not os.path.exists(updates_path):
        os.makedirs(updates_path)
    if options.nodownload:
        logger.info('Skipping repository download...')
    else:
        logger.info('Started mirroring remote repository...')
        mirror_remote_repository(options.url, updates_path,
                                 settings.exclude_dirs, options.distro)
        logger.info('Remote repository "{url}" for "{release}" ({distro}) was '
                    'successfuly mirrored to {path} folder.'.format(
                        url=options.url,
                        release=options.release,
                        distro=options.distro,
                        path=updates_path))
    # Build the repository configuration matching the chosen distro.
    # NOTE(review): literal "ubuntu" here vs DISTROS.* constants in the
    # other branches -- inconsistent; confirm DISTROS.ubuntu == "ubuntu".
    if options.distro == "ubuntu":
        repos = get_ubuntu_repos(updates_path, options.ip, settings.httproot,
                                 settings.port, options.baseurl)
    elif options.distro == DISTROS.ubuntu_baseos:
        if options.clear_upstream_repos:
            logger.warning('*IMPORTANT* If there are any custom Ubuntu '
                           'mirrors that have been configured by hand, '
                           'please remove them manually as they will not be '
                           'removed with the --clear-upstream-repos option.')
        repos = get_ubuntu_baseos_repos(updates_path, options.ip,
                                        settings.httproot, settings.port,
                                        options.baseurl,
                                        options.clear_upstream_repos)
    elif options.distro == DISTROS.centos:
        repos = get_centos_repos(updates_path, options.ip, settings.httproot,
                                 settings.port, options.baseurl)
    elif options.distro == DISTROS.centos_security:
        repos = get_centos_security_repos(updates_path, options.ip,
                                          settings.httproot, settings.port,
                                          options.baseurl)
    else:
        raise UpdatePackagesException('Unknown distro "{0}"'.format(
            options.distro))
    if options.admin_pass:
        KEYSTONE_CREDS['password'] = options.admin_pass
    # Either push the configuration to Fuel or just display it.
    if options.apply:
        update_env_conf(options.ip, options.distro, options.release, repos,
                        options.env, options.makedefault)
    else:
        show_env_conf(repos, options.showuri, options.ip)


if __name__ == '__main__':
    main()
| apache-2.0 |
sgerhart/ansible | test/units/modules/network/f5/test_bigip_service_policy.py | 4 | 4329 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_service_policy import ApiParameters
from library.modules.bigip_service_policy import ModuleParameters
from library.modules.bigip_service_policy import ModuleManager
from library.modules.bigip_service_policy import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_service_policy import ApiParameters
from ansible.modules.network.f5.bigip_service_policy import ModuleParameters
from ansible.modules.network.f5.bigip_service_policy import ModuleManager
from ansible.modules.network.f5.bigip_service_policy import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load a fixture file by name, memoizing the result.

    The file content is parsed as JSON when possible; otherwise the raw
    text is returned unchanged.
    """
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            contents = handle.read()
        try:
            contents = json.loads(contents)
        except Exception:
            # Not JSON -- keep the raw text.
            pass
        fixture_data[path] = contents
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    """Unit tests for the module's parameter adapter classes."""

    def test_module_parameters(self):
        """Playbook-style args are normalized by ModuleParameters.

        Bare policy names are expanded to full partition paths
        (``timer1`` -> ``/Common/timer1``).
        """
        args = dict(
            name='foo',
            description='my description',
            timer_policy='timer1',
            port_misuse_policy='misuse1',
        )
        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.description == 'my description'
        assert p.timer_policy == '/Common/timer1'
        assert p.port_misuse_policy == '/Common/misuse1'

    def test_api_parameters(self):
        """A REST payload fixture maps straight onto ApiParameters."""
        args = load_fixture('load_net_service_policy_1.json')
        p = ApiParameters(params=args)
        # Expected values mirror the contents of the JSON fixture.
        assert p.name == 'baz'
        assert p.description == 'my description'
        assert p.timer_policy == '/Common/foo'
        assert p.port_misuse_policy == '/Common/bar'
# Every test in this class runs with the F5 management-root connection
# patched out, so no real device is ever contacted.
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
       return_value=True)
class TestManager(unittest.TestCase):
    def setUp(self):
        """Patch module_provisioned wherever the module was imported from
        (local library tree first, then the ansible tree).

        NOTE(review): the started patch is never stopped (no tearDown /
        addCleanup) -- confirm the leak across tests is intentional.
        """
        self.spec = ArgumentSpec()
        try:
            self.p1 = patch('library.modules.bigip_service_policy.module_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = True
        except Exception:
            self.p1 = patch('ansible.modules.network.f5.bigip_service_policy.module_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = True

    def test_create_selfip(self, *args):
        """Creating a new service policy reports changed=True.

        NOTE(review): the name looks copy-pasted from a self-IP test;
        it actually exercises service-policy creation.
        """
        set_module_args(dict(
            name='foo',
            description='my description',
            timer_policy='timer1',
            port_misuse_policy='misuse1',
            partition='Common',
            state='present',
            password='password',
            server='localhost',
            user='admin'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)
        mm.module_provisioned = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
| mit |
KontorConsulting/odoo | openerp/addons/test_inherits/tests/test_inherits.py | 266 | 1066 | # -*- coding: utf-8 -*-
from openerp.tests import common
class test_inherits(common.TransactionCase):
    """Exercise _inherits delegation across three model levels
    (``test.pallet`` delegating down through a box model)."""

    def test_create_3_levels_inherits(self):
        """ Check that we can create an inherits on 3 levels """
        # Creating the top-level record must propagate the delegated
        # fields down to the underlying models.
        pallet = self.env['test.pallet'].create({
            'name': 'B',
            'field_in_box': 'box',
            'field_in_pallet': 'pallet',
        })
        self.assertTrue(pallet)
        self.assertEqual(pallet.name, 'B')
        self.assertEqual(pallet.field_in_box, 'box')
        self.assertEqual(pallet.field_in_pallet, 'pallet')

    def test_read_3_levels_inherits(self):
        """ Check that we can read an inherited field on 3 levels """
        pallet = self.env.ref('test_inherits.pallet_a')
        self.assertEqual(pallet.read(['name']), [{'id': pallet.id, 'name': 'Unit A'}])

    def test_write_3_levels_inherits(self):
        """ Check that we can write an inherited field on 3 levels """
        pallet = self.env.ref('test_inherits.pallet_a')
        pallet.write({'name': 'C'})
        self.assertEqual(pallet.name, 'C')
| agpl-3.0 |
mahak/ansible | lib/ansible/modules/template.py | 24 | 2515 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: template
version_added: historical
short_description: Template a file out to a target host
options:
follow:
description:
- Determine whether symbolic links should be followed.
- When set to C(yes) symbolic links will be followed, if they exist.
- When set to C(no) symbolic links will not be followed.
- Previous to Ansible 2.4, this was hardcoded as C(yes).
type: bool
default: no
version_added: '2.4'
notes:
- For Windows you can use M(ansible.windows.win_template) which uses '\\r\\n' as C(newline_sequence) by default.
seealso:
- module: ansible.builtin.copy
- module: ansible.windows.win_copy
- module: ansible.windows.win_template
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- backup
- files
- template_common
- validate
'''
EXAMPLES = r'''
- name: Template a file to /etc/file.conf
ansible.builtin.template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: '0644'
- name: Template a file, using symbolic modes (equivalent to 0644)
ansible.builtin.template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: u=rw,g=r,o=r
- name: Copy a version of named.conf that is dependent on the OS. setype obtained by doing ls -Z /etc/named.conf on original file
ansible.builtin.template:
src: named.conf_{{ ansible_os_family }}.j2
dest: /etc/named.conf
group: named
setype: named_conf_t
mode: 0640
- name: Create a DOS-style text file from a template
ansible.builtin.template:
src: config.ini.j2
dest: /share/windows/config.ini
newline_sequence: '\r\n'
- name: Copy a new sudoers file into place, after passing validation with visudo
ansible.builtin.template:
src: /mine/sudoers
dest: /etc/sudoers
validate: /usr/sbin/visudo -cf %s
- name: Update sshd configuration safely, avoid locking yourself out
ansible.builtin.template:
src: etc/ssh/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: '0600'
validate: /usr/sbin/sshd -t -f %s
backup: yes
'''
| gpl-3.0 |
lokirius/python-for-android | python3-alpha/python3-src/Lib/lib2to3/pgen2/grammar.py | 54 | 5379 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
# Python imports
import pickle
# Local imports
from . import token, tokenize
class Grammar(object):
    """Pgen parsing tables conversion class.

    Once initialized, this class supplies the grammar tables for the
    parsing engine implemented by parse.py.  The parsing engine
    accesses the instance variables directly.  The class here does not
    provide initialization of the tables; several subclasses exist to
    do this (see the conv and pgen modules).

    The load() method reads the tables from a pickle file, which is
    much faster than the other ways offered by subclasses.  The pickle
    file is written by calling dump() (after loading the grammar
    tables using a subclass).  The report() method prints a readable
    representation of the tables to stdout, for debugging.

    The instance variables are as follows:

    symbol2number -- a dict mapping symbol names to numbers.  Symbol
                     numbers are always 256 or higher, to distinguish
                     them from token numbers, which are between 0 and
                     255 (inclusive).

    number2symbol -- a dict mapping numbers to symbol names;
                     these two are each other's inverse.

    states        -- a list of DFAs, where each DFA is a list of
                     states, each state is a list of arcs, and each
                     arc is a (i, j) pair where i is a label and j is
                     a state number.  The DFA number is the index into
                     this list.  (This name is slightly confusing.)
                     Final states are represented by a special arc of
                     the form (0, j) where j is its own state number.

    dfas          -- a dict mapping symbol numbers to (DFA, first)
                     pairs, where DFA is an item from the states list
                     above, and first is a set of tokens that can
                     begin this grammar rule (represented by a dict
                     whose values are always 1).

    labels        -- a list of (x, y) pairs where x is either a token
                     number or a symbol number, and y is either None
                     or a string; the strings are keywords.  The label
                     number is the index in this list; label numbers
                     are used to mark state transitions (arcs) in the
                     DFAs.

    start         -- the number of the grammar's start symbol.

    keywords      -- a dict mapping keyword strings to arc labels.

    tokens        -- a dict mapping token numbers to arc labels.

    """

    def __init__(self):
        self.symbol2number = {}
        self.number2symbol = {}
        self.states = []
        self.dfas = {}
        # Label 0 is reserved as the "empty" label.
        self.labels = [(0, "EMPTY")]
        self.keywords = {}
        self.tokens = {}
        self.symbol2label = {}
        self.start = 256

    def dump(self, filename):
        """Dump the grammar tables to a pickle file."""
        # Use a context manager so the file is closed even if pickling
        # raises (the previous open()/close() pair leaked the handle on
        # error).
        with open(filename, "wb") as f:
            pickle.dump(self.__dict__, f, 2)

    def load(self, filename):
        """Load the grammar tables from a pickle file."""
        with open(filename, "rb") as f:
            d = pickle.load(f)
        self.__dict__.update(d)

    def copy(self):
        """Return a copy of the grammar.

        The table dicts and lists are copied one level deep, so the copy
        can be extended without mutating the original's tables.
        """
        new = self.__class__()
        for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
                          "tokens", "symbol2label"):
            setattr(new, dict_attr, getattr(self, dict_attr).copy())
        new.labels = self.labels[:]
        new.states = self.states[:]
        new.start = self.start
        return new

    def report(self):
        """Dump the grammar tables to standard output, for debugging."""
        from pprint import pprint
        print("s2n")
        pprint(self.symbol2number)
        print("n2s")
        pprint(self.number2symbol)
        print("states")
        pprint(self.states)
        print("dfas")
        pprint(self.dfas)
        print("labels")
        pprint(self.labels)
        print("start", self.start)
# Map from operator to number (since tokenize doesn't do this)
# Each non-empty line of opmap_raw is "<operator> <token-name>"; the loop
# below turns it into the opmap dict, resolving names against the local
# token module (imported at the top of this file).
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""
# operator string -> numeric token code
opmap = {}
for line in opmap_raw.splitlines():
    if line:
        op, name = line.split()
        opmap[op] = getattr(token, name)
| apache-2.0 |
alikins/ansible | lib/ansible/modules/windows/win_mapped_drive.py | 15 | 2819 | #!/usr/bin/python
# This file is part of Ansible
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub, actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_mapped_drive
version_added: '2.4'
short_description: maps a network drive for a user
description:
- Allows you to modify mapped network drives for individual users.
notes:
- This can only map a network drive for the current executing user and does not
allow you to set a default drive for all users of a system. Use other
Microsoft tools like GPOs to achieve this goal.
- You cannot use this module to access a mapped drive in another Ansible task,
drives mapped with this module are only accessible when logging in
interactively with the user through the console or RDP.
options:
letter:
description:
- The letter of the network path to map to.
- This letter must not already be in use with Windows.
required: yes
password:
description:
- The password for C(username).
path:
description:
- The UNC path to map the drive to.
- This is required if C(state=present).
- If C(state=absent) and path is not set, the module will delete the mapped
drive regardless of the target.
- If C(state=absent) and the path is set, the module will throw an error if
path does not match the target of the mapped drive.
state:
description:
- If C(state=present) will ensure the mapped drive exists.
- If C(state=absent) will ensure the mapped drive does not exist.
choices: [ absent, present ]
default: present
username:
description:
- Credentials to map the drive with.
- The username MUST include the domain or servername like SERVER\user, see
the example for more information.
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: create a mapped drive under Z
win_mapped_drive:
letter: Z
path: \\domain\appdata\accounting
- name: delete any mapped drives under Z
win_mapped_drive:
letter: Z
state: absent
- name: only delete the mapped drive Z if the paths match (error is thrown otherwise)
win_mapped_drive:
letter: Z
path: \\domain\appdata\accounting
state: absent
- name: create mapped drive with local credentials
win_mapped_drive:
letter: M
path: \\SERVER\c$
username: SERVER\Administrator
password: Password
- name: create mapped drive with domain credentials
win_mapped_drive:
letter: M
path: \\domain\appdata\it
username: DOMAIN\IT
password: Password
'''
RETURN = r'''
'''
| gpl-3.0 |
sjev/ibpy | ib/ext/EWrapper.py | 8 | 6194 | #!/usr/bin/env python
""" generated source for module EWrapper """
#
# Original file copyright original author(s).
# This file copyright Troy Melhase, troy@gci.net.
#
# WARNING: all changes to this file will be lost.
from abc import ABCMeta, abstractmethod
from ib.ext.AnyWrapper import AnyWrapper
#
# * EWrapper.java
# *
#
# package: com.ib.client
class EWrapper(AnyWrapper):
    """Abstract callback interface of the Interactive Brokers API.

    Mechanically generated from the Java source ``EWrapper.java``
    (package com.ib.client).  Every method is abstract; concrete
    subclasses implement them to receive events such as tick data,
    order status, account updates, contract/execution details, market
    depth, scanner results and historical bars.  The per-method
    docstrings below are generator artifacts and are kept as-is.
    """
    __metaclass__ = ABCMeta

    # /////////////////////////////////////////////////////////////////////
    # Interface methods
    # /////////////////////////////////////////////////////////////////////
    @abstractmethod
    def tickPrice(self, tickerId, field, price, canAutoExecute):
        """ generated source for method tickPrice """

    @abstractmethod
    def tickSize(self, tickerId, field, size):
        """ generated source for method tickSize """

    @abstractmethod
    def tickOptionComputation(self, tickerId, field, impliedVol, delta, optPrice, pvDividend, gamma, vega, theta, undPrice):
        """ generated source for method tickOptionComputation """

    @abstractmethod
    def tickGeneric(self, tickerId, tickType, value):
        """ generated source for method tickGeneric """

    @abstractmethod
    def tickString(self, tickerId, tickType, value):
        """ generated source for method tickString """

    @abstractmethod
    def tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, impliedFuture, holdDays, futureExpiry, dividendImpact, dividendsToExpiry):
        """ generated source for method tickEFP """

    @abstractmethod
    def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld):
        """ generated source for method orderStatus """

    @abstractmethod
    def openOrder(self, orderId, contract, order, orderState):
        """ generated source for method openOrder """

    @abstractmethod
    def openOrderEnd(self):
        """ generated source for method openOrderEnd """

    @abstractmethod
    def updateAccountValue(self, key, value, currency, accountName):
        """ generated source for method updateAccountValue """

    @abstractmethod
    def updatePortfolio(self, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL, accountName):
        """ generated source for method updatePortfolio """

    @abstractmethod
    def updateAccountTime(self, timeStamp):
        """ generated source for method updateAccountTime """

    @abstractmethod
    def accountDownloadEnd(self, accountName):
        """ generated source for method accountDownloadEnd """

    @abstractmethod
    def nextValidId(self, orderId):
        """ generated source for method nextValidId """

    @abstractmethod
    def contractDetails(self, reqId, contractDetails):
        """ generated source for method contractDetails """

    @abstractmethod
    def bondContractDetails(self, reqId, contractDetails):
        """ generated source for method bondContractDetails """

    @abstractmethod
    def contractDetailsEnd(self, reqId):
        """ generated source for method contractDetailsEnd """

    @abstractmethod
    def execDetails(self, reqId, contract, execution):
        """ generated source for method execDetails """

    @abstractmethod
    def execDetailsEnd(self, reqId):
        """ generated source for method execDetailsEnd """

    @abstractmethod
    def updateMktDepth(self, tickerId, position, operation, side, price, size):
        """ generated source for method updateMktDepth """

    @abstractmethod
    def updateMktDepthL2(self, tickerId, position, marketMaker, operation, side, price, size):
        """ generated source for method updateMktDepthL2 """

    @abstractmethod
    def updateNewsBulletin(self, msgId, msgType, message, origExchange):
        """ generated source for method updateNewsBulletin """

    @abstractmethod
    def managedAccounts(self, accountsList):
        """ generated source for method managedAccounts """

    @abstractmethod
    def receiveFA(self, faDataType, xml):
        """ generated source for method receiveFA """

    @abstractmethod
    def historicalData(self, reqId, date, open, high, low, close, volume, count, WAP, hasGaps):
        """ generated source for method historicalData """

    @abstractmethod
    def scannerParameters(self, xml):
        """ generated source for method scannerParameters """

    @abstractmethod
    def scannerData(self, reqId, rank, contractDetails, distance, benchmark, projection, legsStr):
        """ generated source for method scannerData """

    @abstractmethod
    def scannerDataEnd(self, reqId):
        """ generated source for method scannerDataEnd """

    @abstractmethod
    def realtimeBar(self, reqId, time, open, high, low, close, volume, wap, count):
        """ generated source for method realtimeBar """

    @abstractmethod
    def currentTime(self, time):
        """ generated source for method currentTime """

    @abstractmethod
    def fundamentalData(self, reqId, data):
        """ generated source for method fundamentalData """

    @abstractmethod
    def deltaNeutralValidation(self, reqId, underComp):
        """ generated source for method deltaNeutralValidation """

    @abstractmethod
    def tickSnapshotEnd(self, reqId):
        """ generated source for method tickSnapshotEnd """

    @abstractmethod
    def marketDataType(self, reqId, marketDataType):
        """ generated source for method marketDataType """

    @abstractmethod
    def commissionReport(self, commissionReport):
        """ generated source for method commissionReport """

    @abstractmethod
    def position(self, account, contract, pos, avgCost):
        """ generated source for method position """

    @abstractmethod
    def positionEnd(self):
        """ generated source for method positionEnd """

    @abstractmethod
    def accountSummary(self, reqId, account, tag, value, currency):
        """ generated source for method accountSummary """

    @abstractmethod
    def accountSummaryEnd(self, reqId):
        """ generated source for method accountSummaryEnd """
| bsd-3-clause |
gaddman/ansible | lib/ansible/modules/network/aci/aci_config_snapshot.py | 2 | 9775 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_config_snapshot
short_description: Manage Config Snapshots (config:Snapshot, config:ExportP)
description:
- Manage Config Snapshots on Cisco ACI fabrics.
- Creating new Snapshots is done using the configExportP class.
- Removing Snapshots is done using the configSnapshot class.
notes:
- The APIC does not provide a mechanism for naming the snapshots.
- 'Snapshot files use the following naming structure: ce_<config export policy name>-<yyyy>-<mm>-<dd>T<hh>:<mm>:<ss>.<mss>+<hh>:<mm>.'
- 'Snapshot objects use the following naming structure: run-<yyyy>-<mm>-<dd>T<hh>-<mm>-<ss>.'
seealso:
- module: aci_config_rollback
- name: APIC Management Information Model reference
description: More information about the internal APIC classes B(config:Snapshot) and B(config:ExportP).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
description:
description:
- The description for the Config Export Policy.
aliases: [ descr ]
export_policy:
description:
- The name of the Export Policy to use for Config Snapshots.
aliases: [ name ]
format:
description:
- Sets the config backup to be formatted in JSON or XML.
- The APIC defaults to C(json) when unset.
choices: [ json, xml ]
include_secure:
description:
- Determines if secure information should be included in the backup.
- The APIC defaults to C(yes) when unset.
type: bool
max_count:
description:
- Determines how many snapshots can exist for the Export Policy before the APIC starts to rollover.
- Accepted values range between C(1) and C(10).
- The APIC defaults to C(3) when unset.
type: int
snapshot:
description:
- The name of the snapshot to delete.
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Create a Snapshot
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
state: present
export_policy: config_backup
max_count: 10
description: Backups taken before new configs are applied.
delegate_to: localhost
- name: Query all Snapshots
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
- name: Query Snapshots associated with a particular Export Policy
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
state: query
delegate_to: localhost
register: query_result
- name: Delete a Snapshot
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
snapshot: run-2017-08-24T17-20-05
state: absent
delegate_to: localhost
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
    """Ansible module entry point.

    Builds the argument spec, normalizes user input (the 1..10 window for
    max_count, the 'run-' prefix on snapshot names) and then drives the
    ACIModule request pipeline:
      * state=present: trigger a new configuration snapshot via a
        configExportP object with adminSt='triggered'.
      * state=absent:  mark an existing configSnapshot for deletion
        (retire='yes'), but only if it currently exists on the APIC.
      * state=query:   return the existing snapshot object(s).
    """
    argument_spec = aci_argument_spec()
    argument_spec.update(
        description=dict(type='str', aliases=['descr']),
        export_policy=dict(type='str', aliases=['name']),  # Not required for querying all objects
        format=dict(type='str', choices=['json', 'xml']),
        include_secure=dict(type='bool'),
        max_count=dict(type='int'),
        snapshot=dict(type='str'),
        state=dict(type='str', choices=['absent', 'present', 'query'], default='present'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        # Triggering a snapshot is an APIC-side action; it cannot be simulated.
        supports_check_mode=False,
        required_if=[
            ['state', 'absent', ['export_policy', 'snapshot']],
            ['state', 'present', ['export_policy']],
        ],
    )

    aci = ACIModule(module)

    description = module.params['description']
    export_policy = module.params['export_policy']
    file_format = module.params['format']
    include_secure = aci.boolean(module.params['include_secure'])
    max_count = module.params['max_count']
    # The APIC expects maxSnapshotCount as a string; only 1..10 is accepted.
    if max_count is not None:
        if max_count in range(1, 11):
            max_count = str(max_count)
        else:
            module.fail_json(msg="Parameter 'max_count' must be a number between 1 and 10")
    snapshot = module.params['snapshot']
    # Snapshot names carry a 'run-' prefix on the APIC (presumably added by the
    # controller); add it here when the user supplied the bare name.
    if snapshot is not None and not snapshot.startswith('run-'):
        snapshot = 'run-' + snapshot
    state = module.params['state']

    if state == 'present':
        aci.construct_url(
            root_class=dict(
                aci_class='configExportP',
                aci_rn='fabric/configexp-{0}'.format(export_policy),
                module_object=export_policy,
                target_filter={'name': export_policy},
            ),
        )

        aci.get_existing()

        aci.payload(
            aci_class='configExportP',
            class_config=dict(
                # adminSt='triggered' tells the APIC to run the export now.
                adminSt='triggered',
                descr=description,
                format=file_format,
                includeSecureFields=include_secure,
                maxSnapshotCount=max_count,
                name=export_policy,
                snapshot='yes',
            ),
        )

        aci.get_diff('configExportP')

        # Create a new Snapshot
        aci.post_config()

    else:
        # Prefix the proper url to export_policy
        if export_policy is not None:
            export_policy = 'uni/fabric/configexp-{0}'.format(export_policy)

        aci.construct_url(
            root_class=dict(
                aci_class='configSnapshotCont',
                aci_rn='backupst/snapshots-[{0}]'.format(export_policy),
                module_object=export_policy,
                target_filter={'name': export_policy},
            ),
            subclass_1=dict(
                aci_class='configSnapshot',
                aci_rn='snapshot-{0}'.format(snapshot),
                module_object=snapshot,
                target_filter={'name': snapshot},
            ),
        )

        aci.get_existing()

        if state == 'absent':
            # Build POST request used to remove the Snapshot
            aci.payload(
                aci_class='configSnapshot',
                class_config=dict(
                    name=snapshot,
                    retire="yes",
                ),
            )

            # Only push the retire request when the snapshot actually exists.
            if aci.existing:
                aci.get_diff('configSnapshot')

                # Mark Snapshot for Deletion
                aci.post_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
| gpl-3.0 |
mazaclub/mazabot-core | plugins/Nickometer/test.py | 19 | 1870 | ###
# Copyright (c) 2005, aafshar@gmail.com
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class NickometerTestCase(PluginTestCase):
    """Smoke tests for the Nickometer plugin."""
    plugins = ('Nickometer',)

    def testNickometer(self):
        # Both the no-argument form and an explicit-nick form must not error.
        for command in ('nickometer', 'nickometer jemfinch'):
            self.assertNotError(command)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
caronc/nzb-subliminal | Subliminal/guessit/transfo/guess_filetype.py | 6 | 11007 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import mimetypes
import os.path
import re
from guessit.guess import Guess
from guessit.patterns.extension import subtitle_exts, info_exts, video_exts
from guessit.transfo import TransformerException
from guessit.plugins.transformers import Transformer, get_transformer
from guessit.matcher import log_found_guess, found_guess, found_property
class GuessFiletype(Transformer):
    """Transformer that decides the overall type of the file being guessed.

    The type is one of 'movie', 'episode', 'subtitle', 'info', the combined
    forms ('moviesubtitle', 'episodesubtitle', 'movieinfo', 'episodeinfo'),
    'video' or 'unknown'.  The decision is based on the file extension, the
    containing folder names, hardcoded exceptions and properties found by
    other transformers.
    """

    def __init__(self):
        # Priority 200: this transformer must run early, before the ones that
        # depend on the guessed type.
        Transformer.__init__(self, 200)

    # List of well known movies and series, hardcoded because they cannot be
    # guessed appropriately otherwise
    MOVIES = ['OSS 117']
    SERIES = ['Band of Brothers']

    MOVIES = [m.lower() for m in MOVIES]
    SERIES = [s.lower() for s in SERIES]

    def guess_filetype(self, mtree, options=None):
        """Return a ``(filetype, other)`` tuple for *mtree*.

        ``filetype`` is one of the type strings documented on the class;
        ``other`` is a dict with the detected 'container' or 'extension', if
        any.
        """
        options = options or {}

        # put the filetype inside a dummy container to be able to have the
        # following functions work correctly as closures
        # this is a workaround for python 2 which doesn't have the
        # 'nonlocal' keyword which we could use here in the upgrade_* functions
        # (python 3 does have it)
        filetype_container = [mtree.guess.get('type')]
        other = {}
        filename = mtree.string

        def upgrade_episode():
            if filetype_container[0] == 'subtitle':
                filetype_container[0] = 'episodesubtitle'
            elif filetype_container[0] == 'info':
                filetype_container[0] = 'episodeinfo'
            elif (not filetype_container[0] or
                  filetype_container[0] == 'video'):
                filetype_container[0] = 'episode'

        def upgrade_movie():
            if filetype_container[0] == 'subtitle':
                filetype_container[0] = 'moviesubtitle'
            elif filetype_container[0] == 'info':
                filetype_container[0] = 'movieinfo'
            elif (not filetype_container[0] or
                  filetype_container[0] == 'video'):
                filetype_container[0] = 'movie'

        def upgrade_subtitle():
            if filetype_container[0] == 'movie':
                filetype_container[0] = 'moviesubtitle'
            elif filetype_container[0] == 'episode':
                filetype_container[0] = 'episodesubtitle'
            elif not filetype_container[0]:
                filetype_container[0] = 'subtitle'

        def upgrade_info():
            if filetype_container[0] == 'movie':
                filetype_container[0] = 'movieinfo'
            elif filetype_container[0] == 'episode':
                filetype_container[0] = 'episodeinfo'
            elif not filetype_container[0]:
                filetype_container[0] = 'info'

        # look at the extension first
        fileext = os.path.splitext(filename)[1][1:].lower()
        if fileext in subtitle_exts:
            upgrade_subtitle()
            other = {'container': fileext}
        elif fileext in info_exts:
            upgrade_info()
            other = {'container': fileext}
        elif fileext in video_exts:
            other = {'container': fileext}
        else:
            if fileext and not options.get('name_only'):
                other = {'extension': fileext}
                list(mtree.unidentified_leaves())[-1].guess = Guess(other)

        # check whether we are in a 'Movies', 'Tv Shows', ... folder
        folder_rexps = [(r'Movies?', upgrade_movie),
                        (r'Films?', upgrade_movie),
                        (r'Tv[ _-]?Shows?', upgrade_episode),
                        (r'Series?', upgrade_episode),
                        (r'Episodes?', upgrade_episode)]
        for frexp, upgrade_func in folder_rexps:
            frexp = re.compile(frexp, re.IGNORECASE)
            for pathgroup in mtree.children:
                if frexp.match(pathgroup.value):
                    upgrade_func()
                    return filetype_container[0], other

        # check for a few specific cases which will unintentionally make the
        # following heuristics confused (eg: OSS 117 will look like an episode,
        # season 1, epnum 17, when it is in fact a movie)
        fname = mtree.clean_string(filename).lower()
        for m in self.MOVIES:
            if m in fname:
                self.log.debug('Found in exception list of movies -> type = movie')
                upgrade_movie()
                return filetype_container[0], other
        for s in self.SERIES:
            if s in fname:
                self.log.debug('Found in exception list of series -> type = episode')
                upgrade_episode()
                return filetype_container[0], other

        # if we have an episode_rexp (eg: s02e13), it is an episode
        episode_transformer = get_transformer('guess_episodes_rexps')
        if episode_transformer:
            filename_parts = list(x.value for x in mtree.unidentified_leaves())
            filename_parts.append(filename)
            for filename_part in filename_parts:
                guess = episode_transformer.guess_episodes_rexps(filename_part)
                if guess:
                    self.log.debug('Found guess_episodes_rexps: %s -> type = episode', guess)
                    upgrade_episode()
                    return filetype_container[0], other

        properties_transformer = get_transformer('guess_properties')
        if properties_transformer:
            # if we have certain properties characteristic of episodes, it is an ep
            found = properties_transformer.container.find_properties(filename, mtree, options, 'episodeFormat')
            guess = properties_transformer.container.as_guess(found, filename)
            if guess:
                self.log.debug('Found characteristic property of episodes: %s"', guess)
                upgrade_episode()
                return filetype_container[0], other

            weak_episode_transformer = get_transformer('guess_weak_episodes_rexps')
            if weak_episode_transformer:
                found = properties_transformer.container.find_properties(filename, mtree, options, 'crc32')
                guess = properties_transformer.container.as_guess(found, filename)
                if guess:
                    found = weak_episode_transformer.container.find_properties(filename, mtree, options)
                    guess = weak_episode_transformer.container.as_guess(found, filename)
                    if guess:
                        self.log.debug('Found characteristic property of episodes: %s"', guess)
                        upgrade_episode()
                        return filetype_container[0], other

            found = properties_transformer.container.find_properties(filename, mtree, options, 'format')
            guess = properties_transformer.container.as_guess(found, filename)
            if guess and guess['format'] in ('HDTV', 'WEBRip', 'WEB-DL', 'DVB'):
                # Use weak episodes only if TV or WEB source
                # (weak_episode_transformer was already fetched above)
                if weak_episode_transformer:
                    guess = weak_episode_transformer.guess_weak_episodes_rexps(filename)
                    if guess:
                        self.log.debug('Found guess_weak_episodes_rexps: %s -> type = episode', guess)
                        upgrade_episode()
                        return filetype_container[0], other

        website_transformer = get_transformer('guess_website')
        if website_transformer:
            found = website_transformer.container.find_properties(filename, mtree, options, 'website')
            guess = website_transformer.container.as_guess(found, filename)
            if guess:
                for namepart in ('tv', 'serie', 'episode'):
                    if namepart in guess['website']:
                        # origin-specific type
                        self.log.debug('Found characteristic property of episodes: %s', guess)
                        upgrade_episode()
                        return filetype_container[0], other

        if filetype_container[0] in ('subtitle', 'info') or (not filetype_container[0] and fileext in video_exts):
            # if no episode info found, assume it's a movie
            self.log.debug('Nothing characteristic found, assuming type = movie')
            upgrade_movie()

        if not filetype_container[0]:
            self.log.debug('Nothing characteristic found, assuming type = unknown')
            filetype_container[0] = 'unknown'

        return filetype_container[0], other

    def process(self, mtree, options=None):
        """guess the file type now (will be useful later)

        Stores the guessed type on ``mtree.guess`` and attaches the
        container/mimetype info to the extension node.  Raises
        TransformerException when the type is unknown, unless the
        'name_only' option is set, in which case 'movie' is assumed with
        lower confidence.
        """
        # Bug fix: options defaults to None, but it is dereferenced with
        # .get() in the 'unknown' branch below; normalize it up front.
        options = options or {}
        filetype, other = self.guess_filetype(mtree, options)

        mtree.guess.set('type', filetype, confidence=1.0)
        log_found_guess(mtree.guess)

        filetype_info = Guess(other, confidence=1.0)
        # guess the mimetype of the filename
        # TODO: handle other mimetypes not found on the default type_maps
        # mimetypes.types_map['.srt']='text/subtitle'
        mime, _ = mimetypes.guess_type(mtree.string, strict=False)
        if mime is not None:
            filetype_info.update({'mimetype': mime}, confidence=1.0)

        node_ext = mtree.node_at((-1,))
        found_guess(node_ext, filetype_info)

        if mtree.guess.get('type') in [None, 'unknown']:
            if options.get('name_only'):
                mtree.guess.set('type', 'movie', confidence=0.6)
            else:
                raise TransformerException(__name__, 'Unknown file type')

    def post_process(self, mtree, options=None):
        # now look whether there are some specific hints for episode vs movie
        # If we have a date and no year, this is a TV Show.
        if 'date' in mtree.info and 'year' not in mtree.info and mtree.info.get('type') != 'episode':
            mtree.guess['type'] = 'episode'
            for type_leaves in mtree.leaves_containing('type'):
                type_leaves.guess['type'] = 'episode'
            for title_leaves in mtree.leaves_containing('title'):
                title_leaves.guess.rename('title', 'series')
TheMOOCAgency/edx-platform | lms/djangoapps/course_blocks/management/commands/tests/test_generate_course_blocks.py | 22 | 4170 | """
Tests for generate_course_blocks management command.
"""
from django.core.management.base import CommandError
from mock import patch
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from .. import generate_course_blocks
from openedx.core.djangoapps.content.block_structure.tests.helpers import is_course_in_block_structure_cache
class TestGenerateCourseBlocks(ModuleStoreTestCase):
    """
    Tests generate course blocks management command.
    """
    def setUp(self):
        """
        Create courses in modulestore.
        """
        super(TestGenerateCourseBlocks, self).setUp()
        self.course_1 = CourseFactory.create()
        self.course_2 = CourseFactory.create()
        self.command = generate_course_blocks.Command()

    def _assert_courses_not_in_block_cache(self, *courses):
        """
        Assert courses don't exist in the course block cache.
        """
        for course_key in courses:
            self.assertFalse(is_course_in_block_structure_cache(course_key, self.store))

    def _assert_courses_in_block_cache(self, *courses):
        """
        Assert courses exist in course block cache.
        """
        for course_key in courses:
            self.assertTrue(is_course_in_block_structure_cache(course_key, self.store))

    def test_generate_all(self):
        """Running with --all populates the cache; a second run reuses it."""
        self._assert_courses_not_in_block_cache(self.course_1.id, self.course_2.id)
        self.command.handle(all=True)
        self._assert_courses_in_block_cache(self.course_1.id, self.course_2.id)
        # The second run should hit the cache and never touch the modulestore.
        with patch(
            'openedx.core.lib.block_structure.factory.BlockStructureFactory.create_from_modulestore'
        ) as mock_update_from_store:
            self.command.handle(all=True)
            mock_update_from_store.assert_not_called()

    def test_generate_force(self):
        """--force regenerates from the modulestore even when cached."""
        self._assert_courses_not_in_block_cache(self.course_1.id, self.course_2.id)
        self.command.handle(all=True)
        self._assert_courses_in_block_cache(self.course_1.id, self.course_2.id)
        with patch(
            'openedx.core.lib.block_structure.factory.BlockStructureFactory.create_from_modulestore'
        ) as mock_update_from_store:
            self.command.handle(all=True, force=True)
            mock_update_from_store.assert_called()

    def test_generate_one(self):
        """A single course key argument only caches that course."""
        self._assert_courses_not_in_block_cache(self.course_1.id, self.course_2.id)
        self.command.handle(unicode(self.course_1.id))
        self._assert_courses_in_block_cache(self.course_1.id)
        self._assert_courses_not_in_block_cache(self.course_2.id)

    @patch('lms.djangoapps.course_blocks.management.commands.generate_course_blocks.log')
    def test_generate_no_dags(self, mock_log):
        """--dags on a tree-shaped course emits no warnings."""
        self.command.handle(dags=True, all=True)
        # Was assertEquals (deprecated unittest alias); use assertEqual.
        self.assertEqual(mock_log.warning.call_count, 0)

    @patch('lms.djangoapps.course_blocks.management.commands.generate_course_blocks.log')
    def test_generate_with_dags(self, mock_log):
        """--dags warns once when a block has two parents (a DAG)."""
        with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
            item1 = ItemFactory.create(parent=self.course_1)
            item2 = ItemFactory.create(parent=item1)
            item3 = ItemFactory.create(parent=item1)
            # Give item3 a second parent (item2) to create the DAG.
            item2.children.append(item3.location)
            self.store.update_item(item2, ModuleStoreEnum.UserID.mgmt_command)
            self.store.publish(self.course_1.location, ModuleStoreEnum.UserID.mgmt_command)
        self.command.handle(dags=True, all=True)
        self.assertEqual(mock_log.warning.call_count, 1)

    @patch('lms.djangoapps.course_blocks.management.commands.generate_course_blocks.log')
    def test_not_found_key(self, mock_log):
        """A well-formed but nonexistent course key is logged, not raised."""
        self.command.handle('fake/course/id', all=False)
        self.assertTrue(mock_log.exception.called)

    def test_invalid_key(self):
        """A malformed course key raises CommandError."""
        with self.assertRaises(CommandError):
            self.command.handle('not/found', all=False)

    def test_no_params(self):
        """Neither --all nor a course key raises CommandError."""
        with self.assertRaises(CommandError):
            self.command.handle(all=False)
| agpl-3.0 |
smikes/node-gyp | gyp/pylib/gyp/generator/android.py | 106 | 44050 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
# Expansion variables gyp substitutes into target specs.  The values are
# make-syntax expressions, resolved when the generated Android.mk files are
# parsed by the Android build system.
generator_default_variables = {
  'OS': 'android',
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.so',
  'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
  'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
  'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
  'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
  'LIB_DIR': '$(obj).$(TOOLSET)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(RULE_SOURCES)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(GYP_DEFAULT_CONFIGURATION)',
}

# Make supports multiple toolsets
generator_supports_multiple_toolsets = True

# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
  # Boolean to declare that this target does not want its name mangled.
  'android_unmangled_name',
]

# No extra path sections or rule-generated sources are declared by this
# generator (empty lists).
generator_additional_path_sections = []
generator_extra_sources_for_rules = []

# Appended once after all per-target .mk files have been emitted.
SHARED_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""

# Banner written at the top of every generated file (see Write()).
header = """\
# This file is generated by gyp; do not edit.
"""

# Include directories the Android build system always provides; kept here so
# the generator can recognize them (sources of each path are noted below).
android_standard_include_paths = set([
    # JNI_H_INCLUDE in build/core/binary.mk
    'dalvik/libnativehelper/include/nativehelper',
    # from SRC_HEADERS in build/core/config.mk
    'system/core/include',
    'hardware/libhardware/include',
    'hardware/libhardware_legacy/include',
    'hardware/ril/include',
    'dalvik/libnativehelper/include',
    'frameworks/native/include',
    'frameworks/native/opengl/include',
    'frameworks/base/include',
    'frameworks/base/opengl/include',
    'frameworks/base/native/include',
    'external/skia/include',
    # TARGET_C_INCLUDES in build/core/combo/TARGET_linux-arm.mk
    'bionic/libc/arch-arm/include',
    'bionic/libc/include',
    'bionic/libstdc++/include',
    'bionic/libc/kernel/common',
    'bionic/libc/kernel/arch-arm',
    'bionic/libm/include',
    'bionic/libm/include/arm',
    'bionic/libthread_db/include',
    ])

# Map gyp target types to Android module classes.
MODULE_CLASSES = {
    'static_library': 'STATIC_LIBRARIES',
    'shared_library': 'SHARED_LIBRARIES',
    'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
  """Return True when the make backend compiles files with |ext| as C++."""
  compilable_kind = make.COMPILABLE_EXTENSIONS.get(ext)
  return compilable_kind == 'cxx'
def Sourceify(path):
  """Convert a path to its source directory form.

  Other backends rewrite paths against options.generator_output here; the
  Android backend does not support that option, so the path is returned
  unchanged.
  """
  return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
# Populated by AndroidMkWriter.Write() as each target is emitted.
target_outputs = {}
# Map from qualified target to any linkable output.  A subset
# of target_outputs.  E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
# Only static/shared library targets are ever added here (see Write()).
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
  def __init__(self, android_top_dir):
    # Absolute path of the Android source tree root.  Presumably used when
    # relativizing paths for the generated makefiles (see LocalPathify) —
    # confirm against the rest of the class.
    self.android_top_dir = android_top_dir
  def Write(self, qualified_target, relative_target, base_path, output_filename,
            spec, configs, part_of_all):
    """The main entry point: writes a .mk file for a single target.

    The emitted file follows the Android build-system convention:
    CLEAR_VARS, then LOCAL_* variable assignments and action/rule/copy
    rules.  Statement order below matters — it is the order of the lines
    in the generated makefile.

    Arguments:
      qualified_target: target we're generating
      relative_target: qualified target name relative to the root
      base_path: path relative to source root we're building in, used to resolve
                 target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
    """
    make.ensure_directory_exists(output_filename)

    self.fp = open(output_filename, 'w')

    self.fp.write(header)

    # Cache target identity for use by the other Write* helpers.
    self.qualified_target = qualified_target
    self.relative_target = relative_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']

    deps, link_deps = self.ComputeDeps(spec)

    # Some of the generation below can add extra output, sources, or
    # link dependencies.  All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []

    self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
    self.android_module = self.ComputeAndroidModule(spec)
    (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
    self.output = self.output_binary = self.ComputeOutput(spec)

    # Standard header.
    self.WriteLn('include $(CLEAR_VARS)\n')

    # Module class and name.
    self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
    self.WriteLn('LOCAL_MODULE := ' + self.android_module)
    # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
    # The library module classes fail if the stem is set. ComputeOutputParts
    # makes sure that stem == modulename in these cases.
    if self.android_stem != self.android_module:
      self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
    self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
    self.WriteLn('LOCAL_MODULE_TAGS := optional')
    if self.toolset == 'host':
      self.WriteLn('LOCAL_IS_HOST_MODULE := true')

    # Grab output directories; needed for Actions and Rules.
    self.WriteLn('gyp_intermediate_dir := $(call local-intermediates-dir)')
    self.WriteLn('gyp_shared_intermediate_dir := '
                 '$(call intermediates-dir-for,GYP,shared)')
    self.WriteLn()

    # List files this target depends on so that actions/rules/copies/sources
    # can depend on the list.
    # TODO: doesn't pull in things through transitive link deps; needed?
    target_dependencies = [x[1] for x in deps if x[0] == 'path']
    self.WriteLn('# Make sure our deps are built first.')
    self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
                   local_pathify=True)

    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs)

    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs)

    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs)

    # GYP generated outputs.
    self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)

    # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
    # on both our dependency targets and our generated files.
    self.WriteLn('# Make sure our deps and generated files are built first.')
    self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
                 '$(GYP_GENERATED_OUTPUTS)')
    self.WriteLn()

    # Sources.
    if spec.get('sources', []) or extra_sources:
      self.WriteSources(spec, configs, extra_sources)

    self.WriteTarget(spec, configs, deps, link_deps, part_of_all)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = ('path', self.output_binary)

    # Update global list of link dependencies.
    if self.type == 'static_library':
      target_link_deps[qualified_target] = ('static', self.android_module)
    elif self.type == 'shared_library':
      target_link_deps[qualified_target] = ('shared', self.android_module)

    self.fp.close()
    return self.android_module
  def WriteActions(self, actions, extra_sources, extra_outputs):
    """Write Makefile code for any 'actions' from the gyp input.

    Each action becomes one make rule for its first ("primary") output;
    the remaining outputs depend on the primary one.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   actions (used to make other pieces dependent on these
                   actions)
    """
    for action in actions:
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        if not out.startswith('$'):
          print ('WARNING: Action for target "%s" writes output to local path '
                 '"%s".' % (self.target, out))
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs

      # Prepare the actual command.
      command = gyp.common.EncodePOSIXShellList(action['action'])
      if 'message' in action:
        quiet_cmd = 'Gyp action: %s ($@)' % action['message']
      else:
        quiet_cmd = 'Gyp action: %s ($@)' % name
      if len(dirs) > 0:
        # Create all output directories before the command runs.
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
      command = cd_action + command

      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir.  This replaces the gyp_*
      # variables for the action rule with an absolute version so that the
      # output goes in the right place.
      # Only write the gyp_* rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
      self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
      self.WriteLn('%s: gyp_intermediate_dir := '
                   '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)' %
                   main_output)
      self.WriteLn('%s: gyp_shared_intermediate_dir := '
                   '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)' %
                   main_output)

      # Android's envsetup.sh adds a number of directories to the path including
      # the built host binary directory. This causes actions/rules invoked by
      # gyp to sometimes use these instead of system versions, e.g. bison.
      # The built host binaries may not be suitable, and can cause errors.
      # So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
      # set by envsetup.
      self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
                   % main_output)

      # Spaces would break the space-separated make prerequisite lists below.
      for input in inputs:
        assert ' ' not in input, (
            "Spaces in action input filenames not supported (%s)" % input)
      for output in outputs:
        assert ' ' not in output, (
            "Spaces in action output filenames not supported (%s)" % output)

      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                   (main_output, ' '.join(map(self.LocalPathify, inputs))))
      self.WriteLn('\t@echo "%s"' % quiet_cmd)
      self.WriteLn('\t$(hide)%s\n' % command)
      for output in outputs[1:]:
        # Make each output depend on the main output, with an empty command
        # to force make to notice that the mtime has changed.
        self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))

      extra_outputs += outputs
      self.WriteLn()

    self.WriteLn()
  def WriteRules(self, rules, extra_sources, extra_outputs):
    """Write Makefile code for any 'rules' from the gyp input.

    One make rule is emitted per (rule, rule_source) pair; a phony
    "<module>_rule_trigger" target is appended to extra_sources so that
    building the module always runs the rules.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   rules (used to make other pieces dependent on these rules)
    """
    if len(rules) == 0:
      return
    rule_trigger = '%s_rule_trigger' % self.android_module

    did_write_rule = False
    for rule in rules:
      if len(rule.get('rule_sources', [])) == 0:
        continue
      did_write_rule = True
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      rule['rule_name']))
      self.WriteLn('\n### Generated for rule "%s":' % name)
      self.WriteLn('# "%s":' % rule)
      inputs = rule.get('inputs')
      for rule_source in rule.get('rule_sources', []):
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        # Expand %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders per source.
        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]

        dirs = set()
        for out in outputs:
          if not out.startswith('$'):
            print ('WARNING: Rule for target %s writes output to local path %s'
                   % (self.target, out))
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        extra_outputs += outputs
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources.extend(outputs)

        components = []
        for component in rule['action']:
          component = self.ExpandInputRoot(component, rule_source_root,
                                           rule_source_dirname)
          if '$(RULE_SOURCES)' in component:
            component = component.replace('$(RULE_SOURCES)',
                                          rule_source)
          components.append(component)

        command = gyp.common.EncodePOSIXShellList(components)
        cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
        command = cd_action + command
        if dirs:
          command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

        # We set up a rule to build the first output, and then set up
        # a rule for each additional output to depend on the first.
        outputs = map(self.LocalPathify, outputs)
        main_output = outputs[0]
        self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
        self.WriteLn('%s: gyp_intermediate_dir := '
                     '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)'
                     % main_output)
        self.WriteLn('%s: gyp_shared_intermediate_dir := '
                     '$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)'
                     % main_output)

        # See explanation in WriteActions.
        self.WriteLn('%s: export PATH := '
                     '$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)

        main_output_deps = self.LocalPathify(rule_source)
        if inputs:
          main_output_deps += ' '
          main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])

        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                     (main_output, main_output_deps))
        self.WriteLn('\t%s\n' % command)
        for output in outputs[1:]:
          self.WriteLn('%s: %s' % (output, main_output))
        self.WriteLn('.PHONY: %s' % (rule_trigger))
        self.WriteLn('%s: %s' % (rule_trigger, main_output))
        self.WriteLn('')
    if did_write_rule:
      extra_sources.append(rule_trigger)  # Force all rules to run.
      self.WriteLn('### Finished generating for all rules')
      self.WriteLn('')
  def WriteCopies(self, copies, extra_outputs):
    """Write Makefile code for any 'copies' from the gyp input.

    Each copied file gets its own make rule invoking $(ACP); the full set
    of outputs is collected in a make variable that is appended to
    extra_outputs.

    extra_outputs: a list that will be filled in with any outputs of this action
                   (used to make other pieces dependent on this action)
    """
    self.WriteLn('### Generated for copy rule.')

    variable = make.StringToMakefileVariable(self.relative_target + '_copies')
    outputs = []
    for copy in copies:
      for path in copy['files']:
        # The Android build system does not allow generation of files into the
        # source tree. The destination should start with a variable, which will
        # typically be $(gyp_intermediate_dir) or
        # $(gyp_shared_intermediate_dir). Note that we can't use an assertion
        # because some of the gyp tests depend on this.
        if not copy['destination'].startswith('$'):
          print ('WARNING: Copy rule for target %s writes output to '
                 'local path %s' % (self.target, copy['destination']))

        # LocalPathify() calls normpath, stripping trailing slashes.
        path = Sourceify(self.LocalPathify(path))
        filename = os.path.split(path)[1]
        output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
                                                          filename)))

        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
                     (output, path))
        self.WriteLn('\t@echo Copying: $@')
        self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
        self.WriteLn('\t$(hide) $(ACP) -r $< $@')
        self.WriteLn()
        outputs.append(output)
    self.WriteLn('%s = %s' % (variable,
                              ' '.join(map(make.QuoteSpaces, outputs))))
    extra_outputs.append('$(%s)' % variable)
    self.WriteLn()
def WriteSourceFlags(self, spec, configs):
  """Write out the flags and include paths used to compile source files for
  the current target.

  Only the default configuration's flags are emitted (the Android backend
  supports a single configuration).

  Args:
    spec, configs: input from gyp.
  """
  config = configs[spec['default_configuration']]

  # '-I' entries found inside cflags are pulled out and routed into
  # LOCAL_C_INCLUDES below instead of being passed through verbatim.
  extracted_includes = []

  self.WriteLn('\n# Flags passed to both C and C++ files.')
  cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
      config.get('cflags'))
  extracted_includes.extend(includes_from_cflags)
  self.WriteList(cflags, 'MY_CFLAGS')

  cflags_c, includes_from_cflags_c = self.ExtractIncludesFromCFlags(
      config.get('cflags_c'))
  extracted_includes.extend(includes_from_cflags_c)
  self.WriteList(cflags_c, 'MY_CFLAGS_C')

  self.WriteList(config.get('defines'), 'MY_DEFS', prefix='-D',
                 quoter=make.EscapeCppDefine)
  self.WriteLn('LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)')

  # Undefine ANDROID for host modules
  # TODO: the source code should not use macro ANDROID to tell if it's host or
  # target module.
  if self.toolset == 'host':
    self.WriteLn('# Undefine ANDROID for host modules')
    self.WriteLn('LOCAL_CFLAGS += -UANDROID')

  self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
  includes = list(config.get('include_dirs', []))
  includes.extend(extracted_includes)
  includes = map(Sourceify, map(self.LocalPathify, includes))
  includes = self.NormalizeIncludePaths(includes)
  self.WriteList(includes, 'LOCAL_C_INCLUDES')
  # Directories holding originals of copied generated sources must also be
  # searched for headers; see WriteSources().
  self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
               '$(LOCAL_C_INCLUDES)')

  self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
  self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS')
def WriteSources(self, spec, configs, extra_sources):
  """Write Makefile code for any 'sources' from the gyp input.
  These are source files necessary to build the current target.
  We need to handle shared_intermediate directory source files as
  a special case by copying them to the intermediate directory and
  treating them as a genereated sources. Otherwise the Android build
  rules won't pick them up.

  Args:
    spec, configs: input from gyp.
    extra_sources: Sources generated from Actions or Rules.
  """
  # NOTE: Python 2 semantics — filter() returns lists here; the list
  # concatenation 'sources + extra_sources' below relies on that.
  sources = filter(make.Compilable, spec.get('sources', []))
  generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
  extra_sources = filter(make.Compilable, extra_sources)

  # Determine and output the C++ extension used by these sources.
  # We simply find the first C++ file and use that extension.
  all_sources = sources + extra_sources
  local_cpp_extension = '.cpp'
  for source in all_sources:
    (root, ext) = os.path.splitext(source)
    if IsCPPExtension(ext):
      local_cpp_extension = ext
      break
  if local_cpp_extension != '.cpp':
    self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)

  # We need to move any non-generated sources that are coming from the
  # shared intermediate directory out of LOCAL_SRC_FILES and put them
  # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
  # that don't match our local_cpp_extension, since Android will only
  # generate Makefile rules for a single LOCAL_CPP_EXTENSION.
  local_files = []
  for source in sources:
    (root, ext) = os.path.splitext(source)
    if '$(gyp_shared_intermediate_dir)' in source:
      extra_sources.append(source)
    elif '$(gyp_intermediate_dir)' in source:
      extra_sources.append(source)
    elif IsCPPExtension(ext) and ext != local_cpp_extension:
      extra_sources.append(source)
    else:
      local_files.append(os.path.normpath(os.path.join(self.path, source)))

  # For any generated source, if it is coming from the shared intermediate
  # directory then we add a Make rule to copy them to the local intermediate
  # directory first. This is because the Android LOCAL_GENERATED_SOURCES
  # must be in the local module intermediate directory for the compile rules
  # to work properly. If the file has the wrong C++ extension, then we add
  # a rule to copy that to intermediates and use the new version.
  final_generated_sources = []
  # If a source file gets copied, we still need to add the orginal source
  # directory as header search path, for GCC searches headers in the
  # directory that contains the source file by default.
  origin_src_dirs = []
  for source in extra_sources:
    local_file = source
    if not '$(gyp_intermediate_dir)/' in local_file:
      basename = os.path.basename(local_file)
      local_file = '$(gyp_intermediate_dir)/' + basename
    (root, ext) = os.path.splitext(local_file)
    if IsCPPExtension(ext) and ext != local_cpp_extension:
      # Rename to the single C++ extension Android supports for this module.
      local_file = root + local_cpp_extension
    if local_file != source:
      # Copy rule into the local intermediate dir; remember the origin dir
      # so its headers stay reachable (see GYP_COPIED_SOURCE_ORIGIN_DIRS).
      self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
      self.WriteLn('\tmkdir -p $(@D); cp $< $@')
      origin_src_dirs.append(os.path.dirname(source))
    final_generated_sources.append(local_file)

  # We add back in all of the non-compilable stuff to make sure that the
  # make rules have dependencies on them.
  final_generated_sources.extend(generated_not_sources)
  self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')

  origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
  origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
  self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')

  self.WriteList(local_files, 'LOCAL_SRC_FILES')

  # Write out the flags used to compile the source; this must be done last
  # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
  self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
  """Map a gyp spec to the Android module name used in the generated files.

  The fully qualified name (path + target + a gyp suffix) is used so that
  identically named targets in different directories cannot collide, and so
  that gyp-generated modules are easy to recognize.  A spec may set
  'android_unmangled_name' to opt out of the mangling entirely.
  """
  if int(spec.get('android_unmangled_name', 0)):
    # Unmangled shared libraries must already follow the 'lib' convention.
    assert self.type != 'shared_library' or self.target.startswith('lib')
    return self.target

  # Shared library modules need a 'lib' prefix by Android convention so
  # that -l flags can resolve them.
  prefix = 'lib_' if self.type == 'shared_library' else ''
  suffix = '_host_gyp' if spec['toolset'] == 'host' else '_gyp'
  if self.path:
    qualified = '%s_%s' % (self.path, self.target)
  else:
    qualified = self.target
  return make.StringToMakefileVariable(prefix + qualified + suffix)
def ComputeOutputParts(self, spec):
  """Return the output basename of a gyp spec as a (stem, extension) pair.

  Android requires libraries to be named exactly after their module name
  (otherwise the linker cannot find them), so for library types the module
  name wins and product_name/product_prefix/product_extension are ignored;
  no 'lib' prefix is prepended either.
  """
  assert self.type != 'loadable_module' # TODO: not supported?

  target = spec['target_name']
  target_prefix = ''
  target_ext = ''
  library_types = ('static_library', 'shared_library')
  if self.type in library_types:
    target = self.ComputeAndroidModule(spec)
    target_ext = '.a' if self.type == 'static_library' else '.so'
  elif self.type == 'none':
    # 'none' targets still produce a stamp file so dependents can key on it.
    target_ext = '.stamp'
  elif self.type != 'executable':
    print ("ERROR: What output file should be generated?",
           "type", self.type, "target", target)

  # Non-library targets honour the product_* overrides from the spec.
  if self.type not in library_types:
    target_prefix = spec.get('product_prefix', target_prefix)
    target = spec.get('product_name', target)
    product_ext = spec.get('product_extension')
    if product_ext:
      target_ext = '.' + product_ext

  return (target_prefix + target, target_ext)
def ComputeOutputBasename(self, spec):
  """Return the output filename (stem plus extension) for a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
    'libfoobar.so'
  """
  stem, ext = self.ComputeOutputParts(spec)
  return stem + ext
def ComputeOutput(self, spec):
  """Return the 'output' (full output path) of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
    '$(obj)/baz/libfoobar.so'

  The directory part is an Android build-system make expression; only the
  basename is computed here in Python.
  """
  if self.type == 'executable' and self.toolset == 'host':
    # We install host executables into shared_intermediate_dir so they can be
    # run by gyp rules that refer to PRODUCT_DIR.
    path = '$(gyp_shared_intermediate_dir)'
  elif self.type == 'shared_library':
    if self.toolset == 'host':
      path = '$(HOST_OUT_INTERMEDIATE_LIBRARIES)'
    else:
      path = '$(TARGET_OUT_INTERMEDIATE_LIBRARIES)'
  else:
    # Other targets just get built into their intermediate dir.
    if self.toolset == 'host':
      # Trailing 'true' selects the host variant of the intermediates dir.
      path = '$(call intermediates-dir-for,%s,%s,true)' % (self.android_class,
                                                           self.android_module)
    else:
      path = '$(call intermediates-dir-for,%s,%s)' % (self.android_class,
                                                      self.android_module)

  assert spec.get('product_dir') is None # TODO: not supported?
  return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeLdFlags(self, ld_flags):
  """Clean up ldflags from the gyp file.

  Any flag that mentions the Android top directory is dropped; everything
  else is passed through unchanged, preserving order.

  Args:
    ld_flags: ldflags from gyp files.
  Returns:
    clean ldflags
  """
  return [flag for flag in ld_flags if self.android_top_dir not in flag]
def NormalizeIncludePaths(self, include_paths):
  """Normalize include_paths.

  Absolute paths are rewritten relative to the Android top directory, and
  anything already on the Android standard header search path is dropped.

  Args:
    include_paths: A list of unprocessed include paths.
  Returns:
    A list of normalized include paths.
  """
  relativized = [gyp.common.RelativePath(p, self.android_top_dir)
                 if p[0] == '/' else p
                 for p in include_paths]
  # Filter out the Android standard search path.
  return [p for p in relativized
          if p not in android_standard_include_paths]
def ExtractIncludesFromCFlags(self, cflags):
  """Partition compiler flags into ordinary flags and '-I' include paths.

  Args:
    cflags: A list of compiler flags, which may be mixed with "-I.." (or
        None/empty).
  Returns:
    A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed.
  """
  remaining_flags = []
  include_paths = []
  for flag in (cflags or []):
    if flag.startswith('-I'):
      include_paths.append(flag[2:])  # strip the '-I' prefix
    else:
      remaining_flags.append(flag)
  return (remaining_flags, include_paths)
def ComputeAndroidLibraryModuleNames(self, libraries):
  """Compute Android module names from a gyp 'libraries' list.

  Each entry may contain several whitespace-separated words.  System
  libraries that the Android build adds on its own are dropped; '*.a' and
  '*.so' paths become static/shared module names (the basename without the
  extension); remaining '-lfoo' flags become 'libfoo'.

  Args:
    libraries: the value of spec.get('libraries')
  Returns:
    A tuple (static_lib_modules, dynamic_lib_modules)
  """
  static_lib_modules = []
  dynamic_lib_modules = []
  # Libraries the Android build system links implicitly; never list them.
  implicit_libs = ('-lc', '-lstdc++', '-lm')
  for libs in libraries:
    # Libs can have multiple words.
    for lib in libs.split():
      if lib in implicit_libs or lib.endswith('libgcc.a'):
        continue
      static_match = re.search(r'([^/]+)\.a$', lib)
      if static_match:
        static_lib_modules.append(static_match.group(1))
        continue
      shared_match = re.search(r'([^/]+)\.so$', lib)
      if shared_match:
        dynamic_lib_modules.append(shared_match.group(1))
        continue
      # "-lstlport" -> libstlport
      if lib.startswith('-l'):
        module = 'lib' + lib[2:]
        if lib.endswith('_static'):
          static_lib_modules.append(module)
        else:
          dynamic_lib_modules.append(module)
  return (static_lib_modules, dynamic_lib_modules)
def ComputeDeps(self, spec):
  """Compute the dependencies of a gyp spec.

  Returns a tuple (deps, link_deps), where each is a list of
  filenames that will need to be put in front of make for either
  building (deps) or linking (link_deps).
  """
  dependencies = spec.get('dependencies', [])
  # Build-order prerequisites: every dependency that produced an output.
  deps = [target_outputs[dep] for dep in dependencies if target_outputs[dep]]
  # Link-time inputs: only dependencies that registered a linkable artifact.
  link_deps = [target_link_deps[dep] for dep in dependencies
               if dep in target_link_deps]
  # Anything we link against is also a build prerequisite.
  deps.extend(link_deps)
  return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
  """Write Makefile code to specify the link flags and library dependencies.

  spec, configs: input from gyp.
  link_deps: link dependency list; see ComputeDeps()
  """
  config = configs[spec['default_configuration']]

  # LDFLAGS
  ldflags = list(config.get('ldflags', []))
  # Library references hiding inside ldflags ('-lfoo', '*.a', '*.so') are
  # converted to Android module names and merged into the library lists
  # below; NormalizeLdFlags only strips android_top_dir flags, so plain
  # '-l' entries are also still emitted in LOCAL_LDFLAGS.
  static_flags, dynamic_flags = self.ComputeAndroidLibraryModuleNames(
      ldflags)
  self.WriteLn('')
  self.WriteList(self.NormalizeLdFlags(ldflags), 'LOCAL_LDFLAGS')

  # Libraries (i.e. -lfoo)
  libraries = gyp.common.uniquer(spec.get('libraries', []))
  static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames(
      libraries)

  # Link dependencies (i.e. libfoo.a, libfoo.so)
  static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
  shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']

  self.WriteLn('')
  self.WriteList(static_flags + static_libs + static_link_deps,
                 'LOCAL_STATIC_LIBRARIES')
  self.WriteLn('# Enable grouping to fix circular references')
  self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
  self.WriteLn('')
  self.WriteList(dynamic_flags + dynamic_libs + shared_link_deps,
                 'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all):
  """Write Makefile code to produce the final target of the gyp spec.

  spec, configs: input from gyp.
  deps, link_deps: dependency lists; see ComputeDeps()
  part_of_all: flag indicating this target is part of 'all'
  """
  self.WriteLn('### Rules for final target.')

  if self.type != 'none':
    self.WriteTargetFlags(spec, configs, link_deps)

  # Add to the set of targets which represent the gyp 'all' target. We use the
  # name 'gyp_all_modules' as the Android build system doesn't allow the use
  # of the Make target 'all' and because 'all_modules' is the equivalent of
  # the Make target 'all' on Android.
  if part_of_all:
    self.WriteLn('# Add target alias to "gyp_all_modules" target.')
    self.WriteLn('.PHONY: gyp_all_modules')
    self.WriteLn('gyp_all_modules: %s' % self.android_module)
    self.WriteLn('')

  # Add an alias from the gyp target name to the Android module name. This
  # simplifies manual builds of the target, and is required by the test
  # framework.
  if self.target != self.android_module:
    self.WriteLn('# Alias gyp target name.')
    self.WriteLn('.PHONY: %s' % self.target)
    self.WriteLn('%s: %s' % (self.target, self.android_module))
    self.WriteLn('')

  # Add the command to trigger build of the target type depending
  # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
  # NOTE: This has to come last!
  modifier = ''
  if self.toolset == 'host':
    modifier = 'HOST_'
  if self.type == 'static_library':
    self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
  elif self.type == 'shared_library':
    self.WriteLn('LOCAL_PRELINK_MODULE := false')
    self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
  elif self.type == 'executable':
    if self.toolset == 'host':
      self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
    else:
      # Don't install target executables for now, as it results in them being
      # included in ROM. This can be revisited if there's a reason to install
      # them later.
      self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
    self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
  else:
    # 'none' (and anything unrecognized): emit a stamp target by hand,
    # since no Android BUILD_* template applies.
    self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
    self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
    self.WriteLn()
    self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
    self.WriteLn()
    self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
    self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
    self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
    self.WriteLn('\t$(hide) touch $@')
def WriteList(self, value_list, variable=None, prefix='',
              quoter=make.QuoteIfNecessary, local_pathify=False):
  """Emit a pretty-printed Make variable assignment for a list of values.

  E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
    foo = blaha blahb
  with one value per continuation line.  An empty or None value_list
  produces an empty assignment.
  """
  if value_list:
    quoted = [quoter(prefix + item) for item in value_list]
    if local_pathify:
      quoted = [self.LocalPathify(item) for item in quoted]
    rendered = ' \\\n\t' + ' \\\n\t'.join(quoted)
  else:
    rendered = ''
  self.fp.write('%s :=%s\n\n' % (variable, rendered))
def WriteLn(self, text=''):
  """Write a single line (newline-terminated) to the output makefile."""
  self.fp.write('%s\n' % text)
def LocalPathify(self, path):
  """Convert a subdirectory-relative path into a normalized path rooted at
  the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
  Absolute paths, or paths that contain variables, are just normalized."""
  if '$(' in path or os.path.isabs(path):
    # Not a file in the project tree; normpath still matters because it
    # trims trailing slashes.
    return os.path.normpath(path)

  local_path = os.path.normpath(
      os.path.join('$(LOCAL_PATH)', self.path, path))
  # Normalizing may have '../'-ed the path out of $(LOCAL_PATH) — i.e. out
  # of the project tree.  The result may legitimately be exactly
  # $(LOCAL_PATH), so no trailing slash is required after the prefix.
  assert local_path.startswith('$(LOCAL_PATH)'), (
         'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
  return local_path
def ExpandInputRoot(self, template, expansion, dirname):
  """Substitute %(INPUT_ROOT)s / %(INPUT_DIRNAME)s in a rule template.

  Templates that use neither placeholder are returned unchanged (so '%'
  characters elsewhere in them are never misinterpreted).
  """
  if '%(INPUT_ROOT)s' in template or '%(INPUT_DIRNAME)s' in template:
    return template % {
        'INPUT_ROOT': expansion,
        'INPUT_DIRNAME': dirname,
        }
  return template
def PerformBuild(data, configurations, params):
  # Invoke the Android build ('make gyp_all_modules') on the generated
  # GypAndroid.mk via ONE_SHOT_MAKEFILE.  Requires ANDROID_BUILD_TOP in the
  # environment (i.e. a 'lunch'-ed shell).
  # NOTE: uses the Python 2 print statement; this module is Python 2 only.
  # The android backend only supports the default configuration.
  options = params['options']
  makefile = os.path.abspath(os.path.join(options.toplevel_dir,
                                          'GypAndroid.mk'))
  env = dict(os.environ)
  env['ONE_SHOT_MAKEFILE'] = makefile
  arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
  print 'Building: %s' % arguments
  subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
  # Generator entry point: writes one <target>.<toolset>.mk per gyp target
  # (via AndroidMkWriter) plus a root GypAndroid.mk that includes them all.
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  limit_to_target_all = generator_flags.get('limit_to_target_all', False)
  android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
  assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    assert not options.generator_output, (
        'The Android backend does not support options.generator_output.')
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'GypAndroid' + options.suffix + '.mk'
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  assert not options.generator_output, (
      'The Android backend does not support options.generator_output.')
  make.ensure_directory_exists(makefile_path)
  root_makefile = open(makefile_path, 'w')

  root_makefile.write(header)

  # We set LOCAL_PATH just once, here, to the top of the project tree. This
  # allows all the other paths we use to be relative to the Android.mk file,
  # as the Android build system expects.
  root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  android_modules = {}
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)
    relative_build_file = gyp.common.RelativePath(build_file,
                                                  options.toplevel_dir)
    build_files.add(relative_build_file)
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    part_of_all = (qualified_target in needed_targets and
                   not int(spec.get('suppress_wildcard', False)))
    if limit_to_target_all and not part_of_all:
      continue

    relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
                                                 toolset)
    writer = AndroidMkWriter(android_top_dir)
    android_module = writer.Write(qualified_target, relative_target, base_path,
                                  output_file, spec, configs,
                                  part_of_all=part_of_all)
    # Android module names must be globally unique; bail out early (after
    # printing both offenders) on a collision.
    if android_module in android_modules:
      print ('ERROR: Android module names must be unique. The following '
             'targets both generate Android module name %s.\n %s\n %s' %
             (android_module, android_modules[android_module],
              qualified_target))
      return
    android_modules[android_module] = qualified_target

    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  # Some tools need to know the absolute path of the top directory.
  root_makefile.write('GYP_ABS_ANDROID_TOP_DIR := $(shell pwd)\n')
  root_makefile.write('GYP_DEFAULT_CONFIGURATION := %s\n' %
                      default_configuration)

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
  root_makefile.write('\n')

  root_makefile.write(SHARED_FOOTER)

  root_makefile.close()
| mit |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/ohlc/decreasing/_line.py | 2 | 7009 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    """Line style (color, dash pattern, width) for decreasing OHLC points."""

    # NOTE(review): this class follows the plotly.py generated graph-object
    # layout (property getters/setters over _BaseTraceHierarchyType storage);
    # presumably emitted by the plotly codegen — confirm before hand-editing.

    # class properties
    # --------------------
    _parent_path_str = "ohlc.decreasing"
    _path_str = "ohlc.decreasing.line"
    _valid_props = {"color", "dash", "width"}

    # color
    # -----
    @property
    def color(self):
        """
        Sets the line color.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # dash
    # ----
    @property
    def dash(self):
        """
        Sets the dash style of lines. Set to a dash type string
        ("solid", "dot", "dash", "longdash", "dashdot", or
        "longdashdot") or a dash length list in px (eg
        "5px,10px,2px,2px").

        The 'dash' property is an enumeration that may be specified as:
          - One of the following dash styles:
                ['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
          - A string containing a dash length list in pixels or percentages
                (e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)

        Returns
        -------
        str
        """
        return self["dash"]

    @dash.setter
    def dash(self, val):
        self["dash"] = val

    # width
    # -----
    @property
    def width(self):
        """
        Sets the line width (in px).

        The 'width' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the line color.
        dash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        width
            Sets the line width (in px).
        """

    def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
        """
        Construct a new Line object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.ohlc.decreasing.Line`
        color
            Sets the line color.
        dash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        width
            Sets the line width (in px).

        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")

        # Internal construction path: adopt the parent reference and skip
        # validation/population entirely.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so popping keys below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.ohlc.decreasing.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.ohlc.decreasing.Line`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values from `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("dash", None)
        _v = dash if dash is not None else _v
        if _v is not None:
            self["dash"] = _v
        _v = arg.pop("width", None)
        _v = width if width is not None else _v
        if _v is not None:
            self["width"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| mit |
andela-osule/django-bucketlist-application | functional_tests/test_signed_in_users.py | 1 | 2126 | # -*- coding: utf-8 -*-
from selenium import webdriver
from django.core.urlresolvers import reverse
from django.test import LiveServerTestCase
from django.utils.decorators import classonlymethod
from django.contrib.auth.models import User
class HomeSigninUserTest(LiveServerTestCase):
    """Functional tests for behaviour available to signed-in users.

    A user account is created and signed in through the home page form
    before every test; a single headless PhantomJS browser is shared by
    the whole TestCase (created in setUpClass, closed in tearDownClass).
    """

    @classmethod
    def setUpClass(cls):
        super(HomeSigninUserTest, cls).setUpClass()
        cls.browser = webdriver.PhantomJS()
        # Single source of truth for the test account credentials: setUp
        # uses this dict both to create the user and to fill the form.
        cls.login_data = {
            'username': 'basil',
            'password': 'some_really_strong_password'
        }

    def setUp(self):
        User.objects.create_user(**self.login_data)
        # Sign the user in through the home page form.
        self.browser.get('%s%s' % (self.live_server_url, '/'))
        self.browser.find_element_by_id(
            "signInUsername").send_keys(self.login_data['username'])
        # Read the password from login_data instead of repeating the
        # literal, so a credentials change cannot break only this line.
        self.browser.find_element_by_id(
            "signInPassword").send_keys(self.login_data['password'])
        self.browser.find_element_by_id(
            "signInBtn"
        ).click()
        super(HomeSigninUserTest, self).setUp()
        self.browser.implicitly_wait(3)

    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()
        super(HomeSigninUserTest, cls).tearDownClass()

    def _get_full_url(self, namespace):
        # Resolve a URL name against the live test-server address.
        return self.live_server_url + reverse(namespace)  # pragma: no cover

    def test_dashboard(self):
        """Test that Bucketlist link is present in Dashboard.
        """
        self.assertIn(
            'Dashboard',
            self.browser.find_element_by_tag_name('body').text)
        self.assertIn(
            'Bucketlists',
            self.browser.find_element_by_css_selector('section ul li a').text)

    def test_can_navigate_to_bucketlist_details(self):
        """Test that Bucketlist detail page can be navigated.
        """
        self.browser.find_element_by_css_selector('section ul li a').click()
        # assertEqual (assertEquals is a deprecated alias).
        self.assertEqual(
            'No bucketlists added yet.',
            self.browser.find_element_by_css_selector('section div p').text)
| gpl-2.0 |
wf4ever/ro-manager | src/rocommand/test/TestAnnotationUtils.py | 1 | 35802 | #!/usr/bin/python
"""
Module to test RO manager annotation support utilities
See: http://www.wf4ever-project.org/wiki/display/docs/RO+management+tool
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os, os.path
import sys
import re
import shutil
import unittest
import logging
import datetime
import StringIO
try:
# Running Python 2.5 with simplejson?
import simplejson as json
except ImportError:
import json
log = logging.getLogger(__name__)
if __name__ == "__main__":
# Add main project directory and ro manager directories at start of python path
sys.path.insert(0, "../..")
sys.path.insert(0, "..")
import rdflib
from MiscUtils import TestUtils
from rocommand import ro_settings
from rocommand import ro_manifest
from rocommand import ro_annotation
from rocommand.ro_namespaces import RDF, RDFS, RO, ORE, DCTERMS, ROTERMS
from TestConfig import ro_test_config
from StdoutContext import SwitchStdout
import TestROSupport
# Base directory for RO tests in this module
testbase = os.path.dirname(os.path.realpath(__file__))

# Local ro_config for testing: only the annotation type/prefix maps from
# ro_annotation are supplied (a minimal stand-in for the full configuration).
ro_config = {
    "annotationTypes": ro_annotation.annotationTypes,
    "annotationPrefixes": ro_annotation.annotationPrefixes
    }

# Captured once at import time so the URI tests below can build the
# expected file:// URIs for relative and RO-base-relative paths.
cwd = os.getcwd()
robase = ro_test_config.ROBASEDIR
robase_abs = os.path.abspath(ro_test_config.ROBASEDIR)
class TestAnnotationUtils(TestROSupport.TestROSupport):
"""
Test ro annotation support functions
"""
def setUp(self):
    # No fixture state of its own; all setup is done by TestROSupport.
    super(TestAnnotationUtils, self).setUp()
    return
def tearDown(self):
    # No fixture state of its own; all teardown is done by TestROSupport.
    super(TestAnnotationUtils, self).tearDown()
    return
# Actual tests follow
def testNull(self):
    # Sanity check that the test harness itself runs.
    assert True, 'Null test failed'
def testGetFileUri(self):
    """getFileUri: absolute paths map straight onto file: URIs, relative
    paths are resolved against the current working directory, and trailing
    slashes on directory paths are preserved."""
    cases = [
        ("/example/a/b.txt", "file:///example/a/b.txt"),
        ("a/b.txt", "file://%s/a/b.txt" % (cwd)),
        ("/example/ro/dir/a/b/d/", "file:///example/ro/dir/a/b/d/"),
        ("a/b/d/", "file://%s/a/b/d/" % (cwd)),
        ]
    for (path, expect) in cases:
        self.assertEquals(ro_manifest.getFileUri(path),
                          rdflib.URIRef(expect))
    return
def testGetUriFile(self):
    """getUriFile: converting a URIRef back to a file path strips any
    file: scheme and leaves the path component untouched."""
    cases = [
        ("file:///example/a/b.txt", "/example/a/b.txt"),
        ("/example/a/b.txt", "/example/a/b.txt"),
        ("a/b.txt", "a/b.txt"),
        ]
    for (uri, expect) in cases:
        self.assertEquals(ro_manifest.getUriFile(rdflib.URIRef(uri)), expect)
    return
def testGetRoUri(self):
    """getRoUri normalizes an RO directory (with or without trailing /) to a file: URI."""
    cases = [
        ("/example/ro/dir",     "file:///example/ro/dir/"),
        ("/example/ro/dir/",    "file:///example/ro/dir/"),
        ("ro/dir",              "file://%s/ro/dir/" % cwd),
        ("ro/dir/",             "file://%s/ro/dir/" % cwd),
        (robase + "/ro/dir",    "file://%s/ro/dir/" % robase_abs),
        (robase + "/ro/dir/",   "file://%s/ro/dir/" % robase_abs),
        ]
    for path, expect in cases:
        self.assertEquals(ro_manifest.getRoUri(path), rdflib.URIRef(expect))
    return
def testGetComponentUri(self):
    """getComponentUri resolves a relative component path against an RO directory."""
    # Each fragment is checked against absolute and relative RO dirs,
    # both with and without a trailing slash (same order as before).
    cases = (
        [ (base, frag, expect)
          for (frag, expect) in [("a/b.txt", "a/b.txt"), ("a/b/d/", "a/b/d/")]
          for (base, root) in
              [ ("/example/ro/dir",  "file:///example/ro/dir/")
              , ("/example/ro/dir/", "file:///example/ro/dir/")
              , ("ro/dir",           "file://%s/ro/dir/" % cwd)
              , ("ro/dir/",          "file://%s/ro/dir/" % cwd)
              ]
          for expect in [root + frag]
        ])
    for base, frag, expect in cases:
        self.assertEquals(ro_manifest.getComponentUri(base, frag),
                          rdflib.URIRef(expect))
    return
def testGetComponentUriRel(self):
    """getComponentUriRel yields an RO-relative URI for a component supplied as a
    relative path, an absolute path, a file: URI string, or an rdflib.URIRef."""
    cases = (
        # (ro dir, component reference, expected relative URI)
        # -- component already relative: returned unchanged
        [ ("/example/ro/dir",  "a/b.txt", "a/b.txt")
        , ("/example/ro/dir/", "a/b.txt", "a/b.txt")
        , ("ro/dir",           "a/b.txt", "a/b.txt")
        , ("ro/dir/",          "a/b.txt", "a/b.txt")
        , ("/example/ro/dir",  "a/b/d/",  "a/b/d/")
        , ("/example/ro/dir/", "a/b/d/",  "a/b/d/")
        , ("ro/dir",           "a/b/d/",  "a/b/d/")
        , ("ro/dir/",          "a/b/d/",  "a/b/d/")
        , ("/example/ro/dir",  "",        "")
        , ("/example/ro/dir/", "",        "")
        , ("ro/dir",           "",        "")
        , ("ro/dir/",          "",        "")
        # -- component as absolute filesystem path under the RO dir
        , ("/example/ro/dir",  "/example/ro/dir/a/b.txt",    "a/b.txt")
        , ("/example/ro/dir/", "/example/ro/dir/a/b.txt",    "a/b.txt")
        , ("ro/dir",           "%s/ro/dir/a/b.txt" % cwd,    "a/b.txt")
        , ("ro/dir/",          "%s/ro/dir/a/b.txt" % cwd,    "a/b.txt")
        , ("/example/ro/dir",  "/example/ro/dir/a/b/d/",     "a/b/d/")
        , ("/example/ro/dir/", "/example/ro/dir/a/b/d/",     "a/b/d/")
        , ("ro/dir",           "%s/ro/dir/a/b/d/" % cwd,     "a/b/d/")
        , ("ro/dir/",          "%s/ro/dir/a/b/d/" % cwd,     "a/b/d/")
        , ("/example/ro/dir",  "/example/ro/dir/",           "")
        , ("/example/ro/dir/", "/example/ro/dir/",           "")
        , ("ro/dir",           "%s/ro/dir/" % cwd,           "")
        , ("ro/dir/",          "%s/ro/dir/" % cwd,           "")
        # -- component as file: URI string
        , ("/example/ro/dir",  "file:///example/ro/dir/a/b.txt", "a/b.txt")
        , ("/example/ro/dir/", "file:///example/ro/dir/a/b.txt", "a/b.txt")
        , ("/example/ro/dir",  "file:///example/ro/dir/a/b/d/",  "a/b/d/")
        , ("/example/ro/dir/", "file:///example/ro/dir/a/b/d/",  "a/b/d/")
        , ("/example/ro/dir",  "file:///example/ro/dir/",        "")
        , ("/example/ro/dir/", "file:///example/ro/dir/",        "")
        # -- component as rdflib.URIRef
        , ("/example/ro/dir",  rdflib.URIRef("file:///example/ro/dir/a/b.txt"), "a/b.txt")
        , ("/example/ro/dir/", rdflib.URIRef("file:///example/ro/dir/a/b.txt"), "a/b.txt")
        , ("/example/ro/dir",  rdflib.URIRef("file:///example/ro/dir/a/b/d/"),  "a/b/d/")
        , ("/example/ro/dir/", rdflib.URIRef("file:///example/ro/dir/a/b/d/"),  "a/b/d/")
        , ("/example/ro/dir",  rdflib.URIRef("file:///example/ro/dir/"),        "")
        , ("/example/ro/dir/", rdflib.URIRef("file:///example/ro/dir/"),        "")
        ])
    for base, component, expect in cases:
        self.assertEquals(ro_manifest.getComponentUriRel(base, component),
                          rdflib.URIRef(expect))
    return
def testGetGraphRoUri(self):
    """The manifest graph of a freshly created RO reports the expected RO URI."""
    rodir = self.createTestRo(testbase, "data/ro-test-1", "RO test graph", "ro-testRoGraph")
    graph = ro_manifest.readManifestGraph(rodir)
    # RO title "RO test graph" becomes directory name RO_test_graph
    expected = rdflib.URIRef("file://%s/RO_test_graph/" % robase_abs)
    self.assertEquals(ro_manifest.getGraphRoUri(rodir, graph), expected)
    self.deleteTestRo(rodir)
    return
def testGetAnnotationByName(self):
    """Annotation names resolve to (predicate URI, value type) pairs."""
    self.assertEqual(RDF.type,
        rdflib.URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"))
    for name, expecteduri, expectedtype in (
            ("title",       DCTERMS.title,       "string"),
            ("description", DCTERMS.description, "text"),
            ("rdf:type",    RDF.type,            "resource"),
            ):
        (apred, atype) = ro_annotation.getAnnotationByName(ro_config, name)
        self.assertEqual(apred, expecteduri)
        self.assertEqual(atype, expectedtype)
    # testAnnotaton(rdflib.URIRef("http://example.org/foo"), "<http://example.org/foo>")
    return
def testGetAnnotationByUri(self):
    """Annotation predicate URIs resolve back to (name, value type) pairs."""
    for uri, expectedname, expectedtype in (
            (DCTERMS.title,       "title",        "string"),
            (DCTERMS.description, "description",  "text"),
            (RDF.type,            "rdf:type",     "resource"),
            (RDFS.comment,        "rdfs:comment", "string"),
            ):
        (aname, atype) = ro_annotation.getAnnotationByUri(ro_config, uri)
        self.assertEqual(aname, expectedname)
        self.assertEqual(atype, expectedtype)
    return
def testGetAnnotationNameByUri(self):
    """Known predicate URIs map to short names; unknown URIs map to <uri> form."""
    self.assertEqual(RDF.type,
        rdflib.URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"))
    for uri, name in (
            (DCTERMS.title,       "title"),
            (DCTERMS.description, "description"),
            (RDF.type,            "rdf:type"),
            (RDFS.comment,        "rdfs:comment"),
            (rdflib.URIRef("http://example.org/foo"), "<http://example.org/foo>"),
            ):
        self.assertEqual(ro_annotation.getAnnotationNameByUri(ro_config, uri), name)
    return
def testMakeAnnotationFilename(self):
    """Annotation bodies are placed under the RO's manifest directory."""
    rodir = "/example/ro/dir"
    for filename in ("a/b", "a/"):
        expected = "%s/%s/%s" % (rodir, ro_settings.MANIFEST_DIR, filename)
        self.assertEqual(
            ro_annotation.makeAnnotationFilename(rodir, filename), expected)
    return
def testCreateReadRoAnnotationBody(self):
    """Create an annotation body for the RO itself, read it back, and check
    that every attribute round-trips through the stored graph."""
    rodir = self.createTestRo(testbase, "data/ro-test-1", "RO test annotation", "ro-testRoAnnotate")
    roresource = "."
    attrdict = {
        "type":        rdflib.Literal("Research Object"),
        # @@TODO: handle lists "keywords": ["test", "research", "object"],
        "description": rdflib.Literal("Test research object"),
        "format":      rdflib.Literal("application/vnd.wf4ever.ro"),
        "note":        rdflib.Literal("Research object created for annotation testing"),
        "title":       rdflib.Literal("Test research object"),
        "created":     rdflib.Literal("2011-12-07"),
        }
    annotationfilebase = ro_annotation.createAnnotationBody(
        ro_config, rodir, roresource, attrdict)
    # Expected form: Ann-YYYYMMDD-NNNN-<label>.rdf
    self.assertRegexpMatches(annotationfilebase,
        r"Ann-\d\d\d\d\d\d\d\d-\d+-RO_test_annotation\.rdf",
        msg="Unexpected filename form for annotation: " + annotationfilebase)
    annotationfilename = ro_annotation.makeAnnotationFilename(rodir, annotationfilebase)
    annotationgraph = ro_annotation.readAnnotationBody(rodir, annotationfilename)
    attrpropdict = {
        "type":        DCTERMS.type,
        # @@TODO "keywords": DCTERMS.subject,
        "description": DCTERMS.description,
        "format":      DCTERMS.format,
        "note":        ROTERMS.note,
        "title":       DCTERMS.title,
        "created":     DCTERMS.created,
        }
    subject = ro_manifest.getComponentUri(rodir, roresource)
    log.debug("annotation subject %s" % repr(subject))
    for key in attrpropdict:
        pred = attrpropdict[key]
        log.debug("annotation predicate %s" % repr(pred))
        found = annotationgraph.value(subject, pred, None)
        log.debug("annotation value %s" % repr(found))
        # annotationgraph.value returns a single matching object (or None)
        self.assertEqual(found, attrdict[key])
    self.deleteTestRo(rodir)
    return
def testCreateReadFileAnnotationBody(self):
    """Create an annotation body for a file inside the RO, read it back, and
    check that every attribute round-trips through the stored graph."""
    rodir = self.createTestRo(testbase, "data/ro-test-1", "RO test annotation", "ro-testRoAnnotate")
    roresource = "subdir1/subdir1-file.txt"
    attrdict = {
        "type":        rdflib.Literal("Test file"),
        "description": rdflib.Literal("File in test research object"),
        "note":        rdflib.Literal("File in research object created for annotation testing"),
        "title":       rdflib.Literal("Test file in RO"),
        "created":     rdflib.Literal("2011-12-07"),
        }
    annotationfilebase = ro_annotation.createAnnotationBody(
        ro_config, rodir, roresource, attrdict)
    # Expected form: Ann-YYYYMMDD-NNNN-<filename>.rdf
    self.assertRegexpMatches(annotationfilebase,
        r"Ann-\d\d\d\d\d\d\d\d-\d+-subdir1-file\.txt\.rdf",
        msg="Unexpected filename form for annotation: " + annotationfilebase)
    annotationfilename = ro_annotation.makeAnnotationFilename(rodir, annotationfilebase)
    annotationgraph = ro_annotation.readAnnotationBody(rodir, annotationfilename)
    attrpropdict = {
        "type":        DCTERMS.type,
        "description": DCTERMS.description,
        "note":        ROTERMS.note,
        "title":       DCTERMS.title,
        "created":     DCTERMS.created,
        }
    subject = ro_manifest.getComponentUri(rodir, roresource)
    log.debug("annotation subject %s" % repr(subject))
    for key in attrpropdict:
        pred = attrpropdict[key]
        log.debug("annotation predicate %s" % repr(pred))
        found = annotationgraph.value(subject, pred, None)
        log.debug("annotation value %s" % repr(found))
        # annotationgraph.value returns a single matching object (or None)
        self.assertEqual(found, attrdict[key])
    self.deleteTestRo(rodir)
    return
def testGetInitialRoAnnotations(self):
    """A freshly created RO carries the expected initial annotations."""
    rodir = self.createTestRo(testbase, "data/ro-test-1", "Test init RO annotation", "ro-testRoAnnotate")
    roresource = "."
    annotations = ro_annotation._getRoAnnotations(rodir)
    rouri = ro_manifest.getRoUri(rodir)
    expected_annotations = [
        (rouri, DCTERMS.description, rdflib.Literal('Test init RO annotation')),
        (rouri, DCTERMS.title,       rdflib.Literal('Test init RO annotation')),
        (rouri, DCTERMS.created,     rdflib.Literal('unknown')),
        (rouri, DCTERMS.creator,     rdflib.Literal('Test User')),
        (rouri, DCTERMS.identifier,  rdflib.Literal('ro-testRoAnnotate')),
        (rouri, RDF.type,            RO.ResearchObject),
        ]
    for i in range(7):  # 6 annotations + 1 aggregation statement
        stmt = annotations.next()
        # Creation timestamps and aggregation triples vary, so just allow them
        acceptable = (stmt in expected_annotations
                      or stmt[1] == DCTERMS.created
                      or stmt[1] == ORE.aggregates)
        self.assertTrue(acceptable, "Not expected (%d) %s" % (i, repr(stmt)))
    self.assertRaises(StopIteration, annotations.next)
    self.deleteTestRo(rodir)
    return
def testAddGetRoAnnotations(self):
    """Annotations added to the RO itself appear when enumerated."""
    rodir = self.createTestRo(testbase, "data/ro-test-1", "Test add RO annotation", "ro-testRoAnnotate")
    roresource = "."
    # Attach three new annotations to the RO (same order as before)
    for aname, avalue in (
            ("type", "Research Object"),
            ("note", "Research object created for annotation testing"),
            ("description", "Added description"),
            ):
        ro_annotation._addSimpleAnnotation(ro_config, rodir, roresource, aname, avalue)
    annotations = ro_annotation._getRoAnnotations(rodir)
    rouri = ro_manifest.getRoUri(rodir)
    expected_annotations = [
        (rouri, DCTERMS.description, rdflib.Literal('Test add RO annotation')),
        (rouri, DCTERMS.title,       rdflib.Literal('Test add RO annotation')),
        (rouri, DCTERMS.created,     rdflib.Literal('unknown')),
        (rouri, DCTERMS.creator,     rdflib.Literal('Test User')),
        (rouri, DCTERMS.identifier,  rdflib.Literal('ro-testRoAnnotate')),
        (rouri, RDF.type,            RO.ResearchObject),
        (rouri, DCTERMS.type,        rdflib.Literal('Research Object')),
        (rouri, ROTERMS.note,        rdflib.Literal('Research object created for annotation testing')),
        (rouri, DCTERMS.description, rdflib.Literal('Added description')),
        ]
    for i in range(13):  # 9 annotations + 4 aggregation statements
        stmt = annotations.next()
        acceptable = (stmt in expected_annotations
                      or stmt[1] == DCTERMS.created
                      or stmt[1] == ORE.aggregates)
        self.assertTrue(acceptable, "Not expected (%d) %s" % (i, repr(stmt)))
    self.assertRaises(StopIteration, annotations.next)
    self.deleteTestRo(rodir)
    return
def testRemoveGetRoAnnotations(self):
    """Removed annotations no longer appear when the RO is enumerated."""
    rodir = self.createTestRo(testbase, "data/ro-test-1", "Test remove RO annotation", "ro-testRoAnnotate")
    roresource = "."
    # Strip several initial annotations (None value removes all for the name)
    for aname, avalue in (
            ("type", "Research Object"),
            ("title", "Test remove RO annotation"),
            ("description", None),
            ("note", "Research object created for annotation testing"),
            ("created", None),
            ):
        ro_annotation._removeSimpleAnnotation(ro_config, rodir, roresource, aname, avalue)
    annotations = ro_annotation._getRoAnnotations(rodir)
    rouri = ro_manifest.getRoUri(rodir)
    expected_annotations = [
        (rouri, DCTERMS.creator,    rdflib.Literal('Test User')),
        (rouri, DCTERMS.identifier, rdflib.Literal('ro-testRoAnnotate')),
        (rouri, RDF.type,           RO.ResearchObject),
        ]
    for i in range(4):
        stmt = annotations.next()
        acceptable = (stmt in expected_annotations
                      or stmt[1] == DCTERMS.created
                      or stmt[1] == ORE.aggregates)
        self.assertTrue(acceptable, "Not expected (%d) %s" % (i, repr(stmt)))
    self.assertRaises(StopIteration, annotations.next)
    self.deleteTestRo(rodir)
    return
def testReplaceGetRoAnnotations(self):
    """Replaced annotations supersede the originals in the enumeration."""
    rodir = self.createTestRo(testbase, "data/ro-test-1", "Test replace RO annotation", "ro-testRoAnnotate")
    roresource = "."
    # Replace a batch of annotations on the RO (same order as before)
    for aname, avalue in (
            ("type", "Research Object"),
            ("description", "Replacement description"),
            ("note", "Research object for annotation replacement testing"),
            ("title", "Replacement title"),
            ("created", "2011-12-07"),
            ):
        ro_annotation._replaceSimpleAnnotation(ro_config, rodir, roresource, aname, avalue)
    annotations = ro_annotation._getRoAnnotations(rodir)
    rouri = ro_manifest.getRoUri(rodir)
    expected_annotations = [
        (rouri, DCTERMS.type,        rdflib.Literal('Research Object')),
        (rouri, DCTERMS.title,       rdflib.Literal('Replacement title')),
        (rouri, DCTERMS.description, rdflib.Literal('Replacement description')),
        (rouri, ROTERMS.note,        rdflib.Literal('Research object for annotation replacement testing')),
        (rouri, DCTERMS.created,     rdflib.Literal('2011-12-07')),
        (rouri, DCTERMS.creator,     rdflib.Literal('Test User')),
        (rouri, DCTERMS.identifier,  rdflib.Literal('ro-testRoAnnotate')),
        (rouri, RDF.type,            RO.ResearchObject),
        ]
    for i in range(9):  # 8 annotations + 1 aggregation statement
        stmt = annotations.next()
        acceptable = (stmt in expected_annotations
                      or stmt[1] == ORE.aggregates)
        self.assertTrue(acceptable, "Not expected (%d) %s" % (i, repr(stmt)))
    self.assertRaises(StopIteration, annotations.next)
    self.deleteTestRo(rodir)
    return
def testAddGetFileAnnotations(self):
    """Annotations added to a file resource are all returned for that file."""
    rodir = self.createTestRo(testbase, "data/ro-test-1", "Test add file annotation", "ro-testRoAnnotate")
    roresource = "subdir1/subdir1-file.txt"
    # Attach annotations to the file (same order as before)
    for aname, avalue in (
            ("type", "Test file"),
            ("description", "File in test research object"),
            ("note", "Research object file created for annotation testing"),
            ("title", "Test file in RO"),
            ("created", "2011-12-07"),
            ("rdf:type", ROTERMS.resource),
            ):
        ro_annotation._addSimpleAnnotation(ro_config, rodir, roresource, aname, avalue)
    annotations = ro_annotation._getFileAnnotations(rodir, roresource)
    resourceuri = ro_manifest.getComponentUri(rodir, roresource)
    log.debug("resourceuri: %s" % (resourceuri))
    expected_annotations = [
        (resourceuri, DCTERMS.type,        rdflib.Literal('Test file')),
        (resourceuri, DCTERMS.description, rdflib.Literal('File in test research object')),
        (resourceuri, ROTERMS.note,        rdflib.Literal('Research object file created for annotation testing')),
        (resourceuri, DCTERMS.title,       rdflib.Literal('Test file in RO')),
        (resourceuri, DCTERMS.created,     rdflib.Literal('2011-12-07')),
        (resourceuri, RDF.type,            ROTERMS.resource),
        ]
    for i in range(6):
        stmt = annotations.next()
        self.assertTrue(stmt in expected_annotations,
                        "Not expected (%d) %s" % (i, repr(stmt)))
    self.assertRaises(StopIteration, annotations.next)
    self.deleteTestRo(rodir)
    return
def testRemoveGetFileAnnotations(self):
    """Annotations removed from a file resource stop appearing for that file."""
    rodir = self.createTestRo(testbase, "data/ro-test-1", "Test remove file annotation", "ro-testRoAnnotate")
    roresource = "subdir1/subdir1-file.txt"
    # Attach annotations to the file (same order as before)
    for aname, avalue in (
            ("type", "Test file"),
            ("description", "File in test research object"),
            ("note", "Research object file created for annotation testing"),
            ("title", "Test file in RO"),
            ("created", "2011-12-07"),
            ("rdf:type", ROTERMS.resource),
            ):
        ro_annotation._addSimpleAnnotation(ro_config, rodir, roresource, aname, avalue)
    # Remove two of them; a None value removes all values for the name
    ro_annotation._removeSimpleAnnotation(ro_config, rodir, roresource,
        "description", "File in test research object")
    ro_annotation._removeSimpleAnnotation(ro_config, rodir, roresource,
        "note", None)
    annotations = ro_annotation._getFileAnnotations(rodir, roresource)
    resourceuri = ro_manifest.getComponentUri(rodir, roresource)
    log.debug("resourceuri: %s" % (resourceuri))
    expected_annotations = [
        (resourceuri, DCTERMS.type,    rdflib.Literal('Test file')),
        (resourceuri, DCTERMS.title,   rdflib.Literal('Test file in RO')),
        (resourceuri, DCTERMS.created, rdflib.Literal('2011-12-07')),
        (resourceuri, RDF.type,        ROTERMS.resource),
        ]
    for i in range(4):
        stmt = annotations.next()
        self.assertTrue(stmt in expected_annotations,
                        "Not expected (%d) %s" % (i, repr(stmt)))
    self.assertRaises(StopIteration, annotations.next)
    self.deleteTestRo(rodir)
    return
def testAddGetAllAnnotations(self):
    """getAllAnnotations returns RO-level and file-level annotations together."""
    rodir = self.createTestRo(testbase, "data/ro-test-1", "Test get all annotations", "ro-testRoAnnotate")
    roresource = "subdir1/subdir1-file.txt"
    # Attach annotations to the file (same order as before)
    for aname, avalue in (
            ("type", "Test file"),
            ("description", "File in test research object"),
            ("note", "Research object file created for annotation testing"),
            ("title", "Test file in RO"),
            ("created", "2011-12-07"),
            ("rdf:type", ROTERMS.resource),
            ):
        ro_annotation._addSimpleAnnotation(ro_config, rodir, roresource, aname, avalue)
    annotations = ro_annotation.getAllAnnotations(rodir)
    rouri = ro_manifest.getRoUri(rodir)
    resourceuri = ro_manifest.getComponentUri(rodir, roresource)
    log.debug("resourceuri: %s" % (resourceuri))
    expected_annotations = [
        (rouri, DCTERMS.description,       rdflib.Literal('Test get all annotations')),
        (rouri, DCTERMS.title,             rdflib.Literal('Test get all annotations')),
        (rouri, DCTERMS.created,           rdflib.Literal('unknown')),
        (rouri, DCTERMS.creator,           rdflib.Literal('Test User')),
        (rouri, DCTERMS.identifier,        rdflib.Literal('ro-testRoAnnotate')),
        (rouri, RDF.type,                  RO.ResearchObject),
        (resourceuri, DCTERMS.type,        rdflib.Literal('Test file')),
        (resourceuri, DCTERMS.description, rdflib.Literal('File in test research object')),
        (resourceuri, ROTERMS.note,        rdflib.Literal('Research object file created for annotation testing')),
        (resourceuri, DCTERMS.title,       rdflib.Literal('Test file in RO')),
        (resourceuri, DCTERMS.created,     rdflib.Literal('2011-12-07')),
        (resourceuri, RDF.type,            ROTERMS.resource),
        ]
    for i in range(19):  # 12 annotations + aggregation statements
        stmt = annotations.next()
        acceptable = (stmt in expected_annotations
                      or stmt[1] == DCTERMS.created
                      or stmt[1] == ORE.aggregates)
        self.assertTrue(acceptable, "Not expected (%d) %s" % (i, repr(stmt)))
    self.assertRaises(StopIteration, annotations.next)
    self.deleteTestRo(rodir)
    return
def testAddGetAnnotationValues(self):
    """_getAnnotationValues yields exactly the single value attached to a
    resource for each annotation name, then raises StopIteration."""
    rodir = self.createTestRo(testbase, "data/ro-test-1", "Test get annotation values", "ro-testRoAnnotate")
    roresource = "subdir1/subdir1-file.txt"
    # Attach annotations to the file resource (same order as before)
    for aname, avalue in (
            ("type", "Test file"),
            ("description", "File in test research object"),
            ("rdf:type", ROTERMS.resource),
            ):
        ro_annotation._addSimpleAnnotation(ro_config, rodir, roresource, aname, avalue)
    # Each (target, name) query should produce exactly one expected value
    for target, aname, expect in (
            (".",        "title",       rdflib.Literal('Test get annotation values')),
            (".",        "rdf:type",    RO.ResearchObject),
            (roresource, "type",        rdflib.Literal('Test file')),
            (roresource, "description", rdflib.Literal('File in test research object')),
            (roresource, "rdf:type",    ROTERMS.resource),
            ):
        values = ro_annotation._getAnnotationValues(ro_config, rodir, target, aname)
        self.assertEquals(values.next(), expect)
        self.assertRaises(StopIteration, values.next)
    # Clean up
    self.deleteTestRo(rodir)
    return
# Sentinel/placeholder tests

def testUnits(self):
    """Sentinel for the "unit" test selection."""
    assert True

def testComponents(self):
    """Sentinel for the "component" test selection."""
    assert True

def testIntegration(self):
    """Sentinel for the "integration" test selection."""
    assert True

def testPending(self):
    """Deliberately failing marker for the "pending" selection."""
    assert False, "Pending tests follow"
# Assemble test suite
def getTestSuite(select="unit"):
    """
    Return a test suite for this module.

    select is one of the following:
        "unit"      return suite of unit tests only
        "component" return suite of unit and component tests
        "all"       return suite of unit, component and integration tests
        "pending"   return suite of pending tests
        name        a single named test to be run
    """
    testdict = {
        "unit": [
            "testUnits",
            "testNull",
            "testGetFileUri",
            "testGetUriFile",
            "testGetRoUri",
            "testGetComponentUri",
            "testGetComponentUriRel",
            "testGetGraphRoUri",
            "testGetAnnotationByName",
            "testGetAnnotationByUri",
            "testGetAnnotationNameByUri",
            "testMakeAnnotationFilename",
            "testCreateReadRoAnnotationBody",
            "testCreateReadFileAnnotationBody",
            "testGetInitialRoAnnotations",
            "testAddGetRoAnnotations",
            "testRemoveGetRoAnnotations",
            "testReplaceGetRoAnnotations",
            "testAddGetFileAnnotations",
            "testRemoveGetFileAnnotations",
            "testAddGetAllAnnotations",
            "testAddGetAnnotationValues",
            ],
        "component": [
            "testComponents",
            ],
        "integration": [
            "testIntegration",
            ],
        "pending": [
            "testPending",
            ],
        }
    return TestUtils.getTestSuite(TestAnnotationUtils, testdict, select=select)
if __name__ == "__main__":
TestUtils.runTests("TestAnnotationUtils.log", getTestSuite, sys.argv)
# End.
| mit |
Inboxen/Inboxen | inboxen/async_messages/messages.py | 1 | 1160 | from django.contrib.messages import constants
from . import message_user
"""
Mimic the django.contrib.messages API
"""
def debug(user, message):
    """Queue an asynchronous message for *user* at the ``DEBUG`` level.

    :param user: User instance to notify
    :param message: Text of the message to show
    """
    message_user(user, message, constants.DEBUG)
def info(user, message):
    """Queue an asynchronous message for *user* at the ``INFO`` level.

    :param user: User instance to notify
    :param message: Text of the message to show
    """
    message_user(user, message, constants.INFO)
def success(user, message):
    """Queue an asynchronous message for *user* at the ``SUCCESS`` level.

    :param user: User instance to notify
    :param message: Text of the message to show
    """
    message_user(user, message, constants.SUCCESS)
def warning(user, message):
    """Queue an asynchronous message for *user* at the ``WARNING`` level.

    :param user: User instance to notify
    :param message: Text of the message to show
    """
    message_user(user, message, constants.WARNING)
def error(user, message):
    """Queue an asynchronous message for *user* at the ``ERROR`` level.

    :param user: User instance to notify
    :param message: Text of the message to show
    """
    message_user(user, message, constants.ERROR)
| agpl-3.0 |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/setuptools/sandbox.py | 259 | 13925 | import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
import pkg_resources
if sys.platform.startswith('java'):
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
from distutils.errors import DistutilsError
from pkg_resources import working_set
from setuptools import compat
from setuptools.compat import builtins
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
# compile() function in Python 2.6 and 3.1 requires LF line endings.
if sys.version_info[:2] < (2, 7) or sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < (3, 2):
script = script.replace(b'\r\n', b'\n')
script = script.replace(b'\r', b'\n')
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
    """Snapshot sys.argv, optionally replace it with *repl*, and restore on exit.

    Yields the saved copy of the original argv.
    """
    original = sys.argv[:]
    if repl is not None:
        sys.argv[:] = repl
    try:
        yield original
    finally:
        sys.argv[:] = original
@contextlib.contextmanager
def save_path():
    """Snapshot sys.path and restore it on exit; yields the saved copy."""
    original = sys.path[:]
    try:
        yield original
    finally:
        sys.path[:] = original
@contextlib.contextmanager
def override_temp(replacement):
    """
    Monkey-patch tempfile.tempdir with replacement, ensuring it exists
    """
    # Create the replacement directory if it is not already present.
    if not os.path.isdir(replacement):
        os.makedirs(replacement)
    previous = tempfile.tempdir
    tempfile.tempdir = replacement
    try:
        yield
    finally:
        tempfile.tempdir = previous
@contextlib.contextmanager
def pushd(target):
    """chdir into *target* for the duration of the block; yields the old cwd."""
    previous = os.getcwd()
    os.chdir(target)
    try:
        yield previous
    finally:
        os.chdir(previous)
class UnpickleableException(Exception):
    """
    An exception representing another Exception that could not be pickled.
    """
    @classmethod
    def dump(cls, type, exc):
        """
        Always return a dumped (pickled) type and exc. If exc can't be pickled,
        wrap it in UnpickleableException first.
        """
        try:
            serialized = pickle.dumps(type), pickle.dumps(exc)
        except Exception:
            # Fall back to a picklable stand-in carrying repr(exc).
            serialized = cls.dump(cls, cls(repr(exc)))
        return serialized
class ExceptionSaver:
    """
    A Context Manager that will save an exception, serialized, and restore it
    later.
    """
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        if not exc:
            return
        # Serialize immediately: the exception objects may not survive
        # later module-state manipulation.
        self._saved = UnpickleableException.dump(exc_type, exc)
        self._tb = tb
        # Swallow the exception for now; resume() re-raises it.
        return True

    def resume(self):
        "restore and re-raise any exception"
        if '_saved' not in vars(self):
            return
        exc_type, exc = map(pickle.loads, self._saved)
        compat.reraise(exc_type, exc, self._tb)
@contextlib.contextmanager
def save_modules():
    """
    Context in which imported modules are saved.
    Translates exceptions internal to the context into the equivalent exception
    outside the context.
    """
    # Snapshot the module table before running the body.
    saved = sys.modules.copy()
    # Capture (serialized) any exception the body raises, so it can be
    # re-raised after the module state has been restored below.
    with ExceptionSaver() as saved_exc:
        yield saved
    # Restore the snapshot entries (body may have replaced some modules)...
    sys.modules.update(saved)
    # remove any modules imported since
    del_modules = (
        mod_name for mod_name in sys.modules
        if mod_name not in saved
        # exclude any encodings modules. See #285
        and not mod_name.startswith('encodings.')
    )
    _clear_modules(del_modules)
    # Re-raise the body's exception (if any) now that state is consistent.
    saved_exc.resume()
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
    """Snapshot pkg_resources global state and restore it on exit."""
    state = pkg_resources.__getstate__()
    try:
        yield state
    finally:
        pkg_resources.__setstate__(state)
@contextlib.contextmanager
def setup_context(setup_dir):
    """Prepare an isolated environment for running a setup script in
    *setup_dir*: snapshots pkg_resources/module/path/argv state, hides any
    already-imported setuptools, redirects tempfiles under the setup dir,
    and chdirs into it. Everything is restored when the context exits.
    Ordering matters: hide_setuptools() must run after save_modules() so the
    hidden modules are restored afterwards.
    """
    temp_dir = os.path.join(setup_dir, 'temp')
    with save_pkg_resources_state():
        with save_modules():
            hide_setuptools()
            with save_path():
                with save_argv():
                    with override_temp(temp_dir):
                        with pushd(setup_dir):
                            # ensure setuptools commands are available
                            __import__('setuptools')
                            yield
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
"""
pattern = re.compile('(setuptools|pkg_resources|distutils)(\.|$)')
return bool(pattern.match(mod_name))
def hide_setuptools():
    """
    Remove references to setuptools' modules from sys.modules to allow the
    invocation to import the most appropriate setuptools. This technique is
    necessary to avoid issues such as #315 where setuptools upgrading itself
    would fail to find a function declared in the metadata.
    """
    doomed = [name for name in sys.modules if _needs_hiding(name)]
    _clear_modules(doomed)
def run_setup(setup_script, args):
    """Run a distutils setup script, sandboxed in its directory"""
    setup_dir = os.path.abspath(os.path.dirname(setup_script))
    # setup_context saves/restores module, path, argv and tempdir state
    with setup_context(setup_dir):
        try:
            # Present the script with the argv it expects.
            sys.argv[:] = [setup_script]+list(args)
            sys.path.insert(0, setup_dir)
            # reset to include setup dir, w/clean callback list
            working_set.__init__()
            working_set.callbacks.append(lambda dist:dist.activate())
            def runner():
                # Execute the script as if it were __main__.
                ns = dict(__file__=setup_script, __name__='__main__')
                _execfile(setup_script, ns)
            # DirectorySandbox confines file writes to setup_dir while running.
            DirectorySandbox(setup_dir).run(runner)
        except SystemExit as v:
            # Re-raise only a non-zero/meaningful exit code.
            if v.args and v.args[0]:
                raise
            # Normal exit, just return
class AbstractSandbox:
    """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""

    # True only while run() is executing; the generated wrappers check this
    # flag so the sandbox is inert outside run().
    _active = False

    def __init__(self):
        # Names on the real 'os' module that this sandbox overrides: every
        # public os attribute for which a same-named wrapper exists here.
        self._attrs = [
            name for name in dir(_os)
            if not name.startswith('_') and hasattr(self,name)
        ]

    def _copy(self, source):
        # Re-point each wrapped name on 'os' at *source*'s implementation
        # (this sandbox on entry, the saved pristine _os module on exit).
        for name in self._attrs:
            setattr(os, name, getattr(source,name))

    def run(self, func):
        """Run 'func' under os sandboxing"""
        try:
            self._copy(self)
            if _file:
                builtins.file = self._file
            builtins.open = self._open
            self._active = True
            return func()
        finally:
            # Always restore the real os/open, even if func() raised.
            self._active = False
            if _file:
                builtins.file = _file
            builtins.open = _open
            self._copy(_os)

    def _mk_dual_path_wrapper(name):
        # Factory for wrappers around two-path os functions (rename etc.).
        # NOTE: invoked at class-definition time, hence no 'self' parameter.
        original = getattr(_os,name)
        def wrap(self,src,dst,*args,**kw):
            if self._active:
                src,dst = self._remap_pair(name,src,dst,*args,**kw)
            return original(src,dst,*args,**kw)
        return wrap

    for name in ["rename", "link", "symlink"]:
        if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name)

    def _mk_single_path_wrapper(name, original=None):
        # Factory for wrappers around one-path os functions.
        original = original or getattr(_os,name)
        def wrap(self,path,*args,**kw):
            if self._active:
                path = self._remap_input(name,path,*args,**kw)
            return original(path,*args,**kw)
        return wrap

    if _file:
        _file = _mk_single_path_wrapper('file', _file)
    _open = _mk_single_path_wrapper('open', _open)
    for name in [
        "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
        "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
        "startfile", "mkfifo", "mknod", "pathconf", "access"
    ]:
        if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name)

    def _mk_single_with_return(name):
        # Like _mk_single_path_wrapper, but also remaps the returned path.
        original = getattr(_os,name)
        def wrap(self,path,*args,**kw):
            if self._active:
                path = self._remap_input(name,path,*args,**kw)
                return self._remap_output(name, original(path,*args,**kw))
            return original(path,*args,**kw)
        return wrap

    for name in ['readlink', 'tempnam']:
        if hasattr(_os,name): locals()[name] = _mk_single_with_return(name)

    def _mk_query(name):
        # Factory for wrappers around no-path query functions whose return
        # value is a path (getcwd, tmpnam).
        original = getattr(_os,name)
        def wrap(self,*args,**kw):
            retval = original(*args,**kw)
            if self._active:
                return self._remap_output(name, retval)
            return retval
        return wrap

    for name in ['getcwd', 'tmpnam']:
        if hasattr(_os,name): locals()[name] = _mk_query(name)

    def _validate_path(self,path):
        """Called to remap or validate any path, whether input or output"""
        return path

    def _remap_input(self,operation,path,*args,**kw):
        """Called for path inputs"""
        return self._validate_path(path)

    def _remap_output(self,operation,path):
        """Called for path outputs"""
        return self._validate_path(path)

    def _remap_pair(self,operation,src,dst,*args,**kw):
        """Called for path pairs like rename, link, and symlink operations"""
        return (
            self._remap_input(operation+'-from',src,*args,**kw),
            self._remap_input(operation+'-to',dst,*args,**kw)
        )
# Paths that are always writable, even from inside a sandbox.
if hasattr(os, 'devnull'):
    _EXCEPTIONS = [os.devnull,]
else:
    _EXCEPTIONS = []

try:
    # pywin32 writes generated COM wrapper caches under this directory;
    # exempt it so sandboxed scripts importing win32com keep working.
    from win32com.client.gencache import GetGeneratePath
    _EXCEPTIONS.append(GetGeneratePath())
    del GetGeneratePath
except ImportError:
    # it appears pywin32 is not installed, so no need to exclude.
    pass
class DirectorySandbox(AbstractSandbox):
    """Restrict operations to a single subdirectory - pseudo-chroot"""

    # os functions whose path argument implies a write; each is checked
    # against the sandbox root before being allowed through.
    write_ops = dict.fromkeys([
        "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
        "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
    ])

    _exception_patterns = [
        # Allow lib2to3 to attempt to save a pickled grammar object (#121)
        '.*lib2to3.*\.pickle$',
    ]
    "exempt writing to paths that match the pattern"

    def __init__(self, sandbox, exceptions=_EXCEPTIONS):
        # Canonicalize (realpath + normcase) the sandbox root and the
        # always-writable exception paths for reliable prefix comparison.
        self._sandbox = os.path.normcase(os.path.realpath(sandbox))
        self._prefix = os.path.join(self._sandbox,'')
        self._exceptions = [
            os.path.normcase(os.path.realpath(path))
            for path in exceptions
        ]
        AbstractSandbox.__init__(self)

    def _violation(self, operation, *args, **kw):
        # Central choke point: every denied operation raises here.
        raise SandboxViolation(operation, args, kw)

    if _file:
        def _file(self, path, mode='r', *args, **kw):
            # Any non-read mode outside the sandbox is a violation.
            if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
                self._violation("file", path, mode, *args, **kw)
            return _file(path,mode,*args,**kw)

    def _open(self, path, mode='r', *args, **kw):
        # Wrapper for the open() builtin; read modes are always allowed.
        if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
            self._violation("open", path, mode, *args, **kw)
        return _open(path,mode,*args,**kw)

    def tmpnam(self):
        # tmpnam creates files at unpredictable locations; always forbidden.
        self._violation("tmpnam")

    def _ok(self, path):
        # Temporarily deactivate the sandbox so realpath() below uses the
        # real os functions instead of recursing into our wrappers.
        active = self._active
        try:
            self._active = False
            realpath = os.path.normcase(os.path.realpath(path))
            return (
                self._exempted(realpath)
                or realpath == self._sandbox
                or realpath.startswith(self._prefix)
            )
        finally:
            self._active = active

    def _exempted(self, filepath):
        # True if the path is under an exception prefix or matches one of
        # the exemption regexes.
        start_matches = (
            filepath.startswith(exception)
            for exception in self._exceptions
        )
        pattern_matches = (
            re.match(pattern, filepath)
            for pattern in self._exception_patterns
        )
        candidates = itertools.chain(start_matches, pattern_matches)
        return any(candidates)

    def _remap_input(self, operation, path, *args, **kw):
        """Called for path inputs"""
        if operation in self.write_ops and not self._ok(path):
            self._violation(operation, os.path.realpath(path), *args, **kw)
        return path

    def _remap_pair(self, operation, src, dst, *args, **kw):
        """Called for path pairs like rename, link, and symlink operations"""
        if not self._ok(src) or not self._ok(dst):
            self._violation(operation, src, dst, *args, **kw)
        return (src,dst)

    def open(self, file, flags, mode=0o777, *args, **kw):
        """Called for low-level os.open()"""
        if flags & WRITE_FLAGS and not self._ok(file):
            self._violation("os.open", file, flags, mode, *args, **kw)
        return _os.open(file,flags,mode, *args, **kw)
# Bitmask of os.open() flags implying write access; flags that do not exist
# on this platform contribute 0 via the getattr default.
WRITE_FLAGS = functools.reduce(
    operator.or_, [getattr(_os, a, 0) for a in
        "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
class SandboxViolation(DistutilsError):
    """A setup script attempted to modify the filesystem outside the sandbox"""

    def __str__(self):
        # self.args is (operation, positional-args, keyword-args), filled in
        # by DirectorySandbox._violation().
        template = """SandboxViolation: %s%r %s
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available."""
        return template % self.args
#
| bsd-3-clause |
sadleader/odoo | addons/mass_mailing/wizard/test_mailing.py | 148 | 1994 | # -*- coding: utf-8 -*-
from openerp import tools
from openerp.osv import osv, fields
class TestMassMailing(osv.TransientModel):
    """Wizard that sends a mass mailing to a hand-typed list of test addresses."""
    _name = 'mail.mass_mailing.test'
    _description = 'Sample Mail Wizard'

    _columns = {
        'email_to': fields.char('Recipients', required=True,
                                help='Comma-separated list of email addresses.'),
        'mass_mailing_id': fields.many2one('mail.mass_mailing', 'Mailing', required=True),
    }

    _defaults = {
        'email_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
    }

    def send_mail_test(self, cr, uid, ids, context=None):
        """Send the mailing to each test address and mark the mailing 'test'.

        The mail.mail record is created first with an empty body because the
        unsubscribe URL depends on the record id; the final body (mailing
        content + unsubscribe footer) is written back afterwards.
        """
        Mail = self.pool['mail.mail']
        for wizard in self.browse(cr, uid, ids, context=context):
            mailing = wizard.mass_mailing_id
            test_emails = tools.email_split(wizard.email_to)
            mail_ids = []
            for test_mail in test_emails:
                mail_values = {
                    'email_from': mailing.email_from,
                    'reply_to': mailing.reply_to,
                    'email_to': test_mail,
                    'subject': mailing.name,
                    'body_html': '',
                    'notification': True,
                    'mailing_id': mailing.id,
                }
                mail_mail_obj = Mail.browse(cr, uid, Mail.create(cr, uid, mail_values, context=context), context=context)
                unsubscribe_url = Mail._get_unsubscribe_url(cr, uid, mail_mail_obj, test_mail, context=context)
                body = tools.append_content_to_html(mailing.body_html, unsubscribe_url, plaintext=False, container_tag='p')
                # Bug fix: store the body that actually includes the
                # unsubscribe footer; previously the computed `body` was
                # discarded and the raw mailing body was written instead.
                Mail.write(cr, uid, mail_mail_obj.id, {'body_html': body}, context=context)
                mail_ids.append(mail_mail_obj.id)
            Mail.send(cr, uid, mail_ids, context=context)
            self.pool['mail.mass_mailing'].write(cr, uid, [mailing.id], {'state': 'test'}, context=context)
        return True
| agpl-3.0 |
OpenEdgeComputing/elijah-provisioning | elijah/provisioning/progressbar.py | 2 | 2057 | import sys
class ProgressBar(object):
    """Text progress bar rendered as ``[====>.......] 42%``.

    *start* and *end* define the value range being tracked; *width* is the
    number of bar cells; *fill*/*blank* are the cell characters.  Mutating
    methods return ``self`` so calls can be chained.
    """

    def __init__(self, start=0, end=10, width=12, fill='=', blank='.',
                 format='[%(fill)s>%(blank)s] %(progress)s%%',
                 incremental=True, **kwargs):
        super(ProgressBar, self).__init__()
        self.start = start
        self.end = end
        self.width = width
        self.fill = fill
        self.blank = blank
        self.format = format
        self.incremental = incremental
        # Percentage represented by a single bar cell.
        self.step = 100 / float(width)  # fix
        self.reset()

    def set_percent(self, percent):
        """Set absolute progress; *percent* is expressed on the start/end scale."""
        scaled = self._get_progress(percent)
        self.progress = scaled if scaled <= 100 else 100
        return self

    def process(self, increment):
        """Advance the bar by *increment* units, clamping at 100%."""
        delta = self._get_progress(increment)
        new_total = self.progress + delta
        self.progress = new_total if new_total < 100 else 100
        return self

    def __str__(self):
        cells_done = int(self.progress / self.step)  # fix
        return self.format % {
            'fill': self.fill * cells_done,
            'blank': self.blank * (self.width - cells_done),
            'progress': int(self.progress)
        }

    __repr__ = __str__

    def _get_progress(self, increment):
        # Convert a value on the start/end scale into a percentage.
        return float(increment * 100) / self.end

    def reset(self):
        """Resets the current progress to the start point"""
        self.progress = self._get_progress(self.start)
        return self
class AnimatedProgressBar(ProgressBar):
    """ProgressBar that redraws itself in place when writing to a TTY.

    The output stream defaults to ``sys.stdout`` and can be overridden with
    a ``stdout`` keyword argument.
    """

    def __init__(self, *args, **kwargs):
        super(AnimatedProgressBar, self).__init__(*args, **kwargs)
        self.stdout = kwargs.get('stdout', sys.stdout)

    def show_progress(self):
        stream = self.stdout
        if hasattr(stream, 'isatty') and stream.isatty():
            # On a terminal, rewind to the start of the line so the bar
            # redraws in place instead of scrolling.
            stream.write('\r')
        else:
            stream.write('\n')
        stream.write(str(self))
        stream.flush()

    def finish(self):
        self.stdout.write('\n')
| apache-2.0 |
837468220/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_cast.py | 53 | 3222 | from ctypes import *
import unittest
import sys
class Test(unittest.TestCase):
    """Tests for ctypes.cast() across array, pointer, and string types."""

    def test_array2pointer(self):
        array = (c_int * 3)(42, 17, 2)

        # casting an array to a pointer works.
        ptr = cast(array, POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])

        # Reinterpret the same memory as shorts; expected layout depends on
        # the platform's endianness.
        if 2*sizeof(c_short) == sizeof(c_int):
            ptr = cast(array, POINTER(c_short))
            if sys.byteorder == "little":
                self.assertEqual([ptr[i] for i in range(6)],
                                     [42, 0, 17, 0, 2, 0])
            else:
                self.assertEqual([ptr[i] for i in range(6)],
                                     [0, 42, 0, 17, 0, 2])

    def test_address2pointer(self):
        array = (c_int * 3)(42, 17, 2)

        # Casting from a raw address works both via c_void_p and via a
        # plain integer.
        address = addressof(array)
        ptr = cast(c_void_p(address), POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])

        ptr = cast(address, POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])

    def test_p2a_objects(self):
        # Verify that cast() shares the _objects keep-alive dict between
        # the source array and the resulting pointer.
        array = (c_char_p * 5)()
        self.assertEqual(array._objects, None)
        array[0] = b"foo bar"
        self.assertEqual(array._objects, {'0': b"foo bar"})

        p = cast(array, POINTER(c_char_p))
        # array and p share a common _objects attribute
        self.assertTrue(p._objects is array._objects)
        self.assertEqual(array._objects, {'0': b"foo bar", id(array): array})
        p[0] = b"spam spam"
        self.assertEqual(p._objects, {'0': b"spam spam", id(array): array})
        self.assertTrue(array._objects is p._objects)
        p[1] = b"foo bar"
        self.assertEqual(p._objects, {'1': b'foo bar', '0': b"spam spam", id(array): array})
        self.assertTrue(array._objects is p._objects)

    def test_other(self):
        # Pointer slicing; the interleaved c_int() calls exercise allocator
        # churn to catch use-after-free style bugs in the slice machinery.
        p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int))
        self.assertEqual(p[:4], [1,2, 3, 4])
        self.assertEqual(p[:4:], [1, 2, 3, 4])
        self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        c_int()
        self.assertEqual(p[:4], [1, 2, 3, 4])
        self.assertEqual(p[:4:], [1, 2, 3, 4])
        self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        p[2] = 96
        self.assertEqual(p[:4], [1, 2, 96, 4])
        self.assertEqual(p[:4:], [1, 2, 96, 4])
        self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        c_int()
        self.assertEqual(p[:4], [1, 2, 96, 4])
        self.assertEqual(p[:4:], [1, 2, 96, 4])
        self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])

    def test_char_p(self):
        # This didn't work: bad argument to internal function
        s = c_char_p(b"hiho")
        self.assertEqual(cast(cast(s, c_void_p), c_char_p).value,
                             b"hiho")

    # Only define the wchar test when this build of ctypes has wchar
    # support (c_wchar_p may be missing).
    try:
        c_wchar_p
    except NameError:
        pass
    else:
        def test_wchar_p(self):
            s = c_wchar_p("hiho")
            self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
                                 "hiho")

if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
kalliope-project/kalliope | kalliope/core/RestAPI/views/synapses_views.py | 1 | 16365 | import logging
import os
import time
from flask import jsonify, Blueprint
from flask import request
from werkzeug.utils import secure_filename
from kalliope import Utils
from kalliope.core.ConfigurationManager import BrainLoader, SettingEditor
from kalliope.core.ConfigurationManager.ConfigurationChecker import KalliopeModuleNotFoundError, ConfigurationChecker, \
InvalidSynapeName, NoSynapeNeurons, NoSynapeSignals
from kalliope.core.Cortex import Cortex
from kalliope.core.Lifo.LifoManager import LifoManager
from kalliope.core.Models import Synapse
from kalliope.core.Models.MatchedSynapse import MatchedSynapse
from kalliope.core.OrderListener import OrderListener
from kalliope.core.RestAPI import utils
from kalliope.core.RestAPI.utils import requires_auth
from kalliope.core.SynapseLauncher import SynapseLauncher
# Module-wide logger; basicConfig() is a no-op if logging was already
# configured elsewhere.
logging.basicConfig()
logger = logging.getLogger("kalliope")

# Where uploaded audio is staged before STT processing, and which audio
# extensions are accepted as-is (anything else gets converted to wav).
UPLOAD_FOLDER = '/tmp/kalliope/tmp_uploaded_audio'
ALLOWED_EXTENSIONS = {'wav'}
class SynapsesView(Blueprint):
    """Flask blueprint exposing CRUD and execution endpoints for synapses.

    Routes are registered in __init__; every endpoint is protected by HTTP
    basic auth via the requires_auth decorator.
    """

    def __init__(self, name, import_name, app, brain=None, settings=None):
        self.brain = brain
        self.settings = settings
        self.app = app
        super(SynapsesView, self).__init__(name, import_name)

        # api_response sent by the Order Analyser when using the /synapses/start/audio URL
        self.api_response = None
        # boolean used to notify the main process that we get the list of returned synapse
        self.order_analyser_return = False

        # routes
        self.add_url_rule('/synapses', view_func=self.get_synapses, methods=['GET'])
        self.add_url_rule('/synapses', view_func=self.create_synapses, methods=['POST'])
        self.add_url_rule('/synapses/<synapse_name>', view_func=self.get_synapse, methods=['GET'])
        self.add_url_rule('/synapses/<synapse_name>', view_func=self.delete_synapse, methods=['DELETE'])
        self.add_url_rule('/synapses/start/id/<synapse_name>', view_func=self.run_synapse_by_name, methods=['POST'])
        self.add_url_rule('/synapses/start/order', view_func=self.run_synapse_by_order, methods=['POST'])
        self.add_url_rule('/synapses/start/audio', view_func=self.run_synapse_by_audio, methods=['POST'])

    def _get_synapse_by_name(self, synapse_name):
        """
        Find a synapse in the brain by its name
        :param synapse_name:
        :return:
        """
        # Linear scan over the brain's synapses; returns None when no match.
        all_synapse = self.brain.synapses
        for synapse in all_synapse:
            try:
                if synapse.name == synapse_name:
                    return synapse
            except KeyError:
                pass
        return None

    @requires_auth
    def create_synapses(self):
        """
        curl -i -H "Content-Type: application/json" \
        --user admin:secret \
        -X POST \
        -d '{
          "name": "Say-hello",
          "signals": [
            {
              "order": "je suis nicolas"
            }
          ],
          "neurons": [
            {
              "say": {
                "message": "je sais"
              }
            }
          ]
        }' \
        http://127.0.0.1:5000/synapses
        :return:
        """
        # Reject payloads that are not JSON or lack the mandatory name.
        if not request.get_json() or 'name' not in request.get_json():
            data = {
                "Error": "Wrong parameters, 'name' not set"
            }
            return jsonify(error=data), 400

        new_synapse = request.get_json()
        # Structural validation before instantiating any neuron/signal.
        try:
            ConfigurationChecker().check_synape_dict(new_synapse)
        except (InvalidSynapeName, NoSynapeNeurons, NoSynapeSignals) as e:
            data = {
                "error": "%s" % e
            }
            return jsonify(data), 400

        try:
            name = new_synapse["name"]
            neurons = BrainLoader.get_neurons(new_synapse["neurons"], self.settings)
            signals = BrainLoader.get_signals(new_synapse["signals"])
            new_synapse_instance = Synapse(name=name, neurons=neurons, signals=signals)
            self.brain.synapses.append(new_synapse_instance)
            # TODO save the brain in yaml
            return jsonify(new_synapse_instance.serialize()), 201
        except KalliopeModuleNotFoundError as e:
            data = {
                "error": "%s" % e
            }
            return jsonify(data), 400

    @requires_auth
    def get_synapses(self):
        """
        get all synapses.
        test with curl:
        curl -i --user admin:secret  -X GET  http://127.0.0.1:5000/synapses
        """
        logger.debug("[FlaskAPI] get_synapses: all")
        data = jsonify(synapses=[e.serialize() for e in self.brain.synapses])
        return data, 200

    @requires_auth
    def get_synapse(self, synapse_name):
        """
        get a synapse by its name
        test with curl:
        curl --user admin:secret -i -X GET  http://127.0.0.1:5000/synapses/say-hello-en
        """
        logger.debug("[FlaskAPI] get_synapse: synapse_name -> %s" % synapse_name)
        synapse_target = self._get_synapse_by_name(synapse_name)
        if synapse_target is not None:
            data = jsonify(synapses=synapse_target.serialize())
            return data, 200

        data = {
            "synapse name not found": "%s" % synapse_name
        }
        return jsonify(error=data), 404

    @requires_auth
    def delete_synapse(self, synapse_name):
        """
        delete a synapse by its name
        test with curl:
        curl --user admin:secret -i -X DELETE  http://127.0.0.1:5000/synapses/say-hello-en
        """
        logger.debug("[FlaskAPI] delete_synapse -> %s" % synapse_name)
        synapse_target = self._get_synapse_by_name(synapse_name)
        if synapse_target is not None:
            # delete from brain
            self._delete_synapse_by_name(synapse_name)
            return '', 204

        data = {
            "synapse name not found": "%s" % synapse_name
        }
        return jsonify(error=data), 404

    @requires_auth
    def run_synapse_by_name(self, synapse_name):
        """
        Run a synapse by its name
        test with curl:
        curl -i --user admin:secret -X POST  http://127.0.0.1:5000/synapses/start/id/say-hello-fr

        run a synapse without making kalliope speaking
        curl -i -H "Content-Type: application/json" --user admin:secret -X POST  \
        -d '{"mute":"true"}' http://127.0.0.1:5000/synapses/start/id/say-hello-fr

        Run a synapse by its name and pass order's parameters
        curl -i -H "Content-Type: application/json" --user admin:secret -X POST  \
        -d '{"mute":"true", "parameters": {"parameter1": "value1" }}' \
        http://127.0.0.1:5000/synapses/start/id/say-hello-fr

        :param synapse_name: name(id) of the synapse to execute
        :return:
        """
        # get a synapse object from the name
        logger.debug("[FlaskAPI] run_synapse_by_name: synapse name -> %s" % synapse_name)
        synapse_target = BrainLoader().brain.get_synapse_by_name(synapse_name=synapse_name)

        # Store the mute value, then apply depending of the request parameters
        old_mute_value = self.settings.options.mute
        mute = utils.get_value_flag_from_request(http_request=request,
                                                 flag_to_find="mute",
                                                 is_boolean=True)
        if mute is not None:
            SettingEditor.set_mute_status(mute=mute)

        # get parameters
        parameters = utils.get_parameters_from_request(request)

        if synapse_target is None:
            data = {
                "synapse name not found": "%s" % synapse_name
            }
            if mute is not None:
                # Restore the caller's previous mute setting before leaving.
                SettingEditor.set_mute_status(mute=old_mute_value)
            return jsonify(error=data), 404
        else:
            # generate a MatchedSynapse from the synapse
            matched_synapse = MatchedSynapse(matched_synapse=synapse_target, overriding_parameter=parameters)
            # get the current LIFO buffer from the singleton
            lifo_buffer = LifoManager.get_singleton_lifo()
            lifo_buffer.add_synapse_list_to_lifo([matched_synapse])
            response = lifo_buffer.execute(is_api_call=True)
            data = jsonify(response)
            if mute is not None:
                SettingEditor.set_mute_status(mute=old_mute_value)
            return data, 201

    @requires_auth
    def run_synapse_by_order(self):
        """
        Give an order to Kalliope via API like it was from a spoken one
        Test with curl
        curl -i --user admin:secret -H "Content-Type: application/json" -X POST \
        -d '{"order":"my order"}' http://localhost:5000/synapses/start/order

        In case of quotes in the order or accents, use a file
        cat post.json:
        {"order":"j'aime"}
        curl -i --user admin:secret -H "Content-Type: application/json" -X POST \
        --data @post.json http://localhost:5000/order/

        Can be used with mute flag
        curl -i --user admin:secret -H "Content-Type: application/json" -X POST \
        -d '{"order":"my order", "mute":"true"}' http://localhost:5000/synapses/start/order

        :return:
        """
        if not request.get_json() or 'order' not in request.get_json():
            data = {
                "Error": "Wrong parameters, 'order' not set"
            }
            return jsonify(error=data), 400

        # NOTE(review): get_json() receives 'order' as its first positional
        # argument (Flask's `force`); the whole JSON payload is returned and
        # the order text is extracted below — confirm this is intended.
        order = request.get_json('order')
        # Store the mute value, then apply depending of the request parameters
        old_mute_value = self.settings.options.mute
        mute = utils.get_value_flag_from_request(http_request=request,
                                                 flag_to_find="mute",
                                                 is_boolean=True)
        if mute is not None:
            SettingEditor.set_mute_status(mute=mute)

        if order is not None:
            # get the order
            order_to_run = order["order"]
            logger.debug("[FlaskAPI] run_synapse_by_order: order to run -> %s" % order_to_run)
            api_response = SynapseLauncher.run_matching_synapse_from_order(order_to_run,
                                                                           self.brain,
                                                                           self.settings,
                                                                           is_api_call=True)
            # Remember the last processed order for later templating/use.
            Cortex.save('kalliope_last_order', order_to_run)
            data = jsonify(api_response)
            if mute is not None:
                SettingEditor.set_mute_status(mute=old_mute_value)
            return data, 201
        else:
            data = {
                "error": "order cannot be null"
            }
            if mute is not None:
                SettingEditor.set_mute_status(mute=old_mute_value)
            return jsonify(error=data), 400

    @requires_auth
    def run_synapse_by_audio(self):
        """
        Give an order to Kalliope with an audio file
        Test with curl
        curl -i --user admin:secret -X POST  http://localhost:5000/synapses/start/audio -F "file=@/path/to/input.wav"

        With mute flag
        curl -i --user admin:secret -X POST \
        http://localhost:5000/synapses/start/audio -F "file=@path/to/file.wav" -F mute="true"
        :return:
        """
        # check if the post request has the file part
        if 'file' not in request.files:
            data = {
                "error": "No file provided"
            }
            return jsonify(error=data), 400

        uploaded_file = request.files['file']
        # if user does not select file, browser also
        # submit a empty part without filename
        if uploaded_file.filename == '':
            data = {
                "error": "No file provided"
            }
            return jsonify(error=data), 400

        # Store the mute value, then apply depending of the request parameters
        old_mute_value = self.settings.options.mute
        if request.form.get("mute"):
            SettingEditor.set_mute_status(mute=Utils.str_to_bool(request.form.get("mute")))

        # save the file
        filename = secure_filename(uploaded_file.filename)
        base_path = os.path.join(self.app.config['UPLOAD_FOLDER'])
        uploaded_file.save(os.path.join(base_path, filename))

        # now start analyse the audio with STT engine
        audio_path = base_path + os.sep + filename
        logger.debug("[FlaskAPI] run_synapse_by_audio: with file path %s" % audio_path)
        if not self.allowed_file(audio_path):
            audio_path = self._convert_to_wav(audio_file_path=audio_path)
        ol = OrderListener(callback=self.audio_analyser_callback, audio_file_path=audio_path)
        ol.start()
        ol.join()
        # wait the Order Analyser processing. We need to wait in this thread to keep the context
        while not self.order_analyser_return:
            time.sleep(0.1)
        self.order_analyser_return = False
        if self.api_response is not None and self.api_response:
            data = jsonify(self.api_response)
            self.api_response = None
            logger.debug("[FlaskAPI] run_synapse_by_audio: data %s" % data)
            if request.form.get("mute"):
                SettingEditor.set_mute_status(mute=old_mute_value)
            return data, 201
        else:
            data = {
                "error": "The given order doesn't match any synapses"
            }
            if request.form.get("mute"):
                SettingEditor.set_mute_status(mute=old_mute_value)
            return jsonify(error=data), 400

    def audio_analyser_callback(self, order):
        """
        Callback of the OrderListener. Called after the processing of the audio file
        This method will
        - call the Order Analyser to analyse the order and launch corresponding synapse as usual.
        - get a list of launched synapse.
        - give the list to the main process via self.launched_synapses
        - notify that the processing is over via order_analyser_return
        :param order: string order to analyse
        :return:
        """
        logger.debug("[FlaskAPI] audio_analyser_callback: order to process -> %s" % order)
        api_response = SynapseLauncher.run_matching_synapse_from_order(order,
                                                                       self.brain,
                                                                       self.settings,
                                                                       is_api_call=True)
        self.api_response = api_response
        Cortex.save('kalliope_last_order', order)

        # this boolean will notify the main process that the order have been processed
        self.order_analyser_return = True

    @staticmethod
    def _convert_to_wav(audio_file_path):
        """
        If not already .wav, convert an incoming audio file to wav format. Using system avconv (raspberry)
        :param audio_file_path: the current full file path
        :return: Wave file path
        """
        # Not allowed so convert into wav using avconv (raspberry)
        base = os.path.splitext(audio_file_path)[0]
        extension = os.path.splitext(audio_file_path)[1]
        if extension != ".wav":
            current_file_path = audio_file_path
            logger.debug("Converting file " + current_file_path + " to .wav")
            audio_file_path = base + ".wav"
            os.system("ffmpeg -loglevel panic -y -i " + current_file_path + " " + audio_file_path)  # --> deprecated
            # subprocess.call(['avconv', '-y', '-i', audio_path, new_file_path], shell=True) # Not working ...

        return audio_file_path

    @staticmethod
    def allowed_file(filename):
        # True when the file has an extension listed in ALLOWED_EXTENSIONS.
        return '.' in filename and \
               filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

    def _delete_synapse_by_name(self, synapse_name):
        # Remove the first synapse with a matching name from the brain.
        all_synapse = self.brain.synapses
        for synapse in all_synapse:
            try:
                if synapse.name == synapse_name:
                    logger.debug("[FlaskAPI] remove synapse from the brain: '%s'" % synapse_name)
                    all_synapse.remove(synapse)
                    # TODO save the brain in yaml
            except KeyError:
                pass
        return None
| gpl-3.0 |
TrossSoftwareAndTech/webvt | lib/node-v7.2.0/deps/v8/tools/ignition/linux_perf_bytecode_annotate_test.py | 28 | 2253 | # Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import StringIO
import unittest
import linux_perf_bytecode_annotate as bytecode_annotate
PERF_SCRIPT_OUTPUT = """
# This line is a comment
# This should be ignored too
#
# cdefab01 aRandomSymbol::Name(to, be, ignored)
00000000 firstSymbol
00000123 secondSymbol
01234567 foo
abcdef76 BytecodeHandler:bar+0x12
76543210 baz
abcdef76 BytecodeHandler:bar+0x16
76543210 baz
01234567 foo
abcdef76 BytecodeHandler:foo+0x1
76543210 baz
abcdef76 BytecodeHandler:bar+0x2
76543210 bar
abcdef76 BytecodeHandler:bar+0x19
abcdef76 BytecodeHandler:bar+0x12
abcdef76 BytecodeHandler:bar+0x12
"""
D8_CODEGEN_OUTPUT = """
kind = BYTECODE_HANDLER
name = foo
compiler = turbofan
Instructions (size = 3)
0x3101394a3c0 0 55 push rbp
0x3101394a3c1 1 ffe3 jmp rbx
kind = BYTECODE_HANDLER
name = bar
compiler = turbofan
Instructions (size = 5)
0x3101394b3c0 0 55 push rbp
0x3101394b3c1 1 4883c428 REX.W addq rsp,0x28
# Unexpected comment
0x3101394b3c5 5 ffe3 jmp rbx
kind = BYTECODE_HANDLER
name = baz
compiler = turbofan
Instructions (size = 5)
0x3101394c3c0 0 55 push rbp
0x3101394c3c1 1 4883c428 REX.W addq rsp,0x28
0x3101394c3c5 5 ffe3 jmp rbx
"""
class LinuxPerfBytecodeAnnotateTest(unittest.TestCase):
    """Tests parsing of perf-script output and d8 codegen dumps."""

    def test_bytecode_offset_generator(self):
        perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT)
        offsets = list(
            bytecode_annotate.bytecode_offset_generator(perf_stream, "bar"))
        # Expected: decimal offsets of the sampled 'bar' handler hits
        # (0x12 == 18, 0x19 == 25).
        self.assertListEqual(offsets, [18, 25, 18, 18])

    def test_bytecode_disassembly_generator(self):
        codegen_stream = StringIO.StringIO(D8_CODEGEN_OUTPUT)
        disassembly = list(
            bytecode_annotate.bytecode_disassembly_generator(codegen_stream, "bar"))
        # Only the instruction lines of the 'bar' handler should be yielded;
        # comments and other handlers are filtered out.
        self.assertListEqual(disassembly, [
            "0x3101394b3c0 0 55 push rbp",
            "0x3101394b3c1 1 4883c428 REX.W addq rsp,0x28",
            "0x3101394b3c5 5 ffe3 jmp rbx"])

if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
kwailamchan/programming-languages | cpp/deeplearning/caffe/examples/pycaffe/caffenet.py | 37 | 2112 | from __future__ import print_function
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2
# helper function for common structures
def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
    # Convolution followed by an in-place ReLU; returns both layers so the
    # caller can reference either top.
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, group=group)
    return conv, L.ReLU(conv, in_place=True)
def fc_relu(bottom, nout):
    # Fully-connected (InnerProduct) layer followed by an in-place ReLU.
    fc = L.InnerProduct(bottom, num_output=nout)
    return fc, L.ReLU(fc, in_place=True)
def max_pool(bottom, ks, stride=1):
    # Max-pooling layer with kernel size *ks*.
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)
def caffenet(lmdb, batch_size=256, include_acc=False):
    """Build a CaffeNet (AlexNet-style) network definition.

    :param lmdb: path to the LMDB database providing images and labels
    :param batch_size: images per iteration
    :param include_acc: also emit an Accuracy layer (for test/val nets)
    :return: serialized NetParameter proto text
    """
    data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True))

    # the net itself
    conv1, relu1 = conv_relu(data, 11, 96, stride=4)
    pool1 = max_pool(relu1, 3, stride=2)
    norm1 = L.LRN(pool1, local_size=5, alpha=1e-4, beta=0.75)
    conv2, relu2 = conv_relu(norm1, 5, 256, pad=2, group=2)
    pool2 = max_pool(relu2, 3, stride=2)
    norm2 = L.LRN(pool2, local_size=5, alpha=1e-4, beta=0.75)
    conv3, relu3 = conv_relu(norm2, 3, 384, pad=1)
    conv4, relu4 = conv_relu(relu3, 3, 384, pad=1, group=2)
    conv5, relu5 = conv_relu(relu4, 3, 256, pad=1, group=2)
    pool5 = max_pool(relu5, 3, stride=2)
    fc6, relu6 = fc_relu(pool5, 4096)
    drop6 = L.Dropout(relu6, in_place=True)
    fc7, relu7 = fc_relu(drop6, 4096)
    drop7 = L.Dropout(relu7, in_place=True)
    fc8 = L.InnerProduct(drop7, num_output=1000)
    loss = L.SoftmaxWithLoss(fc8, label)

    if include_acc:
        acc = L.Accuracy(fc8, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
def make_net():
    # Paths are placeholders; point them at real LMDB databases before use.
    with open('train.prototxt', 'w') as f:
        print(caffenet('/path/to/caffe-train-lmdb'), file=f)

    with open('test.prototxt', 'w') as f:
        print(caffenet('/path/to/caffe-val-lmdb', batch_size=50, include_acc=True), file=f)

if __name__ == '__main__':
    make_net()
| mit |
kalahbrown/HueBigSQL | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/Hash/MD5.py | 123 | 2863 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""MD5 cryptographic hash algorithm.
MD5 is specified in RFC1321_ and produces the 128 bit digest of a message.
>>> from Crypto.Hash import MD5
>>>
>>> h = MD5.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
MD5 stand for Message Digest version 5, and it was invented by Rivest in 1991.
This algorithm is insecure. Do not use it for new designs.
.. _RFC1321: http://tools.ietf.org/html/rfc1321
"""
_revision__ = "$Id$"
__all__ = ['new', 'digest_size', 'MD5Hash' ]
from Crypto.Util.py3compat import *
from Crypto.Hash.hashalgo import HashAlgo
try:
# The md5 module is deprecated in Python 2.6, so use hashlib when possible.
import hashlib
hashFactory = hashlib.md5
except ImportError:
import md5
hashFactory = md5
class MD5Hash(HashAlgo):
    """Class that implements an MD5 hash

    :undocumented: block_size
    """

    #: ASN.1 Object identifier (OID)::
    #:
    #:  id-md5      OBJECT IDENTIFIER ::= {
    #:       iso(1) member-body(2) us(840) rsadsi(113549)
    #:       digestAlgorithm(2) 5
    #:  }
    #:
    #: This value uniquely identifies the MD5 algorithm.
    oid = b('\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05')

    # MD5 yields a 128-bit (16-byte) digest and processes 64-byte blocks.
    digest_size = 16
    block_size = 64

    def __init__(self, data=None):
        HashAlgo.__init__(self, hashFactory, data)

    def new(self, data=None):
        # Return a brand-new hash object, independent of this instance.
        return MD5Hash(data)
def new(data=None):
    """Return a fresh instance of the hash object.

    :Parameters:
      data : byte string
        The very first chunk of the message to hash.
        It is equivalent to an early call to `MD5Hash.update()`.
        Optional.

    :Return: A `MD5Hash` object
    """
    # Module-level factory, mirroring hashlib's interface.
    return MD5Hash().new(data)

#: The size of the resulting hash in bytes.
digest_size = MD5Hash.digest_size

#: The internal block size of the hash algorithm in bytes.
block_size = MD5Hash.block_size
| apache-2.0 |
moijes12/oh-mainline | vendor/packages/scrapy/scrapy/contrib/memdebug.py | 19 | 1354 | """
MemoryDebugger extension
See documentation in docs/topics/extensions.rst
"""
import gc
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.conf import settings
from scrapy.stats import stats
from scrapy.utils.trackref import live_refs
class MemoryDebugger(object):
    """Scrapy extension that records memory-leak indicators in the stats
    collector when the engine stops (libxml2 leaked bytes, gc garbage
    count, and live object refs when TRACK_REFS is enabled).
    """
    def __init__(self):
        # libxml2 is optional; keep None when unavailable so the
        # engine_started/engine_stopped hooks can skip its bookkeeping.
        try:
            import libxml2
            self.libxml2 = libxml2
        except ImportError:
            self.libxml2 = None
        if not settings.getbool('MEMDEBUG_ENABLED'):
            raise NotConfigured
        dispatcher.connect(self.engine_started, signals.engine_started)
        dispatcher.connect(self.engine_stopped, signals.engine_stopped)
    def engine_started(self):
        # Turn on libxml2's internal allocation accounting.
        if self.libxml2:
            self.libxml2.debugMemory(1)
    def engine_stopped(self):
        if self.libxml2:
            self.libxml2.cleanupParser()
            # debugMemory(1) returns bytes still allocated after cleanup.
            stats.set_value('memdebug/libxml2_leaked_bytes', self.libxml2.debugMemory(1))
        gc.collect()
        stats.set_value('memdebug/gc_garbage_count', len(gc.garbage))
        if settings.getbool('TRACK_REFS'):
            # live_refs maps tracked classes to weak dicts of live instances
            # (Python 2 iteritems).
            for cls, wdict in live_refs.iteritems():
                if not wdict:
                    continue
                stats.set_value('memdebug/live_refs/%s' % cls.__name__, len(wdict))
| agpl-3.0 |
Kamik423/uni_plan | plan/plan/lib64/python3.4/site-packages/wheel/test/test_wheelfile.py | 327 | 4585 | import os
import wheel.install
import wheel.archive
import hashlib
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import codecs
import zipfile
import pytest
import shutil
import tempfile
from contextlib import contextmanager
@contextmanager
def environ(key, value):
    """Temporarily set the environment variable *key* to *value*.

    On exit the variable is restored to its previous value, or removed
    entirely if it did not exist before entry.
    """
    previous = os.environ.get(key)
    try:
        os.environ[key] = value
        yield
    finally:
        # Undo the change: delete a variable we introduced, otherwise
        # put the original value back.
        if previous is None:
            del os.environ[key]
        else:
            os.environ[key] = previous
@contextmanager
def temporary_directory():
    """Yield the path of a freshly created temp directory, deleting the
    whole tree on exit.

    (tempfile.TemporaryDirectory doesn't exist in Python 2, hence this
    hand-rolled equivalent.)
    """
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextmanager
def readable_zipfile(path):
    """Open *path* as a read-only ZipFile and guarantee it gets closed.

    (zipfile.ZipFile() isn't a context manager under Python 2, so this
    wrapper provides the with-statement support.)
    """
    archive = zipfile.ZipFile(path, 'r')
    try:
        yield archive
    finally:
        archive.close()
def test_verifying_zipfile():
    """Exercise wheel.install.VerifyingZipFile hash checking in both
    default (opt-in) and strict (every file must be declared) modes."""
    if not hasattr(zipfile.ZipExtFile, '_update_crc'):
        pytest.skip('No ZIP verification. Missing ZipExtFile._update_crc.')
    # Build an in-memory zip with three members to read back.
    sio = StringIO()
    zf = zipfile.ZipFile(sio, 'w')
    zf.writestr("one", b"first file")
    zf.writestr("two", b"second file")
    zf.writestr("three", b"third file")
    zf.close()
    # In default mode, VerifyingZipFile checks the hash of any read file
    # mentioned with set_expected_hash(). Files not mentioned with
    # set_expected_hash() are not checked.
    vzf = wheel.install.VerifyingZipFile(sio, 'r')
    vzf.set_expected_hash("one", hashlib.sha256(b"first file").digest())
    vzf.set_expected_hash("three", "blurble")
    vzf.open("one").read()
    vzf.open("two").read()
    try:
        # "three" was registered with a bogus hash, so reading must fail.
        vzf.open("three").read()
    except wheel.install.BadWheelFile:
        pass
    else:
        raise Exception("expected exception 'BadWheelFile()'")
    # In strict mode, VerifyingZipFile requires every read file to be
    # mentioned with set_expected_hash().
    vzf.strict = True
    try:
        vzf.open("two").read()
    except wheel.install.BadWheelFile:
        pass
    else:
        raise Exception("expected exception 'BadWheelFile()'")
    # Registering with hash None marks the member readable without a check.
    vzf.set_expected_hash("two", None)
    vzf.open("two").read()
def test_pop_zipfile():
    """VerifyingZipFile.pop() must remove the last member, and must only
    be usable while the archive is open for writing/appending."""
    sio = StringIO()
    zf = wheel.install.VerifyingZipFile(sio, 'w')
    zf.writestr("one", b"first file")
    zf.writestr("two", b"second file")
    zf.close()
    try:
        # pop() on a closed archive is an error.
        zf.pop()
    except RuntimeError:
        pass # already closed
    else:
        raise Exception("expected RuntimeError")
    # Reopen in append mode; pop() drops the most recently added member.
    zf = wheel.install.VerifyingZipFile(sio, 'a')
    zf.pop()
    zf.close()
    # Only "one" should survive.
    zf = wheel.install.VerifyingZipFile(sio, 'r')
    assert len(zf.infolist()) == 1
def test_zipfile_timestamp():
    """SOURCE_DATE_EPOCH should pin the timestamps of all zip members
    (reproducible builds)."""
    # An environment variable can be used to influence the timestamp on
    # TarInfo objects inside the zip. See issue #143. TemporaryDirectory is
    # not a context manager under Python 3.
    with temporary_directory() as tempdir:
        for filename in ('one', 'two', 'three'):
            path = os.path.join(tempdir, filename)
            with codecs.open(path, 'w', encoding='utf-8') as fp:
                fp.write(filename + '\n')
        zip_base_name = os.path.join(tempdir, 'dummy')
        # The earliest date representable in TarInfos, 1980-01-01
        with environ('SOURCE_DATE_EPOCH', '315576060'):
            zip_filename = wheel.archive.make_wheelfile_inner(
                zip_base_name, tempdir)
        with readable_zipfile(zip_filename) as zf:
            for info in zf.infolist():
                # Every member's (year, month, day) must be the epoch date.
                assert info.date_time[:3] == (1980, 1, 1)
def test_zipfile_attributes():
    """Unix file modes must be preserved in the zip's external_attr, and
    members must be deflate-compressed."""
    # With the change from ZipFile.write() to .writestr(), we need to manually
    # set member attributes.
    with temporary_directory() as tempdir:
        files = (('foo', 0o644), ('bar', 0o755))
        for filename, mode in files:
            path = os.path.join(tempdir, filename)
            with codecs.open(path, 'w', encoding='utf-8') as fp:
                fp.write(filename + '\n')
            os.chmod(path, mode)
        zip_base_name = os.path.join(tempdir, 'dummy')
        zip_filename = wheel.archive.make_wheelfile_inner(
            zip_base_name, tempdir)
        with readable_zipfile(zip_filename) as zf:
            for filename, mode in files:
                info = zf.getinfo(os.path.join(tempdir, filename))
                # external_attr packs the Unix mode (plus S_IFREG 0o100000)
                # into the high 16 bits.
                assert info.external_attr == (mode | 0o100000) << 16
                assert info.compress_type == zipfile.ZIP_DEFLATED
| apache-2.0 |
jag1g13/lammps | tools/eff/lmp2radii.py | 54 | 3001 | #!/usr/local/bin/python-2.5/bin/python
Info="""
Module name: lmp2radii.py
Author: (c) Andres Jaramillo-Botero
California Institute of Technology
ajaramil@wag.caltech.edu
Project: pEFF
Version: August 2009
Extracts the electron radii from a lammps trajectory dump of style custom:
dump 1 all custom period dump_file id type q spin eradius x y z...
NOTE: The radius must be the 5th column per trajectory entry in the dump file
"""
# import essentials:
import sys, os
from math import log10
from shutil import rmtree
from getopt import gnu_getopt as getopt
import numpy
def printHelp():
    """Print the module banner (Info) and the usage string.

    Python 2 print statements; this script is not Python 3 compatible.
    """
    print Info
    print "Usage: python lmp2radii.pyx test.lammpstrj\n"
    return
def makeradii(infile):
    """Extract electron radii per frame from a LAMMPS custom-style dump and
    write a per-atom x per-frame table to ``<infile>.out``.

    Expects the radius in the 5th whitespace-separated column of each atom
    line, and the standard 9-line per-frame header (see module docstring).
    Python 2 only (print statements, xreadlines).
    """
    print "Reading %s ... [WAIT]"%infile,
    fin = open(infile,'r')
    lines = fin.xreadlines()
    print 7*"\b"+"[DONE]"
    frame=0
    radii=[]
    # grep the number of frames and atoms/frame
    # NOTE(review): shells out via os.system with infile interpolated into
    # the command string — unsafe for filenames containing shell
    # metacharacters; also leaves/removes temp files in the CWD.
    os.system("grep TIMESTEP %s | wc -l > frames; grep -m 1 -A 1 ATOMS %s > atoms"%(infile,infile))
    tmp=open("frames",'r')
    frames=int(tmp.readline().split()[0])
    tmp.close()
    tmp=open("atoms",'r')
    atoms=int(tmp.readlines()[1].split()[0])
    tmp.close()
    os.system("rm -rf frames atoms lines")
    # One radius slot per (atom, frame); zeros mean "not an electron".
    arry=numpy.zeros((atoms,frames),dtype=float)
    framecnt=0
    header=9
    ecount=0
    print "Extracting electron radii per frame from %s ... "%(infile),
    for i,line in enumerate(lines):
        # lo..hi delimits the atom lines of the current frame.
        lo=(atoms+header)*framecnt+header
        hi=lo+atoms
        if (i<lo):
            continue
        elif (i >= lo) and (i < hi):
            lparse=line.split()
            id=int(lparse[0])
            r=float(lparse[4])
            # Non-zero radius identifies an electron; count them once
            # (on the first frame only).
            if (r!=0.0):
                arry[id-1][framecnt]=r
                if (framecnt==0): ecount+=1
        if (i==lo+1):
            # Progress indicator: overwrite in place with backspaces.
            sys.stdout.write("%d/%d%s"%(framecnt+1,frames,(int(log10(framecnt+1))+3+int(log10(frames)))*"\b"))
            sys.stdout.flush()
        if (i == hi+1):
            framecnt+=1
    print
    print "Writing radii/frame table to %s ... "%(infile+'.out'),
    sys.stdout.flush()
    fout=open(infile+'.out','w')
    # Header row: one column label per frame.
    for i in range(frames):
        fout.writelines('\tF'+str(i))
    fout.writelines("\n")
    e=1
    for a in range(atoms):
        # Rows that stayed all-zero on frame 0 are non-electrons: skip.
        if arry[a][0] == 0.0: continue
        else:
            sys.stdout.write("%d/%d%s"%(e,ecount,(int(log10(e))+int(log10(ecount))+3)*"\b"))
            sys.stdout.flush()
            e+=1
        fout.writelines("%d\t"%(a+1))
        for f in range(frames):
            fout.writelines("%f\t"%(arry[a][f]))
        fout.writelines("\n")
    print
    print "Done !! (generated radii/frame table) \n"
    fout.close()
    fin.close()
# Command-line entry point: expects exactly one positional argument (the
# LAMMPS trajectory file); -h prints usage.
if __name__ == '__main__':
    # set defaults
    # check for input:
    opts, argv = getopt(sys.argv[1:], 'h')
    # if no input, print help and exit
    if len(argv) != 1:
        printHelp()
        sys.exit(1)
    else:
        infile=argv[0]
    # read options
    for opt, arg in opts:
        if opt == '-h': # -h: print help
            printHelp()
    makeradii(infile)
| gpl-2.0 |
cloudbau/nova | nova/scheduler/filters/json_filter.py | 24 | 4784 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from nova.openstack.common import jsonutils
from nova.scheduler import filters
class JsonFilter(filters.BaseHostFilter):
    """Host Filter to allow simple JSON-based grammar for
    selecting hosts.

    The query is a nested JSON list such as
    ``["and", [">=", "$free_ram_mb", 1024], ["not", ["=", "$foo", "bar"]]]``
    supplied via scheduler_hints['query'].
    """
    def _op_compare(self, args, op):
        """Returns True if the specified operator can successfully
        compare the first item in the args with all the rest. Will
        return False if only one item is in the list.
        """
        if len(args) < 2:
            return False
        if op is operator.contains:
            # Membership test: args[0] must appear among the rest.
            bad = args[0] not in args[1:]
        else:
            # Collect every operand for which the comparison fails;
            # an empty list means all comparisons succeeded.
            bad = [arg for arg in args[1:]
                   if not op(args[0], arg)]
        return not bool(bad)
    def _equals(self, args):
        """First term is == all the other terms."""
        return self._op_compare(args, operator.eq)
    def _less_than(self, args):
        """First term is < all the other terms."""
        return self._op_compare(args, operator.lt)
    def _greater_than(self, args):
        """First term is > all the other terms."""
        return self._op_compare(args, operator.gt)
    def _in(self, args):
        """First term is in set of remaining terms."""
        return self._op_compare(args, operator.contains)
    def _less_than_equal(self, args):
        """First term is <= all the other terms."""
        return self._op_compare(args, operator.le)
    def _greater_than_equal(self, args):
        """First term is >= all the other terms."""
        return self._op_compare(args, operator.ge)
    def _not(self, args):
        """Flip each of the arguments."""
        return [not arg for arg in args]
    def _or(self, args):
        """True if any arg is True."""
        return any(args)
    def _and(self, args):
        """True if all args are True."""
        return all(args)
    # Dispatch table mapping query operator tokens to handler methods.
    commands = {
        '=': _equals,
        '<': _less_than,
        '>': _greater_than,
        'in': _in,
        '<=': _less_than_equal,
        '>=': _greater_than_equal,
        'not': _not,
        'or': _or,
        'and': _and,
    }
    def _parse_string(self, string, host_state):
        """Strings prefixed with $ are capability lookups in the
        form '$variable' where 'variable' is an attribute in the
        HostState class. If $variable is a dictionary, you may
        use: $variable.dictkey
        """
        if not string:
            return None
        if not string.startswith("$"):
            return string
        # Walk the dotted path: first segment is a HostState attribute,
        # subsequent segments are dict-key lookups.
        path = string[1:].split(".")
        obj = getattr(host_state, path[0], None)
        if obj is None:
            return None
        for item in path[1:]:
            obj = obj.get(item, None)
            if obj is None:
                return None
        return obj
    def _process_filter(self, query, host_state):
        """Recursively parse the query structure."""
        if not query:
            return True
        # First element is the operator; the rest are (possibly nested)
        # operands.
        cmd = query[0]
        method = self.commands[cmd]
        cooked_args = []
        for arg in query[1:]:
            if isinstance(arg, list):
                # Nested sub-expression: evaluate it first.
                arg = self._process_filter(arg, host_state)
            elif isinstance(arg, basestring):
                # Python 2 only (basestring); resolves $-prefixed lookups.
                arg = self._parse_string(arg, host_state)
            if arg is not None:
                cooked_args.append(arg)
        result = method(self, cooked_args)
        return result
    def host_passes(self, host_state, filter_properties):
        """Return a list of hosts that can fulfill the requirements
        specified in the query.
        """
        try:
            query = filter_properties['scheduler_hints']['query']
        except KeyError:
            query = None
        if not query:
            # No query supplied: every host passes.
            return True
        # NOTE(comstud): Not checking capabilities or service for
        # enabled/disabled so that a provided json filter can decide
        result = self._process_filter(jsonutils.loads(query), host_state)
        if isinstance(result, list):
            # If any succeeded, include the host
            result = any(result)
        if result:
            # Filter it out.
            return True
        return False
| apache-2.0 |
nicolargo/intellij-community | python/lib/Lib/distutils/tests/support.py | 147 | 1277 | """Support code for distutils test cases."""
import shutil
import tempfile
from distutils import log
class LoggingSilencer(object):
    """Mix-in for test cases that silences distutils logging during the
    test, restoring the previous threshold afterwards."""
    def setUp(self):
        super(LoggingSilencer, self).setUp()
        # Raise the threshold to FATAL; remember the old value to restore.
        self.threshold = log.set_threshold(log.FATAL)
    def tearDown(self):
        log.set_threshold(self.threshold)
        super(LoggingSilencer, self).tearDown()
class TempdirManager(object):
    """Mix-in class that handles temporary directories for test cases.
    This is intended to be used with unittest.TestCase.
    """
    def setUp(self):
        super(TempdirManager, self).setUp()
        # Paths created via mkdtemp(), removed in reverse order on teardown.
        self.tempdirs = []
    def tearDown(self):
        super(TempdirManager, self).tearDown()
        # Remove most-recently-created directories first.
        while self.tempdirs:
            shutil.rmtree(self.tempdirs.pop())
    def mkdtemp(self):
        """Create a temporary directory that will be cleaned up.
        Returns the path of the directory.
        """
        path = tempfile.mkdtemp()
        self.tempdirs.append(path)
        return path
class DummyCommand:
    """Class to store options for retrieval via set_undefined_options().

    Every keyword argument passed to the constructor becomes an instance
    attribute of the same name.
    """
    def __init__(self, **kwargs):
        # Mirror all keyword arguments straight onto the instance.
        self.__dict__.update(kwargs)
    def ensure_finalized(self):
        # Real distutils commands finalize their options here; the dummy
        # has nothing to do.
        pass
codename13/kylessopen-3.4-port | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
    """Load /proc/kallsyms into the module-level ``kallsyms`` list as
    {'loc': address, 'name': symbol} dicts, sorted for lookup by get_sym().

    Prints a progress counter while parsing (Python 2 print statements).
    """
    global kallsyms
    # NOTE(review): bare except silently gives up if /proc/kallsyms is
    # unreadable (e.g. insufficient privileges) — the table stays empty.
    try:
        f = open("/proc/kallsyms", "r")
        linecount = 0
        for line in f:
            linecount = linecount+1
        f.seek(0)
    except:
        return
    j = 0
    for line in f:
        # Each line: "<hex address> <type> <symbol name> ...".
        loc = int(line.split()[0], 16)
        name = line.split()[2]
        j = j +1
        if ((j % 100) == 0):
            print "\r" + str(j) + "/" + str(linecount),
        kallsyms.append({ 'loc': loc, 'name' : name})
    print "\r" + str(j) + "/" + str(linecount)
    # Sorting dicts relies on Python 2 dict ordering semantics
    # (compares 'loc' first alphabetically-keyed); Python 2 only.
    kallsyms.sort()
    return
def get_sym(sloc):
    """Map an address (string) to a (symbol_name, offset) pair using the
    kallsyms table, or (None, 0) when no symbol qualifies.

    NOTE(review): this returns the first symbol whose address is >= the
    target, with offset = symbol_addr - target. Conventional symbolization
    picks the last symbol at or *below* the address — looks inverted;
    confirm against the upstream perf script before relying on the offset.
    """
    loc = int(sloc)
    for i in kallsyms:
        if (i['loc'] >= loc):
            return (i['name'], i['loc']-loc)
    return (None, 0)
def print_drop_table():
    """Print one row per drop location: resolved symbol, offset, count.

    Falls back to the raw address when the symbol cannot be resolved.
    """
    print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
    for i in drop_log.keys():
        (sym, off) = get_sym(i)
        if sym == None:
            sym = i
        print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
    """perf-script hook — presumably invoked once before event
    processing starts (see perf-script-python docs)."""
    print "Starting trace (Ctrl-C to dump results)"
def trace_end():
    """perf-script hook run after tracing: resolve symbols and dump the
    accumulated drop table."""
    print "Gathering kallsyms data"
    get_kallsyms_table()
    print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
        skbaddr, protocol, location):
    """Per-event handler for skb:kfree_skb: count drops per location."""
    slocation = str(location)
    # EAFP increment: first drop at a location initializes its counter.
    # NOTE(review): bare except would also mask non-KeyError failures.
    try:
        drop_log[slocation] = drop_log[slocation] + 1
    except:
        drop_log[slocation] = 1
| gpl-2.0 |
mbauskar/helpdesk-erpnext | erpnext/selling/report/customer_acquisition_and_loyalty/customer_acquisition_and_loyalty.py | 96 | 2099 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import getdate, cint
import calendar
def execute(filters=None):
	"""Build the Customer Acquisition and Loyalty report.

	Classifies each submitted Sales Invoice (up to filters['to_date'],
	optionally restricted to filters['company']) as coming from a new or
	a repeat customer, then emits one row per month of the requested
	from_date..to_date range with counts and revenue for each group.

	Returns a (columns, data) tuple in the shape Frappe report scripts
	expect. Note: invoices *before* from_date still feed the
	classification, so customers seen earlier count as "repeat".
	Python 2 module (xrange).
	"""
	# key yyyy-mm -> [count, revenue]
	new_customers_in = {}
	repeat_customers_in = {}
	# Customers already seen in an earlier invoice. A set gives O(1)
	# membership tests instead of the O(n)-per-invoice list scan.
	customers = set()
	company_condition = ""
	if filters.get("company"):
		company_condition = ' and company=%(company)s'
	for si in frappe.db.sql("""select posting_date, customer, base_grand_total from `tabSales Invoice`
		where docstatus=1 and posting_date <= %(to_date)s
		{company_condition} order by posting_date""".format(company_condition=company_condition),
		filters, as_dict=1):
		key = si.posting_date.strftime("%Y-%m")
		if not si.customer in customers:
			# First invoice ever for this customer -> "new" bucket.
			new_customers_in.setdefault(key, [0, 0.0])
			new_customers_in[key][0] += 1
			new_customers_in[key][1] += si.base_grand_total
			customers.add(si.customer)
		else:
			repeat_customers_in.setdefault(key, [0, 0.0])
			repeat_customers_in[key][0] += 1
			repeat_customers_in[key][1] += si.base_grand_total
	# time series: one row per month in [from_date, to_date]
	from_year, from_month, temp = filters.get("from_date").split("-")
	to_year, to_month, temp = filters.get("to_date").split("-")
	from_year, from_month, to_year, to_month = \
		cint(from_year), cint(from_month), cint(to_year), cint(to_month)
	out = []
	for year in xrange(from_year, to_year+1):
		# Clamp the month range on the first and last year.
		for month in xrange(from_month if year==from_year else 1, (to_month+1) if year==to_year else 13):
			key = "{year}-{month:02d}".format(year=year, month=month)
			new = new_customers_in.get(key, [0,0.0])
			repeat = repeat_customers_in.get(key, [0,0.0])
			out.append([year, calendar.month_name[month],
				new[0], repeat[0], new[0] + repeat[0],
				new[1], repeat[1], new[1] + repeat[1]])
	return [
		_("Year"), _("Month"),
		_("New Customers") + ":Int",
		_("Repeat Customers") + ":Int",
		_("Total") + ":Int",
		_("New Customer Revenue") + ":Currency:150",
		_("Repeat Customer Revenue") + ":Currency:150",
		_("Total Revenue") + ":Currency:150"
	], out
| agpl-3.0 |
JackKelly/neuralnilm_prototype | scripts/e385.py | 4 | 6024 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
"""
e370
longer seq
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.9,
one_target_per_seq=False,
n_seq_per_batch=128,
# subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=True,
target_is_prediction=False,
independently_center_inputs=True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=2,
lag=0
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=0.01,
learning_rate_changes_by_iteration={
# 500: 1e-5,
# 1500: 1e-6
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
layers_config=[
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': rectify,
'W_hid_to_hid': Identity(scale=0.1),
'W_in_to_hid': Normal(std=1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': rectify,
'W_hid_to_hid': Identity(scale=0.1),
'W_in_to_hid': Normal(std=1/sqrt(40))
}
]
)
def exp_a(name):
    """Build the Net for experiment *name*: clone net_dict, attach the
    shared data source, and append a final softplus DenseLayer sized to
    the source's output count.

    NOTE(review): the RealApplianceSource construction below is commented
    out and `source` is never assigned elsewhere in this module — the
    `global source` read will raise NameError unless another module sets
    it first. Confirm before running.
    """
    global source
    # source_dict_copy = deepcopy(source_dict)
    # source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    # Output head: one unit per appliance channel, softplus keeps
    # predicted power non-negative.
    net_dict_copy['layers_config'].extend([
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            'nonlinearity': T.nnet.softplus,
            'W': Normal(std=1/sqrt(40))
        }
    ])
    net = Net(**net_dict_copy)
    return net
def main():
    """Run each configured experiment (exp_<letter>) in sequence, logging
    failures and stopping cleanly on Ctrl-C."""
    # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
    EXPERIMENTS = list('a')
    for experiment in EXPERIMENTS:
        full_exp_name = NAME + experiment
        # init_experiment returns a string like "exp_a('e385a')" which is
        # eval'd below. NOTE(review): eval on a generated string — safe
        # only while init_experiment's output is fully trusted.
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            # Log and continue with the next experiment.
            logger.exception("Exception")
            # raise
        finally:
            logging.shutdown()
if __name__ == "__main__":
    main()
| mit |
kaixinjxq/crosswalk-test-suite | tools/allpairs-plus/metacomm/combinatorics/pairs_storage.py | 31 | 1569 | from combinatorics import xuniqueCombinations
class node:
    """Bookkeeping record for one value id: how often it was used and
    which ids were seen before (in_) and after (out) it in sequences."""
    def __init__(self, id):
        """Start with a zero usage counter and empty order-relation sets."""
        self.id = id
        self.counter = 0
        self.in_ = set()
        self.out = set()
    def __str__(self):
        # Dump every attribute for debugging.
        return str(vars(self))
def key(items):
    """Build a canonical string key by joining the ids of *items* with '->'."""
    ids = [item.id for item in items]
    return "->".join(ids)
class pairs_storage:
    """Tracks which value combinations (up to size n) have been covered,
    plus per-id ordering statistics, for the all-pairs generator."""
    def __init__(self, n):
        # n is the combination order (2 for classic pairwise).
        self.__n = n
        self.__nodes = {}
        # __combs_arr[i] holds the keys of all covered (i+1)-element combos.
        self.__combs_arr = []
        for i in range(n):
            self.__combs_arr.append(set())
    def add(self, comb):
        """Record *comb* (a list of items with .id) as covered and update
        the ordering stats of each participating node."""
        n = len(comb)
        assert(n > 0)
        self.__combs_arr[n - 1].add(key(comb))
        if n == 1 and comb[0].id not in self.__nodes:
            # First sighting of a single value: register its node.
            self.__nodes[comb[0].id] = node(comb[0].id)
            return
        ids = [x.id for x in comb]
        for i, id in enumerate(ids):
            curr = self.__nodes[id]
            curr.counter += 1
            # ids before position i were "in", ids after were "out".
            curr.in_.update(ids[:i])
            curr.out.update(ids[i + 1:])
    def add_sequence(self, seq):
        """Cover every sub-combination (sizes 1..n) of *seq*."""
        for i in range(1, self.__n + 1):
            for comb in xuniqueCombinations(seq, i):
                self.add(comb)
    def get_node_info(self, item):
        # Unknown items get a fresh, zeroed node (not stored).
        return self.__nodes.get(item.id, node(item.id))
    def get_combs(self):
        return self.__combs_arr
    def __len__(self):
        # Number of covered full-size (n-element) combinations.
        return len(self.__combs_arr[-1])
    def count_new_combs(self, seq):
        """How many n-element combinations of *seq* are not yet covered."""
        s = set([key(z) for z in xuniqueCombinations(seq, self.__n)]) - \
            self.__combs_arr[-1]
        return len(s)
| bsd-3-clause |
mementum/metaframe | docs/conf.py | 1 | 8339 | # -*- coding: utf-8 -*-
#
# metaframe documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 23 13:28:32 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'metaframe'
copyright = u'2015, Daniel Rodriguez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import metaframe
version = metaframe.__version__
# The full version, including alpha/beta/rc tags.
# release = '0.9.0'
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'metaframe_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'metaframe.tex', u'metaframe Documentation',
u'Daniel Rodriguez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One man page for this project. The previous entries said 'backtrader' —
# a copy-paste leftover from another project's conf.py; this file belongs
# to metaframe (see `project` above and texinfo_documents below).
man_pages = [
    ('index', 'metaframe', u'metaframe Documentation',
     [u'Daniel Rodriguez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'metaframe', u'metaframe Documentation',
u'Daniel Rodriguez', 'metaframe', 'Metaclass new/init infrastructure.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
ChanChiChoi/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
# k in the k-nearest-neighbours classifier below.
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# Fit and plot once per weighting scheme.
for weights in ['uniform', 'distance']:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    clf.fit(X, y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # Predict the class of every mesh point to colour the regions.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title("3-Class classification (k = %i, weights = '%s')"
              % (n_neighbors, weights))
plt.show()
| bsd-3-clause |
vainotuisk/icecreamratings | ENV/lib/python2.7/site-packages/setuptools/package_index.py | 301 | 38760 | """PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
from functools import wraps
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
require, Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from setuptools.compat import (urllib2, httplib, StringIO, HTTPError,
urlparse, urlunparse, unquote, splituser,
url2pathname, name2codepoint,
unichr, urljoin, urlsplit, urlunsplit,
ConfigParser)
from setuptools.compat import filterfalse
from fnmatch import translate
from setuptools.py26compat import strip_fragment
from setuptools.py27compat import get_all_headers
# Matches an "#egg=project-version" URL fragment naming a requested project.
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
# Extracts href targets from HTML anchors/links.
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
# Matches PyPI's legacy "<a>...</a> (md5)" markup so process_index() can fold
# the digest into the download link's fragment.
PYPI_MD5 = re.compile(
    '<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)'
    'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
# Matcher for a URL scheme prefix such as "http:" or "svn+ssh:".
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
# Archive extensions recognized as source distributions.
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
    'interpret_distro_name',
]
# Seconds before a stalled network operation is abandoned (see socket_timeout).
_SOCKET_TIMEOUT = 15
def parse_bdist_wininst(name):
    """Return (base, py_version, platform) for a bdist_wininst file name.

    Yields (None, None, None) when *name* is not a recognized ``.exe``
    installer name.
    """
    lowered = name.lower()
    base = py_ver = plat = None
    if lowered.endswith('.exe'):
        if lowered.endswith('.win32.exe'):
            base = name[:-10]
            plat = 'win32'
        elif lowered[-16:].startswith('.win32-py'):
            # version-tagged 32-bit installer: "...win32-pyX.Y.exe"
            py_ver = name[-7:-4]
            base = name[:-16]
            plat = 'win32'
        elif lowered.endswith('.win-amd64.exe'):
            base = name[:-14]
            plat = 'win-amd64'
        elif lowered[-20:].startswith('.win-amd64-py'):
            # version-tagged 64-bit installer: "...win-amd64-pyX.Y.exe"
            py_ver = name[-7:-4]
            base = name[:-20]
            plat = 'win-amd64'
    return base, py_ver, plat
def egg_info_for_url(url):
    """Return (basename, fragment) extracted from a download URL."""
    parsed = urlparse(url)
    fragment = parsed[5]
    segments = parsed[2].split('/')
    base = unquote(segments[-1])
    # SourceForge serves files via ".../<real-name>/download" URLs  # XXX Yuck
    if parsed[1] == 'sourceforge.net' and base == 'download':
        base = unquote(segments[-2])
    if '#' in base:
        base, fragment = base.split('#', 1)
    return base, fragment
def distros_for_url(url, metadata=None):
    """Yield egg or source distribution objects that might be found at a URL"""
    base, fragment = egg_info_for_url(url)
    for dist in distros_for_location(url, base, metadata): yield dist
    if fragment:
        match = EGG_FRAGMENT.match(fragment)
        if match:
            # An explicit "#egg=name-version" fragment marks a source
            # checkout link, so those interpretations get CHECKOUT_DIST
            # precedence.
            for dist in interpret_distro_name(
                url, match.group(1), metadata, precedence = CHECKOUT_DIST
            ):
                yield dist
def distros_for_location(location, basename, metadata=None):
    """Yield egg or source distribution objects based on basename"""
    if basename.endswith('.egg.zip'):
        basename = basename[:-4]    # strip the .zip
    if basename.endswith('.egg') and '-' in basename:
        # only one, unambiguous interpretation
        return [Distribution.from_location(location, basename, metadata)]
    if basename.endswith('.exe'):
        # Windows bdist_wininst installer: treat as a binary distribution.
        win_base, py_ver, platform = parse_bdist_wininst(basename)
        if win_base is not None:
            return interpret_distro_name(
                location, win_base, metadata, py_ver, BINARY_DIST, platform
            )
    # Try source distro extensions (.zip, .tgz, etc.)
    #
    for ext in EXTENSIONS:
        if basename.endswith(ext):
            basename = basename[:-len(ext)]
            return interpret_distro_name(location, basename, metadata)
    return []  # no extension matched
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    # Normalize the path so duplicate spellings of the same file compare equal.
    return distros_for_location(
        normalize_path(filename), os.path.basename(filename), metadata
    )
def interpret_distro_name(
        location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
        platform=None
        ):
    """Generate alternative interpretations of a source distro name

    Note: if `location` is a filesystem filename, you should call
    ``pkg_resources.normalize_path()`` on it before passing it to this
    routine!
    """
    # Generate alternative interpretations of a source distro name
    # Because some packages are ambiguous as to name/versions split
    # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
    # So, we generate each possible interepretation (e.g. "adns, python-1.1.0"
    # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
    # the spurious interpretations should be ignored, because in the event
    # there's also an "adns" package, the spurious "python-1.1.0" version will
    # compare lower than any numeric version number, and is therefore unlikely
    # to match a request for it. It's still a potential problem, though, and
    # in the long run PyPI and the distutils should go for "safe" names and
    # versions in distribution archive names (sdist and bdist).
    parts = basename.split('-')
    if not py_version and any(re.match('py\d\.\d$', p) for p in parts[2:]):
        # it is a bdist_dumb, not an sdist -- bail out
        return
    # Yield one Distribution per possible name/version split point.
    for p in range(1,len(parts)+1):
        yield Distribution(
            location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
            py_version=py_version, precedence = precedence,
            platform = platform
        )
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
    """Yield unique elements of *iterable* in order, remembering every
    element ever seen.

    unique_everseen('AAAABBBCCDAABBB') --> A B C D
    unique_everseen('ABBCcAD', str.lower) --> A B C D
    """
    witnessed = set()
    remember = witnessed.add
    if key is None:
        for element in iterable:
            if element not in witnessed:
                remember(element)
                yield element
    else:
        for element in iterable:
            signature = key(element)
            if signature not in witnessed:
                remember(signature)
                yield element
def unique_values(func):
    """
    Wrap a function returning an iterable such that the resulting iterable
    only ever yields unique items.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # De-duplication is lazy: duplicates are dropped as they are yielded.
        return unique_everseen(func(*args, **kwargs))
    return wrapper
# Matches HTML tags that carry a rel="..." attribute; group 2 is the rel value.
REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
    for match in REL.finditer(page):
        tag, rel = match.groups()
        # rel may hold a comma-separated list of link relations
        rels = set(map(str.strip, rel.lower().split(',')))
        if 'homepage' in rels or 'download' in rels:
            for match in HREF.finditer(tag):
                yield urljoin(url, htmldecode(match.group(1)))
    # Fall back to scraping PyPI's "Home Page" / "Download URL" table rows.
    for tag in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(tag)
        if pos!=-1:
            match = HREF.search(page,pos)
            if match:
                yield urljoin(url, htmldecode(match.group(1)))
# User-Agent sent by PackageIndex requests: urllib plus active setuptools.
user_agent = "Python-urllib/%s setuptools/%s" % (
    sys.version[:3], require('setuptools')[0].version
)
class ContentChecker(object):
    """
    A do-nothing content checker defining the interface that download
    validation hooks implement.
    """
    def feed(self, block):
        """Accept a block of downloaded data; the null checker ignores it."""
        return
    def is_valid(self):
        """Report whether the content validated; trivially True here."""
        return True
    def report(self, reporter, template):
        """Describe the checker via *reporter*; the null checker stays silent."""
        return
class HashChecker(ContentChecker):
    """Validates downloaded content against a "<hashname>=<hexdigest>"
    URL fragment."""
    # Recognized hash algorithms and their expected hex digest.
    pattern = re.compile(
        r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
        r'(?P<expected>[a-f0-9]+)'
    )
    def __init__(self, hash_name, expected):
        self.hash_name = hash_name
        self.hash = hashlib.new(hash_name)
        self.expected = expected
    @classmethod
    def from_url(cls, url):
        "Construct a (possibly null) ContentChecker from a URL"
        fragment = urlparse(url)[-1]
        if not fragment:
            return ContentChecker()
        match = cls.pattern.search(fragment)
        if match is None:
            return ContentChecker()
        return cls(**match.groupdict())
    def feed(self, block):
        self.hash.update(block)
    def is_valid(self):
        return self.hash.hexdigest() == self.expected
    def report(self, reporter, template):
        return reporter(template % self.hash_name)
class PackageIndex(Environment):
    """A distribution index that scans web pages for download URLs"""
    def __init__(
            self, index_url="https://pypi.python.org/simple", hosts=('*',),
            ca_bundle=None, verify_ssl=True, *args, **kw
            ):
        Environment.__init__(self,*args,**kw)
        # Normalize the index URL so it always ends with exactly one slash.
        self.index_url = index_url + "/"[:not index_url.endswith('/')]
        self.scanned_urls = {}
        self.fetched_urls = {}
        self.package_pages = {}
        # `hosts` holds glob patterns; compile them into a single matcher.
        self.allows = re.compile('|'.join(map(translate,hosts))).match
        self.to_scan = []
        # Prefer a CA-verifying opener when SSL support and a bundle exist.
        if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()):
            self.opener = ssl_support.opener_for(ca_bundle)
        else: self.opener = urllib2.urlopen
    def process_url(self, url, retrieve=False):
        """Evaluate a URL as a possible download, and maybe retrieve it"""
        if url in self.scanned_urls and not retrieve:
            return
        self.scanned_urls[url] = True
        if not URL_SCHEME(url):
            # no scheme -- treat it as a local file/directory
            self.process_filename(url)
            return
        else:
            dists = list(distros_for_url(url))
            if dists:
                if not self.url_ok(url):
                    return
                self.debug("Found link: %s", url)
        if dists or not retrieve or url in self.fetched_urls:
            list(map(self.add, dists))
            return  # don't need the actual page
        if not self.url_ok(url):
            self.fetched_urls[url] = True
            return
        self.info("Reading %s", url)
        self.fetched_urls[url] = True  # prevent multiple fetch attempts
        f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
        if f is None: return
        self.fetched_urls[f.url] = True
        if 'html' not in f.headers.get('content-type', '').lower():
            f.close()  # not html, we can't process it
            return
        base = f.url  # handle redirects
        page = f.read()
        if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
            if isinstance(f, HTTPError):
                # Errors have no charset, assume latin1:
                charset = 'latin-1'
            else:
                charset = f.headers.get_param('charset') or 'latin-1'
            page = page.decode(charset, "ignore")
        f.close()
        # Recursively scan every link on the page.
        for match in HREF.finditer(page):
            link = urljoin(base, htmldecode(match.group(1)))
            self.process_url(link)
        if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
            page = self.process_index(url, page)
    def process_filename(self, fn, nested=False):
        """Add distributions found in a local file or directory."""
        # process filenames or directories
        if not os.path.exists(fn):
            self.warn("Not found: %s", fn)
            return
        if os.path.isdir(fn) and not nested:
            # scan one level of a directory's contents
            path = os.path.realpath(fn)
            for item in os.listdir(path):
                self.process_filename(os.path.join(path,item), True)
        dists = distros_for_filename(fn)
        if dists:
            self.debug("Found: %s", fn)
            list(map(self.add, dists))
    def url_ok(self, url, fatal=False):
        """Return True if `url`'s host is allowed; otherwise warn or raise."""
        s = URL_SCHEME(url)
        # file: URLs are always allowed; others must match the hosts patterns
        if (s and s.group(1).lower()=='file') or self.allows(urlparse(url)[1]):
            return True
        msg = ("\nNote: Bypassing %s (disallowed host; see "
            "http://bit.ly/1dg9ijs for details).\n")
        if fatal:
            raise DistutilsError(msg % url)
        else:
            self.warn(msg, url)
    def scan_egg_links(self, search_path):
        """Scan every directory on `search_path` for .egg-link files."""
        for item in search_path:
            if os.path.isdir(item):
                for entry in os.listdir(item):
                    if entry.endswith('.egg-link'):
                        self.scan_egg_link(item, entry)
    def scan_egg_link(self, path, entry):
        """Register the development distribution an .egg-link file points at."""
        lines = [_f for _f in map(str.strip,
                                  open(os.path.join(path, entry))) if _f]
        # a valid .egg-link has exactly two non-blank lines: location, setup dir
        if len(lines)==2:
            for dist in find_distributions(os.path.join(path, lines[0])):
                dist.location = os.path.join(path, *lines)
                dist.precedence = SOURCE_DIST
                self.add(dist)
    def process_index(self,url,page):
        """Process the contents of a PyPI page"""
        def scan(link):
            # Process a URL to see if it's for a package page
            if link.startswith(self.index_url):
                parts = list(map(
                    unquote, link[len(self.index_url):].split('/')
                ))
                if len(parts)==2 and '#' not in parts[1]:
                    # it's a package page, sanitize and index it
                    pkg = safe_name(parts[0])
                    ver = safe_version(parts[1])
                    self.package_pages.setdefault(pkg.lower(),{})[link] = True
                    return to_filename(pkg), to_filename(ver)
            return None, None
        # process an index page into the package-page index
        for match in HREF.finditer(page):
            try:
                scan(urljoin(url, htmldecode(match.group(1))))
            except ValueError:
                pass
        pkg, ver = scan(url)   # ensure this page is in the page index
        if pkg:
            # process individual package page
            for new_url in find_external_links(url, page):
                # Process the found URL
                base, frag = egg_info_for_url(new_url)
                if base.endswith('.py') and not frag:
                    if ver:
                        new_url+='#egg=%s-%s' % (pkg,ver)
                    else:
                        self.need_version_info(url)
                self.scan_url(new_url)
            # rewrite the page so md5 digests ride along in link fragments
            return PYPI_MD5.sub(
                lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
            )
        else:
            return ""   # no sense double-scanning non-package pages
    def need_version_info(self, url):
        """Trigger a full index scan because a .py link lacked version info."""
        self.scan_all(
            "Page at %s links to .py file(s) without version info; an index "
            "scan is required.", url
        )
    def scan_all(self, msg=None, *args):
        """Scan the entire index page (once), optionally warning first."""
        if self.index_url not in self.fetched_urls:
            if msg: self.warn(msg,*args)
            self.info(
                "Scanning index of all packages (this may take a while)"
            )
        self.scan_url(self.index_url)
    def find_packages(self, requirement):
        """Scan index pages that might list downloads for `requirement`."""
        self.scan_url(self.index_url + requirement.unsafe_name+'/')
        if not self.package_pages.get(requirement.key):
            # Fall back to safe version of the name
            self.scan_url(self.index_url + requirement.project_name+'/')
        if not self.package_pages.get(requirement.key):
            # We couldn't find the target package, so search the index page too
            self.not_found_in_index(requirement)
        for url in list(self.package_pages.get(requirement.key,())):
            # scan each page that might be related to the desired package
            self.scan_url(url)
    def obtain(self, requirement, installer=None):
        """Locate a distribution matching `requirement`, scanning if needed."""
        self.prescan()
        self.find_packages(requirement)
        for dist in self[requirement.key]:
            if dist in requirement:
                return dist
            self.debug("%s does not match %s", requirement, dist)
        return super(PackageIndex, self).obtain(requirement,installer)
    def check_hash(self, checker, filename, tfp):
        """
        checker is a ContentChecker
        """
        checker.report(self.debug,
            "Validating %%s checksum for %s" % filename)
        if not checker.is_valid():
            # remove the corrupt/forged download before raising
            tfp.close()
            os.unlink(filename)
            raise DistutilsError(
                "%s validation failed for %s; "
                "possible download problem?" % (
                    checker.hash.name, os.path.basename(filename))
            )
    def add_find_links(self, urls):
        """Add `urls` to the list that will be prescanned for searches"""
        for url in urls:
            if (
                self.to_scan is None  # if we have already "gone online"
                or not URL_SCHEME(url)  # or it's a local file/directory
                or url.startswith('file:')
                or list(distros_for_url(url))  # or a direct package link
            ):
                # then go ahead and process it now
                self.scan_url(url)
            else:
                # otherwise, defer retrieval till later
                self.to_scan.append(url)
    def prescan(self):
        """Scan urls scheduled for prescanning (e.g. --find-links)"""
        if self.to_scan:
            list(map(self.scan_url, self.to_scan))
        self.to_scan = None  # from now on, go ahead and process immediately
    def not_found_in_index(self, requirement):
        """Log a miss for `requirement` and fall back to a full index scan."""
        if self[requirement.key]:  # we've seen at least one distro
            meth, msg = self.info, "Couldn't retrieve index page for %r"
        else:  # no distros seen for this name, might be misspelled
            meth, msg = (self.warn,
                "Couldn't find index page for %r (maybe misspelled?)")
        meth(msg, requirement.unsafe_name)
        self.scan_all()
    def download(self, spec, tmpdir):
        """Locate and/or download `spec` to `tmpdir`, returning a local path

        `spec` may be a ``Requirement`` object, or a string containing a URL,
        an existing local filename, or a project/version requirement spec
        (i.e. the string form of a ``Requirement`` object). If it is the URL
        of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
        that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
        automatically created alongside the downloaded file.

        If `spec` is a ``Requirement`` object or a string containing a
        project/version requirement spec, this method returns the location of
        a matching distribution (possibly after downloading it to `tmpdir`).
        If `spec` is a locally existing file or directory name, it is simply
        returned unchanged. If `spec` is a URL, it is downloaded to a subpath
        of `tmpdir`, and the local filename is returned. Various errors may be
        raised if a problem occurs during downloading.
        """
        if not isinstance(spec,Requirement):
            scheme = URL_SCHEME(spec)
            if scheme:
                # It's a url, download it to tmpdir
                found = self._download_url(scheme.group(1), spec, tmpdir)
                base, fragment = egg_info_for_url(spec)
                if base.endswith('.py'):
                    found = self.gen_setup(found,fragment,tmpdir)
                return found
            elif os.path.exists(spec):
                # Existing file or directory, just return it
                return spec
            else:
                try:
                    spec = Requirement.parse(spec)
                except ValueError:
                    raise DistutilsError(
                        "Not a URL, existing file, or requirement spec: %r" %
                        (spec,)
                    )
        return getattr(self.fetch_distribution(spec, tmpdir),'location',None)
    def fetch_distribution(
            self, requirement, tmpdir, force_scan=False, source=False,
            develop_ok=False, local_index=None
            ):
        """Obtain a distribution suitable for fulfilling `requirement`

        `requirement` must be a ``pkg_resources.Requirement`` instance.
        If necessary, or if the `force_scan` flag is set, the requirement is
        searched for in the (online) package index as well as the locally
        installed packages. If a distribution matching `requirement` is found,
        the returned distribution's ``location`` is the value you would have
        gotten from calling the ``download()`` method with the matching
        distribution's URL or filename. If no matching distribution is found,
        ``None`` is returned.

        If the `source` flag is set, only source distributions and source
        checkout links will be considered. Unless the `develop_ok` flag is
        set, development and system eggs (i.e., those using the ``.egg-info``
        format) will be ignored.
        """
        # process a Requirement
        self.info("Searching for %s", requirement)
        skipped = {}
        dist = None
        def find(req, env=None):
            if env is None:
                env = self
            # Find a matching distribution; may be called more than once
            for dist in env[req.key]:
                if dist.precedence==DEVELOP_DIST and not develop_ok:
                    if dist not in skipped:
                        self.warn("Skipping development or system egg: %s",dist)
                        skipped[dist] = 1
                    continue
                if dist in req and (dist.precedence<=SOURCE_DIST or not source):
                    return dist
        if force_scan:
            self.prescan()
            self.find_packages(requirement)
            dist = find(requirement)
        if local_index is not None:
            dist = dist or find(requirement, local_index)
        if dist is None:
            if self.to_scan is not None:
                self.prescan()
            dist = find(requirement)
        if dist is None and not force_scan:
            self.find_packages(requirement)
            dist = find(requirement)
        if dist is None:
            self.warn(
                "No local packages or download links found for %s%s",
                (source and "a source distribution of " or ""),
                requirement,
            )
        else:
            self.info("Best match: %s", dist)
            return dist.clone(location=self.download(dist.location, tmpdir))
    def fetch(self, requirement, tmpdir, force_scan=False, source=False):
        """Obtain a file suitable for fulfilling `requirement`

        DEPRECATED; use the ``fetch_distribution()`` method now instead.  For
        backward compatibility, this routine is identical but returns the
        ``location`` of the downloaded distribution instead of a distribution
        object.
        """
        dist = self.fetch_distribution(requirement,tmpdir,force_scan,source)
        if dist is not None:
            return dist.location
        return None
    def gen_setup(self, filename, fragment, tmpdir):
        """Create a trivial setup.py in `tmpdir` for a plain .py download
        whose "#egg=name-version" fragment names it unambiguously."""
        match = EGG_FRAGMENT.match(fragment)
        dists = match and [
            d for d in
            interpret_distro_name(filename, match.group(1), None) if d.version
        ] or []
        if len(dists)==1:   # unambiguous ``#egg`` fragment
            basename = os.path.basename(filename)
            # Make sure the file has been downloaded to the temp dir.
            if os.path.dirname(filename) != tmpdir:
                dst = os.path.join(tmpdir, basename)
                from setuptools.command.easy_install import samefile
                if not samefile(filename, dst):
                    shutil.copy2(filename, dst)
                    filename=dst
            with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
                file.write(
                    "from setuptools import setup\n"
                    "setup(name=%r, version=%r, py_modules=[%r])\n"
                    % (
                        dists[0].project_name, dists[0].version,
                        os.path.splitext(basename)[0]
                    )
                )
            return filename
        elif match:
            raise DistutilsError(
                "Can't unambiguously interpret project/version identifier %r; "
                "any dashes in the name or version should be escaped using "
                "underscores. %r" % (fragment,dists)
            )
        else:
            raise DistutilsError(
                "Can't process plain .py files without an '#egg=name-version'"
                " suffix to enable automatic setup script generation."
            )
    # bytes read per chunk while downloading
    dl_blocksize = 8192
    def _download_to(self, url, filename):
        """Stream `url` into `filename`, validating any fragment checksum."""
        self.info("Downloading %s", url)
        # Download the file
        fp, info = None, None
        try:
            checker = HashChecker.from_url(url)
            fp = self.open_url(strip_fragment(url))
            if isinstance(fp, HTTPError):
                raise DistutilsError(
                    "Can't download %s: %s %s" % (url, fp.code,fp.msg)
                )
            headers = fp.info()
            blocknum = 0
            bs = self.dl_blocksize
            size = -1
            if "content-length" in headers:
                # Some servers return multiple Content-Length headers :(
                sizes = get_all_headers(headers, 'Content-Length')
                size = max(map(int, sizes))
                self.reporthook(url, filename, blocknum, bs, size)
            with open(filename,'wb') as tfp:
                while True:
                    block = fp.read(bs)
                    if block:
                        checker.feed(block)
                        tfp.write(block)
                        blocknum += 1
                        self.reporthook(url, filename, blocknum, bs, size)
                    else:
                        break
                self.check_hash(checker, filename, tfp)
            return headers
        finally:
            if fp: fp.close()
    def reporthook(self, url, filename, blocknum, blksize, size):
        # progress-callback hook for subclasses; base class does nothing
        pass    # no-op
    def open_url(self, url, warning=None):
        """Open `url`, translating common network failures into warnings
        (when `warning` is given) or DistutilsError exceptions."""
        if url.startswith('file:'):
            return local_open(url)
        try:
            return open_with_auth(url, self.opener)
        except (ValueError, httplib.InvalidURL) as v:
            msg = ' '.join([str(arg) for arg in v.args])
            if warning:
                self.warn(warning, msg)
            else:
                raise DistutilsError('%s %s' % (url, msg))
        except urllib2.HTTPError as v:
            # HTTP errors are returned to the caller for inspection
            return v
        except urllib2.URLError as v:
            if warning:
                self.warn(warning, v.reason)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v.reason))
        except httplib.BadStatusLine as v:
            if warning:
                self.warn(warning, v.line)
            else:
                raise DistutilsError(
                    '%s returned a bad status line. The server might be '
                    'down, %s' %
                    (url, v.line)
                )
        except httplib.HTTPException as v:
            if warning:
                self.warn(warning, v)
            else:
                raise DistutilsError("Download error for %s: %s"
                                     % (url, v))
    def _download_url(self, scheme, url, tmpdir):
        """Dispatch a download by URL scheme (svn/git/hg/file/http...)."""
        # Determine download filename
        #
        name, fragment = egg_info_for_url(url)
        if name:
            while '..' in name:
                name = name.replace('..','.').replace('\\','_')
        else:
            name = "__downloaded__"    # default if URL has no path contents
        if name.endswith('.egg.zip'):
            name = name[:-4]    # strip the extra .zip before download
        filename = os.path.join(tmpdir,name)
        # Download the file
        #
        if scheme=='svn' or scheme.startswith('svn+'):
            return self._download_svn(url, filename)
        elif scheme=='git' or scheme.startswith('git+'):
            return self._download_git(url, filename)
        elif scheme.startswith('hg+'):
            return self._download_hg(url, filename)
        elif scheme=='file':
            return url2pathname(urlparse(url)[2])
        else:
            self.url_ok(url, True)   # raises error if not allowed
            return self._attempt_download(url, filename)
    def scan_url(self, url):
        """Scan `url` (and retrieve it) for download links."""
        self.process_url(url, True)
    def _attempt_download(self, url, filename):
        """Download `url`; follow up on HTML responses (VCS index pages)."""
        headers = self._download_to(url, filename)
        if 'html' in headers.get('content-type','').lower():
            return self._download_html(url, headers, filename)
        else:
            return filename
    def _download_html(self, url, headers, filename):
        """Handle an HTML download: only Subversion index pages are usable."""
        file = open(filename)
        for line in file:
            if line.strip():
                # Check for a subversion index page
                if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
                    # it's a subversion index page:
                    file.close()
                    os.unlink(filename)
                    return self._download_svn(url, filename)
                break   # not an index page
        file.close()
        os.unlink(filename)
        raise DistutilsError("Unexpected HTML page found at "+url)
    def _download_svn(self, url, filename):
        """Check out `url` via the svn command line into `filename`."""
        url = url.split('#',1)[0]   # remove any fragment for svn's sake
        creds = ''
        if url.lower().startswith('svn:') and '@' in url:
            scheme, netloc, path, p, q, f = urlparse(url)
            if not netloc and path.startswith('//') and '/' in path[2:]:
                netloc, path = path[2:].split('/',1)
                auth, host = splituser(netloc)
                if auth:
                    if ':' in auth:
                        user, pw = auth.split(':',1)
                        creds = " --username=%s --password=%s" % (user, pw)
                    else:
                        creds = " --username="+auth
                    netloc = host
                    # NOTE(review): the full `url` is passed where a path is
                    # normally expected -- this mirrors the code as written;
                    # verify against upstream before changing.
                    url = urlunparse((scheme, netloc, url, p, q, f))
        self.info("Doing subversion checkout from %s to %s", url, filename)
        # NOTE(review): url/creds are interpolated into a shell command via
        # os.system; safe only for trusted URLs.
        os.system("svn checkout%s -q %s %s" % (creds, url, filename))
        return filename
    @staticmethod
    def _vcs_split_rev_from_url(url, pop_prefix=False):
        """Split a VCS URL into (clean_url, revision-or-None)."""
        scheme, netloc, path, query, frag = urlsplit(url)
        scheme = scheme.split('+', 1)[-1]
        # Some fragment identification fails
        path = path.split('#',1)[0]
        rev = None
        if '@' in path:
            path, rev = path.rsplit('@', 1)
        # Also, discard fragment
        url = urlunsplit((scheme, netloc, path, query, ''))
        return url, rev
    def _download_git(self, url, filename):
        """Clone `url` with git, checking out any "@rev" revision."""
        filename = filename.split('#',1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
        self.info("Doing git clone from %s to %s", url, filename)
        os.system("git clone --quiet %s %s" % (url, filename))
        if rev is not None:
            self.info("Checking out %s", rev)
            os.system("(cd %s && git checkout --quiet %s)" % (
                filename,
                rev,
            ))
        return filename
    def _download_hg(self, url, filename):
        """Clone `url` with mercurial, updating to any "@rev" revision."""
        filename = filename.split('#',1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
        self.info("Doing hg clone from %s to %s", url, filename)
        os.system("hg clone --quiet %s %s" % (url, filename))
        if rev is not None:
            self.info("Updating to %s", rev)
            os.system("(cd %s && hg up -C -r %s >&-)" % (
                filename,
                rev,
            ))
        return filename
    # logging helpers -- delegate to distutils.log
    def debug(self, msg, *args):
        log.debug(msg, *args)
    def info(self, msg, *args):
        log.info(msg, *args)
    def warn(self, msg, *args):
        log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# references, a hexadecimal numeric reference, or a named reference).
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
    # Convert a codepoint to a character; non-int values (unresolved named
    # references passed through by decode_entity) are returned unchanged.
    if not isinstance(c, int):
        return c
    # unichr (from setuptools.compat) handles codepoints above latin-1 on
    # Python 2, where chr() only accepts 0-255.
    if c>255: return unichr(c)
    return chr(c)
def decode_entity(match):
    """Translate one matched entity reference to its character."""
    what = match.group(1)
    if what.startswith('#x'):
        # hexadecimal numeric reference, e.g. &#x2603;
        what = int(what[2:], 16)
    elif what.startswith('#'):
        # decimal numeric reference, e.g. &#9731;
        what = int(what[1:])
    else:
        # named reference, e.g. &amp; -- unknown names are left untouched
        what = name2codepoint.get(what, match.group(0))
    return uchr(what)
def htmldecode(text):
    """Decode HTML entities in the given text."""
    return entity_sub(decode_entity, text)
def socket_timeout(timeout=15):
    """Decorator factory: run the wrapped callable with the global default
    socket timeout set to *timeout* seconds, restoring the previous value
    afterwards (even if the call raises).
    """
    def _socket_timeout(func):
        # Fix: preserve the wrapped function's name/docstring; the file
        # already imports `wraps` from functools for this purpose.
        @wraps(func)
        def _socket_timeout(*args, **kwargs):
            old_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                # Always restore so one timed call can't leak its setting.
                socket.setdefaulttimeout(old_timeout)
        return _socket_timeout
    return _socket_timeout
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n','')
class Credential(object):
    """
    A username/password pair. Use like a namedtuple.
    """
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def __iter__(self):
        # Support tuple-style unpacking: user, pw = cred
        return iter((self.username, self.password))
    def __str__(self):
        return '%s:%s' % (self.username, self.password)
class PyPIConfig(ConfigParser.ConfigParser):
    """Reads repository credentials from the user's ~/.pypirc file."""
    def __init__(self):
        """
        Load from ~/.pypirc
        """
        # Ensure every section exposes these keys even when unset.
        defaults = dict.fromkeys(['username', 'password', 'repository'], '')
        ConfigParser.ConfigParser.__init__(self, defaults)
        rc = os.path.join(os.path.expanduser('~'), '.pypirc')
        if os.path.exists(rc):
            self.read(rc)
    @property
    def creds_by_repository(self):
        """Map each configured repository URL to its Credential."""
        sections_with_repositories = [
            section for section in self.sections()
            if self.get(section, 'repository').strip()
        ]
        return dict(map(self._get_repo_cred, sections_with_repositories))
    def _get_repo_cred(self, section):
        # Build a (repository_url, Credential) pair for one config section.
        repo = self.get(section, 'repository').strip()
        return repo, Credential(
            self.get(section, 'username').strip(),
            self.get(section, 'password').strip(),
        )
    def find_credential(self, url):
        """
        If the URL indicated appears to be a repository defined in this
        config, return the credential for that repository.
        """
        for repository, cred in self.creds_by_repository.items():
            if url.startswith(repository):
                return cred
def open_with_auth(url, opener=urllib2.urlopen):
    """Open a urllib2 request, handling HTTP authentication"""
    scheme, netloc, path, params, query, frag = urlparse(url)
    # Double scheme does not raise on Mac OS X as revealed by a
    # failing test. We would expect "nonnumeric port". Refs #20.
    if netloc.endswith(':'):
        raise httplib.InvalidURL("nonnumeric port: ''")
    if scheme in ('http', 'https'):
        # credentials may be embedded in the netloc as "user:pass@host"
        auth, host = splituser(netloc)
    else:
        auth = None
    if not auth:
        # fall back to credentials configured in ~/.pypirc, if any
        cred = PyPIConfig().find_credential(url)
        if cred:
            auth = str(cred)
            info = cred.username, url
            log.info('Authenticating as %s for %s (from .pypirc)' % info)
    if auth:
        auth = "Basic " + _encode_auth(auth)
        # strip the credentials out of the URL actually requested
        new_url = urlunparse((scheme,host,path,params,query,frag))
        request = urllib2.Request(new_url)
        request.add_header("Authorization", auth)
    else:
        request = urllib2.Request(url)
    request.add_header('User-Agent', user_agent)
    fp = opener(request)
    if auth:
        # Put authentication info back into request URL if same host,
        # so that links found on the page will work
        s2, h2, path2, param2, query2, frag2 = urlparse(fp.url)
        if s2==scheme and h2==host:
            fp.url = urlunparse((s2,netloc,path2,param2,query2,frag2))
    return fp
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
    """Return *url* unchanged; retained only for backward compatibility."""
    return url
def local_open(url):
    """Read a local path, with special support for directories"""
    scheme, server, path, param, query, frag = urlparse(url)
    filename = url2pathname(path)
    if os.path.isfile(filename):
        return urllib2.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        # synthesize an HTML index page for the directory
        files = []
        for f in os.listdir(filename):
            if f=='index.html':
                # an existing index.html wins over the generated listing
                with open(os.path.join(filename,f),'r') as fp:
                    body = fp.read()
                break
            elif os.path.isdir(os.path.join(filename,f)):
                f+='/'
            files.append("<a href=%r>%s</a>" % (f,f))
        else:
            # loop completed without finding index.html: emit the listing
            body = ("<html><head><title>%s</title>" % url) + \
                "</head><body>%s</body></html>" % '\n'.join(files)
        status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"
    headers = {'content-type': 'text/html'}
    # Both success and failure are delivered through the HTTPError file-like
    # interface so callers can treat local and remote opens uniformly.
    return HTTPError(url, status, message, headers, StringIO(body))
| bsd-3-clause |
Depado/starmato-admin | setup.py | 1 | 1305 | import os
from setuptools import setup, find_packages
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
    name='starmato-admin',
    version='0.2.1',
    packages=find_packages(),
    # Non-Python assets (templates, static media, translations) shipped
    # inside the starmato.admin package.
    package_data={
        'starmato.admin': [
            'templates/admin/*.*',
            'templates/admin/edit_inline/*.*',
            'templates/admin/includes/*.*',
            'media/admin/js/*.*',
            'media/admin/css/*.*',
            'media/admin/images/*.*',
            'locale/fr_FR/LC_MESSAGES/*.*'
        ],
    },
    # NOTE(review): license is declared MIT here, but the classifier below
    # says "BSD License" -- one of the two looks wrong; confirm with authors.
    license='MIT',
    description='A Django app to upgrade django admin.',
    url='http://www.go-tsunami.com/',
    author='GoTsunami',
    author_email='ab@go-tsunami.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
| mit |
pmarques/ansible | test/lib/ansible_test/_internal/commands/integration/cloud/acme.py | 13 | 2405 | """ACME plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ....config import (
IntegrationConfig,
)
from ....containers import (
run_support_container,
)
from . import (
CloudEnvironment,
CloudEnvironmentConfig,
CloudProvider,
)
class ACMEProvider(CloudProvider):
    """ACME plugin. Sets up cloud resources for tests."""
    # Name given to the simulator container so tests can address it by host.
    DOCKER_SIMULATOR_NAME = 'acme-simulator'
    def __init__(self, args):  # type: (IntegrationConfig) -> None
        super(ACMEProvider, self).__init__(args)
        # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
        if os.environ.get('ANSIBLE_ACME_CONTAINER'):
            # allow an override image for local development/testing
            self.image = os.environ.get('ANSIBLE_ACME_CONTAINER')
        else:
            self.image = 'quay.io/ansible/acme-test-container:2.0.0'
        self.uses_docker = True
    def setup(self):  # type: () -> None
        """Setup the cloud resource before delegation and register a cleanup callback."""
        super(ACMEProvider, self).setup()
        if self._use_static_config():
            self._setup_static()
        else:
            self._setup_dynamic()
    def _setup_dynamic(self):  # type: () -> None
        """Create a ACME test container using docker."""
        ports = [
            5000,  # control port for flask app in container
            14000,  # Pebble ACME CA
        ]
        descriptor = run_support_container(
            self.args,
            self.platform,
            self.image,
            self.DOCKER_SIMULATOR_NAME,
            ports,
            allow_existing=True,
            cleanup=True,
        )
        descriptor.register(self.args)
        # expose the simulator hostname to the test environment
        self._set_cloud_config('acme_host', self.DOCKER_SIMULATOR_NAME)
    def _setup_static(self):  # type: () -> None
        # static (pre-provisioned) configuration is not supported for ACME
        raise NotImplementedError()
class ACMEEnvironment(CloudEnvironment):
    """ACME environment plugin. Updates integration test environment after delegation."""
    def get_environment_config(self):  # type: () -> CloudEnvironmentConfig
        """Return environment configuration for use in the test environment after delegation."""
        # Expose the simulator hostname to the integration tests.
        return CloudEnvironmentConfig(
            ansible_vars=dict(acme_host=self._get_cloud_config('acme_host')),
        )
| gpl-3.0 |
Nirvedh/CoarseCoherence | src/arch/x86/isa/insts/x87/arithmetic/__init__.py | 91 | 2486 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Sub-categories of x87 arithmetic instructions; each name doubles as the
# module that defines that category's microcode.
categories = [
    "addition",
    "subtraction",
    "multiplication",
    "division",
    "change_sign",
    "round",
    "partial_remainder",
    "square_root",
]

microcode = '''
# X86 microcode
'''

# Import every category module in turn and append its microcode.
for name in categories:
    exec("import %s as cat" % name)
    microcode += cat.microcode
| bsd-3-clause |
adsabs/ADSDeploy | ADSDeploy/tests/test_unit/test_webapp.py | 1 | 6070 | """
Test utilities
"""
import hmac
import json
import mock
import hashlib
import unittest
from flask.ext.testing import TestCase
from ADSDeploy.webapp import app
from ADSDeploy.webapp.models import db, Deployment
from ADSDeploy.webapp.views import GithubListener
from stub_data.stub_webapp import github_payload, payload_tag
from ADSDeploy.webapp.utils import get_boto_session
from ADSDeploy.webapp.exceptions import NoSignatureInfo, InvalidSignature
from collections import OrderedDict
class FakeRequest:
    """
    A rudimentary mock flask.request object
    """
    def __init__(self):
        # Mirror the two attributes the views read from flask.request.
        self.headers = {}
        self.data = ''

    def get_json(self, **kwargs):
        """
        Parse ``self.data`` as JSON, cache it on ``self.json`` and return it.
        Keyword arguments are accepted for signature compatibility with
        flask's ``get_json`` but are ignored.
        """
        parsed = json.loads(self.data)
        self.json = parsed
        return parsed
class TestUtilities(unittest.TestCase):
    """
    Test utility functions in utils.py
    """
    @mock.patch('ADSDeploy.webapp.utils.Session')
    def test_get_boto_session(self, Session):
        """
        get_boto_session should call Session with the current app's config
        """
        application = app.create_app()
        application.config.update(
            AWS_REGION="unittest-region",
            AWS_ACCESS_KEY="unittest-access",
            AWS_SECRET_KEY="unittest-secret",
        )

        # Outside an application context there is no config to read from.
        with self.assertRaises(RuntimeError):
            get_boto_session()

        with application.app_context():
            get_boto_session()
            Session.assert_called_with(
                aws_access_key_id="unittest-access",
                aws_secret_access_key="unittest-secret",
                region_name="unittest-region",
            )
class TestStaticMethodUtilities(TestCase):
    """
    Test standalone staticmethods
    """
    def create_app(self):
        """
        Create the wsgi application
        """
        app_ = app.create_app()
        app_.config['SQLALCHEMY_DATABASE_URI'] = "sqlite://"
        app_.config['GITHUB_SECRET'] = 'unittest-secret'
        app_.config['RABBITMQ_URL'] = 'rabbitmq'
        return app_
    def setUp(self):
        """
        setUp and tearDown are run at the start of each test; ensure
        that a fresh database is used for each test.
        """
        db.create_all()
    def tearDown(self):
        """
        setUp and tearDown are run at the start of each test; ensure
        that a fresh database is used for each test.
        """
        db.session.remove()
        db.drop_all()
    def test_verify_signature(self):
        """
        Ensures that the signature is validated against the github algorithm
        found at https://github.com/github/github-services/blob/f3bb3dd780feb6318c42b2db064ed6d481b70a1f/lib/service/http_helper.rb#L77
        """
        r = FakeRequest()
        r.data = '''{"payload": "unittest"}'''
        # Compute the signature exactly as GitHub does: HMAC-SHA1 of the raw
        # request body keyed with the shared webhook secret.
        h = hmac.new(
            self.app.config['GITHUB_SECRET'],
            msg=r.data,
            digestmod=hashlib.sha1,
        ).hexdigest()
        r.headers = {
            'content-type': 'application/json',
            self.app.config['GITHUB_SIGNATURE_HEADER']: "sha1={}".format(h)
        }
        self.assertTrue(GithubListener.verify_github_signature(r))
        # Same header, different body: the digest no longer matches.
        with self.assertRaises(InvalidSignature):
            r.data = ''
            GithubListener.verify_github_signature(r)
        # No signature header at all.
        with self.assertRaises(NoSignatureInfo):
            r.headers = {}
            GithubListener.verify_github_signature(r)
    def test_parse_github_payload(self):
        """
        Tests that a db.Commit object is created when passed an example
        github webhook payload
        """
        # Set up fake payload
        r = FakeRequest()
        # NOTE(review): this assignment is immediately overwritten below, so
        # the "mission-control" variant is never exercised — confirm whether
        # an assertion for the unknown-repo case was intended here.
        r.data = github_payload.replace('"name": "adsws"', '"name": "mission-control"')
        # Modify the data such that the payload refers to a known repo,
        # assert that the returned models.Commit contains the expected data
        r.data = github_payload
        c = GithubListener.parse_github_payload(r)
        self.assertEqual(
            c['url'],
            u'https://github.com/adsabs/adsws'
        )
        self.assertEqual(
            c['tag'],
            None
        )
        for key in ['url', 'commit', 'author', 'tag']:
            self.assertIn(
                key,
                c,
                msg='Key "{}" not found in "{}"'.format(key, c)
            )
    def test_parse_github_payload_tag(self):
        """
        Tests that a db.Commit object is created when passed a create event
        example github webhook payload
        """
        # Set up fake payload
        r = FakeRequest()
        r.data = payload_tag
        c = GithubListener.parse_github_payload(r)
        self.assertEqual(
            c['url'],
            'https://github.com/adsabs/adsws'
        )
        self.assertEqual(
            c['tag'],
            'v1.0.0'
        )
        for key in ['url', 'commit', 'author', 'tag']:
            self.assertIn(
                key,
                c,
                msg='Key "{}" not found in "{}"'.format(key, c)
            )
    @mock.patch('ADSDeploy.webapp.views.MiniRabbit')
    def test_payload_sent_to_rabbitmq(self, mocked_rabbit):
        """
        Tests that a payload is sent to rabbitmq and that it contains the
        expected payload.
        """
        # Wire the mock so it behaves as a context manager returning itself
        # (MiniRabbit is used via `with MiniRabbit(...) as w:` in the views).
        instance_rabbit = mocked_rabbit.return_value
        instance_rabbit.__enter__.return_value = instance_rabbit
        instance_rabbit.__exit__.return_value = None
        instance_rabbit.publish.side_effect = None
        # OrderedDict keeps key order deterministic so json.dumps below
        # serialises to the same string the implementation produces.
        payload = OrderedDict([
            ('application', 'important-service'),
            ('commit', 'd8fgdfgdf'),
            ('environment', 'staging'),
            ('author', 'author'),
            ('tag', 'dsfdsf')
        ])
        GithubListener.push_rabbitmq(payload=payload, exchange='test', route='test')
        self.assertTrue(mocked_rabbit.called)
        instance_rabbit.publish.assert_has_calls(
            [mock.call(payload=json.dumps(payload), exchange='test', route='test')]
        )
| gpl-3.0 |
naousse/odoo | addons/account/account_analytic_line.py | 304 | 7914 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
from openerp.tools.translate import _
class account_analytic_line(osv.osv):
    # Extends analytic lines with accounting links (general account, move
    # line, analytic journal) and currency fields mirrored from the move.
    _inherit = 'account.analytic.line'
    _description = 'Analytic Line'
    _columns = {
        'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
        'product_id': fields.many2one('product.product', 'Product'),
        'general_account_id': fields.many2one('account.account', 'General Account', required=True, ondelete='restrict'),
        'move_id': fields.many2one('account.move.line', 'Move Line', ondelete='cascade', select=True),
        'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal', required=True, ondelete='restrict', select=True),
        'code': fields.char('Code', size=8),
        'ref': fields.char('Ref.'),
        # Related (stored) copies of the move line's currency information.
        'currency_id': fields.related('move_id', 'currency_id', type='many2one', relation='res.currency', string='Account Currency', store=True, help="The related account currency if not equal to the company one.", readonly=True),
        'amount_currency': fields.related('move_id', 'amount_currency', type='float', string='Amount Currency', store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
    }
    _defaults = {
        # Default company comes from the standard res.company helper.
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
    }
    _order = 'date desc'
    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Extend search to honour optional 'from_date'/'to_date' context
        keys by appending date-range clauses to the search domain."""
        if context is None:
            context = {}
        if context.get('from_date',False):
            args.append(['date', '>=', context['from_date']])
        if context.get('to_date',False):
            args.append(['date','<=', context['to_date']])
        return super(account_analytic_line, self).search(cr, uid, args, offset, limit,
            order, context=context, count=count)
    def _check_company(self, cr, uid, ids, context=None):
        """Constraint helper: the analytic account and the linked move line
        must belong to the same company."""
        lines = self.browse(cr, uid, ids, context=context)
        for l in lines:
            if l.move_id and not l.account_id.company_id.id == l.move_id.account_id.company_id.id:
                return False
        return True
    # Compute the cost based on the price type define into company
    # property_valuation_price_type property
    def on_change_unit_amount(self, cr, uid, id, prod_id, quantity, company_id,
            unit=False, journal_id=False, context=None):
        """Onchange handler: given a product/quantity/journal, return the
        amount (negated for cost-based valuations), the general account and
        the normalised unit of measure."""
        if context==None:
            context={}
        # Fall back to the first purchase-type analytic journal if none given.
        if not journal_id:
            j_ids = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=','purchase')])
            journal_id = j_ids and j_ids[0] or False
        if not journal_id or not prod_id:
            return {}
        product_obj = self.pool.get('product.product')
        analytic_journal_obj =self.pool.get('account.analytic.journal')
        product_price_type_obj = self.pool.get('product.price.type')
        product_uom_obj = self.pool.get('product.uom')
        j_id = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
        prod = product_obj.browse(cr, uid, prod_id, context=context)
        result = 0.0
        if prod_id:
            # Replace a UoM from a different category with the product's own
            # (purchase journals use the purchase UoM instead).
            unit_obj = False
            if unit:
                unit_obj = product_uom_obj.browse(cr, uid, unit, context=context)
            if not unit_obj or prod.uom_id.category_id.id != unit_obj.category_id.id:
                unit = prod.uom_id.id
            if j_id.type == 'purchase':
                if not unit_obj or prod.uom_po_id.category_id.id != unit_obj.category_id.id:
                    unit = prod.uom_po_id.id
        # Pick the expense account for non-sale journals, the income account
        # for sale journals; fall back to the product category's account.
        if j_id.type <> 'sale':
            a = prod.property_account_expense.id
            if not a:
                a = prod.categ_id.property_account_expense_categ.id
            if not a:
                raise osv.except_osv(_('Error!'),
                    _('There is no expense account defined ' \
                        'for this product: "%s" (id:%d).') % \
                        (prod.name, prod.id,))
        else:
            a = prod.property_account_income.id
            if not a:
                a = prod.categ_id.property_account_income_categ.id
            if not a:
                raise osv.except_osv(_('Error!'),
                    _('There is no income account defined ' \
                        'for this product: "%s" (id:%d).') % \
                        (prod.name, prod_id,))
        flag = False
        # Compute based on pricetype
        product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','standard_price')], context=context)
        pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
        if journal_id:
            journal = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
            if journal.type == 'sale':
                product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
                if product_price_type_ids:
                    pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
        # Take the company currency as the reference one
        if pricetype.field == 'list_price':
            flag = True
        ctx = context.copy()
        if unit:
            # price_get() will respect a 'uom' in its context, in order
            # to return a default price for those units
            ctx['uom'] = unit
        amount_unit = prod.price_get(pricetype.field, context=ctx)[prod.id]
        prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
        amount = amount_unit * quantity or 0.0
        result = round(amount, prec)
        # Amounts not based on the list price (i.e. cost valuations) are
        # recorded as negative analytic amounts.
        if not flag:
            result *= -1
        return {'value': {
            'amount': result,
            'general_account_id': a,
            'product_uom_id': unit
            }
        }
    def view_header_get(self, cr, user, view_id, view_type, context=None):
        """Return a view header of the form 'Entries: <analytic account name>'
        when an 'account_id' is present in the context, else False."""
        if context is None:
            context = {}
        if context.get('account_id', False):
            # account_id in context may also be pointing to an account.account.id
            cr.execute('select name from account_analytic_account where id=%s', (context['account_id'],))
            res = cr.fetchone()
            if res:
                res = _('Entries: ')+ (res[0] or '')
            return res
        return False
class res_partner(osv.osv):
    """ Inherits partner and adds contract information in the partner form """
    _inherit = 'res.partner'
    _columns = {
        # Analytic accounts ("contracts") owned by this partner; read-only
        # because contracts are managed from their own views.
        'contract_ids': fields.one2many('account.analytic.account', \
            'partner_id', 'Contracts', readonly=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
TheMocheeze/HS13-Arctic-Station | bot/D_help.py | 67 | 5811 | #As new commands are added, update this.
# Last updated: 8.3.2011
# Updated 12.3.2011:
# - Added the missing help data for Version
# - Imported CORE_DATA to get the name.
# - Tidied some commands up a bit.
# - Replaced all "Bot"s with the Skibot's current name.
from CORE_DATA import Name
# Command-name -> help-text table used by the help command.  Keys are the
# user-facing command names; values are usage strings, interpolated with the
# bot's configured Name wherever the text mentions the bot.
everything = {
    "8ball": "[8ball <arg>] Responds to the argument",
    "allcaps": "[allcaps <arg>] Takes an uppercase string and returns a capitalized version",
    "bmaths": "[bmaths <arg>] Takes a math equation (Like 5+5) and returns a binary result",
    "coin": "[coin] Flips a coin",
    "dance": "[dance] Makes %s do a little dance" % (Name),
    "delquote": "(OP ONLY) [delquote <arg>] Removes a quote with the filename equal to the argument",
    "disable": "(OP ONLY) [disable] Disables all output from %s" % (Name),
    "disable dance": "(HALFOP / OP ONLY) [disable dance] or [dd] Toggles dancing",
    "disable fml": "(HALFOP / OP ONLY) [disable fml] Disables FML",
    "eightball": "[eightball <arg>] Responds to the argument",
    "enable": "(OP ONLY) [enable] After being disabled, enable will turn output back on",
    "enable fml": "{HALFOP / OP ONLY} [enable fml] After fml has been disabled, enable fml will make it available again",
    "fml": "[fml] Returns a random Fuck My Life bit",
    "give": "[give <arg>] Gives the Pneumatic Disposal Unit the argument",
    "help": "[help [<command>]] Returns the list of commands or a detailed description of a command if specified",
    "hmaths": "[hmaths <arg>] Takes a math equation (Like 5+5) and returns a hex result",
    "makequote": "[makequote <arg>] Creates a quote with arg being the quote itself",
    "maths": "[maths <arg>] Takes a math equation (Like 5+5) and returns a default result",
    "note": "[note <arg1> [<arg2>]] Opens a note if only arg1 is specified, Creates a note with the name of arg1 and contents of arg2 if arg2 is specified, if you prefix the note name with [CP], it creates a public note only to that channel. Which can be accessed by !note _<note name>",
    "notes": "[notes] Displays all your saved notes on %s" % (Name),
    "otherball": "[otherball] If Floorbot is on the same channel, %s will ask him a random question when this command is passed" % (Name),
    "purgemessages": "[purgemessages] Used to delete all your Tell messages (%s,Tell <User> <Message>)" % (Name),
    "quote": "[quote [<author>]] Picks a random quote, if the author is specified, a random quote by that author",
    "redmine": "[redmine] If you have a note called redmine, with a valid whoopshop redmine address, this displays all the bugs labeled as 'New' on that page. It also displays the todo note if it's found.",
    "replace": "[replace] Fixes the Pneumatic Smasher if it's been broken",
    "rot13": "[rot13 <arg>] Encrypts the arg by using the rot13 method",
    "rtd": "[rtd [<arg1>d<arg2>]] Rolls a six-sided dice if no arguments are specified, otherwise arg1 is the amount of rolls and arg2 is the amount of sides the dice have",
    "sarcasticball": "[sarcasticball <arg>] Responds to the argument sarcastically",
    "sball": "[sball <arg>] Responds to the argument sarcastically",
    "srtd": "[srtd <arg1>d<arg2>] Rolls <arg1> amount of <arg2> sided die without showing the dice values separately",
    "stop": "(RESTRICTED TO OP AND CREATOR) [stop] Stops %s, plain and simple" % (Name),
    "suggest": "[suggest <arg>] Saves a suggestion given to %s, to be later viewed by the creator" % (Name),
    "take": "[take <arg>] Takes an item specified in the argument from the Pneumatic Smasher",
    "tban": "(OP ONLY) [tban <user> <seconds>] When %s is an operator, You can ban an user for specified amount of seconds" % (Name),
    "thm": "(RESTRICTED TO OP AND CREATOR) [thm] Normally in 8ball and sarcasticball, Users are not shown, instead replaced by things like demons or plasma researchers, toggling this changes that behaviour.",
    "tm": "(OP AND CREATOR ONLY) [tm] Toggles marakov",
    "togglequotemakers": "(OP ONLY) [togglequotemakers or tqm] Normally with the quote command, makers are not shown, this toggles that behaviour.",
    "tqm": "(OP ONLY) [tqm or togglequotemakers] Normally with the quote command, makers are not shown, this toggles that behaviour.",
    "toggleofflinemessages": "(OP ONLY) [toggleofflinemessages or tom] Allows an operator to toggle leaving Tell messages (%s, Tell <User> <Message)" % (Name),
    "tom": "(OP ONLY) [tom or toggleofflinemessages] Allows an operator to toggle leaving Tell messages (%s, Tell <User> <Message)" % (Name),
    "toggleyoutubereveal": "(OP ONLY) [toggleyoutubereveal] or [tyr] Toggles the automatic showing of youtube video titles based on URL's.",
    "tyr": "(OP ONLY) [tyr] or [toggleyoutubereveal] Toggles the automatic showing of youtube video titles based on URL's.",
    "translate": "(OP ONLY) [translate <user>] Whenever the user says something in allcaps, it's capitalized.",
    "uptime": "[uptime] Displays how long %s has been alive on the channel." % (Name),
    "use": "[use] Uses the Pneumatic Smasher.",
    "youtube": "[youtube <url>] Shows the title of a video by checking the URL provided.",
    "version": "[version] Shows the current version of %s." % (Name),
    "weather": "[weather <location>] Displays the current weather of the provided location.",
    "life": "I cannot help you with that, sorry.",
}
| agpl-3.0 |
phoebusliang/parallel-lettuce | tests/integration/lib/Django-1.3/django/contrib/auth/tests/basic.py | 154 | 3632 | from django.test import TestCase
from django.contrib.auth.models import User, AnonymousUser
from django.core.management import call_command
from StringIO import StringIO
class BasicTestCase(TestCase):
    """Smoke tests for User creation/passwords, AnonymousUser properties,
    superusers and the createsuperuser management command."""

    def test_user(self):
        "Check that users can be created and can set their password"
        u = User.objects.create_user('testuser', 'test@example.com', 'testpw')
        self.assertTrue(u.has_usable_password())
        self.assertFalse(u.check_password('bad'))
        self.assertTrue(u.check_password('testpw'))

        # Check we can manually set an unusable password
        u.set_unusable_password()
        u.save()
        self.assertFalse(u.check_password('testpw'))
        self.assertFalse(u.has_usable_password())
        u.set_password('testpw')
        self.assertTrue(u.check_password('testpw'))
        u.set_password(None)
        self.assertFalse(u.has_usable_password())

        # Check authentication/permissions
        self.assertTrue(u.is_authenticated())
        self.assertFalse(u.is_staff)
        self.assertTrue(u.is_active)
        self.assertFalse(u.is_superuser)

        # Check API-based user creation with no password.
        # Fixed: this previously asserted on `u` (which had already been
        # given an unusable password above), so `u2` was never tested.
        u2 = User.objects.create_user('testuser2', 'test2@example.com')
        self.assertFalse(u2.has_usable_password())

    def test_anonymous_user(self):
        "Check the properties of the anonymous user"
        a = AnonymousUser()
        self.assertFalse(a.is_authenticated())
        self.assertFalse(a.is_staff)
        self.assertFalse(a.is_active)
        self.assertFalse(a.is_superuser)
        self.assertEqual(a.groups.all().count(), 0)
        self.assertEqual(a.user_permissions.all().count(), 0)

    def test_superuser(self):
        "Check the creation and properties of a superuser"
        # Renamed local from `super` to avoid shadowing the builtin.
        su = User.objects.create_superuser('super', 'super@example.com', 'super')
        self.assertTrue(su.is_superuser)
        self.assertTrue(su.is_active)
        self.assertTrue(su.is_staff)

    def test_createsuperuser_management_command(self):
        "Check the operation of the createsuperuser management command"
        # We can use the management command to create a superuser
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe",
            email="joe@somewhere.org",
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = User.objects.get(username="joe")
        self.assertEqual(u.email, 'joe@somewhere.org')
        # created password should be unusable
        self.assertFalse(u.has_usable_password())

        # We can suppress output on the management command
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe2",
            email="joe2@somewhere.org",
            verbosity=0,
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, '')
        u = User.objects.get(username="joe2")
        self.assertEqual(u.email, 'joe2@somewhere.org')
        self.assertFalse(u.has_usable_password())

        # Usernames may contain characters such as '+' (email-style names).
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe+admin@somewhere.org",
            email="joe@somewhere.org",
            stdout=new_io
        )
        u = User.objects.get(username="joe+admin@somewhere.org")
        self.assertEqual(u.email, 'joe@somewhere.org')
        self.assertFalse(u.has_usable_password())
| gpl-3.0 |
suneeshtr/persona | node_modules/l/node_modules/hook.io/node_modules/npm/node_modules/node-gyp/gyp/test/variables/filelist/gyptest-filelist.py | 102 | 1583 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<|(list.txt ...)' syntax commands.
"""
import os
import sys
import TestGyp
# Run gyp in 'gypd' (debug dump) mode so variable expansion results can be
# compared as plain text instead of being built.
test = TestGyp.TestGyp(format='gypd')
expect = test.read('filelist.gyp.stdout')
if sys.platform == 'win32':
    # The recorded stdout uses POSIX separators and LF endings; normalise
    # it for Windows before comparing.
    expect = expect.replace('/', r'\\').replace('\r\n', '\n')
test.run_gyp('src/filelist.gyp',
             '--debug', 'variables',
             stdout=expect, ignore_line_numbers=True)
# Verify the filelist.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('src/filelist.gypd').replace(
    '\r', '').replace('\\\\', '/')
expect = test.read('filelist.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
    print "Unexpected contents of `src/filelist.gypd'"
    test.diff(expect, contents, 'src/filelist.gypd ')
    test.fail_test()
# The generated names.txt must contain exactly the expanded name list.
contents = test.read('src/names.txt')
expect = 'John\nJacob\nJingleheimer\nSchmidt\n'
if not test.match(contents, expect):
    print "Unexpected contents of `src/names.txt'"
    test.diff(expect, contents, 'src/names.txt ')
    test.fail_test()
test.pass_test()
| gpl-2.0 |
guyskk/flask-restaction | tests/test_cli.py | 1 | 1613 | import requests
from unittest import mock
from flask_restaction.cli import parse_meta, resjs, main
def test_url_prefix():
    """The live test server's meta endpoint reports an empty URL prefix."""
    base = "http://127.0.0.1:5000"
    meta = requests.get(base, headers={'Accept': 'application/json'}).json()
    prefix, __, __ = parse_meta(meta)
    assert prefix == ""
def test_resjs_web(tmpdir):
    """resjs generates a minified browser bundle honouring a URL prefix."""
    out = tmpdir.join("res.js")
    resjs("http://127.0.0.1:5000", out.strpath, prefix="/api", min=True)
    assert out.check()
def test_resjs_node(tmpdir):
    """resjs generates a node-flavoured bundle."""
    out = tmpdir.join("res.js")
    resjs("http://127.0.0.1:5000", out.strpath, node=True)
    assert out.check()
def test_api_meta_view():
    """The meta view serves res.js/res.min.js deterministically plus docs assets."""
    base = "http://127.0.0.1:5000"

    first_js = requests.get(base + "?f=res.js")
    assert first_js.headers["Content-Type"] == "application/javascript"
    first_min = requests.get(base + "?f=res.min.js")
    assert first_min.headers["Content-Type"] == "application/javascript"

    # Repeated requests must return byte-identical content.
    assert requests.get(base + "?f=res.js").content == first_js.content
    assert requests.get(base + "?f=res.min.js").content == first_min.content

    assert requests.get(base + "?f=docs.min.js").status_code == 200
    assert requests.get(base + "?f=docs.min.css").status_code == 200

    # Unknown assets yield 404.
    assert requests.get(base + "?f=unknown.js").status_code == 404
def test_cli(tmpdir):
    """The console entry point builds res.js from a mocked sys.argv."""
    target = tmpdir.join("res.js").strpath
    argv = ["resjs", "http://127.0.0.1:5000", "-d", target]
    with mock.patch("sys.argv", new=argv):
        main()
| mit |
icomms/wqmanager | reportlab/graphics/barcode/widgets.py | 6 | 14421 | #copyright ReportLab Europe Limited. 2000-2006
#see license.txt for license details
__version__=''' $Id: widgets.py 3086 2007-05-22 13:10:34Z rgbecker $ '''
__all__= (
'BarcodeI2of5',
'BarcodeCode128',
'BarcodeStandard93',
'BarcodeExtended93',
'BarcodeStandard39',
'BarcodeExtended39',
'BarcodeMSI',
'BarcodeCodabar',
'BarcodeCode11',
'BarcodeFIM',
'BarcodePOSTNET',
'BarcodeUSPS_4State',
)
from reportlab.lib.validators import isInt, isNumber, isColor, isString, isColorOrNone, OneOf, isBoolean, EitherOr, isNumberOrNone
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.lib.colors import black
from reportlab.graphics.shapes import Line, Rect, Group, NotImplementedError, String
from reportlab.graphics.charts.areas import PlotArea
'''
#snippet
#first make your Drawing
from reportlab.graphics.shapes import Drawing
d= Drawing(100,50)
#create and set up the widget
from reportlab.graphics.barcode.widgets import BarcodeStandard93
bc = BarcodeStandard93()
bc.value = 'RGB-123456'
#add to the drawing and save
d.add(bc)
# d.save(formats=['gif','pict'],fnRoot='bc_sample')
'''
class _BarcodeWidget(PlotArea):
    # Abstract base mixing a drawable widget with a barcode encoder class
    # (the "BCC" passed by each concrete subclass's __init__).
    _attrMap = AttrMap(BASE=PlotArea,
        barStrokeColor = AttrMapValue(isColorOrNone, desc='Color of bar borders.'),
        barFillColor = AttrMapValue(isColorOrNone, desc='Color of bar interior areas.'),
        barStrokeWidth = AttrMapValue(isNumber, desc='Width of bar borders.'),
        value = AttrMapValue(EitherOr((isString,isNumber)), desc='Value.'),
        textColor = AttrMapValue(isColorOrNone, desc='Color of human readable text.'),
        valid = AttrMapValue(isBoolean),
        validated = AttrMapValue(isString,desc="validated form of input"),
        encoded = AttrMapValue(None,desc="encoded form of input"),
        decomposed = AttrMapValue(isString,desc="decomposed form of input"),
        canv = AttrMapValue(None,desc="temporarily used for internal methods"),
        gap = AttrMapValue(isNumberOrNone, desc='Width of inter character gaps.'),
        )
    barStrokeColor = barFillColor = textColor = black
    barStrokeWidth = 0
    _BCC = None
    def __init__(self,BCC=None,_value='',**kw):
        self._BCC = BCC
        # Dynamically derive a class combining this widget class with the
        # barcode encoder class, and rebind the instance to it so both
        # hierarchies' methods resolve on self.
        class Combiner(self.__class__,BCC):
            __name__ = self.__class__.__name__
        self.__class__ = Combiner
        PlotArea.__init__(self)
        # Drop PlotArea's width/height instance attributes; the barcode
        # classes compute their own geometry.
        del self.width, self.height
        self.x = self.y = 0
        kw.setdefault('value',_value)
        BCC.__init__(self,**kw)
    def rect(self,x,y,w,h,**kw):
        # Canvas-style primitive used by the encoder's draw(); translated
        # into a Rect shape added to the group being built.
        self._Gadd(Rect(self.x+x,self.y+y,w,h,
            strokeColor=self.barStrokeColor,strokeWidth=self.barStrokeWidth, fillColor=self.barFillColor))
    def draw(self):
        """Build and return a Group by letting the encoder draw onto self,
        which temporarily poses as its canvas (see rect/annotate)."""
        if not self._BCC: raise NotImplementedError("Abstract class %s cannot be drawn" % self.__class__.__name__)
        self.canv = self
        G = Group()
        self._Gadd = G.add
        self._Gadd(Rect(self.x,self.y,self.width,self.height,fillColor=None,strokeColor=None,strokeWidth=0.0001))
        self._BCC.draw(self)
        del self.canv, self._Gadd
        return G
    def annotate(self,x,y,text,fontName,fontSize,anchor='middle'):
        # Canvas-style text primitive for the human readable caption.
        self._Gadd(String(self.x+x,self.y+y,text,fontName=fontName,fontSize=fontSize,
            textAnchor=anchor,fillColor=self.textColor))
class BarcodeI2of5(_BarcodeWidget):
    """Interleaved 2 of 5 is used in distribution and warehouse industries.
    It encodes an even-numbered sequence of numeric digits. There is an optional
    module 10 check digit; if including this, the total length must be odd so that
    it becomes even after including the check digit. Otherwise the length must be
    even. Since the check digit is optional, our library does not check it.
    """
    # Sample values exercised by the test suite.
    _tests = [
        '12',
        '1234',
        '123456',
        '12345678',
        '1234567890'
        ]
    codeName = "I2of5"
    _attrMap = AttrMap(BASE=_BarcodeWidget,
        barWidth = AttrMapValue(isNumber,'''(float, default .0075):
            X-Dimension, or width of the smallest element
            Minumum is .0075 inch (7.5 mils).'''),
        ratio = AttrMapValue(isNumber,'''(float, default 2.2):
            The ratio of wide elements to narrow elements.
            Must be between 2.0 and 3.0 (or 2.2 and 3.0 if the
            barWidth is greater than 20 mils (.02 inch))'''),
        gap = AttrMapValue(isNumberOrNone,'''(float or None, default None):
            width of intercharacter gap. None means "use barWidth".'''),
        barHeight = AttrMapValue(isNumber,'''(float, see default below):
            Height of the symbol. Default is the height of the two
            bearer bars (if they exist) plus the greater of .25 inch
            or .15 times the symbol's length.'''),
        checksum = AttrMapValue(isBoolean,'''(bool, default 1):
            Whether to compute and include the check digit'''),
        bearers = AttrMapValue(isNumber,'''(float, in units of barWidth. default 3.0):
            Height of bearer bars (horizontal bars along the top and
            bottom of the barcode). Default is 3 x-dimensions.
            Set to zero for no bearer bars. (Bearer bars help detect
            misscans, so it is suggested to leave them on).'''),
        quiet = AttrMapValue(isBoolean,'''(bool, default 1):
            Whether to include quiet zones in the symbol.'''),
        lquiet = AttrMapValue(isNumber,'''(float, see default below):
            Quiet zone size to left of code, if quiet is true.
            Default is the greater of .25 inch, or .15 times the symbol's
            length.'''),
        rquiet = AttrMapValue(isNumber,'''(float, defaults as above):
            Quiet zone size to right left of code, if quiet is true.'''),
        fontName = AttrMapValue(isString, desc='human readable font'),
        fontSize = AttrMapValue(isNumber, desc='human readable font size'),
        humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
        stop = AttrMapValue(isBoolean, desc='if we use start/stop symbols (default 1)'),
        )
    _bcTransMap = {}
    def __init__(self,**kw):
        # Function-level import — presumably to avoid a circular import at
        # module load time (TODO confirm).
        from reportlab.graphics.barcode.common import I2of5
        _BarcodeWidget.__init__(self,I2of5,1234,**kw)
class BarcodeCode128(BarcodeI2of5):
    """Code 128 encodes any number of characters in the ASCII character set.
    """
    _tests = [
        'ReportLab Rocks!'
    ]
    codeName = "Code128"
    # Code 128 computes its own mandatory checksum and has fixed element
    # ratios, so those I2of5 attributes (plus bearers/stop) do not apply.
    # Fix: the original tuple listed 'checksum' twice; the duplicate entry
    # was redundant and has been removed.
    _attrMap = AttrMap(BASE=BarcodeI2of5,UNWANTED=('bearers','checksum','ratio','stop'))
    def __init__(self,**kw):
        from reportlab.graphics.barcode.code128 import Code128
        _BarcodeWidget.__init__(self,Code128,"AB-12345678",**kw)
class BarcodeStandard93(BarcodeCode128):
    """This is a compressed form of Code 39"""
    codeName = "Standard93"
    # Unlike Code 128, Code 93 has optional start/stop symbols, so 'stop'
    # is re-added to the attribute map removed by the base class.
    _attrMap = AttrMap(BASE=BarcodeCode128,
        stop = AttrMapValue(isBoolean, desc='if we use start/stop symbols (default 1)'),
        )
    def __init__(self,**kw):
        from reportlab.graphics.barcode.code93 import Standard93
        _BarcodeWidget.__init__(self,Standard93,"CODE 93",**kw)
class BarcodeExtended93(BarcodeStandard93):
    """This is a compressed form of Code 39, allowing the full ASCII charset"""
    codeName = "Extended93"
    def __init__(self,**kw):
        from reportlab.graphics.barcode.code93 import Extended93
        # Default demo value deliberately includes lowercase/punctuation to
        # show the extended charset.
        _BarcodeWidget.__init__(self,Extended93,"L@@K! Code 93 ;-)",**kw)
class BarcodeStandard39(BarcodeI2of5):
    """Code39 is widely used in non-retail, especially US defence and health.
    Allowed characters are 0-9, A-Z (caps only), space, and -.$/+%*.
    """
    codeName = "Standard39"
    def __init__(self,**kw):
        from reportlab.graphics.barcode.code39 import Standard39
        _BarcodeWidget.__init__(self,Standard39,"A012345B%R",**kw)
class BarcodeExtended39(BarcodeI2of5):
    """Extended 39 encodes the full ASCII character set by encoding
    characters as pairs of Code 39 characters; $, /, % and + are used as
    shift characters."""
    codeName = "Extended39"
    def __init__(self,**kw):
        from reportlab.graphics.barcode.code39 import Extended39
        _BarcodeWidget.__init__(self,Extended39,"A012345B}",**kw)
class BarcodeMSI(BarcodeI2of5):
    """MSI is used for inventory control in retail applications.
    There are several methods for calculating check digits so we
    do not implement one.
    """
    codeName = "MSI"
    def __init__(self,**kw):
        from reportlab.graphics.barcode.common import MSI
        _BarcodeWidget.__init__(self,MSI,1234,**kw)
class BarcodeCodabar(BarcodeI2of5):
    """Used in blood banks, photo labs and FedEx labels.
    Encodes 0-9, -$:/.+, and four start/stop characters A-D.
    """
    codeName = "Codabar"
    def __init__(self,**kw):
        from reportlab.graphics.barcode.common import Codabar
        # Demo value wrapped in the A/B start-stop pair.
        _BarcodeWidget.__init__(self,Codabar,"A012345B",**kw)
class BarcodeCode11(BarcodeI2of5):
    """Used mostly for labelling telecommunications equipment.
    It encodes numeric digits.
    """
    codeName = "Code11"
    # Code 11 supports 0, 1 or 2 checksum digits, so 'checksum' is an int
    # here (the base class uses a boolean).
    _attrMap = AttrMap(BASE=BarcodeI2of5,
        checksum = AttrMapValue(isInt,'''(integer, default 2):
            Whether to compute and include the check digit(s).
            (0 none, 1 1-digit, 2 2-digit, -1 auto, default -1):
            How many checksum digits to include. -1 ("auto") means
            1 if the number of digits is 10 or less, else 2.'''),
        )
    def __init__(self,**kw):
        from reportlab.graphics.barcode.common import Code11
        _BarcodeWidget.__init__(self,Code11,"01234545634563",**kw)
class BarcodeFIM(_BarcodeWidget):
    """
    FIM was developed as part of the POSTNET barcoding system. FIM (Face Identification Marking) is used by the cancelling machines to sort mail according to whether or not they have bar code and their postage requirements. There are four types of FIM called FIM A, FIM B, FIM C, and FIM D.
    The four FIM types have the following meanings:
        FIM A- Postage required pre-barcoded
        FIM B - Postage pre-paid, no bar code exists
        FIM C- Postage prepaid prebarcoded
        FIM D- Postage required, no bar code exists
    """
    codeName = "FIM"
    # Declarative attribute map consumed by reportlab's widget machinery;
    # the triple-quoted strings are user-visible documentation.
    _attrMap = AttrMap(BASE=_BarcodeWidget,
        barWidth = AttrMapValue(isNumber,'''(float, default 1/32in): the bar width.'''),
        spaceWidth = AttrMapValue(isNumber,'''(float or None, default 1/16in):
            width of intercharacter gap. None means "use barWidth".'''),
        barHeight = AttrMapValue(isNumber,'''(float, default 5/8in): The bar height.'''),
        quiet = AttrMapValue(isBoolean,'''(bool, default 0):
            Whether to include quiet zones in the symbol.'''),
        lquiet = AttrMapValue(isNumber,'''(float, default: 15/32in):
            Quiet zone size to left of code, if quiet is true.'''),
        rquiet = AttrMapValue(isNumber,'''(float, default 1/4in):
            Quiet zone size to right left of code, if quiet is true.'''),
        fontName = AttrMapValue(isString, desc='human readable font'),
        fontSize = AttrMapValue(isNumber, desc='human readable font size'),
        humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
        )
    def __init__(self,**kw):
        from reportlab.graphics.barcode.usps import FIM
        # "A" selects the FIM A mark by default.
        _BarcodeWidget.__init__(self,FIM,"A",**kw)
class BarcodePOSTNET(_BarcodeWidget):
    # US Postal Service POSTNET symbology: encodes a ZIP or ZIP+4 code
    # using tall/short bars.
    codeName = "POSTNET"
    _attrMap = AttrMap(BASE=_BarcodeWidget,
        barWidth = AttrMapValue(isNumber,'''(float, default 0.018*in): the bar width.'''),
        spaceWidth = AttrMapValue(isNumber,'''(float or None, default 0.0275in): width of intercharacter gap.'''),
        shortHeight = AttrMapValue(isNumber,'''(float, default 0.05in): The short bar height.'''),
        barHeight = AttrMapValue(isNumber,'''(float, default 0.125in): The full bar height.'''),
        fontName = AttrMapValue(isString, desc='human readable font'),
        fontSize = AttrMapValue(isNumber, desc='human readable font size'),
        humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
        )
    def __init__(self,**kw):
        from reportlab.graphics.barcode.usps import POSTNET
        # Default demo value is a ZIP+4 code.
        _BarcodeWidget.__init__(self,POSTNET,"78247-1043",**kw)
class BarcodeUSPS_4State(_BarcodeWidget):
    # USPS Intelligent Mail (4-state) barcode widget.
    codeName = "USPS_4State"
    _attrMap = AttrMap(BASE=_BarcodeWidget,
        widthSize = AttrMapValue(isNumber,'''(float, default 1): the bar width size adjustment between 0 and 1.'''),
        heightSize = AttrMapValue(isNumber,'''(float, default 1): the bar height size adjustment between 0 and 1.'''),
        fontName = AttrMapValue(isString, desc='human readable font'),
        fontSize = AttrMapValue(isNumber, desc='human readable font size'),
        tracking = AttrMapValue(isString, desc='tracking data'),
        routing = AttrMapValue(isString, desc='routing data'),
        humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
        )
    def __init__(self,**kw):
        from reportlab.graphics.barcode.usps4s import USPS_4State
        # Supply a routing default only if the caller did not pass one.
        kw.setdefault('routing','01234567891')
        _BarcodeWidget.__init__(self,USPS_4State,'01234567094987654321',**kw)
    def annotate(self,x,y,text,fontName,fontSize,anchor='middle'):
        # 4-state human-readable text is conventionally left-anchored, so
        # force anchor='start' regardless of what was requested.
        _BarcodeWidget.annotate(self,x,y,text,fontName,fontSize,anchor='start')
if __name__=='__main__':
    # Self-test: render every barcode widget once and build a tiny HTML
    # index of the generated images in ./out.
    import os, sys, glob
    from reportlab.graphics.shapes import Drawing
    os.chdir(os.path.dirname(sys.argv[0]))
    if not os.path.isdir('out'):
        os.mkdir('out')
    # Fix: use an explicit loop instead of map() for its side effect --
    # under Python 3 a bare map() is lazy and would delete nothing.
    for fn in glob.glob(os.path.join('out','*')):
        os.remove(fn)
    html = ['<html><head></head><body>']
    a = html.append
    for C in (BarcodeI2of5,
            BarcodeCode128,
            BarcodeStandard93,
            BarcodeExtended93,
            BarcodeStandard39,
            BarcodeExtended39,
            BarcodeMSI,
            BarcodeCodabar,
            BarcodeCode11,
            BarcodeFIM,
            BarcodePOSTNET,
            BarcodeUSPS_4State,
            ):
        name = C.__name__
        i = C()
        D = Drawing(100,50)
        D.add(i)
        D.save(formats=['gif','pict'],outDir='out',fnRoot=name)
        a('<h2>%s</h2><img src="%s.gif"><br>' % (name, name))
    a('</body></html>')
    # Fix: close the index file deterministically instead of relying on
    # garbage collection of the unbound file object.
    with open(os.path.join('out','index.html'),'w') as f:
        f.write('\n'.join(html))
| bsd-3-clause |
vipul-sharma20/oh-mainline | vendor/packages/Django/django/contrib/localflavor/in_/forms.py | 101 | 3911 | """
India-specific Form helpers.
"""
from __future__ import absolute_import, unicode_literals
import re
from django.contrib.localflavor.in_.in_states import STATES_NORMALIZED, STATE_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, CharField, Select
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
phone_digits_re = re.compile(r"""
(
(?P<std_code> # the std-code group
^0 # all std-codes start with 0
(
(?P<twodigit>\d{2}) | # either two, three or four digits
(?P<threedigit>\d{3}) | # following the 0
(?P<fourdigit>\d{4})
)
)
[-\s] # space or -
(?P<phone_no> # the phone number group
[1-6] # first digit of phone number
(
(?(twodigit)\d{7}) | # 7 more phone digits for 3 digit stdcode
(?(threedigit)\d{6}) | # 6 more phone digits for 4 digit stdcode
(?(fourdigit)\d{5}) # 5 more phone digits for 5 digit stdcode
)
)
)$""", re.VERBOSE)
class INZipCodeField(RegexField):
    """Form field for Indian zip codes.

    Accepts "NNNNNN" or "NNN NNN" and normalises the cleaned value to the
    six-digit "NNNNNN" form.
    """
    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXXXX or XXX XXX.'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        super(INZipCodeField, self).__init__(
            r'^\d{3}\s?\d{3}$', max_length, min_length, *args, **kwargs)

    def clean(self, value):
        super(INZipCodeField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        # Drop the optional separating space: "NNN NNN" -> "NNNNNN".
        return re.sub(r'^(\d{3})\s(\d{3})$', r'\1\2', value)
class INStateField(Field):
    """
    A form field that validates its input is an Indian state name or
    abbreviation. It normalizes the input to the standard two-letter vehicle
    registration abbreviation for the given state or union territory.
    """
    default_error_messages = {
        'invalid': _('Enter an Indian state or territory.'),
    }

    def clean(self, value):
        """Return the two-letter abbreviation, '' for empty input, or raise
        ValidationError for anything unrecognised."""
        super(INStateField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        try:
            value = value.strip().lower()
        except AttributeError:
            # Non-string input (e.g. an int) cannot be normalized; fall
            # through to the generic validation error below.
            pass
        else:
            try:
                # 'value' was already stripped and lower-cased above; the
                # original applied .strip().lower() a second time here.
                return smart_text(STATES_NORMALIZED[value])
            except KeyError:
                pass
        raise ValidationError(self.error_messages['invalid'])
class INStateSelect(Select):
    """
    A Select widget that uses a list of Indian states/territories as its
    choices.
    """
    def __init__(self, attrs=None):
        # STATE_CHOICES comes from django.contrib.localflavor.in_.in_states.
        super(INStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class INPhoneNumberField(CharField):
    """Form field validating Indian phone numbers with an STD code.

    A valid value matches ``phone_digits_re``: a '0'-prefixed STD code of
    two to four digits, a space or hyphen separator, then a subscriber
    number sized so the digits total ten (e.g. "0XXX-XXXXXXX" or
    "0XXX XXXXXXX").  The cleaned value is returned as text, unchanged.
    """
    default_error_messages = {
        'invalid': _('Phone numbers must be in 02X-8X or 03X-7X or 04X-6X format.'),
    }

    def clean(self, value):
        super(INPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        value = smart_text(value)
        if phone_digits_re.match(value) is None:
            raise ValidationError(self.error_messages['invalid'])
        return value
| agpl-3.0 |
a-tal/EsiPy | test/mock.py | 1 | 6095 | # -*- encoding: utf-8 -*-
from __future__ import absolute_import
import datetime
import httmock
def make_expire_time_str():
    """Return an RFC 7231 ``Expires`` header value one day in the future.

    The timestamp is always expressed in GMT, as the header requires.
    """
    tomorrow = datetime.datetime.utcnow() + datetime.timedelta(days=1)
    return tomorrow.strftime('%a, %d %b %Y %H:%M:%S GMT')
@httmock.urlmatch(
    scheme="https",
    netloc=r"login\.eveonline\.com$",
    path=r"^/oauth/token$"
)
def oauth_token(url, request):
    """Mock the EVE SSO token endpoint (auth and refresh grants).

    Request bodies containing 'fail_test' get a 400; bodies containing
    'no_refresh' get a token response without a refresh_token; everything
    else gets the full token payload.
    """
    body = request.body
    if 'fail_test' in body:
        # 'Failed successfuly' (sic) -- preserved verbatim from the fixture.
        return httmock.response(
            status_code=400,
            content={'message': 'Failed successfuly'}
        )
    content = {
        'access_token': 'access_token',
        'expires_in': 1200,
    }
    if 'no_refresh' not in body:
        content['refresh_token'] = 'refresh_token'
    return httmock.response(status_code=200, content=content)
@httmock.urlmatch(
    scheme="https",
    netloc=r"login\.eveonline\.com$",
    path=r"^/oauth/verify$"
)
def oauth_verify(url, request):
    """Mock the SSO /oauth/verify endpoint: always returns the same
    fixed test character payload."""
    return httmock.response(
        status_code=200,
        content={
            'CharacterID': 123456789,
            'CharacterName': 'EsiPy Tester',
            'CharacterOwnerHash': 'YetAnotherHash'
        }
    )
@httmock.urlmatch(
    scheme="https",
    netloc=r"login\.eveonline\.com$",
    path=r"^/oauth/verify$"
)
def oauth_verify_fail(url, request):
    """Mock the SSO /oauth/verify endpoint failing with HTTP 400."""
    return httmock.response(
        status_code=400,
        content={'message': 'Failed successfuly'}
    )
@httmock.urlmatch(
    scheme="https",
    netloc=r"esi\.tech\.ccp\.is$",
    path=r"^/latest/incursions/$"
)
def public_incursion(url, request):
    """ Mock endpoint for incursion.
    Public endpoint
    """
    # Expires header is set one day ahead so client-side caching is
    # exercised by tests that use this mock.
    return httmock.response(
        headers={'Expires': make_expire_time_str()},
        status_code=200,
        content=[
            {
                "type": "Incursion",
                "state": "mobilizing",
                "staging_solar_system_id": 30003893,
                "constellation_id": 20000568,
                "infested_solar_systems": [
                    30003888,
                ],
                "has_boss": True,
                "faction_id": 500019,
                "influence": 1
            }
        ]
    )
@httmock.urlmatch(
    scheme="https",
    netloc=r"esi\.tech\.ccp\.is$",
    path=r"^/latest/incursions/$"
)
def public_incursion_no_expires(url, request):
    """ Mock endpoint for incursion.
    Public endpoint without cache (no Expires header is sent, so the
    response must not be cached by the client).
    """
    return httmock.response(
        status_code=200,
        content=[
            {
                "type": "Incursion",
                "state": "mobilizing",
                "staging_solar_system_id": 30003893,
                "constellation_id": 20000568,
                "infested_solar_systems": [
                    30003888,
                ],
                "has_boss": True,
                "faction_id": 500019,
                "influence": 1
            }
        ]
    )
@httmock.urlmatch(
    scheme="https",
    netloc=r"esi\.tech\.ccp\.is$",
    path=r"^/latest/incursions/$"
)
def public_incursion_no_expires_second(url, request):
    """ Mock endpoint for incursion.
    Public endpoint without cache.  Differs from public_incursion_no_expires
    only in "state" ("established" vs "mobilizing") so tests can detect
    that a second, uncached request really hit the endpoint again.
    """
    return httmock.response(
        status_code=200,
        content=[
            {
                "type": "Incursion",
                "state": "established",
                "staging_solar_system_id": 30003893,
                "constellation_id": 20000568,
                "infested_solar_systems": [
                    30003888,
                ],
                "has_boss": True,
                "faction_id": 500019,
                "influence": 1
            }
        ]
    )
@httmock.urlmatch(
    scheme="https",
    netloc=r"esi\.tech\.ccp\.is$",
    path=r"^/latest/characters/(\d+)/location/$"
)
def auth_character_location(url, request):
    """ Mock endpoint for character location.
    Authed endpoint that check for auth
    """
    # NOTE(review): no Authorization header is actually inspected here --
    # the auth check presumably happens in the client under test.
    return httmock.response(
        headers={'Expires': make_expire_time_str()},
        status_code=200,
        content={
            "station_id": 60004756,
            "solar_system_id": 30002543
        }
    )
@httmock.urlmatch(
    scheme="https",
    netloc=r"esi\.tech\.ccp\.is$",
    path=r"^/latest/incursions/$"
)
def public_incursion_warning(url, request):
    """ Mock endpoint for incursion.
    Public endpoint without cache that also carries a "Warning" header,
    for testing the client's deprecation-warning handling.
    """
    return httmock.response(
        status_code=200,
        headers={"Warning": "199 - This endpoint has been updated."},
        content=[
            {
                "type": "Incursion",
                "state": "established",
                "staging_solar_system_id": 30003893,
                "constellation_id": 20000568,
                "infested_solar_systems": [
                    30003888,
                ],
                "has_boss": True,
                "faction_id": 500019,
                "influence": 1
            }
        ]
    )
@httmock.urlmatch(
    scheme="https",
    netloc=r"esi\.tech\.ccp\.is$",
    path=r"^/latest/incursions/$"
)
def public_incursion_server_error(url, request):
    """ Mock endpoint for incursion.
    Always fails with HTTP 500; the call counter below lets tests assert
    how many retries the client attempted.
    """
    public_incursion_server_error.count += 1
    return httmock.response(
        status_code=500,
        content={
            "error": "broke",
            "count": public_incursion_server_error.count
        }
    )
# Function attribute used as a mutable call counter across requests.
public_incursion_server_error.count = 0
# Convenience bundle: every mock needed for a full authenticated flow
# (token grant, verify, then an authed ESI call).
_all_auth_mock_ = [
    oauth_token,
    oauth_verify,
    auth_character_location,
]
| bsd-3-clause |
pwmarcz/django | django/core/management/commands/compilemessages.py | 26 | 4687 | from __future__ import unicode_literals
import codecs
import glob
import os
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import find_command, popen_wrapper
from django.utils._os import npath, upath
def has_bom(fn):
    """Return True if the file at *fn* starts with a UTF-8 or UTF-16 BOM.

    Only the first four bytes are read, so this is cheap even on large
    files.  Consistency fix: the UTF-8 check now uses codecs.BOM_UTF8
    (identical bytes to the old b'\\xef\\xbb\\xbf' slice-compare) so all
    three checks share the same startswith/codecs idiom.
    """
    with open(fn, 'rb') as f:
        sample = f.read(4)
    return (sample.startswith(codecs.BOM_UTF8) or
            sample.startswith(codecs.BOM_UTF16_LE) or
            sample.startswith(codecs.BOM_UTF16_BE))
def is_writable(path):
    """Return True if *path* can be opened for appending.

    Known side effect: the file's access/modification times are bumped to
    the current time when it is writable.
    """
    try:
        with open(path, 'a'):
            os.utime(path, None)
        return True
    except (IOError, OSError):
        return False
class Command(BaseCommand):
    # Wraps GNU gettext's msgfmt: compiles every .po catalog found under the
    # project's locale directories into a binary .mo file.
    help = 'Compiles .po files to .mo files for use with builtin gettext support.'
    requires_system_checks = False
    leave_locale_alone = True
    program = 'msgfmt'
    program_options = ['--check-format']
    def add_arguments(self, parser):
        parser.add_argument('--locale', '-l', dest='locale', action='append', default=[],
            help='Locale(s) to process (e.g. de_AT). Default is to process all. '
                 'Can be used multiple times.')
        parser.add_argument('--exclude', '-x', dest='exclude', action='append', default=[],
            help='Locales to exclude. Default is none. Can be used multiple times.')
    def handle(self, **options):
        locale = options.get('locale')
        exclude = options.get('exclude')
        self.verbosity = int(options.get('verbosity'))
        # msgfmt (gettext >= 0.15) must be on PATH or there is nothing to do.
        if find_command(self.program) is None:
            raise CommandError("Can't find %s. Make sure you have GNU gettext "
                               "tools 0.15 or newer installed." % self.program)
        basedirs = [os.path.join('conf', 'locale'), 'locale']
        if os.environ.get('DJANGO_SETTINGS_MODULE'):
            from django.conf import settings
            basedirs.extend([upath(path) for path in settings.LOCALE_PATHS])
        # Gather existing directories.
        basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs)))
        if not basedirs:
            raise CommandError("This script should be run from the Django Git "
                               "checkout or your project or app tree, or with "
                               "the settings module specified.")
        # Build locale list
        all_locales = []
        for basedir in basedirs:
            locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % basedir))
            all_locales.extend(map(os.path.basename, locale_dirs))
        # Account for excluded locales
        locales = locale or all_locales
        locales = set(locales) - set(exclude)
        for basedir in basedirs:
            if locales:
                dirs = [os.path.join(basedir, l, 'LC_MESSAGES') for l in locales]
            else:
                dirs = [basedir]
            locations = []
            for ldir in dirs:
                for dirpath, dirnames, filenames in os.walk(ldir):
                    locations.extend((dirpath, f) for f in filenames if f.endswith('.po'))
            if locations:
                self.compile_messages(locations)
    def compile_messages(self, locations):
        """
        Locations is a list of tuples: [(directory, file), ...]
        """
        for i, (dirpath, f) in enumerate(locations):
            if self.verbosity > 0:
                self.stdout.write('processing file %s in %s\n' % (f, dirpath))
            po_path = os.path.join(dirpath, f)
            # msgfmt chokes on BOMs, so fail early with a clear message.
            if has_bom(po_path):
                raise CommandError("The %s file has a BOM (Byte Order Mark). "
                                   "Django only supports .po files encoded in "
                                   "UTF-8 and without any BOM." % po_path)
            base_path = os.path.splitext(po_path)[0]
            # Check writability on first location
            if i == 0 and not is_writable(npath(base_path + '.mo')):
                self.stderr.write("The po files under %s are in a seemingly not writable location. "
                                  "mo files will not be updated/created." % dirpath)
                return
            args = [self.program] + self.program_options + ['-o',
                    npath(base_path + '.mo'), npath(base_path + '.po')]
            output, errors, status = popen_wrapper(args)
            if status:
                if errors:
                    msg = "Execution of %s failed: %s" % (self.program, errors)
                else:
                    msg = "Execution of %s failed" % self.program
                raise CommandError(msg)
| bsd-3-clause |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/scipy/weave/examples/swig2_example.py | 100 | 1596 | """Simple example to show how to use weave.inline on SWIG2 wrapped
objects. SWIG2 refers to SWIG versions >= 1.3.
To run this example you must build the trivial SWIG2 extension called
swig2_ext. To do this you need to do something like this::
$ swig -c++ -python -I. -o swig2_ext_wrap.cxx swig2_ext.i
$ g++ -Wall -O2 -I/usr/include/python2.3 -fPIC -I. -c \
-o swig2_ext_wrap.os swig2_ext_wrap.cxx
$ g++ -shared -o _swig2_ext.so swig2_ext_wrap.os \
-L/usr/lib/python2.3/config
The files swig2_ext.i and swig2_ext.h are included in the same
directory that contains this file.
Note that weave's SWIG2 support works fine whether SWIG_COBJECT_TYPES
are used or not.
Author: Prabhu Ramachandran
Copyright (c) 2004, Prabhu Ramachandran
License: BSD Style.
"""
from __future__ import absolute_import, print_function
# Import our SWIG2 wrapped library
import swig2_ext
import scipy.weave as weave
from scipy.weave import swig2_spec, converters
# SWIG2 support is not enabled by default. We do this by adding the
# swig2 converter to the default list of converters.
converters.default.insert(0, swig2_spec.swig2_converter())
def test():
    """Instantiate the SWIG wrapped object and then call its method
    from C++ using weave.inline
    """
    a = swig2_ext.A()
    b = swig2_ext.foo() # This will be an APtr instance.
    b.thisown = 1 # Prevent memory leaks.
    # Inside the inline C++, 'a' and 'b' are real C++ pointers thanks to the
    # swig2 converter registered at module import time.
    code = """a->f();
    b->f();
    """
    weave.inline(code, ['a', 'b'], include_dirs=['.'],
                 headers=['"swig2_ext.h"'], verbose=1)
| gpl-3.0 |
alanwj/django-crispy-forms | crispy_forms/templatetags/crispy_forms_field.py | 14 | 5493 | try:
from itertools import izip
except ImportError:
izip = zip
from django import forms
from django import template
from django.template import loader, Context
from django.conf import settings
from crispy_forms.utils import TEMPLATE_PACK, get_template_pack
register = template.Library()
@register.filter
def is_checkbox(field):
    """True if the bound field's widget is a CheckboxInput."""
    return isinstance(field.field.widget, forms.CheckboxInput)
@register.filter
def is_password(field):
    """True if the bound field's widget is a PasswordInput."""
    return isinstance(field.field.widget, forms.PasswordInput)
@register.filter
def is_radioselect(field):
    """True if the bound field's widget is a RadioSelect."""
    return isinstance(field.field.widget, forms.RadioSelect)
@register.filter
def is_select(field):
    """True if the bound field's widget is a Select (or subclass)."""
    return isinstance(field.field.widget, forms.Select)
@register.filter
def is_checkboxselectmultiple(field):
    """True if the bound field's widget is a CheckboxSelectMultiple."""
    return isinstance(field.field.widget, forms.CheckboxSelectMultiple)
@register.filter
def is_file(field):
    """True if the bound field's widget is a ClearableFileInput."""
    return isinstance(field.field.widget, forms.ClearableFileInput)
@register.filter
def classes(field):
    """
    Returns CSS classes of a field
    """
    # NOTE(review): unlike the is_* filters above, this reads field.widget
    # rather than field.field.widget, i.e. it expects a form Field, not a
    # BoundField -- confirm against the templates that use it.
    return field.widget.attrs.get('class', None)
@register.filter
def css_class(field):
    """
    Returns widgets class name in lowercase
    """
    return field.field.widget.__class__.__name__.lower()
def pairwise(iterable):
    """s -> (s0,s1), (s2,s3), (s4, s5), ..."""
    it = iter(iterable)
    # Zipping one iterator with itself consumes two items per output tuple,
    # yielding non-overlapping pairs.
    return izip(it, it)
class CrispyFieldNode(template.Node):
    """Template node behind the {% crispy_field %} tag.

    Resolves the field variable from the context, patches the widget's CSS
    classes and attributes (class-name converters, bootstrap3 form-control,
    HTML5 'required'), applies any extra attributes passed to the tag, and
    returns the field for rendering.
    """
    def __init__(self, field, attrs):
        self.field = field
        self.attrs = attrs
        self.html5_required = 'html5_required'

    def render(self, context):
        # Nodes are not threadsafe so we must store and look up our instance
        # variables in the current rendering context first
        if self not in context.render_context:
            context.render_context[self] = (
                template.Variable(self.field),
                self.attrs,
                template.Variable(self.html5_required)
            )
        field, attrs, html5_required = context.render_context[self]
        field = field.resolve(context)
        try:
            html5_required = html5_required.resolve(context)
        except template.VariableDoesNotExist:
            html5_required = False
        # If template pack has been overridden in FormHelper we can pick it from context
        template_pack = context.get('template_pack', TEMPLATE_PACK)
        # MultiWidget exposes .widgets; plain widgets are treated as a 1-list.
        widgets = getattr(field.field.widget, 'widgets', [field.field.widget])
        if isinstance(attrs, dict):
            attrs = [attrs] * len(widgets)
        converters = {
            'textinput': 'textinput textInput',
            'fileinput': 'fileinput fileUpload',
            'passwordinput': 'textinput textInput',
        }
        converters.update(getattr(settings, 'CRISPY_CLASS_CONVERTERS', {}))
        for widget, attr in zip(widgets, attrs):
            class_name = widget.__class__.__name__.lower()
            class_name = converters.get(class_name, class_name)
            css_class = widget.attrs.get('class', '')
            if css_class:
                if css_class.find(class_name) == -1:
                    css_class += " %s" % class_name
            else:
                css_class = class_name
            if (
                template_pack == 'bootstrap3'
                and not is_checkbox(field)
                and not is_file(field)
            ):
                css_class += ' form-control'
            widget.attrs['class'] = css_class
            # HTML5 required attribute
            if html5_required and field.field.required and 'required' not in widget.attrs:
                # BUG FIX: the original used "is not 'RadioSelect'", an
                # identity comparison against a string literal that only
                # works by CPython interning accident (and is a
                # SyntaxWarning on Python 3.8+). Use equality instead.
                if field.field.widget.__class__.__name__ != 'RadioSelect':
                    widget.attrs['required'] = 'required'
            for attribute_name, attribute in attr.items():
                attribute_name = template.Variable(attribute_name).resolve(context)
                if attribute_name in widget.attrs:
                    widget.attrs[attribute_name] += " " + template.Variable(attribute).resolve(context)
                else:
                    widget.attrs[attribute_name] = template.Variable(attribute).resolve(context)
        return field
@register.tag(name="crispy_field")
def crispy_field(parser, token):
    """
    {% crispy_field field attrs %}
    """
    token = token.split_contents()
    field = token.pop(1)
    attrs = {}
    # We need to pop tag name, or pairwise would fail
    token.pop(0)
    # Remaining tokens come in "name value" pairs of widget attributes.
    for attribute_name, value in pairwise(token):
        attrs[attribute_name] = value
    return CrispyFieldNode(field, attrs)
@register.simple_tag()
def crispy_addon(field, append="", prepend="", form_show_labels=True):
    """
    Renders a form field using bootstrap's prepended or appended text::
        {% crispy_addon form.my_field prepend="$" append=".00" %}
    You can also just prepend or append like so
        {% crispy_addon form.my_field prepend="$" %}
        {% crispy_addon form.my_field append=".00" %}

    Raises TypeError when neither prepend nor append is given.
    Returns None implicitly when *field* is falsy.
    """
    if field:
        context = Context({
            'field': field,
            'form_show_errors': True,
            'form_show_labels': form_show_labels,
        })
        template = loader.get_template('%s/layout/prepended_appended_text.html' % get_template_pack())
        context['crispy_prepended_text'] = prepend
        context['crispy_appended_text'] = append
        if not prepend and not append:
            raise TypeError("Expected a prepend and/or append argument")
        return template.render(context)
| mit |
guettli/django | tests/middleware_exceptions/tests.py | 24 | 6903 | from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import patch_logger
from . import middleware as mw
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class MiddlewareTests(SimpleTestCase):
    """Exercises how responses/exceptions flow through the MIDDLEWARE chain
    (process_view, process_template_response, process_exception)."""
    def tearDown(self):
        # Each test middleware appends to the module-level mw.log; reset it.
        mw.log = []
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessViewNoneMiddleware'])
    def test_process_view_return_none(self):
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(mw.log, ['processed view normal_view'])
        self.assertEqual(response.content, b'OK')
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessViewMiddleware'])
    def test_process_view_return_response(self):
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(response.content, b'Processed view normal_view')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware',
        'middleware_exceptions.middleware.LogMiddleware',
    ])
    def test_templateresponse_from_process_view_rendered(self):
        """
        TemplateResponses returned from process_view() must be rendered before
        being passed to any middleware that tries to access response.content,
        such as middleware_exceptions.middleware.LogMiddleware.
        """
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(response.content, b'Processed view normal_view\nProcessViewTemplateResponseMiddleware')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware',
        'middleware_exceptions.middleware.TemplateResponseMiddleware',
    ])
    def test_templateresponse_from_process_view_passed_to_process_template_response(self):
        """
        TemplateResponses returned from process_view() should be passed to any
        template response middleware.
        """
        response = self.client.get('/middleware_exceptions/view/')
        expected_lines = [
            b'Processed view normal_view',
            b'ProcessViewTemplateResponseMiddleware',
            b'TemplateResponseMiddleware',
        ]
        self.assertEqual(response.content, b'\n'.join(expected_lines))
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.TemplateResponseMiddleware'])
    def test_process_template_response(self):
        response = self.client.get('/middleware_exceptions/template_response/')
        self.assertEqual(response.content, b'template_response OK\nTemplateResponseMiddleware')
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.LogMiddleware'])
    def test_view_exception_converted_before_middleware(self):
        # PermissionDenied must become a 403 response before middleware runs.
        response = self.client.get('/middleware_exceptions/permission_denied/')
        self.assertEqual(mw.log, [(response.status_code, response.content)])
        self.assertEqual(response.status_code, 403)
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessExceptionMiddleware'])
    def test_view_exception_handled_by_process_exception(self):
        response = self.client.get('/middleware_exceptions/error/')
        self.assertEqual(response.content, b'Exception caught')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessExceptionLogMiddleware',
        'middleware_exceptions.middleware.ProcessExceptionMiddleware',
    ])
    def test_response_from_process_exception_short_circuits_remainder(self):
        # The inner middleware handles the exception, so the outer logger
        # never sees it.
        response = self.client.get('/middleware_exceptions/error/')
        self.assertEqual(mw.log, [])
        self.assertEqual(response.content, b'Exception caught')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.LogMiddleware',
        'middleware_exceptions.middleware.NotFoundMiddleware',
    ])
    def test_exception_in_middleware_converted_before_prior_middleware(self):
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(mw.log, [(404, response.content)])
        self.assertEqual(response.status_code, 404)
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessExceptionMiddleware'])
    def test_exception_in_render_passed_to_process_exception(self):
        response = self.client.get('/middleware_exceptions/exception_in_render/')
        self.assertEqual(response.content, b'Exception caught')
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class RootUrlconfTests(SimpleTestCase):
    """Behavior when the ROOT_URLCONF setting is missing entirely."""
    @override_settings(ROOT_URLCONF=None)
    def test_missing_root_urlconf(self):
        # Removing ROOT_URLCONF is safe, as override_settings will restore
        # the previously defined settings.
        del settings.ROOT_URLCONF
        with self.assertRaises(AttributeError):
            self.client.get("/middleware_exceptions/view/")
class MyMiddleware(object):
    """Test middleware that always deactivates itself via MiddlewareNotUsed."""
    def __init__(self, get_response=None):
        raise MiddlewareNotUsed
    def process_request(self, request):
        pass
class MyMiddlewareWithExceptionMessage(object):
    """Like MyMiddleware, but MiddlewareNotUsed carries a custom message."""
    def __init__(self, get_response=None):
        raise MiddlewareNotUsed('spam eggs')
    def process_request(self, request):
        pass
@override_settings(
    DEBUG=True,
    ROOT_URLCONF='middleware_exceptions.urls',
    MIDDLEWARE=['django.middleware.common.CommonMiddleware'],
)
class MiddlewareNotUsedTests(SimpleTestCase):
    """MiddlewareNotUsed should deactivate the middleware and be logged at
    DEBUG level (only when settings.DEBUG is True)."""
    rf = RequestFactory()
    def test_raise_exception(self):
        request = self.rf.get('middleware_exceptions/view/')
        with self.assertRaises(MiddlewareNotUsed):
            MyMiddleware().process_request(request)
    @override_settings(MIDDLEWARE=['middleware_exceptions.tests.MyMiddleware'])
    def test_log(self):
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 1)
        self.assertEqual(
            calls[0],
            "MiddlewareNotUsed: 'middleware_exceptions.tests.MyMiddleware'"
        )
    @override_settings(MIDDLEWARE=['middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'])
    def test_log_custom_message(self):
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 1)
        self.assertEqual(
            calls[0],
            "MiddlewareNotUsed('middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'): spam eggs"
        )
    @override_settings(DEBUG=False)
    def test_do_not_log_when_debug_is_false(self):
        # No DEBUG logging of MiddlewareNotUsed outside DEBUG mode.
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 0)
| bsd-3-clause |
Adel-Magebinary/odoo | addons/account_asset/wizard/__init__.py | 445 | 1122 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_change_duration
import wizard_asset_compute
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
alianmohammad/pd-gem5 | util/checkpoint-tester.py | 63 | 5044 | #! /usr/bin/env python
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Steve Reinhardt
#
# Basic test script for checkpointing.
#
# Given an M5 command and an interval (in ticks), this script will:
# 1. Run the command, dumping periodic checkpoints at the given interval.
# 2. Rerun the command for each pair of adjacent checkpoints:
# a. Restore from checkpoint N
# b. Run until the timestamp of checkpoint N+1
# c. Dump a checkpoint and end the simulation
# d. Diff the new checkpoint with the original checkpoint N+1
#
# Note that '--' must be used to separate the script options from the
# M5 command line.
#
# Caveats:
#
# - This script relies on the checkpoint options implemented in
# configs/common/Simulation.py, so it works with commands based on
# the se.py and fs.py scripts in configs/example, but does not work
# directly with the existing regression tests.
# - Interleaving simulator and program output can cause discrepancies
# in the file position checkpoint information since different runs
# have different amount of simulator output.
# - Probably lots more issues we don't even know about yet.
#
# Examples:
#
# util/checkpoint-tester.py -i 400000 -- build/ALPHA_SE/m5.opt \
# configs/example/se.py -c tests/test-progs/hello/bin/alpha/tru64/hello \
# --output=progout --errout=progerr
#
# util/checkpoint-tester.py -i 200000000000 -- build/ALPHA_FS/m5.opt \
# configs/example/fs.py --script tests/halt.sh
#
import os, sys, re
import subprocess
import optparse
# Parse command-line options: -i/--interval is the checkpoint period in
# ticks, -d/--directory is where all test artifacts are created.
parser = optparse.OptionParser()

parser.add_option('-i', '--interval', type='int')
parser.add_option('-d', '--directory', default='checkpoint-test')

(options, args) = parser.parse_args()

interval = options.interval

# Refuse to reuse an existing directory so stale checkpoints from a
# previous run can never contaminate the diff results.
if os.path.exists(options.directory):
    print 'Error: test directory', options.directory, 'exists'
    print '       Tester needs to create directory from scratch'
    sys.exit(1)

top_dir = options.directory
os.mkdir(top_dir)

# Record the exact command line so this run can be reproduced later.
cmd_echo = open(os.path.join(top_dir, 'command'), 'w')
print >>cmd_echo, ' '.join(sys.argv)
cmd_echo.close()

# Everything after '--' is the simulator invocation: binary first, then
# its arguments (see the header comment above for examples).
m5_binary = args[0]
options = args[1:]

initial_args = ['--take-checkpoints', '%d,%d' % (interval, interval)]

cptdir = os.path.join(top_dir, 'm5out')

print '===> Running initial simulation.'
# NOTE(review): '-red' presumably redirects the simulator's output
# directory to cptdir -- verify against the m5 binary's option list.
subprocess.call([m5_binary] + ['-red', cptdir] + options + initial_args)

# Collect the checkpoint tick numbers the first run dumped, ascending.
dirs = os.listdir(cptdir)
expr = re.compile('cpt\.([0-9]*)')
cpts = []
for dir in dirs:
    match = expr.match(dir)
    if match:
        cpts.append(int(match.group(1)))
cpts.sort()

# We test by loading checkpoint N, simulating to (and dumping at)
# checkpoint N+1, then comparing the resulting checkpoint with the
# original checkpoint N+1.  Thus the number of tests we can run is one
# less than the number of checkpoints.
for i in range(1, len(cpts)):
    print '===> Running test %d of %d.' % (i, len(cpts)-1)
    mydir = os.path.join(top_dir, 'test.%d' % i)
    subprocess.call([m5_binary] + ['-red', mydir] + options + initial_args +
                    ['--max-checkpoints', '1', '--checkpoint-dir', cptdir,
                     '--checkpoint-restore', str(i)])
    # Diff the fresh checkpoint against the reference one, ignoring
    # '##'-prefixed metadata lines which legitimately differ per run.
    cpt_name = 'cpt.%d' % cpts[i]
    diff_name = os.path.join(mydir, 'diffout')
    diffout = open(diff_name, 'w')
    subprocess.call(['diff', '-ru', '-I', '^##.*',
                     '%s/%s' % (cptdir, cpt_name),
                     '%s/%s' % (mydir, cpt_name)], stdout=diffout)
    diffout.close()
    # print out the diff
    diffout = open(diff_name)
    print diffout.read(),
    diffout.close()
| bsd-3-clause |
AutorestCI/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/models/deployment_extended.py | 2 | 1325 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DeploymentExtended(Model):
    """Deployment information.

    Auto-generated msrest model (see file header); edits may be
    overwritten when the code is regenerated.

    :param id: The ID of the deployment.
    :type id: str
    :param name: The name of the deployment. Required.
    :type name: str
    :param properties: Deployment properties.
    :type properties:
     ~azure.mgmt.resource.resources.v2016_09_01.models.DeploymentPropertiesExtended
    """

    # msrest validation rules: 'name' must always be supplied.
    _validation = {
        'name': {'required': True},
    }

    # Maps Python attribute names to wire-format keys and types for
    # msrest serialization/deserialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'DeploymentPropertiesExtended'},
    }

    def __init__(self, name, id=None, properties=None):
        super(DeploymentExtended, self).__init__()
        self.id = id
        self.name = name
        self.properties = properties
aavidad/grx-asistencia | usuarios_win.py | 1 | 11812 | from __future__ import with_statement
from fabric.api import settings, abort, run, env, sudo, local, get , put, hosts
from fabric.contrib.console import confirm
from gi.repository import Gtk
import os,tablabel,subprocess
import time
class Mensaje(Gtk.MessageDialog):
    """Modal warning dialog that shows an error message to the user.

    'Mensaje' is Spanish for 'message'; the dialog title text is
    user-facing Spanish and must not be changed here.
    """

    def __init__(self, texto):
        # texto: the error detail string displayed inside the dialog body.
        Gtk.MessageDialog.__init__(self, parent=None,
                                   flags=Gtk.DialogFlags.MODAL,
                                   type=Gtk.MessageType.WARNING,
                                   buttons=Gtk.ButtonsType.OK,
                                   message_format="ATENCION se ha producido un ERROR:")
        self.set_default_size(150, 100)

        label = Gtk.Label(texto)

        # Add the detail label to the dialog's content area and show it.
        box = self.get_content_area()
        box.add(label)
        self.show_all()
class Usuarios(Gtk.Grid):
    """User-administration panel for a remote machine.

    Builds its UI from a Glade file and wires buttons to remote actions
    executed over SSH via fabric (run/sudo/put) plus local helper shell
    scripts that query Active Directory through LDAP.

    NOTE(review): passwords are passed around as plain strings and
    interpolated into shell commands in several handlers below -- this
    is a shell-injection and credential-exposure risk; confirm the
    deployment context before hardening.
    """

    def __init__(self,widget,nombre,ip,usuario,puerto,password,tecnico,password_tecnico):
        # nombre: target account name; ip/puerto: SSH endpoint;
        # usuario/password: SSH credentials; tecnico/password_tecnico:
        # technician (admin) credentials used for AD operations.
        Gtk.Grid.__init__(self, row_spacing=20, column_spacing=20)
        builder = Gtk.Builder()
        builder.add_from_file("/usr/share/grx/glade/usuario.glade")
        box_usuario = builder.get_object("box_usuario")
        notebook_usuario = builder.get_object("notebook_usuario")
        spinner = builder.get_object("spinner")
        estado = builder.get_object("estado")
        entry_nombre = builder.get_object("entry_nombre")
        entry_usuario = builder.get_object("entry_usuario")
        entry_dir = builder.get_object("entry_dir")
        entry_veleta = builder.get_object("entry_veleta")
        entry_estado = builder.get_object("entry_estado")
        entry_ultimo_login = builder.get_object("entry_ultimo_login")
        entry_correo = builder.get_object("entry_correo")
        entry_badpwd = builder.get_object("entry_badpwd")
        entry_usuario.set_text(nombre)
        entry_dir.set_text("/home/"+nombre)
        entry_cambio_clave = builder.get_object("entry_cambio_clave")
        # Wire entry icons and buttons to their remote-action handlers.
        entry_cambio_clave.connect("icon-press",self.on_entry_cambio_clave,tecnico,password_tecnico,nombre)
        entry_usuario.connect("activate",self.on_Btn_Info_clicked,usuario,nombre,ip ,puerto,password,tecnico,password_tecnico,entry_usuario,entry_estado,entry_nombre,entry_ultimo_login,entry_correo,entry_badpwd,notebook_usuario)
        entry_estado.connect("icon-press",self.on_entry_estado_icon_press, usuario,nombre,ip ,puerto,password,tecnico,password_tecnico)
        entry_correo.connect("icon-press",self.on_entry_correo_icon_press)
        Btn_Config_pam_mount = builder.get_object("Btn_Config_pam_mount")
        Btn_Config_pam_mount.connect("clicked", self.on_Btn_Config_pam_mount_clicked,ip,usuario,nombre,puerto,password,notebook_usuario)
        Btn_Refres = builder.get_object("Btn_Refres")
        Btn_Refres.connect("clicked", self.on_Btn_Refres_clicked,usuario,nombre,ip,puerto,password)
        Btn_Info = builder.get_object("Btn_Info")
        Btn_Info.connect("clicked", self.on_Btn_Info_clicked,usuario,nombre,ip ,puerto,password,tecnico,password_tecnico,entry_usuario,entry_estado,entry_nombre,entry_ultimo_login,entry_correo,entry_badpwd,notebook_usuario)
        Btn_Carpeta = builder.get_object("Btn_Carpeta")
        Btn_Carpeta.connect("clicked", self.on_Btn_Carpeta_clicked,usuario,nombre,ip,puerto,password,spinner,estado)
        #Btn_Refres2 = builder.get_object("Btn_Refres")
        #Btn_Refres.connect("clicked", self.on_Btn_Refres_clicked,nombre,notebook)
        #Btn_Backup2 = builder.get_object("Btn_Backup_impresora")
        #Btn_Backup.connect("clicked", self.on_Btn_Backup_clicked,nombre)
        self.add(box_usuario)

    def mensaje(self,texto):
        """Show a modal error dialog with the given text and wait for OK."""
        dialog = Mensaje(texto)
        dialog.run()
        dialog.destroy()

    def on_Btn_Config_pam_mount_clicked(self, widget, ip,usuario,nombre,puerto,password,notebook):
        """Open the remote user's .pam_mount.conf.xml in an editable tab."""
        scrolledwindow = Gtk.ScrolledWindow()
        scrolledwindow.set_hexpand(True)
        scrolledwindow.set_vexpand(True)
        grid=Gtk.Grid()
        boton=Gtk.Button(label='Grabar')
        grid.attach(boton, 1,0, 2, 1)
        page = Gtk.TextView()
        page.set_editable(True)
        grid.attach_next_to(page, boton, Gtk.PositionType.BOTTOM, 1, 2)
        scrolledwindow.add(grid)
        # fabric context: all run/sudo calls below go to the remote host.
        with settings(host_string=ip,port=puerto,password=password,user=usuario):
            ruta = subprocess.check_output (['mktemp'])
            # 'Grabar' (save) pushes the edited buffer back to the host.
            boton.connect("clicked", self.graba_pam_mount,ip,usuario,nombre,puerto,password,page)
            try:
                archivo=sudo ('cat /home/'+nombre+'/.pam_mount.conf.xml')
            except:
                self.mensaje("No se ha podido abrir el archivo .pam_mount.conf.xml")
                return
            textbuffer = page.get_buffer()
            textbuffer.set_text(archivo)
            # Add the editor as a closable notebook tab.
            tab_label = tablabel.TabLabel(".pam_mount.conf.xml "+ ip,Gtk.Image.new_from_file("./icons/info.png"))
            tab_label.connect("close-clicked", tablabel.on_close_clicked, notebook, scrolledwindow)
            notebook.append_page(scrolledwindow ,tab_label)
            self.show_all()

    def graba_pam_mount(self, widget, ip, usuario,nombre, puerto,password,page):
        """Upload the edited pam_mount config back to the remote home dir."""
        with settings(host_string=ip,port=puerto,password=password,user=usuario):
            try:
                # Dump the TextView buffer to a local temp file, then
                # push it with fabric's put() using sudo on the far end.
                textbuffer = page.get_buffer()
                start_iter = textbuffer.get_start_iter()
                end_iter = textbuffer.get_end_iter()
                archivo=textbuffer.get_text(start_iter, end_iter, True)
                tmp = subprocess.check_output (['mktemp'])
                archi=open(tmp,'w')
                archi.write(archivo)
                archi.close()
                put (use_sudo=True, remote_path="/home/"+nombre+"/.pam_mount.conf.xml", local_path=tmp)
            except:
                self.mensaje("No se ha podido grabar el archivo de iptables en el equipo")

    def on_entry_correo_icon_press(self, widget,enum,void):
        """Launch the default mail client addressed to the entry's text."""
        os.system("xdg-email "+widget.get_text())

    def on_Btn_Refres_clicked(self, widget, usuario,nombre,ip,puerto,password):
        """Mount the user's encrypted .Private dir over sshfs and browse it."""
        with settings(host_string=ip,port=puerto,password=password,user=usuario):
            try:
                # NOTE(review): check_output returns the path with a
                # trailing newline -- confirm sshfs/nemo tolerate it.
                monta = subprocess.check_output (['mktemp','-d','-t',ip+'-'+nombre+'-XXXXXX'])
                sudo ('chmod 777 /home/'+nombre+';chown -R administrador /home/'+nombre+'/.Private')
                os.system ('sshfs -p '+puerto+' -o reconnect -C -o workaround=all '+usuario+'@'+ip+':/home/'+nombre+'/.Private'+' '+monta+'')
                #os.system('printf "%s\0" "clave" | ecryptfs-add-passphrase $fnek | grep "^Inserted" | sed -e "s/^.*\[//" -e "s/\].*$//" -e "s/[^0-9a-f]//g")
                result = os.system('nemo '+monta)
            except:
                self.mensaje ("No se ha podido montar la carpeta remota")

    ####################################################
    def on_Btn_Info_clicked(self, widget, usuario,nombre,ip ,puerto,password,tecnico,password_tecnico,entry_usuario,entry_estado,entry_nombre,entry_ultimo_login,entry_correo,entry_badpwd,notebook):
        """Query AD/LDAP for the account and populate the info entries.

        Also appends the raw LDAP dump as a closable notebook tab.
        """
        with settings(host_string=ip,port=puerto,password=password,user=usuario):
            tecnico=tecnico.split("@")[0]
            try:
                validar_tecnico= subprocess.check_output (['/usr/share/grx/ldap/validar_tecnico.sh',tecnico, password_tecnico])
            except:
                print "No se ha podido validar al tecnico. Compruebe la clave"
            try:
                ldap= subprocess.check_output (['/usr/share/grx/ldap/ldap.sh',nombre])
            except:
                print "No se ha podido realizar la consulta AD"
            #archi=open('/usr/share/grx/ldap/ldap','r') ###En casa
            #ldap = archi.read() ###En casa
            ldap_lista=ldap.split("\n")
            # Find and check the account lockout state.
            lock=filter(lambda x:'lockoutTime:' in x, ldap_lista)
            strl=''.join(lock)
            if (strl.split(":")[1]).strip()=="0":
                estado="Activa"
            else:
                estado="BLOQUEADA"
            entry_estado.set_text(estado)
            # Find the mail address and place it in entry_correo.
            tmp=filter(lambda x:'mail:' in x, ldap_lista)
            mail=''.join(tmp)
            entry_correo.set_text((mail.split(":")[1]).strip())
            # Number of failed password attempts (badPwdCount).
            tmp=filter(lambda x:'badPwdCount:' in x, ldap_lista)
            badpwd=''.join(tmp)
            entry_badpwd.set_text((badpwd.split(":")[1]).strip())
            # Full name (cn attribute).
            tmp=filter(lambda x:'cn:' in x, ldap_lista)
            ncompleto=''.join(tmp)
            entry_nombre.set_text((ncompleto.split(":")[1]).strip())
            # Last logon lookup (disabled).
            #tmp=filter(lambda x:'lastLogon:' in x, ldap_lista)
            #lastlogon=''.join(tmp)
            #entry_ultimo_login.set_text((lastlogon.split(":")[1]).strip())
            # Account expiry date (accountExpires).
            tmp=filter(lambda x:'accountExpires:' in x, ldap_lista)
            lastlogon=''.join(tmp)
            fecha=(lastlogon.split(":")[1]).strip()
            # AD uses this sentinel value for 'never expires'.
            if fecha=="9223372036854775807":
                entry_ultimo_login.set_text("No Caduca")
            else:
                #$(expr $(expr $1 / 10000000) - 11644473600)" sec GMT" +"%d/%m/%Y %H:%M:%S"
                # NOTE(review): AD stores 100ns intervals since 1601; the
                # shell recipe above converts to epoch seconds, but here
                # the raw value is fed to time.localtime() directly --
                # the displayed date is likely wrong; verify.
                try:
                    tmp=time.strftime("%D %H:%M", time.localtime(int(fecha)))
                except:
                    tmp="Desconocido"
                entry_ultimo_login.set_text(tmp)
            # Last password change (disabled).
            #tmp=filter(lambda x:'pwdLastSet:' in x, ldap_lista)
            #pwdLastSet=''.join(tmp)
            #entry_ultimo_login.set_text((pwdLastSet.split(":")[1]).strip())
            # Account creation time (disabled).
            #tmp=filter(lambda x:'whenCreated:' in x, ldap_lista)
            #whenCreated=''.join(tmp)
            #entry_ultimo_login.set_text((whenCreated.split(":")[1]).strip())
            # Last modification time (disabled).
            #tmp=filter(lambda x:'whenChanged:' in x, ldap_lista)
            #whenChanged=''.join(tmp)
            #entry_ultimo_login.set_text((whenChanged.split(":")[1]).strip())
            # Session logon count (disabled).
            #tmp=filter(lambda x:'logonCount:' in x, ldap_lista)
            #logonCount=''.join(tmp)
            #entry_ultimo_login.set_text((logonCount.split(":")[1]).strip())
            # Show the raw LDAP output in a new closable tab.
            scrolledwindow = Gtk.ScrolledWindow()
            scrolledwindow.set_hexpand(True)
            scrolledwindow.set_vexpand(True)
            page = Gtk.TextView()
            scrolledwindow.add(page)
            textbuffer = page.get_buffer()
            textbuffer.set_text(ldap)
            tab_label = tablabel.TabLabel("ldap "+ nombre,Gtk.Image.new_from_file("./icons/info.png"))
            tab_label.connect("close-clicked", tablabel.on_close_clicked, notebook, scrolledwindow)
            notebook.append_page(scrolledwindow ,tab_label)
            self.show_all()

    ################################################
    def on_entry_cambio_clave(self, widget,pos, otro, tecnico,password_tecnico,nombre):
        """Change the account password via 'net ads password'.

        NOTE(review): the new password and the technician password are
        interpolated into a shell string -- injection/exposure risk.
        """
        clave=widget.get_text()
        try:
            tecnico=tecnico.split("@")[0]
            validar_tecnico= subprocess.check_output (['/usr/share/grx/ldap/validar_tecnico.sh',tecnico, password_tecnico])
            cmd='(echo '+clave+'; echo '+clave+' ;)| net ads password -U '+tecnico+'%'+password_tecnico+' '+nombre
            os.system(cmd)
        except:
            print "No se ha podido validar al tecnico. Compruebe la clave"

    def on_entry_estado_icon_press(self, widget, pos, otro, usuario,nombre,ip ,puerto,password,tecnico,password_tecnico):
        """Unlock the account by resetting lockoutTime to 0 via ldapmodify."""
        try:
            tecnico=tecnico.split("@")[0]
            validar_tecnico= subprocess.check_output (['/usr/share/grx/ldap/validar_tecnico.sh',tecnico, password_tecnico])
            ldap= subprocess.check_output (['/usr/share/grx/ldap/ldap.sh',nombre])
            #archi=open('/usr/share/grx/ldap/ldap','r') ###En casa
            #ldap = archi.read() ###En casa
            ldap_lista=ldap.split("\n")
            # Extract the entry's dn: line to target the modification.
            tmp=filter(lambda x:'dn:' in x, ldap_lista)
            dn=''.join(tmp)
            # Build an LDIF that clears lockoutTime, write it to a temp
            # file and apply it with ldapmodify.
            archivo=dn+'\nchangetype: modify\nreplace: lockoutTime\nlockoutTime: 0'
            tmp_ruta = subprocess.check_output (['mktemp'])
            archi=open(tmp_ruta,'w')
            archi.write(archivo)
            archi.close()
            resultado = subprocess.check_output (['ldapmodify', '-f',tmp_ruta])
            print resultado
            widget.set_text("Activa")
        except:
            print "No se ha podido validar al tecnico. Compruebe la clave"

    def on_Btn_Carpeta_clicked(self, widget, usuario,nombre,ip ,puerto,password,spinner,estado):
        """Mount the user's remote home over sshfs and open it in nemo."""
        spinner.start()
        estado.set_text("Montando carpeta...")
        with settings(host_string=ip,port=puerto,password=password,user=usuario):
            try:
                # NOTE(review): mktemp output carries a trailing newline.
                monta = subprocess.check_output (['mktemp','-d','-t',ip+'-'+nombre+'-XXXXXX'])
                os.system ('sshfs -p '+puerto+' -o reconnect -C -o workaround=all '+usuario+'@'+ip+':/home/'+nombre+' '+monta+'')
                result = os.system('nemo '+monta)
            except:
                self.mensaje ("No se ha podido montar la carpeta remota")
        spinner.stop()
        estado.set_text("")
| gpl-3.0 |
hoosteeno/kuma | vendor/packages/translate/storage/subtitles.py | 24 | 7442 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2009 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Class that manages subtitle files for translation.
This class makes use of the subtitle functionality of ``gaupol``.
.. seealso:: gaupol/agents/open.py::open_main
A patch to gaupol is required to open utf-8 files successfully.
"""
import os
import tempfile
from cStringIO import StringIO
try:
from aeidon import Subtitle, documents, newlines
from aeidon.encodings import detect
from aeidon.files import (AdvSubStationAlpha, MicroDVD, SubRip,
SubStationAlpha, new)
from aeidon.util import detect_format as determine
except ImportError:
from gaupol import FormatDeterminer, documents
from gaupol.encodings import detect
from gaupol.files import (AdvSubStationAlpha, MicroDVD, SubRip,
SubStationAlpha, new)
from gaupol.newlines import newlines
from gaupol.subtitle import Subtitle
from translate.storage import base
_determiner = FormatDeterminer()
determine = _determiner.determine
from translate.storage import base
class SubtitleUnit(base.TranslationUnit):
    """A subtitle entry that is translatable"""

    def __init__(self, source=None, encoding="utf_8"):
        # Start/end display times are filled in by SubtitleFile._parse()
        # after construction; they remain None for hand-built units.
        self._start = None
        self._end = None
        if source:
            self.source = source
        super(SubtitleUnit, self).__init__(source)

    def getnotes(self, origin=None):
        """Return developer notes: how long the subtitle stays on screen.

        NOTE(review): relies on self._duration, which is only assigned by
        SubtitleFile._parse() -- calling this on a unit created directly
        would raise AttributeError; confirm intended usage.
        """
        if origin in ['programmer', 'developer', 'source code', None]:
            return "visible for %d seconds" % self._duration
        else:
            return ''

    def getlocations(self):
        # The location is the display interval, e.g. "start-->end".
        return ["%s-->%s" % (self._start, self._end)]

    def getid(self):
        # The time interval doubles as the unit's unique identifier.
        return self.getlocations()[0]
class SubtitleFile(base.TranslationStore):
    """A subtitle file"""

    UnitClass = SubtitleUnit

    def __init__(self, inputfile=None, unitclass=UnitClass):
        """construct a Subtitle file, optionally reading in from inputfile."""
        self.UnitClass = unitclass
        base.TranslationStore.__init__(self, unitclass=unitclass)
        self.units = []
        self.filename = None
        # Backend gaupol/aeidon file object; created lazily on parse (or
        # by the format-specific subclasses below).
        self._subtitlefile = None
        self._encoding = 'utf_8'
        if inputfile is not None:
            self._parsefile(inputfile)

    def __str__(self):
        """Serialize all units through the gaupol backend.

        Returns an encoded byte string in the backend's encoding.
        """
        subtitles = []
        for unit in self.units:
            subtitle = Subtitle()
            # Prefer the translation; fall back to the source text.
            subtitle.main_text = unit.target or unit.source
            subtitle.start = unit._start
            subtitle.end = unit._end
            subtitles.append(subtitle)
        output = StringIO()
        self._subtitlefile.write_to_file(subtitles, documents.MAIN, output)
        return output.getvalue().encode(self._subtitlefile.encoding)

    def _parse(self):
        """Detect encoding/format of self.filename and load its subtitles."""
        try:
            self._encoding = detect(self.filename)
            # ASCII content is read as UTF-8, a strict superset.
            if self._encoding == 'ascii':
                self._encoding = 'utf_8'
            self._format = determine(self.filename, self._encoding)
            self._subtitlefile = new(self._format, self.filename, self._encoding)
            for subtitle in self._subtitlefile.read():
                newunit = self.addsourceunit(subtitle.main_text)
                # Attach timing info used by getlocations()/getnotes().
                newunit._start = subtitle.start
                newunit._end = subtitle.end
                newunit._duration = subtitle.duration_seconds
        except Exception as e:
            # Wrap any backend failure in the toolkit's ParseError.
            raise base.ParseError(e)

    def _parsefile(self, storefile):
        # Accept a file-like object (with .name or .filename) or a path
        # string; gaupol only reads from named files on disk.
        if hasattr(storefile, 'name'):
            self.filename = storefile.name
            storefile.close()
        elif hasattr(storefile, 'filename'):
            self.filename = storefile.filename
            storefile.close()
        elif isinstance(storefile, basestring):
            self.filename = storefile

        if self.filename and os.path.exists(self.filename):
            self._parse()
        else:
            # No usable path: fall back to reading the content itself.
            self.parse(storefile.read())

    @classmethod
    def parsefile(cls, storefile):
        """parse the given file"""
        newstore = cls()
        newstore._parsefile(storefile)
        return newstore

    def parse(self, input):
        """Parse subtitle content given as a string or file-like object."""
        if isinstance(input, basestring):
            # Gaupol does not allow parsing from strings, so spill the
            # content to a temporary file and parse that instead.
            if self.filename:
                tmpfile, tmpfilename = tempfile.mkstemp(suffix=self.filename)
            else:
                tmpfile, tmpfilename = tempfile.mkstemp()
            tmpfile = open(tmpfilename, 'w')
            tmpfile.write(input)
            tmpfile.close()
            self._parsefile(tmpfilename)
            os.remove(tmpfilename)
        else:
            self._parsefile(input)
############# format specific classes ###################
# the generic SubtitleFile can adapt to any format, but only the
# specilized classes can be used to construct a new file
class SubRipFile(SubtitleFile):
    """specialized class for SubRip (.srt) files only"""

    Name = "SubRip subtitles file"
    Extensions = ['srt']

    def __init__(self, *args, **kwargs):
        super(SubRipFile, self).__init__(*args, **kwargs)
        # When constructed empty (not parsed from disk), attach a fresh
        # SubRip backend so the store can still be serialized.
        if self._subtitlefile is None:
            self._subtitlefile = SubRip(self.filename or '', self._encoding)
        if self._subtitlefile.newline is None:
            self._subtitlefile.newline = newlines.UNIX
class MicroDVDFile(SubtitleFile):
    """specialized class for MicroDVD (.sub) files only"""

    Name = "MicroDVD subtitles file"
    Extensions = ['sub']

    def __init__(self, *args, **kwargs):
        # Bug fix: previously called super(SubRipFile, self).__init__,
        # which raises TypeError because self is a MicroDVDFile, not a
        # SubRipFile.
        super(MicroDVDFile, self).__init__(*args, **kwargs)
        # When constructed empty (not parsed from disk), attach a fresh
        # MicroDVD backend so the store can still be serialized.
        if self._subtitlefile is None:
            self._subtitlefile = MicroDVD(self.filename or '', self._encoding)
        if self._subtitlefile.newline is None:
            self._subtitlefile.newline = newlines.UNIX
class AdvSubStationAlphaFile(SubtitleFile):
    """specialized class for Advanced Substation Alpha (.ass) files only"""

    Name = "Advanced Substation Alpha subtitles file"
    Extensions = ['ass']

    def __init__(self, *args, **kwargs):
        # Bug fix: previously called super(SubRipFile, self).__init__,
        # which raises TypeError because self is not a SubRipFile.
        super(AdvSubStationAlphaFile, self).__init__(*args, **kwargs)
        # When constructed empty, attach a fresh ASS backend so the
        # store can still be serialized.
        if self._subtitlefile is None:
            self._subtitlefile = AdvSubStationAlpha(self.filename or '', self._encoding)
        if self._subtitlefile.newline is None:
            self._subtitlefile.newline = newlines.UNIX
class SubStationAlphaFile(SubtitleFile):
    """specialized class for Substation Alpha (.ssa) files only"""

    Name = "Substation Alpha subtitles file"
    Extensions = ['ssa']

    def __init__(self, *args, **kwargs):
        # Bug fix: previously called super(SubRipFile, self).__init__,
        # which raises TypeError because self is not a SubRipFile.
        super(SubStationAlphaFile, self).__init__(*args, **kwargs)
        # When constructed empty, attach a fresh SSA backend so the
        # store can still be serialized.
        if self._subtitlefile is None:
            self._subtitlefile = SubStationAlpha(self.filename or '', self._encoding)
        if self._subtitlefile.newline is None:
            self._subtitlefile.newline = newlines.UNIX
| mpl-2.0 |
bretthandrews/flexCE | flexCE/fileio/pickle_io.py | 1 | 2232 | """Pickle and unpickle simulation output.
Save and reload entire simulation object from pickle files.
"""
from __future__ import print_function, division, absolute_import
import os
import pickle
def pickle_read(filename):
    """Read pickle file.

    Args:
        filename (str): name of pickle file.

    Returns:
        object: the unpickled object.

    Raises:
        IOError: if ``filename`` cannot be opened.
    """
    # Context manager guarantees the handle is closed even when
    # pickle.load() raises (the original leaked the handle on error).
    with open(filename, 'rb') as fin:
        return pickle.load(fin)
def pickle_write(obj, filename):
    """Write object to pickle file.

    Args:
        obj: object to be pickled.
        filename (str): name of output pickle file.
    """
    # -1 selects the highest available pickle protocol; the context
    # manager flushes and closes the file even if dump() raises
    # (the original leaked the handle on error).
    with open(filename, 'wb') as fout:
        pickle.dump(obj, fout, -1)
def _make_sim_path(path_out, sim_id, stem='box'):
"""Construct path to simulation output pickle file.
Args:
path_out (str): directory of pickle file.
sim_id (str): simulation ID number.
Returns:
str: file path and name
"""
sim_id = str(sim_id)
path_sim = os.path.join(path_out, ''.join(['sim', sim_id]))
fname = '{}{}.pck'.format(stem, sim_id)
return os.path.join(path_sim, fname)
def box_read(path_out, sim_id):
    """Read in ChemEvol instance from box<sim_id>.pck file.

    Args:
        path_out (str): directory of pickle file.
        sim_id (str): simulation ID number.

    Returns:
        object: instance of ChemEvol class ('box' object).
    """
    return pickle_read(_make_sim_path(path_out, sim_id, stem='box'))
def ab_read(path_out, sim_id):
    """Read in Abundances instance from ab<sim_id>.pck file.

    Args:
        path_out (str): directory of pickle file.
        sim_id (str): simulation ID number.

    Returns:
        object: instance of Abundances class ('ab' object).
    """
    return pickle_read(_make_sim_path(path_out, sim_id, stem='ab'))
def sim_read(path_out, sim_id):
    """Read in box and ab objects.

    Args:
        path_out (str): directory of pickle file.
        sim_id (str): simulation ID number.

    Returns:
        ChemEvol instance, Abundances instance (tuple): 'box' and 'ab' objects
    """
    return box_read(path_out, sim_id), ab_read(path_out, sim_id)
| mit |
iocoop/beancount | src/python/beancount/query/query_execute_test.py | 1 | 27719 | __author__ = "Martin Blais <blais@furius.ca>"
import datetime
import io
import unittest
import textwrap
from beancount.core.number import D
from beancount.core.number import Decimal
from beancount.core import inventory
from beancount.query import query_parser
from beancount.query import query_compile as qc
from beancount.query import query_env as qe
from beancount.query import query_execute as qx
from beancount.parser import cmptest
from beancount.utils import misc_utils
from beancount import loader
class QueryBase(cmptest.TestCase):
    """Shared harness: parse, compile and execute BQL queries in tests."""

    maxDiff = 8192

    # Default execution contexts.
    xcontext_entries = qe.FilterEntriesEnvironment()
    xcontext_targets = qe.TargetsEnvironment()
    xcontext_postings = qe.FilterPostingsEnvironment()

    def setUp(self):
        super().setUp()
        self.parser = query_parser.Parser()

    def parse(self, bql_string):
        """Parse a query.

        Args:
          bql_string: An SQL query to be parsed.
        Returns:
          A parsed statement (Select() node).
        """
        return self.parser.parse(bql_string.strip())

    def compile(self, bql_string):
        """Parse a query and compile it.

        Args:
          bql_string: An SQL query to be parsed.
        Returns:
          A compiled EvalQuery node.
        """
        return qc.compile_select(self.parse(bql_string),
                                 self.xcontext_targets,
                                 self.xcontext_postings,
                                 self.xcontext_entries)

    def check_query(self,
                    input_string, bql_string,
                    expected_types, expected_rows,
                    sort_rows=False,
                    debug=False):
        """Load a ledger, run a query, and compare the result to expected.

        Args:
          input_string: Beancount ledger source to load.
          bql_string: The query to compile and execute.
          expected_types: Expected column types of the result.
          expected_rows: Expected result rows.
          sort_rows: If true, sort the result rows before comparing.
          debug: If true, print the results for inspection.
        """
        entries, _, options_map = loader.load_string(input_string)
        query = self.compile(bql_string)
        result_types, result_rows = qx.execute_query(query, entries, options_map)
        if debug:
            with misc_utils.box('result_types'):
                print(result_types)
            with misc_utils.box('result_rows'):
                print(result_rows)
        self.assertEqual(expected_types, result_types)
        if sort_rows:
            result_rows.sort()
        self.assertEqual(expected_rows, result_rows)

    def check_sorted_query(self,
                           input_string, bql_string,
                           expected_types, expected_rows):
        # Convenience wrapper: order-insensitive row comparison.
        return self.check_query(input_string, bql_string,
                                expected_types, expected_rows, True)
class CommonInputBase:
    """Mixin providing a small shared ledger, loaded once per test in setUp."""

    INPUT = textwrap.dedent("""
    2010-01-01 open Assets:Bank:Checking
    2010-01-01 open Assets:ForeignBank:Checking
    2010-01-01 open Assets:Bank:Savings

    2010-01-01 open Expenses:Restaurant

    2010-01-01 * "Dinner with Cero"
      Assets:Bank:Checking       100.00 USD
      Expenses:Restaurant       -100.00 USD

    2011-01-01 * "Dinner with Uno"
      Assets:Bank:Checking       101.00 USD
      Expenses:Restaurant       -101.00 USD

    2012-02-02 * "Dinner with Dos"
      Assets:Bank:Checking       102.00 USD
      Expenses:Restaurant       -102.00 USD

    2013-03-03 * "Dinner with Tres"
      Assets:Bank:Checking       103.00 USD
      Expenses:Restaurant       -103.00 USD

    2013-10-10 * "International Transfer"
      Assets:Bank:Checking   -50.00 USD
      Assets:ForeignBank:Checking   -60.00 CAD @ 1.20 USD

    2014-04-04 * "Dinner with Quatro"
      Assets:Bank:Checking       104.00 USD
      Expenses:Restaurant       -104.00 USD
    """)

    def setUp(self):
        super().setUp()
        # Parse the shared ledger once; tests read self.entries and
        # self.options_map directly.
        self.entries, _, self.options_map = loader.load_string(textwrap.dedent(self.INPUT))
class TestFilterEntries(CommonInputBase, QueryBase):
def test_filter_empty_from(self):
# Check that no filter outputs the very same thing.
filtered_entries = qx.filter_entries(self.compile("""
SELECT * ;
""").c_from, self.entries, self.options_map)
self.assertEqualEntries(self.entries, filtered_entries)
def test_filter_by_year(self):
filtered_entries = qx.filter_entries(self.compile("""
SELECT date, type FROM year(date) = 2012;
""").c_from, self.entries, self.options_map)
self.assertEqualEntries("""
2012-02-02 * "Dinner with Dos"
Assets:Bank:Checking 102.00 USD
Expenses:Restaurant -102.00 USD
""", filtered_entries)
def test_filter_by_expr1(self):
filtered_entries = qx.filter_entries(self.compile("""
SELECT date, type
FROM NOT (type = 'transaction' AND
(year(date) = 2012 OR year(date) = 2013));
""").c_from, self.entries, self.options_map)
self.assertEqualEntries("""
2010-01-01 open Assets:Bank:Checking
2010-01-01 open Assets:Bank:Savings
2010-01-01 open Expenses:Restaurant
2010-01-01 open Assets:ForeignBank:Checking
2010-01-01 * "Dinner with Cero"
Assets:Bank:Checking 100.00 USD
Expenses:Restaurant -100.00 USD
2011-01-01 * "Dinner with Uno"
Assets:Bank:Checking 101.00 USD
Expenses:Restaurant -101.00 USD
2014-04-04 * "Dinner with Quatro"
Assets:Bank:Checking 104.00 USD
Expenses:Restaurant -104.00 USD
""", filtered_entries)
def test_filter_by_expr2(self):
filtered_entries = qx.filter_entries(self.compile("""
SELECT date, type FROM date < 2012-06-01;
""").c_from, self.entries, self.options_map)
self.assertEqualEntries("""
2010-01-01 open Assets:Bank:Checking
2010-01-01 open Assets:Bank:Savings
2010-01-01 open Expenses:Restaurant
2010-01-01 open Assets:ForeignBank:Checking
2010-01-01 * "Dinner with Cero"
Assets:Bank:Checking 100.00 USD
Expenses:Restaurant -100.00 USD
2011-01-01 * "Dinner with Uno"
Assets:Bank:Checking 101.00 USD
Expenses:Restaurant -101.00 USD
2012-02-02 * "Dinner with Dos"
Assets:Bank:Checking 102.00 USD
Expenses:Restaurant -102.00 USD
""", filtered_entries)
def test_filter_close_undated(self):
filtered_entries = qx.filter_entries(self.compile("""
SELECT date, type FROM CLOSE;
""").c_from, self.entries, self.options_map)
self.assertEqualEntries(self.INPUT + textwrap.dedent("""
2014-04-04 C "Conversion for (-50.00 USD, -60.00 CAD)"
Equity:Conversions:Current 50.00 USD @ 0 NOTHING
Equity:Conversions:Current 60.00 CAD @ 0 NOTHING
"""), filtered_entries)
def test_filter_close_dated(self):
filtered_entries = qx.filter_entries(self.compile("""
SELECT date, type FROM CLOSE ON 2013-06-01;
""").c_from, self.entries, self.options_map)
self.assertEqualEntries(self.entries[:-2], filtered_entries)
def test_filter_open_dated(self):
filtered_entries = qx.filter_entries(self.compile("""
SELECT date, type FROM OPEN ON 2013-01-01;
""").c_from, self.entries, self.options_map)
self.assertEqualEntries("""
2010-01-01 open Assets:Bank:Checking
2010-01-01 open Assets:Bank:Savings
2010-01-01 open Expenses:Restaurant
2010-01-01 open Assets:ForeignBank:Checking
2012-12-31 S "Opening balance for 'Assets:Bank:Checking' (Summarization)"
Assets:Bank:Checking 303.00 USD
Equity:Opening-Balances -303.00 USD
2012-12-31 S "Opening balance for 'Equity:Earnings:Previous' (Summarization)"
Equity:Earnings:Previous -303.00 USD
Equity:Opening-Balances 303.00 USD
2013-03-03 * "Dinner with Tres"
Assets:Bank:Checking 103.00 USD
Expenses:Restaurant -103.00 USD
2013-10-10 * "International Transfer"
Assets:Bank:Checking -50.00 USD ; -50.00 USD
Assets:ForeignBank:Checking -60.00 CAD @ 1.20 USD ; -72.00 USD
2014-04-04 * "Dinner with Quatro"
Assets:Bank:Checking 104.00 USD
Expenses:Restaurant -104.00 USD
""", filtered_entries)
    def test_filter_clear(self):
        """CLEAR transfers income-statement balances into current earnings."""
        filtered_entries = qx.filter_entries(self.compile("""
          SELECT date, type FROM CLEAR;
        """).c_from, self.entries, self.options_map)
        # A 'T' (transfer) entry zeroes out Expenses:Restaurant against
        # Equity:Earnings:Current at the final date.
        self.assertEqualEntries(self.INPUT + textwrap.dedent("""
          2014-04-04 T "Transfer balance for 'Expenses:Restaurant' (Transfer balance)"
            Expenses:Restaurant        510.00 USD
            Equity:Earnings:Current   -510.00 USD
        """), filtered_entries)
class TestExecutePrint(CommonInputBase, QueryBase):
    """Tests for rendering entries through the PRINT statement."""

    def test_print_with_filter(self):
        # Keep only the transactions dated in 2012 and print them.
        statement = qc.EvalPrint(qc.EvalFrom(qc.EvalEqual(qe.YearEntryColumn(),
                                                          qc.EvalConstant(2012)),
                                             None, None, None))
        oss = io.StringIO()
        qx.execute_print(statement, self.entries, self.options_map, oss)
        self.assertEqualEntries("""
          2012-02-02 * "Dinner with Dos"
            Assets:Bank:Checking       102.00 USD
            Expenses:Restaurant       -102.00 USD
        """, oss.getvalue())

    def test_print_with_no_filter(self):
        # An empty FROM clause prints all the entries unchanged.
        statement = qc.EvalPrint(qc.EvalFrom(None, None, None, None))
        oss = io.StringIO()
        qx.execute_print(statement, self.entries, self.options_map, oss)
        self.assertEqualEntries(self.INPUT, oss.getvalue())

        # A missing FROM clause behaves the same way.
        statement = qc.EvalPrint(None)
        oss = io.StringIO()
        qx.execute_print(statement, self.entries, self.options_map, oss)
        self.assertEqualEntries(self.INPUT, oss.getvalue())
class TestAllocation(unittest.TestCase):
    """Tests for the store-slot Allocator used by query execution."""

    def test_allocator(self):
        allocator = qx.Allocator()
        # Slots are handed out as consecutive integers starting at zero.
        for expected_handle in range(3):
            self.assertEqual(expected_handle, allocator.allocate())
        # The store has one (initially empty) cell per allocated slot.
        self.assertEqual([None, None, None], allocator.create_store())
class TestBalanceColumn(unittest.TestCase):
    """Tests detection of the special 'balance' column in expressions."""

    def test_uses_balance_column(self):
        # Bare columns, and the same columns nested in a sub-expression.
        cases = [
            (qe.BalanceColumn(), True),
            (qe.AccountColumn(), False),
            (qc.EvalEqual(qe.BalanceColumn(), qc.EvalConstant(2012)), True),
            (qc.EvalEqual(qe.AccountColumn(), qc.EvalConstant('Assets')), False),
        ]
        for expression, expected in cases:
            if expected:
                self.assertTrue(qx.uses_balance_column(expression))
            else:
                self.assertFalse(qx.uses_balance_column(expression))
class TestExecuteNonAggregatedQuery(QueryBase):
    """Tests for queries that contain no aggregate functions."""

    # One transaction with two postings: each query returns one row per posting.
    INPUT = """
      2010-01-01 open Assets:Bank:Checking
      2010-01-01 open Expenses:Restaurant

      2010-02-23 * "Bla"
        Assets:Bank:Checking       100.00 USD
        Expenses:Restaurant       -100.00 USD
    """

    def test_non_aggregate__one(self):
        # A single column; two postings yield two identical rows.
        self.check_query(
            self.INPUT,
            """
            SELECT date;
            """,
            [('date', datetime.date)],
            [(datetime.date(2010, 2, 23),),
             (datetime.date(2010, 2, 23),)])

    def test_non_aggregate__many(self):
        # Several transaction-level columns at once.
        self.check_query(
            self.INPUT,
            """
            SELECT date, flag, payee, narration;
            """,
            [
                ('date', datetime.date),
                ('flag', str),
                ('payee', str),
                ('narration', str),
            ],
            [
                (datetime.date(2010, 2, 23), '*', '', 'Bla'),
                (datetime.date(2010, 2, 23), '*', '', 'Bla'),
            ])

    def test_non_aggregated_order_by_visible(self):
        # ORDER BY may refer to a target by its 1-based position.
        self.check_query(
            self.INPUT,
            """
            SELECT account, length(account) ORDER BY 2;
            """,
            [
                ('account', str),
                ('length_account', int),
            ],
            [
                ('Expenses:Restaurant', 19),
                ('Assets:Bank:Checking', 20),
            ])

    def test_non_aggregated_order_by_invisible(self):
        # The ORDER BY expression need not appear among the SELECT targets.
        self.check_query(
            self.INPUT,
            """
            SELECT account ORDER BY length(account);
            """,
            [
                ('account', str),
            ],
            [
                ('Expenses:Restaurant',),
                ('Assets:Bank:Checking',),
            ])
class TestExecuteAggregatedQuery(QueryBase):
    """Tests for aggregated queries, covering every combination of
    visible/invisible GROUP BY and ORDER BY targets."""

    # One transaction with two postings; used by most tests below.
    INPUT = """
      2010-01-01 open Assets:Bank:Checking
      2010-01-01 open Expenses:Restaurant

      2010-02-23 * "Bla"
        Assets:Bank:Checking       100.00 USD
        Expenses:Restaurant       -100.00 USD
    """

    def test_aggregated_group_by_all_implicit(self):
        # There is no group-by, but all columns are aggregations.
        self.check_query(
            self.INPUT,
            """
            SELECT first(account), last(account);
            """,
            [
                ('first_account', str),
                ('last_account', str),
            ],
            [
                ('Assets:Bank:Checking', 'Expenses:Restaurant'),
            ])

    def test_aggregated_group_by_all_explicit(self):
        # All columns ('account', 'len') are subject of a group-by.
        self.check_sorted_query(
            self.INPUT,
            """
            SELECT account, length(account) as len
            GROUP BY account, len;
            """,
            [
                ('account', str),
                ('len', int),
            ],
            [
                ('Assets:Bank:Checking', 20),
                ('Expenses:Restaurant', 19),
            ])

        # Same query against a two-transaction input with three accounts.
        self.check_sorted_query(
            """
            2010-02-21 * "First"
              Assets:Bank:Checking     -1.00 USD
              Expenses:Restaurant       1.00 USD

            2010-02-23 * "Second"
              Liabilities:Credit-Card  -2.00 USD
              Expenses:Restaurant       2.00 USD
            """,
            """
            SELECT account, length(account) as len
            GROUP BY account, len;
            """,
            [
                ('account', str),
                ('len', int),
            ],
            [
                ('Assets:Bank:Checking', 20),
                ('Expenses:Restaurant', 19),
                ('Liabilities:Credit-Card', 23),
            ])

    def test_aggregated_group_by_visible(self):
        # GROUP-BY: 'account' is visible.
        self.check_sorted_query(
            self.INPUT,
            """
            SELECT account, sum(position) as amount
            GROUP BY account;
            """,
            [
                ('account', str),
                ('amount', inventory.Inventory),
            ],
            [
                ('Assets:Bank:Checking', inventory.from_string('100.00 USD')),
                ('Expenses:Restaurant', inventory.from_string('-100.00 USD')),
            ])

    def test_aggregated_group_by_invisible(self):
        # GROUP-BY: 'account' is invisible.
        self.check_sorted_query(
            self.INPUT,
            """
            SELECT count(position)
            GROUP BY account;
            """,
            [
                ('count_position', int),
            ],
            [
                (1,),
                (1,),
            ])

    def test_aggregated_group_by_visible_order_by_non_aggregate_visible(self):
        # GROUP-BY: 'account' is visible.
        # ORDER-BY: 'account' is a non-aggregate and visible.
        self.check_query(
            self.INPUT,
            """
            SELECT account, sum(position) as amount
            GROUP BY account
            ORDER BY account;
            """,
            [
                ('account', str),
                ('amount', inventory.Inventory),
            ],
            [
                ('Assets:Bank:Checking', inventory.from_string('100.00 USD')),
                ('Expenses:Restaurant', inventory.from_string('-100.00 USD')),
            ])

    def test_aggregated_group_by_visible_order_by_non_aggregate_invisible(self):
        # GROUP-BY: 'account' and 'length(account)' are visible.
        # ORDER-BY: 'length(account)' is a non-aggregate and invisible.
        self.check_query(
            self.INPUT,
            """
            SELECT account, sum(position) as amount
            GROUP BY account, length(account)
            ORDER BY length(account);
            """,
            [
                ('account', str),
                ('amount', inventory.Inventory),
            ],
            [
                ('Expenses:Restaurant', inventory.from_string('-100.00 USD')),
                ('Assets:Bank:Checking', inventory.from_string('100.00 USD')),
            ])

    def test_aggregated_group_by_visible_order_by_aggregate_visible(self):
        # GROUP-BY: 'account' is visible.
        # ORDER-BY: 'sum(account)' is an aggregate and visible.
        self.check_query(
            """
            2010-02-21 * "First"
              Assets:Bank:Checking     -1.00 USD
              Expenses:Restaurant       1.00 USD

            2010-02-23 * "Second"
              Liabilities:Credit-Card  -2.00 USD
              Expenses:Restaurant       2.00 USD
            """,
            """
            SELECT account, count(account) as num, sum(number) as sum
            GROUP BY account
            ORDER BY sum(number);
            """,
            [
                ('account', str),
                ('num', int),
                ('sum', Decimal),
            ],
            [
                ('Liabilities:Credit-Card', 1, D('-2.00')),
                ('Assets:Bank:Checking', 1, D('-1.00')),
                ('Expenses:Restaurant', 2, D('3.00')),
            ])

    def test_aggregated_group_by_visible_order_by_aggregate_invisible(self):
        # GROUP-BY: 'account' is visible.
        # ORDER-BY: 'sum(number)' is an aggregate and invisible.
        self.check_query(
            """
            2010-02-21 * "First"
              Assets:Bank:Checking     -1.00 USD
              Expenses:Restaurant       1.00 USD

            2010-02-23 * "Second"
              Liabilities:Credit-Card  -2.00 USD
              Expenses:Restaurant       2.00 USD
            """,
            """
            SELECT account, count(account) as num
            GROUP BY account
            ORDER BY sum(number);
            """,
            [
                ('account', str),
                ('num', int),
            ],
            [
                ('Liabilities:Credit-Card', 1),
                ('Assets:Bank:Checking', 1),
                ('Expenses:Restaurant', 2),
            ])

    def test_aggregated_group_by_invisible_order_by_non_aggregate_visible(self):
        # GROUP-BY: 'account' is invisible.
        # ORDER-BY: 'len(account)' is a non-aggregate and visible.
        self.check_query(
            self.INPUT,
            """
            SELECT length(account) as len, sum(position) as amount
            GROUP BY account, len
            ORDER BY len;
            """,
            [
                ('len', int),
                ('amount', inventory.Inventory),
            ],
            [
                (19, inventory.from_string('-100.00 USD'),),
                (20, inventory.from_string('100.00 USD'),),
            ])

    def test_aggregated_group_by_invisible_order_by_non_aggregate_invis(self):
        # GROUP-BY: 'account' is invisible.
        # ORDER-BY: 'sum(number)' is an aggregate and invisible.
        self.check_query(
            """
            2010-02-21 * "First"
              Assets:Bank:Checking     -1.00 USD
              Expenses:Restaurant       1.00 USD

            2010-02-23 * "Second"
              Liabilities:Credit-Card  -2.00 USD
              Expenses:Restaurant       2.00 USD
            """,
            """
            SELECT count(account) as num
            GROUP BY account
            ORDER BY sum(number);
            """,
            [
                ('num', int),
            ],
            [
                (1,),
                (1,),
                (2,),
            ])

    def test_aggregated_group_by_invisible_order_by_aggregate_visible(self):
        # GROUP-BY: 'account' is invisible.
        # ORDER-BY: 'sum(account)' is an aggregate and visible.
        self.check_query(
            """
            2010-02-21 * "First"
              Assets:Bank:Checking     -1.00 USD
              Expenses:Restaurant       1.00 USD

            2010-02-23 * "Second"
              Liabilities:Credit-Card  -2.00 USD
              Expenses:Restaurant       2.00 USD
            """,
            """
            SELECT count(account) as num, sum(number) as sum
            GROUP BY account
            ORDER BY sum(number);
            """,
            [
                ('num', int),
                ('sum', Decimal),
            ],
            [
                (1, D('-2.00')),
                (1, D('-1.00')),
                (2, D('3.00')),
            ])

    def test_aggregated_group_by_invisible_order_by_aggregate_invisible(self):
        # GROUP-BY: 'account' is invisible.
        # ORDER-BY: 'sum(number)' is an aggregate and invisible.
        self.check_query(
            """
            2010-02-21 * "First"
              Assets:Bank:Checking     -1.00 USD
              Expenses:Restaurant       1.00 USD

            2010-02-23 * "Second"
              Liabilities:Credit-Card  -2.00 USD
              Expenses:Restaurant       2.00 USD
            """,
            """
            SELECT count(account) as num
            GROUP BY account
            ORDER BY sum(number);
            """,
            [
                ('num', int),
            ],
            [
                (1,),
                (1,),
                (2,),
            ])
class TestExecuteOptions(QueryBase):
    """Tests for the ORDER BY direction, DISTINCT and LIMIT query options."""

    # Six postings with distinct amounts, deliberately out of order.
    INPUT = """
      2010-02-23 *
        Assets:AssetA       5.00 USD
        Assets:AssetD       2.00 USD
        Assets:AssetB       4.00 USD
        Assets:AssetC       3.00 USD
        Assets:AssetE       1.00 USD
        Equity:Rest       -15.00 USD
    """

    def test_order_by_asc_implicit(self):
        # The default sort direction is ascending.
        self.check_query(
            self.INPUT,
            """
            SELECT account, number ORDER BY number;
            """,
            [
                ('account', str),
                ('number', Decimal),
            ],
            [
                ('Equity:Rest', D('-15.00')),
                ('Assets:AssetE', D('1.00')),
                ('Assets:AssetD', D('2.00')),
                ('Assets:AssetC', D('3.00')),
                ('Assets:AssetB', D('4.00')),
                ('Assets:AssetA', D('5.00')),
            ])

    def test_order_by_asc_explicit(self):
        # Explicit ASC gives the same ordering as the default.
        self.check_query(
            self.INPUT,
            """
            SELECT account, number ORDER BY number ASC;
            """,
            [
                ('account', str),
                ('number', Decimal),
            ],
            [
                ('Equity:Rest', D('-15.00')),
                ('Assets:AssetE', D('1.00')),
                ('Assets:AssetD', D('2.00')),
                ('Assets:AssetC', D('3.00')),
                ('Assets:AssetB', D('4.00')),
                ('Assets:AssetA', D('5.00')),
            ])

    def test_order_by_desc(self):
        # DESC reverses the ordering.
        self.check_query(
            self.INPUT,
            """
            SELECT account, number ORDER BY number DESC;
            """,
            [
                ('account', str),
                ('number', Decimal),
            ],
            [
                ('Assets:AssetA', D('5.00')),
                ('Assets:AssetB', D('4.00')),
                ('Assets:AssetC', D('3.00')),
                ('Assets:AssetD', D('2.00')),
                ('Assets:AssetE', D('1.00')),
                ('Equity:Rest', D('-15.00')),
            ])

    def test_distinct(self):
        # Without DISTINCT, one row per posting (duplicates included).
        self.check_sorted_query(
            """
            2010-02-23 *
              Assets:AssetA       5.00 USD
              Assets:AssetA       2.00 USD
              Assets:AssetA       4.00 USD
              Equity:Rest
            """,
            """
            SELECT account ;
            """,
            [
                ('account', str),
            ],
            [
                ('Assets:AssetA',),
                ('Assets:AssetA',),
                ('Assets:AssetA',),
                ('Equity:Rest',),
            ])

        # With DISTINCT, duplicate rows collapse to one.
        self.check_sorted_query(
            """
            2010-02-23 *
              Assets:AssetA       5.00 USD
              Assets:AssetA       2.00 USD
              Assets:AssetA       4.00 USD
              Equity:Rest        -5.00 USD
              Equity:Rest        -2.00 USD
              Equity:Rest        -4.00 USD
            """,
            """
            SELECT DISTINCT account ;
            """,
            [
                ('account', str),
            ],
            [
                ('Assets:AssetA',),
                ('Equity:Rest',),
            ])

    def test_limit(self):
        # LIMIT truncates the result set after sorting.
        self.check_query(
            self.INPUT,
            """
            SELECT account, number ORDER BY number LIMIT 3;
            """,
            [
                ('account', str),
                ('number', Decimal),
            ],
            [
                ('Equity:Rest', D('-15.00')),
                ('Assets:AssetE', D('1.00')),
                ('Assets:AssetD', D('2.00')),
            ])
class TestExecuteFlatten(QueryBase):
    """Placeholder tests for the FLATTEN option (currently disabled)."""

    def test_flatten_results(self):
        ## FIXME: We need some dedicated tests of flattening results.
        pass

    # A single transaction accumulating three different lots on one account.
    INPUT = """
      plugin "beancount.plugins.auto_accounts"

      2010-02-23 *
        Assets:Something       5.00 USD
        Assets:Something       2.00 CAD
        Assets:Something       4 HOOL {531.20 USD}
        Equity:Rest
    """

    ## FIXME: Bring this back in.
    # Leading double underscore keeps unittest from collecting this test.
    def __test_flatten(self):
        self.check_query(
            self.INPUT,
            """
            SELECT account, sum(position)
            WHERE account = 'Assets:Something'
            GROUP BY account
            FLATTEN;
            """,
            [
                ('account', str),
                ('sum_position', inventory.Inventory),
            ],
            [
                ('Assets:Something',
                 inventory.from_string("5.00 USD, 2.00 CAD, 4 HOOL {531.20 USD}")),
            ])
| gpl-2.0 |
darktears/chromium-crosswalk | tools/telemetry/telemetry/benchmark_unittest.py | 8 | 5320 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import unittest
from telemetry import android
from telemetry import benchmark
from telemetry.testing import options_for_unittests
from telemetry.internal import story_runner
from telemetry import page
from telemetry.page import page_test
from telemetry.page import shared_page_state
from telemetry import story as story_module
from telemetry.web_perf import timeline_based_measurement
class DummyPageTest(page_test.PageTest):
  """Minimal PageTest whose per-page measurement step is a no-op."""

  def ValidateAndMeasurePage(self, *_):
    pass
class TestBenchmark(benchmark.Benchmark):
  """Benchmark fixture wrapping a single story in a one-element story set."""

  def __init__(self, story):
    super(TestBenchmark, self).__init__()
    self._story_set = story_module.StorySet()
    self._story_set.AddStory(story)

  def CreatePageTest(self, _):
    # Measurement is irrelevant for these tests; use the no-op page test.
    return DummyPageTest()

  def CreateStorySet(self, _):
    return self._story_set
class BenchmarkTest(unittest.TestCase):
  """Tests for Benchmark story compatibility, test creation and predicates."""

  def testPageTestWithIncompatibleStory(self):
    # A PageTest-based benchmark must reject any story set that contains
    # stories which are not telemetry.page.Page instances.
    b = TestBenchmark(story_module.Story(
        shared_state_class=shared_page_state.SharedPageState))
    with self.assertRaisesRegexp(
        Exception, 'containing only telemetry.page.Page stories'):
      b.Run(options_for_unittests.GetCopy())

    state_class = story_module.SharedState
    b = TestBenchmark(story_module.Story(
        shared_state_class=state_class))
    with self.assertRaisesRegexp(
        Exception, 'containing only telemetry.page.Page stories'):
      b.Run(options_for_unittests.GetCopy())

    b = TestBenchmark(android.AndroidStory(start_intent=None))
    with self.assertRaisesRegexp(
        Exception, 'containing only telemetry.page.Page stories'):
      b.Run(options_for_unittests.GetCopy())

  def testPageTestWithCompatibleStory(self):
    # Stub out story_runner.Run so Run() succeeds without real execution;
    # the try/finally restores the original even if Run() raises.
    original_run_fn = story_runner.Run
    was_run = [False]
    def RunStub(*arg, **kwargs):
      del arg, kwargs
      was_run[0] = True
    story_runner.Run = RunStub

    try:
      options = options_for_unittests.GetCopy()
      options.output_formats = ['none']
      options.suppress_gtest_report = True
      parser = optparse.OptionParser()
      benchmark.AddCommandLineArgs(parser)
      options.MergeDefaultValues(parser.get_default_values())
      b = TestBenchmark(page.Page(url='about:blank'))
      b.Run(options)
    finally:
      story_runner.Run = original_run_fn

    self.assertTrue(was_run[0])

  def testOverriddenTbmOptionsAndPageTestRaises(self):
    # A benchmark may override CreatePageTest OR
    # CreateTimelineBasedMeasurementOptions, never both.
    class FakeTimelineBasedMeasurementOptions(object):
      pass

    class OverrideBothBenchmark(benchmark.Benchmark):
      def CreatePageTest(self, _):
        return DummyPageTest()
      def CreateTimelineBasedMeasurementOptions(self):
        return FakeTimelineBasedMeasurementOptions()

    assertion_regex = (
        'Cannot override both CreatePageTest and '
        'CreateTimelineBasedMeasurementOptions')
    with self.assertRaisesRegexp(AssertionError, assertion_regex):
      OverrideBothBenchmark()

  def testBenchmarkMakesTbmTestByDefault(self):
    class DefaultTbmBenchmark(benchmark.Benchmark):
      pass

    self.assertIsInstance(
        DefaultTbmBenchmark().CreatePageTest(options=None),
        timeline_based_measurement.TimelineBasedMeasurement)

  def testUnknownTestTypeRaises(self):
    class UnknownTestType(object):
      pass
    class UnknownTestTypeBenchmark(benchmark.Benchmark):
      test = UnknownTestType

    type_error_regex = (
        '"UnknownTestType" is not a PageTest or a TimelineBasedMeasurement')
    with self.assertRaisesRegexp(TypeError, type_error_regex):
      UnknownTestTypeBenchmark().CreatePageTest(options=None)

  def testOverriddenTbmOptionsAndPageTestTestAttributeRaises(self):
    class FakeTimelineBasedMeasurementOptions(object):
      pass

    class OverrideOptionsOnPageTestBenchmark(benchmark.Benchmark):
      test = DummyPageTest
      def CreateTimelineBasedMeasurementOptions(self):
        return FakeTimelineBasedMeasurementOptions()

    assertion_regex = (
        'Cannot override CreateTimelineBasedMeasurementOptions '
        'with a PageTest')
    with self.assertRaisesRegexp(AssertionError, assertion_regex):
      OverrideOptionsOnPageTestBenchmark().CreatePageTest(options=None)

  def testBenchmarkPredicate(self):
    # Verifies that the benchmark's ValueCanBeAddedPredicate is handed
    # through to the results object passed into story_runner.Run.
    class PredicateBenchmark(TestBenchmark):
      @classmethod
      def ValueCanBeAddedPredicate(cls, value, is_first_result):
        return False

    original_run_fn = story_runner.Run
    validPredicate = [False]

    def RunStub(test, story_set_module, finder_options, results,
                *args): # pylint: disable=unused-argument
      # NOTE(review): reaches into a private attribute of the results object.
      predicate = results._value_can_be_added_predicate
      valid = predicate == PredicateBenchmark.ValueCanBeAddedPredicate
      validPredicate[0] = valid

    story_runner.Run = RunStub

    try:
      options = options_for_unittests.GetCopy()
      options.output_formats = ['none']
      options.suppress_gtest_report = True
      parser = optparse.OptionParser()
      benchmark.AddCommandLineArgs(parser)
      options.MergeDefaultValues(parser.get_default_values())
      b = PredicateBenchmark(page.Page(url='about:blank'))
      b.Run(options)
    finally:
      story_runner.Run = original_run_fn

    self.assertTrue(validPredicate[0])
| bsd-3-clause |
hdeling/sofa | applications/plugins/Compliant/examples/knee/mapping.py | 11 | 5038 | '''easy python mappings'''
import numpy as np
import script
import path
import tool
class Script(script.Controller):
    """Controller that refreshes a python mapping instance every step.

    The controller holds an `instance` (one of the mapping classes below)
    and calls its update() at the beginning of each animation step and on
    reset, then invokes any registered callbacks.
    """

    def __new__(cls, node, instance):
        # script.Controller construction goes through __new__; attach the
        # mapping instance and an (initially empty) list of callbacks.
        res = script.Controller.__new__(cls, node)
        res.instance = instance

        # callbacks
        res.cb = []
        return res

    def update(self):
        self.instance.update()

        # FIXME mapping gets updated correctly, but output positions
        # are lagging one step behind, so we need to push manually
        # until a better solution is found
        self.instance.output.position = str(self.instance.value)

        for cb in self.cb: cb()

    def onBeginAnimationStep(self, dt):
        self.update()

    def reset(self):
        self.update()
class Base(object):
    """Common machinery for python-driven multi-mappings.

    Creates a child node with output dofs and a PythonMultiMapping whose
    jacobian/value data fields are written from python by subclasses.
    """

    def __init__(self, node, name, dim, **kwargs):
        self.node = node.createChild(name)
        self.script = Script(self.node, self)

        # e.g. 'Vec3,Vec1': input template(s) first, output template last.
        self.template = kwargs['template'].split(',')

        self.output = tool.dofs(self.node, self.template[-1], dim)
        self.input = kwargs['input']

        # Paths of the input dof containers, relative to the output dofs.
        input = ' '.join( [ '@' + path.relative(self.output, x)
                            for x in self.input] )

        self.update_size()

        # Size of the flattened output value vector.
        self.value_size = dim * tool.coord_size(self.template[-1])

        self.mapping = self.node.createObject('PythonMultiMapping',
                                              name = 'mapping',
                                              template = ','.join(self.template),
                                              input = input,
                                              output = '@dofs',
                                              jacobian = np.zeros( (self.rows, self.cols)),
                                              value = np.zeros( self.value_size ))

    def update_size(self):
        # Jacobian shape: output matrix size x summed input matrix sizes.
        self.rows = tool.matrix_size( self.output )
        self.cols = sum( map(tool.matrix_size, self.input) )
        self.size = (self.rows, self.cols)

    @property
    def jacobian(self):
        # The data field is flat; reshape it to the (rows, cols) matrix.
        res = np.array(self.mapping.jacobian)
        return res.reshape( (self.rows, self.cols) )

    @property
    def value(self):
        return np.array(self.mapping.value)

    @jacobian.setter
    def jacobian(self, value):
        # SOFA data fields are assigned from their string representation.
        self.mapping.jacobian = str(value.flatten())

    @value.setter
    def value(self, value):
        self.mapping.value = str(value)
import math
class PointPlaneDistance(Base):
    """Maps three plane points and one point to a signed scalar distance.

    The plane is defined by three Vec3 dofs; the output Vec1 is the signed
    distance of the point dof to that plane, minus `offset`.
    """

    def __init__(self, node, plane, point, **kwargs):
        # Indices of the three dofs defining the plane, and of the point.
        self.plane_indices = kwargs.get('plane_indices', [0, 1, 2])
        self.point_index = kwargs.get('point_index', 0)

        self.plane_dofs = plane
        self.point_dofs = point

        self.offset = kwargs.get('offset', 0)

        Base.__init__(self, node, 'point-plane-distance', 1,
                      input = [plane, point],
                      template = 'Vec3,Vec1')
        # Multiplier allowing callers to flip the distance orientation.
        self.sign = 1

    def update(self):
        A = np.zeros( (3, 3) )

        plane = [self.plane_dofs.position[i] for i in self.plane_indices ]
        point = self.point_dofs.position[ self.point_index ]

        # TODO constant offset on both sides to avoid singular A
        offset = np.zeros(3)

        # Columns of A are the (offset) plane points.
        for i in xrange(3):
            A[:, i] = plane[i] + offset

        b = point + offset

        # u solves A^T u = 1; its direction is the plane normal and
        # u.b - 1 vanishes for points on the plane.
        Ainv = np.linalg.inv(A)
        u = Ainv.transpose().dot( np.ones(3) )
        alpha = math.sqrt( u.dot(u) )

        self.value = self.sign * (u.dot(b) - self.offset * alpha - 1)

        self.update_size()
        J = np.zeros( (self.rows, self.cols) )

        right = tool.matrix_size( self.point_dofs )
        left = tool.matrix_size( self.plane_dofs )

        # point part
        J[:, -right:][:, 3 * self.point_index : 3 * self.point_index + 3] = u.transpose()

        # plane part
        n = u / alpha
        v = b - self.offset * n
        w = -Ainv.dot(v)

        for i, p in enumerate(self.plane_indices):
            J[:, :-right][:, 3 * p:3*p + 3] = w[i] * u.transpose()

        self.jacobian = self.sign * J
        # print 'jacobian', self.jacobian
        # print 'value', self.value
import rigid
from SofaPython import Quaternion as quat
class HingeJoint(Base):
    """Maps a Vec1 joint angle to a Rigid frame rotated about `axis`."""

    def __init__(self, dofs, **kwargs):
        node = dofs.getContext()

        # Rotation axis, in the local frame; defaults to the x axis.
        self.axis = kwargs.get('axis', np.array([1, 0, 0]))
        self.dofs = dofs

        Base.__init__(self, node, 'joint', 1,
                      input = [dofs],
                      template = 'Vec1,Rigid')
        # print self.script.instance

    def update(self):
        # Identity frame with orientation exp(angle * axis).
        value = rigid.id()
        value[3:] = quat.exp( self.dofs.position[0][0] * self.axis )

        self.value = value

        # The angular part of the velocity is driven directly by the axis.
        jacobian = np.zeros( self.size )
        jacobian[3:, :] = self.axis.reshape( (3, 1) )

        self.jacobian = jacobian

        # print self.value
        # print self.jacobian
| lgpl-2.1 |
goddardl/cortex | test/IECore/ops/splineInput/splineInput-1.py | 12 | 2469 | ##########################################################################
#
# Copyright (c) 2009-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
class splineInput( IECore.Op ) :
	"""Test op exposing a SplineffParameter; returns the point count of
	the spline it is given."""

	def __init__( self ) :

		IECore.Op.__init__( self,
			"",
			IECore.IntParameter(
				name = "result",
				description = "",
				defaultValue = 0,
			)
		)

		# A Catmull-Rom spline with duplicated end points, as required by
		# the basis to interpolate through the extremes.
		self.parameters().addParameter(

			IECore.SplineffParameter(
				name = "spline",
				description = "description",
				defaultValue = IECore.SplineffData(
					IECore.Splineff(
						IECore.CubicBasisf.catmullRom(),
						(
							( 0, 1 ),
							( 0, 1 ),
							( 1, 0 ),
							( 1, 0 ),
						),
					),
				),
			)

		)

	def doOperation( self, args ) :
		# Result is simply the number of control points of the input spline.
		return IECore.IntData( len( args["spline"].value.points() ) )
| bsd-3-clause |
rolandmansilla/microblog | flask/lib/python2.7/site-packages/pip/_vendor/html5lib/inputstream.py | 435 | 31665 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from pip._vendor.six.moves import http_client
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])

# Character class of code points that are always invalid in an HTML stream,
# excluding the surrogate range (which is added below only on platforms that
# support lone surrogates).
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]"

if utils.supports_lone_surrogates:
    # Use one extra step of indirection and create surrogates with
    # unichr. Not using this indirection would introduce an illegal
    # unicode literal on platforms not supporting such lone
    # surrogates.
    # BUGFIX: the surrogate range must be spliced INSIDE the character
    # class; the previous code appended it after the closing ']', which
    # produced a regex that never matched any invalid code point.
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
                                    eval('"\\uD800-\\uDFFF"') +  # pylint:disable=eval-used
                                    "]")
else:
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)

# Non-character code points outside the BMP: U+xFFFE / U+xFFFF of each
# supplementary plane.
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                  0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
                                  0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
                                  0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
                                  0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                  0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
                                  0x10FFFE, 0x10FFFF])

ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")

# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
    """Buffering for streams that do not have buffering of their own.

    The buffer is kept as a list of chunks rather than a single string on
    the assumption that repeatedly joining many strings would be O(n**2).
    """

    def __init__(self, stream):
        self.stream = stream
        self.buffer = []
        # Cursor into the buffered data: [chunk number, offset in chunk].
        self.position = [-1, 0]  # chunk number, offset

    def tell(self):
        """Return the absolute offset of the cursor in the buffered data."""
        chunk_index, offset = self.position
        return sum(len(chunk) for chunk in self.buffer[:chunk_index]) + offset

    def seek(self, pos):
        """Move the cursor to absolute offset `pos` within buffered data."""
        assert pos <= self._bufferedBytes()
        remaining = pos
        index = 0
        while len(self.buffer[index]) < remaining:
            remaining -= len(self.buffer[index])
            index += 1
        self.position = [index, remaining]

    def read(self, bytes):
        """Read up to `bytes` bytes, serving from the buffer when possible."""
        if not self.buffer:
            return self._readStream(bytes)
        at_buffer_end = (self.position[0] == len(self.buffer) and
                         self.position[1] == len(self.buffer[-1]))
        if at_buffer_end:
            return self._readStream(bytes)
        return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        # Total number of bytes held across all chunks.
        return sum(len(chunk) for chunk in self.buffer)

    def _readStream(self, bytes):
        # Pull fresh data from the wrapped stream and record it as a chunk,
        # leaving the cursor at the end of the new chunk.
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data

    def _readFromBuffer(self, bytes):
        # Satisfy the read from buffered chunks first, falling back to the
        # underlying stream for whatever is still missing.
        remainingBytes = bytes
        parts = []
        index, offset = self.position
        while index < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            chunk = self.buffer[index]
            available = len(chunk) - offset
            if remainingBytes <= available:
                take = remainingBytes
                self.position = [index, offset + take]
            else:
                take = available
                self.position = [index, len(chunk)]
                index += 1
            parts.append(chunk[offset:offset + take])
            remainingBytes -= take
            offset = 0
        if remainingBytes:
            parts.append(self._readStream(remainingBytes))
        return b"".join(parts)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
    # Factory: dispatch to the unicode or binary stream implementation
    # depending on whether `source` yields text or bytes.
    if isinstance(source, http_client.HTTPResponse):
        # Work around Python bug #20007: read(0) closes the connection.
        # http://bugs.python.org/issue20007
        isUnicode = False
    elif hasattr(source, "read"):
        isUnicode = isinstance(source.read(0), text_type)
    else:
        isUnicode = isinstance(source, text_type)

    if isUnicode:
        # Text input already has a known encoding; an explicit one is an error.
        if encoding is not None:
            raise TypeError("Cannot explicitly set an encoding with a unicode string")

        return HTMLUnicodeInputStream(source)
    else:
        return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
if not utils.supports_lone_surrogates:
# Such platforms will have already checked for such
# surrogate errors, so no need to do this checking.
self.reportCharacterErrors = None
self.replaceCharactersRegexp = None
elif len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile(eval('"[\\uD800-\\uDFFF]"'))
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile(
eval('"([\\uD800-\\uDBFF](?![\\uDC00-\\uDFFF])|(?<![\\uD800-\\uDBFF])[\\uDC00-\\uDFFF])"'))
# List of where new lines occur
self.newLines = [0]
self.charEncoding = ("utf-8", "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
if self.reportCharacterErrors:
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub("\ufffd", data)
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for i in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
    """ Returns a string of characters from the stream up to but not
    including any character in 'characters' or EOF. 'characters' must be
    a container that supports the 'in' method and iteration over its
    characters.

    With opposite=True, returns characters *in* 'characters' instead.
    """
    # Use a cache of regexps to find the required characters
    try:
        chars = charsUntilRegEx[(characters, opposite)]
    except KeyError:
        if __debug__:
            # The regex below only handles ASCII character classes.
            for c in characters:
                assert(ord(c) < 128)
        regex = "".join(["\\x%02x" % ord(c) for c in characters])
        if not opposite:
            regex = "^%s" % regex
        chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)

    rv = []

    while True:
        # Find the longest matching prefix
        m = chars.match(self.chunk, self.chunkOffset)
        if m is None:
            # If nothing matched, and it wasn't because we ran out of chunk,
            # then stop
            if self.chunkOffset != self.chunkSize:
                break
        else:
            end = m.end()
            # If not the whole chunk matched, return everything
            # up to the part that didn't match
            if end != self.chunkSize:
                rv.append(self.chunk[self.chunkOffset:end])
                self.chunkOffset = end
                break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
        if not self.readChunk():
            # Reached EOF
            break

    r = "".join(rv)
    return r
def unget(self, char):
    """Push a single character back onto the stream.

    Only one character is allowed to be ungotten at once - it must
    be consumed again before any further call to unget.
    """
    if char is not None:
        if self.chunkOffset == 0:
            # unget is called quite rarely, so it's a good idea to do
            # more work here if it saves a bit of work in the frequently
            # called char and charsUntil.
            # So, just prepend the ungotten character onto the current
            # chunk:
            self.chunk = char + self.chunk
            self.chunkSize += 1
        else:
            self.chunkOffset -= 1
            assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """

    def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        parseMeta - Look for a <meta> element containing encoding information
        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)

        HTMLUnicodeInputStream.__init__(self, self.rawStream)

        self.charEncoding = (codecName(encoding), "certain")

        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 512
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Encoding to use if no other information can be found
        self.defaultEncoding = "windows-1252"

        # Detect encoding iff no explicit "transport level" encoding is supplied
        if (self.charEncoding[0] is None):
            self.charEncoding = self.detectEncoding(parseMeta, chardet)

        # Call superclass
        self.reset()

    def reset(self):
        # Decode the raw byte stream with the detected/declared encoding,
        # replacing undecodable bytes.
        self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
                                                                 'replace')
        HTMLUnicodeInputStream.reset(self)

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)

        try:
            # Probe seekability; non-seekable streams get a buffering wrapper.
            stream.seek(stream.tell())
        except:
            stream = BufferedStream(stream)

        return stream

    def detectEncoding(self, parseMeta=True, chardet=True):
        """Return an (encoding, confidence) pair for the raw stream, trying
        BOM, then <meta> prescan, then chardet, then the default."""
        # First look for a BOM
        # This will also read past the BOM if present
        encoding = self.detectBOM()
        confidence = "certain"

        # If there is no BOM need to look for meta elements with encoding
        # information
        if encoding is None and parseMeta:
            encoding = self.detectEncodingMeta()
            confidence = "tentative"

        # Guess with chardet, if avaliable
        if encoding is None and chardet:
            confidence = "tentative"
            try:
                try:
                    from charade.universaldetector import UniversalDetector
                except ImportError:
                    from chardet.universaldetector import UniversalDetector
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = detector.result['encoding']
                self.rawStream.seek(0)
            except ImportError:
                # Neither charade nor chardet is installed; fall through.
                pass

        # If all else fails use the default encoding
        if encoding is None:
            confidence = "tentative"
            encoding = self.defaultEncoding

        # Substitute for equivalent encodings:
        encodingSub = {"iso-8859-1": "windows-1252"}

        if encoding.lower() in encodingSub:
            encoding = encodingSub[encoding.lower()]

        return encoding, confidence

    def changeEncoding(self, newEncoding):
        """Switch to *newEncoding* mid-parse; raises ReparseException when
        the stream must be re-parsed from the start with the new codec."""
        assert self.charEncoding[1] != "certain"
        newEncoding = codecName(newEncoding)
        if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
            newEncoding = "utf-8"
        if newEncoding is None:
            return
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            # Capture the outgoing encoding before we overwrite it; the
            # original code formatted self.charEncoding[0] *after* the
            # assignment, so the message always read "from NEW to NEW".
            oldEncoding = self.charEncoding[0]
            self.rawStream.seek(0)
            # NOTE(review): reset() is called before charEncoding is
            # reassigned (so it rebuilds the reader with the old codec);
            # order preserved as-is — the raised exception triggers a full
            # reparse anyway.  Confirm against upstream before reordering.
            self.reset()
            self.charEncoding = (newEncoding, "certain")
            raise ReparseException("Encoding changed from %s to %s" % (oldEncoding, newEncoding))

    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
            codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
        }

        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)

        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])  # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)  # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2

        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        self.rawStream.seek(encoding and seek or 0)

        return encoding

    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()

        if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
            # Per the HTML spec, a meta-declared UTF-16 is treated as UTF-8.
            encoding = "utf-8"

        return encoding
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""

    def __new__(self, value):
        assert isinstance(value, bytes)
        # Lower-case once at construction so all matching is case-insensitive.
        return bytes.__new__(self, value.lower())

    def __init__(self, value):
        # -1 means "before the first byte"; first next() moves to index 0.
        self._position = -1

    def __iter__(self):
        return self

    def __next__(self):
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p:p + 1]

    def next(self):
        # Py2 compat
        return self.__next__()

    def previous(self):
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]

    def setPosition(self, position):
        if self._position >= len(self):
            raise StopIteration
        self._position = position

    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            return None

    position = property(getPosition, setPosition)

    def getCurrentByte(self):
        return self[self.position:self.position + 1]

    currentByte = property(getCurrentByte)

    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def skipUntil(self, chars):
        """Advance until a byte in *chars* is found; return it or None."""
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        p = self.position
        data = self[p:p + len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv

    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes) - 1)
            return True
        else:
            raise StopIteration
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""

    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None

    def getEncoding(self):
        """Scan the buffer for markup that can declare an encoding and
        return the detected codec name, or None."""
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for byte in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        keepParsing = method()
                        break
                    except StopIteration:
                        # Ran off the end of the buffer: stop scanning.
                        keepParsing = False
                        break
            if not keepParsing:
                break

        return self.encoding

    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")

    def handleMeta(self):
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    if hasPragma and pendingEncoding is not None:
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = codecName(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                # Remember it; only valid once the pragma
                                # (http-equiv=content-type) has been seen.
                                pendingEncoding = codec

    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)

    def handlePossibleEndTag(self):
        next(self.data)
        return self.handlePossibleTag(True)

    def handlePossibleTag(self, endTag):
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True

        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True

    def handleOther(self):
        return self.data.jumpTo(b">")

    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extracts the charset from a meta element's content attribute,
    e.g. b'text/html; charset=utf-8' -> b'utf-8'."""

    def __init__(self, data):
        assert isinstance(data, bytes)
        self.data = data

    def parse(self):
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            return None
def codecName(encoding):
    """Return the python codec name corresponding to an encoding or None if the
    string doesn't correspond to a valid encoding."""
    if isinstance(encoding, bytes):
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            # Non-ASCII bytes can't name a valid encoding.
            return None
    if encoding:
        # Canonicalise: strip ASCII punctuation/whitespace, lower-case,
        # then look up in the known-encodings table.
        canonicalName = ascii_punctuation_re.sub("", encoding).lower()
        return encodings.get(canonicalName, None)
    else:
        return None
| bsd-3-clause |
joelcan/tools-eth-contract-dev | pyethereum/pyethereum/opcodes.py | 1 | 3118 | # schema: [opcode, ins, outs, memuses, gas]
#
# memuses are written as an array of (start, len) pairs; values less than
# zero are taken as stackarg indices and values zero or greater are taken
# as literals
# EVM opcode table.
# schema per entry: [mnemonic, stack inputs, stack outputs, gas cost]
opcodes = {
    0x00: ['STOP', 0, 0, 0],
    0x01: ['ADD', 2, 1, 1],
    0x02: ['MUL', 2, 1, 1],
    0x03: ['SUB', 2, 1, 1],
    0x04: ['DIV', 2, 1, 1],
    0x05: ['SDIV', 2, 1, 1],
    0x06: ['MOD', 2, 1, 1],
    0x07: ['SMOD', 2, 1, 1],
    0x08: ['ADDMOD', 3, 1, 1],
    0x09: ['MULMOD', 3, 1, 1],
    0x0a: ['EXP', 2, 1, 1],
    0x0b: ['SIGNEXTEND', 2, 1, 1],
    0x10: ['LT', 2, 1, 1],
    0x11: ['GT', 2, 1, 1],
    0x12: ['SLT', 2, 1, 1],
    0x13: ['SGT', 2, 1, 1],
    0x14: ['EQ', 2, 1, 1],
    0x15: ['ISZERO', 1, 1, 1],
    0x16: ['AND', 2, 1, 1],
    0x17: ['OR', 2, 1, 1],
    0x18: ['XOR', 2, 1, 1],
    0x19: ['NOT', 1, 1, 1],
    0x1a: ['BYTE', 2, 1, 1],
    0x20: ['SHA3', 2, 1, 10],
    0x30: ['ADDRESS', 0, 1, 1],
    0x31: ['BALANCE', 1, 1, 20],
    0x32: ['ORIGIN', 0, 1, 1],
    0x33: ['CALLER', 0, 1, 1],
    0x34: ['CALLVALUE', 0, 1, 1],
    0x35: ['CALLDATALOAD', 1, 1, 1],
    0x36: ['CALLDATASIZE', 0, 1, 1],
    0x37: ['CALLDATACOPY', 3, 0, 1],
    0x38: ['CODESIZE', 0, 1, 1],
    0x39: ['CODECOPY', 3, 0, 1],
    0x3a: ['GASPRICE', 0, 1, 1],
    0x3b: ['EXTCODESIZE', 1, 1, 1],
    0x3c: ['EXTCODECOPY', 4, 0, 1],
    0x40: ['BLOCKHASH', 1, 1, 1],
    0x41: ['COINBASE', 0, 1, 1],
    0x42: ['TIMESTAMP', 0, 1, 1],
    0x43: ['NUMBER', 0, 1, 1],
    0x44: ['DIFFICULTY', 0, 1, 1],
    0x45: ['GASLIMIT', 0, 1, 1],
    0x50: ['POP', 1, 0, 1],
    0x51: ['MLOAD', 1, 1, 1],
    0x52: ['MSTORE', 2, 0, 1],
    0x53: ['MSTORE8', 2, 0, 1],
    0x54: ['SLOAD', 1, 1, 20],
    0x55: ['SSTORE', 2, 0, 0],
    0x56: ['JUMP', 1, 0, 1],
    0x57: ['JUMPI', 2, 0, 1],
    0x58: ['PC', 0, 1, 1],
    0x59: ['MSIZE', 0, 1, 1],
    0x5a: ['GAS', 0, 1, 1],
    0x5b: ['JUMPDEST', 0, 0, 1],
    0xa0: ['LOG0', 2, 0, 32],
    0xa1: ['LOG1', 3, 0, 64],
    0xa2: ['LOG2', 4, 0, 96],
    0xa3: ['LOG3', 5, 0, 128],
    0xa4: ['LOG4', 6, 0, 160],
    0xf0: ['CREATE', 3, 1, 100],
    0xf1: ['CALL', 7, 1, 20],
    0xf2: ['CALLCODE', 7, 1, 20],
    0xf3: ['RETURN', 2, 1, 1],
    0xff: ['SUICIDE', 1, 1, 0],
}

# PUSH1..PUSH32 occupy 0x60..0x7f
for i in range(1, 33):
    opcodes[0x5f + i] = ['PUSH' + str(i), 0, 1, 1]

# DUP1..DUP16 occupy 0x80..0x8f, SWAP1..SWAP16 occupy 0x90..0x9f
for i in range(1, 17):
    opcodes[0x7f + i] = ['DUP' + str(i), i, i + 1, 1]
    opcodes[0x8f + i] = ['SWAP' + str(i), i + 1, i + 1, 1]

# Export each entry as a module-level name (e.g. ADD) and build the
# mnemonic -> opcode reverse lookup.
reverse_opcodes = {}
for o in opcodes:
    vars()[opcodes[o][0]] = opcodes[o]
    reverse_opcodes[opcodes[o][0]] = o
# Non-opcode gas prices
# Fee-schedule constants used by the VM outside the per-opcode table above.
GDEFAULT = 1
GMEMORY = 1
GSTORAGEKILL = -100  # refund for clearing a storage slot (negative = refund)
GSTORAGEMOD = 100  # modifying an existing non-zero storage slot
GSTORAGEADD = 300  # writing to a previously-zero storage slot
GEXPONENTBYTE = 1  # cost of EXP exponent per byte
GCOPY = 1  # cost to copy one 32 byte word
GCONTRACTBYTE = 5  # one byte of code in contract creation
GTXCOST = 500  # TX BASE GAS COST
GTXDATAZERO = 1  # TX DATA ZERO BYTE GAS COST
GTXDATANONZERO = 5  # TX DATA NON ZERO BYTE GAS COST
GSHA3WORD = 10  # Cost of SHA3 per word
GSHA256WORD = 50  # Cost of SHA256 per word
GRIPEMD160WORD = 50  # Cost of RIPEMD160 per word
| mit |
pytorch/fairseq | fairseq/tasks/legacy_masked_lm.py | 1 | 5010 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
import numpy as np
from fairseq import tokenizer, utils
from fairseq.data import ConcatDataset, Dictionary, data_utils, indexed_dataset
from fairseq.data.legacy.block_pair_dataset import BlockPairDataset
from fairseq.data.legacy.masked_lm_dataset import MaskedLMDataset
from fairseq.data.legacy.masked_lm_dictionary import BertDictionary
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("legacy_masked_lm")
class LegacyMaskedLMTask(LegacyFairseqTask):
    """
    Task for training Masked LM (BERT) model.

    Args:
        dictionary (Dictionary): the dictionary for the input of the task
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        parser.add_argument(
            "data",
            help="colon separated path to data directories list, \
                 will be iterated upon during epochs in round-robin manner",
        )
        parser.add_argument(
            "--tokens-per-sample",
            default=512,
            type=int,
            help="max number of total tokens over all segments"
            " per sample for BERT dataset",
        )
        parser.add_argument(
            "--break-mode", default="doc", type=str, help="mode for breaking sentence"
        )
        parser.add_argument("--shuffle-dataset", action="store_true", default=False)

    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed

    @classmethod
    def load_dictionary(cls, filename):
        return BertDictionary.load(filename)

    @classmethod
    def build_dictionary(
        cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
    ):
        """Build a BertDictionary from raw text files."""
        d = BertDictionary()
        for filename in filenames:
            Dictionary.add_file_to_dictionary(
                filename, d, tokenizer.tokenize_line, workers
            )
        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
        return d

    @property
    def target_dictionary(self):
        return self.dictionary

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task."""
        paths = utils.split_paths(args.data)
        assert len(paths) > 0
        dictionary = BertDictionary.load(os.path.join(paths[0], "dict.txt"))
        logger.info("dictionary: {} types".format(len(dictionary)))

        return cls(args, dictionary)

    def load_dataset(self, split, epoch=1, combine=False):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        loaded_datasets = []

        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        data_path = paths[(epoch - 1) % len(paths)]
        # Bug fix: the message previously had no placeholder for the extra
        # argument, which makes the logging module report a formatting error.
        logger.info("data_path %s", data_path)

        for k in itertools.count():
            split_k = split + (str(k) if k > 0 else "")
            path = os.path.join(data_path, split_k)
            ds = indexed_dataset.make_dataset(
                path,
                impl=self.args.dataset_impl,
                fix_lua_indexing=True,
                dictionary=self.dictionary,
            )

            if ds is None:
                if k > 0:
                    # No more shards; stop accumulating.
                    break
                else:
                    raise FileNotFoundError(
                        "Dataset not found: {} ({})".format(split, data_path)
                    )

            with data_utils.numpy_seed(self.seed + k):
                loaded_datasets.append(
                    BlockPairDataset(
                        ds,
                        self.dictionary,
                        ds.sizes,
                        self.args.tokens_per_sample,
                        break_mode=self.args.break_mode,
                        doc_break_size=1,
                    )
                )

            logger.info(
                "{} {} {} examples".format(data_path, split_k, len(loaded_datasets[-1]))
            )

            if not combine:
                break

        if len(loaded_datasets) == 1:
            dataset = loaded_datasets[0]
            sizes = dataset.sizes
        else:
            dataset = ConcatDataset(loaded_datasets)
            sizes = np.concatenate([ds.sizes for ds in loaded_datasets])

        self.datasets[split] = MaskedLMDataset(
            dataset=dataset,
            sizes=sizes,
            vocab=self.dictionary,
            pad_idx=self.dictionary.pad(),
            mask_idx=self.dictionary.mask(),
            classif_token_idx=self.dictionary.cls(),
            sep_token_idx=self.dictionary.sep(),
            shuffle=self.args.shuffle_dataset,
            seed=self.seed,
        )
| mit |
groschovskiy/lerigos_music | Server/API/lib/google/logging/type/log_severity_pb2.py | 2 | 3168 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/logging/type/log_severity.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
# Descriptor objects below are emitted by protoc for
# google/logging/type/log_severity.proto; do not edit by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/logging/type/log_severity.proto',
  package='google.logging.type',
  syntax='proto3',
  serialized_pb=b'\n&google/logging/type/log_severity.proto\x12\x13google.logging.type\x1a\x1cgoogle/api/annotations.proto*\x82\x01\n\x0bLogSeverity\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x44\x45\x42UG\x10\x64\x12\t\n\x04INFO\x10\xc8\x01\x12\x0b\n\x06NOTICE\x10\xac\x02\x12\x0c\n\x07WARNING\x10\x90\x03\x12\n\n\x05\x45RROR\x10\xf4\x03\x12\r\n\x08\x43RITICAL\x10\xd8\x04\x12\n\n\x05\x41LERT\x10\xbc\x05\x12\x0e\n\tEMERGENCY\x10\xa0\x06\x42-\n\x17\x63om.google.logging.typeB\x10LogSeverityProtoP\x01\x62\x06proto3'
  ,
  dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

_LOGSEVERITY = _descriptor.EnumDescriptor(
  name='LogSeverity',
  full_name='google.logging.type.LogSeverity',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='DEFAULT', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DEBUG', index=1, number=100,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='INFO', index=2, number=200,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='NOTICE', index=3, number=300,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='WARNING', index=4, number=400,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR', index=5, number=500,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CRITICAL', index=6, number=600,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ALERT', index=7, number=700,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='EMERGENCY', index=8, number=800,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=94,
  serialized_end=224,
)
_sym_db.RegisterEnumDescriptor(_LOGSEVERITY)

# Public enum wrapper plus module-level value constants, mirroring the
# LogSeverity values defined in the .proto.
LogSeverity = enum_type_wrapper.EnumTypeWrapper(_LOGSEVERITY)
DEFAULT = 0
DEBUG = 100
INFO = 200
NOTICE = 300
WARNING = 400
ERROR = 500
CRITICAL = 600
ALERT = 700
EMERGENCY = 800

DESCRIPTOR.enum_types_by_name['LogSeverity'] = _LOGSEVERITY

DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\027com.google.logging.typeB\020LogSeverityProtoP\001')
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
ivelum/djangoql | test_project/core/views.py | 1 | 1236 | import json
from django.contrib.auth.models import Group, User
from django.shortcuts import render
from django.views.decorators.http import require_GET
from djangoql.exceptions import DjangoQLError
from djangoql.queryset import apply_search
from djangoql.schema import DjangoQLSchema
from djangoql.serializers import DjangoQLSchemaSerializer
class UserQLSchema(DjangoQLSchema):
    # Restrict the DjangoQL search schema to the User and Group models.
    include = (User, Group)
    # Offer value suggestions for Group.name in the completion UI.
    suggest_options = {
        Group: ['name'],
    }
@require_GET
def completion_demo(request):
    """Render the DjangoQL completion demo page, applying the ``q`` search
    expression (if any) to the User queryset and reporting parse errors."""
    q = request.GET.get('q', '')
    error = ''
    query = User.objects.all().order_by('username')
    if q:
        try:
            query = apply_search(query, q, schema=UserQLSchema)
        except DjangoQLError as e:
            # Invalid query: show no results plus the parser's message.
            query = query.none()
            error = str(e)

    # You may want to use SuggestionsAPISerializer and an additional API
    # endpoint (see in djangoql.views) for asynchronous suggestions loading
    introspections = DjangoQLSchemaSerializer().serialize(
        UserQLSchema(query.model),
    )

    return render(request, 'completion_demo.html', context={
        'q': q,
        'error': error,
        'search_results': query,
        'introspections': json.dumps(introspections),
    })
| mit |
leohmoraes/tablib | tablib/packages/odf/form.py | 100 | 3268 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import FORMNS
from element import Element
# Autogenerated
# Factory helpers: each returns an ODF Element in the form: namespace with
# the given local name, forwarding all keyword arguments.


def Button(**args):
    return Element(qname=(FORMNS, 'button'), **args)


def Checkbox(**args):
    return Element(qname=(FORMNS, 'checkbox'), **args)


def Column(**args):
    return Element(qname=(FORMNS, 'column'), **args)


def Combobox(**args):
    return Element(qname=(FORMNS, 'combobox'), **args)


def ConnectionResource(**args):
    return Element(qname=(FORMNS, 'connection-resource'), **args)


def Date(**args):
    return Element(qname=(FORMNS, 'date'), **args)


def File(**args):
    return Element(qname=(FORMNS, 'file'), **args)


def FixedText(**args):
    return Element(qname=(FORMNS, 'fixed-text'), **args)


def Form(**args):
    return Element(qname=(FORMNS, 'form'), **args)


def FormattedText(**args):
    return Element(qname=(FORMNS, 'formatted-text'), **args)


def Frame(**args):
    return Element(qname=(FORMNS, 'frame'), **args)


def GenericControl(**args):
    return Element(qname=(FORMNS, 'generic-control'), **args)


def Grid(**args):
    return Element(qname=(FORMNS, 'grid'), **args)


def Hidden(**args):
    return Element(qname=(FORMNS, 'hidden'), **args)


def Image(**args):
    return Element(qname=(FORMNS, 'image'), **args)


def ImageFrame(**args):
    return Element(qname=(FORMNS, 'image-frame'), **args)


def Item(**args):
    return Element(qname=(FORMNS, 'item'), **args)


def ListProperty(**args):
    return Element(qname=(FORMNS, 'list-property'), **args)


def ListValue(**args):
    return Element(qname=(FORMNS, 'list-value'), **args)


def Listbox(**args):
    return Element(qname=(FORMNS, 'listbox'), **args)


def Number(**args):
    return Element(qname=(FORMNS, 'number'), **args)


def Option(**args):
    return Element(qname=(FORMNS, 'option'), **args)


def Password(**args):
    return Element(qname=(FORMNS, 'password'), **args)


def Properties(**args):
    return Element(qname=(FORMNS, 'properties'), **args)


def Property(**args):
    return Element(qname=(FORMNS, 'property'), **args)


def Radio(**args):
    return Element(qname=(FORMNS, 'radio'), **args)


def Text(**args):
    return Element(qname=(FORMNS, 'text'), **args)


def Textarea(**args):
    return Element(qname=(FORMNS, 'textarea'), **args)


def Time(**args):
    return Element(qname=(FORMNS, 'time'), **args)


def ValueRange(**args):
    return Element(qname=(FORMNS, 'value-range'), **args)
| mit |
csrocha/OpenUpgrade | addons/gamification/tests/test_challenge.py | 386 | 5133 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class test_challenge(common.TransactionCase):
    """Integration tests for gamification challenges: joining, goal
    generation, goal completion and badge rewards."""

    def setUp(self):
        super(test_challenge, self).setUp()
        cr, uid = self.cr, self.uid

        # Registries used throughout the tests.
        self.data_obj = self.registry('ir.model.data')
        self.user_obj = self.registry('res.users')
        self.challenge_obj = self.registry('gamification.challenge')
        self.line_obj = self.registry('gamification.challenge.line')
        self.goal_obj = self.registry('gamification.goal')
        self.badge_obj = self.registry('gamification.badge')
        self.badge_user_obj = self.registry('gamification.badge.user')

        # Demo records referenced by the tests.
        self.demo_user_id = self.data_obj.get_object_reference(cr, uid, 'base', 'user_demo')[1]
        self.group_user_id = self.data_obj.get_object_reference(cr, uid, 'base', 'group_user')[1]
        self.challenge_base_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'challenge_base_discover')[1]
        self.definition_timezone_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'definition_base_timezone')[1]
        self.badge_id = self.data_obj.get_object_reference(cr, uid, 'gamification', 'badge_good_job')[1]

    def test_00_join_challenge(self):
        cr, uid, context = self.cr, self.uid, {}

        user_ids = self.user_obj.search(cr, uid, [('groups_id', '=', self.group_user_id)])
        challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
        self.assertGreaterEqual(len(challenge.user_ids), len(user_ids), "Not enough users in base challenge")

        # A newly created employee user must be auto-enrolled on update.
        self.user_obj.create(cr, uid, {
            'name': 'R2D2',
            'login': 'r2d2@openerp.com',
            'email': 'r2d2@openerp.com',
            'groups_id': [(6, 0, [self.group_user_id])]
        }, {'no_reset_password': True})

        self.challenge_obj._update_all(cr, uid, [self.challenge_base_id], context=context)
        challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
        self.assertGreaterEqual(len(challenge.user_ids), len(user_ids) + 1, "These are not droids you are looking for")

    def test_10_reach_challenge(self):
        cr, uid, context = self.cr, self.uid, {}

        self.challenge_obj.write(cr, uid, [self.challenge_base_id], {'state': 'inprogress'}, context=context)
        challenge = self.challenge_obj.browse(cr, uid, self.challenge_base_id, context=context)
        challenge_user_ids = [user.id for user in challenge.user_ids]

        self.assertEqual(challenge.state, 'inprogress', "Challenge failed the change of state")

        line_ids = self.line_obj.search(cr, uid, [('challenge_id', '=', self.challenge_base_id)], context=context)
        goal_ids = self.goal_obj.search(cr, uid, [('challenge_id', '=', self.challenge_base_id), ('state', '!=', 'draft')], context=context)
        self.assertEqual(len(goal_ids), len(line_ids) * len(challenge_user_ids), "Incorrect number of goals generated, should be 1 goal per user, per challenge line")

        # demo user will set a timezone
        self.user_obj.write(cr, uid, self.demo_user_id, {'tz': "Europe/Brussels"}, context=context)
        goal_ids = self.goal_obj.search(cr, uid, [('user_id', '=', self.demo_user_id), ('definition_id', '=', self.definition_timezone_id)], context=context)
        self.goal_obj.update(cr, uid, goal_ids, context=context)

        reached_goal_ids = self.goal_obj.search(cr, uid, [('id', 'in', goal_ids), ('state', '=', 'reached')], context=context)
        self.assertEqual(set(goal_ids), set(reached_goal_ids), "Not every goal was reached after changing timezone")

        # reward for two firsts as admin may have timezone
        self.challenge_obj.write(cr, uid, self.challenge_base_id, {'reward_first_id': self.badge_id, 'reward_second_id': self.badge_id}, context=context)
        self.challenge_obj.write(cr, uid, self.challenge_base_id, {'state': 'done'}, context=context)

        badge_ids = self.badge_user_obj.search(cr, uid, [('badge_id', '=', self.badge_id), ('user_id', '=', self.demo_user_id)])
        self.assertGreater(len(badge_ids), 0, "Demo user has not received the badge")
dracorpg/python-ivi | ivi/agilent/agilentMSOX3024A.py | 6 | 1695 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent3000A import *
class agilentMSOX3024A(agilent3000A):
    "Agilent InfiniiVision MSOX3024A IVI oscilloscope driver"

    def __init__(self, *args, **kwargs):
        # Record the instrument ID this driver matches before the base
        # class initializes; setdefault preserves any value a subclass
        # may already have installed.
        self.__dict__.setdefault('_instrument_id', 'MSO-X 3024A')
        super(agilentMSOX3024A, self).__init__(*args, **kwargs)
        # Model-specific capabilities of the MSO-X 3024A.
        self._bandwidth = 200e6
        self._analog_channel_count = 4
        self._digital_channel_count = 16
        self._channel_count = (self._analog_channel_count
                               + self._digital_channel_count)
        # Rebuild the channel list with the counts set above.
        self._init_channels()
mohamed--abdel-maksoud/chromium.src | third_party/tlslite/tlslite/utils/openssl_aes.py | 202 | 1944 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""OpenSSL/M2Crypto AES implementation."""
from .cryptomath import *
from .aes import *
if m2cryptoLoaded:

    def new(key, mode, IV):
        """Create an OpenSSL-backed AES-CBC cipher object."""
        return OpenSSL_AES(key, mode, IV)

    class OpenSSL_AES(AES):
        """AES-CBC implementation delegating to M2Crypto's OpenSSL bindings.

        The object is stateful: after each encrypt()/decrypt() call the IV
        is advanced to the last ciphertext block, so consecutive calls act
        as one continuous CBC stream.
        """

        def __init__(self, key, mode, IV):
            AES.__init__(self, key, mode, IV, "openssl")
            self.key = key
            self.IV = IV

        def _createContext(self, encrypt):
            """Build a fresh OpenSSL cipher context for one operation.

            encrypt -- 1 for encryption, 0 for decryption.
            Raises ValueError if the key is not 128, 192 or 256 bits.
            """
            context = m2.cipher_ctx_new()
            if len(self.key) == 16:
                cipherType = m2.aes_128_cbc()
            elif len(self.key) == 24:
                cipherType = m2.aes_192_cbc()
            elif len(self.key) == 32:
                cipherType = m2.aes_256_cbc()
            else:
                # Previously an unsupported key size fell through with
                # cipherType unbound, raising an opaque NameError; fail
                # with an explicit message instead.
                raise ValueError("AES key must be 16, 24 or 32 bytes long")
            m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
            return context

        def encrypt(self, plaintext):
            # Base-class call performs argument validation only.
            AES.encrypt(self, plaintext)
            context = self._createContext(1)
            ciphertext = m2.cipher_update(context, plaintext)
            m2.cipher_ctx_free(context)
            # Chain the CBC state across calls.
            self.IV = ciphertext[-self.block_size:]
            return bytearray(ciphertext)

        def decrypt(self, ciphertext):
            # Base-class call performs argument validation only.
            AES.decrypt(self, ciphertext)
            context = self._createContext(0)
            #I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
            #To work around this, we append sixteen zeros to the string, below:
            plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
            #If this bug is ever fixed, then plaintext will end up having a garbage
            #plaintext block on the end.  That's okay - the below code will discard it.
            plaintext = plaintext[:len(ciphertext)]
            m2.cipher_ctx_free(context)
            # Chain the CBC state across calls.
            self.IV = ciphertext[-self.block_size:]
            return bytearray(plaintext)
| bsd-3-clause |
qwefi/nova | plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py | 14 | 4895 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This script is used to configure iptables, ebtables, and arptables rules on
XenServer hosts.
"""
import os
import sys
import novalib
# This is written to Python 2.4, since that is what is available on XenServer
import simplejson as json
def main(dom_id, command, only_this_vif=None):
    """Read per-VIF networking metadata from xenstore and (re)apply the
    filtering rules for the given domain.

    dom_id        -- XenServer domain id whose VIFs are configured
    command       -- 'online' installs rules, anything else removes them
    only_this_vif -- if given, restrict the update to that single VIF
    """
    base_path = '/local/domain/%s/vm-data/networking' % dom_id
    listing = novalib.execute_get_output('/usr/bin/xenstore-ls', base_path)
    for entry in listing.splitlines():
        # Each xenstore-ls line is "<mac> = <json>"; the key is the MAC.
        mac = entry.split("=")[0].strip()
        raw = novalib.execute_get_output('/usr/bin/xenstore-read',
                                         '%s/%s' % (base_path, mac))
        data = json.loads(raw)
        for ip in data['ips']:
            # Public network rides on device 0, private on device 1.
            if data["label"] == "public":
                vif = "vif%s.0" % dom_id
            else:
                vif = "vif%s.1" % dom_id
            if only_this_vif in (None, vif):
                params = dict(IP=ip['ip'], VIF=vif, MAC=data['mac'])
                apply_ebtables_rules(command, params)
                apply_arptables_rules(command, params)
                apply_iptables_rules(command, params)
# A note about adding rules:
# Whenever we add any rule to iptables, arptables or ebtables we first
# delete the same rule to ensure the rule only exists once.
def apply_iptables_rules(command, params):
    """Allow forwarded IP traffic from this VIF only with its assigned
    source address.  The rule is deleted first so it never exists twice."""
    iptables = lambda *rule: novalib.execute('/sbin/iptables', *rule)

    accept_rule = ('-m', 'physdev',
                   '--physdev-in', params['VIF'],
                   '-s', params['IP'],
                   '-j', 'ACCEPT')
    iptables('-D', 'FORWARD', *accept_rule)
    if command == 'online':
        iptables('-A', 'FORWARD', *accept_rule)
def apply_arptables_rules(command, params):
    """Allow ARP Requests/Replies from this VIF only with its assigned
    IP and MAC.  Each rule is deleted before being (re)added so it never
    exists twice.
    """
    arptables = lambda *rule: novalib.execute('/sbin/arptables', *rule)

    arptables('-D', 'FORWARD', '--opcode', 'Request',
              '--in-interface', params['VIF'],
              '--source-ip', params['IP'],
              '--source-mac', params['MAC'],
              '-j', 'ACCEPT')
    arptables('-D', 'FORWARD', '--opcode', 'Reply',
              '--in-interface', params['VIF'],
              '--source-ip', params['IP'],
              '--source-mac', params['MAC'],
              '-j', 'ACCEPT')
    if command == 'online':
        arptables('-A', 'FORWARD', '--opcode', 'Request',
                  '--in-interface', params['VIF'],
                  # BUG FIX: --source-ip was missing here, so the -A rule
                  # never matched the -D above (leaking a rule on every
                  # re-apply) and accepted ARP requests from any source IP.
                  '--source-ip', params['IP'],
                  '--source-mac', params['MAC'],
                  '-j', 'ACCEPT')
        arptables('-A', 'FORWARD', '--opcode', 'Reply',
                  '--in-interface', params['VIF'],
                  '--source-ip', params['IP'],
                  '--source-mac', params['MAC'],
                  '-j', 'ACCEPT')
def apply_ebtables_rules(command, params):
    """Layer-2 filtering for this VIF: accept ARP and IPv4 frames destined
    to the VIF's address, and drop frames whose source MAC is not the
    VIF's own.  Each rule is deleted before being (re)added so it never
    exists twice.
    """
    ebtables = lambda *rule: novalib.execute("/sbin/ebtables", *rule)

    arp_accept = ('-p', '0806', '-o', params['VIF'],
                  '--arp-ip-dst', params['IP'],
                  '-j', 'ACCEPT')
    ip_accept = ('-p', '0800', '-o', params['VIF'],
                 '--ip-dst', params['IP'],
                 '-j', 'ACCEPT')
    spoof_drop = ('-s', '!', params['MAC'],
                  '-i', params['VIF'], '-j', 'DROP')

    ebtables('-D', 'FORWARD', *arp_accept)
    ebtables('-D', 'FORWARD', *ip_accept)
    if command == 'online':
        ebtables('-A', 'FORWARD', *arp_accept)
        ebtables('-A', 'FORWARD', *ip_accept)
    ebtables('-D', 'FORWARD', *spoof_drop)
    if command == 'online':
        # Insert at position 1 so the anti-spoofing drop wins over any
        # later ACCEPT rules.
        ebtables('-I', 'FORWARD', '1', *spoof_drop)
if __name__ == "__main__":
    # Usage: vif_rules.py <dom_id> online|offline [vif]
    if len(sys.argv) < 3:
        print ("usage: %s dom_id online|offline [vif]" %
               os.path.basename(sys.argv[0]))
        sys.exit(1)
    else:
        dom_id, command = sys.argv[1:3]
        # Conditional expression instead of the old "and/or" trick, which
        # wrongly mapped an empty-string vif argument to None.
        vif = sys.argv[3] if len(sys.argv) == 4 else None
        main(dom_id, command, vif)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.