gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.drivers import base
from senlin.drivers.openstack import sdk
class NeutronClient(base.DriverBase):
    """Driver for the Neutron (network) V2 API."""

    def __init__(self, params):
        super(NeutronClient, self).__init__(params)
        self.conn = sdk.create_connection(params)

    @staticmethod
    def _filtered(**kwargs):
        """Return the given keyword arguments with all None values dropped."""
        return dict((k, v) for k, v in kwargs.items() if v is not None)

    @sdk.translate_exception
    def network_get(self, name_or_id):
        """Find a network by name or ID."""
        return self.conn.network.find_network(name_or_id)

    @sdk.translate_exception
    def subnet_get(self, name_or_id):
        """Find a subnet by name or ID."""
        return self.conn.network.find_subnet(name_or_id)

    @sdk.translate_exception
    def loadbalancer_get(self, name_or_id):
        """Find a load balancer by name or ID."""
        return self.conn.network.find_load_balancer(name_or_id)

    @sdk.translate_exception
    def loadbalancer_list(self):
        """Return all load balancers as a list."""
        return list(self.conn.network.load_balancers())

    @sdk.translate_exception
    def loadbalancer_create(self, vip_subnet_id, vip_address=None,
                            admin_state_up=True, name=None, description=None):
        """Create a load balancer with its VIP on the given subnet."""
        attrs = self._filtered(vip_address=vip_address, name=name,
                               description=description)
        attrs['vip_subnet_id'] = vip_subnet_id
        attrs['admin_state_up'] = admin_state_up
        return self.conn.network.create_load_balancer(**attrs)

    @sdk.translate_exception
    def loadbalancer_delete(self, lb_id, ignore_missing=True):
        """Delete the load balancer identified by lb_id."""
        self.conn.network.delete_load_balancer(
            lb_id, ignore_missing=ignore_missing)

    @sdk.translate_exception
    def listener_get(self, name_or_id):
        """Find a listener by name or ID."""
        return self.conn.network.find_listener(name_or_id)

    @sdk.translate_exception
    def listener_list(self):
        """Return all listeners as a list."""
        return list(self.conn.network.listeners())

    @sdk.translate_exception
    def listener_create(self, loadbalancer_id, protocol, protocol_port,
                        connection_limit=None,
                        admin_state_up=True, name=None, description=None):
        """Create a listener on the given load balancer."""
        attrs = self._filtered(connection_limit=connection_limit,
                               name=name, description=description)
        attrs.update(loadbalancer_id=loadbalancer_id, protocol=protocol,
                     protocol_port=protocol_port,
                     admin_state_up=admin_state_up)
        return self.conn.network.create_listener(**attrs)

    @sdk.translate_exception
    def listener_delete(self, listener_id, ignore_missing=True):
        """Delete the listener identified by listener_id."""
        self.conn.network.delete_listener(listener_id,
                                          ignore_missing=ignore_missing)

    @sdk.translate_exception
    def pool_get(self, name_or_id):
        """Find a pool by name or ID."""
        return self.conn.network.find_pool(name_or_id)

    @sdk.translate_exception
    def pool_list(self):
        """Return all pools as a list."""
        return list(self.conn.network.pools())

    @sdk.translate_exception
    def pool_create(self, lb_algorithm, listener_id, protocol,
                    admin_state_up=True, name=None, description=None):
        """Create a pool attached to the given listener."""
        attrs = self._filtered(name=name, description=description)
        attrs.update(lb_algorithm=lb_algorithm, listener_id=listener_id,
                     protocol=protocol, admin_state_up=admin_state_up)
        return self.conn.network.create_pool(**attrs)

    @sdk.translate_exception
    def pool_delete(self, pool_id, ignore_missing=True):
        """Delete the pool identified by pool_id."""
        self.conn.network.delete_pool(pool_id,
                                      ignore_missing=ignore_missing)

    @sdk.translate_exception
    def pool_member_get(self, pool_id, name_or_id):
        """Find a member of the given pool by name or ID."""
        return self.conn.network.find_pool_member(name_or_id, pool_id)

    @sdk.translate_exception
    def pool_member_list(self, pool_id):
        """Return all members of the given pool as a list."""
        return list(self.conn.network.pool_members(pool_id))

    @sdk.translate_exception
    def pool_member_create(self, pool_id, address, protocol_port, subnet_id,
                           weight=None, admin_state_up=True):
        """Add a member (backend address) to the given pool."""
        attrs = self._filtered(weight=weight)
        attrs.update(address=address, protocol_port=protocol_port,
                     admin_state_up=admin_state_up, subnet_id=subnet_id)
        return self.conn.network.create_pool_member(pool_id, **attrs)

    @sdk.translate_exception
    def pool_member_delete(self, pool_id, member_id, ignore_missing=True):
        """Remove a member from the given pool."""
        self.conn.network.delete_pool_member(
            member_id, pool_id, ignore_missing=ignore_missing)

    @sdk.translate_exception
    def healthmonitor_get(self, name_or_id):
        """Find a health monitor by name or ID."""
        return self.conn.network.find_health_monitor(name_or_id)

    @sdk.translate_exception
    def healthmonitor_list(self):
        """Return all health monitors as a list."""
        return list(self.conn.network.health_monitors())

    @sdk.translate_exception
    def healthmonitor_create(self, hm_type, delay, timeout, max_retries,
                             pool_id, admin_state_up=True,
                             http_method=None, url_path=None,
                             expected_codes=None):
        """Create a health monitor for the given pool."""
        attrs = {
            'type': hm_type,
            'delay': delay,
            'timeout': timeout,
            'max_retries': max_retries,
            'pool_id': pool_id,
            'admin_state_up': admin_state_up,
        }
        # HTTP-specific options are only forwarded for HTTP monitors.
        # TODO(anyone): verify if this is correct
        if hm_type == 'HTTP':
            attrs.update(self._filtered(http_method=http_method,
                                        url_path=url_path,
                                        expected_codes=expected_codes))
        return self.conn.network.create_health_monitor(**attrs)

    @sdk.translate_exception
    def healthmonitor_delete(self, hm_id, ignore_missing=True):
        """Delete the health monitor identified by hm_id."""
        self.conn.network.delete_health_monitor(
            hm_id, ignore_missing=ignore_missing)
| |
import logging
from billy.scrape import Scraper, SourcedObject
logger = logging.getLogger('billy')
class BillScraper(Scraper):
    """Base class for per-state bill scrapers."""

    scraper_type = 'bills'

    def scrape(self, chamber, session):
        """Collect every bill for one chamber/session pair.

        Subclasses must override this.  Implementations should raise a
        :class:`NoDataForPeriod` exception when bills cannot be scraped
        for the requested session.
        """
        raise NotImplementedError('BillScrapers must define a scrape method')

    # Persist a scraped Bill via the generic object-saving machinery.
    save_bill = Scraper.save_object
class Bill(SourcedObject):
    """
    Object representing a piece of legislation.

    See :class:`~billy.scrape.SourcedObject` for notes on
    extra attributes/fields.
    """

    def __init__(self, session, chamber, bill_id, title, **kwargs):
        """
        Create a new :obj:`Bill`.

        :param session: the session in which the bill was introduced.
        :param chamber: the chamber in which the bill was introduced:
          either 'upper' or 'lower'
        :param bill_id: an identifier assigned to this bill by the legislature
          (should be unique within the context of this chamber/session)
          e.g.: 'HB 1', 'S. 102', 'H.R. 18'
        :param title: a title or short description of this bill provided by
          the official source

        Any additional keyword arguments will be associated with this
        bill and stored in the database.
        """
        super(Bill, self).__init__('bill', **kwargs)
        # Tracks version URLs already added, for duplicate detection.
        self._seen_versions = set()
        self['session'] = session
        self['chamber'] = chamber
        self['bill_id'] = bill_id
        self['title'] = title
        self['sponsors'] = []
        self['votes'] = []
        self['versions'] = []
        self['actions'] = []
        self['documents'] = []
        self['alternate_titles'] = []
        self['companions'] = []
        # Normalize the optional 'type' kwarg to a list, defaulting to
        # ['bill'].  (Fixed idiom: 'type' not in kwargs.)
        if 'type' not in kwargs or not kwargs['type']:
            self['type'] = ['bill']
        elif isinstance(kwargs['type'], basestring):
            self['type'] = [kwargs['type']]
        else:
            self['type'] = list(kwargs['type'])

    def add_sponsor(self, type, name, **kwargs):
        """
        Associate a sponsor with this bill.

        :param type: the type of sponsorship, e.g. 'primary', 'cosponsor'
        :param name: the name of the sponsor as provided by the official source
        """
        self['sponsors'].append(dict(type=type, name=name, **kwargs))

    def add_document(self, name, url, mimetype=None, **kwargs):
        """
        Add a document or media item that is related to the bill.

        Use this method to add documents such as Fiscal Notes, Analyses,
        Amendments, or public hearing recordings.

        :param name: a name given to the document, e.g.
          'Fiscal Note for Amendment LCO 6544'
        :param url: link to location of document or file
        :param mimetype: MIME type of the document

        If multiple formats of a document are provided, a good rule of
        thumb is to prefer text, followed by html, followed by pdf/word/etc.
        """
        d = dict(name=name, url=url, **kwargs)
        if mimetype:
            d['mimetype'] = mimetype
        self['documents'].append(d)

    def add_version(self, name, url, mimetype=None, on_duplicate='error',
                    **kwargs):
        """
        Add a version of the text of this bill.

        :param name: a name given to this version of the text, e.g.
          'As Introduced', 'Version 2', 'As amended', 'Enrolled'
        :param url: the location of this version on the legislative website.
        :param mimetype: MIME type of the document
        :param on_duplicate: What to do if a duplicate is seen:
          error - default option, raises a ValueError
          ignore - add the document twice (rarely the right choice)
          use_new - use the new name, removing the old document
          use_old - use the old name, not adding the new document

        If multiple formats are provided, a good rule of thumb is to
        prefer text, followed by html, followed by pdf/word/etc.

        :raises ValueError: if mimetype is missing, or if *url* was already
          added and on_duplicate is 'error'.
        """
        if not mimetype:
            raise ValueError('mimetype parameter to add_version is required')
        if on_duplicate != 'ignore':
            if url in self._seen_versions:
                if on_duplicate == 'error':
                    raise ValueError('duplicate version url %s' % url)
                elif on_duplicate == 'use_new':
                    # delete the old version
                    self['versions'] = [v for v in self['versions']
                                        if v['url'] != url]
                elif on_duplicate == 'use_old':
                    return  # do nothing
            self._seen_versions.add(url)
        d = dict(name=name, url=url, mimetype=mimetype, **kwargs)
        self['versions'].append(d)

    def add_action(self, actor, action, date, type=None, committees=None,
                   legislators=None, **kwargs):
        """
        Add an action that was performed on this bill.

        :param actor: a string representing who performed the action.
          If the action is associated with one of the chambers this
          should be 'upper' or 'lower'. Alternatively, this could be
          the name of a committee, a specific legislator, or an outside
          actor such as 'Governor'.
        :param action: a string representing the action performed, e.g.
          'Introduced', 'Signed by the Governor', 'Amended'
        :param date: the date/time this action was performed.
        :param type: a type classification for this action
        :param committees: a committee or list of committees to associate with
          this action
        :param legislators: a legislator or list of legislators to associate
          with this action
        """
        def _cleanup_list(obj, default):
            # Coerce None/str/other-iterable into a list (or the default).
            if not obj:
                obj = default
            elif isinstance(obj, basestring):
                obj = [obj]
            elif not isinstance(obj, list):
                obj = list(obj)
            return obj

        type = _cleanup_list(type, ['other'])
        committees = _cleanup_list(committees, [])
        legislators = _cleanup_list(legislators, [])

        if 'committee' in kwargs:
            raise ValueError("invalid param 'committee' passed to add_action, "
                             "must use committees")

        # _cleanup_list already normalized committees to a list, so the
        # former isinstance(committees, basestring) re-check was dead code
        # and has been removed.
        related_entities = []
        for committee in committees:
            related_entities.append({
                "type": "committee",
                "name": committee
            })
        for legislator in legislators:
            related_entities.append({
                "type": "legislator",
                "name": legislator
            })
        self['actions'].append(dict(actor=actor, action=action,
                                    date=date, type=type,
                                    related_entities=related_entities,
                                    **kwargs))

    def add_vote(self, vote):
        """
        Associate a :class:`~billy.scrape.votes.Vote` object with this
        bill.
        """
        self['votes'].append(vote)

    def add_title(self, title):
        """
        Associate an alternate title with this bill.
        """
        self['alternate_titles'].append(title)

    def add_companion(self, bill_id, session=None, chamber=None):
        """
        Associate another bill with this one.

        If session isn't set it will be set to self['session'].
        """
        companion = {'bill_id': bill_id,
                     'session': session or self['session'],
                     'chamber': chamber}
        self['companions'].append(companion)

    def get_filename(self):
        """Return an ASCII-safe JSON filename for this bill."""
        filename = "%s_%s_%s.json" % (self['session'], self['chamber'],
                                      self['bill_id'])
        return filename.encode('ascii', 'replace')

    def __unicode__(self):
        return "%s %s: %s" % (self['chamber'], self['session'],
                              self['bill_id'])
| |
import logging
import os
import pytest
from cu2qu.ufo import CURVE_TYPE_LIB_KEY
from fontTools import designspaceLib
import ufo2ft
from ufo2ft.constants import (
COLOR_LAYER_MAPPING_KEY,
COLOR_LAYERS_KEY,
COLOR_PALETTES_KEY,
)
from ufo2ft.filters import FILTERS_KEY, loadFilterFromString
from ufo2ft.filters.explodeColorLayerGlyphs import ExplodeColorLayerGlyphsFilter
from ufo2ft.preProcessor import (
TTFInterpolatablePreProcessor,
TTFPreProcessor,
_init_explode_color_layer_glyphs_filter,
)
def getpath(filename):
    """Return the path of *filename* inside this module's test data dir."""
    return os.path.join(os.path.dirname(__file__), "data", filename)
def glyph_has_qcurve(ufo, glyph_name):
    """Return True if any contour of *glyph_name* has a quadratic segment."""
    for contour in ufo[glyph_name]:
        for segment in contour:
            if segment.segmentType == "qcurve":
                return True
    return False
class TTFPreProcessorTest:
    """Tests for TTFPreProcessor: cubic-to-quadratic conversion and filters."""

    def test_no_inplace(self, FontClass):
        """inplace=False converts the returned glyph set, not the UFO."""
        ufo = FontClass(getpath("TestFont.ufo"))
        glyphSet = TTFPreProcessor(ufo, inplace=False).process()
        assert not glyph_has_qcurve(ufo, "c")
        assert glyph_has_qcurve(glyphSet, "c")
        assert CURVE_TYPE_LIB_KEY not in ufo.layers.defaultLayer.lib

    def test_inplace_remember_curve_type(self, FontClass, caplog):
        """rememberCurveType=True stamps the layer lib and skips reconversion."""
        caplog.set_level(logging.ERROR)
        ufo = FontClass(getpath("TestFont.ufo"))
        assert CURVE_TYPE_LIB_KEY not in ufo.lib
        assert CURVE_TYPE_LIB_KEY not in ufo.layers.defaultLayer.lib
        assert not glyph_has_qcurve(ufo, "c")
        TTFPreProcessor(ufo, inplace=True, rememberCurveType=True).process()
        # The marker is stored on the default layer's lib, not the font lib.
        assert CURVE_TYPE_LIB_KEY not in ufo.lib
        assert ufo.layers.defaultLayer.lib[CURVE_TYPE_LIB_KEY] == "quadratic"
        assert glyph_has_qcurve(ufo, "c")
        logger = "ufo2ft.filters.cubicToQuadratic"
        with caplog.at_level(logging.INFO, logger=logger):
            TTFPreProcessor(ufo, inplace=True, rememberCurveType=True).process()
        # Second pass logs that conversion was already done.
        assert len(caplog.records) == 1
        assert "Curves already converted to quadratic" in caplog.text
        assert glyph_has_qcurve(ufo, "c")

    def test_inplace_no_remember_curve_type(self, FontClass):
        """Without rememberCurveType no lib marker is written either pass."""
        ufo = FontClass(getpath("TestFont.ufo"))
        assert CURVE_TYPE_LIB_KEY not in ufo.lib
        assert CURVE_TYPE_LIB_KEY not in ufo.layers.defaultLayer.lib
        for _ in range(2):
            TTFPreProcessor(ufo, inplace=True, rememberCurveType=False).process()
            assert CURVE_TYPE_LIB_KEY not in ufo.lib
            assert CURVE_TYPE_LIB_KEY not in ufo.layers.defaultLayer.lib
            assert glyph_has_qcurve(ufo, "c")

    def test_custom_filters(self, FontClass):
        """Per-UFO lib filters (pre and post) are applied by process()."""
        ufo1 = FontClass(getpath("TestFont.ufo"))
        ufo1.lib[FILTERS_KEY] = [
            {"name": "transformations", "kwargs": {"OffsetX": -40}, "pre": True}
        ]
        ufo2 = FontClass(getpath("TestFont.ufo"))
        ufo2.lib[FILTERS_KEY] = [{"name": "transformations", "kwargs": {"OffsetY": 10}}]
        glyphSets0 = TTFPreProcessor(ufo1).process()
        glyphSets1 = TTFPreProcessor(ufo2).process()
        assert (glyphSets0["a"][0][0].x - glyphSets1["a"][0][0].x) == -40
        assert (glyphSets1["a"][0][0].y - glyphSets0["a"][0][0].y) == 10

    def test_custom_filters_as_argument(self, FontClass):
        """Filter objects passed via the filters= argument are applied."""
        from ufo2ft.filters import RemoveOverlapsFilter, TransformationsFilter

        ufo1 = FontClass(getpath("TestFont.ufo"))
        ufo2 = FontClass(getpath("TestFont.ufo"))
        filter1 = RemoveOverlapsFilter(backend="pathops")
        filter2 = TransformationsFilter(include=["d"], pre=True, OffsetY=-200)
        filter3 = TransformationsFilter(OffsetX=10)
        glyphSets0 = TTFPreProcessor(
            ufo1, filters=[filter1, filter2, filter3]
        ).process()
        glyphSets1 = TTFPreProcessor(
            ufo2, filters=[filter1, filter2, filter3]
        ).process()
        # Both UFOs have the same filters applied
        assert (glyphSets0["a"][0][0].x - glyphSets1["a"][0][0].x) == 0
        # "a" has initially its starting point at (66, 0)
        assert (glyphSets0["a"][0][0].x, glyphSets0["a"][0][0].y) == (76, 0)
        assert (glyphSets1["a"][0][0].x, glyphSets1["a"][0][0].y) == (76, 0)
        # A component was shifted to overlap with another in a pre-filter
        # filter2, before overlaps were removed in a post-filter filter1
        assert len(glyphSets0["d"].components) == 0
class TTFInterpolatablePreProcessorTest:
    """Tests for TTFInterpolatablePreProcessor across multiple UFOs."""

    def test_no_inplace(self, FontClass):
        """inplace=False leaves every source UFO untouched."""
        ufo1 = FontClass(getpath("TestFont.ufo"))
        ufo2 = FontClass(getpath("TestFont.ufo"))
        ufos = [ufo1, ufo2]
        assert CURVE_TYPE_LIB_KEY not in ufo1.lib
        assert CURVE_TYPE_LIB_KEY not in ufo1.layers.defaultLayer.lib
        assert not glyph_has_qcurve(ufo1, "c")
        glyphSets = TTFInterpolatablePreProcessor(ufos, inplace=False).process()
        for i in range(2):
            assert glyph_has_qcurve(glyphSets[i], "c")
            assert CURVE_TYPE_LIB_KEY not in ufos[i].lib
            assert CURVE_TYPE_LIB_KEY not in ufos[i].layers.defaultLayer.lib

    def test_inplace_remember_curve_type(self, FontClass):
        """rememberCurveType=True stamps each UFO's default layer lib."""
        ufo1 = FontClass(getpath("TestFont.ufo"))
        ufo2 = FontClass(getpath("TestFont.ufo"))
        ufos = [ufo1, ufo2]
        assert CURVE_TYPE_LIB_KEY not in ufo1.lib
        assert CURVE_TYPE_LIB_KEY not in ufo1.layers.defaultLayer.lib
        assert not glyph_has_qcurve(ufo1, "c")
        TTFInterpolatablePreProcessor(
            ufos, inplace=True, rememberCurveType=True
        ).process()
        assert ufo1.layers.defaultLayer.lib[CURVE_TYPE_LIB_KEY] == "quadratic"
        assert glyph_has_qcurve(ufo1, "c")
        assert ufo2.layers.defaultLayer.lib[CURVE_TYPE_LIB_KEY] == "quadratic"
        assert glyph_has_qcurve(ufo2, "c")

    def test_inplace_no_remember_curve_type(self, FontClass):
        """Without rememberCurveType no lib marker is written."""
        ufo1 = FontClass(getpath("TestFont.ufo"))
        ufo2 = FontClass(getpath("TestFont.ufo"))
        ufos = [ufo1, ufo2]
        for _ in range(2):
            TTFInterpolatablePreProcessor(
                ufos, inplace=True, rememberCurveType=False
            ).process()
            assert CURVE_TYPE_LIB_KEY not in ufo1.layers.defaultLayer.lib
            assert CURVE_TYPE_LIB_KEY not in ufo2.layers.defaultLayer.lib
            assert glyph_has_qcurve(ufo1, "c")
            assert glyph_has_qcurve(ufo2, "c")

    def test_custom_filters(self, FontClass):
        """Per-UFO lib filters are applied independently to each glyph set."""
        ufo1 = FontClass(getpath("TestFont.ufo"))
        ufo1.lib[FILTERS_KEY] = [
            {"name": "transformations", "kwargs": {"OffsetX": -40}, "pre": True}
        ]
        ufo2 = FontClass(getpath("TestFont.ufo"))
        ufo2.lib[FILTERS_KEY] = [{"name": "transformations", "kwargs": {"OffsetY": 10}}]
        ufos = [ufo1, ufo2]
        glyphSets = TTFInterpolatablePreProcessor(ufos).process()
        assert (glyphSets[0]["a"][0][0].x - glyphSets[1]["a"][0][0].x) == -40
        assert (glyphSets[1]["a"][0][0].y - glyphSets[0]["a"][0][0].y) == 10

    def test_custom_filters_as_argument(self, FontClass):
        """Filters passed via the filters= argument apply to all UFOs."""
        ufo1 = FontClass(getpath("TestFont.ufo"))
        ufo2 = FontClass(getpath("TestFont.ufo"))
        filter1 = loadFilterFromString("RemoveOverlapsFilter(backend='pathops')")
        filter2 = loadFilterFromString(
            "TransformationsFilter(OffsetY=-200, include=['d'], pre=True)"
        )
        filter3 = loadFilterFromString("TransformationsFilter(OffsetX=10)")
        ufos = [ufo1, ufo2]
        glyphSets = TTFInterpolatablePreProcessor(
            ufos,
            filters=[filter1, filter2, filter3],
        ).process()
        # Both UFOs have the same filters applied
        assert (glyphSets[0]["a"][0][0].x - glyphSets[1]["a"][0][0].x) == 0
        # "a" has initially its starting point at (66, 0)
        assert (glyphSets[0]["a"][0][0].x, glyphSets[0]["a"][0][0].y) == (76, 0)
        assert (glyphSets[1]["a"][0][0].x, glyphSets[1]["a"][0][0].y) == (76, 0)
        # A component was shifted to overlap with another in a pre-filter
        # filter2, before overlaps were removed in a post-filter filter1
        assert len(glyphSets[0]["d"].components) == 0
class SkipExportGlyphsTest:
    """Tests for the public.skipExportGlyphs mechanism (decompose+drop)."""

    def test_skip_export_glyphs_filter(self, FontClass):
        """Skipped glyphs are removed and their components decomposed."""
        from ufo2ft.util import _GlyphSet

        ufo = FontClass(getpath("IncompatibleMasters/NewFont-Regular.ufo"))
        skipExportGlyphs = ["b", "d"]
        glyphSet = _GlyphSet.from_layer(ufo, skipExportGlyphs=skipExportGlyphs)
        assert set(glyphSet.keys()) == {"a", "c", "e", "f"}
        assert len(glyphSet["a"]) == 1
        assert not glyphSet["a"].components
        assert len(glyphSet["c"]) == 5  # 4 "d" components decomposed plus 1 outline
        # Components referencing non-skipped glyphs survive untouched.
        assert list(c.baseGlyph for c in glyphSet["c"].components) == ["a"]
        assert len(glyphSet["e"]) == 1
        assert list(c.baseGlyph for c in glyphSet["e"].components) == ["c", "c"]
        assert not glyphSet["f"]
        assert list(c.baseGlyph for c in glyphSet["f"].components) == ["a", "a"]

    def test_skip_export_glyphs_filter_nested(self, FontClass):
        """A skipped glyph that itself has components is decomposed fully."""
        from ufo2ft.util import _GlyphSet

        ufo = FontClass()
        glyph_N = ufo.newGlyph("N")
        glyph_N.width = 100
        pen = glyph_N.getPen()
        pen.moveTo((0, 0))
        pen.lineTo((300, 0))
        pen.lineTo((300, 400))
        pen.lineTo((0, 400))
        pen.closePath()
        glyph_o = ufo.newGlyph("o")
        glyph_o.width = 100
        pen = glyph_o.getPen()
        pen.moveTo((0, 0))
        pen.lineTo((300, 0))
        pen.lineTo((300, 300))
        pen.lineTo((0, 300))
        pen.closePath()
        # "_o.numero" is a flipped "o" component plus one extra contour.
        glyph_onumero = ufo.newGlyph("_o.numero")
        glyph_onumero.width = 100
        pen = glyph_onumero.getPen()
        pen.addComponent("o", (-1, 0, 0, -1, 0, 100))
        pen.moveTo((0, 0))
        pen.lineTo((300, 0))
        pen.lineTo((300, 50))
        pen.lineTo((0, 50))
        pen.closePath()
        glyph_numero = ufo.newGlyph("numero")
        glyph_numero.width = 200
        pen = glyph_numero.getPen()
        pen.addComponent("N", (1, 0, 0, 1, 0, 0))
        pen.addComponent("_o.numero", (1, 0, 0, 1, 400, 0))
        skipExportGlyphs = ["_o.numero"]
        glyphSet = _GlyphSet.from_layer(ufo, skipExportGlyphs=skipExportGlyphs)
        assert len(glyphSet["numero"].components) == 1  # The "N" component
        assert len(glyphSet["numero"]) == 2  # The two contours of "o" and "_o.numero"

    def test_skip_export_glyphs_designspace(self, FontClass):
        # Designspace has a public.skipExportGlyphs lib key excluding "b" and "d".
        designspace = designspaceLib.DesignSpaceDocument.fromfile(
            getpath("IncompatibleMasters/IncompatibleMasters.designspace")
        )
        for source in designspace.sources:
            source.font = FontClass(
                getpath(os.path.join("IncompatibleMasters", source.filename))
            )
        ufo2ft.compileInterpolatableTTFsFromDS(designspace, inplace=True)
        for source in designspace.sources:
            assert source.font.getGlyphOrder() == [".notdef", "a", "c", "e", "f"]
            # Skipped glyphs must also disappear from GPOS coverage.
            gpos_table = source.font["GPOS"].table
            assert gpos_table.LookupList.Lookup[0].SubTable[0].Coverage.glyphs == [
                "a",
                "e",
                "f",
            ]
            glyphs = source.font["glyf"].glyphs
            for g in glyphs.values():
                g.expand(source.font["glyf"])
            assert glyphs["a"].numberOfContours == 1
            assert not hasattr(glyphs["a"], "components")
            assert glyphs["c"].numberOfContours == 6
            assert not hasattr(glyphs["c"], "components")
            assert glyphs["e"].numberOfContours == 13
            assert not hasattr(glyphs["e"], "components")
            assert glyphs["f"].isComposite()

    def test_skip_export_glyphs_multi_ufo(self, FontClass):
        # Bold has a public.skipExportGlyphs lib key excluding "b", "d" and "f".
        ufo1 = FontClass(getpath("IncompatibleMasters/NewFont-Regular.ufo"))
        ufo2 = FontClass(getpath("IncompatibleMasters/NewFont-Bold.ufo"))
        fonts = ufo2ft.compileInterpolatableTTFs([ufo1, ufo2], inplace=True)
        for font in fonts:
            # The union of both UFOs' skip lists applies to every master.
            assert set(font.getGlyphOrder()) == {".notdef", "a", "c", "e"}
            gpos_table = font["GPOS"].table
            assert gpos_table.LookupList.Lookup[0].SubTable[0].Coverage.glyphs == ["a"]
            glyphs = font["glyf"].glyphs
            for g in glyphs.values():
                g.expand(font["glyf"])
            assert glyphs["a"].numberOfContours == 1
            assert not hasattr(glyphs["a"], "components")
            assert glyphs["c"].numberOfContours == 6
            assert not hasattr(glyphs["c"], "components")
            assert glyphs["e"].numberOfContours == 13
            assert not hasattr(glyphs["e"], "components")

    def test_skip_export_glyphs_single_ufo(self, FontClass):
        # UFO has a public.skipExportGlyphs lib key excluding "b", "d" and "f".
        ufo = FontClass(getpath("IncompatibleMasters/NewFont-Bold.ufo"))
        font = ufo2ft.compileTTF(ufo, inplace=True)
        assert set(font.getGlyphOrder()) == {".notdef", "a", "c", "e"}
        gpos_table = font["GPOS"].table
        assert gpos_table.LookupList.Lookup[0].SubTable[0].Coverage.glyphs == ["a"]
        glyphs = font["glyf"].glyphs
        for g in glyphs.values():
            g.expand(font["glyf"])
        assert glyphs["a"].numberOfContours == 1
        assert not hasattr(glyphs["a"], "components")
        assert glyphs["c"].numberOfContours == 6
        assert not hasattr(glyphs["c"], "components")
        assert glyphs["e"].numberOfContours == 13
        assert not hasattr(glyphs["e"], "components")
@pytest.fixture
def color_ufo(FontClass):
    """A fresh UFO carrying a single two-color palette in its lib."""
    font = FontClass()
    palette = [(1, 0.3, 0.1, 1), (0, 0.4, 0.8, 1)]
    font.lib[COLOR_PALETTES_KEY] = [palette]
    return font
class InitExplodeColorLayerGlyphsFilterTest:
    """Tests for _init_explode_color_layer_glyphs_filter."""

    def test_no_color_palettes(self, FontClass):
        """No palettes at all: the filter is not registered."""
        pre_filters = []
        _init_explode_color_layer_glyphs_filter(FontClass(), pre_filters)
        assert pre_filters == []

    def test_no_color_layer_mapping(self, color_ufo):
        """Palettes without any layer mapping: no filter registered."""
        pre_filters = []
        _init_explode_color_layer_glyphs_filter(color_ufo, pre_filters)
        assert pre_filters == []

    def test_explicit_color_layers(self, color_ufo):
        """Explicit COLOR_LAYERS_KEY takes precedence: no filter needed."""
        color_ufo.lib[COLOR_LAYERS_KEY] = {"a": [("a.z_0", 1), ("a.z_1", 0)]}
        pre_filters = []
        _init_explode_color_layer_glyphs_filter(color_ufo, pre_filters)
        assert pre_filters == []

    def test_font_color_layer_mapping(self, color_ufo):
        """A font-level layer mapping triggers the explode filter."""
        color_ufo.lib[COLOR_LAYER_MAPPING_KEY] = [("z_0", 1), ("z_1", 0)]
        pre_filters = []
        _init_explode_color_layer_glyphs_filter(color_ufo, pre_filters)
        assert isinstance(pre_filters[0], ExplodeColorLayerGlyphsFilter)

    def test_glyph_color_layer_mapping(self, color_ufo):
        """A glyph-level layer mapping also triggers the explode filter."""
        glyph = color_ufo.newGlyph("a")
        glyph.lib[COLOR_LAYER_MAPPING_KEY] = [("z_0", 0), ("z_1", 1)]
        pre_filters = []
        _init_explode_color_layer_glyphs_filter(color_ufo, pre_filters)
        assert isinstance(pre_filters[0], ExplodeColorLayerGlyphsFilter)
| |
# -*- coding: utf-8 -*-
"""
License: BSD
(c) 2009 ::: www.CodeResort.com - BV Network AS (simon-code@bvnetwork.no)
"""
import datetime
from itertools import izip
import re
from types import GeneratorType
try:
import babel
except ImportError:
babel = None
import genshi
from trac.core import *
from trac.perm import PermissionError
from trac.resource import ResourceNotFound
from trac.util.datefmt import utc
from trac.util.text import to_unicode
from trac.web.api import RequestDone
from tracrpc.api import IRPCProtocol, XMLRPCSystem, Binary, \
RPCError, MethodNotFound, ProtocolException
from tracrpc.util import exception_to_unicode, empty, prepare_docs
__all__ = ['JsonRpcProtocol']
try:
    import json
    # Reject stripped-down lookalike modules that lack the encoder/decoder
    # classes this protocol subclasses below.
    if not (hasattr(json, 'JSONEncoder') \
            and hasattr(json, 'JSONDecoder')):
        raise AttributeError("Incorrect JSON library found.")
except (ImportError, AttributeError):
    try:
        # Fall back to the third-party simplejson (API-compatible).
        import simplejson as json
    except ImportError:
        # No usable JSON implementation: disable the whole protocol by
        # leaving json as None and exporting nothing from this module.
        json = None
        __all__ = []
if json:
    class TracRpcJSONEncoder(json.JSONEncoder):
        """ Extending the JSON encoder to support some additional types:
        1. datetime.datetime => {'__jsonclass__': ["datetime", "<rfc3339str>"]}
        2. tracrpc.api.Binary => {'__jsonclass__': ["binary", "<base64str>"]}
        3. empty => ''
        4. genshi.builder.Fragment|genshi.core.Markup => unicode
        5. babel.support.LazyProxy => unicode
        """

        def default(self, obj):
            # Serialize the known extra types; anything else is delegated
            # to the base class so an informative TypeError is raised.
            if isinstance(obj, datetime.datetime):
                # http://www.ietf.org/rfc/rfc3339.txt
                return {'__jsonclass__': ["datetime",
                                          obj.strftime('%Y-%m-%dT%H:%M:%S')]}
            elif isinstance(obj, Binary):
                return {'__jsonclass__': ["binary",
                                          obj.data.encode("base64")]}
            elif obj is empty:
                return ''
            elif isinstance(obj, (genshi.builder.Fragment,
                                  genshi.core.Markup)):
                return unicode(obj)
            elif babel and isinstance(obj, babel.support.LazyProxy):
                return unicode(obj)
            else:
                # Bug fix: the original called the JSONEncoder constructor
                # (json.JSONEncoder(self, obj)); the correct fallback is the
                # base class's default() method, which raises TypeError for
                # unserializable objects.
                return json.JSONEncoder.default(self, obj)
    class TracRpcJSONDecoder(json.JSONDecoder):
        """ Extending the JSON decoder to support some additional types:
        1. {'__jsonclass__': ["datetime", "<rfc3339str>"]} => datetime.datetime
        2. {'__jsonclass__': ["binary", "<base64str>"]} => tracrpc.api.Binary """

        # Matcher for the RFC 3339-style timestamps produced by
        # TracRpcJSONEncoder; an optional fractional-seconds group is
        # tolerated on input.
        dt = re.compile(
            '^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(?:\.(\d{1,}))?')

        def _normalize(self, obj):
            """ Helper to traverse JSON decoded object for custom types. """
            if isinstance(obj, tuple):
                return tuple(self._normalize(item) for item in obj)
            elif isinstance(obj, list):
                return [self._normalize(item) for item in obj]
            elif isinstance(obj, dict):
                # NOTE(review): comparing keys() to a list assumes Python 2,
                # where dict.keys() returns a list; under Python 3 a dict
                # view never equals a list and this branch would be skipped.
                if obj.keys() == ['__jsonclass__']:
                    kind, val = obj['__jsonclass__']
                    if kind == 'datetime':
                        dt = self.dt.match(val)
                        if not dt:
                            raise Exception(
                                "Invalid datetime string (%s)" % val)
                        # Drop unmatched (None) groups, convert the rest to
                        # ints and build a UTC-aware datetime.
                        dt = tuple([int(i) for i in dt.groups() if i])
                        kw_args = {'tzinfo': utc}
                        return datetime.datetime(*dt, **kw_args)
                    elif kind == 'binary':
                        try:
                            # Python 2 only: str.decode("base64").
                            bin = val.decode("base64")
                            return Binary(bin)
                        except:
                            raise Exception("Invalid base64 string")
                    else:
                        raise Exception("Unknown __jsonclass__: %s" % kind)
                else:
                    # Plain mapping: normalize keys and values recursively.
                    return dict(self._normalize(obj.items()))
            elif isinstance(obj, basestring):
                return to_unicode(obj)
            else:
                return obj

        def decode(self, obj, *args, **kwargs):
            # Decode normally first, then convert class-hinted values.
            obj = json.JSONDecoder.decode(self, obj, *args, **kwargs)
            return self._normalize(obj)
class JsonProtocolException(ProtocolException):
"""Impossible to handle JSON-RPC request."""
def __init__(self, details, code=-32603, title=None, show_traceback=False):
ProtocolException.__init__(self, details, title, show_traceback)
self.code = code
    class JsonRpcProtocol(Component):
        r"""
        Example `POST` request using `curl` with `Content-Type` header
        and body:
        {{{
        user: ~ > cat body.json
        {"params": ["WikiStart"], "method": "wiki.getPage", "id": 123}
        user: ~ > curl -H "Content-Type: application/json" --data @body.json ${req.abs_href.rpc()}
        {"id": 123, "error": null, "result": "= Welcome to....
        }}}
        Implementation details:
          * JSON-RPC has no formalized type system, so a class-hint system is used
            for input and output of non-standard types:
            * `{"__jsonclass__": ["datetime", "YYYY-MM-DDTHH:MM:SS"]} => DateTime (UTC)`
            * `{"__jsonclass__": ["binary", "<base64-encoded>"]} => Binary`
          * `"id"` is optional, and any marker value received with a
            request is returned with the response.
        """

        implements(IRPCProtocol)

        # IRPCProtocol methods

        def rpc_info(self):
            # (protocol name, wiki-formatted documentation) for the docs page.
            return ('JSON-RPC', prepare_docs(self.__doc__))

        def rpc_match(self):
            # (path item, content type) pairs this protocol responds to.
            yield ('rpc', 'application/json')
            # Legacy path - provided for backwards compatibility:
            yield ('jsonrpc', 'application/json')

        def parse_rpc_request(self, req, content_type):
            """ Parse JSON-RPC requests"""
            if not json:
                self.log.debug("RPC(json) call ignored (not available).")
                raise JsonProtocolException("Error: JSON-RPC not available.\n")
            try:
                data = json.load(req, cls=TracRpcJSONDecoder)
                self.log.info("RPC(json) JSON-RPC request ID : %s.", data.get('id'))
                if data.get('method') == 'system.multicall':
                    # Prepare for multicall: each signature dict gains the
                    # 'methodName' key expected by XMLRPCSystem, and the
                    # whole list becomes the single positional parameter.
                    self.log.debug("RPC(json) Multicall request %s", data)
                    params = data.get('params', [])
                    for signature in params:
                        signature['methodName'] = signature.get('method', '')
                    data['params'] = [params]
                return data
            except Exception, e:
                # Abort with exception - no data can be read
                self.log.error("RPC(json) decode error %s",
                               exception_to_unicode(e, traceback=True))
                # -32700 is the JSON-RPC "parse error" code.
                raise JsonProtocolException(e, -32700)

        def send_rpc_result(self, req, result):
            """Send JSON-RPC response back to the caller."""
            rpcreq = req.rpc
            r_id = rpcreq.get('id')
            try:
                if rpcreq.get('method') == 'system.multicall':
                    # Custom multicall: wrap each sub-result (or sub-error)
                    # individually, echoing each signature's own id if any.
                    args = (rpcreq.get('params') or [[]])[0]
                    mcresults = [self._json_result(
                                     isinstance(value, Exception) and
                                     value or value[0],
                                     sig.get('id') or r_id)
                                 for sig, value in izip(args, result)]
                    response = self._json_result(mcresults, r_id)
                else:
                    response = self._json_result(result, r_id)
                try:  # JSON encoding
                    self.log.debug("RPC(json) result: %s" % repr(response))
                    response = json.dumps(response, cls=TracRpcJSONEncoder)
                except Exception, e:
                    # Encoding failed: fall back to an error response.
                    response = json.dumps(self._json_error(e, r_id=r_id),
                                          cls=TracRpcJSONEncoder)
            except Exception, e:
                self.log.error("RPC(json) error %s" % exception_to_unicode(e,
                                                      traceback=True))
                response = json.dumps(self._json_error(e, r_id=r_id),
                                      cls=TracRpcJSONEncoder)
            self._send_response(req, response + '\n', rpcreq['mimetype'])

        def send_rpc_error(self, req, e):
            """Send a JSON-RPC fault message back to the caller. """
            rpcreq = req.rpc
            r_id = rpcreq.get('id')
            response = json.dumps(self._json_error(e, r_id=r_id),
                                  cls=TracRpcJSONEncoder)
            self._send_response(req, response + '\n', rpcreq['mimetype'])

        # Internal methods

        def _send_response(self, req, response, content_type='application/json'):
            # Write the encoded body and terminate request processing
            # (RequestDone is Trac's normal end-of-request signal).
            self.log.debug("RPC(json) encoded response: %s" % response)
            response = to_unicode(response).encode("utf-8")
            req.send_response(200)
            req.send_header('Content-Type', content_type)
            req.send_header('Content-Length', len(response))
            req.end_headers()
            req.write(response)
            raise RequestDone()

        def _json_result(self, result, r_id=None):
            """ Create JSON-RPC response dictionary. """
            if not isinstance(result, Exception):
                return {'result': result, 'error': None, 'id': r_id}
            else:
                return self._json_error(result, r_id=r_id)

        def _json_error(self, e, c=None, r_id=None):
            """ Makes a response dictionary that is an error. """
            # Map well-known exception types to fixed codes; otherwise use
            # the explicit c argument, the exception's own .code, or the
            # generic JSON-RPC internal-error code.
            if isinstance(e, MethodNotFound):
                c = -32601
            elif isinstance(e, PermissionError):
                c = 403
            elif isinstance(e, ResourceNotFound):
                c = 404
            else:
                c = c or hasattr(e, 'code') and e.code or -32603
            return {'result': None, 'id': r_id, 'error': {
                'name': hasattr(e, 'name') and e.name or 'JSONRPCError',
                'code': c,
                'message': to_unicode(e)}}
| |
import argparse
import json
import os
import sys
import tarfile
from contextlib import ExitStack
from tempfile import TemporaryDirectory
from libtiff import TIFF
from omero.cli import cli_login
from omero.gateway import BlitzGateway # noqa
from omero.constants.namespaces import NSBULKANNOTATIONS # noqa
def warn(message, image_identifier, warn_skip=False):
    """Print an ImageSpecWarning for *image_identifier* to stderr.

    With warn_skip=True a "Skipping download!" notice is appended,
    adding a period unless the message already ends in punctuation.
    """
    message = message.rstrip()
    skip_msg = ''
    if warn_skip:
        skip_msg = ' Skipping download!'
        if message[-1] not in ('.', '!', '?'):
            skip_msg = '.' + skip_msg
    print(
        'ImageSpecWarning for {0}: {1}{2}'
        .format(image_identifier, message, skip_msg),
        file=sys.stderr
    )
def find_channel_index(image, channel_name):
    """Return the 0-based index of *channel_name* in *image*, or -1.

    Matching is case-insensitive; if the channel labels don't match, the
    image's bulk map annotations ("Channels" key) are consulted as well.
    """
    wanted = channel_name.lower()
    for index, label in enumerate(image.getChannelLabels()):
        if label.lower() == wanted:
            return index
    # Check map annotation for information (this is necessary for some images)
    for ann in image.listAnnotations(NSBULKANNOTATIONS):
        for key, raw_value in ann.getValue():
            if key == "Channels":
                chunks = raw_value.replace(" ", "").split(";")
                for index, chunk in enumerate(chunks):
                    if wanted in (part.lower() for part in chunk.split(':')):
                        return index
    return -1
def get_clipping_region(image, x, y, w, h):
    """Return [x, y, w, h] clipped to the boundaries of *image*.

    Raises ValueError when (x, y) lies outside the image. A nonpositive
    or oversized w/h is extended/trimmed to the image border.
    """
    # A negative (x, y) cannot simply be shifted: that would make the
    # meaning of w and h ambiguous (shrink them or move the region?),
    # so abort instead.
    if x < 0 or y < 0:
        raise ValueError(
            'Too small upper left coordinate ({0}, {1}) for clipping region.'
            .format(x, y)
        )
    size_x, size_y = image.getSizeX(), image.getSizeY()
    if x >= size_x or y >= size_y:
        raise ValueError(
            'Upper left coordinate ({0}, {1}) of clipping region lies '
            'outside of image.'
            .format(x, y)
        )
    # Clamp the extents to what actually fits inside the image.
    w = (size_x - x) if w <= 0 else min(w, size_x - x)
    h = (size_y - y) if h <= 0 else min(h, size_y - y)
    return [x, y, w, h]
def confine_plane(image, z):
    """Clamp z-plane index *z* into the valid range of *image*."""
    if z < 0:
        return 0
    # Note: mirrors the original two-sided clamp, so a zero-size stack
    # yields -1 here just as before.
    return min(z, image.getSizeZ() - 1)
def confine_frame(image, t):
    """Clamp time-frame index *t* into the valid range of *image*."""
    if t < 0:
        return 0
    return min(t, image.getSizeT() - 1)
def get_image_array(image, tile, z, c, t):
    """Fetch the pixel data of *tile* from *image*, or None on failure."""
    pixels = image.getPrimaryPixels()
    try:
        return pixels.getTile(theZ=z, theT=t, theC=c, tile=tile)
    except Exception:
        # Report the failure; the caller decides whether to continue.
        warn('Could not download the requested region',
             '{0} (ID: {1})'.format(image.getName(), image.getId()))
        return None
def download_image_data(
    image_ids_or_dataset_id, dataset=False,
    download_original=False,
    channel=None, z_stack=0, frame=0,
    coord=(0, 0), width=0, height=0, region_spec='rectangle',
    skip_failed=False, download_tar=False, omero_host='idr.openmicroscopy.org', omero_secured=False, config_file=None
):
    """Download OMERO image regions as TIFFs, or the original files.

    Parameters:
        image_ids_or_dataset_id: list of image ids (optionally prefixed
            with 'image-'), or a single dataset id when ``dataset`` is True.
        dataset: treat the first list element as a dataset id and download
            every image it contains.
        download_original: fetch the originally uploaded file via the CLI
            instead of rendering a TIFF region.
        channel / z_stack / frame: channel name, z-plane and time frame.
        coord, width, height, region_spec: region selection; region_spec is
            'rectangle' (coord = upper-left) or 'center' (coord = center).
        skip_failed: warn and continue on per-image failures instead of
            raising.
        download_tar: collect downloads into images.tar instead of CWD.
        omero_host / omero_secured / config_file: connection settings; with
            no config file the public IDR credentials are used.

    Raises:
        ValueError: for an unknown region_spec, or (unless skip_failed)
            unknown ids / channel names.
    """
    if config_file is None:  # IDR connection
        omero_username = 'public'
        omero_password = 'public'
    else:  # other omero instance
        with open(config_file) as f:
            cfg = json.load(f)
        omero_username = cfg['username']
        omero_password = cfg['password']
        if omero_username == "" or omero_password == "":
            omero_username = 'public'
            omero_password = 'public'
    if not download_original and region_spec not in ['rectangle', 'center']:
        raise ValueError(
            'Got unknown value "{0}" as region_spec argument'
            .format(region_spec)
        )
    with ExitStack() as exit_stack:
        conn = exit_stack.enter_context(
            BlitzGateway(
                omero_username, omero_password,
                host=omero_host,
                secure=omero_secured
            )
        )
        # exit_stack.callback(conn.connect().close)
        if download_tar:
            # create an archive file to write images to
            archive = exit_stack.enter_context(
                tarfile.open('images.tar', mode='w')
            )
            tempdir = exit_stack.enter_context(
                TemporaryDirectory()
            )
        if dataset:
            dataset_warning_id = 'Dataset-ID: {0}'.format(image_ids_or_dataset_id[0])
            # Bug fix: ``image_ids`` must be bound on every path below.
            # Previously, when getObject failed with skip_failed=True, the
            # name was never assigned and the `image_ids is None` check
            # raised a NameError.
            image_ids = None
            try:
                dataset_id = int(image_ids_or_dataset_id[0])
            except ValueError:
                image_ids = None
            else:
                try:
                    # NOTE: rebinds the ``dataset`` parameter to the object.
                    dataset = conn.getObject("Dataset", dataset_id)
                except Exception as e:
                    # respect skip_failed on unexpected errors
                    if skip_failed:
                        warn(str(e), dataset_warning_id, warn_skip=True)
                        # Already warned; use an empty list so the lookup
                        # failure is not reported a second time below.
                        image_ids = []
                    else:
                        raise
                else:
                    image_ids = [image.id for image in dataset.listChildren()]
            if image_ids is None:
                if skip_failed:
                    warn(
                        'Unable to find a dataset with this ID in the '
                        'database.',
                        dataset_warning_id,
                        warn_skip=True
                    )
                else:
                    raise ValueError(
                        '{0}: Unable to find a dataset with this ID in the '
                        'database. Aborting!'
                        .format(dataset_warning_id)
                    )
        else:
            # basic argument sanity checks and adjustments
            prefix = 'image-'
            # normalize image ids by stripping off prefix if it exists
            image_ids = [
                iid[len(prefix):] if iid[:len(prefix)] == prefix else iid
                for iid in image_ids_or_dataset_id
            ]
        for image_id in image_ids:
            image_warning_id = 'Image-ID: {0}'.format(image_id)
            try:
                image_id = int(image_id)
            except ValueError:
                image = None
            else:
                try:
                    image = conn.getObject("Image", image_id)
                except Exception as e:
                    # respect skip_failed on unexpected errors
                    if skip_failed:
                        warn(str(e), image_warning_id, warn_skip=True)
                        continue
                    else:
                        raise
            if image is None:
                if skip_failed:
                    warn(
                        'Unable to find an image with this ID in the '
                        'database.',
                        image_warning_id,
                        warn_skip=True
                    )
                    continue
                raise ValueError(
                    '{0}: Unable to find an image with this ID in the '
                    'database. Aborting!'
                    .format(image_warning_id)
                )
            if not download_original:
                try:
                    # try to extract image properties
                    # if anything goes wrong here skip the image
                    # or abort.
                    image_name = os.path.splitext(image.getName())[0]
                    image_warning_id = '{0} (ID: {1})'.format(
                        image_name, image_id
                    )
                    if region_spec == 'rectangle':
                        tile = get_clipping_region(image, *coord, width, height)
                    elif region_spec == 'center':
                        tile = get_clipping_region(
                            image,
                            *_center_to_ul(*coord, width, height)
                        )
                    ori_z, z_stack = z_stack, confine_plane(image, z_stack)
                    ori_frame, frame = frame, confine_frame(image, frame)
                    num_channels = image.getSizeC()
                    if channel is None:
                        channel_index = 0
                    else:
                        channel_index = find_channel_index(image, channel)
                except Exception as e:
                    # respect skip_failed on unexpected errors
                    if skip_failed:
                        warn(str(e), image_warning_id, warn_skip=True)
                        continue
                    else:
                        raise
                # region sanity checks and warnings
                if tile[2] < width or tile[3] < height:
                    # The downloaded image region will have smaller dimensions
                    # than the specified width x height.
                    warn(
                        'Downloaded image dimensions ({0} x {1}) will be smaller '
                        'than the specified width and height ({2} x {3}).'
                        .format(tile[2], tile[3], width, height),
                        image_warning_id
                    )
                # z-stack sanity checks and warnings
                if z_stack != ori_z:
                    warn(
                        'Specified image plane ({0}) is out of bounds. Using {1} '
                        'instead.'
                        .format(ori_z, z_stack),
                        image_warning_id
                    )
                # frame sanity checks and warnings
                if frame != ori_frame:
                    warn(
                        'Specified image frame ({0}) is out of bounds. Using '
                        'frame {1} instead.'
                        .format(ori_frame, frame),
                        image_warning_id
                    )
                # channel index sanity checks and warnings
                if channel is None:
                    if num_channels > 1:
                        warn(
                            'No specific channel selected for multi-channel '
                            'image. Using first of {0} channels.'
                            .format(num_channels),
                            image_warning_id
                        )
                else:
                    if channel_index == -1 or channel_index >= num_channels:
                        if skip_failed:
                            warn(
                                str(channel)
                                + ' is not a known channel name for this image.',
                                image_warning_id,
                                warn_skip=True
                            )
                            continue
                        else:
                            raise ValueError(
                                '"{0}" is not a known channel name for image {1}. '
                                'Aborting!'
                                .format(channel, image_warning_id)
                            )
                # download and save the region as TIFF
                fname = '__'.join(
                    [image_name, str(image_id)] + [str(x) for x in tile]
                )
                try:
                    if fname[-5:] != '.tiff':
                        fname += '.tiff'
                    fname = fname.replace(' ', '_')
                    im_array = get_image_array(image, tile, z_stack, channel_index, frame)
                    if download_tar:
                        fname = os.path.join(tempdir, fname)
                    try:
                        tiff = TIFF.open(fname, mode='w')
                        tiff.write_image(im_array)
                    finally:
                        tiff.close()
                    # move image into tarball
                    if download_tar:
                        archive.add(fname, os.path.basename(fname))
                        os.remove(fname)
                except Exception as e:
                    if skip_failed:
                        # respect skip_failed on unexpected errors
                        warn(str(e), image_warning_id, warn_skip=True)
                        continue
                    else:
                        raise
            else:
                try:
                    # try to extract image properties
                    # if anything goes wrong here skip the image
                    # or abort.
                    image_name = os.path.splitext(image.getName())[0]
                    image_warning_id = '{0} (ID: {1})'.format(
                        image_name, image_id
                    )
                    original_image_name = image.getFileset().listFiles()[0].getName()
                    fname = image_name + "__" + str(image_id) + os.path.splitext(original_image_name)[1]
                    fname = fname.replace(' ', '_')
                    fname = fname.replace('/', '_')
                    download_directory = "./"
                    if download_tar:
                        download_directory = tempdir
                    with cli_login("-u", omero_username, "-s", omero_host, "-w", omero_password) as cli:
                        cli.invoke(["download", f"Image:{image_id}", download_directory])
                        if cli.rv != 0:
                            raise Exception("Download failed.")
                    # This will download to download_directory/original_image_name
                    os.rename(os.path.join(download_directory, original_image_name),
                              os.path.join(download_directory, fname))
                    # move image into tarball
                    if download_tar:
                        archive.add(os.path.join(download_directory, fname),
                                    os.path.basename(fname))
                        os.remove(os.path.join(download_directory, fname))
                except Exception as e:
                    # respect skip_failed on unexpected errors
                    if skip_failed:
                        warn(str(e), image_warning_id, warn_skip=True)
                        continue
                    else:
                        raise
def _center_to_ul(center_x, center_y, width, height):
if width > 0:
ext_x = (width - 1) // 2
ul_x = max([center_x - ext_x, 0])
width = center_x + ext_x + 1 - ul_x
else:
ul_x = 0
if height > 0:
ext_y = (height - 1) // 2
ul_y = max([center_y - ext_y, 0])
height = center_y + ext_y + 1 - ul_y
else:
ul_y = 0
return ul_x, ul_y, width, height
if __name__ == "__main__":
    # Command-line front end for download_image_data().
    p = argparse.ArgumentParser()
    p.add_argument(
        'image_ids_or_dataset_id', nargs='*', default=[],
        help='one or more IDR image ids or a single dataset id'
        'for which to retrieve data (default: '
        'read ids from stdin).'
    )
    p.add_argument(
        '--download-original', dest='download_original', action='store_true',
        help="download the original file uploaded to omero"
    )
    p.add_argument(
        '-c', '--channel',
        help='name of the channel to retrieve data for '
        '(note: the first channel of each image will be downloaded if '
        'left unspecified)'
    )
    region = p.add_mutually_exclusive_group()
    region.add_argument(
        '--rectangle', nargs=4, type=int, default=argparse.SUPPRESS,
        help='specify a clipping region for the image as x y width height, '
        'where x and y give the upper left coordinate of the rectangle '
        'to clip to. Set width and height to 0 to extend the rectangle '
        'to the actual size of the image.'
    )
    region.add_argument(
        '--center', nargs=4, type=int, default=argparse.SUPPRESS,
        help='specify a clipping region for the image as x y width height, '
        'where x and y define the center of a width x height rectangle. '
        'Set either width or height to 0 to extend the region to the '
        'actual size of the image along the x- or y-axis.\n'
        'Note: Even values for width and height will be rounded down to '
        'the nearest odd number.'
    )
    p.add_argument(
        '-f', '--frame', type=int, default=0
    )
    p.add_argument(
        '-z', '--z-stack', type=int, default=0
    )
    p.add_argument(
        '--skip-failed', action='store_true'
    )
    p.add_argument(
        '--download-tar', action='store_true'
    )
    p.add_argument(
        '-oh', '--omero-host', type=str, default="idr.openmicroscopy.org"
    )
    # NOTE(review): with action='store_true' AND default=True this flag can
    # never be switched off from the command line - confirm intent.
    p.add_argument(
        '--omero-secured', action='store_true', default=True
    )
    p.add_argument(
        '-cf', '--config-file', dest='config_file', default=None
    )
    p.add_argument(
        '--dataset', action='store_true'
    )
    args = p.parse_args()
    if not args.image_ids_or_dataset_id:
        # No positional ids given: read whitespace-separated ids from stdin.
        args.image_ids_or_dataset_id = sys.stdin.read().split()
    if args.dataset and len(args.image_ids_or_dataset_id) > 1:
        # Bug fix: warn() requires the identifier as its second positional
        # argument; the original call passed only the message and raised a
        # TypeError before the warning was ever printed.
        warn(
            "Multiple dataset ids provided. Only the first one will be used.",
            'Dataset-ID: {0}'.format(args.image_ids_or_dataset_id[0])
        )
    if 'center' in args:
        args.coord, args.width, args.height = (
            args.center[:2], args.center[2], args.center[3]
        )
        args.region_spec = 'center'
        del args.center
    elif 'rectangle' in args:
        args.coord, args.width, args.height = (
            args.rectangle[:2], args.rectangle[2], args.rectangle[3]
        )
        args.region_spec = 'rectangle'
        del args.rectangle
    download_image_data(**vars(args))
| |
# -*- encoding: utf8 -*-
from datetime import *
from decimal import Decimal
#from fastdec import mpd as Decimal
from cPickle import dumps, loads
#from sqlalchemy.dialects.postgresql.base import ARRAY
from stresstest import *
# ---
# Toggle which benchmark suites below actually run.
test_types = False
test_methods = True
test_pickle = False
test_orm = False
# ---
# Echo per-run timing/progress output from the stresstest helpers.
verbose = True
def values_results(raw_results):
    """Materialize each row by calling its .values() accessor."""
    materialized = []
    for row in raw_results:
        materialized.append(tuple(row.values()))
    return materialized
def getitem_str_results(raw_results):
    """Materialize each row via lowercase string-key lookups."""
    keys = ['id'] + ['field%d' % i for i in range(10)]
    return [tuple(row[key] for key in keys) for row in raw_results]
def getitem_fallback_results(raw_results):
    """Materialize each row via uppercase keys (case-fallback lookup path)."""
    keys = ['ID'] + ['FIELD%d' % i for i in range(10)]
    return [tuple(row[key] for key in keys) for row in raw_results]
def getitem_int_results(raw_results):
    """Materialize each row via integer positions 0..10."""
    return [tuple(row[pos] for pos in range(11)) for row in raw_results]
def getitem_long_results(raw_results):
    # Materialize each row via Python 2 ``long`` indexes to exercise the
    # row __getitem__ path with non-int integer keys (Python 2 only syntax).
    return [
        (r[0L],
         r[1L], r[2L], r[3L], r[4L], r[5L],
         r[6L], r[7L], r[8L], r[9L], r[10L])
        for r in raw_results]
def getitem_obj_results(raw_results):
    """Materialize each row via Column-object keys of the global test_table."""
    c = test_table.c
    columns = (c.id, c.field0, c.field1, c.field2, c.field3, c.field4,
               c.field5, c.field6, c.field7, c.field8, c.field9)
    return [tuple(row[col] for col in columns) for row in raw_results]
def slice_results(raw_results):
    """Materialize each row as the concatenation of two slices (0:6 + 6:11)."""
    sliced = []
    for row in raw_results:
        sliced.append(row[0:6] + row[6:11])
    return sliced
# ---------- #
# Test types #
# ---------- #
# Array
#def genarrayvalue(rnum, fnum):
# return [fnum, fnum + 1, fnum + 2]
#arraytest = (ARRAY(Integer), genarrayvalue,
# dict(num_fields=100, num_records=1000,
# engineurl='postgresql:///test'))
# Boolean
def genbooleanvalue(rnum, fnum):
    """Boolean derived from the field number; None (NULL) every 4th record."""
    if rnum % 4 == 0:
        return None
    return bool(fnum % 2)
# Fixture: (column type, value generator, run parameters).
booleantest = (Boolean, genbooleanvalue, dict(num_records=100000))
# Datetime
def gendatetimevalue(rnum, fnum):
    # One constant datetime for most records; None (NULL) every 4th one.
    return (rnum % 4) and datetime(2005, 3, 3) or None
datetimetest = (DateTime, gendatetimevalue, dict(num_records=10000))
# Decimal
def gendecimalvalue(rnum, fnum):
    """Decimal of 0.25 * fnum for most records; None every 4th record."""
    if rnum % 4 == 0:
        return None
    return Decimal(str(0.25 * fnum))
decimaltest = (Numeric(10, 2), gendecimalvalue, dict(num_records=10000))
# Interval
# no microseconds because Postgres does not seem to support it
from_epoch = timedelta(14643, 70235)
def genintervalvalue(rnum, fnum):
    # Constant interval for every record.
    return from_epoch
intervaltest = (Interval, genintervalvalue,
                dict(num_fields=2, num_records=100000))
# PickleType
def genpicklevalue(rnum, fnum):
    # Small dict payload for most records; None (NULL) every 4th one.
    return (rnum % 4) and {'str': "value%d" % fnum, 'int': rnum} or None
pickletypetest = (PickleType, genpicklevalue,
                  dict(num_fields=1, num_records=100000))
# TypeDecorator
class MyIntType(TypeDecorator):
    # TypeDecorator round-trip: values are scaled by 10 on the way into the
    # database and unscaled on the way out.
    impl = Integer
    def process_bind_param(self, value, dialect):
        return value * 10
    def process_result_value(self, value, dialect):
        # NOTE(review): ``/`` is integer division under Python 2 (this
        # file's dialect); under Python 3 it would yield a float.
        return value / 10
    def copy(self):
        return MyIntType()
def genmyintvalue(rnum, fnum):
    """Deterministic int derived from the record and field numbers."""
    return fnum + rnum
# Fixture: TypeDecorator benchmark.
typedecoratortest = (MyIntType, genmyintvalue,
                     dict(num_records=100000))
# Unicode
def genunicodevalue(rnum, fnum):
    """Unicode string for most records; None (NULL) every 4th record."""
    if rnum % 4 == 0:
        return None
    return u"value%d" % fnum
# Fixture: Unicode column benchmark.
unicodetest = (Unicode(20, assert_unicode=False), genunicodevalue,
               dict(num_records=100000))
# dict(engineurl='mysql:///test', freshdata=False))
# do the tests
if test_types:
    # Run every datatype fixture against each backend, timing row fetches.
    tests = [booleantest, datetimetest, decimaltest, intervaltest,
             pickletypetest, typedecoratortest, unicodetest]
    for engineurl in ('postgresql://scott:tiger@localhost/test',
                      'sqlite://', 'mysql://scott:tiger@localhost/test'):
        print "\n%s\n" % engineurl
        for datatype, genvalue, kwargs in tests:
            print "%s:" % getattr(datatype, '__name__',
                                  datatype.__class__.__name__),
            profile_and_time_dbfunc(iter_results, datatype, genvalue,
                                    profile=False, engineurl=engineurl,
                                    verbose=verbose, **kwargs)
# ---------------------- #
# test row proxy methods #
# ---------------------- #
if test_methods:
    # Time every row-access idiom against the same 100k-row Unicode table.
    methods = [iter_results, values_results, getattr_results,
               getitem_str_results, getitem_fallback_results,
               getitem_int_results, getitem_long_results, getitem_obj_results,
               slice_results]
    for engineurl in ('postgresql://scott:tiger@localhost/test',
                      'sqlite://', 'mysql://scott:tiger@localhost/test'):
        print "\n%s\n" % engineurl
        test_table = prepare(Unicode(20, assert_unicode=False),
                             genunicodevalue,
                             num_fields=10, num_records=100000,
                             verbose=verbose, engineurl=engineurl)
        for method in methods:
            print "%s:" % method.__name__,
            time_dbfunc(test_table, method, genunicodevalue,
                        num_fields=10, num_records=100000, profile=False,
                        verbose=verbose)
# --------------------------------
# test pickling Rowproxy instances
# --------------------------------
def pickletofile_results(raw_results):
    # Dump the whole result list to disk once per pickle protocol
    # (Python 2 ``file()`` builtin and print statement).
    from cPickle import dump, load
    for protocol in (0, 1, 2):
        print "dumping protocol %d..." % protocol
        f = file('noext.pickle%d' % protocol, 'wb')
        dump(raw_results, f, protocol)
        f.close()
    return raw_results
def pickle_results(raw_results):
    """Round-trip the entire result list through pickle protocol 2."""
    payload = dumps(raw_results, 2)
    return loads(payload)
def pickle_meta(raw_results):
    # Pickle only the rows' shared ``_parent`` object (presumably the
    # result-set metadata); ``metadata`` is deliberately unused beyond
    # exercising the round-trip.
    pickled = dumps(raw_results[0]._parent, 2)
    metadata = loads(pickled)
    return raw_results
def pickle_rows(raw_results):
    """Round-trip each row individually through pickle protocol 2."""
    restored = []
    for row in raw_results:
        restored.append(loads(dumps(row, 2)))
    return restored
if test_pickle:
    # Time the pickle strategies against a smaller 10k-row table.
    test_table = prepare(Unicode, genunicodevalue,
                         num_fields=10, num_records=10000)
    funcs = [pickle_rows, pickle_results]
    for func in funcs:
        print "%s:" % func.__name__,
        time_dbfunc(test_table, func, genunicodevalue,
                    num_records=10000, profile=False, verbose=verbose)
# --------------------------------
# test ORM
# --------------------------------
if test_orm:
    from sqlalchemy.orm import *
    class Test(object):
        # Bare mapped class; attributes are attached by the mapper.
        pass
    Session = sessionmaker()
    session = Session()
    def get_results():
        # Fetch every mapped row through the ORM.
        return session.query(Test).all()
    print "ORM:",
    for engineurl in ('postgresql:///test', 'sqlite://', 'mysql:///test'):
        print "\n%s\n" % engineurl
        profile_and_time_dbfunc(getattr_results, Unicode(20), genunicodevalue,
                                class_=Test, getresults_func=get_results,
                                engineurl=engineurl, #freshdata=False,
                                num_records=10000, verbose=verbose)
| |
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InvalidOrder
class cex (Exchange):
    """ccxt async driver for the CEX.IO exchange."""
    def describe(self):
        # Static exchange metadata: endpoints, capabilities, fees and
        # credential requirements, merged over the base-class defaults.
        return self.deep_extend(super(cex, self).describe(), {
            'id': 'cex',
            'name': 'CEX.IO',
            'countries': ['GB', 'EU', 'CY', 'RU'],
            'rateLimit': 1500,
            'hasCORS': True,
            'hasFetchTickers': True,
            'hasFetchOHLCV': True,
            'hasFetchOpenOrders': True,
            'timeframes': {
                '1m': '1m',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766442-8ddc33b0-5ed8-11e7-8b98-f786aef0f3c9.jpg',
                'api': 'https://cex.io/api',
                'www': 'https://cex.io',
                'doc': 'https://cex.io/cex-api',
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            'api': {
                'public': {
                    'get': [
                        'currency_limits/',
                        'last_price/{pair}/',
                        'last_prices/{currencies}/',
                        'ohlcv/hd/{yyyymmdd}/{pair}',
                        'order_book/{pair}/',
                        'ticker/{pair}/',
                        'tickers/{currencies}/',
                        'trade_history/{pair}/',
                    ],
                    'post': [
                        'convert/{pair}',
                        'price_stats/{pair}',
                    ],
                },
                'private': {
                    'post': [
                        'active_orders_status/',
                        'archived_orders/{pair}/',
                        'balance/',
                        'cancel_order/',
                        'cancel_orders/{pair}/',
                        'cancel_replace_order/{pair}/',
                        'close_position/{pair}/',
                        'get_address/',
                        'get_myfee/',
                        'get_order/',
                        'get_order_tx/',
                        'open_orders/{pair}/',
                        'open_orders/',
                        'open_position/{pair}/',
                        'open_positions/{pair}/',
                        'place_order/{pair}/',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0,
                    'taker': 0.2 / 100,
                },
            },
        })
    async def fetch_markets(self):
        # Build the unified market list from the currency_limits endpoint.
        markets = await self.publicGetCurrencyLimits()
        result = []
        for p in range(0, len(markets['data']['pairs'])):
            market = markets['data']['pairs'][p]
            id = market['symbol1'] + '/' + market['symbol2']
            symbol = id
            base, quote = symbol.split('/')
            result.append({
                'id': id,
                'info': market,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'precision': {
                    'price': self.precision_from_string(market['minPrice']),
                    # minLotSize is a power-of-ten step; its negative log10
                    # is the amount precision in decimal places.
                    'amount': -1 * math.log10(market['minLotSize']),
                },
                'limits': {
                    'amount': {
                        'min': market['minLotSize'],
                        'max': market['maxLotSize'],
                    },
                    'price': {
                        'min': float(market['minPrice']),
                        'max': float(market['maxPrice']),
                    },
                    'cost': {
                        'min': market['minLotSizeS2'],
                        'max': None,
                    },
                },
            })
        return result
    async def fetch_balance(self, params={}):
        # Per-currency balances; 'available' maps to free, 'orders' to used.
        await self.load_markets()
        response = await self.privatePostBalance()
        result = {'info': response}
        ommited = ['username', 'timestamp']
        balances = self.omit(response, ommited)
        currencies = list(balances.keys())
        for i in range(0, len(currencies)):
            currency = currencies[i]
            if currency in balances:
                account = {
                    'free': self.safe_float(balances[currency], 'available', 0.0),
                    'used': self.safe_float(balances[currency], 'orders', 0.0),
                    'total': 0.0,
                }
                account['total'] = self.sum(account['free'], account['used'])
                result[currency] = account
        return self.parse_balance(result)
    async def fetch_order_book(self, symbol, params={}):
        # The exchange timestamp is in seconds; convert to milliseconds.
        await self.load_markets()
        orderbook = await self.publicGetOrderBookPair(self.extend({
            'pair': self.market_id(symbol),
        }, params))
        timestamp = orderbook['timestamp'] * 1000
        return self.parse_order_book(orderbook, timestamp)
    def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
        # [timestamp(ms), open, high, low, close, volume]
        return [
            ohlcv[0] * 1000,
            ohlcv[1],
            ohlcv[2],
            ohlcv[3],
            ohlcv[4],
            ohlcv[5],
        ]
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        # Historical candles are addressed by day (yyyymmdd) in the URL.
        await self.load_markets()
        market = self.market(symbol)
        if not since:
            since = self.milliseconds() - 86400000  # yesterday
        ymd = self.Ymd(since)
        ymd = ymd.split('-')
        ymd = ''.join(ymd)
        request = {
            'pair': market['id'],
            'yyyymmdd': ymd,
        }
        response = await self.publicGetOhlcvHdYyyymmddPair(self.extend(request, params))
        # The candle payload is itself a JSON-encoded string keyed by
        # 'data' + timeframe, e.g. 'data1m'.
        key = 'data' + self.timeframes[timeframe]
        ohlcvs = json.loads(response[key])
        return self.parse_ohlcvs(ohlcvs, market, timeframe, since, limit)
    def parse_ticker(self, ticker, market=None):
        # Normalize an exchange ticker dict into the unified ccxt structure.
        timestamp = None
        iso8601 = None
        if 'timestamp' in ticker:
            timestamp = int(ticker['timestamp']) * 1000
            iso8601 = self.iso8601(timestamp)
        volume = self.safe_float(ticker, 'volume')
        high = self.safe_float(ticker, 'high')
        low = self.safe_float(ticker, 'low')
        bid = self.safe_float(ticker, 'bid')
        ask = self.safe_float(ticker, 'ask')
        last = self.safe_float(ticker, 'last')
        symbol = None
        if market:
            symbol = market['symbol']
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': iso8601,
            'high': high,
            'low': low,
            'bid': bid,
            'ask': ask,
            'vwap': None,
            'open': None,
            'close': None,
            'first': None,
            'last': last,
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': volume,
            'quoteVolume': None,
            'info': ticker,
        }
    async def fetch_tickers(self, symbols=None, params={}):
        # One request for all currencies; pairs come back as 'BASE:QUOTE'.
        await self.load_markets()
        currencies = list(self.currencies.keys())
        response = await self.publicGetTickersCurrencies(self.extend({
            'currencies': '/'.join(currencies),
        }, params))
        tickers = response['data']
        result = {}
        for t in range(0, len(tickers)):
            ticker = tickers[t]
            symbol = ticker['pair'].replace(':', '/')
            market = self.markets[symbol]
            result[symbol] = self.parse_ticker(ticker, market)
        return result
    async def fetch_ticker(self, symbol, params={}):
        await self.load_markets()
        market = self.market(symbol)
        ticker = await self.publicGetTickerPair(self.extend({
            'pair': market['id'],
        }, params))
        return self.parse_ticker(ticker, market)
    def parse_trade(self, trade, market=None):
        # 'date' is in seconds; convert to milliseconds.
        timestamp = int(trade['date']) * 1000
        return {
            'info': trade,
            'id': trade['tid'],
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'type': None,
            'side': trade['type'],
            'price': float(trade['price']),
            'amount': float(trade['amount']),
        }
    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        await self.load_markets()
        market = self.market(symbol)
        response = await self.publicGetTradeHistoryPair(self.extend({
            'pair': market['id'],
        }, params))
        return self.parse_trades(response, market, since, limit)
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        await self.load_markets()
        order = {
            'pair': self.market_id(symbol),
            'type': side,
            'amount': amount,
        }
        if type == 'limit':
            order['price'] = price
        else:
            # for market buy CEX.io requires the amount of quote currency to spend
            if side == 'buy':
                if not price:
                    raise InvalidOrder('For market buy orders ' + self.id + " requires the amount of quote currency to spend, to calculate proper costs call createOrder(symbol, 'market', 'buy', amount, price)")
                order['amount'] = amount * price
            order['order_type'] = type
        response = await self.privatePostPlaceOrderPair(self.extend(order, params))
        return {
            'info': response,
            'id': response['id'],
        }
    async def cancel_order(self, id, symbol=None, params={}):
        await self.load_markets()
        return await self.privatePostCancelOrder({'id': id})
    def parse_order(self, order, market=None):
        # NOTE(review): 'time' is used as-is (no * 1000), implying it is
        # already in milliseconds - confirm against the API payload.
        timestamp = int(order['time'])
        symbol = None
        if not market:
            symbol = order['symbol1'] + '/' + order['symbol2']
            if symbol in self.markets:
                market = self.market(symbol)
        # Map CEX.IO single-letter statuses to unified status strings.
        status = order['status']
        if status == 'a':
            status = 'open'  # the unified status
        elif status == 'cd':
            status = 'canceled'
        elif status == 'c':
            status = 'canceled'
        elif status == 'd':
            status = 'closed'
        price = self.safe_float(order, 'price')
        amount = self.safe_float(order, 'amount')
        remaining = self.safe_float(order, 'pending')
        if not remaining:
            remaining = self.safe_float(order, 'remains')
        filled = amount - remaining
        fee = None
        cost = None
        if market:
            symbol = market['symbol']
            # 'ta:<quote>' carries the traded cost; 'fa:<currency>' the fee.
            cost = self.safe_float(order, 'ta:' + market['quote'])
            baseFee = 'fa:' + market['base']
            quoteFee = 'fa:' + market['quote']
            feeRate = self.safe_float(order, 'tradingFeeMaker')
            if not feeRate:
                feeRate = self.safe_float(order, 'tradingFeeTaker', feeRate)
            if feeRate:
                feeRate /= 100.0  # convert to mathematically-correct percentage coefficients: 1.0 = 100%
            if baseFee in order:
                fee = {
                    'currency': market['base'],
                    'rate': feeRate,
                    'cost': self.safe_float(order, baseFee),
                }
            elif quoteFee in order:
                fee = {
                    'currency': market['quote'],
                    'rate': feeRate,
                    'cost': self.safe_float(order, quoteFee),
                }
        if not cost:
            cost = price * filled
        return {
            'id': order['id'],
            'datetime': self.iso8601(timestamp),
            'timestamp': timestamp,
            'status': status,
            'symbol': symbol,
            'type': None,
            'side': order['type'],
            'price': price,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'trades': None,
            'fee': fee,
            'info': order,
        }
    async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        # Uses the pair-specific endpoint when a symbol is given; the API
        # omits 'status' for open orders, so it is injected here.
        await self.load_markets()
        request = {}
        method = 'privatePostOpenOrders'
        market = None
        if symbol:
            market = self.market(symbol)
            request['pair'] = market['id']
            method += 'Pair'
        orders = await getattr(self, method)(self.extend(request, params))
        for i in range(0, len(orders)):
            orders[i] = self.extend(orders[i], {'status': 'open'})
        return self.parse_orders(orders, market, since, limit)
    async def fetch_order(self, id, symbol=None, params={}):
        await self.load_markets()
        response = await self.privatePostGetOrder(self.extend({
            'id': str(id),
        }, params))
        return self.parse_order(response)
    def nonce(self):
        # Millisecond timestamps are strictly increasing between calls.
        return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        # Public: query string only. Private: HMAC(nonce + uid + apiKey)
        # with the secret, sent form-encoded alongside key and nonce.
        url = self.urls['api'] + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = str(self.nonce())
            auth = nonce + self.uid + self.apiKey
            signature = self.hmac(self.encode(auth), self.encode(self.secret))
            body = self.urlencode(self.extend({
                'key': self.apiKey,
                'signature': signature.upper(),
                'nonce': nonce,
            }, query))
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        # Post-process every response: empty payloads, 'e'/'ok' envelopes
        # and explicit 'error' fields are all turned into ExchangeError.
        response = await self.fetch2(path, api, method, params, headers, body)
        if not response:
            raise ExchangeError(self.id + ' returned ' + self.json(response))
        elif response is True:
            return response
        elif 'e' in response:
            if 'ok' in response:
                if response['ok'] == 'ok':
                    return response
            raise ExchangeError(self.id + ' ' + self.json(response))
        elif 'error' in response:
            if response['error']:
                raise ExchangeError(self.id + ' ' + self.json(response))
        return response
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_kernel_label_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import common_shapes
from tensorflow.python.platform import googletest
class TensorTest(test_util.TensorFlowTestCase):
  """Tests for Tensor shape handling."""

  def testShape(self):
    # A fresh op output starts with an unknown shape that set_shape()
    # can later refine.
    operation = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(),
                              [], [dtypes.float32])
    tensor = operation.outputs[0]
    self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
    tensor.set_shape([1, 2, 3])
    self.assertEqual([1, 2, 3], tensor.get_shape())
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
  """Tests for the ops._NodeDef proto constructor."""

  def testNoArgs(self):
    # Only op type and name: no optional fields in the resulting proto.
    proto = ops._NodeDef("noop", "bar")
    self.assertProtoEquals("op: 'noop' name: 'bar'", proto)

  def testArgs(self):
    # The device may be given either as a string or as a Device object.
    proto = ops._NodeDef("foo", "bar", device="/device:baz:*")
    self.assertProtoEquals("op:'foo' name:'bar' device:'/device:baz:*'",
                           proto)
    proto = ops._NodeDef("foo", "bar", device=pydev.Device(job="j"))
    self.assertProtoEquals("op:'foo' name:'bar' device:'/job:j'", proto)
# NOTE(mrry): Dummy shape registrations for ops used in the tests.
# Passing None presumably registers "no shape function" for each op type so
# that Graph.create_op accepts them - confirm against ops.RegisterShape.
ops.RegisterShape("a")(None)
ops.RegisterShape("b")(None)
ops.RegisterShape("c")(None)
ops.RegisterShape("add")(None)
ops.RegisterShape("an_op")(None)
ops.RegisterShape("const")(None)
ops.RegisterShape("copy")(None)
ops.RegisterShape("foo")(None)
ops.RegisterShape("identity")(None)
ops.RegisterShape("mul")(None)
ops.RegisterShape("nonrefop")(None)
ops.RegisterShape("noop")(None)
ops.RegisterShape("refop")(None)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
class OperationTest(test_util.TensorFlowTestCase):
  """Tests direct construction of `ops.Operation` objects.

  These tests exercise internal attributes (`_value_index`, `_consumers`,
  `_as_node_def_input`) as well as the public `values()`, `inputs`, `name`
  and `node_def` accessors.
  """

  def testNoInputs(self):
    """An op with two outputs and no inputs exposes both output tensors."""
    op = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(),
                       [],
                       [dtypes.float32, dtypes.string])
    self.assertEqual(2, len(op.values()))
    self.assertEqual(0, len(op.inputs))
    self.assertEqual("myop", op.name)
    float_t, label_str_t = op.values()
    self.assertEqual(dtypes.float32, float_t.dtype)
    self.assertEqual(op, float_t.op)
    self.assertEqual(0, float_t._value_index)
    self.assertEqual(0, len(float_t._consumers))
    # Output 0 is referenced by bare op name; output 1 needs the ":1" suffix.
    self.assertEqual("myop", float_t._as_node_def_input())
    self.assertEqual(dtypes.string, label_str_t.dtype)
    self.assertEqual(op, label_str_t.op)
    self.assertEqual(1, label_str_t._value_index)
    self.assertEqual(0, len(label_str_t._consumers))
    self.assertEqual("myop:1", label_str_t._as_node_def_input())
    self.assertProtoEquals("op:'noop' name:'myop'", op.node_def)

  def testNoOutputs(self):
    """An op that consumes a tensor registers itself as that tensor's consumer."""
    g = ops.Graph()
    op1 = ops.Operation(
        ops._NodeDef("noop", "myop1"), g, [], [dtypes.float32])
    float_t, = op1.values()
    op2 = ops.Operation(ops._NodeDef("reop", "myop2"), g, [float_t], [])
    self.assertEqual(0, len(op2.values()))
    self.assertEqual(1, len(op2.inputs))
    self.assertIs(float_t, op2.inputs[0])
    self.assertEqual(1, len(float_t._consumers))
    self.assertEqual(op2, float_t._consumers[0])
    self.assertProtoEquals("op:'noop' name:'myop1'", op1.node_def)
    self.assertProtoEquals("op:'reop' name:'myop2' input:'myop1'",
                           op2.node_def)

  def testInputsAndOutputs(self):
    """Consuming the same tensor twice records two consumer entries."""
    g = ops.Graph()
    op1 = ops.Operation(
        ops._NodeDef("noop", "myop1"), g, [], [dtypes.float32])
    self.assertEqual(1, len(op1.values()))
    float1_t, = op1.values()
    op2 = ops.Operation(ops._NodeDef("reop", "myop2"), g,
                        [], [dtypes.float32, dtypes.string])
    self.assertEqual(2, len(op2.values()))
    float2_t, label2_str_t = op2.values()
    # Note that we consume label2_str_t twice here.
    op3 = ops.Operation(ops._NodeDef("add", "myop3"), g,
                        [float1_t, label2_str_t, label2_str_t],
                        [dtypes.float32, dtypes.int32])
    self.assertEqual(2, len(op3.values()))
    self.assertEqual(1, len(float1_t._consumers))
    self.assertEqual(op3, float1_t._consumers[0])
    self.assertEqual(0, len(float2_t._consumers))
    self.assertEqual(2, len(label2_str_t._consumers))
    self.assertEqual(op3, label2_str_t._consumers[0])
    self.assertEqual(op3, label2_str_t._consumers[1])
    self.assertProtoEquals("""
    op:'add' name:'myop3'
    input:'myop1' input:'myop2:1' input:'myop2:1'
    """, op3.node_def)

  def testDeviceObject(self):
    """_set_device accepts both a string and a pydev.Device object."""
    op = ops.Operation(ops._NodeDef("noop", "myop"), ops.Graph(), [], [])
    op._set_device("/job:goo/device:GPU:0")
    self.assertProtoEquals(
        "op:'noop' name:'myop' device:'/job:goo/device:GPU:0' ",
        op.node_def)
    op = ops.Operation(ops._NodeDef("noop", "op2"), ops.Graph(), [], [])
    op._set_device(pydev.Device(job="muu", device_type="CPU", device_index=0))
    self.assertProtoEquals(
        "op:'noop' name:'op2' device:'/job:muu/device:CPU:0'",
        op.node_def)

  def testReferenceInput(self):
    """Ref-typed inputs are preserved only when input_types is explicit."""
    g = ops.Graph()
    op1 = ops.Operation(ops._NodeDef("noop", "op1"), g, [],
                        [dtypes.float32_ref, dtypes.float32])
    self.assertProtoEquals("op:'noop' name:'op1'",
                           op1.node_def)
    ref_t, nonref_t = op1.values()
    # NOTE(mrry): Must specify input_types to preserve ref-typed input.
    op2 = ops.Operation(
        ops._NodeDef("refop", "op2"), g, [ref_t, nonref_t], [],
        input_types=[dtypes.float32_ref, dtypes.float32])
    self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
                           op2.node_def)
    op3 = ops.Operation(
        ops._NodeDef("nonrefop", "op3"), g, [ref_t, nonref_t], [])
    self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
                           op3.node_def)

  def testInvalidNames(self):
    """Empty names and names starting with '_', '-' or '/' are rejected."""
    g = ops.Graph()
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", ""), g)
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", "_invalid"), g)
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", "-invalid"), g)
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", "/invalid"), g)

  def testShapeFunctionAbsence(self):
    """create_op raises for an op type with no registered shape function."""
    # NOTE(review): `_test` below is unused dead code.
    def _test():
      pass
    g = ops.Graph()
    with self.assertRaises(RuntimeError):
      g.create_op("shapeless_op", [], [dtypes.float32])

  def testNoShapeFunction(self):
    """An op registered with shape function None yields an unknown shape."""
    g = ops.Graph()
    op = ops.Operation(ops._NodeDef("op", "an_op"), g,
                       output_types = [dtypes.float32])
    self.assertEqual(tensor_shape.unknown_shape(),
                     _apply_op(g, "an_op", [], [dtypes.float32]).get_shape())
class CreateOpTest(test_util.TensorFlowTestCase):
  """Tests for `Graph.create_op`."""

  def testNodeDefArgs(self):
    """create_op records name, inputs and the active device in the NodeDef."""
    g = ops.Graph()
    op1 = g.create_op("const", [], [dtypes.float32], None, name="myop1")
    with g.device("/device:GPU"):
      op2 = g.create_op("add",
                        [],
                        [dtypes.float32, dtypes.string], None,
                        name="myop2")
    op3 = g.create_op("foo",
                      [list(op1.values())[0], list(op2.values())[1],
                       list(op2.values())[0]],
                      [dtypes.float32, dtypes.int32],
                      None,
                      name="myop3")
    # Only op2 was created inside a device scope.
    self.assertEqual(None, op1.device)
    self.assertEqual("/device:GPU", op2.device)
    self.assertEqual(None, op3.device)
    self.assertProtoEquals("name:'myop1' op:'const'", op1.node_def)
    self.assertProtoEquals("name:'myop2' op:'add' device:'/device:GPU'",
                           op2.node_def)
    self.assertProtoEquals(
        "name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'foo'",
        op3.node_def)

  def testReferenceInput(self):
    """Ref-typed inputs survive only when input_types is given explicitly."""
    g = ops.Graph()
    op1 = g.create_op("noop", [],
                      [dtypes.float32_ref, dtypes.float32], name="op1")
    self.assertProtoEquals("op:'noop' name:'op1'", op1.node_def)
    ref_t, nonref_t = op1.values()
    # NOTE(mrry): Must specify input_types to preserve ref-typed input.
    op2 = g.create_op("refop", [ref_t, nonref_t], [],
                      input_types=[dtypes.float32_ref, dtypes.float32],
                      name="op2")
    self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
                           op2.node_def)
    op3 = g.create_op("nonrefop", [ref_t, nonref_t], [], name="op3")
    self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
                           op3.node_def)

  def testFinalized(self):
    """create_op raises RuntimeError on a finalized graph."""
    g = ops.Graph()
    g.finalize()
    with self.assertRaises(RuntimeError):
      g.create_op("const", [], [dtypes.float32], None, name="myop1")
class ApplyOpTest(test_util.TensorFlowTestCase):
  """Tests the `_apply_op` helper's single-output vs. list return behavior."""

  def testNodeDefArgs(self):
    """Single-output ops return a Tensor; multi-output ops return a list."""
    g = ops.Graph()
    t1 = _apply_op(g, "const", [], [dtypes.float32], name="myop1")
    with g.device("/device:GPU"):
      t2 = _apply_op(g, "add",
                     [],
                     [dtypes.float32, dtypes.string],
                     name="myop2")
    t3 = _apply_op(g, "foo", [t1, t2[1], t2[0]],
                   [dtypes.float32, dtypes.int32], name="myop3")
    self.assertTrue(isinstance(t1, ops.Tensor))
    self.assertTrue(isinstance(t2, list))
    self.assertTrue(isinstance(t3, list))
    self.assertTrue(isinstance(t3[0], ops.Tensor))
    self.assertEqual("myop1", t1._as_node_def_input())
    self.assertEqual("myop2", t2[0]._as_node_def_input())
    self.assertEqual("myop2:1", t2[1]._as_node_def_input())
    self.assertEqual("myop3", t3[0]._as_node_def_input())
    # Validate that we got the right ops as well
    self.assertProtoEquals("name:'myop1' op:'const'", t1.op.node_def)
    self.assertProtoEquals("name:'myop2' op:'add' device:'/device:GPU'",
                           t2[0].op.node_def)
    self.assertProtoEquals(
        "name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'foo'",
        t3[0].op.node_def)

  def testReferenceInput(self):
    """Same ref-typed input rules as CreateOpTest, but via _apply_op."""
    g = ops.Graph()
    ref_t, nonref_t = _apply_op(
        g, "noop", [], [dtypes.float32_ref, dtypes.float32], name="op1")
    self.assertProtoEquals("op:'noop' name:'op1'", ref_t.op.node_def)
    # NOTE(mrry): Must specify input_types to preserve ref-typed input.
    out_2 = _apply_op(g, "refop", [ref_t, nonref_t], [dtypes.int32],
                      input_types=[dtypes.float32_ref, dtypes.float32],
                      name="op2")
    self.assertProtoEquals("op:'refop' name:'op2' input:'op1' input:'op1:1'",
                           out_2.op.node_def)
    out_3 = _apply_op(g, "nonrefop", [ref_t, nonref_t], [dtypes.int32],
                      name="op3")
    self.assertProtoEquals("op:'nonrefop' name:'op3' input:'op1' input:'op1:1'",
                           out_3.op.node_def)
class NameStackTest(test_util.TensorFlowTestCase):
  """Tests `Graph.unique_name` suffixing and its interaction with name scopes."""

  def testBasics(self):
    """Repeated requests get _1, _2, ... suffixes, per enclosing scope."""
    g = ops.Graph()
    self.assertEqual("foo", g.unique_name("foo"))
    self.assertEqual("foo_1", g.unique_name("foo"))
    self.assertEqual("foo_2", g.unique_name("foo"))
    self.assertEqual("foo_1_1", g.unique_name("foo_1"))
    self.assertEqual("foo_1_2", g.unique_name("foo_1"))
    self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2"))
    with g.name_scope("bar"):
      self.assertEqual("bar/foo", g.unique_name("foo"))
      self.assertEqual("bar/foo_1", g.unique_name("foo"))
      # name_scope(None) resets to the root scope, where "foo" has
      # already been used three times.
      with g.name_scope(None):
        self.assertEqual("foo_3", g.unique_name("foo"))
      with g.name_scope("baz"):
        self.assertEqual("bar/baz/foo", g.unique_name("foo"))
        self.assertEqual("bar/baz/foo_1", g.unique_name("foo"))
      # Re-entering a scope name gets its own uniquified prefix (baz_1).
      with g.name_scope("baz"):
        self.assertEqual("bar/baz_1/foo", g.unique_name("foo"))
        self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo"))
    with g.name_scope("quux"):
      self.assertEqual("quux/foo", g.unique_name("foo"))
    with g.name_scope("bar"):
      with g.name_scope("baz"):
        self.assertEqual("bar_1/baz/foo", g.unique_name("foo"))
    self.assertEqual("foo_4", g.unique_name("foo"))
    self.assertEqual("bar_2", g.unique_name("bar"))

  def testOutOfOrderUniqueName(self):
    """Explicitly requesting foo_2 first does not break later suffixing."""
    g = ops.Graph()
    self.assertEqual("foo_2", g.unique_name("foo_2"))
    self.assertEqual("foo", g.unique_name("foo"))
    self.assertEqual("foo_1", g.unique_name("foo"))
    self.assertEqual("foo_3", g.unique_name("foo"))
class NameTest(test_util.TensorFlowTestCase):
  """Tests op/tensor name generation and `Graph.name_scope` semantics."""

  def testGenerateName(self):
    """Ops are auto-named after their type; outputs get :0, :1 suffixes."""
    g = ops.Graph()
    op0 = g.create_op("const", [], [dtypes.float32, dtypes.float32])
    self.assertEqual("const", op0.name)
    self.assertEqual("const:0", op0.outputs[0].name)
    self.assertEqual("const:1", op0.outputs[1].name)
    op1 = g.create_op("const", [], [dtypes.float32])
    self.assertEqual("const_1", op1.name)
    self.assertEqual("const_1:0", op1.outputs[0].name)
    op2 = g.create_op("const", [], [dtypes.float32], name="my_op")
    self.assertEqual("my_op", op2.name)
    self.assertEqual("my_op:0", op2.outputs[0].name)

  def testname_scope(self):
    """Covers nesting, reset via None/"" and reuse of `with ... as` values."""
    g = ops.Graph()

    # Examine the scope string yielded by `with ... as`.
    with g.name_scope("foo") as foo:
      self.assertEqual(foo, "foo/")
      with g.name_scope("foo2") as foo2:
        self.assertEqual(foo2, "foo/foo2/")
      # None and "" both reset to the root scope.
      with g.name_scope(None) as empty1:
        self.assertEqual(empty1, "")
        with g.name_scope("foo3") as foo3:
          self.assertEqual(foo3, "foo3/")
      with g.name_scope("") as empty2:
        self.assertEqual(empty2, "")

    self.assertEqual("const",
                     g.create_op("const", [], [dtypes.float32]).name)
    with g.name_scope("bar") as scope:
      self.assertEqual("bar/const",
                       g.create_op("const", [], [dtypes.float32]).name)
      self.assertEqual("bar/const_1",
                       g.create_op("const", [], [dtypes.float32]).name)
      # If you use the value from "with .. as", that values is used as-is.
      self.assertEqual(
          "bar",
          g.create_op("const", [], [dtypes.float32], name=scope).name)
    with g.name_scope("baz") as scope:
      with g.name_scope("quux"):
        self.assertEqual("baz/quux/const",
                         g.create_op("const", [], [dtypes.float32]).name)
      # If you use the value from the enclosing "with .. as", nothing is pushed.
      with g.name_scope(scope):
        self.assertEqual("baz/const",
                         g.create_op("const", [], [dtypes.float32]).name)
        self.assertEqual("baz",
                         g.create_op("const", [], [dtypes.float32],
                                     name=scope).name)
        # A trailing "/" means "use this name verbatim, without uniquifying".
        self.assertEqual("trailing",
                         g.create_op("const", [], [dtypes.float32],
                                     name="trailing/").name)
    with g.name_scope("bar"):
      self.assertEqual("bar_1/const",
                       g.create_op("const", [], [dtypes.float32]).name)
    with g.name_scope("bar/"):
      self.assertEqual("bar/const_2",
                       g.create_op("const", [], [dtypes.float32]).name)
class DeviceTest(test_util.TensorFlowTestCase):
  """Tests `Graph.device` scoping, nesting, merging and resetting."""

  def testNoDevice(self):
    """Ops created outside any device scope carry no device."""
    g = ops.Graph()
    op = g.create_op("an_op", [], [dtypes.float32])
    self.assertEqual(None, op.device)
    gd = g.as_graph_def()
    self.assertProtoEquals("""
      node { name: "an_op" op: "an_op" }
    """, gd)

  def testDevicePartialString(self):
    """A partial device string is recorded verbatim."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2"):
      g.create_op("an_op", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEquals("""
      node { name: "an_op" op: "an_op" device: "/job:worker/replica:2" }
    """, gd)

  def testDeviceFull(self):
    """A fully-specified pydev.Device is rendered to its canonical string."""
    g = ops.Graph()
    with g.device(pydev.Device(job="worker", replica=2, task=0,
                               device_type="CPU",
                               device_index=3)):
      g.create_op("an_op", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEquals("""
      node { name: "an_op" op: "an_op"
             device: "/job:worker/replica:2/task:0/device:CPU:3" }
    """, gd)

  def testNesting(self):
    """The inner device scope wins; leaving it restores the outer one."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2"):
      g.create_op("an_op", [], [dtypes.float32])
      with g.device("/job:worker/replica:3/task:0"):
        g.create_op("an_op", [], [dtypes.float32])
      g.create_op("an_op", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEquals("""
      node { name: "an_op" op: "an_op"
             device: "/job:worker/replica:2" }
      node { name: "an_op_1" op: "an_op"
             device: "/job:worker/replica:3/task:0" }
      node { name: "an_op_2" op: "an_op"
             device: "/job:worker/replica:2" }
    """, gd)

  def testNestingString(self):
    """Same nesting behavior expressed with device strings.

    NOTE(review): this test body is currently identical to testNesting;
    presumably one of the two was meant to use pydev.Device objects —
    confirm against upstream history.
    """
    g = ops.Graph()
    with g.device("/job:worker/replica:2"):
      g.create_op("an_op", [], [dtypes.float32])
      with g.device("/job:worker/replica:3/task:0"):
        g.create_op("an_op", [], [dtypes.float32])
      g.create_op("an_op", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEquals("""
      node { name: "an_op" op: "an_op"
             device: "/job:worker/replica:2" }
      node { name: "an_op_1" op: "an_op"
             device: "/job:worker/replica:3/task:0" }
      node { name: "an_op_2" op: "an_op"
             device: "/job:worker/replica:2" }
    """, gd)

  def testNestingOverrideGpuCpu(self):
    """An inner full device spec completely overrides the outer one."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2/device:CPU:1"):
      g.create_op("an_op", [], [dtypes.float32])
      with g.device("/job:worker/replica:2/device:GPU:2"):
        g.create_op("an_op", [], [dtypes.float32])
      g.create_op("an_op", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEquals("""
      node { name: "an_op" op: "an_op"
             device: "/job:worker/replica:2/device:CPU:1"  }
      node { name: "an_op_1" op: "an_op"
             device: "/job:worker/replica:2/device:GPU:2" }
      node { name: "an_op_2" op: "an_op"
             device: "/job:worker/replica:2/device:CPU:1" }
    """, gd)

  def testNestingWithMergeDeviceFunction(self):
    """merge_device combines inner specs with the outer ones field-by-field."""
    g = ops.Graph()
    with g.device(pydev.merge_device("/device:GPU:0")):
      g.create_op("an_op", [], [dtypes.float32])
      with g.device(pydev.merge_device("/job:worker")):
        g.create_op("an_op", [], [dtypes.float32])
        with g.device(pydev.merge_device("/device:CPU:0")):
          g.create_op("an_op", [], [dtypes.float32])
          with g.device(pydev.merge_device("/job:ps")):
            g.create_op("an_op", [], [dtypes.float32])
            # merge_device(None) merges nothing, keeping the current spec.
            with g.device(pydev.merge_device(None)):
              g.create_op("an_op", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEquals("""
      node { name: "an_op" op: "an_op"
             device: "/device:GPU:0" }
      node { name: "an_op_1" op: "an_op"
             device: "/job:worker/device:GPU:0" }
      node { name: "an_op_2" op: "an_op"
             device: "/job:worker/device:CPU:0" }
      node { name: "an_op_3" op: "an_op"
             device: "/job:ps/device:CPU:0" }
      node { name: "an_op_4" op: "an_op"
             device: "/job:ps/device:CPU:0" }
    """, gd)

  def testNoneClearsDefault(self):
    """device(None) clears the enclosing device for ops created inside it."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2/device:CPU:1"):
      g.create_op("an_op", [], [dtypes.float32])
      with g.device(None):
        g.create_op("an_op", [], [dtypes.float32])
      g.create_op("an_op", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEquals("""
      node { name: "an_op" op: "an_op"
             device: "/job:worker/replica:2/device:CPU:1" }
      node { name: "an_op_1" op: "an_op" }
      node { name: "an_op_2" op: "an_op"
             device: "/job:worker/replica:2/device:CPU:1" }
    """, gd)
class ObjectWithName(object):
  """Trivial object exposing a read-only ``name`` property.

  Used by CollectionTest to exercise name-prefix filtering in
  `Graph.get_collection`.
  """

  def __init__(self, name):
    # Stash the supplied name; exposed read-only via the property below.
    self._stored_name = name

  @property
  def name(self):
    return self._stored_name
class CollectionTest(test_util.TensorFlowTestCase):
  """Tests graph-level and default-graph collections."""

  def testadd_to_collection(self):
    """Collections preserve insertion order; lookup can filter by name prefix."""
    g = ops.Graph()
    g.add_to_collection("key", 12)
    g.add_to_collection("other", "foo")
    g.add_to_collection("key", 34)

    # Note that only blank1 is returned.
    g.add_to_collection("blah", 27)
    blank1 = ObjectWithName("prefix/foo")
    g.add_to_collection("blah", blank1)
    blank2 = ObjectWithName("junk/foo")
    g.add_to_collection("blah", blank2)

    self.assertEqual(["foo"], g.get_collection("other"))
    self.assertEqual([12, 34], g.get_collection("key"))
    self.assertEqual([], g.get_collection("nothing"))
    self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
    # The scope argument filters by the items' `name` attribute prefix.
    self.assertEqual([blank1], g.get_collection("blah", "prefix"))

  def testDefaulGraph(self):
    """Module-level add_to_collection/get_collection use the default graph."""
    with ops.Graph().as_default():
      ops.add_to_collection("key", 90)
      ops.add_to_collection("key", 100)
      # Collections are ordered.
      self.assertEqual([90, 100], ops.get_collection("key"))
def an_op(g):
  """Creates a no-input "an_op" node in `g` and returns its output."""
  return _apply_op(g, "an_op", [], [dtypes.float32])


# "an_op" is registered as having no gradient.
ops.NoGradient("an_op")


def copy_op(x):
  """Creates a "copy" node consuming `x` and returns its output."""
  return _apply_op(x.graph, "copy", [x], [x.dtype])


@ops.RegisterGradient("copy")
def _CopyGrad(op, x_grad):
  """Identity gradient registered for the "copy" op."""
  _ = op
  return x_grad


@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad):
  """Identity gradient registered under the alternate name "copy_override"."""
  _ = op
  return x_grad
class RegistrationTest(test_util.TensorFlowTestCase):
  """Tests gradient-function lookup and gradient_override_map."""

  def testRegisterGradients(self):
    """A registered gradient function is found via get_gradient_function."""
    g = ops.Graph()
    x = an_op(g)
    y = copy_op(x)
    fn = ops.get_gradient_function(y.op)
    self.assertEqual(_CopyGrad, fn)

  def testOverrideGradients(self):
    """gradient_override_map substitutes the gradient at op-creation time."""
    g = ops.Graph()
    x = an_op(g)
    with g.gradient_override_map({"copy": "copy_override"}):
      y = copy_op(x)
    fn = ops.get_gradient_function(y.op)
    self.assertEqual(_CopyOverrideGrad, fn)

  def testNonExistentOverride(self):
    """Looking up an unregistered override name raises LookupError."""
    g = ops.Graph()
    x = an_op(g)
    with g.gradient_override_map({"copy": "unknown_override"}):
      y = copy_op(x)
    with self.assertRaisesRegexp(LookupError, "unknown_override"):
      fn = ops.get_gradient_function(y.op)
class ComparisonTest(test_util.TensorFlowTestCase):
  """Tests that tensors support `in` / `not in` membership checks."""

  def testMembershipAllowed(self):
    g = ops.Graph()
    t1 = _apply_op(g, "const", [], [dtypes.float32], name="myop1")
    t2 = _apply_op(g, "const", [], [dtypes.float32], name="myop2")
    self.assertTrue(isinstance(t1, ops.Tensor))
    self.assertTrue(isinstance(t2, ops.Tensor))
    self.assertTrue(t1 in [t1])
    self.assertTrue(t1 not in [t2])
class ControlDependenciesTest(test_util.TensorFlowTestCase):
  """Tests `Graph.control_dependencies` scoping and deduplication rules."""

  def testBasic(self):
    """Ops inside the scope get the control input; dominated ops do not."""
    g = ops.Graph()
    a = _apply_op(g, "const", [], [dtypes.float32])
    b = _apply_op(g, "const", [], [dtypes.float32])
    with g.control_dependencies([a]):
      c = _apply_op(g, "const", [], [dtypes.float32])
      d = _apply_op(g, "identity", [b], [dtypes.float32])
      e = _apply_op(g, "identity", [c], [dtypes.float32])
    self.assertEqual(c.op.control_inputs, [a.op])
    self.assertEqual(d.op.control_inputs, [a.op])
    # e should be dominated by c.
    self.assertEqual(e.op.control_inputs, [])

  def testBasicWithConversion(self):
    """Objects with _as_graph_element are converted to their tensor."""
    g = ops.Graph()
    a = _apply_op(g, "const", [], [dtypes.float32])

    class ConvertibleObj(object):

      def _as_graph_element(self):
        return a

    with g.control_dependencies([ConvertibleObj()]):
      c = _apply_op(g, "const", [], [dtypes.float32])
    self.assertEqual(c.op.control_inputs, [a.op])

  def testNested(self):
    """One scope with four deps equals four nested single-dep scopes."""
    g = ops.Graph()
    a_1 = _apply_op(g, "const", [], [dtypes.float32])
    a_2 = _apply_op(g, "const", [], [dtypes.float32])
    a_3 = _apply_op(g, "const", [], [dtypes.float32])
    a_4 = _apply_op(g, "const", [], [dtypes.float32])

    with g.control_dependencies([a_1, a_2, a_3, a_4]):
      b_1 = _apply_op(g, "const", [], [dtypes.float32])

    with g.control_dependencies([a_1]):
      with g.control_dependencies([a_2]):
        with g.control_dependencies([a_3]):
          with g.control_dependencies([a_4]):
            b_2 = _apply_op(g, "const", [], [dtypes.float32])

    self.assertItemsEqual(
        [a_1.op, a_2.op, a_3.op, a_4.op], b_1.op.control_inputs)
    self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)

  def testComplex(self):
    """Deps already implied by data inputs are pruned from control inputs."""
    g = ops.Graph()

    # Usage pattern:
    # * Nodes a_i are constants defined at the outermost scope, and are used
    #   as control inputs for the ith nested scope.
    # * Nodes b_i are defined as Mul(a_3, a_4) at each scope.
    # * Nodes c_i are defined as Mul(a_1, b_1) at each scope.
    # * Nodes d_i are defined as Mul(b_i, c_i) at each scope.
    # * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1.
    a_1 = _apply_op(g, "const", [], [dtypes.float32])
    a_2 = _apply_op(g, "const", [], [dtypes.float32])
    a_3 = _apply_op(g, "const", [], [dtypes.float32])
    a_4 = _apply_op(g, "const", [], [dtypes.float32])

    with g.control_dependencies([a_1]):
      b_1 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
      c_1 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
      d_1 = _apply_op(g, "mul", [b_1, c_1], [dtypes.float32])
      e_1 = _apply_op(g, "const", [], [dtypes.float32])
      with g.control_dependencies([a_2]):
        b_2 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
        c_2 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
        d_2 = _apply_op(g, "mul", [b_2, c_2], [dtypes.float32])
        e_2 = _apply_op(g, "mul", [e_1, e_1], [dtypes.float32])
        with g.control_dependencies([a_3]):
          b_3 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
          c_3 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
          d_3 = _apply_op(g, "mul", [b_3, c_3], [dtypes.float32])
          e_3 = _apply_op(g, "mul", [e_2, e_2], [dtypes.float32])
          with g.control_dependencies([a_4]):
            b_4 = _apply_op(g, "mul", [a_3, a_4], [dtypes.float32])
            c_4 = _apply_op(g, "mul", [a_1, b_1], [dtypes.float32])
            d_4 = _apply_op(g, "mul", [b_4, c_4], [dtypes.float32])
            e_4 = _apply_op(g, "mul", [e_3, e_3], [dtypes.float32])

    self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)

    self.assertItemsEqual([], c_1.op.control_inputs)
    self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
    self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
    self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)

    self.assertItemsEqual([], d_1.op.control_inputs)
    self.assertItemsEqual([], d_2.op.control_inputs)
    self.assertItemsEqual([], d_3.op.control_inputs)
    self.assertItemsEqual([], d_4.op.control_inputs)

    self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
    self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
    self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
    self.assertItemsEqual([a_4.op], e_4.op.control_inputs)

  def testRepeatedDependency(self):
    """Deps on two outputs of the same op collapse to one control input."""
    g = ops.Graph()
    a = g.create_op("foo", [], [dtypes.float32, dtypes.float32])
    a_0, a_1 = a.outputs
    with g.control_dependencies([a_0]):
      b = _apply_op(g, "const", [], [dtypes.float32])
      with g.control_dependencies([a_1]):
        c = _apply_op(g, "const", [], [dtypes.float32])

    self.assertEqual(b.op.control_inputs, [a])
    self.assertEqual(c.op.control_inputs, [a])

  def testNoControlDependencyWithDataDependency(self):
    """A control dep already covered by a data input is not duplicated."""
    g = ops.Graph()
    a = _apply_op(g, "const", [], [dtypes.float32])
    with g.control_dependencies([a]):
      b = _apply_op(g, "identity", [a], [dtypes.float32])

    self.assertEqual(b.op.control_inputs, [])
class GraphTest(test_util.TensorFlowTestCase):
  """Tests default-graph management, element conversion and assert_same_graph."""

  def setUp(self):
    # Each test starts from a fresh default graph.
    ops.reset_default_graph()

  def _AssertDefault(self, expected):
    """Asserts that `expected` is currently the default graph."""
    self.assertIs(expected, ops.get_default_graph())

  def testGraphContextManager(self):
    """as_default yields the same graph object it was called on."""
    g0 = ops.Graph()
    with g0.as_default() as g1:
      self.assertIs(g0, g1)

  def testDefaultGraph(self):
    """Entering/exiting as_default pushes/pops the default-graph stack."""
    orig = ops.get_default_graph()
    self._AssertDefault(orig)
    g0 = ops.Graph()
    self._AssertDefault(orig)
    context_manager_0 = g0.as_default()
    self._AssertDefault(orig)
    with context_manager_0 as g0:
      self._AssertDefault(g0)
      with ops.Graph().as_default() as g1:
        self._AssertDefault(g1)
      self._AssertDefault(g0)
    self._AssertDefault(orig)

  def testAsGraphElementConversions(self):
    """as_graph_element resolves _as_graph_element; rejects other objects."""
    class ConvertibleObj(object):

      def _as_graph_element(self):
        return "const:0"

    class NonConvertibleObj(object):
      pass

    g = ops.Graph()
    a = _apply_op(g, "const", [], [dtypes.float32])
    self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
    with self.assertRaises(TypeError):
      g.as_graph_element(NonConvertibleObj())

  def testAssertSameGraph(self):
    """assert_same_graph accepts same-graph items (incl. SparseTensor parts)."""
    g0 = ops.Graph()
    a = g0.create_op("a", [], [dtypes.float32])
    b = g0.create_op("b", [], [dtypes.float32])
    ops.assert_same_graph([a, b])
    ops.assert_same_graph([a, b], g0)
    g1 = ops.Graph()
    c = g1.create_op("c", [], [dtypes.float32])
    self.assertRaises(ValueError, ops.assert_same_graph, [a, b, c])
    self.assertRaises(ValueError, ops.assert_same_graph, [c], g0)
    self.assertRaises(ValueError, ops.assert_same_graph, [a], g1)

    sparse = ops.SparseTensor(
        _apply_op(g0, "const", [], [dtypes.int64]),
        _apply_op(g0, "const", [], [dtypes.float32]),
        _apply_op(g0, "const", [], [dtypes.int64]))
    ops.assert_same_graph([sparse, a, b])
    ops.assert_same_graph([sparse, a, b], g0)
    self.assertRaises(ValueError, ops.assert_same_graph, [sparse, a, c])
    self.assertRaises(ValueError, ops.assert_same_graph, [sparse, a, c], g1)
# The test kernel-label op produces a scalar string.
ops.RegisterShape("KernelLabel")(common_shapes.scalar_shape)


class KernelLabelTest(test_util.TensorFlowTestCase):
  """Tests per-op kernel selection via Graph._kernel_label_map."""

  def testNoLabel(self):
    """Without a label map the default kernel is selected."""
    with self.test_session():
      self.assertAllEqual(b"My label is: default",
                          test_kernel_label_op.kernel_label().eval())

  def testLabelMap(self):
    """Label maps nest, and an empty label restores the default kernel."""
    with self.test_session() as sess:
      default_1 = test_kernel_label_op.kernel_label()
      # pylint: disable=protected-access
      with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
        overload_1_1 = test_kernel_label_op.kernel_label()
        with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
          overload_2 = test_kernel_label_op.kernel_label()
          with sess.graph._kernel_label_map({"KernelLabel": ""}):
            default_2 = test_kernel_label_op.kernel_label()
        overload_1_2 = test_kernel_label_op.kernel_label()
      # pylint: enable=protected-access
      default_3 = test_kernel_label_op.kernel_label()

      self.assertAllEqual(b"My label is: default", default_1.eval())
      self.assertAllEqual(b"My label is: default", default_2.eval())
      self.assertAllEqual(b"My label is: default", default_3.eval())
      self.assertAllEqual(b"My label is: overload_1", overload_1_1.eval())
      self.assertAllEqual(b"My label is: overload_1", overload_1_2.eval())
      self.assertAllEqual(b"My label is: overload_2", overload_2.eval())


if __name__ == "__main__":
  googletest.main()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Top-level module of TensorFlow. By convention, we refer to this module as
`tf` instead of `tensorflow`, following the common practice of importing
TensorFlow via the command `import tensorflow as tf`.
The primary function of this module is to import all of the public TensorFlow
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
Note that the file `__init__.py` in the TensorFlow source code tree is actually
only a placeholder to enable test cases to run. The TensorFlow build replaces
this file with a file generated from [`api_template.__init__.py`](https://www.github.com/tensorflow/tensorflow/blob/master/tensorflow/api_template.__init__.py)
"""
import distutils as _distutils
import inspect as _inspect
import logging as _logging
import os as _os
import site as _site
import sys as _sys
import typing as _typing
from tensorflow.python.tools import module_util as _module_util
from tensorflow.python.util.lazy_loader import LazyLoader as _LazyLoader
# Make sure code inside the TensorFlow codebase can use tf2.enabled() at import.
_os.environ['TF2_BEHAVIOR'] = '1'
from tensorflow.python import tf2 as _tf2
_tf2.enable()

# API IMPORTS PLACEHOLDER

# WRAPPER_PLACEHOLDER

# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We're using bitwise, but there's nothing special about that.
# NOTE(review): `bitwise` is assumed to be one of the generated API
# submodules attached by the placeholder expansion above; it is used here
# only to locate the generated-API directory on disk -- confirm.
_API_MODULE = _sys.modules[__name__].bitwise
_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
_current_module = _sys.modules[__name__]
# Register the generated-API directory for submodule lookup, once.
if not hasattr(_current_module, '__path__'):
  __path__ = [_tf_api_dir]
elif _tf_api_dir not in __path__:
  __path__.append(_tf_api_dir)
# Hook external TensorFlow modules.

# Import compat before trying to import summary from tensorboard, so that
# reexport_tf_summary can get compat from sys.modules. Only needed if using
# lazy loading.
_current_module.compat.v2  # pylint: disable=pointless-statement

# Attach TensorBoard's tf.summary implementation when it is installed;
# otherwise fall back to the limited built-in API with a warning.
try:
  from tensorboard.summary._tf import summary
  _current_module.__path__ = (
      [_module_util.get_parent_dir(summary)] + _current_module.__path__)
  setattr(_current_module, "summary", summary)
except ImportError:
  _logging.warning(
      "Limited tf.summary API due to missing TensorBoard installation.")

# Load tensorflow-io-gcs-filesystem if enabled
# (the opt-in env var accepts either 'true' or '1').
# pylint: disable=g-import-not-at-top
if (_os.getenv('TF_USE_MODULAR_FILESYSTEM', '0') == 'true' or
    _os.getenv('TF_USE_MODULAR_FILESYSTEM', '0') == '1'):
  import tensorflow_io_gcs_filesystem as _tensorflow_io_gcs_filesystem
# pylint: enable=g-import-not-at-top
# Lazy-load estimator.
_estimator_module = "tensorflow_estimator.python.estimator.api._v2.estimator"
estimator = _LazyLoader("estimator", globals(), _estimator_module)
_module_dir = _module_util.get_parent_dir_for_name(_estimator_module)
if _module_dir:
  _current_module.__path__ = [_module_dir] + _current_module.__path__
setattr(_current_module, "estimator", estimator)

# Lazy-load keras the same way.
_keras_module = "keras.api._v2.keras"
keras = _LazyLoader("keras", globals(), _keras_module)
_module_dir = _module_util.get_parent_dir_for_name(_keras_module)
if _module_dir:
  _current_module.__path__ = [_module_dir] + _current_module.__path__
setattr(_current_module, "keras", keras)

# Explicitly import lazy-loaded modules to support autocompletion.
# pylint: disable=g-import-not-at-top
if _typing.TYPE_CHECKING:
  from tensorflow_estimator.python.estimator.api._v2 import estimator
# pylint: enable=g-import-not-at-top

# Enable TF2 behaviors
from tensorflow.python.compat import v2_compat as _compat # pylint: disable=g-import-not-at-top
_compat.enable_v2_behavior()
_major_api_version = 2

# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
# TODO(gunan): Enable setting an environment variable to define arbitrary plugin
# directories.
# TODO(gunan): Find a better location for this code snippet.
from tensorflow.python.framework import load_library as _ll
from tensorflow.python.lib.io import file_io as _fi

# Get sitepackages directories for the python installation.
# Candidates come from the user site, sys.path entries, site.getsitepackages
# and distutils; duplicates are removed at the end.
_site_packages_dirs = []
if _site.ENABLE_USER_SITE and _site.USER_SITE is not None:
  _site_packages_dirs += [_site.USER_SITE]
_site_packages_dirs += [_p for _p in _sys.path if 'site-packages' in _p]
if 'getsitepackages' in dir(_site):
  _site_packages_dirs += _site.getsitepackages()

if 'sysconfig' in dir(_distutils):
  _site_packages_dirs += [_distutils.sysconfig.get_python_lib()]

_site_packages_dirs = list(set(_site_packages_dirs))

# Find the location of this exact file.
_current_file_location = _inspect.getfile(_inspect.currentframe())
def _running_from_pip_package():
  """Returns True if this file lives under any detected site-packages dir."""
  for _dir in _site_packages_dirs:
    if _current_file_location.startswith(_dir):
      return True
  return False
if _running_from_pip_package():
  # TODO(gunan): Add sanity checks to loaded modules here.

  # Load first party dynamic kernels.
  _tf_dir = _os.path.dirname(_current_file_location)
  _kernel_dir = _os.path.join(_tf_dir, 'core', 'kernels')
  if _os.path.exists(_kernel_dir):
    _ll.load_library(_kernel_dir)

  # Load third party dynamic kernels from every site-packages directory
  # that contains a 'tensorflow-plugins' folder.
  for _s in _site_packages_dirs:
    _plugin_dir = _os.path.join(_s, 'tensorflow-plugins')
    if _os.path.exists(_plugin_dir):
      _ll.load_library(_plugin_dir)
      # Load Pluggable Device Library
      _ll.load_pluggable_device_library(_plugin_dir)
# Add module aliases
if hasattr(_current_module, 'keras'):
  # It is possible that keras is a lazily loaded module, which might break when
  # actually trying to import it. Have a Try-Catch to make sure it doesn't break
  # when it doing some very initial loading, like tf.compat.v2, etc.
  try:
    # Expose selected keras submodules as top-level aliases
    # (tf.losses, tf.metrics, tf.optimizers, tf.initializers).
    _keras_package = "keras.api._v2.keras."
    losses = _LazyLoader("losses", globals(), _keras_package + "losses")
    metrics = _LazyLoader("metrics", globals(), _keras_package + "metrics")
    optimizers = _LazyLoader(
        "optimizers", globals(), _keras_package + "optimizers")
    initializers = _LazyLoader(
        "initializers", globals(), _keras_package + "initializers")
    setattr(_current_module, "losses", losses)
    setattr(_current_module, "metrics", metrics)
    setattr(_current_module, "optimizers", optimizers)
    setattr(_current_module, "initializers", initializers)
  except ImportError:
    pass

  # Do an eager load for Keras' code so that any function/method that needs to
  # happen at load time will trigger, eg registration of optimizers in the
  # SavedModel registry.
  # See b/196254385 for more details.
  if hasattr(_current_module, "keras"):
    try:
      keras._load()
    except ImportError:
      pass
# pylint: enable=undefined-variable
# pylint: enable=undefined-variable
# Delete modules that should be hidden from dir().
# Don't fail if these modules are not available.
# For e.g. this file will be originally placed under tensorflow/_api/v1 which
# does not have 'python', 'core' directories. Then, it will be copied
# to tensorflow/ which does have these two directories.
# pylint: disable=undefined-variable
# Drop internal implementation packages from the public tf namespace,
# ignoring each one that was never bound in this build.
try:
  del python
except NameError:
  pass
try:
  del core
except NameError:
  pass
try:
  del compiler
except NameError:
  pass
# __all__ PLACEHOLDER
| |
# -*- coding: utf-8 -*-
# pylint: disable=C0103
"""
k-means clustering.
"""
# Author: bertrand-l
# License: BSD
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from warnings import warn
from .metrics import lp_norm
from .predictor import BasePredictor
from .rand import randintw
from .util import assert_in, assert_positive, strtab
__all__ = ['KMeansCluster']
def clusterindex(X, centroids, p=2):
    """
    Return, for every sample of X, the index of the nearest centroid
    (nearness measured by the Lp norm of order `p`, without the root).
    """
    X = np.asarray(X)
    centroids = np.asarray(centroids)
    dist = lp_norm(p=p, root=False, axis=1)
    nearest = [np.argmin(dist(sample - centroids)) for sample in X]
    return np.array(nearest)
def kmeans_init(X, k):
    """
    Return k randomly selected data points as initial guesses for centroids.

    X is split into k consecutive stripes of roughly n_samples / k points
    and one point is sampled from each stripe, spreading the picks across
    the data ordering.
    """
    n_samples = len(X)
    # int(...) is required: np.round returns a float, and float values are
    # not valid numpy array indices (the original code crashed on indexing).
    n = max(1, int(np.round(n_samples / k)))
    centroids = [X[min(c * n + np.random.randint(0, n), n_samples - 1)]
                 for c in range(k - 1)]
    # the last centroid is drawn from whatever remains of the data
    c = k - 1
    centroids += [X[c * n + np.random.randint(0, n_samples - c * n)]]
    return np.array(centroids)
def kmeanspp_init(X, k, p=2):
    """
    Return k randomly selected unique data points as initial guesses for
    centroids using the k-means++ method: each new centroid is drawn with
    probability proportional to its distance to the nearest centroid
    already chosen.
    """
    X = np.asarray(X)
    dist2 = lp_norm(p=p, root=False, axis=1)
    n_samples, n_features = X.shape
    centroids = np.zeros((k, n_features))
    # first centroid: uniform draw
    centroids[0] = X[randintw(n_samples, weights=None)]
    for i in range(1, k):
        # weight = distance to the closest of the i centroids picked so far
        weights = dist2(X - centroids[0])
        for j in range(1, i):
            weights = np.minimum(weights, dist2(X - centroids[j]))
        centroids[i] = X[randintw(n_samples, weights=weights)]
    return centroids
def kmeans(X, centroids, average="mean", nstepmax=5000, p=2,
           atol=0., rtol=1.e-5):
    """
    Returns centroids computed using k-means (Lloyd's) algorithm.

    Parameters
    ----------
    X : array_like
        Samples, shape=(n_samples, n_features).
    centroids : ndarray
        Initial centroids, shape=(k, n_features); updated in place.
    average : {'mean', 'median'}, optional
        Statistic used to recompute each centroid.
    nstepmax : positive integer, optional
        Maximum number of Lloyd iterations.
    p : positive integer or "inf", optional
        Order of the Lp norm.
    atol, rtol : non-negative floats
        Absolute and relative tolerances on the centroid shift used in the
        convergence test.

    Returns
    -------
    centroids : ndarray
    info : dict
        With keys "converged" (bool) and "nsteps" (int).
    icluster : ndarray
        Cluster index of each sample.
    """
    k = len(centroids)
    avg = np.median if average == 'median' else np.mean
    # scale for the relative tolerance: smallest initial inter-centroid
    # distance.  Guarded for k == 1, where the pairwise minimum is
    # undefined (the original min() over an empty generator raised).
    if k > 1:
        norm = lp_norm(p=p, root=False, axis=0)
        cdist = min(norm(centroids[i] - centroids[j]) for i in range(k)
                    for j in range(i))
    else:
        cdist = 0.
    norm = lp_norm(p=p, root=False, axis=1)
    istep, converged = 0, False
    while not converged and istep < nstepmax:
        icluster = clusterindex(X, centroids, p=p)
        centroids_old = centroids.copy()
        for i in range(k):
            centroids[i, :] = avg(X[icluster == i], axis=0)
        # Converged when the largest centroid shift drops below the
        # absolute-plus-relative tolerance.  The previous comparison was
        # inverted (cdist > atol + rtol * shift), which declared
        # convergence after a couple of steps regardless of the shift.
        shift = max(norm(centroids - centroids_old))
        converged = istep > 1 and shift <= atol + rtol * cdist
        istep += 1
    if not converged:
        warn("'kmeans' has not converged.")
    info = {"converged": converged, "nsteps": istep}
    return centroids, info, icluster
class KMeansCluster(BasePredictor):
    """
    Clustering using the k-means or k-medians method.

    Parameters
    ----------
    atol, rtol : non-negative floats
        Absolute and relative tolerance on changes between centroid loci used
        in the convergence criterion.
    average : {'mean', 'median'}, optional, default "mean"
        Compute centroid location as mean or median of cluster.
    init : {'++', ''}, optional, default '++'
        Use k-means++ initialization for faster convergence.
    nstepmax : positive integer, optional, default 5000
        Maximum number of steps.
    p : positive integer or "inf", optional
        Order of the Lp norm (p=2 is the Euclidean norm).

    Raises
    ------
    TypeError, ValueError
    """

    def __init__(self, average="mean", init='++', nstepmax=5000, p=2,
                 atol=0., rtol=1.e-5):
        BasePredictor.__init__(self)
        # centroids only become available after a call to learn()
        self._centroids = None
        self.settings(average=average, init=init, nstepmax=nstepmax, p=p,
                      atol=atol, rtol=rtol)

    def __str__(self):
        txt = BasePredictor.__str__(self)
        stats = self._training_stats
        table = self.centroids
        # show at most the first 10 coordinates of each centroid
        table = [[i] + list(table[i][:10]) + ['...'] * (len(table[0]) > 10)
                 for i in range(len(table))]
        txt += ("\n" + strtab(table, margin=8, ffmt='.3g', title="Centroids"))
        table = [("", "n points", "mean dist.", "max dist.", "nearest cluster",
                  "dist.")]
        for i in range(len(self.centroids)):
            table += [(i, stats["n_points"][i], stats["dist_mean"][i],
                       stats["dist_max"][i], stats["nearest_cluster_i"][i],
                       stats["nearest_cluster_dist"][i])]
        txt += ("\n" + strtab(table, margin=8,
                              ffmt=('i', 'i', '.3g', '.3g', 'i', '.3g'),
                              title="Clusters") +
                "\n (dist. = L2 distance from centroid)")
        return txt

    @property
    def centroids(self):
        """Centroid of each cluster (None before learn())."""
        return self._centroids

    @property
    def converged(self):
        """
        Minimizer has converged on the training set.
        """
        return self._training_info.get('converged', False)

    def learn(self, X, centroids=None, k=None):
        """
        Group the data points into k clusters repeatedly computing the centroid
        of each cluster until convergence.

        Parameters
        ----------
        X : array_like
            Input features, shape=(n_samples, n_features).
        k : int > 1
            Number of clusters. Mandatory if `centroids` is left as None,
            otherwise `k` is just the length of `centroids`.
        centroids : array_like, optional
            Initial guesses for the cluster centroids.

        Returns
        -------
        self : KMeansCluster object

        Raises
        ------
        TypeError, ValueError
        """
        # check input and parameters.  Builtin float is used on purpose:
        # the np.float alias was removed in NumPy 1.24.
        X = np.asarray(X, dtype=float)
        n_samples, n_features = X.shape
        BasePredictor.learn(self, X)
        average = self._settings['average']
        nstepmax = self._settings['nstepmax']
        atol, rtol = self._settings['atol'], self._settings['rtol']
        p = self._settings['p']
        init = self._settings['init']
        if centroids is None and k is None:
            raise TypeError("neither 'k' or 'centroids' is specified.")
        elif centroids is None:
            if init == '++':
                # forward the configured norm order (it was previously
                # dropped and the default p=2 always used)
                centroids = kmeanspp_init(X, k, p=p)
            else:
                centroids = kmeans_init(X, k)
        centroids = np.array(centroids).copy()
        k = centroids.shape[0]
        if not (len(centroids.shape) == 2 and
                centroids.shape[1] == n_features):
            raise TypeError("'centroids' does not have {0} features."
                            .format(n_features))
        if k > np.floor(n_samples / 2):
            raise ValueError("too many clusters or not enough samples.")
        # iteratively compute centroids
        centroids, info, icluster = kmeans(X, centroids, average=average,
                                           nstepmax=nstepmax, p=p,
                                           atol=atol, rtol=rtol)
        # summary and stats (np.int alias removed in NumPy 1.24 -> int())
        n_points = [int(np.sum(icluster == c)) for c in range(k)]
        norm = lp_norm(p=2, root=True)
        dmean, dmax = np.zeros(k), np.zeros(k)
        inearest, dnearest = np.zeros(k), np.zeros(k)
        for i in range(k):
            n = 0
            for dist in (norm(x - centroids[i]) for x in X[icluster == i]):
                n += 1
                dmean[i] += dist
                dmax[i] = max(dmax[i], dist)
            if n > 0:
                dmean[i] /= n
            # nearest other centroid; np.inf (not None) keeps the
            # assignment into the float array valid even when k == 1
            inearest_, dnearest_ = - 1, np.inf
            for j in (l for l in range(k) if l != i):
                d = norm(centroids[j] - centroids[i])
                if d < dnearest_:
                    inearest_, dnearest_ = j, d
            inearest[i] = inearest_
            dnearest[i] = dnearest_
        self._centroids = centroids
        self._training_info = info
        self._training_stats['n_points'] = np.array(n_points)
        self._training_stats['dist_mean'] = dmean
        self._training_stats['dist_max'] = dmax
        self._training_stats['nearest_cluster_i'] = inearest
        self._training_stats['nearest_cluster_dist'] = dnearest
        return self

    def predict(self, X):
        """
        Attribute each data point in X to a cluster.

        Parameters
        ----------
        X : array_like
            Input features, shape=(n_samples, n_features).

        Returns
        -------
        classes : array_like
        """
        X = BasePredictor.predict(self, X)
        return clusterindex(X, self.centroids, p=self._settings['p'])

    def settings(self, atol=None, average=None, init=None, nstepmax=None,
                 p=None, rtol=None):
        """
        Optionally sets one or several parameters and returns current settings.

        Parameters
        ----------
        atol, rtol : non-negative floats
            Absolute and relative tolerance on changes between centroid loci
            used in the convergence criterion.
        average : {'mean', 'median'}, optional
            Compute centroid location as mean or median of cluster.
        init : {'++', ''}, optional, default '++'
            Use k-means++ initialization for faster convergence.
        nstepmax : positive integer, optional
            Maximum number of steps.
        p : positive integer or "inf", optional
            Order of the Lp norm (p=2 is the Euclidean norm).

        Raises
        ------
        TypeError, ValueError
        """
        if atol is not None:
            assert_positive(atol, 'atol', nonnegative=True)
            self._settings['atol'] = atol
        if average is not None:
            average = str(average).lower().strip()
            assert_in(average, 'average', ('mean', 'median'))
            self._settings['average'] = average
        if init is not None:
            # any string containing '++' selects k-means++ initialization
            init = '++' if str(init).find('++') > -1 else ''
            self._settings['init'] = init
        if nstepmax is not None:
            nstepmax = int(nstepmax)
            assert_positive(nstepmax, 'nstepmax')
            self._settings['nstepmax'] = nstepmax
        if p is not None:
            assert_positive(p, 'p')
            self._settings['p'] = p
        if rtol is not None:
            assert_positive(rtol, 'rtol', nonnegative=True)
            self._settings['rtol'] = rtol
        return self._settings
| |
from __future__ import absolute_import
import logging
from unittest import TestCase
import settings
import sys, traceback
# NOTE(review): Python 2 only — reload() is a builtin and
# sys.setdefaultencoding only exists before site.py removes it.
reload(sys)
sys.setdefaultencoding("utf-8")
import os
from AppCompatProcessor import main
import tempfile
from shutil import copyfile
# Setup the logger
logger = logging.getLogger()
# module-level database handle placeholder (unused in this chunk)
DB = None
class TestAppLoadMP(TestCase):
    """Load-stage tests for AppCompatProcessor's 'load' command.

    All tests operate on an external data-set checkout expected to live in a
    sibling 'appcompatprocessor-DataSets' directory; each test loads data into
    a throwaway sqlite db in the temp directory and checks host/instance/entry
    counts returned by main().
    """
    def BuildTestPath(self, folder):
        """Return the path to dataset `folder`, removing leftover fixture
        files ('new_test_*' fakes and pre-processed '-shimcache.txt') first."""
        master_test_folder = os.path.join(os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), os.pardir), os.pardir)), "appcompatprocessor-DataSets")
        load_test_path = os.path.join(master_test_folder, folder)
        # Remove all fake hosts
        filelist = [ f for f in os.listdir(load_test_path) if f.startswith("new_test_") ]
        for f in filelist:
            os.remove(os.path.join(load_test_path, f))
        filelist = [f for f in os.listdir(load_test_path) if f.endswith("-shimcache.txt")]
        for f in filelist:
            os.remove(os.path.join(load_test_path, f))
        return load_test_path
    def test_SimpleLoadAppCompat(self):
        """Load the miniXML set once and check the expected counts."""
        load_test_path = self.BuildTestPath("miniXML")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase_SimpleLoadAppCompat', dir=tempfile.gettempdir())
        tempdb.close()
        try:
            (db_filenameFullPath, db_version, num_hosts, num_instances, num_entries) = main([tempdb.name, "load", load_test_path])
        except Exception as e:
            print traceback.format_exc()
            self.fail(e.message + "\n" + traceback.format_exc())
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts, 22, "test_SimpleLoadAppCompat failed!")
        self.assertEquals(num_instances, 22, "test_SimpleLoadAppCompat failed!")
        self.assertEquals(num_entries, 11561, "test_SimpleLoadAppCompat failed!")
    def test_SimpleLoadAmCache(self):
        """Load the AmCache set once and check the expected counts."""
        load_test_path = self.BuildTestPath("TestData-AmCache")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath1, db_version1, num_hosts1, num_instances1, num_entries1) = main(
            [tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts1, 6, "test_SimpleLoadAmCache failed!")
        self.assertEquals(num_entries1, 31260, "test_SimpleLoadAmCache failed!")
    def test_MultipleInstancesLoadAppCompat(self):
        """A dataset with one host must produce one host and one instance."""
        load_test_path = self.BuildTestPath("MultipleInstances-1")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath1, db_version1, num_hosts1, num_instances1, num_entries1) = main([tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts1, 1, "test_MultipleInstancesLoadAppCompat failed!")
        self.assertEquals(num_instances1, 1, "test_MultipleInstancesLoadAppCompat failed!")
    def test_MultipleInstancesLoadAppCompat2(self):
        """Two snapshots of the same host count as one host, two instances."""
        load_test_path = self.BuildTestPath("MultipleInstances-1")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath1, db_version1, num_hosts1, num_instances1, num_entries1) = main([tempdb.name, "load", load_test_path])
        self.assertEquals(num_hosts1, 1, "test_MultipleInstancesLoadAppCompat2 failed!")
        if num_instances1 == 2:
            print "stop"
        self.assertEquals(num_instances1, 1, "test_MultipleInstancesLoadAppCompat2 failed!")
        # Remove temp db
        os.remove(tempdb.name)
        load_test_path = self.BuildTestPath("MultipleInstances-2")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath2, db_version2, num_hosts2, num_instances2, num_entries2) = main([tempdb.name, "load", load_test_path])
        self.assertEquals(num_hosts1, 1, "test_MultipleInstancesLoadAppCompat2 failed!")
        self.assertEquals(num_hosts2, 1, "test_MultipleInstancesLoadAppCompat2 failed!")
        self.assertEquals(num_instances1, 1, "test_MultipleInstancesLoadAppCompat2 failed!")
        self.assertEquals(num_instances2, 2, "test_MultipleInstancesLoadAppCompat2 failed!")
        self.assertEquals(num_entries2, num_entries1 * 2, "test_MultipleInstancesLoadAppCompat2 failed!")
        # Remove temp db
        os.remove(tempdb.name)
    def test_MultipleInstancesLoadAppCompat3(self):
        """A duplicated snapshot file (same content, different hash in the
        name) must not create a second instance."""
        load_test_path = self.BuildTestPath("MultipleInstances-1")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath1, db_version1, num_hosts1, num_instances1, num_entries1) = main([tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        # Create new hosts
        new_filename = ""
        filelist = [ f for f in os.listdir(load_test_path) if f.endswith("_w32registry.xml") ]
        for f in filelist:
            new_filename = os.path.join(load_test_path, f.replace("49070f781b14d49c9086144819e45bee9fa215dea18dbe1223881c479314e8be","59070f781b14d49c9086144819e45bee9fa215dea18dbe1223881c479314e8be"))
            copyfile(os.path.join(load_test_path, f), new_filename)
        load_test_path = self.BuildTestPath("MultipleInstances-1")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath2, db_version2, num_hosts2, num_instances2, num_entries2) = main([tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        # Remove new_filename
        os.remove(new_filename)
        self.assertEquals(num_hosts1, 1, "test_MultipleInstancesLoadAppCompat3 failed!")
        self.assertEquals(num_hosts2, 1, "test_MultipleInstancesLoadAppCompat3 failed!")
        self.assertEquals(num_instances1, 1, "test_MultipleInstancesLoadAppCompat3 failed!")
        self.assertEquals(num_instances2, 1, "test_MultipleInstancesLoadAppCompat3 failed!")
        self.assertEquals(num_entries1, num_entries2, "test_MultipleInstancesLoadAppCompat3 failed!")
    def test_MultipleInstancesLoadAppCompat4(self):
        """A duplicated snapshot with a changed timestamp must register as a
        second instance of the same host."""
        load_test_path = self.BuildTestPath("MultipleInstances-1")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath1, db_version1, num_hosts1, num_instances1, num_entries1) = main([tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        # Create new hosts
        new_filename = ""
        filelist = [ f for f in os.listdir(load_test_path) if f.endswith("_w32registry.xml") ]
        for f in filelist:
            new_filename = os.path.join(load_test_path, f.replace("49070f781b14d49c9086144819e45bee9fa215dea18dbe1223881c479314e8be","59070f781b14d49c9086144819e45bee9fa215dea18dbe1223881c479314e8be"))
            # Change timestamp
            with open(new_filename, "wt") as fout:
                with open(os.path.join(load_test_path, f), "rt") as fin:
                    for line in fin:
                        fout.write(line.replace('2016-01-19T10:50:30Z', '2020-01-19T10:50:35Z'))
        load_test_path = self.BuildTestPath("MultipleInstances-1")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath2, db_version2, num_hosts2, num_instances2, num_entries2) = main([tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        # Remove new_filename
        os.remove(new_filename)
        self.assertEquals(num_hosts1, 1, "test_MultipleInstancesLoadAppCompat4 failed!")
        self.assertEquals(num_hosts2, 1, "test_MultipleInstancesLoadAppCompat4 failed!")
        self.assertEquals(num_instances1, 1, "test_MultipleInstancesLoadAppCompat4 failed!")
        self.assertEquals(num_instances2, 2, "test_MultipleInstancesLoadAppCompat4 failed!")
        self.assertEquals(num_entries1 * 2, num_entries2, "test_MultipleInstancesLoadAppCompat4 failed!")
    def test_AddExistingHostsAppCompat(self):
        """Reloading the same set must not change host/entry counts."""
        load_test_path = self.BuildTestPath("miniXML")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        # Load hosts
        (db_filenameFullPath1, db_version1, num_hosts1, num_instances1, num_entries1) = main([tempdb.name, "load", load_test_path])
        # Reload the same set of hosts again
        (db_filenameFullPath2, db_version2, num_hosts2, num_instances2, num_entries2) = main([tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts1, num_hosts2, "test_SimpleLoad failed!")
        self.assertEquals(num_entries1, num_entries2, "test_SimpleLoad failed!")
    def test_AddExistingHosts_PreProcessed(self):
        """Reloading after removing the pre-processed '-shimcache.txt'
        side-files must still produce identical counts."""
        load_test_path = self.BuildTestPath("miniXML")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        # Load hosts
        (db_filenameFullPath1, db_version1, num_hosts1, num_instances1, num_entries1) = main([tempdb.name, "load", load_test_path])
        # Remove all pre-processed -shimcache.txt files:
        filelist = [ f for f in os.listdir(load_test_path) if f.endswith("-shimcache.txt") ]
        for f in filelist:
            os.remove(os.path.join(load_test_path, f))
        # Reload the same set of hosts again
        (db_filenameFullPath2, db_version2, num_hosts2, num_instances2, num_entries2) = main([tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        # Remove all pre-processed -shimcache.txt files:
        filelist = [ f for f in os.listdir(load_test_path) if f.endswith("-shimcache.txt") ]
        for f in filelist:
            os.remove(os.path.join(load_test_path, f))
        self.assertEquals(num_hosts1, num_hosts2, "test_AddExistingHosts_PreProcessed failed!")
        self.assertEquals(num_entries1, num_entries2, "test_AddExistingHosts_PreProcessed failed!")
    def test_AddNewHostsAppCompat(self):
        """Adding copied 'new_test_*' hosts should double counts and hits."""
        # todo: Test is not finishing
        # NOTE(review): early return below disables the body of this test.
        return(0)
        load_test_path = self.BuildTestPath("miniXML")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        # Load hosts
        (db_filenameFullPath1, db_version1, num_hosts1, num_instances1, num_entries1) = main([tempdb.name, "load", load_test_path])
        # Do simple search
        (num_hits1, num_hits_suppressed1, results1) = main([tempdb.name, "search", "-F", "calc.exe"])
        # Create new hosts
        filelist = [ f for f in os.listdir(load_test_path) if f.endswith("_w32registry.xml") ]
        for f in filelist:
            copyfile(os.path.join(load_test_path, f), os.path.join(load_test_path, "new_test_" + f))
        # Add new hosts just added
        (db_filenameFullPath2, db_version2, num_hosts2, num_instances2, num_entries2) = main([tempdb.name, "load", load_test_path])
        # Do simple search
        (num_hits2, num_hits_suppressed2, results2) = main([tempdb.name, "search", "-F", "calc.exe"])
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts1 * 2, num_hosts2, "test_AddNewHostsAppCompat failed!")
        self.assertEquals(num_entries1 * 2, num_entries2, "test_AddNewHostsAppCompat failed!")
        self.assertEquals(num_hits1 * 2, num_hits2, "test_AddNewHostsAppCompat failed!")
    def test_AddExistingHostsAmCache(self):
        """Reloading the same AmCache set must not change any counts."""
        load_test_path = self.BuildTestPath("TestData-AmCache")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        # Load hosts
        (db_filenameFullPath1, db_version1, num_hosts1, num_instances1, num_entries1) = main([tempdb.name, "load", load_test_path])
        # Reload the same set of hosts again
        (db_filenameFullPath2, db_version2, num_hosts2, num_instances2, num_entries2) = main([tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts1, num_hosts2, "test_AddExistingHostsAmCache failed!")
        self.assertEquals(num_entries1, num_entries2, "test_AddExistingHostsAmCache failed!")
        self.assertEquals(num_instances1, num_instances2, "test_AddExistingHostsAmCache failed!")
    def test_AddNewHostsAmCache(self):
        """Adding copied AmCache hosts should double counts and hits."""
        load_test_path = self.BuildTestPath("TestData-AmCache")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        # Load hosts
        (db_filenameFullPath1, db_version1, num_hosts1, num_instances1, num_entries1) = main([tempdb.name, "load", load_test_path])
        # Do simple search
        (num_hits1, num_hits_suppressed1, results1) = main([tempdb.name, "search", "-F", "calc.exe"])
        # Create new hosts
        filelist = [ f for f in os.listdir(load_test_path) if f.endswith("_octet-stream.xml") ]
        for f in filelist:
            copyfile(os.path.join(load_test_path, f), os.path.join(load_test_path, "new_test_" + f))
        # Add new hosts just added
        (db_filenameFullPath, db_version, num_hosts2, num_instances2, num_entries2) = main([tempdb.name, "load", load_test_path])
        # Do simple search
        (num_hits2, num_hits_suppressed2, results2) = main([tempdb.name, "search", "-F", "calc.exe"])
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts1 * 2, num_hosts2, "test_AddNewHostsAmCache failed!")
        self.assertEquals(num_entries1 * 2, num_entries2, "test_AddNewHostsAmCache failed!")
        self.assertEquals(num_hits1 * 2, num_hits2, "test_AddNewHostsAmCache failed!")
    def test_RecursiveLoad(self):
        """Loading a nested directory tree must pick up all hosts."""
        load_test_path = self.BuildTestPath("Recursive")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath, db_version, num_hosts, num_instances, num_entries) = main(
            [tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts, 23, "test_RecursiveLoad failed!")
        self.assertEquals(num_entries, 12442, "test_RecursiveLoad failed!")
    # NOTE(review): the following '__test_*' methods do not start with
    # 'test', so unittest does not collect them — they are disabled.
    def __test_ZipLoadAppCompat(self):
        """(disabled) Load hosts directly from a zip archive."""
        load_test_path = self.BuildTestPath("TestZip-AppCompat/dir1/56fe48f9b8b35.zip")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath, db_version, num_hosts, num_instances, num_entries) = main(
            [tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts, 22, "test_ZipLoadAppCompat failed!")
    def __test_ZipLoadAmCache(self):
        """(disabled) Zip load must match the unzipped AmCache load."""
        load_test_path = self.BuildTestPath("TestData-AmCache")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath, db_version, num_hosts, num_instances, num_entries) = main(
            [tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        load_test_path = self.BuildTestPath("TestZip-AmCache/345a67b67f766.zip")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath2, db_version2, num_hosts2, num_instances2, num_entries2) = main(
            [tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts, num_hosts2, "test_ZipLoadAmCache failed!")
        self.assertEquals(num_instances, num_instances2, "test_ZipLoadAmCache failed!")
        self.assertEquals(num_entries, num_entries2, "test_ZipLoadAmCache failed!")
    def __test_ZipLoadRecursive(self):
        """(disabled) Recursive load of a directory containing zips."""
        load_test_path = self.BuildTestPath("TestZip-AppCompat")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath, db_version, num_hosts, num_instances, num_entries2) = main(
            [tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts, 22, "test_ZipLoadRecursive failed!")
    def __test_ZipLoadRecursive2(self):
        """(disabled) Recursive zip load must match the plain miniXML load."""
        load_test_path = self.BuildTestPath("miniXML")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath, db_version, num_hosts, num_instances, num_entries) = main(
            [tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        load_test_path = self.BuildTestPath("TestZip-AppCompat")
        # Get temp db name for the test
        tempdb = tempfile.NamedTemporaryFile(suffix='.db', prefix='testCase', dir=tempfile.gettempdir())
        tempdb.close()
        (db_filenameFullPath2, db_version2, num_hosts2, num_instances2, num_entries2) = main(
            [tempdb.name, "load", load_test_path])
        # Remove temp db
        os.remove(tempdb.name)
        self.assertEquals(num_hosts, num_hosts2, "test_ZipLoadRecursive failed!")
        self.assertEquals(num_instances, num_instances2, "test_ZipLoadRecursive failed!")
        self.assertEquals(num_entries, num_entries2, "test_ZipLoadRecursive failed!")
| |
#!/usr/bin/python
# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved.
"""
This script is a configuration helper to select pip requirement files to install
and python and shell configuration scripts to execute based on provided config
directories paths arguments and the operating system platform. To use, create
a configuration directory tree that contains any of these:
* Requirements files named with this convention:
- base.txt contains common requirements installed on all platforms.
- win.txt, linux.txt, mac.txt, posix.txt are os-specific requirements.
* Python scripts files named with this convention:
- base.py is a common script executed on all os before os-specific scripts.
- win.py, linux.py, mac.py, posix.py are os-specific scripts to execute.
* Shell or Windows CMD scripts files named with this convention:
- win.bat is a windows bat file to execute
- posix.sh, linux.sh, mac.sh are os-specific scripts to execute.
The config directory structure contains one or more directories paths. This
way you can have a main configuration (that is always used) and additional
sub-configurations of a product such as for prod, test, ci, dev, or anything
else.
All scripts and requirements are optional and only used if present. Scripts
are executed in sequence, one after the other after all requirements are
installed, so they may import from any installed requirement.
The execution order is:
- requirements installation
- python scripts execution
- shell scripts execution
On posix, posix Python and shell scripts are executed before mac or linux
scripts.
The base scripts or packages are always installed first before platform-
specific ones.
For example a tree could be looking like this::
etc/conf
base.txt : base pip requirements for all platforms
linux.txt : linux-only pip requirements
base.py : base config script for all platforms
win.py : windows-only config script
posix.sh: posix-only shell script
etc/conf/prod
base.txt : base pip requirements for all platforms
linux.txt : linux-only pip requirements
linux.sh : linux-only script
base.py : base config script for all platforms
mac.py : mac-only config script
"""
from __future__ import print_function
import os
import stat
import sys
import shutil
import subprocess
# platform-specific file base names
sys_platform = str(sys.platform).lower()
on_win = False
if sys_platform.startswith('linux'):
    platform_names = ('posix', 'linux',)
elif 'win32' in sys_platform:
    platform_names = ('win',)
    on_win = True
elif 'darwin' in sys_platform:
    platform_names = ('posix', 'mac',)
else:
    raise Exception('Unsupported OS/platform %r' % sys_platform)
# NOTE: a stray 'platform_names = tuple()' previously reset the detected
# platforms here, so no OS-specific requirements or scripts were ever
# selected; it has been removed.

# common file basenames for requirements and scripts
base = ('base',)

# known full file names with txt extension for requirements
# base is always last
requirements = tuple(p + '.txt' for p in platform_names + base)

# known full file names with py extensions for scripts
# base is always last
python_scripts = tuple(p + '.py' for p in platform_names + base)

# known full file names of shell scripts
# there is no base for scripts: they cannot work cross OS (cmd vs. sh)
shell_scripts = tuple(p + '.sh' for p in platform_names)
if on_win:
    shell_scripts = ('win.bat',)
def call(cmd, root_dir):
    """ Run a `cmd` command (as a list of args) with all env vars."""
    cmd = ' '.join(cmd)
    proc = subprocess.Popen(cmd, shell=True, env=dict(os.environ), cwd=root_dir)
    if proc.wait() != 0:
        print()
        print('Failed to execute command:\n%(cmd)s. Aborting...' % locals())
        sys.exit(1)
def find_pycache(root_dir):
    """
    Yield __pycache__ directory paths found in root_dir as paths relative to
    root_dir.
    """
    for parent, subdirs, _files in os.walk(root_dir):
        for sub in subdirs:
            if sub != '__pycache__':
                continue
            relative = os.path.join(parent, sub).replace(root_dir, '', 1)
            yield relative.strip(os.path.sep)
def clean(root_dir):
    """
    Remove cleanable directories and files in root_dir.

    Best-effort: paths that cannot be removed (permissions, races) are
    skipped silently rather than aborting the configuration.
    """
    print('* Cleaning ...')
    cleanable = '''build bin lib Lib include Include Scripts local
                   django_background_task.log
                   develop-eggs eggs parts .installed.cfg
                   .Python
                   .cache
                   pip-selfcheck.json
                   '''.split()
    # also clean __pycache__ if any
    cleanable.extend(find_pycache(root_dir))
    for d in cleanable:
        loc = os.path.join(root_dir, d)
        try:
            if os.path.exists(loc):
                if os.path.isdir(loc):
                    shutil.rmtree(loc)
                else:
                    os.remove(loc)
        except OSError:
            # The previous bare 'except' also hid programming errors;
            # OSError covers the expected filesystem failures while
            # keeping the best-effort behavior.
            pass
def build_pip_dirs_args(paths, root_dir, option='--extra-search-dir='):
    """
    Yield one pip command-line argument per existing directory in `paths`,
    formed as `option` followed by the double-quoted absolute path.
    Relative paths are resolved against `root_dir`; missing ones skipped.
    """
    for entry in paths:
        absolute = entry if os.path.isabs(entry) else os.path.join(root_dir, entry)
        if os.path.exists(absolute):
            yield '%s"%s"' % (option, absolute)
def create_virtualenv(std_python, root_dir, tpp_dirs, quiet=False):
    """
    Create a virtualenv in `root_dir` using the `std_python` Python
    executable. One of the `tpp_dirs` must contain a vendored virtualenv.py and
    virtualenv dependencies such as setuptools and pip packages.

    @std_python: Path or name of the Python executable to use.
    @root_dir: directory in which the virtualenv will be created. This is also
    the root directory for the project and the base directory for vendored
    components directory paths.
    @tpp_dirs: list of directory paths relative to `root_dir` containing
    vendored Python distributions that pip will use to find required
    components.
    """
    if not quiet:
        print("* Configuring Python ...")
    # search virtualenv.py in the tpp_dirs. keep the first found
    venv_py = None
    for tpd in tpp_dirs:
        venv = os.path.join(root_dir, tpd, 'virtualenv.py')
        if os.path.exists(venv):
            venv_py = '"' + venv + '"'
            break
    # error out if venv_py not found
    if not venv_py:
        print("Configuration Error ... aborting.")
        # sys.exit rather than the site-injected exit() builtin: exit() is
        # meant for interactive sessions and is absent without site.py.
        sys.exit(1)
    vcmd = [std_python, venv_py, '--never-download']
    if quiet:
        vcmd += ['--quiet']
    # third parties may be in more than one directory
    vcmd.extend(build_pip_dirs_args(tpp_dirs, root_dir))
    # we create the virtualenv in the root_dir
    vcmd.append('"' + root_dir + '"')
    call(vcmd, root_dir)
def activate(root_dir):
    """ Activate a virtualenv in the current process."""
    script = os.path.join(root_dir, 'bin', 'activate_this.py')
    with open(script) as stream:
        source = stream.read()
    # run the venv's activation script with __file__ pointing at itself
    exec(compile(source, script, 'exec'), dict(__file__=script))
def install_3pp(configs, root_dir, tpp_dirs, quiet=False):
    """
    Install requirements from requirement files found in `configs` with pip,
    using the vendored components in `tpp_dirs`.
    """
    if not quiet:
        print("* Installing components ...")
    req_files = get_conf_files(configs, root_dir, requirements, quiet)
    # on windows pip is invoked through the configured interpreter
    if on_win:
        python_exe = os.path.join(root_dir, 'bin', 'python.exe')
        pip_base = [python_exe, '-m', 'pip']
    else:
        pip_base = ['pip']
    # vendored wheel/sdist directories are the only package source
    find_links = list(build_pip_dirs_args(tpp_dirs, root_dir, '--find-links='))
    for req_file in req_files:
        pcmd = pip_base + ['install', '--upgrade', '--no-index', '--no-cache-dir']
        if quiet:
            pcmd.append('--quiet')
        pcmd.extend(find_links)
        pcmd.extend(['-r', '"' + os.path.join(root_dir, req_file) + '"'])
        call(pcmd, root_dir)
def run_scripts(configs, root_dir, configured_python, quiet=False):
    """
    Run Python scripts and shell scripts found in `configs`.
    """
    if not quiet:
        print("* Configuring ...")
    # Python scripts run first, with the configured interpreter
    for py_script in get_conf_files(configs, root_dir, python_scripts):
        script_path = '"' + os.path.join(root_dir, py_script) + '"'
        call([configured_python, script_path], root_dir)
    # then shell scripts: sourced on posix, invoked directly on windows
    for sh_script in get_conf_files(configs, root_dir, shell_scripts):
        prefix = [] if on_win else ['.']
        call(prefix + [os.path.join(root_dir, sh_script)], root_dir)
def chmod_bin(directory):
    """
    Makes the directory and its children executable recursively.

    Every file found below `directory` is set to mode 0o711: user
    read/write/execute plus the execute bit for group and other.
    Directories themselves are left untouched.
    """
    mode = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
            | stat.S_IXGRP | stat.S_IXOTH)
    for dirpath, _dirnames, filenames in os.walk(directory):
        for name in filenames:
            os.chmod(os.path.join(dirpath, name), mode)
def get_conf_files(config_dir_paths, root_dir, file_names=requirements, quiet=False):
    """
    Return a list of collected path-prefixed file paths matching names in a
    file_names tuple, based on config_dir_paths, root_dir and the types of
    file_names requested. Returned paths are posix paths.

    @config_dir_paths: Each config_dir_path is a relative from the project
    root to a config dir. This script should always be called from the project
    root dir.

    @root_dir: The project absolute root dir.

    @file_names: get requirements, python or shell files based on list of
    supported file names provided as a tuple of supported file_names.
    Scripts or requirements are optional and only used if present. Unknown
    scripts or requirements file_names are ignored (but they could be used
    indirectly by known requirements with -r requirements inclusion, or
    scripts with python imports.)

    Since Python scripts are executed after requirements are installed they
    can import from any requirement-installed component such as Fabric.
    """
    # collect files for each requested dir path
    collected = []
    for config_dir_path in config_dir_paths:
        abs_config_dir_path = os.path.join(root_dir, config_dir_path)
        if not os.path.exists(abs_config_dir_path):
            # missing config dirs are skipped with a warning, not an error;
            # NOTE: the message uses %(name)s substitution via locals()
            if not quiet:
                print('Configuration directory %(config_dir_path)s '
                      'does not exists. Skipping.' % locals())
            continue
        # Support args like enterprise or enterprise/dev
        # normalize separators so Windows-style paths also split correctly
        paths = config_dir_path.strip('/').replace('\\', '/').split('/')
        # a tuple of (relative path, location,)
        # built incrementally so each segment is joined with the platform's
        # native separator
        current = None
        for path in paths:
            if not current:
                current = (path, os.path.join(root_dir, path),)
            else:
                base_path, base_loc = current
                current = (os.path.join(base_path, path),
                           os.path.join(base_loc, path),)

        # only the deepest (fully joined) directory is scanned below; the
        # intermediate ancestor directories are not searched for files
        path, loc = current
        # we iterate on known filenames to ensure the defined precedence
        # is respected (posix over mac, linux), etc
        for n in file_names:
            for f in os.listdir(loc):
                if f == n:
                    f_loc = os.path.join(loc, f)
                    # de-duplicate while preserving the precedence order
                    if f_loc not in collected:
                        collected.append(f_loc)

    return collected
usage = '\nUsage: configure [--clean] <path/to/configuration/directory> ...\n'


if __name__ == '__main__':
    # you must create a CONFIGURE_QUIET env var if you want to run quietly
    quiet = 'CONFIGURE_QUIET' in os.environ

    # define/setup common directories; this script is expected to live in
    # <root>/etc, so the project root is the parent of this file's directory
    etc_dir = os.path.abspath(os.path.dirname(__file__))
    root_dir = os.path.dirname(etc_dir)

    args = sys.argv[1:]
    if args:
        arg0 = args[0]
        if arg0 == '--clean':
            clean(root_dir)
            sys.exit(0)
        elif arg0.startswith('-'):
            # any other dash-prefixed first argument is an unknown option
            print()
            print('ERROR: unknown option: %(arg0)s' % locals())
            print(usage)
            sys.exit(1)

    sys.path.insert(0, root_dir)
    bin_dir = os.path.join(root_dir, 'bin')
    standard_python = sys.executable

    if on_win:
        configured_python = os.path.join(bin_dir, 'python.exe')
        scripts_dir = os.path.join(root_dir, 'Scripts')
        bin_dir = os.path.join(root_dir, 'bin')
        if not os.path.exists(scripts_dir):
            os.makedirs(scripts_dir)
        # on Windows, make <root>/bin a directory junction to <root>/Scripts
        # so the POSIX-style bin/ paths used elsewhere keep working
        if not os.path.exists(bin_dir):
            cmd = ('mklink /J "%(bin_dir)s" "%(scripts_dir)s"' % locals()).split()
            call(cmd, root_dir)
    else:
        configured_python = os.path.join(bin_dir, 'python')
        scripts_dir = bin_dir

    # Get requested configuration paths to collect components and scripts later
    configs = []
    for path in args[:]:
        abs_path = path
        if not os.path.isabs(path):
            abs_path = os.path.join(root_dir, path)
        if not os.path.exists(abs_path):
            # a missing requested configuration directory is a hard error
            print()
            print('ERROR: Configuration directory does not exists:\n'
                  ' %(path)s: %(abs_path)r'
                  % locals())
            print(usage)
            sys.exit(1)
        configs.append(path)

    # Collect vendor directories from environment variables: one or more third-
    # party directories may exist as environment variables prefixed with TPP_DIR
    thirdparty_dirs = []
    for envvar, path in os.environ.items():
        if not envvar.startswith('TPP_DIR'):
            continue
        abs_path = path
        if not os.path.isabs(path):
            abs_path = os.path.join(root_dir, path)
        if not os.path.exists(abs_path):
            # a missing TPP dir is only a warning: pip may still succeed
            # using the other vendored directories
            if not quiet:
                print()
                print('WARNING: Third-party Python libraries directory does not exists:\n'
                      ' %(path)r: %(abs_path)r\n'
                      ' Provided by environment variable:\n'
                      ' set %(envvar)s=%(path)r' % locals())
                print()
        else:
            thirdparty_dirs.append(path)

    # Finally execute our three steps: venv, install and scripts
    # (the venv is only created when missing; activate always runs)
    if not os.path.exists(configured_python):
        create_virtualenv(standard_python, root_dir, thirdparty_dirs, quiet=quiet)
    activate(root_dir)

    install_3pp(configs, root_dir, thirdparty_dirs, quiet=quiet)
    run_scripts(configs, root_dir, configured_python, quiet=quiet)
    chmod_bin(bin_dir)
    if not quiet:
        print("* Configuration completed.")
        print()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A pedestrian version of The Cannon.
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["CannonModel"]
import logging
import numpy as np
import scipy.optimize as op
from . import (model, utils)
logger = logging.getLogger(__name__)
class CannonModel(model.BaseCannonModel):
    """
    A generalised Cannon model for the estimation of arbitrary stellar labels.

    :param labels:
        A table with columns as labels, and stars as rows.

    :type labels:
        :class:`~astropy.table.Table` or numpy structured array

    :param fluxes:
        An array of fluxes for stars in the training set, given as shape
        `(num_stars, num_pixels)`. The `num_stars` should match the number of
        rows in `labels`.

    :type fluxes:
        :class:`np.ndarray`

    :param flux_uncertainties:
        An array of 1-sigma flux uncertainties for stars in the training set,
        The shape of the `flux_uncertainties` should match `fluxes`.

    :type flux_uncertainties:
        :class:`np.ndarray`

    :param dispersion: [optional]
        The dispersion values corresponding to the given pixels. If provided,
        this should have length `num_pixels`.

    :param live_dangerously: [optional]
        If enabled then no checks will be made on the label names, prohibiting
        the user to input human-readable forms of the label vector.
    """

    # Attribute names grouped by role; presumably consumed by
    # model.BaseCannonModel for (de)serialisation -- TODO confirm.
    _descriptive_attributes = ["_label_vector"]
    _trained_attributes = ["_coefficients", "_scatter"]
    _data_attributes = \
        ["training_labels", "training_fluxes", "training_flux_uncertainties"]

    def __init__(self, *args, **kwargs):
        # All validation and data handling is delegated to the base class.
        super(CannonModel, self).__init__(*args, **kwargs)

    @model.requires_label_vector
    def train(self, **kwargs):
        """
        Train the model based on the training set and the description of the
        label vector.

        Fits, per pixel, the label-vector coefficients and an intrinsic
        scatter term (see `_fit_pixel`), serially or through `self.pool`.

        :returns:
            A two-tuple of the coefficients array (one row per pixel) and
            the per-pixel scatter array.
        """

        # Initialise the scatter and coefficient arrays (NaN until fitted).
        N_px = len(self.dispersion)
        scatter = np.nan * np.ones(N_px)
        label_vector_array = self.label_vector_array
        theta = np.nan * np.ones((N_px, label_vector_array.shape[0]))

        # Details for the progressbar.
        pb_kwds = {
            "message": "Training Cannon model from {0} stars with {1} pixels "
                       "each".format(len(self.training_labels), N_px),
            "size": 100 if kwargs.pop("progressbar", True) else -1
        }

        if self.pool is None:
            # Serial path: fit one pixel at a time.
            for pixel in utils.progressbar(range(N_px), **pb_kwds):
                theta[pixel, :], scatter[pixel] = _fit_pixel(
                    self.training_fluxes[:, pixel],
                    self.training_flux_uncertainties[:, pixel],
                    label_vector_array, **kwargs)

        else:
            # Not as nice as just mapping, but necessary for a progress bar.
            process = { pixel: self.pool.apply_async(
                    _fit_pixel,
                    args=(
                        self.training_fluxes[:, pixel],
                        self.training_flux_uncertainties[:, pixel],
                        label_vector_array
                    ),
                    kwds=kwargs) \
                for pixel in range(N_px) }

            # Collect the async results as they complete.
            for pixel, proc in utils.progressbar(process.items(), **pb_kwds):
                theta[pixel, :], scatter[pixel] = proc.get()

        self.coefficients, self.scatter = (theta, scatter)
        self._trained = True

        return (theta, scatter)

    @model.requires_training_wheels
    def predict(self, labels=None, **labels_as_kwargs):
        """
        Predict spectra from the trained model, given the labels.

        :param labels: [optional]
            The labels required for the trained model. This should be a N-length
            list matching the number of unique terms in the model, in the order
            given by `self.labels`. Alternatively, labels can be explicitly
            given as keyword arguments.

        :returns:
            The predicted (flattened) flux array: coefficients dotted with
            the label vector rows built from the given labels.
        """
        labels = self._format_input_labels(labels, **labels_as_kwargs)
        return np.dot(self.coefficients,
            model._build_label_vector_rows(self.label_vector, labels)).flatten()

    @model.requires_training_wheels
    def fit(self, fluxes, flux_uncertainties, **kwargs):
        """
        Solve the labels for given pixel fluxes and uncertainties.

        :param fluxes:
            The normalised fluxes. These should be on the same dispersion scale
            as the trained data. May be one star (1-D) or many stars (2-D).

        :param flux_uncertainties:
            The 1-sigma uncertainties in the fluxes. This should have the same
            shape as `fluxes`.

        :param full_output: [optional keyword]
            If True, also return the covariance matrix (default False).

        :returns:
            The labels and covariance matrix.
        """
        label_indices = self._get_lowest_order_label_indices()
        fluxes, flux_uncertainties = map(np.array, (fluxes, flux_uncertainties))

        # TODO: Consider parallelising this, which would mean factoring
        # _fit out of the model class, which gets messy.
        # Since solving for labels is not a big bottleneck (yet), let's leave
        # this.
        full_output = kwargs.pop("full_output", False)
        if fluxes.ndim == 1:
            # Single star.
            labels, covariance = \
                self._fit(fluxes, flux_uncertainties, label_indices, **kwargs)

        else:
            # Many stars: solve each row independently.
            N_stars, N_labels = (fluxes.shape[0], len(self.labels))
            labels = np.empty((N_stars, N_labels), dtype=float)
            covariance = np.empty((N_stars, N_labels, N_labels), dtype=float)
            for i, (f, u) in enumerate(zip(fluxes, flux_uncertainties)):
                labels[i, :], covariance[i, :] = \
                    self._fit(f, u, label_indices, **kwargs)

        if full_output:
            return (labels, covariance)
        return labels

    def _fit(self, fluxes, flux_uncertainties, label_indices, **kwargs):
        """
        Solve the labels for given pixel fluxes and uncertainties
        for a single star.

        :param fluxes:
            The normalised fluxes. These should be on the same dispersion scale
            as the trained data.

        :param flux_uncertainties:
            The 1-sigma uncertainties in the fluxes. This should have the same
            shape as `fluxes`.

        :param label_indices:
            Indices of the lowest-order label terms in the label vector,
            used to back out an initial label guess (may contain None).

        :returns:
            The labels and covariance matrix.
        """

        # Check which pixels to use, then just use those: uncertainty below
        # the threshold AND finite coefficients/fluxes/uncertainties.
        use = (flux_uncertainties < kwargs.get("max_uncertainty", 1)) \
            * np.isfinite(self.coefficients[:, 0] * fluxes * flux_uncertainties)

        fluxes = fluxes.copy()[use]
        flux_uncertainties = flux_uncertainties.copy()[use]
        scatter, coefficients = self.scatter[use], self.coefficients[use]

        # Weighted linear least-squares solve for the label-vector terms,
        # used only to seed the non-linear fit below.
        Cinv = 1.0 / (scatter**2 + flux_uncertainties**2)
        A = np.dot(coefficients.T, Cinv[:, None] * coefficients)
        B = np.dot(coefficients.T, Cinv * fluxes)
        theta_p0 = np.linalg.solve(A, B)

        # Need to match the initial theta coefficients back to label values.
        # (Maybe this should use some general non-linear simultaneous solver?)
        initial = {}
        for index in label_indices:
            if index is None: continue
            # label_vector[index][0] is a (label, order) pair here.
            label, order = self.label_vector[index][0]
            # The +1 index offset is because the first theta is a scaling.
            value = abs(theta_p0[1 + index])**(1./order)
            if not np.isfinite(value): continue
            initial[label] = value

        missing = set(self.labels).difference(initial)
        if missing:
            # There must be some coefficients that are only used in cross-terms.
            # We could solve for them, or just take the mean of the training
            # set as the initial guess.
            initial.update({ label: \
                np.nanmean(self.training_labels[label]) for label in missing
            })

        # Create and test the generating function.
        # curve_fit passes candidate label values; we rebuild the model flux.
        def function(coeffs, *labels):
            return np.dot(coeffs,
                model._build_label_vector_rows(self.label_vector,
                    { label: [v] for label, v in zip(self.labels, labels) }
                )).flatten()

        # Solve for the parameters.
        kwds = {
            "p0": np.array([initial[label] for label in self.labels]),
            "maxfev": 10000,
            "sigma": 1.0/np.sqrt(Cinv),
            "absolute_sigma": True
        }
        kwds.update(kwargs)
        labels_opt, cov = op.curve_fit(function, coefficients, fluxes, **kwds)
        return (labels_opt, cov)
def _fit_pixel(fluxes, flux_uncertainties, label_vector_array, **kwargs):
    """
    Return the optimal label vector coefficients and scatter for a pixel, given
    the fluxes, uncertainties, and the label vector array.

    :param fluxes:
        The fluxes for the given pixel, from all stars.

    :param flux_uncertainties:
        The 1-sigma flux uncertainties for the given pixel, from all stars.

    :param label_vector_array:
        The label vector array, of shape `(N_terms, N_stars)` -- this is how
        it is indexed below (`shape[0]` sizes the coefficient vector).

    :returns:
        The optimised label vector coefficients and scatter for this pixel.
    """
    max_uncertainty = kwargs.get("max_uncertainty", 1)
    failed_response = (np.nan * np.ones(label_vector_array.shape[0]),
                       max_uncertainty)
    # If no star has a usable (small enough) uncertainty, this pixel cannot
    # constrain anything: bail out with NaN coefficients.
    if np.all(flux_uncertainties > max_uncertainty):
        return failed_response

    # Initial guess of the scatter: excess variance over the median
    # measurement variance, falling back to the flux stddev if negative.
    scatter = np.var(fluxes) - np.median(flux_uncertainties)**2
    scatter = np.sqrt(scatter) if scatter >= 0 else np.std(fluxes)

    # Optimise the scatter; at each trial scatter value the optimal
    # coefficients are solved for in closed form by _pixel_scatter_nll.
    op_scatter, fopt, direc, n_iter, n_funcs, warnflag = op.fmin_powell(
        _pixel_scatter_nll, scatter,
        args=(fluxes, flux_uncertainties, label_vector_array),
        disp=False, full_output=True)

    if warnflag > 0:
        logger.warning("Warning: {}".format([
            "Maximum number of function evaluations made during optimisation.",
            "Maximum number of iterations made during optimisation."
        ][warnflag - 1]))

    # Calculate the coefficients at the optimal scatter value.
    # Note that if we can't solve for the coefficients, we should just set them
    # as zero and send back a giant variance.
    try:
        coefficients, ATCiAinv, variance = _fit_coefficients(
            fluxes, flux_uncertainties, op_scatter, label_vector_array)

    except np.linalg.LinAlgError:
        # np.linalg.LinAlgError is the same class previously referenced as
        # np.linalg.linalg.LinAlgError; that alias was removed in NumPy 2.0.
        logger.exception("Failed to calculate coefficients")
        if kwargs.get("debug", False): raise

        return failed_response

    else:
        return (coefficients, op_scatter)
def _pixel_scatter_nll(scatter, fluxes, flux_uncertainties, label_vector_array,
    **kwargs):
    """
    Return the negative log-likelihood for the scatter in a single pixel.

    :param scatter:
        The model scatter in the pixel.

    :param fluxes:
        The fluxes for a given pixel (in many stars).

    :param flux_uncertainties:
        The 1-sigma uncertainties in the fluxes for a given pixel. This should
        have the same shape as `fluxes`.

    :param label_vector_array:
        The label vector array for each star, for the given pixel.

    :returns:
        The negative log-likelihood of the scatter, given the fluxes and the
        label vector array (np.inf for negative scatter or singular systems).

    :raises np.linalg.LinAlgError:
        If there was an error in inverting a matrix, and `debug` is set to True.
    """
    # Scatter must be non-negative; an infinite NLL steers the optimiser away.
    if 0 > scatter:
        return np.inf

    try:
        # Calculate the coefficients for the given level of scatter.
        theta, ATCiAinv, variance = _fit_coefficients(
            fluxes, flux_uncertainties, scatter, label_vector_array)

    except np.linalg.LinAlgError:
        # np.linalg.LinAlgError is the same class previously referenced as
        # np.linalg.linalg.LinAlgError; that alias was removed in NumPy 2.0.
        if kwargs.get("debug", False): raise

        return np.inf

    # Gaussian negative log-likelihood with per-star total variance.
    # (Named model_flux so we do not shadow the module-level `model` import.)
    model_flux = np.dot(theta, label_vector_array)

    return 0.5 * np.sum((fluxes - model_flux)**2 / variance) \
        + 0.5 * np.sum(np.log(variance))
def _fit_coefficients(fluxes, flux_uncertainties, scatter, label_vector_array):
"""
Fit model coefficients and scatter to a given set of normalised fluxes for a
single pixel.
:param fluxes:
The normalised fluxes for a single pixel (in many stars).
:param flux_uncertainties:
The 1-sigma uncertainties in normalised fluxes. This should have the
same shape as `fluxes`.
:param label_vector_array:
The label vector array for each pixel.
:returns:
The label vector coefficients for the pixel, the inverse variance matrix
and the total pixel variance.
"""
variance = flux_uncertainties**2 + scatter**2
CiA = label_vector_array.T * \
np.tile(1./variance, (label_vector_array.shape[0], 1)).T
ATCiAinv = np.linalg.inv(np.dot(label_vector_array, CiA))
ATY = np.dot(label_vector_array, fluxes/variance)
theta = np.dot(ATCiAinv, ATY)
return (theta, ATCiAinv, variance)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import time
import numpy as np
from tensorflow.core.framework import function_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _OptimizerOptions():
  """Yields ConfigProtos at optimizer level L0 covering every on/off
  combination of common-subexpression elimination, function inlining and
  constant folding (8 configs total)."""
  toggles = (False, True)
  for cse in toggles:
    for inline in toggles:
      for cfold in toggles:
        optimizer_options = config_pb2.OptimizerOptions(
            opt_level=config_pb2.OptimizerOptions.L0,
            do_common_subexpression_elimination=cse,
            do_function_inlining=inline,
            do_constant_folding=cfold)
        yield config_pb2.ConfigProto(
            graph_options=config_pb2.GraphOptions(
                optimizer_options=optimizer_options))
@test_util.with_c_api
class FunctionTest(test.TestCase):
"""Test methods for verifying Function support.
These test methods are used as mix-ins in two test cases: with
and without C API support.
"""
def testIdentity(self):
@function.Defun(dtypes.float32, func_name="MyIdentity")
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], sess.run(call))
def testIdentityImplicitDeref(self):
@function.Defun(dtypes.float32, func_name="MyIdentity")
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
var = variables.Variable([18.0])
call = MyIdentityFunc(var._ref()) # pylint: disable=protected-access
self.assertEqual("MyIdentity", call.op.name)
for cfg in _OptimizerOptions():
with session.Session(config=cfg) as sess:
sess.run(var.initializer)
self.assertAllEqual([18.0], sess.run(call))
def testIdentityOutputName(self):
@function.Defun(
dtypes.float32, func_name="MyIdentity", out_names=["my_result_name"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], sess.run(call))
def testTooManyOutputNames(self):
@function.Defun(
dtypes.float32, func_name="MyIdentity",
out_names=["my_result1", "my_result2"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
(r"output names must be either empty or equal in size to outputs. "
"output names size = 2 outputs size = 1")):
MyIdentityFunc([18.0])
def testDefineFunction2Args(self):
@function.Defun(dtypes.float32, dtypes.float32, func_name="APlus2B")
def APlus2B(a, b):
return a + b * 2
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], sess.run(call))
def testFunctionWithNoOutput(self):
@function.Defun(dtypes.float32, dtypes.float32)
def APlus2B(a, b):
c = a + b * 2 # Create some ops to have nodes in the body
print(c) # Using 'print' to make lint happy
with ops.Graph().as_default():
# Call function. There should be no exceptions.
APlus2B([1.0], [2.0])
def testDefineFunction2ArgsOutputName(self):
@function.Defun(
dtypes.float32,
dtypes.float32,
func_name="APlus2B",
out_names=["my_result_name"])
def APlus2B(a, b):
return a + b * 2
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], sess.run(call))
def testDefineFunctionDuplicateOutputs(self):
@function.Defun(dtypes.float32, func_name="Duplicate")
def Duplicate(a):
b = a + 1.0
return b, b
g = ops.Graph()
with g.as_default():
Duplicate([3.0])
func_sig = g.as_graph_def().library.function[0].signature
# The names given to both outputs should be different
# even though the same tensor is emitted to both.
out_names = [a.name for a in func_sig.output_arg]
self.assertEqual(2, len(out_names))
self.assertNotEqual(out_names[0], out_names[1])
def testGradientFunc(self):
@function.Defun(dtypes.float32, func_name="XSquarePlusOneFn")
def XSquarePlusOne(x):
return x * x + 1.0
@function.Defun(dtypes.float32, dtypes.float32)
def XSquarePlusOneGrad(x, dy):
dx = functional_ops._symbolic_gradient(
input=[x, dy], Tout=[dtypes.float32], f="XSquarePlusOneFn", name="dx")
return dx
g = ops.Graph()
with g.as_default():
call_f = XSquarePlusOne([2.0])
call_g = XSquarePlusOneGrad([2.0], [0.1])
with session.Session() as sess:
self.assertAllClose([5.0], sess.run(call_f))
self.assertAllClose([0.4], sess.run(call_g))
def testTanhSymGrad(self):
@function.Defun(dtypes.float32)
def Forward(x):
return math_ops.reduce_sum(math_ops.tanh(x))
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y = Forward(x)
dx = gradients_impl.gradients([y], [x])
inp = np.array([-1, 1, 2, -2], dtype=np.float32)
feed = {x: inp}
cfg = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True)))
with session.Session(graph=g, config=cfg) as sess:
out, = sess.run(dx, feed)
self.assertAllClose(1 - np.square(np.tanh(inp)), out)
def testCustomGradient(self):
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype)
def XentLossGrad(logits, labels, dloss):
dlogits = array_ops.reshape(dloss, [-1, 1]) * (
nn_ops.softmax(logits) - labels)
dlabels = array_ops.zeros_like(labels)
# Takes exp(dlogits) to differentiate it from the "correct" gradient.
return math_ops.exp(dlogits), dlabels
@function.Defun(dtype, dtype, grad_func=XentLossGrad)
def XentLoss(logits, labels):
return math_ops.reduce_sum(labels * math_ops.log(nn_ops.softmax(logits)),
1)
g = ops.Graph()
with g.as_default():
logits = array_ops.placeholder(dtype)
labels = array_ops.placeholder(dtype)
loss = XentLoss(logits, labels)
dlogits = gradients_impl.gradients([loss], [logits])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1)
y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
with session.Session(graph=g, config=cfg) as sess:
out, = sess.run(dlogits, {logits: x, labels: y})
self.assertAllClose(out, np.exp(prob - y))
def testCustomGradientError(self):
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype)
def Grad(x, dy, dz):
# Should have returned 1 result.
return x, dy + dz
@function.Defun(dtype, grad_func=Grad)
def Forward(x):
return x, x
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(dtype)
out = math_ops.add_n(Forward(inp))
dinp = gradients_impl.gradients(out, [inp])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
with session.Session(graph=g) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"SymGrad expects to return 1.*but get 2.*instead"):
_ = sess.run(dinp, {inp: x})
def testSymGradShape(self):
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, [25, 4])
y = array_ops.placeholder(dtypes.float32, [200, 100])
dz = array_ops.placeholder(dtypes.float32, [1])
# We assume Foo is a function of (x, y) -> (z) Then, Foo's
# gradient function is (x, y, dz) -> (dx, dy). dx's shape
# should be the same as x's; and dy's shape should be the same
# as y's.
dx, dy = functional_ops._symbolic_gradient(
input=[x, y, dz], Tout=[dtypes.float32] * 2, f="Foo")
self.assertEqual(x.get_shape(), dx.get_shape())
self.assertEqual(y.get_shape(), dy.get_shape())
def testSymGradAttr(self):
@function.Defun(noinline=True)
def Foo(x):
return x * 2
self.assertTrue(
Foo.instantiate([dtypes.float32]).definition.attr["_noinline"].b)
g = ops.Graph()
with g.as_default():
x = constant_op.constant(3.0)
y = Foo(x)
dx, = gradients_impl.gradients(y, [x])
cfg = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True)))
with self.test_session(graph=g, config=cfg):
self.assertAllClose(y.eval(), 6.)
self.assertAllClose(dx.eval(), 2.)
def _testZNoDepOnY(self, use_const_grad_ys):
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(x, y): # pylint: disable=unused-argument
return x * 2
with ops.Graph().as_default():
# z = Foo(x, y). z doe
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
z = Foo(x, y)
if use_const_grad_ys:
dx, dy = gradients_impl.gradients([z], [x, y], grad_ys=[1.0])
else:
dx, dy = gradients_impl.gradients([z], [x, y])
with session.Session() as sess:
dx_val, dy_val = sess.run([dx, dy])
self.assertEqual([2.0], dx_val)
self.assertEqual([0.0], dy_val)
def testZNoDepOnY(self):
self._testZNoDepOnY(False)
def testZNoDepOnYConstGradYs(self):
# Tests for constant folding of grad_ys
self._testZNoDepOnY(True)
def testDefineFunctionNoArgs(self):
@function.Defun(func_name="AConstant")
def AConstant():
return constant_op.constant([42])
with ops.Graph().as_default():
call = AConstant()
self.assertEqual("AConstant", call.op.name)
with session.Session() as sess:
self.assertAllEqual([42], sess.run(call))
def testDefineFunctionNames(self):
@function.Defun(dtypes.float32, func_name="Foo")
def Foo(a):
return a + 1
with ops.Graph().as_default():
call1 = Foo([1.0])
self.assertEqual("Foo", call1.op.name)
call2 = Foo([1.0])
self.assertEqual("Foo_1", call2.op.name)
# pylint: disable=unexpected-keyword-arg
call3 = Foo([1.0], name="mine")
self.assertEqual("mine", call3.op.name)
with ops.name_scope("my"):
call4 = Foo([1.0], name="precious")
self.assertEqual("my/precious", call4.op.name)
def testNoOp(self):
@function.Defun(dtypes.float32)
def Foo(x):
y = logging_ops.Print(x, [], "Hello")
with ops.control_dependencies([y]):
z = control_flow_ops.no_op()
with ops.control_dependencies([z]):
return x * 2
with ops.Graph().as_default(), self.test_session():
z = Foo(constant_op.constant(3.0))
self.assertAllEqual(z.eval(), 6.0)
def testAssertOp(self):
@function.Defun(dtypes.float32)
def Foo(x):
check = gen_logging_ops._assert(math_ops.greater(x, 0), [x])
with ops.control_dependencies([check]):
return x * 2
g = ops.Graph()
with g.as_default(), self.test_session():
self.assertAllEqual(Foo(constant_op.constant(3.0)).eval(), 6.0)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion failed.*-3"):
self.assertAllEqual(Foo(constant_op.constant(-3.0)).eval(), 6.0)
@test_util.disable_c_api # Op._add_control_inputs doesn't work with C API
def testAssertWrapper(self):
@function.Defun(dtypes.float32)
def MyFn(x):
with ops.control_dependencies(
[control_flow_ops.Assert(math_ops.less_equal(x, 10.0), [x])]):
return array_ops.identity(x)
with self.test_session():
self.assertEqual(1.0, MyFn(1.0).eval())
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
_ = MyFn(100.0).eval()
@test_util.disable_c_api # Op._add_control_inputs doesn't work with C API
def testWhileLoopCallsFunc(self):
with self.test_session(use_gpu=True) as sess:
@function.Defun(dtypes.float32)
def Times2(x):
constant_two = constant_op.constant(2, dtypes.int32)
two_on_gpu = math_ops.cast(constant_two, dtypes.float32)
return x * two_on_gpu
def Body(x):
x2 = Times2(x)
x2.set_shape([])
return x2
loop = control_flow_ops.while_loop(lambda x: x < 1e5, Body, [1.0])
ans = sess.run(loop)
self.assertAllClose(ans, 131072.)
@test_util.disable_c_api # Op._add_control_inputs doesn't work with C API
def testControlFlowStrictness(self):
"""Inlined functions must not execute in a untaken control flow branch."""
@function.Defun(dtypes.int32)
def AssertFail(x):
# Assertion that always fails and does not have a data dependency on `x`.
assert_false = control_flow_ops.Assert(False, [42])
with ops.control_dependencies([assert_false]):
return array_ops.identity(x)
with ops.device("CPU"):
pred = array_ops.placeholder(dtypes.bool)
x = array_ops.placeholder(dtypes.int32)
cond = control_flow_ops.cond(pred, lambda: x + 1, lambda: AssertFail(x))
# pylint: disable=unnecessary-lambda
loop = control_flow_ops.while_loop(lambda y: pred,
lambda y: AssertFail(y), [x])
# pylint: enable=unnecessary-lambda
rewriter_config = rewriter_config_pb2.RewriterConfig(
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
# Enables inlining.
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True),
rewrite_options=rewriter_config))
with session.Session(config=config) as sess:
# Since the 'False' branch is not taken, the assertion should not fire.
self.assertEqual(4, sess.run(cond, {pred: True, x: 3}))
# The assertion should still fire if the False branch is taken.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
sess.run(cond, {pred: False, x: 3})
# Similarly for loops.
self.assertEqual(3, sess.run(loop, {pred: False, x: 3}))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
sess.run(loop, {pred: True, x: 3})
def testVar(self):
@function.Defun(dtypes.float32)
def Foo(x):
return x * x + 1
g = ops.Graph()
with g.as_default():
v = variables.Variable(constant_op.constant(10.0))
z = Foo(v)
with self.test_session(graph=g):
variables.global_variables_initializer().run()
self.assertAllEqual(z.eval(), 101.)
def testResourceVarAsImplicitInput(self):
g = ops.Graph()
with g.as_default(), ops.device("cpu:0"):
v = variable_scope.get_variable(
"var", (4, 4), dtypes.float32, use_resource=True)
@function.Defun()
def Foo():
return array_ops.identity(v)
y = v.value()
z = Foo()
with self.test_session(graph=g):
v.initializer.run()
self.assertAllEqual(y.eval(), z.eval())
def testDefineErrors(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "can not return None"):
@function.Defun()
def TwoNone():
return None, None
_ = TwoNone.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def DefaultArg(unused_a=12):
return constant_op.constant([1])
_ = DefaultArg.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def KwArgs(**unused_kwargs):
return constant_op.constant([1])
_ = KwArgs.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(dtypes.float32)
def PlusMinusV2(a, b):
return a + b, b - a
_ = PlusMinusV2.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(dtypes.float32, dtypes.float32, dtypes.float32)
def PlusMinusV3(a, b):
return a + b, b - a
_ = PlusMinusV3.definition
def testCallErrors(self):
  """Calling a Defun with the wrong arity or unknown kwargs raises."""

  @function.Defun()
  def Const():
    return constant_op.constant(1)

  @function.Defun(dtypes.int32)
  def PlusOne(a):
    return a + 1

  @function.Defun(dtypes.int32, dtypes.int32)
  def PlusMinus(a, b):
    return a + b, b - a

  with ops.Graph().as_default():
    _ = Const()
    # pylint: disable=too-many-function-args
    # pylint: disable=unexpected-keyword-arg
    # pylint: disable=no-value-for-parameter
    with self.assertRaisesRegexp(ValueError, "arguments: 0"):
      _ = Const(1)
    with self.assertRaisesRegexp(ValueError, "arguments: 0"):
      _ = Const(1, 2)
    with self.assertRaisesRegexp(ValueError, "arguments: 1"):
      _ = PlusOne()
    _ = PlusOne(1)
    with self.assertRaisesRegexp(ValueError, "arguments: 1"):
      _ = PlusOne(1, 2)
    with self.assertRaisesRegexp(ValueError, "arguments: 2"):
      _ = PlusMinus()
    with self.assertRaisesRegexp(ValueError, "arguments: 2"):
      _ = PlusMinus(1)
    _ = PlusMinus(1, 2)
    # `name=` is an accepted call-site keyword; anything else is rejected.
    _ = PlusOne(1, name="p1")
    with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"):
      _ = PlusOne(1, device="/device:GPU:0")
def testFunctionDecorator(self):
  """Defun honors `func_name` and the call-site `name=` keyword."""

  @function.Defun(dtypes.float32, func_name="Minus1")
  def Minus1(b):
    return b - 1.0

  with ops.Graph().as_default():
    call1 = Minus1([2.])
    self.assertTrue(isinstance(Minus1, function._DefinedFunction))
    self.assertEqual(Minus1.name, "Minus1")
    # pylint: disable=unexpected-keyword-arg
    call2 = Minus1(call1, name="next")
    # pylint: enable=unexpected-keyword-arg
    self.assertEqual("next", call2.op.name)
    with session.Session() as sess:
      self.assertAllEqual([1], sess.run(call1))
      self.assertAllEqual([0], sess.run(call2))
def testNestedFunction(self):
  """A Defun may call another, separately defined Defun from its body."""

  @function.Defun(dtypes.float32)
  def Cube(x):
    return x * x * x

  @function.Defun(dtypes.float32, dtypes.float32)
  def CubeXPlusY(x, y):
    return Cube(x) + y

  with ops.Graph().as_default():
    result = CubeXPlusY(3.0, -2.0)
    with self.test_session():
      # 3^3 + (-2) == 25.
      self.assertAllEqual(25.0, result.eval())
def testNestedDefinedFunction(self):
  """A Defun may be defined inside the body of another Defun."""

  @function.Defun(dtypes.float32, dtypes.float32)
  def CubeXPlusY(x, y):

    @function.Defun(dtypes.float32)
    def Cube(x):
      return x * x * x

    return Cube(x) + y

  with ops.Graph().as_default():
    z = CubeXPlusY(3.0, -2.0)
    with self.test_session():
      self.assertAllEqual(z.eval(), 25.0)
def testUnusedFunction(self):
  """Defun bodies are traced lazily; unused functions are never traced."""
  invoked = False
  # pylint: disable=unused-variable

  @function.Defun()
  def Unused():
    invoked = True
    return constant_op.constant(42.)

  # Decorating alone must not execute the Python body.
  self.assertFalse(invoked)
  g = ops.Graph()
  with g.as_default():

    @function.Defun()
    def Unused2():
      invoked = True
      return constant_op.constant(7.)

    constant_op.constant(3.)
  # pylint: enable=unused-variable
  self.assertFalse(invoked)
  gdef = g.as_graph_def()
  # Uncalled functions do not end up in the graph's function library.
  self.assertEqual(0, len(gdef.library.function))
def testReduction(self):
  """A Defun-wrapped reduction matches the plain-graph value and gradient."""
  g = ops.Graph()

  # BN0 is computing batch normed matrix along rows.
  def BN0(x):
    mean = math_ops.reduce_mean(x, [0])
    var = math_ops.reduce_mean(math_ops.square(x - mean))  # biased var
    rstd = math_ops.rsqrt(var + 1e-8)
    return (x - mean) * rstd

  # Wraps BatchNorm in a tf function.
  @function.Defun(dtypes.float32)
  def BN1(x):
    return BN0(x)

  with g.as_default():
    x = array_ops.placeholder(dtypes.float32)
    y0 = BN0(x)  # A plain graph
    y1 = BN1(x)  # A tf function
    dx0, = gradients_impl.gradients([y0], [x])
    dx1, = gradients_impl.gradients([y1], [x])

  # Both should produce the same result and gradient.
  with self.test_session(graph=g) as sess:
    vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))})
    self.assertAllClose(vals[0], vals[1])
    self.assertAllClose(vals[2], vals[3])
def testCapture(self):
  """Nested Defuns transitively capture tensors from the outer graph."""
  g = ops.Graph()
  with g.as_default():
    w = variables.Variable(constant_op.constant([[1.0]]))
    b = variables.Variable(constant_op.constant([2.0]))

    # Foo() captures w and b.
    @function.Defun(dtypes.float32)
    def Foo(x):

      # Plus() captures b.
      @function.Defun(dtypes.float32)
      def Plus(y):
        return y + b

      return Plus(math_ops.matmul(w, x))

    y = Foo(constant_op.constant([[10.]]))

  with self.test_session(graph=g):
    variables.global_variables_initializer().run()
    # 1 * 10 + 2 == 12.
    self.assertAllEqual(y.eval(), [[12.0]])
def testCaptureControls(self):
  """Capturing a control dependency on an outer tensor is not supported."""
  g = ops.Graph()
  with g.as_default():
    x = constant_op.constant([10.0])
    x = logging_ops.Print(x, [x], "outer")

    @function.Defun(dtypes.float32)
    def Foo(y):
      # The control input `x` lives in the outer graph, not the function
      # body graph, which makes tracing fail.
      with ops.control_dependencies([x]):
        y = logging_ops.Print(y, [y], "inner")
      return y

    with self.assertRaisesRegexp(ValueError, "not an element of this graph."):
      # NOTE: We still do not support capturing control deps.
      _ = Foo(x)
def testCaptureInWhileLoop(self):
  """A while_loop body inside a Defun may capture an outer-graph tensor."""
  graph = ops.Graph()
  with graph.as_default():
    step = constant_op.constant(1)

    @function.Defun()
    def Foo():
      # `step` is captured from the enclosing graph by the loop body.
      return control_flow_ops.while_loop(
          lambda i: i < 10, lambda i: i + step, [0])

    result = Foo()

  with self.test_session(graph=graph) as sess:
    self.assertEqual(10, sess.run(result))
def testCaptureInCond(self):
  """A cond() inside a Defun may capture an outer-graph tensor."""
  g = ops.Graph()
  with g.as_default():
    x = constant_op.constant(1)

    @function.Defun(dtypes.bool)
    def Foo(pred):
      # Both branches capture `x` from the enclosing graph.
      return control_flow_ops.cond(pred, lambda: x, lambda: x + 1)

    y = Foo(True)
    z = Foo(False)

  with self.test_session(graph=g) as sess:
    self.assertEqual(sess.run(y), 1)
    self.assertEqual(sess.run(z), 2)
def testStableName(self):
  """Instantiated function names (which embed a body hash) are stable."""

  @function.Defun()
  def Foo(x, y, z):
    return math_ops.tanh(math_ops.matmul(x, y) + z)

  # We added more randomness to function names in C API.
  # TODO(iga): Remove this if statement when we switch to C API.
  if ops._USE_C_API:  # pylint: disable=protected-access
    # The hash suffix depends on byte order of the serialized proto.
    if sys.byteorder == "big":
      self.assertEqual("Foo_kEdkAG8SJvg",
                       Foo.instantiate([dtypes.float32] * 3).name)
    else:
      self.assertEqual("Foo_aCYSbwBkR5A",
                       Foo.instantiate([dtypes.float32] * 3).name)
  else:
    self.assertEqual("Foo_d643acf7",
                     Foo.instantiate([dtypes.float32] * 3).name)
def testSignatureHash(self):
  """Identical bodies with different signatures are distinct functions."""
  # Foo.Inner and Bar.Inner have identical function body but have
  # different signatures. They should be treated as two different functions.

  @function.Defun()
  def Foo(x):

    @function.Defun()
    def Inner(x):
      return x + 10.

    return Inner(x)

  @function.Defun()
  def Bar(x):

    @function.Defun()
    def Inner(x, unused_y, unused_z):
      return x + 10.

    return Inner(x, 2., 3.)

  g = ops.Graph()
  with g.as_default():
    x = constant_op.constant(10.0)
    y = Foo(x)
    z = Bar(x)

  with self.test_session(graph=g) as sess:
    v0, v1 = sess.run([y, z])
    self.assertAllEqual(v0, 20.)
    self.assertAllEqual(v1, 20.)
def testShapeFunction(self):
  """The shape_func= argument controls the static shape of call outputs."""

  @function.Defun(
      dtypes.float32, shape_func=lambda op: [op.inputs[0].get_shape()])
  def Foo(x):
    return x + 1.0

  @function.Defun(
      shape_func=lambda op: [[1] + op.inputs[0].get_shape().as_list()])
  def Bar(x):
    return array_ops.stack([x])

  g = ops.Graph()
  with g.as_default():
    x = Foo([1.0, 2.0])
    self.assertEqual(x.get_shape().as_list(), [2])
    y = Bar(array_ops.zeros([1, 2, 3]))
    self.assertAllEqual(y.get_shape().as_list(), [1, 1, 2, 3])
def testVariableReuse(self):
  """variable_scope reuse works inside a Defun body (one var is created)."""

  def LinearWithReuse(input_tensor, reuse=None):
    size = input_tensor.shape.dims[1]
    with variable_scope.variable_scope("linear", reuse=reuse):
      w = variable_scope.get_variable(
          "w", shape=[size, size], dtype=input_tensor.dtype)
      return math_ops.matmul(input_tensor, w)

  @function.Defun(dtypes.float32)
  def Foo(inputs):
    inputs = array_ops.reshape(inputs, [32, 100])
    hidden = LinearWithReuse(inputs)
    return LinearWithReuse(hidden, reuse=True)

  input_op = array_ops.placeholder(shape=[32, 100], dtype=dtypes.float32)
  output_op = Foo(input_op)

  # Both layer applications share a single hoisted variable.
  global_vars = variables.global_variables()
  self.assertEqual(len(global_vars), 1)
  self.assertEqual(global_vars[0].name, "linear/w:0")

  with session.Session() as sess:
    sess.run(variables.global_variables_initializer())
    output_val = sess.run(
        output_op, feed_dict={input_op: np.random.rand(32, 100)})
    self.assertEqual(output_val.shape, (32, 100))
def testFunctionCallInDifferentVariableScopes(self):
  """Two calls to one Defun share the variable created at first trace."""

  @function.Defun(dtypes.float32)
  def Foo(inputs):
    var = variable_scope.get_variable(
        "var",
        shape=[10],
        dtype=dtypes.float32,
        initializer=init_ops.ones_initializer())
    return inputs + var

  input_op = array_ops.placeholder(shape=[10], dtype=dtypes.float32)
  with variable_scope.variable_scope("vs1"):
    out1_op = Foo(input_op)

  with variable_scope.variable_scope("vs2"):
    out2_op = Foo(input_op)

  # Only one variable exists; it keeps the scope of the first call site.
  global_vars = variables.global_variables()
  self.assertEqual(len(global_vars), 1)
  self.assertEqual(global_vars[0].name, "vs1/var:0")

  with session.Session() as sess:
    sess.run(variables.global_variables_initializer())
    out1, out2 = sess.run(
        [out1_op, out2_op], feed_dict={input_op: np.linspace(1, 10, 10)})
    self.assertAllEqual(out1, np.linspace(2, 11, 10))
    self.assertAllEqual(out2, np.linspace(2, 11, 10))
def testTwoInputsSameOp(self):
  """graph_to_function_def handles multiple inputs from the same op."""
  g = ops.Graph()
  with g.as_default():
    m = array_ops.placeholder(dtypes.float32)
    s, u, v = linalg_ops.svd(m)
    ss = math_ops.reduce_sum(s)
    uu = math_ops.reduce_sum(u)
    vv = math_ops.reduce_sum(v)
    result = ss + uu + vv
  f = graph_to_function_def.graph_to_function_def(
      g,
      g.get_operations()[1:],  # skip the placeholder
      [s, u, v],
      [result])
  # s, u and v all come from the one SVD op but stay separate inputs.
  self.assertEqual(len(f.signature.input_arg), 3)
def testGradientWithIntegerFunctionArgument(self):
  """Gradients flow through float inputs even with an int input present."""

  @function.Defun(dtypes.int32, dtypes.float32)
  def Foo(t, x):
    return x[t]

  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(dtypes.float32)
    t = constant_op.constant(0, dtypes.int32)
    out = Foo(t, inp)
    dinp, = gradients_impl.gradients(out, [inp])

  x = np.zeros((2,)).astype(np.float32)
  with session.Session(graph=g) as sess:
    # d(x[0])/dx is a one-hot at index 0.
    self.assertAllClose(
        np.array([1.0, 0.0]).astype(np.float32), sess.run(dinp, {inp: x}))
def testFunctionMarkedStateful(self):
  """Functions force-marked stateful still execute correctly."""

  @function.Defun(dtypes.int32, dtypes.float32)
  def Foo(t, x):
    return x[t]

  @function.Defun(dtypes.int64)
  def Bar(x):
    return x

  # NOTE(mrry): All functions are currently considered stateless by the
  # runtime, so we simulate a "stateful" function.
  # TODO(b/70565970): Remove this hack when we are able to build stateful
  # functions using the API.
  # pylint: disable=protected-access
  Foo._signature.is_stateful = True
  Bar._signature.is_stateful = True
  # pylint: enable=protected-access

  result_1 = Foo(3, [1.0, 2.0, 3.0, 4.0])
  result_2 = Bar(constant_op.constant(100, dtype=dtypes.int64))

  with session.Session() as sess:
    self.assertEqual(4.0, sess.run(result_1))
    self.assertEqual(100, sess.run(result_2))
    self.assertEqual((4.0, 100), sess.run((result_1, result_2)))
def testStatefulFunction(self):
  """The is_stateful bit is derived from body ops and propagates to callers."""

  @function.Defun()
  def FunctionWithStatelessOp():
    return constant_op.constant(42.0)

  @function.Defun()
  def FunctionWithStatefulOp():
    # random_uniform is a stateful op.
    return random_ops.random_uniform([100], maxval=10, dtype=dtypes.int32)

  @function.Defun()
  def FunctionWithStatelessFunctionCall():
    return FunctionWithStatelessOp()

  @function.Defun()
  def FunctionWithStatefulFunctionCall():
    return FunctionWithStatefulOp()

  # Test that the `is_stateful` bit is propagated.
  self.assertFalse(FunctionWithStatelessOp.definition.signature.is_stateful)
  self.assertTrue(FunctionWithStatefulOp.definition.signature.is_stateful)
  self.assertFalse(
      FunctionWithStatelessFunctionCall.definition.signature.is_stateful)
  self.assertTrue(
      FunctionWithStatefulFunctionCall.definition.signature.is_stateful)

  # Ensure that two invocations of the same random-number-generating
  # function produce different results.
  result1 = FunctionWithStatefulFunctionCall()
  result2 = FunctionWithStatefulFunctionCall()

  # Statefulness affects how the function is treated by the various
  # optimization passes, so run the test in each optimizer
  # configuration.
  for config in _OptimizerOptions():
    with session.Session(config=config) as sess:
      val1, val2 = sess.run((result1, result2))
      self.assertFalse(all(val1 == val2))
      val3, val4 = sess.run((result1, result2))
      self.assertFalse(all(val3 == val1))
      self.assertFalse(all(val4 == val2))
def testSameFunctionOnTwoDevices(self):
  """One Defun can be called on two different devices in the same graph."""

  @function.Defun(dtypes.float32)
  def AddOne(x):
    return x + 1.0

  with ops.device("/cpu:0"):
    f_0 = AddOne(41.0)
  with ops.device("/cpu:1"):
    f_1 = AddOne(43.0)

  for config in _OptimizerOptions():
    # Two virtual CPU devices are needed for the placements above.
    config.device_count["CPU"] = 2
    with session.Session(config=config) as sess:
      self.assertEqual(42.0, sess.run(f_0))
      self.assertEqual(44.0, sess.run(f_1))
      self.assertEqual((42.0, 44.0), sess.run((f_0, f_1)))
@test_util.with_c_api
class FunctionsFromProtos(test.TestCase):
  """Tests round-tripping _DefinedFunctions through FunctionDef(Library) protos."""

  def expectFunctionsEqual(self, func, grad_func=None, new_func=None):
    """Asserts `func` survives serialization.

    If `new_func` is None, `func` is serialized to a FunctionDef proto and
    deserialized back before the comparison.
    """
    if new_func is None:
      # Make a copy of func.definition to avoid any bugs masked by using the
      # same object
      serialized_fdef = func.definition.SerializeToString()
      # Serialize and then deserialize `func` to create `new_func`
      fdef = function_pb2.FunctionDef.FromString(serialized_fdef)
      new_func = function._from_definition(fdef, grad_func=grad_func)
    self.assertEqual(func.name, new_func.name)
    self.assertEqual(func.definition, new_func.definition)
    self.assertEqual(func.grad_func_name, new_func.grad_func_name)
    self.assertEqual(func.declared_input_types, new_func.declared_input_types)
    self.assertEqual(func.captured_inputs, new_func.captured_inputs)

  def testBasic(self):
    @function.Defun(dtypes.float32, dtypes.float32)
    def Foo(x, y):
      return x + y

    self.expectFunctionsEqual(Foo)

  def testGradFunc(self):
    @function.Defun(dtypes.float32, dtypes.float32)
    def G(x, dy):
      return x * dy

    @function.Defun(dtypes.float32, grad_func=G)
    def F(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    self.expectFunctionsEqual(F, grad_func=G)

  def testCapturedInputs(self):
    """Captured inputs become regular inputs after a round trip."""
    c = constant_op.constant(10, dtypes.int64)

    @function.Defun(dtypes.int64)
    def Foo(x):
      return x + c

    new_func = function._from_definition(Foo.definition)

    self.assertEqual(Foo.name, new_func.name)
    self.assertEqual(Foo.definition, new_func.definition)
    self.assertEqual(Foo.grad_func_name, new_func.grad_func_name)

    # Captured inputs are added as regular inputs to the function definition
    self.assertEqual(new_func.declared_input_types,
                     Foo.declared_input_types + (dtypes.int64,))
    self.assertEqual(len(new_func.captured_inputs), 0)

  def testNestedFunctions(self):
    @function.Defun(dtypes.float32)
    def Outer(x):

      @function.Defun(dtypes.float32)
      def Inner(y):
        return y + 1

      return Inner(Inner(x))

    self.expectFunctionsEqual(Outer)

  def testFromLibrary(self):
    """_from_library recovers every function with its gradient wiring."""
    # Define some functions with different gradient functions. Note that many of
    # the below functions are identical since function bodies don't matter for
    # this test.

    @function.Defun(dtypes.float32, dtypes.float32)
    def G1(x, dy):
      return x * dy

    @function.Defun(dtypes.float32, dtypes.float32)
    def G2(x, dy):
      return x * dy

    # F1 and F2 have the same gradient function
    @function.Defun(dtypes.float32, grad_func=G1)
    def F1(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    @function.Defun(dtypes.float32, grad_func=G1)
    def F2(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    # F3 has a different gradient function
    @function.Defun(dtypes.float32, grad_func=G2)
    def F3(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    # F4 has no gradient function
    @function.Defun(dtypes.float32)
    def F4(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    # Instantiate all functions
    g = ops.Graph()
    with g.as_default():
      c = constant_op.constant(1.0, dtypes.float32)
      f1 = F1(c)
      f2 = F2(c)
      f3 = F3(c)
      f4 = F4(c)
      gradients_impl.gradients([f1, f2, f3, f4], c)

    library = g.as_graph_def().library
    new_funcs = function._from_library(library)

    def CheckNewFunc(func):
      new_func = [f for f in new_funcs if f.name == func.name]
      self.assertEqual(len(new_func), 1)
      self.expectFunctionsEqual(func, new_func=new_func[0])

    CheckNewFunc(G1)
    CheckNewFunc(G2)
    CheckNewFunc(F1)
    CheckNewFunc(F2)
    CheckNewFunc(F3)
    CheckNewFunc(F4)

  def testFromLibraryEmptyLib(self):
    library = function_pb2.FunctionDefLibrary()
    self.assertEqual(len(function._from_library(library)), 0)

  def testFromLibraryMissingFuncDef(self):
    """_from_library rejects libraries with dangling gradient references."""

    @function.Defun(dtypes.float32, dtypes.float32)
    def G1(x, dy):
      return x * dy

    @function.Defun(dtypes.float32)
    def F1(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    gradient = function_pb2.GradientDef()
    gradient.function_name = F1.name
    gradient.gradient_func = G1.name

    # Create invalid function def that is missing G1 function def
    library = function_pb2.FunctionDefLibrary()
    library.gradient.extend([gradient])
    library.function.extend([F1.definition])

    with self.assertRaisesRegexp(
        ValueError,
        "FunctionDefLibrary missing 'G1_[0-9a-zA-Z]{8,11}' FunctionDef"):
      function._from_library(library)

    # Create invalid function def that is missing F1 function def
    library = function_pb2.FunctionDefLibrary()
    library.gradient.extend([gradient])
    library.function.extend([G1.definition])

    with self.assertRaisesRegexp(
        ValueError,
        "FunctionDefLibrary missing 'F1_[0-9a-zA-Z]{8,11}' FunctionDef"):
      function._from_library(library)

  def testFromLibraryCyclicGradFuncs(self):
    """_from_library rejects libraries with cyclic gradient references."""

    @function.Defun(dtypes.float32)
    def F1(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    @function.Defun(dtypes.float32)
    def F2(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    # Create invalid function def library where F1 has gradient function F2 and
    # F2 has gradient function F1
    library = function_pb2.FunctionDefLibrary()
    library.function.extend([F1.definition, F2.definition])

    gradient1 = function_pb2.GradientDef()
    gradient1.function_name = F1.name
    gradient1.gradient_func = F2.name

    gradient2 = function_pb2.GradientDef()
    gradient2.function_name = F2.name
    gradient2.gradient_func = F1.name

    library.gradient.extend([gradient1, gradient2])

    with self.assertRaisesRegexp(
        ValueError, "FunctionDefLibrary contains cyclic gradient functions!"):
      function._from_library(library)
@test_util.with_c_api
class FunctionOverloadTest(test.TestCase):
  """Tests Defun without declared input types (dtype-overloaded functions)."""

  def testBasic(self):
    """One overloaded Defun instantiates separately per input dtype."""

    @function.Defun()
    def Sinh(x):
      return 1 / 2. * (math_ops.exp(x) - math_ops.exp(-x))

    g = ops.Graph()
    with g.as_default():
      x = Sinh(constant_op.constant(0.25, dtypes.float32))
      y = Sinh(constant_op.constant(0.25, dtypes.float64))

    with self.test_session(graph=g):
      self.assertAllClose(x.eval(), np.sinh(0.25))
      self.assertAllClose(y.eval(), np.sinh(0.25))

  def testGradient(self):
    """A grad_func declared on an overloaded Defun works for every dtype."""

    @function.Defun(func_name="Spec")
    def G(x, dy):
      return x * dy

    @function.Defun(grad_func=G)
    def F(x):
      return math_ops.exp(x) - math_ops.exp(-x)

    for dtype in [dtypes.float32, dtypes.float64]:
      g = ops.Graph()
      with g.as_default():
        x = constant_op.constant(0.25, dtype)
        y = F(x)
        dx, = gradients_impl.gradients(y, x)

        with self.test_session(graph=g):
          self.assertAllClose(dx.eval(), 0.25)

  def testDocString(self):
    """The Python docstring becomes the FunctionDef's signature description."""

    @function.Defun()
    def Foo(x):
      """Successor of x."""
      return x + 1

    g = ops.Graph()
    with g.as_default():
      _ = Foo(1)

    self.assertEqual(g.as_graph_def().library.function[0].signature.description,
                     "Successor of x.")
@test_util.with_c_api
class FunctionCaptureByValueTest(test.TestCase):
  """Tests capture_by_value=True: captures are embedded as constants."""

  def testCaptureByValue(self):
    g = ops.Graph()
    with g.as_default():
      w = constant_op.constant([[1.0]])
      b = constant_op.constant([2.0])

      # Foo() captures w and b.
      @function.Defun(dtypes.float32, capture_by_value=True)
      def Foo(x):

        # Plus() captures b.
        @function.Defun(dtypes.float32, capture_by_value=True)
        def Plus(y):
          return y + b

        # By-value capture means no extra inputs are added.
        self.assertEqual(0, len(Plus.captured_inputs))

        return Plus(math_ops.matmul(w, x))

      y = Foo(constant_op.constant([[10.]]))
      self.assertEqual(0, len(Foo.captured_inputs))

    with self.test_session(graph=g):
      self.assertAllEqual(y.eval(), [[12.0]])
@test_util.with_c_api
class UnrollLSTMTest(test.TestCase):
  """Compares an unrolled LSTM built plainly vs. wrapped in Defuns.

  The same computation is built four ways ("complete", "cell", "loop",
  "loop10") and the forward values / gradients must agree.
  """

  BATCH_SIZE = 16   # minibatch size per step
  LSTM_DIMS = 32    # LSTM state dimensionality
  NUM_UNROLL = 20   # number of unrolled time steps

  def _Weights(self):
    # Deterministic (seeded) weights shared across modes.
    dims = self.LSTM_DIMS
    return random_ops.random_uniform([2 * dims, 4 * dims], -1, 1, seed=123456)

  def _Input(self):
    # Deterministic (seeded) input sequence shared across modes.
    return random_ops.random_uniform(
        [self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS], seed=654321)

  # Helper to construct a LSTM cell graph.
  @classmethod
  def LSTMCell(cls, x, mprev, cprev, weights):
    """One LSTM step: returns (new_m, new_c) from input and previous state."""
    xm = array_ops.concat([x, mprev], 1)
    i_i, i_g, f_g, o_g = array_ops.split(
        value=math_ops.matmul(xm, weights), num_or_size_splits=4, axis=1)
    new_c = math_ops.sigmoid(f_g) * cprev + math_ops.sigmoid(
        i_g) * math_ops.tanh(i_i)
    new_c = clip_ops.clip_by_value(new_c, -50.0, 50.0)
    new_m = math_ops.sigmoid(o_g) * math_ops.tanh(new_c)
    return new_m, new_c

  def _BuildForward(self, weights, inp, mode="cell"):
    """Builds the unrolled forward pass in one of four equivalent modes."""

    def Loop(cell, w, i):
      x = array_ops.unstack(i, self.NUM_UNROLL)
      m = array_ops.zeros_like(x[0])
      c = array_ops.zeros_like(x[0])
      for i in range(self.NUM_UNROLL):
        m, c = cell(x[i], m, c, w)
      return m

    cell = UnrollLSTMTest.LSTMCell
    if mode == "complete":
      # Constructs the complete graph in python.
      return Loop(cell, weights, inp)

    cell = function.Defun(dtypes.float32, dtypes.float32, dtypes.float32,
                          dtypes.float32)(cell)
    if mode == "cell":
      # Just represent the LSTM as a function.
      return Loop(cell, weights, inp)

    if mode == "loop":
      # Wraps the whole loop as a function.
      @function.Defun(dtypes.float32, dtypes.float32)
      def LSTMLoop(w, i):
        return Loop(cell, w, i)

      return LSTMLoop(weights, inp)

    if mode == "loop10":
      # Wraps 10 lstm steps into one function, and the whole loop
      # into another calling the formers.

      # Groups 10 steps at a time.
      @function.Defun(dtypes.float32, dtypes.float32, dtypes.float32,
                      *([dtypes.float32] * 10))
      def Loop10(w, m, c, *args):
        for x in args:
          m, c = cell(x, m, c, w)
        return m, c

      @function.Defun(dtypes.float32, dtypes.float32)
      def LSTMLoop10(weights, inp):
        x = array_ops.unstack(inp, self.NUM_UNROLL)
        m = array_ops.zeros_like(x[0])
        c = array_ops.zeros_like(x[0])
        assert self.NUM_UNROLL % 10 == 0
        for i in range(0, self.NUM_UNROLL, 10):
          m, c = Loop10(weights, m, c, *x[i:i + 10])
        return m

      return LSTMLoop10(weights, inp)

  def testUnrollLSTM(self):
    """Forward values agree across all four build modes."""

    # Run one step of the unrolled lstm graph.
    def RunForward(mode, cfg=None):
      tf_logging.info("mode = %s", mode)
      g = ops.Graph()
      start = time.time()
      with g.as_default():
        weights = self._Weights()
        inp = self._Input()
        m = self._BuildForward(weights, inp, mode)
      gdef = g.as_graph_def()
      finish = time.time()
      tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start,
                      len(str(gdef)), len(gdef.SerializeToString()))
      with g.as_default(), session.Session(config=cfg) as sess:
        return sess.run(m)

    mv0 = RunForward("complete")
    for cfg in _OptimizerOptions():
      tf_logging.info("cfg = %s", cfg)
      mv1 = RunForward("cell", cfg)
      mv2 = RunForward("loop", cfg)
      mv3 = RunForward("loop10", cfg)
      self.assertAllClose(mv0, mv1, rtol=1e-4)
      self.assertAllClose(mv0, mv2, rtol=1e-4)
      self.assertAllClose(mv0, mv3, rtol=1e-4)

  def testUnrollLSTMGrad(self):
    """Weight gradients agree across all four build modes."""

    # Run one step of the unrolled lstm graph.
    def RunForwardBackward(mode, cfg=None):
      tf_logging.info("mode = %s", mode)
      g = ops.Graph()
      start = time.time()
      with g.as_default():
        weights = self._Weights()
        inp = self._Input()
        m = self._BuildForward(weights, inp, mode)
        loss = math_ops.reduce_sum(math_ops.square(m))
        dw = gradients_impl.gradients([loss], [weights])
      gdef = g.as_graph_def()
      finish = time.time()
      tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start,
                      len(str(gdef)), len(gdef.SerializeToString()))
      with g.as_default(), session.Session(config=cfg) as sess:
        return sess.run(dw)

    d0 = RunForwardBackward("complete")
    for cfg in _OptimizerOptions():
      tf_logging.info("cfg = %s", cfg)
      d1 = RunForwardBackward("cell", cfg)
      d2 = RunForwardBackward("loop", cfg)
      d3 = RunForwardBackward("loop10", cfg)
      self.assertAllClose(d0, d1, rtol=1e-4, atol=1e-4)
      self.assertAllClose(d0, d2, rtol=1e-4, atol=1e-4)
      self.assertAllClose(d0, d3, rtol=1e-4, atol=1e-4)
@test_util.with_c_api
class FunctionInlineControlTest(test.TestCase):
  """Tests that the noinline attribute controls whether calls are inlined."""

  def testFoo(self):
    dtype = dtypes.float32
    # Disable grappler-level optimizations but enable classic inlining/CSE.
    cfg = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
        optimizer_options=config_pb2.OptimizerOptions(
            opt_level=config_pb2.OptimizerOptions.L0,
            do_common_subexpression_elimination=True,
            do_function_inlining=True,
            do_constant_folding=True)))
    # A "Cell..." timeline label only appears when the call is NOT inlined.
    cell_func_call_pattern = re.compile(r"Cell[^/]*\(")
    for noinline in [False, True]:

      @function.Defun(dtype, noinline=noinline)
      def Cell(v):
        # If v is a vector [n, 1], x is a big square matrix.
        x = math_ops.tanh(v + array_ops.transpose(v, [1, 0]))
        return math_ops.reduce_sum(x, 1, keepdims=True)

      @function.Defun(dtype)
      def Forward(x):
        for _ in range(10):
          # pylint: disable=cell-var-from-loop
          x = Cell(x)
        return math_ops.reduce_sum(x, [0, 1])

      self.assertEqual(noinline, Cell.definition.attr["_noinline"].b)

      g = ops.Graph()
      with g.as_default():
        x = array_ops.placeholder(dtype)
        y = Forward(x)
        dx, = gradients_impl.gradients([y], [x])

      np.random.seed(321)
      inp = np.random.uniform(-1, 1, [16, 1]).astype(np.float32)
      run_metadata = config_pb2.RunMetadata()
      with session.Session(graph=g, config=cfg) as sess:
        ans = sess.run(
            [y, dx], {x: inp},
            run_metadata=run_metadata,
            options=config_pb2.RunOptions(
                trace_level=config_pb2.RunOptions.FULL_TRACE))
        print(ans[0], np.sum(ans[1]))
        # Golden values for the seeded input.
        self.assertAllClose(ans[0], 255.971, rtol=1e-3)
        self.assertAllClose(np.sum(ans[1]), 13.0408, rtol=1e-3)

      def MetadataHasCell(run_metadata):
        for dev_stats in run_metadata.step_stats.dev_stats:
          for node_stats in dev_stats.node_stats:
            if cell_func_call_pattern.search(node_stats.timeline_label):
              return True
        return False

      self.assertEqual(MetadataHasCell(run_metadata), noinline)
# Module-level Defun definitions exercised by ModuleFunctionTest below.
@function.Defun(*[dtypes.float32] * 3)
def Linear(w, b, x):
  return nn_ops.relu(math_ops.matmul(x, w) + b)


@function.Defun(*[dtypes.float32] * 5)
def Linear2(w1, b1, w2, b2, x):
  return Linear(w2, b2, Linear(w1, b1, x))


# Set C API before defining module level functions
ops._USE_C_API = True


@function.Defun(*[dtypes.float32] * 3)
def LinearWithCApi(w, b, x):
  return nn_ops.relu(math_ops.matmul(x, w) + b)


@function.Defun(*[dtypes.float32] * 5)
def Linear2WithCApi(w1, b1, w2, b2, x):
  return LinearWithCApi(w2, b2, LinearWithCApi(w1, b1, x))


# Unset C API after defining module level functions
ops._USE_C_API = False
class ModuleFunctionTest(test.TestCase):
  """Tests calling the module-level Defuns defined above."""

  def testBasic(self):
    with ops.Graph().as_default():
      # Five 1x1 constants: [[0.]], [[1.]], ..., [[4.]].
      a, b, c, d, e = [
          constant_op.constant([[_]], dtype=dtypes.float32) for _ in range(5)
      ]
      y = Linear(a, b, c)
      z = Linear2(a, b, c, d, e)
      with session.Session() as sess:
        self.assertAllEqual([[1]], sess.run(y))
        self.assertAllEqual([[5]], sess.run(z))

  @test_util.enable_c_api
  def testBasicWithCApi(self):
    with ops.Graph().as_default():
      a, b, c, d, e = [
          constant_op.constant([[_]], dtype=dtypes.float32) for _ in range(5)
      ]
      y = LinearWithCApi(a, b, c)
      z = Linear2WithCApi(a, b, c, d, e)
      with session.Session() as sess:
        self.assertAllEqual([[1]], sess.run(y))
        self.assertAllEqual([[5]], sess.run(z))
@test_util.with_c_api
class VariableHoistingTest(test.TestCase):
  """Tests that variables created inside a Defun are hoisted out as extra args."""

  def _testSimpleModel(self, use_forward_func, use_resource=False):
    """Builds a tiny sigmoid model inside a Defun and checks hoisting.

    Args:
      use_forward_func: if True the gradient function calls the Defun-wrapped
        forward model; otherwise it inlines the Python model directly.
      use_resource: whether the hoisted variables are resource variables.
    """

    def _Model(x):
      w = variable_scope.get_variable(
          "w", (64, 64),
          initializer=init_ops.random_uniform_initializer(seed=312),
          use_resource=use_resource)
      # BUG FIX: the original had a stray trailing comma after this call,
      # which bound `b` to a 1-tuple instead of the variable itself.
      b = variable_scope.get_variable(
          "b", (64),
          initializer=init_ops.zeros_initializer(),
          use_resource=use_resource)
      return math_ops.sigmoid(math_ops.matmul(x, w) + b)

    @function.Defun()
    def Model(x):
      return _Model(x)

    cvars = []

    @function.Defun()
    def Grad(x, y0):
      if use_forward_func:
        y = Model(x)
      else:
        y = _Model(x)
      loss = math_ops.reduce_mean(
          math_ops.reduce_sum(y0 * math_ops.log(y), 1), 0)
      # The hoisted variables appear as extra function arguments.
      arg_w, arg_b = function.get_extra_args()
      self.assertEqual(arg_w.get_shape(), tensor_shape.TensorShape([64, 64]))
      self.assertEqual(arg_b.get_shape(), tensor_shape.TensorShape([64]))
      dw, db = gradients_impl.gradients(loss, [arg_w, arg_b])
      cvars.extend(function.get_extra_vars())
      return loss, dw, db

    g = ops.Graph()
    with g.as_default():
      x = random_ops.random_normal([64, 64], seed=100)
      y0 = random_ops.random_normal([64, 64], seed=200)
      with variable_scope.variable_scope("Foo"):
        loss, dw, db = Grad(x, y0)

    # Exactly the two model variables were hoisted, under the caller's scope.
    self.assertEqual(2, len(cvars))
    w, b = cvars[:2]
    self.assertEqual("Foo/w", w.op.name)
    self.assertEqual("Foo/b", b.op.name)

    with self.test_session(graph=g) as sess:
      sess.run(variables.global_variables_initializer())
      w, b, x, y0, loss, dw, db = sess.run([w, b, x, y0, loss, dw, db])

    # Golden values for the seeded initializers and inputs.
    self.assertAllEqual(w.shape, (64, 64))
    self.assertAllClose(np.sum(w), 2050.44)
    self.assertAllEqual(b.shape, (64,))
    self.assertAllClose(np.sum(b), 0.0)
    self.assertAllClose(loss, -2.27, rtol=1e-2)
    self.assertAllEqual(dw.shape, (64, 64))
    self.assertAllClose(np.sum(dw), -1.04, rtol=1e-2)
    self.assertAllEqual(db.shape, (64,))
    self.assertAllClose(np.sum(db), 0.509, rtol=1e-2)

  def testBasic(self):
    self._testSimpleModel(True)
    self._testSimpleModel(False)

  def testBasicResource(self):
    self._testSimpleModel(True, use_resource=True)
    self._testSimpleModel(False, use_resource=True)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
#
# Original author is Sander Marechal <s.marechal@jejik.com>
# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
#
# !!! This file is python 3.x compliant !!!
#
# Changes:#
# - Added code to demote daemon process to specified uid and gid
# - Added debug logging
# - Re-coded for python 3.x
# Note: disable specific pylint checks globally here.
# superfluous-parens
# pylint: disable=C0325
import atexit
import errno
import grp
import logging
import os
import pwd
import sys
import time

from signal import SIGTERM
_logger = logging.getLogger(__name__)
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(
self,
procbase=None,
dirmask=None,
pidfile=None,
uid="nobody",
gid="nobody",
stdin="/dev/null",
stdout="/dev/null",
stderr="/dev/null",
):
self.procbase = procbase
self.dirmask = dirmask
if procbase and pidfile:
self.pidfile = os.path.join(procbase, pidfile)
else:
self.pidfile = pidfile
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.uid = uid
self.gid = gid
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
# Lookup group and user id if we are the root user
if self.uid != "nobody" and os.getuid() == 0:
try:
groupinfo = grp.getgrnam(self.gid)
_logger.debug("our group info. n: {0}, i:{1}".format(groupinfo.gr_name, groupinfo.gr_gid))
except KeyError:
_logger.critical("get daemon group id failed")
sys.exit(1)
try:
userinfo = pwd.getpwnam(self.uid)
_logger.debug("our user info n: {0}, i:{1}".format(userinfo.pw_name, userinfo.pw_uid))
except KeyError:
_logger.critical("get daemon user id failed")
sys.exit(1)
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except os.error as err:
_logger.critical("fork #1 of double fork failed. ({0}): {1}".format(err.errno, err.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except os.error as err:
_logger.critical("fork #2 of double fork failed. ({0}): {1}".format(err.errno, err.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(self.stdin, "rb")
so = open(self.stdout, "a+b")
se = open(self.stderr, "a+b")
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# Setup proc base directory and mask
try:
os.makedirs(self.procbase, int(self.dirmask, 8))
except OSError:
pass
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
# open(self.pidfile, 'w+').write("%s\n" % pid)
with open(self.pidfile, "w+") as handle:
handle.write("%s\n" % pid)
os.chmod(self.pidfile, 0o440)
# If uid != nobody try to demote the process
if self.uid != "nobody":
# Assume that we can only demote the process if we are running as root
if os.getuid() == 0:
# Make the procbase directory and pid file are owned by our process user
os.chown(self.procbase, userinfo.pw_uid, groupinfo.gr_gid)
os.chown(self.pidfile, userinfo.pw_uid, groupinfo.gr_gid)
# demote process to 'pki' user and 'secure' group
os.setgid(groupinfo.gr_gid)
os.setuid(userinfo.pw_uid)
else:
_logger.warning("not running as root, unable to demote process")
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# if pid file exists, try to stop daemon.
if os.path.exists(self.pidfile):
print(("daemon pid exists, stopping daemon.".format(self.pidfile)))
self.stop()
if os.path.exists(self.pidfile):
print(("PID file already exists. daemon already running?".format(self.pidfile)))
return
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# abort if pid file does not exist
if not os.path.exists(self.pidfile):
print(("pidfile {0} does not exist. daemon not running?".format(self.pidfile)))
return
# Get the pid from the pidfile
try:
pf = open(self.pidfile, "r")
pid = int(pf.read().strip())
pf.close()
except os.error as err:
_logger.debug(
"unknown error when attempting to get pid from pid file. ({0}): {1}".format(err.errno, err.strerror)
)
pid = None
if not pid:
_logger.warning("pidfile {0} does not exist. daemon not running?".format(self.pidfile))
return # not an error in a restart
# Try killing the daemon process
try:
while True:
os.kill(pid, SIGTERM)
time.sleep(0.2)
except os.error as err:
if err.errno == 3: # No such process
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
_logger.error(
"unknown error when attempting to kill process. ({0}): {1}".format(err.errno, err.strerror)
)
sys.exit(1)
try:
os.rmdir(self.procbase)
except OSError:
pass
def restart(self):
    """Stop the daemon if it is running, then start it again."""
    self.stop()
    self.start()
def run(self):
    """Daemon main-loop hook.

    Subclasses of Daemon override this; it is invoked after the process
    has been daemonized by start() or restart().  The base implementation
    intentionally does nothing.
    """
| |
"""
This file is for biased simulation for alanine dipeptide only, it is used as the test for
more general file biased_simulation_general.py, which could be easily extend to other new
systems.
"""
from ANN_simulation import *
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import ast, argparse
import os
import datetime
from config import *
# Command-line interface.  Positional arguments describe the simulation
# length, biasing strength and autoencoder; optional arguments cover the
# output trajectory, network topology, platform, metadynamics settings,
# external plumed scripts, and the adjustable-force-constant mode.
parser = argparse.ArgumentParser()
parser.add_argument("record_interval", type=int, help="interval to take snapshots")
parser.add_argument("total_num_of_steps", type=int, help="total number of simulation steps")
parser.add_argument("force_constant", type=float, help="force constants")
parser.add_argument("folder_to_store_output_files", type=str, help="folder to store the output pdb and report files")
parser.add_argument("autoencoder_info_file", type=str, help="file to store autoencoder information (coefficients)")
parser.add_argument("pc_potential_center", type=str, help="potential center (should include 'pc_' as prefix)")
parser.add_argument("--out_traj", type=str, default=None, help="output trajectory file")
parser.add_argument("--layer_types", type=str, default=str(CONFIG_27), help='layer types')
parser.add_argument("--num_of_nodes", type=str, default=str(CONFIG_3[:3]), help='number of nodes in each layer')
parser.add_argument("--temperature", type=int, default= CONFIG_21, help='simulation temperature')
parser.add_argument("--data_type_in_input_layer", type=int, default=1, help='data_type_in_input_layer, 0 = cos/sin, 1 = Cartesian coordinates')
parser.add_argument("--platform", type=str, default=CONFIG_23, help='platform on which the simulation is run')
parser.add_argument("--scaling_factor", type=float, default = float(CONFIG_49), help='scaling_factor for ANN_Force')
parser.add_argument("--starting_pdb_file", type=str, default='../resources/alanine_dipeptide.pdb', help='the input pdb file to start simulation')
parser.add_argument("--starting_frame", type=int, default=0, help="index of starting frame in the starting pdb file")
parser.add_argument("--minimize_energy", type=int, default=1, help='whether to minimize energy (1 = yes, 0 = no)')
parser.add_argument("--equilibration_steps", type=int, default=1000, help="number of steps for the equilibration process")
# next few options are for metadynamics
parser.add_argument("--bias_method", type=str, default='US', help="biasing method for enhanced sampling, US = umbrella sampling, MTD = metadynamics")
parser.add_argument("--MTD_pace", type=int, default=CONFIG_66, help="pace of metadynamics")
parser.add_argument("--MTD_height", type=float, default=CONFIG_67, help="height of metadynamics")
parser.add_argument("--MTD_sigma", type=float, default=CONFIG_68, help="sigma of metadynamics")
parser.add_argument("--MTD_WT", type=int, default=CONFIG_69, help="whether to use well-tempered version")
parser.add_argument("--MTD_biasfactor", type=float, default=CONFIG_70, help="biasfactor of well-tempered metadynamics")
# following is for plumed script
parser.add_argument("--plumed_file", type=str, default=None, help="plumed script for biasing force, used only when the bias_method == plumed_other")
parser.add_argument("--plumed_add_string", type=str, default="", help="additional string to be attached to the end of plumed script in args.plumed_file")
# note on "force_constant_adjustable" mode:
# the simulation will stop if either:
# force constant is greater or equal to max_force_constant
# or distance between center of data cloud and potential center is smaller than distance_tolerance
parser.add_argument("--fc_adjustable", help="set the force constant to be adjustable", action="store_true")
parser.add_argument("--max_fc", type=float, default=CONFIG_32, help="max force constant (for force_constant_adjustable mode)")
parser.add_argument("--fc_step", type=float, default=CONFIG_34, help="the value by which the force constant is increased each time (for force_constant_adjustable mode)")
parser.add_argument("--distance_tolerance", type=float, default=CONFIG_35, help="max distance allowed between center of data cloud and potential center (for force_constant_adjustable mode)")
parser.add_argument("--autoencoder_file", type=str, help="pkl file that stores autoencoder (for force_constant_adjustable mode)")
parser.add_argument("--remove_previous", help="remove previous outputs while adjusting force constants", action="store_true")
args = parser.parse_args()
# Module-level settings derived from the parsed command-line arguments.
record_interval = args.record_interval
total_number_of_steps = args.total_num_of_steps
input_data_type = ['cossin', 'Cartesian', 'pairwise'][args.data_type_in_input_layer]
force_constant = args.force_constant
scaling_factor = args.scaling_factor
# raw strings: the original non-raw "\[" patterns are invalid escape
# sequences (DeprecationWarning in Python 3); behavior is unchanged
layer_types = re.sub(r"\[|\]|\"|\'| ", '', args.layer_types).split(',')
num_of_nodes = re.sub(r"\[|\]|\"|\'| ", '', args.num_of_nodes).split(',')
num_of_nodes = [int(item) for item in num_of_nodes]
out_format = '.dcd' if args.out_traj is None else os.path.splitext(args.out_traj)[1]
if float(force_constant) != 0:
    from ANN import *
folder_to_store_output_files = args.folder_to_store_output_files  # this is used to separate outputs for different networks into different folders
autoencoder_info_file = args.autoencoder_info_file
# this API is the generalization for higher-dimensional cases
potential_center = list([float(x) for x in args.pc_potential_center.replace('"', '').replace('pc_', '').split(',')])
if not os.path.exists(folder_to_store_output_files):
    try:
        os.makedirs(folder_to_store_output_files)
    except OSError:  # narrowed from a bare except: directory may appear concurrently
        pass
def run_simulation(force_constant):
    """Run one biased MD simulation of alanine dipeptide and return the
    name of the trajectory file that was written.

    The system is built from args.starting_pdb_file (in vacuum by default;
    the explicit-solvent branch exists but is hard-disabled below), then a
    biasing force is attached according to args.bias_method:

    - "US":           umbrella sampling in the autoencoder CV space (ANN_Force)
    - "US_on_phipsi": umbrella sampling on the phi/psi torsions (plumed)
    - "MTD":          metadynamics on the autoencoder CVs (plumed)
    - "SMD":          steered MD on phi/psi with hard-coded endpoints (plumed)
    - "TMD":          targeted MD on an RMSD collective variable (plumed)
    - "plumed_other": arbitrary user-supplied plumed script

    Reads module-level globals set at import time (args, potential_center,
    folder_to_store_output_files, autoencoder_info_file, layer_types, ...).
    """
    assert(os.path.exists(folder_to_store_output_files))
    input_pdb_file_of_molecule = args.starting_pdb_file
    force_field_file = 'amber99sb.xml'
    water_field_file = 'tip3p.xml'
    # default output name encodes the force constant and potential center
    pdb_reporter_file = '%s/output_fc_%f_pc_%s.pdb' %(folder_to_store_output_files, force_constant, str(potential_center).replace(' ',''))
    if not args.out_traj is None:
        pdb_reporter_file = args.out_traj
    state_data_reporter_file = pdb_reporter_file.replace('output_fc', 'report_fc').replace('.pdb', '.txt')
    # check if the file exist
    for item_filename in [pdb_reporter_file, state_data_reporter_file]:
        Helper_func.backup_rename_file_if_exists(item_filename)
    index_of_backbone_atoms = CONFIG_57[0]
    flag_random_seed = 0 # whether we need to fix this random seed
    simulation_temperature = args.temperature
    time_step = CONFIG_22   # simulation time step, in ps
    pdb = PDBFile(input_pdb_file_of_molecule)
    modeller = Modeller(pdb.topology, pdb.getPositions(frame=args.starting_frame))
    solvent_opt = 'no_water'  # NOTE(review): explicit-solvent branch below is currently unreachable
    if solvent_opt == 'explicit':
        forcefield = ForceField(force_field_file, water_field_file)
        modeller.addSolvent(forcefield, model=water_field_file.split('.xml')[0], boxSize=Vec3(3, 3, 3) * nanometers,
                            ionicStrength=0 * molar)
        system = forcefield.createSystem(modeller.topology, nonbondedMethod=PME, nonbondedCutoff=1.0 * nanometers,
                                         constraints=AllBonds, ewaldErrorTolerance=0.0005)
    else:
        forcefield = ForceField(force_field_file)
        system = forcefield.createSystem(modeller.topology, nonbondedMethod=NoCutoff, constraints=AllBonds)
    if args.bias_method == "US":
        if float(force_constant) != 0:
            # umbrella sampling in the autoencoder CV space via the custom
            # ANN_Force OpenMM plugin
            force = ANN_Force()
            force.set_layer_types(layer_types)
            force.set_data_type_in_input_layer(args.data_type_in_input_layer)
            force.set_list_of_index_of_atoms_forming_dihedrals_from_index_of_backbone_atoms(index_of_backbone_atoms)
            force.set_index_of_backbone_atoms(index_of_backbone_atoms)
            if args.data_type_in_input_layer == 2:
                force.set_list_of_pair_index_for_distances(CONFIG_80)
            force.set_num_of_nodes(num_of_nodes)
            force.set_potential_center(potential_center)
            force.set_force_constant(float(force_constant))
            unit_scaling = 1.0  # TODO: check unit scaling
            force.set_scaling_factor(float(scaling_factor) / unit_scaling)  # since default unit is nm in OpenMM
            # TODO: need to fix following for multi-hidden layer cases
            temp_coeffs, temp_bias = np.load(autoencoder_info_file)
            # sanity check: weight/bias array sizes must match declared layer sizes
            for item_layer_index in [0, 1]:
                assert (len(temp_coeffs[item_layer_index]) ==
                        num_of_nodes[item_layer_index] * num_of_nodes[item_layer_index + 1]), (len(temp_coeffs[item_layer_index]),
                        (num_of_nodes[item_layer_index], num_of_nodes[item_layer_index + 1]))
                assert (len(temp_bias[item_layer_index]) == num_of_nodes[item_layer_index + 1]), (len(temp_bias[item_layer_index]), num_of_nodes[item_layer_index + 1])
            # need tolist() since C++ only accepts Python list
            force.set_coeffients_of_connections([item_w.tolist() for item_w in temp_coeffs])
            force.set_values_of_biased_nodes([item_w.tolist() for item_w in temp_bias])
            system.addForce(force)
    elif args.bias_method == "US_on_phipsi":
        from openmmplumed import PlumedForce
        # one KAPPA entry per CV dimension
        kappa_string = ','.join([str(force_constant) for _ in potential_center])
        plumed_force_string = """
phi: TORSION ATOMS=5,7,9,15
psi: TORSION ATOMS=7,9,15,17
restraint: RESTRAINT ARG=phi,psi AT=%f,%f KAPPA=%s
PRINT STRIDE=10 ARG=* FILE=COLVAR
""" % (potential_center[0], potential_center[1], kappa_string)
        system.addForce(PlumedForce(plumed_force_string))
    elif args.bias_method == "MTD":
        from openmmplumed import PlumedForce
        # the expression script defines the network inputs; the autoencoder
        # info file contributes the plumed definition of the network itself
        plumed_force_string = Alanine_dipeptide.get_expression_script_for_plumed()
        with open(autoencoder_info_file, 'r') as f_in:
            plumed_force_string += f_in.read()
        # note that dimensionality of MTD is determined by potential_center string
        plumed_script_ANN_mode = 'ANN'
        if plumed_script_ANN_mode == 'native':
            mtd_output_layer_string = ['l_2_out_%d' % item for item in range(len(potential_center))]
        elif plumed_script_ANN_mode == 'ANN':
            mtd_output_layer_string = ['ann_force.%d' % item for item in range(len(potential_center))]
        else: raise Exception('mode error')
        mtd_output_layer_string = ','.join(mtd_output_layer_string)
        mtd_sigma_string = ','.join([str(args.MTD_sigma) for _ in range(len(potential_center))])
        if args.MTD_WT:
            # well-tempered variant needs temperature and bias factor
            mtd_well_tempered_string = 'TEMP=%d BIASFACTOR=%f' % (args.temperature, args.MTD_biasfactor)
        else:
            mtd_well_tempered_string = ""
        plumed_force_string += """
metad: METAD ARG=%s PACE=%d HEIGHT=%f SIGMA=%s FILE=temp_MTD_hills.txt %s
PRINT STRIDE=%d ARG=%s,metad.bias FILE=temp_MTD_out.txt
""" % (mtd_output_layer_string, args.MTD_pace, args.MTD_height, mtd_sigma_string, mtd_well_tempered_string,
       record_interval, mtd_output_layer_string)
        # print plumed_force_string
        system.addForce(PlumedForce(plumed_force_string))
    elif args.bias_method == "SMD":
        # TODO: this is temporary version
        from openmmplumed import PlumedForce
        kappa_string = '1000,1000'
        plumed_force_string = """
phi: TORSION ATOMS=5,7,9,15
psi: TORSION ATOMS=7,9,15,17
restraint: MOVINGRESTRAINT ARG=phi,psi AT0=-1.5,1.0 STEP0=0 KAPPA0=%s AT1=1.0,-1.0 STEP1=%d KAPPA1=%s
PRINT STRIDE=10 ARG=* FILE=COLVAR
""" % (kappa_string, total_number_of_steps, kappa_string)
        system.addForce(PlumedForce(plumed_force_string))
    elif args.bias_method == "TMD":  # targeted MD
        # TODO: this is temporary version
        from openmmplumed import PlumedForce
        kappa_string = '10000'
        plumed_force_string = """
phi: TORSION ATOMS=5,7,9,15
psi: TORSION ATOMS=7,9,15,17
rmsd: RMSD REFERENCE=../resources/alanine_ref_1_TMD.pdb TYPE=OPTIMAL
restraint: MOVINGRESTRAINT ARG=rmsd AT0=0 STEP0=0 KAPPA0=0 AT1=0 STEP1=%d KAPPA1=%s
PRINT STRIDE=10 ARG=* FILE=COLVAR
""" % (total_number_of_steps, kappa_string)
        system.addForce(PlumedForce(plumed_force_string))
    elif args.bias_method == "plumed_other":
        from openmmplumed import PlumedForce
        with open(args.plumed_file, 'r') as f_in:
            plumed_force_string = f_in.read().strip() + args.plumed_add_string
        system.addForce(PlumedForce(plumed_force_string))
    else:
        raise Exception('bias method error')
    # end of biased force
    integrator = LangevinIntegrator(simulation_temperature*kelvin, 1/picosecond, time_step*picoseconds)
    if flag_random_seed:
        integrator.setRandomNumberSeed(1)  # set random seed
    platform = Platform.getPlatformByName(args.platform)
    platform.loadPluginsFromDirectory(CONFIG_25)  # load the plugin from specific directory
    simulation = Simulation(modeller.topology, system, integrator, platform)
    simulation.context.setPositions(modeller.positions)
    if args.minimize_energy:
        print('begin Minimizing energy...')
        print(datetime.datetime.now())
        simulation.minimizeEnergy()
        print('Done minimizing energy.')
        print(datetime.datetime.now())
    else:
        print('energy minimization not required')
    # equilibration steps are run before any reporter is attached
    simulation.step(args.equilibration_steps)
    if out_format == '.pdb':
        simulation.reporters.append(PDBReporter(pdb_reporter_file, record_interval))
    elif out_format == '.dcd':
        simulation.reporters.append(DCDReporter(pdb_reporter_file.replace('.pdb', '.dcd'), record_interval))
    simulation.reporters.append(StateDataReporter(state_data_reporter_file, record_interval,
                                step=True, potentialEnergy=True, kineticEnergy=True, speed=True,
                                temperature=True, progress=True, remainingTime=True,
                                totalSteps=total_number_of_steps + args.equilibration_steps,
                                ))
    simulation.step(total_number_of_steps)
    print('Done biased simulation!')
    return pdb_reporter_file
def get_distance_between_data_cloud_center_and_potential_center(pdb_file):
    """Return the Euclidean distance between the center of the sampled data
    cloud of a finished simulation and the requested potential center."""
    coor_file = Alanine_dipeptide().generate_coordinates_from_pdb_files(pdb_file)[0]
    network = autoencoder.load_from_pkl_file(args.autoencoder_file)
    sim_data = single_biased_simulation_data(network, coor_file)
    offset = sim_data.get_offset_between_potential_center_and_data_cloud_center(input_data_type)
    if layer_types[1] == "Circular":
        # fold each component back into one period before measuring distance
        offset = [min(abs(item), abs(item + 2 * np.pi), abs(item - 2 * np.pi)) for item in offset]
        print("circular offset")
    print('offset = %s' % str(offset))
    return sqrt(sum(item * item for item in offset))
def run_simulation_ssages(force_constant):
    """Run a biased simulation through the external SSAGES engine and
    return the name of the converted pdb trajectory file."""
    pc_string = str(potential_center).replace(' ', '')
    ssages_output_file = '%s/output_fc_%f_pc_%s.json' % (
        folder_to_store_output_files, force_constant, pc_string)
    # generate the SSAGES input json for this run
    subprocess.check_output('python ../src/temp_create_json_ssages.py %s %s %s %s %s' % (
        ssages_output_file, pc_string, autoencoder_info_file.replace('.npy', '.txt'),
        ssages_output_file.replace('.json', '.trr'), force_constant), shell=True)
    # run the engine itself
    subprocess.check_output("ssages " + ssages_output_file, shell=True)
    # convert the resulting trr trajectory to pdb
    pdb_reporter_file = ssages_output_file.replace('.json', '.pdb')
    subprocess.check_output('mdconvert -o %s %s -t ../resources/alanine_dipeptide.pdb' % (
        pdb_reporter_file, pdb_reporter_file.replace('.pdb', '.trr')), shell=True)
    return pdb_reporter_file
if __name__ == '__main__':
    if not args.fc_adjustable:
        # single run with the requested force constant
        run_simulation(args.force_constant)
    else:
        # adjustable mode: increase the force constant until either the data
        # cloud is within distance_tolerance of the potential center or the
        # maximum force constant is reached
        force_constant = args.force_constant
        distance_of_data_cloud_center = float("inf")
        while force_constant < args.max_fc and distance_of_data_cloud_center > args.distance_tolerance:
            if args.remove_previous:
                try:
                    command = 'rm %s/*%s*' % (folder_to_store_output_files, str(potential_center).replace(' ',''))
                    command = command.replace('[','').replace(']','')
                    subprocess.check_output(command, shell=True)
                    print("removing previous results...")
                # narrowed from a bare except: best-effort cleanup, nothing
                # to remove is not an error
                except (subprocess.CalledProcessError, OSError):
                    pass
            pdb_file = run_simulation(force_constant)
            distance_of_data_cloud_center = get_distance_between_data_cloud_center_and_potential_center(pdb_file)
            force_constant += args.fc_step
            print("distance_between_data_cloud_center_and_potential_center = %f" % distance_of_data_cloud_center)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from karbor import exception
from karbor.i18n import _
from karbor.services.protection import bank_plugin
from karbor.services.protection.checkpoint import CheckpointCollection
from karbor import utils
from oslo_config import cfg
from oslo_log import log as logging
# Per-provider configuration options, registered under the [provider]
# section of each provider's own config file when it is loaded.
provider_opts = [
    cfg.MultiStrOpt('plugin',
                    default='',
                    help='plugins to use for protection'),
    cfg.StrOpt('bank',
               default='',
               help='bank plugin to use for storage'),
    cfg.StrOpt('description',
               default='',
               help='the description of provider'),
    cfg.StrOpt('name',
               default='',
               help='the name of provider'),
    cfg.StrOpt('id',
               default='',
               help='the provider id'),
    cfg.BoolOpt('enabled',
                default=False,
                help='enabled or not'),
]
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# plugin namespace from which bank and protection plugins are loaded
# (via utils.load_plugin / utils.load_class)
PROTECTION_NAMESPACE = 'karbor.protections'

CONF.register_opt(cfg.StrOpt('provider_config_dir',
                             default='providers.d',
                             help='Configuration directory for providers.'
                                  ' Absolute path, or relative to karbor '
                                  ' configuration directory.'))
class PluggableProtectionProvider(object):
    """Protection provider assembled from one provider config file.

    Loads the bank plugin named in the config, wraps it in a Bank, builds
    a CheckpointCollection on top of it, and registers every protection
    plugin listed under the ``plugin`` option, indexed by the resource
    types each plugin supports.
    """

    def __init__(self, provider_config):
        super(PluggableProtectionProvider, self).__init__()
        self._config = provider_config
        self._id = self._config.provider.id
        self._name = self._config.provider.name
        self._description = self._config.provider.description
        # per-resource schemas collected from the registered plugins
        self._extended_info_schema = {'options_schema': {},
                                      'restore_schema': {},
                                      'saved_info_schema': {}}
        self.checkpoint_collection = None
        self._bank_plugin = None
        self._plugin_map = {}

        # a present-but-empty bank option is a configuration error
        if (hasattr(self._config.provider, 'bank') and
                not self._config.provider.bank):
            raise ImportError(_("Empty bank"))

        self._load_bank(self._config.provider.bank)
        self._bank = bank_plugin.Bank(self._bank_plugin)
        self.checkpoint_collection = CheckpointCollection(
            self._bank)

        if hasattr(self._config.provider, 'plugin'):
            for plugin_name in self._config.provider.plugin:
                if not plugin_name:
                    raise ImportError(_("Empty protection plugin"))
                self._register_plugin(plugin_name)

    @property
    def id(self):
        """Provider id from the config file."""
        return self._id

    @property
    def name(self):
        """Provider name from the config file."""
        return self._name

    @property
    def description(self):
        """Provider description from the config file."""
        return self._description

    @property
    def extended_info_schema(self):
        """Options/restore/saved-info schemas, keyed by resource type."""
        return self._extended_info_schema

    @property
    def bank(self):
        """The Bank wrapping this provider's bank plugin."""
        return self._bank

    @property
    def plugins(self):
        """Mapping of supported resource type -> protection plugin class."""
        return self._plugin_map

    def load_plugins(self):
        """Instantiate every registered plugin class with this provider's config."""
        return {
            plugin_type: plugin_class(self._config)
            for plugin_type, plugin_class in self.plugins.items()
        }

    def _load_bank(self, bank_name):
        """Load the bank plugin ``bank_name``; re-raise on failure after logging."""
        try:
            plugin = utils.load_plugin(PROTECTION_NAMESPACE, bank_name,
                                       self._config)
        except Exception:
            LOG.exception("Load bank plugin: '%s' failed.", bank_name)
            raise
        else:
            self._bank_plugin = plugin

    def _register_plugin(self, plugin_name):
        """Load a protection plugin class and index it by supported resource.

        Also collects any options/restore/saved-info schema the plugin
        exposes into ``self._extended_info_schema``.
        """
        try:
            plugin = utils.load_class(PROTECTION_NAMESPACE, plugin_name)
        except Exception:
            LOG.exception("Load protection plugin: '%s' failed.", plugin_name)
            raise
        else:
            for resource in plugin.get_supported_resources_types():
                self._plugin_map[resource] = plugin
                if hasattr(plugin, 'get_options_schema'):
                    self._extended_info_schema['options_schema'][resource] \
                        = plugin.get_options_schema(resource)
                if hasattr(plugin, 'get_restore_schema'):
                    self._extended_info_schema['restore_schema'][resource] \
                        = plugin.get_restore_schema(resource)
                if hasattr(plugin, 'get_saved_info_schema'):
                    self._extended_info_schema['saved_info_schema'][resource] \
                        = plugin.get_saved_info_schema(resource)

    def get_checkpoint_collection(self):
        """Return the CheckpointCollection built on this provider's bank."""
        return self.checkpoint_collection

    def get_checkpoint(self, checkpoint_id, context=None):
        """Fetch a single checkpoint by id from the checkpoint collection."""
        return self.get_checkpoint_collection().get(checkpoint_id,
                                                    context=context)

    def list_checkpoints(self, project_id, provider_id, limit=None,
                         marker=None, plan_id=None, start_date=None,
                         end_date=None, sort_dir=None, context=None,
                         all_tenants=False):
        """List checkpoint ids matching the given filters."""
        checkpoint_collection = self.get_checkpoint_collection()
        return checkpoint_collection.list_ids(
            project_id=project_id, provider_id=provider_id, limit=limit,
            marker=marker, plan_id=plan_id, start_date=start_date,
            end_date=end_date, sort_dir=sort_dir, context=context,
            all_tenants=all_tenants)
class ProviderRegistry(object):
    """Registry of protection providers loaded from configuration files.

    Scans CONF.provider_config_dir for ``*.conf`` files, builds a
    PluggableProtectionProvider for each enabled one, and indexes the
    successfully loaded providers by id.
    """

    def __init__(self):
        super(ProviderRegistry, self).__init__()
        self.providers = {}
        self._load_providers()

    def _load_providers(self):
        """Load every enabled provider from the provider config directory."""
        config_dir = utils.find_config(CONF.provider_config_dir)
        for config_file in os.listdir(config_dir):
            if not config_file.endswith('.conf'):
                continue
            config_path = os.path.abspath(os.path.join(config_dir,
                                                       config_file))
            provider_config = cfg.ConfigOpts()
            provider_config(args=['--config-file=' + config_path])
            provider_config.register_opts(provider_opts, 'provider')
            if not provider_config.provider.enabled:
                # lazy %-style args for consistency with the other log
                # calls in this class (was str.format inside the message)
                LOG.info('Provider %s is not enabled',
                         provider_config.provider.name)
                continue
            try:
                provider = PluggableProtectionProvider(provider_config)
            except Exception as e:
                LOG.error("Load provider: %(provider)s failed. "
                          "Reason: %(reason)s",
                          {'provider': provider_config.provider.name,
                           'reason': e})
            else:
                LOG.info('Loaded provider: %s successfully.',
                         provider_config.provider.name)
                self.providers[provider.id] = provider

    def list_providers(self, marker=None, limit=None, sort_keys=None,
                       sort_dirs=None, filters=None):
        """Return provider description dicts sorted by provider id.

        :param marker: skip providers up to and including this id
        :param limit: maximum number of providers to return
        :param filters: mapping of field name -> required value; a provider
                        is skipped when any present field differs
        """
        # TODO(jiaopengju) How to use sort_keys, sort_dirs and filters
        provider_ids = sorted(self.providers.keys())
        provider_list = sorted(
            self.providers.values(), key=lambda item: item.id)
        if marker is not None and marker in provider_ids:
            provider_list = provider_list[provider_ids.index(marker) + 1:]
        filters = filters if filters else {}
        valid_providers = []
        for provider in provider_list:
            provider_dict = dict(
                id=provider.id,
                name=provider.name,
                description=provider.description,
                extended_info_schema=provider.extended_info_schema
            )
            for key, value in filters.items():
                if key in provider_dict.keys() and \
                        value != provider_dict[key]:
                    break
            else:
                valid_providers.append(provider_dict)
            if limit is not None and len(valid_providers) == limit:
                return valid_providers
        return valid_providers

    def show_provider(self, provider_id):
        """Return the provider with ``provider_id`` or raise ProviderNotFound."""
        try:
            return self.providers[provider_id]
        except KeyError:
            raise exception.ProviderNotFound(provider_id=provider_id)
| |
"""Functions for generating interesting polynomials, e.g. for benchmarking. """
from sympy.core import Add, Mul, Symbol, Rational, sympify, Dummy, symbols
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.core.singleton import S
from sympy.polys.polytools import Poly, PurePoly
from sympy.polys.polyutils import _analyze_gens
from sympy.polys.polyclasses import DMP
from sympy.polys.densebasic import (
dmp_zero, dmp_one, dmp_ground, dmp_normal,
dup_from_raw_dict, dmp_raise, dup_random
)
from sympy.polys.densearith import (
dmp_add_term, dmp_neg, dmp_mul, dmp_sqr
)
from sympy.polys.factortools import (
dup_zz_cyclotomic_poly
)
from sympy.polys.domains import ZZ
from sympy.ntheory import nextprime
from sympy.utilities import cythonized, subsets
@cythonized("n,i")
def swinnerton_dyer_poly(n, x=None, **args):
    """Generates n-th Swinnerton-Dyer polynomial in `x`. """
    if n <= 0:
        raise ValueError("can't generate Swinnerton-Dyer polynomial of order %s" % n)
    if x is not None:
        x, cls = sympify(x), Poly
    else:
        x, cls = Dummy('x'), PurePoly
    # start with the two linear factors x +/- sqrt(2), then extend the
    # sign combinations with the square root of each successive prime
    p = 2
    elts = [[x, -sqrt(2)],
            [x, sqrt(2)]]
    for i in xrange(2, n+1):
        p = nextprime(p)
        root = sqrt(p)
        # every existing term forks into a "-root" and a "+root" variant
        elts = [elt + [sign*root] for elt in elts for sign in (-1, 1)]
    factors = [Add(*elt) for elt in elts]
    if not args.get('polys', False):
        return Mul(*factors).expand()
    else:
        return PurePoly(Mul(*factors), x)
def cyclotomic_poly(n, x=None, **args):
    """Generates cyclotomic polynomial of order `n` in `x`. """
    if n <= 0:
        raise ValueError("can't generate cyclotomic polynomial of order %s" % n)
    # build the dense integer representation, then wrap it in a Poly
    # (named generator) or a PurePoly (anonymous generator)
    rep = DMP(dup_zz_cyclotomic_poly(int(n), ZZ), ZZ)
    if x is None:
        poly = PurePoly.new(rep, Dummy('x'))
    else:
        poly = Poly.new(rep, x)
    return poly if args.get('polys', False) else poly.as_expr()
def symmetric_poly(n, *gens, **args):
    """Generates symmetric polynomial of order `n`. """
    gens = _analyze_gens(gens)
    if n < 0 or n > len(gens) or not gens:
        raise ValueError("can't generate symmetric polynomial of order %s for %s" % (n, gens))
    if not n:
        # the elementary symmetric polynomial of order 0 is 1
        poly = S.One
    else:
        # sum of all products of n distinct generators
        poly = Add(*[Mul(*s) for s in subsets(gens, int(n))])
    return Poly(poly, *gens) if args.get('polys', False) else poly
def random_poly(x, n, inf, sup, domain=ZZ, polys=False):
    """Return a polynomial of degree ``n`` with coefficients in ``[inf, sup]``. """
    result = Poly(dup_random(n, inf, sup, domain), x, domain=domain)
    return result if polys else result.as_expr()
@cythonized("n,i,j")
def interpolating_poly(n, x, X='x', Y='y'):
    """Construct Lagrange interpolating polynomial for ``n`` data points. """
    if isinstance(X, str):
        X = symbols("%s:%s" % (X, n))
    if isinstance(Y, str):
        Y = symbols("%s:%s" % (Y, n))
    coeffs = []
    for i in xrange(0, n):
        # i-th Lagrange basis polynomial: prod_{j != i} (x - X[j]) / (X[i] - X[j])
        numer = Mul(*[x - X[j] for j in xrange(0, n) if j != i])
        denom = Mul(*[X[i] - X[j] for j in xrange(0, n) if j != i])
        coeffs.append(numer/denom)
    return Add(*[coeff*y for coeff, y in zip(coeffs, Y)])
@cythonized("n,i")
def fateman_poly_F_1(n):
    """Fateman's GCD benchmark: trivial GCD """
    Y = [Symbol('y_' + str(i)) for i in xrange(0, n+1)]
    y_0, y_1 = Y[0], Y[1]
    # linear and quadratic sums over all generators
    u = y_0 + Add(*Y[1:])
    v = y_0**2 + Add(*[y**2 for y in Y[1:]])
    F = ((u + 1)*(u + 2)).as_poly(*Y)
    G = ((v + 1)*(-3*y_1*y_0**2 + y_1**2 - 1)).as_poly(*Y)
    # gcd(F, G) is the constant polynomial 1
    H = Poly(1, *Y)
    return F, G, H
@cythonized("n,m,i")
def dmp_fateman_poly_F_1(n, K):
    """Fateman's GCD benchmark: trivial GCD """
    # u = sum of all n+1 variables, built in dense recursive representation:
    # start with the innermost variable and wrap one level per iteration
    u = [K(1), K(0)]
    for i in xrange(0, n):
        u = [dmp_one(i, K), u]
    # v = sum of the squares of all n+1 variables, built the same way
    v = [K(1), K(0), K(0)]
    for i in xrange(0, n):
        v = [dmp_one(i, K), dmp_zero(i), v]
    m = n-1
    # U = u + 1, V = u + 2 (ground terms added at degree 0)
    U = dmp_add_term(u, dmp_ground(K(1), m), 0, n, K)
    V = dmp_add_term(u, dmp_ground(K(2), m), 0, n, K)
    # f encodes -3*y_1*y_0**2 + y_1**2 - 1 (cf. fateman_poly_F_1 above)
    f = [[-K(3), K(0)], [], [K(1), K(0), -K(1)]]
    # W = v + 1; Y is f raised to the full number of levels
    W = dmp_add_term(v, dmp_ground(K(1), m), 0, n, K)
    Y = dmp_raise(f, m, 1, K)
    # F = (u + 1)*(u + 2), G = (v + 1)*f, and their GCD H is 1
    F = dmp_mul(U, V, n, K)
    G = dmp_mul(W, Y, n, K)
    H = dmp_one(n, K)
    return F, G, H
@cythonized("n,i")
def fateman_poly_F_2(n):
    """Fateman's GCD benchmark: linearly dense quartic inputs """
    Y = [Symbol('y_' + str(i)) for i in xrange(0, n+1)]
    y_0 = Y[0]
    # sum of the trailing generators
    rest = Add(*Y[1:])
    # H is the common quadratic factor of both inputs
    H = Poly((y_0 + rest + 1)**2, *Y)
    F = Poly((y_0 - rest - 2)**2, *Y)
    G = Poly((y_0 + rest + 2)**2, *Y)
    return H*F, H*G, H
@cythonized("n,m,i")
def dmp_fateman_poly_F_2(n, K):
    """Fateman's GCD benchmark: linearly dense quartic inputs """
    # u = sum of the trailing variables in dense recursive representation
    # (cf. fateman_poly_F_2 above)
    u = [K(1), K(0)]
    for i in xrange(0, n-1):
        u = [dmp_one(i, K), u]
    m = n-1
    # v = u + 2
    v = dmp_add_term(u, dmp_ground(K(2), m-1), 0, n, K)
    # f = (y_0 - u - 2)**2, g = (y_0 + u + 2)**2
    f = dmp_sqr([dmp_one(m, K), dmp_neg(v, m, K)], n, K)
    g = dmp_sqr([dmp_one(m, K), v], n, K)
    # h = (y_0 + u + 1)**2 is the common factor of both products
    v = dmp_add_term(u, dmp_one(m-1, K), 0, n, K)
    h = dmp_sqr([dmp_one(m, K), v], n, K)
    return dmp_mul(f, h, n, K), dmp_mul(g, h, n, K), h
@cythonized("n,i")
def fateman_poly_F_3(n):
    """Fateman's GCD benchmark: sparse inputs (deg f ~ vars f) """
    Y = [Symbol('y_' + str(i)) for i in xrange(0, n+1)]
    y_0 = Y[0]
    # sparse tail: every trailing generator enters at degree n+1
    rest = Add(*[y**(n+1) for y in Y[1:]])
    # H is the common factor of both inputs
    H = Poly((y_0**(n+1) + rest + 1)**2, *Y)
    F = Poly((y_0**(n+1) - rest - 2)**2, *Y)
    G = Poly((y_0**(n+1) + rest + 2)**2, *Y)
    return H*F, H*G, H
@cythonized("n,i")
def dmp_fateman_poly_F_3(n, K):
    """Fateman's GCD benchmark: sparse inputs (deg f ~ vars f) """
    # u = sum over the trailing variables, each raised to degree n+1
    # (cf. fateman_poly_F_3 above)
    u = dup_from_raw_dict({n+1: K.one}, K)
    for i in xrange(0, n-1):
        u = dmp_add_term([u], dmp_one(i, K), n+1, i+1, K)
    # v = u + 2
    v = dmp_add_term(u, dmp_ground(K(2), n-2), 0, n, K)
    # f = (y_0**(n+1) - u - 2)**2, g = (y_0**(n+1) + u + 2)**2
    f = dmp_sqr(dmp_add_term([dmp_neg(v, n-1, K)], dmp_one(n-1, K), n+1, n, K), n, K)
    g = dmp_sqr(dmp_add_term([v], dmp_one(n-1, K), n+1, n, K), n, K)
    # h = (y_0**(n+1) + u + 1)**2 is the common factor of both products
    v = dmp_add_term(u, dmp_one(n-2, K), 0, n-1, K)
    h = dmp_sqr(dmp_add_term([v], dmp_one(n-1, K), n+1, n, K), n, K)
    return dmp_mul(f, h, n, K), dmp_mul(g, h, n, K), h
# A few useful polynomials from Wang's paper ('78).
# Each is written in the dense recursive representation consumed by
# dmp_normal; the trailing arguments give the number of extra levels
# (so f_0..f_5, w_1 are trivariate, f_6 has four variables, w_2 two)
# and the coefficient domain.
f_0 = dmp_normal([
    [[1,2,3], [2]],
    [[3]],
    [[4,5,6], [1,2,1], [1]]
], 2, ZZ)

f_1 = dmp_normal([
    [[1, 0], []],
    [[1, 0, 1], [20, 30], [1, 10, 0]],
    [[1, 0], [30, 20], [1, 10, 1, 610], [20, 230, 300]],
    [[1, 10, 0], [30, 320, 200], [600, 6000]]
], 2, ZZ)

f_2 = dmp_normal([
    [[1], [1, 0], [1, 0, 0], [1, 0, 0, 0]],
    [[]],
    [[1], [1, 90], [90, 0]],
    [[1, -11], [], [1, -11, 0, 0]],
    [[]],
    [[1, -11], [90, -990]]
], 2, ZZ)

f_3 = dmp_normal([
    [[1], [], []],
    [[1, 0, 0, 0, 1]],
    [[1, 0], [], [], [1, 0]],
    [[1], [1, 0, 0, 0], [], [1, 0, 0, 0, 1, 0], []],
    [[1, 0, 0, 0, 1], [1, 0, 0, 0, 1, 1, 0, 0], []],
    [[1, 0], [1, 0, 0, 0, 0], []]
], 2, ZZ)

f_4 = dmp_normal([
    [[-1, 0], [], [], [], [], [], [], [], []],
    [[-1, 0, 0, 0], [], [], [], [], []],
    [[-1, 0, 0], [], [], [], [-5], [], [], [], [], [], [], [], []],
    [[-1, 0, 0, 0, 0], [], [1, 0, 3, 0], [], [-5, 0, 0], [-1, 0, 0, 0], [], [], [], []],
    [[1, 0, 3, 0, 0, 0], [], [], [-1, 0, 0, 0, 0, 0], []],
    [[1, 0, 3, 0, 0], [], [], [-1, 0, 0, 0, 0], [5, 0, 15], [], [], [-5, 0, 0], [], [], [], []],
    [[1, 0, 3, 0, 0, 0, 0], [], [], [-1, 0, 0, 0, 0, 0, 0], [5, 0, 15, 0, 0], [1, 0, 3, 0, 0, 0], [], [-5, 0, 0, 0, 0], []],
    [[1, 0, 3, 0, 0, 0, 0, 0]],
    [[1, 0, 3, 0, 0, 0, 0], [], [], [], [5, 0, 15, 0, 0], [], [], []],
    [[1, 0, 3, 0, 0, 0, 0, 0, 0], [], [], [], [5, 0, 15, 0, 0, 0, 0]]
], 2, ZZ)

f_5 = dmp_normal([
    [[-1]],
    [[-3], [3, 0]],
    [[-3], [6, 0], [-3, 0, 0]],
    [[-1], [3, 0], [-3, 0, 0], [1, 0, 0, 0]]
], 2, ZZ)

# f_6 lives in four variables (level 3)
f_6 = dmp_normal([
    [[[2115]], [[]]],
    [[[45, 0, 0], [], [], [-45, 0, 0]]],
    [[[]]],
    [[[-423]], [[-47]], [[]], [[141], [], [94, 0], []], [[]]],
    [[[-9, 0, 0], [], [], [9, 0, 0]],
     [[-1, 0, 0], [], [], [1, 0, 0]],
     [[]],
     [[3, 0, 0], [], [2, 0, 0, 0], [-3, 0, 0], [], [-2, 0, 0, 0], []]
    ]
], 3, ZZ)

w_1 = dmp_normal([
    [[4, 0, 0], [4, 0, 0, 0], [-4, 0, 0, 0, 0], [-4, 0, 0, 0, 0, 0], []],
    [[1, 0, 0, 0], [12, 0], [-1, 0, 0, 12, 0, 0], [-12, 0, 0, 0], [-12, 0, 0, 0, 0]],
    [[8], [6, 8, 0], [-4, 4, -8, 0, 0], [-4, -2, -8, 0, 0, 0], []],
    [[2, 0], [1, 0, 0, 0], [-1, 0, -2 , 0, 9, 0], [-12, 12, 0, 0], [-12, 3, 0, 0, 0]],
    [[6], [-6, 8, 0], [-2, -8, 2, 0, 0], []],
    [[2, 0], [-2, 0, 0, 0], [-3, 0], [3, 0, 0, 0]],
    [[-2], [2, 0, 0], []]
], 2, ZZ)

# w_2 is bivariate (level 1)
w_2 = dmp_normal([
    [24, 48, 0, 0],
    [24, 0, 0, -72, 0, 0],
    [25, 2, 0, 4, 8],
    [1, 0, 0, 1, 0, 0, -12],
    [1, -1, -2, 292, 0, 0],
    [-1, 0, 0, 3, 0, 0, 0],
    [-1, 0, 12, 0, 0, 48],
    [],
    [-12, 0, 0, 0]
], 1, ZZ)
| |
'''tzinfo timezone information for Asia/Ulan_Bator.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
# Auto-generated pytz zoneinfo module: do not edit these tables by hand --
# they are produced from the Olson/IANA tz database. Regenerate instead.
class Ulan_Bator(DstTzInfo):
    '''Asia/Ulan_Bator timezone definition. See datetime.tzinfo for details'''

    zone = 'Asia/Ulan_Bator'

    # UTC instants at which the zone's offset/DST state changes. Entry k
    # pairs with _transition_info[k] below. The first entry is a sentinel
    # (year 1) covering all times before the first real transition.
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1905,7,31,16,52,28),
        d(1977,12,31,17,0,0),
        d(1983,3,31,16,0,0),
        d(1983,9,30,15,0,0),
        d(1984,3,31,16,0,0),
        d(1984,9,29,15,0,0),
        d(1985,3,30,16,0,0),
        d(1985,9,28,15,0,0),
        d(1986,3,29,16,0,0),
        d(1986,9,27,15,0,0),
        d(1987,3,28,16,0,0),
        d(1987,9,26,15,0,0),
        d(1988,3,26,16,0,0),
        d(1988,9,24,15,0,0),
        d(1989,3,25,16,0,0),
        d(1989,9,23,15,0,0),
        d(1990,3,24,16,0,0),
        d(1990,9,29,15,0,0),
        d(1991,3,30,16,0,0),
        d(1991,9,28,15,0,0),
        d(1992,3,28,16,0,0),
        d(1992,9,26,15,0,0),
        d(1993,3,27,16,0,0),
        d(1993,9,25,15,0,0),
        d(1994,3,26,16,0,0),
        d(1994,9,24,15,0,0),
        d(1995,3,25,16,0,0),
        d(1995,9,23,15,0,0),
        d(1996,3,30,16,0,0),
        d(1996,9,28,15,0,0),
        d(1997,3,29,16,0,0),
        d(1997,9,27,15,0,0),
        d(1998,3,28,16,0,0),
        d(1998,9,26,15,0,0),
        d(2001,4,27,18,0,0),
        d(2001,9,28,17,0,0),
        d(2002,3,29,18,0,0),
        d(2002,9,27,17,0,0),
        d(2003,3,28,18,0,0),
        d(2003,9,26,17,0,0),
        d(2004,3,26,18,0,0),
        d(2004,9,24,17,0,0),
        d(2005,3,25,18,0,0),
        d(2005,9,23,17,0,0),
        d(2006,3,24,18,0,0),
        d(2006,9,29,17,0,0),
        d(2007,3,30,18,0,0),
        d(2007,9,28,17,0,0),
        d(2008,3,28,18,0,0),
        d(2008,9,26,17,0,0),
        d(2009,3,27,18,0,0),
        d(2009,9,25,17,0,0),
        d(2010,3,26,18,0,0),
        d(2010,9,24,17,0,0),
        d(2011,3,25,18,0,0),
        d(2011,9,23,17,0,0),
        d(2012,3,30,18,0,0),
        d(2012,9,28,17,0,0),
        d(2013,3,29,18,0,0),
        d(2013,9,27,17,0,0),
        d(2014,3,28,18,0,0),
        d(2014,9,26,17,0,0),
        d(2015,3,27,18,0,0),
        d(2015,9,25,17,0,0),
        d(2016,3,25,18,0,0),
        d(2016,9,23,17,0,0),
        d(2017,3,24,18,0,0),
        d(2017,9,29,17,0,0),
        d(2018,3,30,18,0,0),
        d(2018,9,28,17,0,0),
        d(2019,3,29,18,0,0),
        d(2019,9,27,17,0,0),
        d(2020,3,27,18,0,0),
        d(2020,9,25,17,0,0),
        d(2021,3,26,18,0,0),
        d(2021,9,24,17,0,0),
        d(2022,3,25,18,0,0),
        d(2022,9,23,17,0,0),
        d(2023,3,24,18,0,0),
        d(2023,9,29,17,0,0),
        d(2024,3,29,18,0,0),
        d(2024,9,27,17,0,0),
        d(2025,3,28,18,0,0),
        d(2025,9,26,17,0,0),
        d(2026,3,27,18,0,0),
        d(2026,9,25,17,0,0),
        d(2027,3,26,18,0,0),
        d(2027,9,24,17,0,0),
        d(2028,3,24,18,0,0),
        d(2028,9,29,17,0,0),
        d(2029,3,30,18,0,0),
        d(2029,9,28,17,0,0),
        d(2030,3,29,18,0,0),
        d(2030,9,27,17,0,0),
        d(2031,3,28,18,0,0),
        d(2031,9,26,17,0,0),
        d(2032,3,26,18,0,0),
        d(2032,9,24,17,0,0),
        d(2033,3,25,18,0,0),
        d(2033,9,23,17,0,0),
        d(2034,3,24,18,0,0),
        d(2034,9,29,17,0,0),
        d(2035,3,30,18,0,0),
        d(2035,9,28,17,0,0),
        d(2036,3,28,18,0,0),
        d(2036,9,26,17,0,0),
        d(2037,3,27,18,0,0),
        d(2037,9,25,17,0,0),
        ]

    # One (utcoffset seconds, dst seconds, tzname) tuple per transition above:
    # LMT, then ULAT at UTC+7, then UTC+8 with ULAST (+9, DST) alternation.
    _transition_info = [
        i(25680,0,'LMT'),
        i(25200,0,'ULAT'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        i(32400,3600,'ULAST'),
        i(28800,0,'ULAT'),
        ]

# Rebind the name to a singleton instance -- pytz's module convention: the
# module exports an instantiated tzinfo object, not the class.
Ulan_Bator = Ulan_Bator()
| |
import functools
from dateutil.parser import parse as parse_date
from django.utils import timezone
from modularodm import (
fields,
Q,
)
from modularodm.exceptions import NoResultsFound
from modularodm.validators import MaxLengthValidator
from framework.auth import Auth
from framework.exceptions import PermissionsError
from framework.mongo import (
ObjectId,
StoredObject,
validators,
)
from website import (
mails,
settings,
tokens,
)
from website.exceptions import (
InvalidSanctionApprovalToken,
InvalidSanctionRejectionToken,
NodeStateError,
)
from website.prereg import utils as prereg_utils
# Absolute URL template for viewing a node, e.g. '<DOMAIN>/{node_id}/'.
VIEW_PROJECT_URL_TEMPLATE = settings.DOMAIN + '{node_id}/'
class Sanction(StoredObject):
    """Sanction class is a generic way to track approval states.

    A Sanction moves through a simple state machine: UNAPPROVED ->
    (APPROVED | REJECTED), with COMPLETED available as a further terminal
    state for sanctions (e.g. Embargo) that have a post-approval phase.
    Subclasses must implement approve/reject and the _on_* callbacks.
    """

    # Tell modularodm not to attach backends
    _meta = {
        'abstract': True,
    }

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))

    # Neither approved nor cancelled
    UNAPPROVED = 'unapproved'
    # Has approval
    APPROVED = 'approved'
    # Rejected by at least one person
    REJECTED = 'rejected'
    # Embargo has been completed
    COMPLETED = 'completed'

    state = fields.StringField(
        default=UNAPPROVED,
        validate=validators.choice_in((
            UNAPPROVED,
            APPROVED,
            REJECTED,
            COMPLETED,
        ))
    )

    DISPLAY_NAME = 'Sanction'
    # SHORT_NAME must correspond with the associated foreign field to query against,
    # e.g. Node.find_one(Q(sanction.SHORT_NAME, 'eq', sanction))
    SHORT_NAME = 'sanction'

    APPROVAL_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to approve this {DISPLAY_NAME}'
    APPROVAL_INVALID_TOKEN_MESSAGE = 'Invalid approval token provided for this {DISPLAY_NAME}.'
    # NOTE: attribute name is misspelled ('MESSAEGE') but is referenced by
    # this exact name elsewhere, so it is kept for backward compatibility.
    REJECTION_NOT_AUTHORIZED_MESSAEGE = 'This user is not authorized to reject this {DISPLAY_NAME}'
    REJECTION_INVALID_TOKEN_MESSAGE = 'Invalid rejection token provided for this {DISPLAY_NAME}.'

    # Controls whether or not the Sanction needs unanimous approval or just a single approval
    ANY = 'any'
    UNANIMOUS = 'unanimous'
    mode = UNANIMOUS

    initiation_date = fields.DateTimeField(auto_now_add=timezone.now)
    # Expiration date-- Sanctions in the UNAPPROVED state that are older than their end_date
    # are automatically made ACTIVE by a daily cron job
    # Use end_date=None for a non-expiring Sanction
    end_date = fields.DateTimeField(default=None)

    # Sanction subclasses must have an initiated_by field
    # initiated_by = fields.ForeignField('user', backref='initiated')

    # Expanded: Dictionary field mapping admin IDs their approval status and relevant tokens:
    # {
    #   'b3k97': {
    #     'has_approved': False,
    #     'approval_token': 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN',
    #     'rejection_token': 'TwozClTFOic2PYxHDStby94bCQMwJy'}
    # }
    approval_state = fields.DictionaryField()

    def __repr__(self):
        return '<Sanction(end_date={self.end_date!r}) with _id {self._id!r}>'.format(self=self)

    @property
    def is_pending_approval(self):
        """Whether this sanction is still awaiting a decision."""
        return self.state == Sanction.UNAPPROVED

    @property
    def is_approved(self):
        return self.state == Sanction.APPROVED

    @property
    def is_rejected(self):
        return self.state == Sanction.REJECTED

    def approve(self, user):
        raise NotImplementedError('Sanction subclasses must implement an approve method.')

    def reject(self, user):
        # Bug fix: the message previously said 'approve method' (copy-paste).
        raise NotImplementedError('Sanction subclasses must implement a reject method.')

    def _on_reject(self, user):
        """Callback for rejection of a Sanction

        :param User user:
        """
        raise NotImplementedError('Sanction subclasses must implement an #_on_reject method')

    def _on_complete(self, user):
        """Callback for when a Sanction has approval and enters the ACTIVE state

        :param User user:
        """
        raise NotImplementedError('Sanction subclasses must implement an #_on_complete method')

    def forcibly_reject(self):
        """Move to REJECTED without token or permission checks."""
        self.state = Sanction.REJECTED
class TokenApprovableSanction(Sanction):
    """Sanction whose per-user approvals/rejections are authorized by tokens."""

    # Tell modularodm not to attach backends
    _meta = {
        'abstract': True,
    }

    def _validate_authorizer(self, user):
        """Subclasses may choose to provide extra restrictions on who can be an authorizer

        :return Boolean: True if user is allowed to be an authorizer else False
        """
        return True

    def add_authorizer(self, user, node, approved=False, save=False):
        """Add an admin user to this Sanction's approval state.

        :param User user: User to add.
        :param Node registration: The pending registration node.
        :param bool approved: Whether `user` has approved.
        :param bool save: Whether to save this object.
        :return bool: True if the user was added, False if invalid or already present.
        """
        valid = self._validate_authorizer(user)
        if valid and user._id not in self.approval_state:
            # Each authorizer gets a pair of encoded tokens whose payload
            # binds (user, sanction, action), so a token is not reusable
            # across users, sanctions, or the approve/reject actions.
            self.approval_state[user._id] = {
                'has_approved': approved,
                'node_id': node._id,
                'approval_token': tokens.encode(
                    {
                        'user_id': user._id,
                        'sanction_id': self._id,
                        'action': 'approve_{}'.format(self.SHORT_NAME)
                    }
                ),
                'rejection_token': tokens.encode(
                    {
                        'user_id': user._id,
                        'sanction_id': self._id,
                        'action': 'reject_{}'.format(self.SHORT_NAME)
                    }
                ),
            }
            if save:
                self.save()
            return True
        return False

    def remove_authorizer(self, user, save=False):
        """Remove a user as an authorizer

        :param User user:
        :return Boolean: True if user is removed else False
        """
        if user._id not in self.approval_state:
            return False
        del self.approval_state[user._id]
        if save:
            self.save()
        return True

    def _on_approve(self, user, token):
        """Callback for when a single user approves a Sanction. Calls #_on_complete under two conditions:
        - mode is ANY and the Sanction has not already been cancelled
        - mode is UNANIMOUS and all users have given approval

        :param User user:
        :param str token: user's approval token
        """
        if self.mode == self.ANY or all(authorizer['has_approved'] for authorizer in self.approval_state.values()):
            self.state = Sanction.APPROVED
            self._on_complete(user)

    def token_for_user(self, user, method):
        """Return the user's stored token for the given action.

        :param str method: 'approval' | 'rejection'
        :raises PermissionsError: if `user` is not an authorizer.
        """
        try:
            user_state = self.approval_state[user._id]
        except KeyError:
            raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        return user_state['{0}_token'.format(method)]

    def approve(self, user, token):
        """Add user to approval list if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['approval_token'] != token:
                raise InvalidSanctionApprovalToken(self.APPROVAL_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        except KeyError:
            # KeyError means the user has no approval-state entry at all.
            raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        self.approval_state[user._id]['has_approved'] = True
        self._on_approve(user, token)

    def reject(self, user, token):
        """Cancels sanction if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['rejection_token'] != token:
                raise InvalidSanctionRejectionToken(self.REJECTION_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        except KeyError:
            raise PermissionsError(self.REJECTION_NOT_AUTHORIZED_MESSAEGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        self.state = Sanction.REJECTED
        self._on_reject(user)

    def _notify_authorizer(self, user, node):
        # Hook: notify a contributor who holds tokens; no-op by default.
        pass

    def _notify_non_authorizer(self, user, node):
        # Hook: notify a contributor without tokens; no-op by default.
        pass

    def ask(self, group):
        """Notify every contributor, routing by whether they hold tokens.

        :param list group: List of (user, node) tuples containing contributors to notify about the
        sanction.
        """
        for contrib, node in group:
            if contrib._id in self.approval_state:
                self._notify_authorizer(contrib, node)
            else:
                self._notify_non_authorizer(contrib, node)
class EmailApprovableSanction(TokenApprovableSanction):
    """TokenApprovableSanction that notifies users by email and stashes
    per-user view/approve/reject URLs for use outside a request context.
    """

    # Tell modularodm not to attach backends
    _meta = {
        'abstract': True,
    }

    # Mail templates supplied by subclasses; None means "not implemented" and
    # the corresponding _notify_* hook will raise NotImplementedError.
    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None

    VIEW_URL_TEMPLATE = ''
    APPROVE_URL_TEMPLATE = ''
    REJECT_URL_TEMPLATE = ''

    # A flag to conditionally run a callback on complete
    notify_initiator_on_complete = fields.BooleanField(default=False)

    # Store a persistent copy of urls for use when needed outside of a request context.
    # This field gets automagically updated whenever models approval_state is modified
    # and the model is saved
    # {
    #   'abcde': {
    #     'approve': [APPROVAL_URL],
    #     'reject': [REJECT_URL],
    #   }
    # }
    stashed_urls = fields.DictionaryField(default=dict)

    @staticmethod
    def _format_or_empty(template, context):
        """Format `template` with `context`, or return '' when context is falsy."""
        if context:
            return template.format(**context)
        return ''

    def _view_url(self, user_id, node):
        return self._format_or_empty(self.VIEW_URL_TEMPLATE, self._view_url_context(user_id, node))

    def _view_url_context(self, user_id, node):
        # Hook: return a dict of template params, or None for no URL.
        return None

    def _approval_url(self, user_id):
        return self._format_or_empty(self.APPROVE_URL_TEMPLATE, self._approval_url_context(user_id))

    def _approval_url_context(self, user_id):
        return None

    def _rejection_url(self, user_id):
        return self._format_or_empty(self.REJECT_URL_TEMPLATE, self._rejection_url_context(user_id))

    def _rejection_url_context(self, user_id):
        return None

    def _send_approval_request_email(self, user, template, context):
        mails.send_mail(
            user.username,
            template,
            user=user,
            **context
        )

    def _email_template_context(self, user, node, is_authorizer=False):
        # Hook: subclasses supply the keyword context for the mail template.
        return {}

    def _notify_authorizer(self, authorizer, node):
        context = self._email_template_context(authorizer, node, is_authorizer=True)
        if self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
            self._send_approval_request_email(authorizer, self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
        else:
            raise NotImplementedError

    def _notify_non_authorizer(self, user, node):
        context = self._email_template_context(user, node)
        if self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
            self._send_approval_request_email(user, self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
        else:
            raise NotImplementedError

    def add_authorizer(self, user, node, **kwargs):
        """Add an authorizer and stash their view/approve/reject URLs.

        Bug fix: propagate the base implementation's boolean result (True if
        the user was actually added) instead of implicitly returning None.
        URL stashing and the save are kept unconditional, as before.
        """
        added = super(EmailApprovableSanction, self).add_authorizer(user, node, **kwargs)
        self.stashed_urls[user._id] = {
            'view': self._view_url(user._id, node),
            'approve': self._approval_url(user._id),
            'reject': self._rejection_url(user._id)
        }
        self.save()
        return added

    def _notify_initiator(self):
        raise NotImplementedError

    def _on_complete(self, *args):
        if self.notify_initiator_on_complete:
            self._notify_initiator()
class PreregCallbackMixin(object):
    """Mixin adding Preregistration-Challenge e-mail behavior to sanctions."""

    def _notify_initiator(self):
        from website.project.model import DraftRegistration

        reg_node = self._get_registration()
        challenge_schema = prereg_utils.get_prereg_schema()
        # Look the draft up unconditionally so a missing draft surfaces
        # (NoResultsFound) regardless of the schema in use.
        draft = DraftRegistration.find_one(
            Q('registered_node', 'eq', reg_node)
        )
        if challenge_schema in reg_node.registered_schema:
            mails.send_mail(
                draft.initiator.username,
                mails.PREREG_CHALLENGE_ACCEPTED,
                user=draft.initiator,
                registration_url=reg_node.absolute_url,
                mimetype='html'
            )

    def _email_template_context(self, user, node, is_authorizer=False, urls=None):
        """Return extra mail-template context for prereg registrations."""
        is_prereg = prereg_utils.get_prereg_schema() in self._get_registration().registered_schema
        if not is_prereg:
            return {}
        return {
            'custom_message': ' as part of the Preregistration Challenge (https://cos.io/prereg)'
        }
class Embargo(PreregCallbackMixin, EmailApprovableSanction):
    """Embargo object for registrations waiting to go public."""

    DISPLAY_NAME = 'Embargo'
    SHORT_NAME = 'embargo'

    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    initiated_by = fields.ForeignField('user', backref='embargoed')
    # True when the embargo was placed on an already-existing registration,
    # False when the registration was created to be embargoed.
    for_existing_registration = fields.BooleanField(default=False)

    @property
    def is_completed(self):
        return self.state == self.COMPLETED

    @property
    def embargo_end_date(self):
        """End date while the embargo is active (APPROVED); False otherwise."""
        if self.state == self.APPROVED:
            return self.end_date
        return False

    # NOTE(hrybacki): Old, private registrations are grandfathered and do not
    # require to be made public or embargoed. This field differentiates them
    # from new registrations entering into an embargo field which should not
    # show up in any search related fields.
    @property
    def pending_registration(self):
        return not self.for_existing_registration and self.is_pending_approval

    def __repr__(self):
        from website.project.model import Node
        parent_registration = None
        try:
            parent_registration = Node.find_one(Q('embargo', 'eq', self))
        except NoResultsFound:
            pass
        return ('<Embargo(parent_registration={0}, initiated_by={1}, '
                'end_date={2}) with _id {3}>').format(
            parent_registration,
            self.initiated_by,
            self.end_date,
            self._id
        )

    def _get_registration(self):
        from website.project.model import Node
        return Node.find_one(Q('embargo', 'eq', self))

    def _view_url_context(self, user_id, node):
        registration = node or self._get_registration()
        return {
            'node_id': registration._id
        }

    def _approval_url_context(self, user_id):
        user_approval_state = self.approval_state.get(user_id, {})
        approval_token = user_approval_state.get('approval_token')
        if approval_token:
            registration = self._get_registration()
            node_id = user_approval_state.get('node_id', registration._id)
            return {
                'node_id': node_id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        user_approval_state = self.approval_state.get(user_id, {})
        rejection_token = user_approval_state.get('rejection_token')
        if rejection_token:
            from website.project.model import Node
            root_registration = self._get_registration()
            node_id = user_approval_state.get('node_id', root_registration._id)
            registration = Node.load(node_id)
            return {
                # Bug fix: interpolate the source node's id (a string) into the
                # URL template. Previously the Node object itself was passed,
                # unlike the parallel Retraction/RegistrationApproval methods.
                'node_id': registration.registered_from._id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, node, is_authorizer=False, urls=None):
        context = super(Embargo, self)._email_template_context(
            user,
            node,
            is_authorizer=is_authorizer
        )
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id, node))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Pending time is expressed to the template in hours.
            approval_time_span = settings.EMBARGO_PENDING_TIME.days * 24
            registration = self._get_registration()
            context.update({
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'approval_link': approval_link,
                'project_name': registration.title,
                'disapproval_link': disapproval_link,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
                'approval_time_span': approval_time_span,
            })
        else:
            context.update({
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
            })
        return context

    def _on_reject(self, user):
        """Log the cancellation and, for new registrations, tear down the tree."""
        from website.project.model import NodeLog
        parent_registration = self._get_registration()
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_CANCELLED,
            params={
                'node': parent_registration.registered_from_id,
                'registration': parent_registration._id,
                'embargo_id': self._id,
            },
            auth=Auth(user),
        )
        # If the registration only existed to be embargoed (not an existing
        # registration), remove the backref to the parent project and delete
        # the registration tree. (Merged two consecutive identical guards.)
        if not self.for_existing_registration:
            parent_registration.delete_registration_tree(save=True)
            parent_registration.registered_from = None
            parent_registration.is_deleted = True
            parent_registration.save()

    def disapprove_embargo(self, user, token):
        """Cancels embargo if user is admin and token verifies."""
        self.reject(user, token)

    def _on_complete(self, user):
        from website.project.model import NodeLog
        parent_registration = self._get_registration()
        if parent_registration.is_spammy:
            raise NodeStateError('Cannot complete a spammy registration.')
        super(Embargo, self)._on_complete(user)
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_APPROVED,
            params={
                'node': parent_registration.registered_from_id,
                'registration': parent_registration._id,
                'embargo_id': self._id,
            },
            auth=Auth(self.initiated_by),
        )
        self.save()

    def approve_embargo(self, user, token):
        """Add user to approval list if user is admin and token verifies."""
        self.approve(user, token)

    def mark_as_completed(self):
        self.state = Sanction.COMPLETED
        self.save()
class Retraction(EmailApprovableSanction):
    """
    Retraction object for public registrations.
    Externally (specifically in user-facing language) retractions should be referred to as "Withdrawals", i.e.
    "Retract Registration" -> "Withdraw Registration", "Retracted" -> "Withdrawn", etc.
    """
    DISPLAY_NAME = 'Retraction'
    SHORT_NAME = 'retraction'

    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    initiated_by = fields.ForeignField('user', backref='initiated')
    # Optional free-text reason for the withdrawal, capped at 2048 chars.
    justification = fields.StringField(default=None, validate=MaxLengthValidator(2048))

    def __repr__(self):
        from website.project.model import Node
        parent_registration = None
        try:
            parent_registration = Node.find_one(Q('retraction', 'eq', self))
        except NoResultsFound:
            pass
        return ('<Retraction(parent_registration={0}, initiated_by={1}) '
                'with _id {2}>').format(
            parent_registration,
            self.initiated_by,
            self._id
        )

    def _view_url_context(self, user_id, node):
        from website.project.model import Node
        registration = Node.find_one(Q('retraction', 'eq', self))
        return {
            'node_id': registration._id
        }

    def _approval_url_context(self, user_id):
        user_approval_state = self.approval_state.get(user_id, {})
        approval_token = user_approval_state.get('approval_token')
        if approval_token:
            from website.project.model import Node
            root_registration = Node.find_one(Q('retraction', 'eq', self))
            node_id = user_approval_state.get('node_id', root_registration._id)
            return {
                'node_id': node_id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        user_approval_state = self.approval_state.get(user_id, {})
        rejection_token = user_approval_state.get('rejection_token')
        if rejection_token:
            from website.project.model import Node
            root_registration = Node.find_one(Q('retraction', 'eq', self))
            node_id = user_approval_state.get('node_id', root_registration._id)
            registration = Node.load(node_id)
            # Rejection links point at the source node, not the registration.
            return {
                'node_id': registration.registered_from._id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, node, is_authorizer=False, urls=None):
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id, node))
        if is_authorizer:
            from website.project.model import Node
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Pending time is expressed to the template in hours.
            approval_time_span = settings.RETRACTION_PENDING_TIME.days * 24
            registration = Node.find_one(Q('retraction', 'eq', self))
            return {
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'project_name': registration.title,
                'registration_link': registration_link,
                'approval_link': approval_link,
                'disapproval_link': disapproval_link,
                'approval_time_span': approval_time_span,
            }
        else:
            return {
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
            }

    def _on_reject(self, user):
        from website.project.model import Node, NodeLog
        parent_registration = Node.find_one(Q('retraction', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.RETRACTION_CANCELLED,
            params={
                'node': parent_registration.registered_from_id,
                'registration': parent_registration._id,
                'retraction_id': self._id,
            },
            auth=Auth(user),
            save=True,
        )

    def _on_complete(self, user):
        from website.project.model import Node, NodeLog
        parent_registration = Node.find_one(Q('retraction', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.RETRACTION_APPROVED,
            params={
                'node': parent_registration.registered_from_id,
                'retraction_id': self._id,
                'registration': parent_registration._id
            },
            auth=Auth(self.initiated_by),
        )
        # Remove any embargoes associated with the registration
        if parent_registration.embargo_end_date or parent_registration.is_pending_embargo:
            parent_registration.embargo.state = self.REJECTED
            parent_registration.registered_from.add_log(
                action=NodeLog.EMBARGO_CANCELLED,
                params={
                    'node': parent_registration.registered_from_id,
                    'registration': parent_registration._id,
                    'embargo_id': parent_registration.embargo._id,
                },
                auth=Auth(self.initiated_by),
            )
            parent_registration.embargo.save()
        # Ensure retracted registration is public
        # Pass auth=None because the registration initiator may not be
        # an admin on components (component admins had the opportunity
        # to disapprove the retraction by this point)
        for node in parent_registration.node_and_primary_descendants():
            node.set_privacy('public', auth=None, save=True, log=False)
            node.update_search()
        parent_registration.date_modified = timezone.now()
        parent_registration.save()

    def approve_retraction(self, user, token):
        self.approve(user, token)

    def disapprove_retraction(self, user, token):
        self.reject(user, token)
class RegistrationApproval(PreregCallbackMixin, EmailApprovableSanction):
    """Approval process gating whether a new registration is made public."""

    DISPLAY_NAME = 'Approval'
    SHORT_NAME = 'registration_approval'

    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    initiated_by = fields.ForeignField('user', backref='registration_approved')

    def _get_registration(self):
        from website.project.model import Node
        return Node.find_one(Q('registration_approval', 'eq', self))

    def _view_url_context(self, user_id, node):
        user_approval_state = self.approval_state.get(user_id, {})
        node_id = user_approval_state.get('node_id', node._id)
        return {
            'node_id': node_id
        }

    def _approval_url_context(self, user_id):
        user_approval_state = self.approval_state.get(user_id, {})
        approval_token = user_approval_state.get('approval_token')
        if approval_token:
            registration = self._get_registration()
            node_id = user_approval_state.get('node_id', registration._id)
            return {
                'node_id': node_id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        user_approval_state = self.approval_state.get(user_id, {})
        # Consistency fix: reuse the state fetched above instead of doing a
        # second, redundant approval_state lookup (matches the sibling
        # _approval_url_context and the other sanction classes).
        rejection_token = user_approval_state.get('rejection_token')
        if rejection_token:
            from website.project.model import Node
            root_registration = self._get_registration()
            node_id = user_approval_state.get('node_id', root_registration._id)
            registration = Node.load(node_id)
            return {
                'node_id': registration.registered_from._id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, node, is_authorizer=False, urls=None):
        context = super(RegistrationApproval, self)._email_template_context(user, node, is_authorizer, urls)
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id, node))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Pending time is expressed to the template in hours.
            approval_time_span = settings.REGISTRATION_APPROVAL_TIME.days * 24
            registration = self._get_registration()
            context.update({
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'approval_link': approval_link,
                'disapproval_link': disapproval_link,
                'approval_time_span': approval_time_span,
                'project_name': registration.title,
            })
        else:
            context.update({
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
            })
        return context

    def _add_success_logs(self, node, user):
        """Log PROJECT_REGISTERED on `node`'s source project."""
        from website.project.model import NodeLog
        src = node.registered_from
        src.add_log(
            action=NodeLog.PROJECT_REGISTERED,
            params={
                'parent_node': src.parent_id,
                'node': src._primary_key,
                'registration': node._primary_key,
            },
            auth=Auth(user),
            save=False
        )
        src.save()

    def _on_complete(self, user):
        from website.project.model import NodeLog
        register = self._get_registration()
        if register.is_spammy:
            # Typo fix: message previously read 'a a spammy registration'.
            raise NodeStateError('Cannot approve a spammy registration')
        super(RegistrationApproval, self)._on_complete(user)
        self.state = Sanction.APPROVED
        registered_from = register.registered_from
        # Pass auth=None because the registration initiator may not be
        # an admin on components (component admins had the opportunity
        # to disapprove the registration by this point)
        register.set_privacy('public', auth=None, log=False)
        for child in register.get_descendants_recursive(lambda n: n.primary):
            child.set_privacy('public', auth=None, log=False)
        # Accounts for system actions where no `User` performs the final approval
        auth = Auth(user) if user else None
        registered_from.add_log(
            action=NodeLog.REGISTRATION_APPROVAL_APPROVED,
            params={
                'node': registered_from._id,
                'registration': register._id,
                'registration_approval_id': self._id,
            },
            auth=auth,
        )
        for node in register.root.node_and_primary_descendants():
            self._add_success_logs(node, user)
            node.update_search()  # update search if public
        self.save()

    def _on_reject(self, user):
        from website.project.model import NodeLog
        register = self._get_registration()
        registered_from = register.registered_from
        register.delete_registration_tree(save=True)
        registered_from.add_log(
            action=NodeLog.REGISTRATION_APPROVAL_CANCELLED,
            params={
                'node': registered_from._id,
                'registration': register._id,
                'registration_approval_id': self._id,
            },
            auth=Auth(user),
        )
class DraftRegistrationApproval(Sanction):
    """Approval flow for draft registrations (e.g. the Prereg Challenge).

    Unlike token-based sanctions, any single reviewer decision suffices
    (mode = ANY) and reviewers are authorized by system tag, not token.
    """

    mode = Sanction.ANY

    # Since draft registrations that require approval are not immediately registered,
    # meta stores registration_choice and embargo_end_date (when applicable)
    meta = fields.DictionaryField(default=dict)

    def _send_rejection_email(self, user, draft):
        """Notify the draft initiator that the draft was not approved."""
        schema = draft.registration_schema
        prereg_schema = prereg_utils.get_prereg_schema()
        if schema._id == prereg_schema._id:
            mails.send_mail(
                user.username,
                mails.PREREG_CHALLENGE_REJECTED,
                user=user,
                draft_url=draft.absolute_url
            )
        else:
            raise NotImplementedError(
                'TODO: add a generic email template for registration approvals'
            )

    def approve(self, user):
        """Approve the draft; only users with the prereg admin tag may do so."""
        if settings.PREREG_ADMIN_TAG not in user.system_tags:
            raise PermissionsError('This user does not have permission to approve this draft.')
        self.state = Sanction.APPROVED
        self._on_complete(user)

    def reject(self, user):
        """Reject the draft; only users with the prereg admin tag may do so."""
        if settings.PREREG_ADMIN_TAG not in user.system_tags:
            # Bug fix: message previously said 'approve' on the reject path.
            raise PermissionsError('This user does not have permission to reject this draft.')
        self.state = Sanction.REJECTED
        self._on_reject(user)

    def _on_complete(self, user):
        """Register the draft and start the follow-up sanction.

        :raises ValueError: if meta['registration_choice'] is not
            'immediate' or 'embargo'.
        """
        from website.project.model import DraftRegistration
        draft = DraftRegistration.find_one(
            Q('approval', 'eq', self)
        )
        auth = Auth(draft.initiator)
        registration = draft.register(
            auth=auth,
            save=True
        )
        registration_choice = self.meta['registration_choice']
        # Build the follow-up sanction lazily so both branches share one
        # call site below.
        if registration_choice == 'immediate':
            sanction = functools.partial(registration.require_approval, draft.initiator)
        elif registration_choice == 'embargo':
            sanction = functools.partial(
                registration.embargo_registration,
                draft.initiator,
                parse_date(self.meta.get('embargo_end_date'), ignoretz=True)
            )
        else:
            raise ValueError("'registration_choice' must be either 'embargo' or 'immediate'")
        sanction(notify_initiator_on_complete=True)

    def _on_reject(self, user, *args, **kwargs):
        from website.project.model import DraftRegistration
        # clear out previous registration options
        self.meta = {}
        self.save()
        draft = DraftRegistration.find_one(
            Q('approval', 'eq', self)
        )
        self._send_rejection_email(draft.initiator, draft)
class EmbargoTerminationApproval(EmailApprovableSanction):
    """Sanction gathering admin approvals to end a registration embargo early."""
    DISPLAY_NAME = 'Embargo Termination Request'
    SHORT_NAME = 'embargo_termination_approval'
    # template for admins who may approve/reject vs. other contributors
    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_TERMINATION_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_TERMINATION_NON_ADMIN
    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    # approve and reject share one URL shape; the token distinguishes the action
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    # the registration whose embargo this request would terminate
    embargoed_registration = fields.ForeignField('node')
    def _get_registration(self):
        # The registration node whose embargo is being terminated.
        return self.embargoed_registration
def _view_url_context(self, user_id, node):
registration = node or self._get_registration()
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
user_approval_state = self.approval_state.get(user_id, {})
approval_token = user_approval_state.get('approval_token')
if approval_token:
registration = self._get_registration()
node_id = user_approval_state.get('node_id', registration._id)
return {
'node_id': node_id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
user_approval_state = self.approval_state.get(user_id, {})
rejection_token = user_approval_state.get('rejection_token')
if rejection_token:
root_registration = self._get_registration()
node_id = user_approval_state.get('node_id', root_registration._id)
return {
'node_id': node_id,
'token': rejection_token,
}
    def _email_template_context(self, user, node, is_authorizer=False, urls=None):
        """Build template variables for the pending-termination emails.

        Authorizing admins additionally receive approve/disapprove links
        and the approval time window; other contributors get a read-only
        summary of the request.
        """
        context = super(EmbargoTerminationApproval, self)._email_template_context(
            user,
            node,
            is_authorizer=is_authorizer
        )
        # fall back to URLs previously stashed for this user if none passed in
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id, node))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # the pending window is communicated to the template in hours
            approval_time_span = settings.EMBARGO_TERMINATION_PENDING_TIME.days * 24
            registration = self._get_registration()
            context.update({
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'approval_link': approval_link,
                'project_name': registration.title,
                'disapproval_link': disapproval_link,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
                'approval_time_span': approval_time_span,
            })
        else:
            context.update({
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
            })
        return context
def _on_complete(self, user=None):
super(EmbargoTerminationApproval, self)._on_complete(user)
registration = self._get_registration()
registration.terminate_embargo(Auth(user) if user else None)
    def _on_reject(self, user=None):
        # Just forget this ever happened.
        # Detach the rejected termination request from the registration.
        self.embargoed_registration.embargo_termination_approval = None
| |
#!/usr/bin/env python
# Copyright (c) 2006-2008, David Allouche, Jp Calderone, Itamar Shtull-Trauring,
# Johan Dahlin, Olivier Grisel <olivier.grisel@ensta.org>
#
# Send maintenance requests needing new pypi packages to:
# Peter Waller <p@pwaller.net>
# https://github.com/pwaller/pyprof2calltree
#
# See CONTRIBUTORS.txt.
#
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""pyprof2calltree: profiling output which is readable by kcachegrind
This script can either take raw cProfile.Profile.getstats() log entries or
take a previously recorded instance of the pstats.Stats class.
"""
import cProfile
import optparse
import os
import pstats
import subprocess
import sys
import tempfile
from collections import defaultdict
__all__ = ['convert', 'visualize', 'CalltreeConverter']
SCALE = 1e9
class Code(object):
    """Stand-in for a real code object, carrying only the ``co_*``
    attributes that ``cProfile.label`` inspects."""
    def __init__(self, filename, firstlineno, name):
        # mirror the attribute names of a genuine code object
        self.co_filename = filename
        self.co_firstlineno = firstlineno
        self.co_name = name
    def __repr__(self):
        fields = (self.co_filename, self.co_firstlineno, self.co_name)
        return '<Code: %s, %s, %s>' % fields
class Entry(object):
    """Stand-in for a cProfile profiler entry for one profiled function."""
    def __init__(self, code, callcount, reccallcount, inlinetime, totaltime, calls):
        self.code = code                  # Code object (or builtin name string)
        self.callcount = callcount
        self.reccallcount = reccallcount
        self.inlinetime = inlinetime
        self.totaltime = totaltime
        self.calls = calls                # list of Subentry callees
    def __repr__(self):
        fields = (self.code, self.callcount, self.reccallcount,
                  self.inlinetime, self.totaltime, self.calls)
        return '<Entry: %s, %s, %s, %s, %s, %s>' % fields
class Subentry(object):
    """Stand-in for a cProfile sub-entry: one caller->callee aggregate."""
    def __init__(self, code, callcount, reccallcount, inlinetime, totaltime):
        self.code = code
        self.callcount = callcount
        self.reccallcount = reccallcount
        self.inlinetime = inlinetime
        self.totaltime = totaltime
    def __repr__(self):
        fields = (self.code, self.callcount, self.reccallcount,
                  self.inlinetime, self.totaltime)
        return '<Subentry: %s, %s, %s, %s, %s>' % fields
def is_basestring(s):
    """Return True if *s* is a byte or text string on Python 2 and 3."""
    try:
        # Python 2.x: basestring covers both str and unicode.
        return isinstance(s, basestring)
    except NameError:
        # Python 3.x: basestring is gone; check str and bytes explicitly.
        # (The original also bound an unused local `u = unicode` purely to
        # trigger this NameError; looking up `basestring` does the same.)
        return isinstance(s, (str, bytes))
def pstats2entries(data):
    """Helper to convert serialized pstats back to a list of raw entries.
    Converse operation of cProfile.Profile.snapshot_stats()

    :param data: a pstats.Stats instance
    :returns: list of Entry objects (with .calls populated by Subentry)
    """
    # Each entry's key is a tuple of (filename, line number, function name)
    entries = dict()
    allcallers = dict()
    # first pass over stats to build the list of entry instances
    for code_info, call_info in list(data.stats.items()):
        # build a fake code object
        code = Code(*code_info)
        # build a fake entry object. entry.calls will be filled during the
        # second pass over stats
        # pstats values are (cc, nc, tt, ct, callers) — presumably primitive
        # calls, total calls, inline time, cumulative time; see pstats docs
        cc, nc, tt, ct, callers = call_info
        entry = Entry(code, callcount=cc, reccallcount=nc - cc, inlinetime=tt,
                      totaltime=ct, calls=list())
        # collect the new entry
        entries[code_info] = entry
        allcallers[code_info] = list(callers.items())
    # second pass of stats to plug callees into callers
    for entry in entries.values():
        # cProfile.label() on our fake Code yields the same
        # (filename, lineno, name) tuple used as keys above
        entry_label = cProfile.label(entry.code)
        entry_callers = allcallers.get(entry_label, [])
        for entry_caller, call_info in entry_callers:
            cc, nc, tt, ct = call_info
            subentry = Subentry(entry.code, callcount=cc, reccallcount=nc - cc,
                                inlinetime=tt, totaltime=ct)
            # entry_caller has the same form as code_info
            entries[entry_caller].calls.append(subentry)
    return list(entries.values())
def is_installed(prog):
    """Return whether or not a given executable is installed on the machine.

    Runs ``which prog`` with stdout discarded and reports success.
    """
    # Context manager guarantees the devnull handle is closed even if
    # subprocess.call raises (the original leaked it on that path).
    with open(os.devnull, 'w') as devnull:
        retcode = subprocess.call(['which', prog], stdout=devnull)
    return retcode == 0
def _entry_sort_key(entry):
return cProfile.label(entry.code)
KCACHEGRIND_EXECUTABLES = ["kcachegrind", "qcachegrind"]
class CalltreeConverter(object):
    """Convert raw cProfile or pstats data to the calltree format

    Accepts a pstats dump filename, a pstats.Stats instance, or a raw
    list of cProfile entries, and writes kcachegrind-readable output
    with costs expressed in nanoseconds (times scaled by SCALE).
    """
    def __init__(self, profiling_data):
        if is_basestring(profiling_data):
            # treat profiling_data as a filename of pstats serialized data
            self.entries = pstats2entries(pstats.Stats(profiling_data))
        elif isinstance(profiling_data, pstats.Stats):
            # convert pstats data to cProfile list of entries
            self.entries = pstats2entries(profiling_data)
        else:
            # assume this are direct cProfile entries
            self.entries = profiling_data
        self.out_file = None
        # (filename, funcname) -> set of code objects; used to detect
        # same-named functions that need line-number disambiguation
        self._code_by_position = defaultdict(set)
        self._populate_code_by_position()
    def _populate_code_by_position(self):
        """Index every code object (entries and their callees) by position."""
        for entry in self.entries:
            self._add_code_by_position(entry.code)
            if not entry.calls:
                continue
            for subentry in entry.calls:
                self._add_code_by_position(subentry.code)
    def _add_code_by_position(self, code):
        """Record *code* under its (filename, funcname) position key."""
        co_filename, _, co_name = cProfile.label(code)
        self._code_by_position[(co_filename, co_name)].add(code)
    def munged_function_name(self, code):
        """Return the function name, suffixed with ":lineno" only when
        several distinct code objects share the same file and name."""
        co_filename, co_firstlineno, co_name = cProfile.label(code)
        if len(self._code_by_position[(co_filename, co_name)]) == 1:
            return co_name
        return "%s:%d" % (co_name, co_firstlineno)
    def output(self, out_file):
        """Write the converted entries to out_file"""
        self.out_file = out_file
        # declare the cost unit before any cost lines
        out_file.write('event: ns : Nanoseconds\n')
        out_file.write('events: ns\n')
        self._output_summary()
        for entry in sorted(self.entries, key=_entry_sort_key):
            self._output_entry(entry)
    def visualize(self):
        """Launch kcachegrind on the converted entries.
        One of the executables listed in KCACHEGRIND_EXECUTABLES
        must be present in the system path.
        """
        if self.out_file is None:
            # nothing written yet: dump to a temporary file first
            _, outfile = tempfile.mkstemp(".log", "pyprof2calltree")
            f = open(outfile, "w")
            self.output(f)
            use_temp_file = True
        else:
            use_temp_file = False
        available_cmd = None
        for cmd in KCACHEGRIND_EXECUTABLES:
            if is_installed(cmd):
                available_cmd = cmd
                break
        if available_cmd is None:
            sys.stderr.write("Could not find kcachegrind. Tried: %s\n" %
                             ", ".join(KCACHEGRIND_EXECUTABLES))
            return
        # flush pending output before handing the file to kcachegrind
        self.out_file.close()
        try:
            # `cmd` still holds the executable found by the loop above
            subprocess.call([cmd, self.out_file.name])
        finally:
            # clean the temporary file
            if use_temp_file:
                os.remove(outfile)
                self.out_file = None
    def _output_summary(self):
        """Write the `summary:` header line (max single-function cost)."""
        max_cost = 0
        for entry in self.entries:
            totaltime = int(entry.totaltime * SCALE)
            max_cost = max(max_cost, totaltime)
        # Version 0.7.4 of kcachegrind appears to ignore the summary line and
        # calculate the total cost by summing the exclusive cost of all
        # functions, but it doesn't hurt to output it anyway.
        self.out_file.write('summary: %d\n' % (max_cost,))
    def _output_entry(self, entry):
        """Write one function's inclusive cost and its callee records."""
        out_file = self.out_file
        code = entry.code
        co_filename, co_firstlineno, co_name = cProfile.label(code)
        munged_name = self.munged_function_name(code)
        out_file.write('fl=%s\nfn=%s\n' % (co_filename, munged_name))
        inlinetime = int(entry.inlinetime * SCALE)
        # the function's own (exclusive) cost is attributed to its first line
        out_file.write('%d %d\n' % (co_firstlineno, inlinetime))
        # recursive calls are counted in entry.calls
        if entry.calls:
            for subentry in sorted(entry.calls, key=_entry_sort_key):
                self._output_subentry(co_firstlineno, subentry.code,
                                      subentry.callcount,
                                      int(subentry.totaltime * SCALE))
        out_file.write('\n')
    def _output_subentry(self, lineno, code, callcount, totaltime):
        """Write one call record (cfl/cfn/calls/cost) for a callee."""
        out_file = self.out_file
        co_filename, co_firstlineno, co_name = cProfile.label(code)
        munged_name = self.munged_function_name(code)
        out_file.write('cfl=%s\ncfn=%s\n' % (co_filename, munged_name))
        out_file.write('calls=%d %d\n' % (callcount, co_firstlineno))
        out_file.write('%d %d\n' % (lineno, totaltime))
def main():
    """Execute the converter using parameters provided on the command line.

    Exactly one of ``-i`` (read a pstats dump) or ``-r`` (profile a script)
    is required; ``-o`` picks the output path and ``-k`` launches
    kcachegrind on the result.
    """
    usage = ("%s [-k] [-o output_file_path] [-i input_file_path]"
             " [-r scriptfile [args]]")
    parser = optparse.OptionParser(usage=usage % sys.argv[0])
    parser.allow_interspersed_args = False
    parser.add_option('-o', '--outfile', dest="outfile",
                      help="Save calltree stats to <outfile>", default=None)
    parser.add_option('-i', '--infile', dest="infile",
                      help="Read python stats from <infile>", default=None)
    parser.add_option('-r', '--run-script', dest="script",
                      help="Name of the python script to run to collect"
                      " profiling data", default=None)
    parser.add_option('-k', '--kcachegrind', dest="kcachegrind",
                      help="Run the kcachegrind tool on the converted data",
                      action="store_true")
    options, args = parser.parse_args()
    outfile = options.outfile
    if options.script is not None:
        # collect profiling data by running the given script
        sys.argv[:] = [options.script] + args
        if not options.outfile:
            outfile = '%s.log' % os.path.basename(options.script)
        prof = cProfile.Profile()
        # Try to deal with programs (e.g., bzr) that avoid sys.exit(),
        # but still run atexit handlers.
        import atexit
        atexit.register(exit)
        try:
            try:
                # Compile and exec the script ourselves: execfile() no longer
                # exists on Python 3, and this form works on both 2 and 3.
                with open(sys.argv[0]) as script_file:
                    code = compile(script_file.read(), sys.argv[0], 'exec')
                # a single dict serves as both globals and locals so the
                # script runs with normal module semantics
                namespace = {'__name__': '__main__', '__file__': sys.argv[0]}
                prof = prof.runctx('exec(code, namespace)',
                                   {'code': code, 'namespace': namespace}, {})
            except SystemExit:
                pass
        finally:
            kg = CalltreeConverter(pstats.Stats(prof))
    elif options.infile is not None:
        # use the profiling data from some input file
        if not options.outfile:
            outfile = '%s.log' % os.path.basename(options.infile)
        if options.infile == outfile:
            # prevent name collisions by appending another extension
            outfile += ".log"
        kg = CalltreeConverter(pstats.Stats(options.infile))
    else:
        # at least an input file or a script to run is required
        parser.print_usage()
        sys.exit(2)
    if options.outfile is not None or not options.kcachegrind:
        # user either explicitly required output file or requested by not
        # explicitly asking to launch kcachegrind
        sys.stderr.write("writing converted data to: %s\n" % outfile)
        # close the output handle deterministically (was left to the GC)
        with open(outfile, 'w') as out:
            kg.output(out)
    if options.kcachegrind:
        sys.stderr.write("launching kcachegrind\n")
        kg.visualize()
def visualize(profiling_data):
    """launch the kcachegrind on `profiling_data`
    `profiling_data` can either be:
    - a pstats.Stats instance
    - the filename of a pstats.Stats dump
    - the result of a call to cProfile.Profile.getstats()
    """
    CalltreeConverter(profiling_data).visualize()
def convert(profiling_data, outputfile):
    """convert `profiling_data` to calltree format and dump it to `outputfile`
    `profiling_data` can either be:
    - a pstats.Stats instance
    - the filename of a pstats.Stats dump
    - the result of a call to cProfile.Profile.getstats()
    `outputfile` can either be:
    - a file() instance open in write mode
    - a filename
    """
    converter = CalltreeConverter(profiling_data)
    if not is_basestring(outputfile):
        # caller supplied an open stream; caller keeps ownership of it
        converter.output(outputfile)
        return
    # filename: open, write, and always close it
    with open(outputfile, "w") as stream:
        converter.output(stream)
# Allow use both as a library (convert/visualize) and as a CLI tool.
if __name__ == '__main__':
    sys.exit(main())
| |
"""The tests for the generic_thermostat."""
import datetime
from os import path
import pytest
import pytz
import voluptuous as vol
from homeassistant import config as hass_config
from homeassistant.components import input_boolean, switch
from homeassistant.components.climate.const import (
ATTR_PRESET_MODE,
DOMAIN,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
)
from homeassistant.components.generic_thermostat import (
DOMAIN as GENERIC_THERMOSTAT_DOMAIN,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
SERVICE_RELOAD,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
import homeassistant.core as ha
from homeassistant.core import DOMAIN as HASS_DOMAIN, CoreState, State, callback
from homeassistant.setup import async_setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
from tests.async_mock import patch
from tests.common import (
assert_setup_component,
async_fire_time_changed,
mock_restore_cache,
)
from tests.components.climate import common
ENTITY = "climate.test"
ENT_SENSOR = "sensor.test"
ENT_SWITCH = "switch.test"
HEAT_ENTITY = "climate.test_heat"
COOL_ENTITY = "climate.test_cool"
ATTR_AWAY_MODE = "away_mode"
MIN_TEMP = 3.0
MAX_TEMP = 65.0
TARGET_TEMP = 42.0
COLD_TOLERANCE = 0.5
HOT_TOLERANCE = 0.5
async def test_setup_missing_conf(hass):
    """Platform setup is rejected when required config keys are absent."""
    incomplete = {
        "platform": "generic_thermostat",
        "name": "test",
        "target_sensor": ENT_SENSOR,
    }
    # "heater" is missing, so zero climate entities should be set up
    with assert_setup_component(0):
        await async_setup_component(hass, "climate", {"climate": incomplete})
async def test_valid_conf(hass):
    """A minimal valid configuration sets up successfully."""
    config = {
        "climate": {
            "platform": "generic_thermostat",
            "name": "test",
            "heater": ENT_SWITCH,
            "target_sensor": ENT_SENSOR,
        }
    }
    assert await async_setup_component(hass, "climate", config)
@pytest.fixture
async def setup_comp_1(hass):
    """Set up the homeassistant core component with metric units."""
    hass.config.units = METRIC_SYSTEM
    assert await async_setup_component(hass, "homeassistant", {})
    await hass.async_block_till_done()
async def test_heater_input_boolean(hass, setup_comp_1):
    """An input_boolean can serve as the heater switch."""
    heater_switch = "input_boolean.test"
    assert await async_setup_component(
        hass, input_boolean.DOMAIN, {"input_boolean": {"test": None}}
    )
    thermostat_config = {
        "platform": "generic_thermostat",
        "name": "test",
        "heater": heater_switch,
        "target_sensor": ENT_SENSOR,
        "initial_hvac_mode": HVAC_MODE_HEAT,
    }
    assert await async_setup_component(hass, DOMAIN, {"climate": thermostat_config})
    await hass.async_block_till_done()
    assert hass.states.get(heater_switch).state == STATE_OFF
    # temperature below target -> thermostat must switch the boolean on
    _setup_sensor(hass, 18)
    await hass.async_block_till_done()
    await common.async_set_temperature(hass, 23)
    assert hass.states.get(heater_switch).state == STATE_ON
async def test_heater_switch(hass, setup_comp_1):
    """A test-platform switch can serve as the heater switch."""
    platform = getattr(hass.components, "test.switch")
    platform.init()
    switch_1 = platform.ENTITIES[1]
    assert await async_setup_component(
        hass, switch.DOMAIN, {"switch": {"platform": "test"}}
    )
    await hass.async_block_till_done()
    heater_switch = switch_1.entity_id
    thermostat_config = {
        "platform": "generic_thermostat",
        "name": "test",
        "heater": heater_switch,
        "target_sensor": ENT_SENSOR,
        "initial_hvac_mode": HVAC_MODE_HEAT,
    }
    assert await async_setup_component(hass, DOMAIN, {"climate": thermostat_config})
    await hass.async_block_till_done()
    assert hass.states.get(heater_switch).state == STATE_OFF
    # temperature below target -> thermostat must turn the switch on
    _setup_sensor(hass, 18)
    await common.async_set_temperature(hass, 23)
    await hass.async_block_till_done()
    assert hass.states.get(heater_switch).state == STATE_ON
def _setup_sensor(hass, temp):
    """Set up the test sensor."""
    # publish *temp* (may be None) as the current state of the test sensor
    hass.states.async_set(ENT_SENSOR, temp)
@pytest.fixture
async def setup_comp_2(hass):
    """Set up a heating thermostat with away preset and tolerances."""
    hass.config.units = METRIC_SYSTEM
    config = {
        "platform": "generic_thermostat",
        "name": "test",
        "cold_tolerance": 2,
        "hot_tolerance": 4,
        "heater": ENT_SWITCH,
        "target_sensor": ENT_SENSOR,
        "away_temp": 16,
        "initial_hvac_mode": HVAC_MODE_HEAT,
    }
    assert await async_setup_component(hass, DOMAIN, {"climate": config})
    await hass.async_block_till_done()
async def test_setup_defaults_to_unknown(hass):
    """Without an initial_hvac_mode the thermostat starts in OFF."""
    hass.config.units = METRIC_SYSTEM
    config = {
        "platform": "generic_thermostat",
        "name": "test",
        "cold_tolerance": 2,
        "hot_tolerance": 4,
        "heater": ENT_SWITCH,
        "target_sensor": ENT_SENSOR,
        "away_temp": 16,
    }
    await async_setup_component(hass, DOMAIN, {"climate": config})
    await hass.async_block_till_done()
    assert hass.states.get(ENTITY).state == HVAC_MODE_OFF
async def test_setup_gets_current_temp_from_sensor(hass):
    """The sensor value is picked up as soon as the entity is added."""
    hass.config.units = METRIC_SYSTEM
    # sensor state must exist before the thermostat is set up
    _setup_sensor(hass, 18)
    await hass.async_block_till_done()
    config = {
        "platform": "generic_thermostat",
        "name": "test",
        "cold_tolerance": 2,
        "hot_tolerance": 4,
        "heater": ENT_SWITCH,
        "target_sensor": ENT_SENSOR,
        "away_temp": 16,
    }
    await async_setup_component(hass, DOMAIN, {"climate": config})
    await hass.async_block_till_done()
    assert hass.states.get(ENTITY).attributes["current_temperature"] == 18
async def test_default_setup_params(hass, setup_comp_2):
    """Default min/max/target temperatures are applied at setup."""
    attributes = hass.states.get(ENTITY).attributes
    assert attributes.get("min_temp") == 7
    assert attributes.get("max_temp") == 35
    assert attributes.get("temperature") == 7
async def test_get_hvac_modes(hass, setup_comp_2):
    """The operation list contains exactly HEAT and OFF."""
    state = hass.states.get(ENTITY)
    assert state.attributes.get("hvac_modes") == [HVAC_MODE_HEAT, HVAC_MODE_OFF]
async def test_set_target_temp(hass, setup_comp_2):
    """Setting the target temperature updates state; None is rejected."""
    await common.async_set_temperature(hass, 30)
    assert hass.states.get(ENTITY).attributes.get("temperature") == 30.0
    # an invalid (None) temperature must be rejected and leave state untouched
    with pytest.raises(vol.Invalid):
        await common.async_set_temperature(hass, None)
    assert hass.states.get(ENTITY).attributes.get("temperature") == 30.0
async def test_set_away_mode(hass, setup_comp_2):
    """Enabling the away preset switches to the configured away temp."""
    await common.async_set_temperature(hass, 23)
    await common.async_set_preset_mode(hass, PRESET_AWAY)
    assert hass.states.get(ENTITY).attributes.get("temperature") == 16
async def test_set_away_mode_and_restore_prev_temp(hass, setup_comp_2):
    """Leaving the away preset restores the previous target temperature."""
    await common.async_set_temperature(hass, 23)
    await common.async_set_preset_mode(hass, PRESET_AWAY)
    assert hass.states.get(ENTITY).attributes.get("temperature") == 16
    await common.async_set_preset_mode(hass, PRESET_NONE)
    assert hass.states.get(ENTITY).attributes.get("temperature") == 23
async def test_set_away_mode_twice_and_restore_prev_temp(hass, setup_comp_2):
    """Setting the away preset twice still restores the original target."""
    await common.async_set_temperature(hass, 23)
    # repeat the preset change; the stored previous temp must not be clobbered
    await common.async_set_preset_mode(hass, PRESET_AWAY)
    await common.async_set_preset_mode(hass, PRESET_AWAY)
    assert hass.states.get(ENTITY).attributes.get("temperature") == 16
    await common.async_set_preset_mode(hass, PRESET_NONE)
    assert hass.states.get(ENTITY).attributes.get("temperature") == 23
async def test_sensor_bad_value(hass, setup_comp_2):
    """A None sensor state leaves the last known temperature in place."""
    previous_temp = hass.states.get(ENTITY).attributes.get("current_temperature")
    _setup_sensor(hass, None)
    await hass.async_block_till_done()
    current = hass.states.get(ENTITY).attributes.get("current_temperature")
    assert current == previous_temp
async def test_sensor_unknown(hass):
    """An unknown target sensor yields no current temperature."""
    hass.states.async_set("sensor.unknown", STATE_UNKNOWN)
    config = {
        "platform": "generic_thermostat",
        "name": "unknown",
        "heater": ENT_SWITCH,
        "target_sensor": "sensor.unknown",
    }
    assert await async_setup_component(hass, "climate", {"climate": config})
    await hass.async_block_till_done()
    state = hass.states.get("climate.unknown")
    assert state.attributes.get("current_temperature") is None
async def test_sensor_unavailable(hass):
    """An unavailable target sensor yields no current temperature."""
    hass.states.async_set("sensor.unavailable", STATE_UNAVAILABLE)
    config = {
        "platform": "generic_thermostat",
        "name": "unavailable",
        "heater": ENT_SWITCH,
        "target_sensor": "sensor.unavailable",
    }
    assert await async_setup_component(hass, "climate", {"climate": config})
    await hass.async_block_till_done()
    state = hass.states.get("climate.unavailable")
    assert state.attributes.get("current_temperature") is None
async def test_set_target_temp_heater_on(hass, setup_comp_2):
    """Raising the target above the current temp turns the heater on."""
    service_calls = _setup_switch(hass, False)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    await common.async_set_temperature(hass, 30)
    assert len(service_calls) == 1
    turn_on = service_calls[0]
    assert turn_on.domain == HASS_DOMAIN
    assert turn_on.service == SERVICE_TURN_ON
    assert turn_on.data["entity_id"] == ENT_SWITCH
async def test_set_target_temp_heater_off(hass, setup_comp_2):
    """Lowering the target below the current temp turns the heater off."""
    service_calls = _setup_switch(hass, True)
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    await common.async_set_temperature(hass, 25)
    assert len(service_calls) == 2
    turn_off = service_calls[0]
    assert turn_off.domain == HASS_DOMAIN
    assert turn_off.service == SERVICE_TURN_OFF
    assert turn_off.data["entity_id"] == ENT_SWITCH
async def test_temp_change_heater_on_within_tolerance(hass, setup_comp_2):
    """A drop inside the cold tolerance does not turn the heater on."""
    service_calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 30)
    # 29 is within the cold_tolerance of 2 below the target of 30
    _setup_sensor(hass, 29)
    await hass.async_block_till_done()
    assert len(service_calls) == 0
async def test_temp_change_heater_on_outside_tolerance(hass, setup_comp_2):
    """A drop beyond the cold tolerance turns the heater on."""
    service_calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 30)
    _setup_sensor(hass, 27)
    await hass.async_block_till_done()
    assert len(service_calls) == 1
    turn_on = service_calls[0]
    assert turn_on.domain == HASS_DOMAIN
    assert turn_on.service == SERVICE_TURN_ON
    assert turn_on.data["entity_id"] == ENT_SWITCH
async def test_temp_change_heater_off_within_tolerance(hass, setup_comp_2):
    """A rise inside the hot tolerance does not turn the heater off."""
    service_calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 30)
    # 33 is within the hot_tolerance of 4 above the target of 30
    _setup_sensor(hass, 33)
    await hass.async_block_till_done()
    assert len(service_calls) == 0
async def test_temp_change_heater_off_outside_tolerance(hass, setup_comp_2):
    """A rise beyond the hot tolerance turns the heater off."""
    service_calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 30)
    _setup_sensor(hass, 35)
    await hass.async_block_till_done()
    assert len(service_calls) == 1
    turn_off = service_calls[0]
    assert turn_off.domain == HASS_DOMAIN
    assert turn_off.service == SERVICE_TURN_OFF
    assert turn_off.data["entity_id"] == ENT_SWITCH
async def test_running_when_hvac_mode_is_off(hass, setup_comp_2):
    """Switching HVAC mode to OFF turns the running heater off."""
    service_calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 30)
    await common.async_set_hvac_mode(hass, HVAC_MODE_OFF)
    assert len(service_calls) == 1
    turn_off = service_calls[0]
    assert turn_off.domain == HASS_DOMAIN
    assert turn_off.service == SERVICE_TURN_OFF
    assert turn_off.data["entity_id"] == ENT_SWITCH
async def test_no_state_change_when_hvac_mode_off(hass, setup_comp_2):
    """With HVAC mode OFF, sensor changes do not turn the heater on."""
    service_calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 30)
    await common.async_set_hvac_mode(hass, HVAC_MODE_OFF)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    assert len(service_calls) == 0
async def test_hvac_mode_heat(hass, setup_comp_2):
    """Switching OFF -> HEAT turns the heater on when below the setpoint."""
    await common.async_set_hvac_mode(hass, HVAC_MODE_OFF)
    await common.async_set_temperature(hass, 30)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    service_calls = _setup_switch(hass, False)
    await common.async_set_hvac_mode(hass, HVAC_MODE_HEAT)
    assert len(service_calls) == 1
    turn_on = service_calls[0]
    assert turn_on.domain == HASS_DOMAIN
    assert turn_on.service == SERVICE_TURN_ON
    assert turn_on.data["entity_id"] == ENT_SWITCH
def _setup_switch(hass, is_on):
    """Set up the test switch and return a list capturing service calls."""
    initial_state = STATE_ON if is_on else STATE_OFF
    hass.states.async_set(ENT_SWITCH, initial_state)
    recorded = []

    @callback
    def log_call(call):
        """Record every turn_on/turn_off service call."""
        recorded.append(call)

    hass.services.async_register(ha.DOMAIN, SERVICE_TURN_ON, log_call)
    hass.services.async_register(ha.DOMAIN, SERVICE_TURN_OFF, log_call)
    return recorded
@pytest.fixture
async def setup_comp_3(hass):
    """Set up a cooling (ac_mode) thermostat with away preset."""
    hass.config.temperature_unit = TEMP_CELSIUS
    config = {
        "platform": "generic_thermostat",
        "name": "test",
        "cold_tolerance": 2,
        "hot_tolerance": 4,
        "away_temp": 30,
        "heater": ENT_SWITCH,
        "target_sensor": ENT_SENSOR,
        "ac_mode": True,
        "initial_hvac_mode": HVAC_MODE_COOL,
    }
    assert await async_setup_component(hass, DOMAIN, {"climate": config})
    await hass.async_block_till_done()
async def test_set_target_temp_ac_off(hass, setup_comp_3):
    """Raising the target above the current temp turns the AC off."""
    service_calls = _setup_switch(hass, True)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    await common.async_set_temperature(hass, 30)
    assert len(service_calls) == 2
    turn_off = service_calls[0]
    assert turn_off.domain == HASS_DOMAIN
    assert turn_off.service == SERVICE_TURN_OFF
    assert turn_off.data["entity_id"] == ENT_SWITCH
async def test_turn_away_mode_on_cooling(hass, setup_comp_3):
    """The away preset applies the away temperature while cooling."""
    _setup_switch(hass, True)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    await common.async_set_temperature(hass, 19)
    await common.async_set_preset_mode(hass, PRESET_AWAY)
    assert hass.states.get(ENTITY).attributes.get("temperature") == 30
async def test_hvac_mode_cool(hass, setup_comp_3):
    """Switching OFF -> COOL turns the AC on when above the setpoint."""
    await common.async_set_hvac_mode(hass, HVAC_MODE_OFF)
    await common.async_set_temperature(hass, 25)
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    service_calls = _setup_switch(hass, False)
    await common.async_set_hvac_mode(hass, HVAC_MODE_COOL)
    assert len(service_calls) == 1
    turn_on = service_calls[0]
    assert turn_on.domain == HASS_DOMAIN
    assert turn_on.service == SERVICE_TURN_ON
    assert turn_on.data["entity_id"] == ENT_SWITCH
async def test_set_target_temp_ac_on(hass, setup_comp_3):
    """Lowering the target below the current temp turns the AC on."""
    service_calls = _setup_switch(hass, False)
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    await common.async_set_temperature(hass, 25)
    assert len(service_calls) == 1
    turn_on = service_calls[0]
    assert turn_on.domain == HASS_DOMAIN
    assert turn_on.service == SERVICE_TURN_ON
    assert turn_on.data["entity_id"] == ENT_SWITCH
async def test_temp_change_ac_off_within_tolerance(hass, setup_comp_3):
    """A drop inside the cold tolerance does not turn the AC off."""
    service_calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 30)
    # 29.8 is within the cold_tolerance of 2 below the target of 30
    _setup_sensor(hass, 29.8)
    await hass.async_block_till_done()
    assert len(service_calls) == 0
async def test_set_temp_change_ac_off_outside_tolerance(hass, setup_comp_3):
    """A drop beyond the cold tolerance turns the AC off."""
    service_calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 30)
    _setup_sensor(hass, 27)
    await hass.async_block_till_done()
    assert len(service_calls) == 1
    turn_off = service_calls[0]
    assert turn_off.domain == HASS_DOMAIN
    assert turn_off.service == SERVICE_TURN_OFF
    assert turn_off.data["entity_id"] == ENT_SWITCH
async def test_temp_change_ac_on_within_tolerance(hass, setup_comp_3):
    """A rise inside the hot tolerance does not turn the AC on."""
    service_calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 25)
    # 25.2 is within the hot_tolerance of 4 above the target of 25
    _setup_sensor(hass, 25.2)
    await hass.async_block_till_done()
    assert len(service_calls) == 0
async def test_temp_change_ac_on_outside_tolerance(hass, setup_comp_3):
    """A rise beyond the hot tolerance turns the AC on."""
    service_calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 25)
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    assert len(service_calls) == 1
    turn_on = service_calls[0]
    assert turn_on.domain == HASS_DOMAIN
    assert turn_on.service == SERVICE_TURN_ON
    assert turn_on.data["entity_id"] == ENT_SWITCH
async def test_running_when_operating_mode_is_off_2(hass, setup_comp_3):
    """Switching HVAC mode to OFF turns the running AC off."""
    service_calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 30)
    await common.async_set_hvac_mode(hass, HVAC_MODE_OFF)
    assert len(service_calls) == 1
    turn_off = service_calls[0]
    assert turn_off.domain == HASS_DOMAIN
    assert turn_off.service == SERVICE_TURN_OFF
    assert turn_off.data["entity_id"] == ENT_SWITCH
async def test_no_state_change_when_operation_mode_off_2(hass, setup_comp_3):
    """With HVAC mode OFF, sensor changes do not turn the AC on."""
    service_calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 30)
    await common.async_set_hvac_mode(hass, HVAC_MODE_OFF)
    _setup_sensor(hass, 35)
    await hass.async_block_till_done()
    assert len(service_calls) == 0
@pytest.fixture
async def setup_comp_4(hass):
    """Set up a cooling thermostat with a 10-minute minimum cycle."""
    hass.config.temperature_unit = TEMP_CELSIUS
    config = {
        "platform": "generic_thermostat",
        "name": "test",
        "cold_tolerance": 0.3,
        "hot_tolerance": 0.3,
        "heater": ENT_SWITCH,
        "target_sensor": ENT_SENSOR,
        "ac_mode": True,
        "min_cycle_duration": datetime.timedelta(minutes=10),
        "initial_hvac_mode": HVAC_MODE_COOL,
    }
    assert await async_setup_component(hass, DOMAIN, {"climate": config})
    await hass.async_block_till_done()
async def test_temp_change_ac_trigger_on_not_long_enough(hass, setup_comp_4):
    """The AC stays off while the minimum cycle has not elapsed."""
    service_calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 25)
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    assert len(service_calls) == 0
async def test_temp_change_ac_trigger_on_long_enough(hass, setup_comp_4):
    """The AC turns on once the minimum cycle duration has elapsed."""
    # pretend the switch last changed far in the past
    fake_changed = datetime.datetime(
        1918, 11, 11, 11, 11, 11, tzinfo=datetime.timezone.utc
    )
    with patch(
        "homeassistant.helpers.condition.dt_util.utcnow", return_value=fake_changed
    ):
        service_calls = _setup_switch(hass, False)
        await common.async_set_temperature(hass, 25)
        _setup_sensor(hass, 30)
        await hass.async_block_till_done()
    assert len(service_calls) == 1
    turn_on = service_calls[0]
    assert turn_on.domain == HASS_DOMAIN
    assert turn_on.service == SERVICE_TURN_ON
    assert turn_on.data["entity_id"] == ENT_SWITCH
async def test_temp_change_ac_trigger_off_not_long_enough(hass, setup_comp_4):
    """Test that a temp change does not turn ac off before min cycle elapses."""
    switch_calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 30)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    assert len(switch_calls) == 0
async def test_temp_change_ac_trigger_off_long_enough(hass, setup_comp_4):
    """Test that a temp change turns ac off once min cycle has elapsed."""
    frozen_time = datetime.datetime(
        1918, 11, 11, 11, 11, 11, tzinfo=datetime.timezone.utc
    )
    with patch(
        "homeassistant.helpers.condition.dt_util.utcnow", return_value=frozen_time
    ):
        switch_calls = _setup_switch(hass, True)
        await common.async_set_temperature(hass, 30)
        _setup_sensor(hass, 25)
        await hass.async_block_till_done()
        assert len(switch_calls) == 1
        service_call = switch_calls[0]
        assert service_call.domain == HASS_DOMAIN
        assert service_call.service == SERVICE_TURN_OFF
        assert service_call.data["entity_id"] == ENT_SWITCH
async def test_mode_change_ac_trigger_off_not_long_enough(hass, setup_comp_4):
    """Test if mode change turns ac off despite minimum cycle."""
    calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 30)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    # The temperature change alone is blocked by min_cycle_duration.
    assert len(calls) == 0
    # Turning HVAC off bypasses the minimum cycle and switches immediately.
    await common.async_set_hvac_mode(hass, HVAC_MODE_OFF)
    assert len(calls) == 1
    call = calls[0]
    # Fix: use HASS_DOMAIN like the sibling tests instead of a hard-coded
    # "homeassistant" literal.
    assert call.domain == HASS_DOMAIN
    assert call.service == SERVICE_TURN_OFF
    assert call.data["entity_id"] == ENT_SWITCH
async def test_mode_change_ac_trigger_on_not_long_enough(hass, setup_comp_4):
    """Test if mode change turns ac on despite minimum cycle."""
    calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 25)
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    # The temperature change alone is blocked by min_cycle_duration.
    assert len(calls) == 0
    # An explicit mode change bypasses the minimum cycle.
    await common.async_set_hvac_mode(hass, HVAC_MODE_HEAT)
    assert len(calls) == 1
    call = calls[0]
    # Fix: use HASS_DOMAIN like the sibling tests instead of a hard-coded
    # "homeassistant" literal.
    assert call.domain == HASS_DOMAIN
    assert call.service == SERVICE_TURN_ON
    assert call.data["entity_id"] == ENT_SWITCH
@pytest.fixture
async def setup_comp_5(hass):
    """Initialize an AC-mode thermostat with a 10 minute minimum cycle."""
    hass.config.temperature_unit = TEMP_CELSIUS
    thermostat_config = {
        "platform": "generic_thermostat",
        "name": "test",
        "cold_tolerance": 0.3,
        "hot_tolerance": 0.3,
        "heater": ENT_SWITCH,
        "target_sensor": ENT_SENSOR,
        "ac_mode": True,
        "min_cycle_duration": datetime.timedelta(minutes=10),
        "initial_hvac_mode": HVAC_MODE_COOL,
    }
    assert await async_setup_component(
        hass, DOMAIN, {"climate": thermostat_config}
    )
    await hass.async_block_till_done()
async def test_temp_change_ac_trigger_on_not_long_enough_2(hass, setup_comp_5):
    """Test that a temp change does not turn ac on before min cycle elapses."""
    switch_calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 25)
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    assert len(switch_calls) == 0
async def test_temp_change_ac_trigger_on_long_enough_2(hass, setup_comp_5):
    """Test that a temp change turns ac on once min cycle has elapsed."""
    frozen_time = datetime.datetime(
        1918, 11, 11, 11, 11, 11, tzinfo=datetime.timezone.utc
    )
    with patch(
        "homeassistant.helpers.condition.dt_util.utcnow", return_value=frozen_time
    ):
        switch_calls = _setup_switch(hass, False)
        await common.async_set_temperature(hass, 25)
        _setup_sensor(hass, 30)
        await hass.async_block_till_done()
        assert len(switch_calls) == 1
        service_call = switch_calls[0]
        assert service_call.domain == HASS_DOMAIN
        assert service_call.service == SERVICE_TURN_ON
        assert service_call.data["entity_id"] == ENT_SWITCH
async def test_temp_change_ac_trigger_off_not_long_enough_2(hass, setup_comp_5):
    """Test that a temp change does not turn ac off before min cycle elapses."""
    switch_calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 30)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    assert len(switch_calls) == 0
async def test_temp_change_ac_trigger_off_long_enough_2(hass, setup_comp_5):
    """Test that a temp change turns ac off once min cycle has elapsed."""
    frozen_time = datetime.datetime(
        1918, 11, 11, 11, 11, 11, tzinfo=datetime.timezone.utc
    )
    with patch(
        "homeassistant.helpers.condition.dt_util.utcnow", return_value=frozen_time
    ):
        switch_calls = _setup_switch(hass, True)
        await common.async_set_temperature(hass, 30)
        _setup_sensor(hass, 25)
        await hass.async_block_till_done()
        assert len(switch_calls) == 1
        service_call = switch_calls[0]
        assert service_call.domain == HASS_DOMAIN
        assert service_call.service == SERVICE_TURN_OFF
        assert service_call.data["entity_id"] == ENT_SWITCH
async def test_mode_change_ac_trigger_off_not_long_enough_2(hass, setup_comp_5):
    """Test if mode change turns ac off despite minimum cycle."""
    calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 30)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    # The temperature change alone is blocked by min_cycle_duration.
    assert len(calls) == 0
    # Turning HVAC off bypasses the minimum cycle and switches immediately.
    await common.async_set_hvac_mode(hass, HVAC_MODE_OFF)
    assert len(calls) == 1
    call = calls[0]
    # Fix: use HASS_DOMAIN like the sibling tests instead of a hard-coded
    # "homeassistant" literal.
    assert call.domain == HASS_DOMAIN
    assert call.service == SERVICE_TURN_OFF
    assert call.data["entity_id"] == ENT_SWITCH
async def test_mode_change_ac_trigger_on_not_long_enough_2(hass, setup_comp_5):
    """Test if mode change turns ac on despite minimum cycle."""
    calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 25)
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    # The temperature change alone is blocked by min_cycle_duration.
    assert len(calls) == 0
    # An explicit mode change bypasses the minimum cycle.
    await common.async_set_hvac_mode(hass, HVAC_MODE_HEAT)
    assert len(calls) == 1
    call = calls[0]
    # Fix: use HASS_DOMAIN like the sibling tests instead of a hard-coded
    # "homeassistant" literal.
    assert call.domain == HASS_DOMAIN
    assert call.service == SERVICE_TURN_ON
    assert call.data["entity_id"] == ENT_SWITCH
@pytest.fixture
async def setup_comp_6(hass):
    """Initialize a heater thermostat with a 10 minute minimum cycle."""
    hass.config.temperature_unit = TEMP_CELSIUS
    thermostat_config = {
        "platform": "generic_thermostat",
        "name": "test",
        "cold_tolerance": 0.3,
        "hot_tolerance": 0.3,
        "heater": ENT_SWITCH,
        "target_sensor": ENT_SENSOR,
        "min_cycle_duration": datetime.timedelta(minutes=10),
        "initial_hvac_mode": HVAC_MODE_HEAT,
    }
    assert await async_setup_component(
        hass, DOMAIN, {"climate": thermostat_config}
    )
    await hass.async_block_till_done()
async def test_temp_change_heater_trigger_off_not_long_enough(hass, setup_comp_6):
    """Test if temp change doesn't turn heater off because of time."""
    switch_calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 25)
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    assert len(switch_calls) == 0
async def test_temp_change_heater_trigger_on_not_long_enough(hass, setup_comp_6):
    """Test if temp change doesn't turn heater on because of time."""
    switch_calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 30)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    assert len(switch_calls) == 0
async def test_temp_change_heater_trigger_on_long_enough(hass, setup_comp_6):
    """Test if temperature change turn heater on after min cycle."""
    frozen_time = datetime.datetime(
        1918, 11, 11, 11, 11, 11, tzinfo=datetime.timezone.utc
    )
    with patch(
        "homeassistant.helpers.condition.dt_util.utcnow", return_value=frozen_time
    ):
        switch_calls = _setup_switch(hass, False)
        await common.async_set_temperature(hass, 30)
        _setup_sensor(hass, 25)
        await hass.async_block_till_done()
        assert len(switch_calls) == 1
        service_call = switch_calls[0]
        assert service_call.domain == HASS_DOMAIN
        assert service_call.service == SERVICE_TURN_ON
        assert service_call.data["entity_id"] == ENT_SWITCH
async def test_temp_change_heater_trigger_off_long_enough(hass, setup_comp_6):
    """Test if temperature change turn heater off after min cycle."""
    frozen_time = datetime.datetime(
        1918, 11, 11, 11, 11, 11, tzinfo=datetime.timezone.utc
    )
    with patch(
        "homeassistant.helpers.condition.dt_util.utcnow", return_value=frozen_time
    ):
        switch_calls = _setup_switch(hass, True)
        await common.async_set_temperature(hass, 25)
        _setup_sensor(hass, 30)
        await hass.async_block_till_done()
        assert len(switch_calls) == 1
        service_call = switch_calls[0]
        assert service_call.domain == HASS_DOMAIN
        assert service_call.service == SERVICE_TURN_OFF
        assert service_call.data["entity_id"] == ENT_SWITCH
async def test_mode_change_heater_trigger_off_not_long_enough(hass, setup_comp_6):
    """Test if mode change turns heater off despite minimum cycle."""
    calls = _setup_switch(hass, True)
    await common.async_set_temperature(hass, 25)
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    # The temperature change alone is blocked by min_cycle_duration.
    assert len(calls) == 0
    # Turning HVAC off bypasses the minimum cycle and switches immediately.
    await common.async_set_hvac_mode(hass, HVAC_MODE_OFF)
    assert len(calls) == 1
    call = calls[0]
    # Fix: use HASS_DOMAIN like the sibling tests instead of a hard-coded
    # "homeassistant" literal.
    assert call.domain == HASS_DOMAIN
    assert call.service == SERVICE_TURN_OFF
    assert call.data["entity_id"] == ENT_SWITCH
async def test_mode_change_heater_trigger_on_not_long_enough(hass, setup_comp_6):
    """Test if mode change turns heater on despite minimum cycle."""
    calls = _setup_switch(hass, False)
    await common.async_set_temperature(hass, 30)
    _setup_sensor(hass, 25)
    await hass.async_block_till_done()
    # The temperature change alone is blocked by min_cycle_duration.
    assert len(calls) == 0
    # An explicit mode change bypasses the minimum cycle.
    await common.async_set_hvac_mode(hass, HVAC_MODE_HEAT)
    assert len(calls) == 1
    call = calls[0]
    # Fix: use HASS_DOMAIN like the sibling tests instead of a hard-coded
    # "homeassistant" literal.
    assert call.domain == HASS_DOMAIN
    assert call.service == SERVICE_TURN_ON
    assert call.data["entity_id"] == ENT_SWITCH
@pytest.fixture
async def setup_comp_7(hass):
    """Initialize an AC-mode thermostat with keep-alive and min cycle."""
    hass.config.temperature_unit = TEMP_CELSIUS
    thermostat_config = {
        "platform": "generic_thermostat",
        "name": "test",
        "cold_tolerance": 0.3,
        "hot_tolerance": 0.3,
        "heater": ENT_SWITCH,
        "target_temp": 25,
        "target_sensor": ENT_SENSOR,
        "ac_mode": True,
        "min_cycle_duration": datetime.timedelta(minutes=15),
        "keep_alive": datetime.timedelta(minutes=10),
        "initial_hvac_mode": HVAC_MODE_COOL,
    }
    assert await async_setup_component(
        hass, DOMAIN, {"climate": thermostat_config}
    )
    await hass.async_block_till_done()
async def test_temp_change_ac_trigger_on_long_enough_3(hass, setup_comp_7):
    """Test if turn on signal is sent at keep-alive intervals."""
    switch_calls = _setup_switch(hass, True)
    await hass.async_block_till_done()
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    await common.async_set_temperature(hass, 25)
    start = datetime.datetime.now(pytz.UTC)
    # No resend before the 10 minute keep-alive interval has elapsed.
    for elapsed_minutes in (0, 5):
        async_fire_time_changed(
            hass, start + datetime.timedelta(minutes=elapsed_minutes)
        )
        await hass.async_block_till_done()
        assert len(switch_calls) == 0
    # At the keep-alive boundary the turn-on call is re-sent.
    async_fire_time_changed(hass, start + datetime.timedelta(minutes=10))
    await hass.async_block_till_done()
    assert len(switch_calls) == 1
    service_call = switch_calls[0]
    assert service_call.domain == HASS_DOMAIN
    assert service_call.service == SERVICE_TURN_ON
    assert service_call.data["entity_id"] == ENT_SWITCH
async def test_temp_change_ac_trigger_off_long_enough_3(hass, setup_comp_7):
    """Test if turn off signal is sent at keep-alive intervals."""
    switch_calls = _setup_switch(hass, False)
    await hass.async_block_till_done()
    _setup_sensor(hass, 20)
    await hass.async_block_till_done()
    await common.async_set_temperature(hass, 25)
    start = datetime.datetime.now(pytz.UTC)
    # No resend before the 10 minute keep-alive interval has elapsed.
    for elapsed_minutes in (0, 5):
        async_fire_time_changed(
            hass, start + datetime.timedelta(minutes=elapsed_minutes)
        )
        await hass.async_block_till_done()
        assert len(switch_calls) == 0
    # At the keep-alive boundary the turn-off call is re-sent.
    async_fire_time_changed(hass, start + datetime.timedelta(minutes=10))
    await hass.async_block_till_done()
    assert len(switch_calls) == 1
    service_call = switch_calls[0]
    assert service_call.domain == HASS_DOMAIN
    assert service_call.service == SERVICE_TURN_OFF
    assert service_call.data["entity_id"] == ENT_SWITCH
@pytest.fixture
async def setup_comp_8(hass):
    """Initialize a heater thermostat with keep-alive and min cycle."""
    hass.config.temperature_unit = TEMP_CELSIUS
    thermostat_config = {
        "platform": "generic_thermostat",
        "name": "test",
        "cold_tolerance": 0.3,
        "hot_tolerance": 0.3,
        "target_temp": 25,
        "heater": ENT_SWITCH,
        "target_sensor": ENT_SENSOR,
        "min_cycle_duration": datetime.timedelta(minutes=15),
        "keep_alive": datetime.timedelta(minutes=10),
        "initial_hvac_mode": HVAC_MODE_HEAT,
    }
    assert await async_setup_component(
        hass, DOMAIN, {"climate": thermostat_config}
    )
    await hass.async_block_till_done()
async def test_temp_change_heater_trigger_on_long_enough_2(hass, setup_comp_8):
    """Test if turn on signal is sent at keep-alive intervals."""
    switch_calls = _setup_switch(hass, True)
    await hass.async_block_till_done()
    _setup_sensor(hass, 20)
    await hass.async_block_till_done()
    await common.async_set_temperature(hass, 25)
    start = datetime.datetime.now(pytz.UTC)
    # No resend before the 10 minute keep-alive interval has elapsed.
    for elapsed_minutes in (0, 5):
        async_fire_time_changed(
            hass, start + datetime.timedelta(minutes=elapsed_minutes)
        )
        await hass.async_block_till_done()
        assert len(switch_calls) == 0
    # At the keep-alive boundary the turn-on call is re-sent.
    async_fire_time_changed(hass, start + datetime.timedelta(minutes=10))
    await hass.async_block_till_done()
    assert len(switch_calls) == 1
    service_call = switch_calls[0]
    assert service_call.domain == HASS_DOMAIN
    assert service_call.service == SERVICE_TURN_ON
    assert service_call.data["entity_id"] == ENT_SWITCH
async def test_temp_change_heater_trigger_off_long_enough_2(hass, setup_comp_8):
    """Test if turn off signal is sent at keep-alive intervals."""
    switch_calls = _setup_switch(hass, False)
    await hass.async_block_till_done()
    _setup_sensor(hass, 30)
    await hass.async_block_till_done()
    await common.async_set_temperature(hass, 25)
    start = datetime.datetime.now(pytz.UTC)
    # No resend before the 10 minute keep-alive interval has elapsed.
    for elapsed_minutes in (0, 5):
        async_fire_time_changed(
            hass, start + datetime.timedelta(minutes=elapsed_minutes)
        )
        await hass.async_block_till_done()
        assert len(switch_calls) == 0
    # At the keep-alive boundary the turn-off call is re-sent.
    async_fire_time_changed(hass, start + datetime.timedelta(minutes=10))
    await hass.async_block_till_done()
    assert len(switch_calls) == 1
    service_call = switch_calls[0]
    assert service_call.domain == HASS_DOMAIN
    assert service_call.service == SERVICE_TURN_OFF
    assert service_call.data["entity_id"] == ENT_SWITCH
@pytest.fixture
async def setup_comp_9(hass):
    """Initialize a Fahrenheit thermostat with 0.1 degree precision."""
    hass.config.temperature_unit = TEMP_FAHRENHEIT
    thermostat_config = {
        "platform": "generic_thermostat",
        "name": "test",
        "cold_tolerance": 0.3,
        "hot_tolerance": 0.3,
        "target_temp": 25,
        "heater": ENT_SWITCH,
        "target_sensor": ENT_SENSOR,
        "min_cycle_duration": datetime.timedelta(minutes=15),
        "keep_alive": datetime.timedelta(minutes=10),
        "precision": 0.1,
    }
    assert await async_setup_component(
        hass, DOMAIN, {"climate": thermostat_config}
    )
    await hass.async_block_till_done()
async def test_precision(hass, setup_comp_9):
    """Test that setting precision to tenths works as intended."""
    await common.async_set_temperature(hass, 23.27)
    state = hass.states.get(ENTITY)
    # 23.27 is rounded to the configured 0.1 precision.
    assert state.attributes.get("temperature") == 23.3
async def test_custom_setup_params(hass):
    """Test the setup with custom parameters."""
    thermostat_config = {
        "platform": "generic_thermostat",
        "name": "test",
        "heater": ENT_SWITCH,
        "target_sensor": ENT_SENSOR,
        "min_temp": MIN_TEMP,
        "max_temp": MAX_TEMP,
        "target_temp": TARGET_TEMP,
    }
    result = await async_setup_component(
        hass, DOMAIN, {"climate": thermostat_config}
    )
    assert result
    await hass.async_block_till_done()
    attributes = hass.states.get(ENTITY).attributes
    assert attributes.get("min_temp") == MIN_TEMP
    assert attributes.get("max_temp") == MAX_TEMP
    assert attributes.get("temperature") == TARGET_TEMP
async def test_restore_state(hass):
    """Ensure states are restored on startup."""
    cached_state = State(
        "climate.test_thermostat",
        HVAC_MODE_OFF,
        {ATTR_TEMPERATURE: "20", ATTR_PRESET_MODE: PRESET_AWAY},
    )
    mock_restore_cache(hass, (cached_state,))
    hass.state = CoreState.starting
    await async_setup_component(
        hass,
        DOMAIN,
        {
            "climate": {
                "platform": "generic_thermostat",
                "name": "test_thermostat",
                "heater": ENT_SWITCH,
                "target_sensor": ENT_SENSOR,
                "away_temp": 14,
            }
        },
    )
    await hass.async_block_till_done()
    # Target temperature, preset and HVAC mode all come from the cache.
    state = hass.states.get("climate.test_thermostat")
    assert state.attributes[ATTR_TEMPERATURE] == 20
    assert state.attributes[ATTR_PRESET_MODE] == PRESET_AWAY
    assert state.state == HVAC_MODE_OFF
async def test_no_restore_state(hass):
    """Ensure a configured target_temp overrides the cached temperature.

    Allows for graceful reboot.
    """
    cached_state = State(
        "climate.test_thermostat",
        HVAC_MODE_OFF,
        {ATTR_TEMPERATURE: "20", ATTR_PRESET_MODE: PRESET_AWAY},
    )
    mock_restore_cache(hass, (cached_state,))
    hass.state = CoreState.starting
    await async_setup_component(
        hass,
        DOMAIN,
        {
            "climate": {
                "platform": "generic_thermostat",
                "name": "test_thermostat",
                "heater": ENT_SWITCH,
                "target_sensor": ENT_SENSOR,
                "target_temp": 22,
            }
        },
    )
    await hass.async_block_till_done()
    # The explicit target_temp (22) wins over the cached value (20).
    state = hass.states.get("climate.test_thermostat")
    assert state.attributes[ATTR_TEMPERATURE] == 22
    assert state.state == HVAC_MODE_OFF
async def test_restore_state_uncoherence_case(hass):
    """
    Test restore from a strange state.

    - Turn the generic thermostat off
    - Restart HA and restore state from DB
    """
    _mock_restore_cache(hass, temperature=20)
    switch_calls = _setup_switch(hass, False)
    _setup_sensor(hass, 15)
    await _setup_climate(hass)
    await hass.async_block_till_done()
    state = hass.states.get(ENTITY)
    assert state.attributes[ATTR_TEMPERATURE] == 20
    assert state.state == HVAC_MODE_OFF
    assert len(switch_calls) == 0
    # Re-registering the switch must not change the restored OFF mode.
    switch_calls = _setup_switch(hass, False)
    await hass.async_block_till_done()
    state = hass.states.get(ENTITY)
    assert state.state == HVAC_MODE_OFF
async def _setup_climate(hass):
    """Set up an AC-mode generic thermostat with an away preset."""
    thermostat_config = {
        "platform": "generic_thermostat",
        "name": "test",
        "cold_tolerance": 2,
        "hot_tolerance": 4,
        "away_temp": 30,
        "heater": ENT_SWITCH,
        "target_sensor": ENT_SENSOR,
        "ac_mode": True,
    }
    assert await async_setup_component(
        hass, DOMAIN, {"climate": thermostat_config}
    )
def _mock_restore_cache(hass, temperature=20, hvac_mode=HVAC_MODE_OFF):
    """Seed the restore-state cache with a thermostat state for ENTITY."""
    attributes = {
        ATTR_TEMPERATURE: str(temperature),
        ATTR_PRESET_MODE: PRESET_AWAY,
    }
    mock_restore_cache(hass, (State(ENTITY, hvac_mode, attributes),))
async def test_reload(hass):
    """Test we can reload."""
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            "climate": {
                "platform": "generic_thermostat",
                "name": "test",
                "heater": "switch.any",
                "target_sensor": "sensor.any",
            }
        },
    )
    await hass.async_block_till_done()
    assert len(hass.states.async_all()) == 1
    assert hass.states.get("climate.test") is not None

    yaml_path = path.join(
        _get_fixtures_base_path(),
        "fixtures",
        "generic_thermostat/configuration.yaml",
    )
    # Reloading from the fixture config replaces climate.test with
    # climate.reload.
    with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
        await hass.services.async_call(
            GENERIC_THERMOSTAT_DOMAIN, SERVICE_RELOAD, {}, blocking=True
        )
        await hass.async_block_till_done()

    assert len(hass.states.async_all()) == 1
    assert hass.states.get("climate.test") is None
    assert hass.states.get("climate.reload")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for file sinks."""
import glob
import logging
import os
import shutil
import tempfile
import unittest
import hamcrest as hc
import apache_beam as beam
from apache_beam import coders
from apache_beam.io import fileio
from apache_beam.test_pipeline import TestPipeline
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
# TODO: Refactor code so all io tests are using same library
# TestCaseWithTempDirCleanup class.
class _TestCaseWithTempDirCleanUp(unittest.TestCase):
"""Base class for TestCases that deals with TempDir clean-up.
Inherited test cases will call self._new_tempdir() to start a temporary dir
which will be deleted at the end of the tests (when tearDown() is called).
"""
def setUp(self):
self._tempdirs = []
def tearDown(self):
for path in self._tempdirs:
if os.path.exists(path):
shutil.rmtree(path)
self._tempdirs = []
def _new_tempdir(self):
result = tempfile.mkdtemp()
self._tempdirs.append(result)
return result
def _create_temp_file(self, name='', suffix=''):
if not name:
name = tempfile.template
file_name = tempfile.NamedTemporaryFile(
delete=False, prefix=name,
dir=self._new_tempdir(), suffix=suffix).name
return file_name
class MyFileSink(fileio.FileSink):
  """A FileSink that brackets the file and each record with markers."""

  def open(self, temp_path):
    # TODO: Fix main session pickling.
    # file_handle = super(MyFileSink, self).open(temp_path)
    handle = fileio.FileSink.open(self, temp_path)
    handle.write('[start]')
    return handle

  def write_encoded_record(self, file_handle, encoded_value):
    # Each encoded record is wrapped in square brackets.
    file_handle.write('[')
    file_handle.write(encoded_value)
    file_handle.write(']')

  def close(self, file_handle):
    file_handle.write('[end]')
    # TODO: Fix main session pickling.
    # file_handle = super(MyFileSink, self).close(file_handle)
    file_handle = fileio.FileSink.close(self, file_handle)
class TestFileSink(_TestCaseWithTempDirCleanUp):
  """Tests that drive the generic Sink API through MyFileSink."""

  def test_file_sink_writing(self):
    # Writes two shards via the manual Sink lifecycle
    # (initialize_write -> open_writer/write/close -> finalize_write)
    # and checks the finalized shard files and their contents.
    temp_path = os.path.join(self._new_tempdir(), 'filesink')
    sink = MyFileSink(
        temp_path, file_name_suffix='.foo', coder=coders.ToStringCoder())

    # Manually invoke the generic Sink API.
    init_token = sink.initialize_write()

    writer1 = sink.open_writer(init_token, '1')
    writer1.write('a')
    writer1.write('b')
    res1 = writer1.close()

    writer2 = sink.open_writer(init_token, '2')
    writer2.write('x')
    writer2.write('y')
    writer2.write('z')
    res2 = writer2.close()

    _ = list(sink.finalize_write(init_token, [res1, res2]))
    # Retry the finalize operation (as if the first attempt was lost).
    # finalize_write must be idempotent.
    res = list(sink.finalize_write(init_token, [res1, res2]))

    # Check the results.
    shard1 = temp_path + '-00000-of-00002.foo'
    shard2 = temp_path + '-00001-of-00002.foo'
    self.assertEqual(res, [shard1, shard2])
    self.assertEqual(open(shard1).read(), '[start][a][b][end]')
    self.assertEqual(open(shard2).read(), '[start][x][y][z][end]')

    # Check that any temp files are deleted.
    # NOTE(review): assertItemsEqual is Python 2 only; on Python 3 this
    # would be assertCountEqual.
    self.assertItemsEqual([shard1, shard2], glob.glob(temp_path + '*'))

  def test_file_sink_display_data(self):
    # The sink must publish 'compression' and the resolved shard file
    # pattern as display data.
    temp_path = os.path.join(self._new_tempdir(), 'display')
    sink = MyFileSink(
        temp_path, file_name_suffix='.foo', coder=coders.ToStringCoder())
    dd = DisplayData.create_from(sink)
    expected_items = [
        DisplayDataItemMatcher(
            'compression', 'auto'),
        DisplayDataItemMatcher(
            'file_pattern',
            '{}{}'.format(temp_path,
                          '-%(shard_num)05d-of-%(num_shards)05d.foo'))]
    hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))

  def test_empty_write(self):
    # An empty PCollection still yields one shard containing only the
    # open/close markers.
    temp_path = tempfile.NamedTemporaryFile().name
    sink = MyFileSink(
        temp_path, file_name_suffix='.foo', coder=coders.ToStringCoder())
    p = TestPipeline()
    p | beam.Create([]) | beam.io.Write(sink)  # pylint: disable=expression-not-assigned
    p.run()
    self.assertEqual(
        open(temp_path + '-00000-of-00001.foo').read(), '[start][end]')

  def test_fixed_shard_write(self):
    # With num_shards=3 and a custom shard_name_template, elements may land
    # in any shard, so only membership in the concatenation is asserted.
    temp_path = os.path.join(self._new_tempdir(), 'empty')
    sink = MyFileSink(
        temp_path,
        file_name_suffix='.foo',
        num_shards=3,
        shard_name_template='_NN_SSS_',
        coder=coders.ToStringCoder())
    p = TestPipeline()
    p | beam.Create(['a', 'b']) | beam.io.Write(sink)  # pylint: disable=expression-not-assigned
    p.run()

    concat = ''.join(
        open(temp_path + '_03_%03d_.foo' % shard_num).read()
        for shard_num in range(3))
    self.assertTrue('][a][' in concat, concat)
    self.assertTrue('][b][' in concat, concat)

  def test_file_sink_multi_shards(self):
    # Stresses finalize_write with 1000 writers and checks that the sorted
    # finalized shard names line up with their contents.
    temp_path = os.path.join(self._new_tempdir(), 'multishard')
    sink = MyFileSink(
        temp_path, file_name_suffix='.foo', coder=coders.ToStringCoder())

    # Manually invoke the generic Sink API.
    init_token = sink.initialize_write()

    num_shards = 1000
    writer_results = []
    for i in range(num_shards):
      uuid = 'uuid-%05d' % i
      writer = sink.open_writer(init_token, uuid)
      writer.write('a')
      writer.write('b')
      writer.write(uuid)
      writer_results.append(writer.close())

    res_first = list(sink.finalize_write(init_token, writer_results))
    # Retry the finalize operation (as if the first attempt was lost).
    res_second = list(sink.finalize_write(init_token, writer_results))
    self.assertItemsEqual(res_first, res_second)

    res = sorted(res_second)
    for i in range(num_shards):
      shard_name = '%s-%05d-of-%05d.foo' % (temp_path, i, num_shards)
      uuid = 'uuid-%05d' % i
      self.assertEqual(res[i], shard_name)
      self.assertEqual(
          open(shard_name).read(), ('[start][a][b][%s][end]' % uuid))

    # Check that any temp files are deleted.
    self.assertItemsEqual(res, glob.glob(temp_path + '*'))

  def test_file_sink_io_error(self):
    # Deleting one writer's temp output before finalize must make
    # finalize_write raise.
    temp_path = os.path.join(self._new_tempdir(), 'ioerror')
    sink = MyFileSink(
        temp_path, file_name_suffix='.foo', coder=coders.ToStringCoder())

    # Manually invoke the generic Sink API.
    init_token = sink.initialize_write()

    writer1 = sink.open_writer(init_token, '1')
    writer1.write('a')
    writer1.write('b')
    res1 = writer1.close()

    writer2 = sink.open_writer(init_token, '2')
    writer2.write('x')
    writer2.write('y')
    writer2.write('z')
    res2 = writer2.close()

    os.remove(res2)
    with self.assertRaises(Exception):
      list(sink.finalize_write(init_token, [res1, res2]))
if __name__ == '__main__':
  # Show INFO-level logs when the test module is run directly.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| |
"""Dataset preprocessing."""
import builtins
from typing import List, Optional, Text
import bisect
import collections
import dataclasses
import enum
import re
import fire
import gast as ast
import numpy as np
from python_graphs import control_flow
from python_graphs import instruction as instruction_module
from core.data import post_domination
from core.data import tokenization
class EdgeTypes(enum.IntEnum):
  """Control-flow edge kinds (values preserve the original auto() order)."""

  UNCONDITIONAL_FORWARD = 1
  UNCONDITIONAL_BACKWARD = 2
  TRUE_FORWARD = 3
  TRUE_BACKWARD = 4
  FALSE_FORWARD = 5
  FALSE_BACKWARD = 6
@dataclasses.dataclass
class RawRuntimeErrorProblem:
  """RawRuntimeErrorProblem.

  Character-level graph representation of one program submission, before
  tokenization (see RuntimeErrorProblem for the tokenized form).
  """
  # Program source text.
  source: Text
  # Identifiers of the originating problem/submission (may be absent).
  problem_id: Optional[Text]
  submission_id: Optional[Text]
  # Adjacency list of graph edges: edge i goes edge_sources[i] ->
  # edge_dests[i] with type edge_types[i] (EdgeTypes values).
  edge_sources: List[int]
  edge_dests: List[int]
  edge_types: List[int]
  num_edges: int
  # Character span [start, end) of each node in `source`.
  node_span_starts: List[int]
  node_span_ends: List[int]
  # Per-node branch targets; presumably [true, false] successor indexes —
  # TODO(review): confirm against get_branch_list.
  branch_list: List[List[int]]
  # Per-node raise targets (see get_raises_list).
  raises_list: List[int]
  # Indexes of the entry and exit nodes in the graph.
  start_index: int
  exit_index: int
  # NOTE(review): semantics of step_limit/target/target_lineno are defined
  # by the dataset pipeline, not visible here — document at the producer.
  step_limit: int
  target: int
  target_lineno: Optional[int]
  # post_domination_matrix[i][j] — post-domination relation between nodes.
  post_domination_matrix: List[List[int]]
@dataclasses.dataclass
class RuntimeErrorProblem:
  """RuntimeErrorProblem for use on an accelerator.

  Tokenized counterpart of RawRuntimeErrorProblem: character spans are
  replaced by token spans and the source by token id sequences.
  """
  # Token ids of the program and of its docstring.
  tokens: List[int]
  docstring_tokens: List[int]
  problem_id: Text
  submission_id: Text
  # Adjacency list of graph edges (parallel lists, see RawRuntimeErrorProblem).
  edge_sources: List[int]
  edge_dests: List[int]
  edge_types: List[int]
  num_edges: int
  # Token span [start, end) of each node.
  node_token_span_starts: List[int]
  node_token_span_ends: List[int]
  # For each token, the index of the node covering it (presumably -1 or
  # similar sentinel when uncovered — TODO(review): confirm).
  token_node_indexes: List[int]
  # Per-node successor node indexes for the true/false branches and raises.
  true_branch_nodes: List[int]
  false_branch_nodes: List[int]
  raise_nodes: List[int]
  start_index: int
  exit_index: int
  step_limit: int
  target: int
  target_lineno: Optional[int]
  # Token indexes associated with the target — TODO(review): confirm.
  target_node_indexes: List[int]
  post_domination_matrix: List[List[int]]
  # Whether this example is included in the dataset split.
  in_dataset: bool
def get_character_index(source, lineno, col_offset):
  """Converts a 1-based (lineno, col_offset) pair to a flat index in source."""
  preceding_lines = source.split('\n')[:lineno - 1]
  # +1 per preceding line accounts for the '\n' that terminated it.
  offset_of_line = sum(len(line) + 1 for line in preceding_lines)
  return offset_of_line + col_offset
def get_span(instruction, source):
  """Returns the (start, end) character span of `instruction` in `source`.

  Three cases are handled: exception writes ("except ... as name:"),
  function argument lists, and ordinary AST nodes carrying position
  attributes.
  """
  ast_node = instruction.node
  if instruction.source == instruction_module.EXCEPTION:
    # Caution: Leaky abstraction.
    # This is an exception write, e.g. the write to `value` in "except Exception as value:".
    # The accesses of an exception node are defined in control_flow's handle_ExceptHandler.
    # This is a hacky (but hopefully general) way to access the span of the exception write.
    # We use regex to find 'as' to determine the span.
    # In "except Exception as value:", the resulting span is "value:".
    name_node = instruction.node  # A Name, Tuple, or List AST node.
    parent = instruction.accesses[0][-1]  # An AST ExceptHandler node.
    lineno = parent.lineno
    col_offset = parent.col_offset
    # The handler's span ends where its first body statement begins.
    end_lineno = parent.body[0].lineno
    end_col_offset = parent.body[0].col_offset
    extended_span_start = get_character_index(source, lineno, col_offset)
    extended_span_end = get_character_index(source, end_lineno, end_col_offset)
    # Everything after the 'as' keyword, trimmed of surrounding whitespace.
    match = re.search(r'\bas\b', source[extended_span_start:extended_span_end])
    after_as = extended_span_start + match.span()[1]
    untrimmed = source[after_as:extended_span_end]
    leading_spaces = len(untrimmed) - len(untrimmed.lstrip())
    trailing_spaces = len(untrimmed) - len(untrimmed.rstrip())
    span_start = after_as + leading_spaces
    span_end = extended_span_end - trailing_spaces
    return span_start, span_end
  elif instruction.source == instruction_module.ARGS:
    # Argument-list write: span covers from the first to the last argument.
    arg0 = instruction.accesses[0][1]
    argN = instruction.accesses[-1][1]
    lineno = arg0.lineno
    col_offset = arg0.col_offset
    end_lineno = argN.end_lineno
    end_col_offset = argN.end_col_offset
  else:
    # Ordinary node: use the AST node's own position attributes.
    lineno = ast_node.lineno
    col_offset = ast_node.col_offset
    end_lineno = ast_node.end_lineno
    end_col_offset = ast_node.end_col_offset
  span_start = get_character_index(source, lineno, col_offset)
  span_end = get_character_index(source, end_lineno, end_col_offset)
  return span_start, span_end
def examine_udfs(graph, problem_id, submission_id):
  """Classifies how user-defined functions (UDFs) are called in a program.

  Args:
    graph: A control flow graph; each node's instruction wraps an AST node.
    problem_id: Unused; kept for interface compatibility.
    submission_id: Unused; kept for interface compatibility.
  Returns:
    'Function called more than once' if any UDF is called multiple times,
    'No UDFs called' if no UDF is called at all, and
    'UDFs called at most once' otherwise.
  """
  nodes = graph.nodes
  ast_nodes = [n.instruction.node for n in nodes]
  # This doesn't consider the scope of a function, such as if
  # it is defined inside a class.
  nodes_by_function_name = {
      ast_node.name: ast_node
      for ast_node in ast_nodes
      if isinstance(ast_node, (ast.FunctionDef, ast.ClassDef))
  }

  # We're interested in splitting instructions that call user defined functions
  # into multiple nodes. We won't do this for FunctionDef, ClassDef.
  total_function_calls = 0
  calls_by_function_name = collections.defaultdict(int)
  for node in nodes:
    if isinstance(node.instruction.node, (ast.FunctionDef, ast.ClassDef)):
      continue
    for ast_node in ast.walk(node.instruction.node):
      if not isinstance(ast_node, ast.Call):
        continue
      func = ast_node.func
      if isinstance(func, ast.Name):
        function_name = func.id  # e.g. "func_name()"
      elif isinstance(func, ast.Attribute):
        function_name = func.attr  # e.g. "o.func_name()"
      else:
        continue  # e.g. "o[0]()" (ast.Subscript)
      if function_name in nodes_by_function_name:
        total_function_calls += 1
        calls_by_function_name[function_name] += 1
      # Calls to builtins or unknown names are intentionally ignored.

  # Fix: the original also computed the name of the most-called function in a
  # dead loop whose result was never used; that dead code is removed.
  if calls_by_function_name and max(calls_by_function_name.values()) > 1:
    return 'Function called more than once'
  if total_function_calls == 0:
    return 'No UDFs called'
  return 'UDFs called at most once'
def make_rawruntimeerrorproblem(
    source, target, target_lineno=None, problem_id=None, submission_id=None):
  """Constructs a RawRuntimeErrorProblem from the provided source and target.

  Fields:
  - source: The text of a program
  - edge_sources: Together with edge_dests, forms an adjacency list of all edges in the program's graph representation
  - edge_dests: Together with edge_sources, forms an adjacency list of all edges in the program's graph representation
  - edge_types: A list the same length as edge_sources and edge_dests, contains the integer enum type of each edge in the program's graph representation.
  - node_span_starts: A list of the source span start for each node in the program's graph representation.
  - node_span_ends: A list of the source span ends for each node in the program's graph representation.

  Raises:
    ValueError: If the program calls user-defined functions (not supported).
  """
  graph = control_flow.get_control_flow_graph(source)
  lines = source.strip().split('\n')
  nodes = graph.nodes
  # Programs that invoke UDFs are rejected; only straight-line/looping code
  # without user function calls is supported by this representation.
  udf_usage = examine_udfs(graph, problem_id, submission_id)
  if udf_usage != 'No UDFs called':
    raise ValueError('UDF not currently supported.')

  # cfg.nodes does not include an exit node, so we add 1.
  num_nodes = len(nodes) + 1
  exit_index = len(nodes)
  start_node = graph.get_start_control_flow_node()
  if start_node == '<exit>':
    start_index = exit_index
  elif start_node == '<raise>' or start_node == '<return>':
    # NOTE(review): here '<return>' maps to the raise index (exit_index + 1),
    # but get_node_index maps '<return>' to the exit index -- confirm this
    # asymmetry is intentional.
    start_index = exit_index + 1
  else:
    start_index = nodes.index(start_node)

  # node_span_starts and node_span_ends: character offsets of each node's
  # source span, used later to align tokens with graph nodes.
  node_span_starts = []
  node_span_ends = []
  node_indexes = {}
  for node_index, node in enumerate(nodes):
    node_indexes[node.uuid] = node_index
    node_span_start, node_span_end = get_span(node.instruction, source)
    node_span_starts.append(node_span_start)
    node_span_ends.append(node_span_end)

  # For consistency with legacy datasets, we count the number of edges this way.
  # The actual number of edges is higher.
  num_edges = 0
  for node_index, node in enumerate(nodes):
    for next_node in node.next:
      num_edges += 1

  branch_list = get_branch_list(nodes, exit_index)
  raises_list = get_raises_list(nodes, exit_index)
  step_limit = get_step_limit(lines)

  # Build forward and backward edges for each branch decision. When both
  # branches agree the edge is "unconditional"; otherwise separate TRUE and
  # FALSE edges (each with a paired backward edge) are emitted.
  edge_sources = []
  edge_dests = []
  edge_types = []
  for node_index, (true_branch, false_branch) in enumerate(branch_list):
    if true_branch == false_branch:
      edge_sources.append(node_index)
      edge_dests.append(true_branch)
      edge_types.append(EdgeTypes.UNCONDITIONAL_FORWARD.value)
      edge_sources.append(true_branch)
      edge_dests.append(node_index)
      edge_types.append(EdgeTypes.UNCONDITIONAL_BACKWARD.value)
    else:
      edge_sources.append(node_index)
      edge_dests.append(true_branch)
      edge_types.append(EdgeTypes.TRUE_FORWARD.value)
      edge_sources.append(true_branch)
      edge_dests.append(node_index)
      edge_types.append(EdgeTypes.TRUE_BACKWARD.value)
      edge_sources.append(node_index)
      edge_dests.append(false_branch)
      edge_types.append(EdgeTypes.FALSE_FORWARD.value)
      edge_sources.append(false_branch)
      edge_dests.append(node_index)
      edge_types.append(EdgeTypes.FALSE_BACKWARD.value)

  post_domination_matrix = post_domination.get_post_domination_matrix(graph)

  return RawRuntimeErrorProblem(
      source=source,
      problem_id=problem_id,
      submission_id=submission_id,
      edge_sources=edge_sources,
      edge_dests=edge_dests,
      edge_types=edge_types,
      num_edges=num_edges,
      node_span_starts=node_span_starts,
      node_span_ends=node_span_ends,
      branch_list=branch_list,
      raises_list=raises_list,
      start_index=start_index,
      exit_index=exit_index,
      step_limit=step_limit,
      target=target,
      target_lineno=target_lineno,
      post_domination_matrix=post_domination_matrix,
  )
def get_step_limit(lines):
  """Computes the maximum number of IPA-GNN steps allowed for a program.

  Each line contributes 2**d steps, where d is the number of enclosing
  loops; loop headers contribute an extra term for re-checking the
  condition on every iteration.

  Args:
    lines: The program's source lines.

  Returns:
    An integer step budget (includes one step for reaching the exit).
  """
  # One step is reserved for reaching the exit node.
  total = 1
  open_loop_indents = []
  for line in lines:
    stripped = line.lstrip()
    indent = len(line) - len(stripped)
    # Close every loop whose body this line is no longer inside of.
    while open_loop_indents and indent <= open_loop_indents[-1]:
      open_loop_indents.pop()
    total += 2 ** len(open_loop_indents)
    if stripped.startswith(('for', 'while')):
      open_loop_indents.append(indent)
      # Loops get steps at both indentation levels: before for the initial
      # condition check, after for subsequent condition checks.
      total += 2 ** len(open_loop_indents)
  return total
def get_node_index(node_or_label, indexes_by_id, exit_index, raise_index):
  """Maps a control flow node or special label to its integer node index."""
  if node_or_label == '<raise>':
    return raise_index
  if node_or_label in ('<exit>', '<return>'):
    # Both normal exit and return flow to the exit node.
    return exit_index
  return indexes_by_id[id(node_or_label)]
def get_branch_list(nodes, exit_index):
  """Computes the branch list for the control flow graph.

  Args:
    nodes: A list of control_flow.ControlFlowNodes.
    exit_index: The index of the exit node.

  Returns:
    A Python list representing the branch options available from each node.
    Each entry corresponds to a node in the control flow graph, with one final
    entry for the exit node (not present in the cfg). Each entry is a 2-item
    list giving the next node reached by the True and False branch
    respectively (these may be the same). The exit node leads to itself along
    both branches.
  """
  index_of = {id(node): index for index, node in enumerate(nodes)}
  raise_index = exit_index + 1
  branch_list = []
  for node in nodes:
    options = node.get_branches(
        include_except_branches=True,
        include_reraise_branches=True)
    if options:
      true_index = get_node_index(
          options[True], index_of, exit_index, raise_index)
      false_index = get_node_index(
          options[False], index_of, exit_index, raise_index)
      branch_list.append([true_index, false_index])
    else:
      successors = node.next_from_end
      assert len(successors) <= 1
      if successors:
        target = get_node_index(
            next(iter(successors)), index_of, exit_index, raise_index)
      else:
        # NOTE(dbieber): We are sending the true and false branches of a raise
        # node to itself. We may wish to change this behavior.
        target = index_of[id(node)]
      branch_list.append([target, target])
  # Finally we add branches from the exit node to itself.
  # Omit this if running on BasicBlocks rather than ControlFlowNodes, because
  # ControlFlowGraphs have an exit BasicBlock, but no exit ControlFlowNodes.
  branch_list.append([exit_index, exit_index])
  return branch_list
def get_raises_list(nodes, exit_index):
  """Compute the "raises list" for the control flow graph.

  Args:
    nodes: A list of control_flow.ControlFlowNodes.
    exit_index: The index of the exit node. The top-level "raise index" is
      assumed to be exit_index + 1.

  Returns:
    A Python list indicating where each node would directly raise to if it
    were to raise an exception.
  """
  raise_index = exit_index + 1
  index_of = {id(node): index for index, node in enumerate(nodes)}
  raises_list = []
  for node in nodes:
    middle_exits = node.block.exits_from_middle
    assert len(middle_exits) <= 1
    if not middle_exits:
      # No handler in scope: raise directly to the top level.
      raises_list.append(raise_index)
      continue
    raise_block = next(iter(middle_exits))
    label = raise_block.label
    if label == '<raise>':
      raises_list.append(raise_index)
    elif label in ('<exit>', '<return>'):
      raises_list.append(exit_index)
    else:
      # Raise into the first node of the handler block.
      first_handler_node = raise_block.control_flow_nodes[0]
      raises_list.append(index_of[id(first_handler_node)])
  # Finally we add an unused raise edge from the exit node to the raise node.
  # The raise edge from the raise node will be added later.
  raises_list.append(raise_index)
  return raises_list
def get_nodes_at_lineno(raw, lineno):
  """Returns indexes of nodes whose source spans intersect line `lineno`.

  Args:
    raw: A problem with `source`, `node_span_starts`, and `node_span_ends`.
    lineno: A 1-based line number; None or 0 yields an empty result.

  Returns:
    A list of node indexes overlapping the given line.
  """
  if not lineno:
    return []
  # Compute character offsets of each line boundary.
  line_index = lineno - 1
  line_starts = [0]
  offset = 0
  for line in raw.source.split('\n'):
    offset += len(line) + 1  # +1 accounts for the newline.
    line_starts.append(offset)
  line_start = line_starts[line_index]
  line_end = line_starts[line_index + 1]
  # Collect the nodes whose spans intersect [line_start, line_end].
  overlapping = []
  spans = zip(raw.node_span_starts, raw.node_span_ends)
  for node_index, (span_start, span_end) in enumerate(spans):
    intersects = (
        line_start <= span_start <= line_end
        or line_start <= span_end <= line_end
        or span_start <= line_start <= span_end
        or span_start <= line_end <= span_end)
    if intersects:
      overlapping.append(node_index)
  return overlapping
def hardcoded_filter(tokens_extended):
  """Returns True iff the extended token sequence fits the 512-token budget."""
  max_length = 512
  return len(tokens_extended) <= max_length
def make_runtimeerrorproblem(
    source, target, docstring=None, extended_source=None,
    target_lineno=0, tokenizer=None,
    problem_id=None, submission_id=None):
  """Builds a tokenized RuntimeErrorProblem from raw program text.

  Args:
    source: The text of the program.
    target: The target label for the problem.
    docstring: Optional docstring text to tokenize alongside the program.
    extended_source: Optional longer variant of the source; when provided and
      different from `source`, its token length drives the dataset filter.
    target_lineno: Line number associated with the target (0 means none).
    tokenizer: A tokenizer returning a dict with 'input_ids'
      (HuggingFace-style call convention); loaded lazily if None.
    problem_id: Optional problem identifier.
    submission_id: Optional submission identifier.

  Returns:
    A RuntimeErrorProblem with token-aligned graph data.
  """
  raw = make_rawruntimeerrorproblem(
      source, target, target_lineno=target_lineno,
      problem_id=problem_id, submission_id=submission_id)
  tokenizer = tokenizer or tokenization.load_tokenizer()
  token_data = tokenize_raw_with_spans(tokenizer, raw)
  # The dataset-size filter is applied to the extended source when one is
  # supplied; otherwise to the plain source tokens.
  if extended_source is not None and extended_source != source:
    extended_tokenized = tokenizer(extended_source)
    tokens_extended = extended_tokenized['input_ids']
  else:
    tokens_extended = token_data['tokens']
  if docstring is not None:
    docstring_tokenized = tokenizer(docstring)
    docstring_tokens = docstring_tokenized['input_ids']
  else:
    docstring_tokens = []
  in_dataset = hardcoded_filter(tokens_extended)
  branch_list = np.array(raw.branch_list)
  target_node_indexes = get_nodes_at_lineno(raw, target_lineno)
  return RuntimeErrorProblem(
      tokens=token_data['tokens'],
      docstring_tokens=docstring_tokens,
      problem_id=raw.problem_id,
      submission_id=raw.submission_id,
      edge_sources=raw.edge_sources,
      edge_dests=raw.edge_dests,
      edge_types=raw.edge_types,
      num_edges=raw.num_edges,
      node_token_span_starts=token_data['node_token_span_starts'],
      node_token_span_ends=token_data['node_token_span_ends'],
      token_node_indexes=token_data['token_node_indexes'],
      # branch_list columns are (true_branch, false_branch).
      true_branch_nodes=branch_list[:, 0],
      false_branch_nodes=branch_list[:, 1],
      raise_nodes=raw.raises_list,
      start_index=raw.start_index,
      exit_index=raw.exit_index,
      step_limit=raw.step_limit,
      target=raw.target,
      target_lineno=raw.target_lineno,
      target_node_indexes=target_node_indexes,
      post_domination_matrix=raw.post_domination_matrix,
      in_dataset=in_dataset,
  )
def tokenize_raw_with_spans(tokenizer, raw):
  """Tokenizes a raw problem's source, aligning tokens to its node spans."""
  return tokenize_with_spans(
      tokenizer, raw.source, raw.node_span_starts, raw.node_span_ends,
      raw.target)
def tokenize_with_spans(tokenizer, source, node_span_starts, node_span_ends, target):
  """Tokenizes source text and maps character spans to token spans.

  Args:
    tokenizer: A tokenizer supporting `return_offsets_mapping`.
    source: The program text.
    node_span_starts: Character offset where each node's span begins.
    node_span_ends: Character offset where each node's span ends.
    target: Unused here; accepted for interface compatibility.

  Returns:
    A dict with the token ids, per-node token span boundaries, and a
    per-token node index (-1 for tokens outside every node span).
  """
  encoded = tokenizer(source, return_offsets_mapping=True)
  tokens = encoded['input_ids']
  offsets = encoded['offset_mapping']
  if offsets:
    token_starts, token_ends = zip(*offsets)
  else:
    # No tokens at all.
    token_starts = token_ends = tuple()
  span_token_starts = []
  span_token_ends = []
  token_node_indexes = [-1] * len(tokens)
  node_spans = zip(node_span_starts, node_span_ends)
  for node_index, (char_start, char_end) in enumerate(node_spans):
    # Locate the first token starting at or before char_start.
    tok_start = bisect.bisect_left(token_starts, char_start)
    while token_starts[tok_start] > char_start:
      tok_start -= 1
    # Locate the first token whose end reaches char_end.
    tok_end = bisect.bisect_left(token_ends, char_end)
    span_token_starts.append(tok_start)
    span_token_ends.append(tok_end)
    token_node_indexes[tok_start:tok_end] = [node_index] * (tok_end - tok_start)
  return {
      'tokens': tokens,
      'node_token_span_starts': span_token_starts,
      'node_token_span_ends': span_token_ends,
      'token_node_indexes': token_node_indexes,
  }
def demo_parse_code():
  """Demonstration of making and processing a RuntimeErrorProblem."""
  source = """n = input()
print(any(set('47') >= set(str(i)) and n % i == 0 for i in range(1, n+1)) and 'YES' or 'NO')
"""
  # Build the raw problem, then tokenize it with node-span alignment.
  problem = make_rawruntimeerrorproblem(source, 'n/a')
  tokenizer = tokenization.load_tokenizer()
  token_data = tokenize_raw_with_spans(tokenizer, problem)
if __name__ == '__main__':
  # Expose this module's functions (e.g. demo_parse_code) as a CLI via Fire.
  fire.Fire()
| |
from unittest import TestCase
import filters as f
from filters.test import BaseFilterTestCase
from iota import Address, Bundle, Iota, AsyncIota, Tag, Transaction, TryteString
from iota.adapter import MockAdapter, async_return
from iota.commands.extended.get_transfers import GetTransfersCommand, \
GetTransfersRequestFilter
from iota.crypto.types import Seed
from iota.filters import Trytes
from test import mock
from test import patch, MagicMock, async_test
class GetTransfersRequestFilterTestCase(BaseFilterTestCase):
    """
    Validates GetTransfersCommand's request filter: ``seed`` is required;
    ``start``, ``stop`` and ``inclusionStates`` are optional with defaults
    (0, None, False respectively).
    """
    filter_type = GetTransfersCommand(MockAdapter()).get_request_filter
    skip_value_check = True

    def setUp(self):
        super(GetTransfersRequestFilterTestCase, self).setUp()

        # Define a few tryte sequences that we can re-use between tests.
        self.seed = 'HELLOIOTA'

    def test_pass_happy_path(self):
        """
        Request is valid.
        """
        request = {
            'seed': Seed(self.seed),
            'start': 0,
            'stop': 10,
            'inclusionStates': True,
        }

        filter_ = self._filter(request)

        self.assertFilterPasses(filter_)
        self.assertDictEqual(filter_.cleaned_data, request)

    def test_pass_compatible_types(self):
        """
        The request contains values that can be converted to the expected
        types.
        """
        filter_ = self._filter({
            # ``seed`` can be any TrytesCompatible value.
            'seed': bytearray(self.seed.encode('ascii')),

            # These values must still be integers/bools, however.
            'start': 42,
            'stop': 86,
            'inclusionStates': True,
        })

        self.assertFilterPasses(filter_)
        self.assertDictEqual(
            filter_.cleaned_data,
            {
                'seed': Seed(self.seed),
                'start': 42,
                'stop': 86,
                'inclusionStates': True,
            },
        )

    def test_pass_optional_parameters_excluded(self):
        """
        The request contains only required parameters.
        """
        filter_ = self._filter({
            'seed': Seed(self.seed),
        })

        self.assertFilterPasses(filter_)
        # Defaults applied by the filter: start=0, stop=None,
        # inclusionStates=False.
        self.assertDictEqual(
            filter_.cleaned_data,
            {
                'seed': Seed(self.seed),
                'start': 0,
                'stop': None,
                'inclusionStates': False,
            }
        )

    def test_fail_empty_request(self):
        """
        The request is empty.
        """
        self.assertFilterErrors(
            {},

            {
                'seed': [f.FilterMapper.CODE_MISSING_KEY],
            },
        )

    def test_fail_unexpected_parameters(self):
        """
        The request contains unexpected parameters.
        """
        self.assertFilterErrors(
            {
                'seed': Seed(self.seed),

                # Your rules are really beginning to annoy me.
                'foo': 'bar',
            },

            {
                'foo': [f.FilterMapper.CODE_EXTRA_KEY],
            },
        )

    def test_fail_seed_null(self):
        """
        ``seed`` is null.
        """
        self.assertFilterErrors(
            {
                'seed': None,
            },

            {
                'seed': [f.Required.CODE_EMPTY],
            },
        )

    def test_fail_seed_wrong_type(self):
        """
        ``seed`` cannot be converted into a TryteString.
        """
        self.assertFilterErrors(
            {
                'seed': 42,
            },

            {
                'seed': [f.Type.CODE_WRONG_TYPE],
            },
        )

    def test_fail_seed_malformed(self):
        """
        ``seed`` has the correct type, but it contains invalid characters.
        """
        self.assertFilterErrors(
            {
                'seed': b'not valid; seeds can only contain uppercase and "9".',
            },

            {
                'seed': [Trytes.CODE_NOT_TRYTES],
            },
        )

    def test_fail_start_string(self):
        """
        ``start`` is a string.
        """
        self.assertFilterErrors(
            {
                # Not valid; it must be an int.
                'start': '0',

                'seed': Seed(self.seed),
            },

            {
                'start': [f.Type.CODE_WRONG_TYPE],
            },
        )

    def test_fail_start_float(self):
        """
        ``start`` is a float.
        """
        self.assertFilterErrors(
            {
                # Even with an empty fpart, floats are not valid.
                # It's gotta be an int.
                'start': 8.0,

                'seed': Seed(self.seed),
            },

            {
                'start': [f.Type.CODE_WRONG_TYPE],
            },
        )

    def test_fail_start_too_small(self):
        """
        ``start`` is less than 0.
        """
        self.assertFilterErrors(
            {
                'start': -1,
                'seed': Seed(self.seed),
            },

            {
                'start': [f.Min.CODE_TOO_SMALL],
            },
        )

    def test_fail_stop_string(self):
        """
        ``stop`` is a string.
        """
        self.assertFilterErrors(
            {
                # Not valid; it must be an int.
                'stop': '0',

                'seed': Seed(self.seed),
            },

            {
                'stop': [f.Type.CODE_WRONG_TYPE],
            },
        )

    def test_fail_stop_float(self):
        """
        ``stop`` is a float.
        """
        self.assertFilterErrors(
            {
                # Even with an empty fpart, floats are not valid.
                # It's gotta be an int.
                'stop': 8.0,

                'seed': Seed(self.seed),
            },

            {
                'stop': [f.Type.CODE_WRONG_TYPE],
            },
        )

    def test_fail_stop_too_small(self):
        """
        ``stop`` is less than 0.
        """
        self.assertFilterErrors(
            {
                'stop': -1,
                'seed': Seed(self.seed),
            },

            {
                'stop': [f.Min.CODE_TOO_SMALL],
            },
        )

    def test_fail_stop_occurs_before_start(self):
        """
        ``stop`` is less than ``start``.
        """
        # The interval error is reported against ``start``.
        self.assertFilterErrors(
            {
                'start': 1,
                'stop': 0,

                'seed': Seed(self.seed),
            },

            {
                'start': [GetTransfersRequestFilter.CODE_INTERVAL_INVALID],
            },
        )

    def test_fail_interval_too_large(self):
        """
        ``stop`` is way more than ``start``.
        """
        # An oversized interval is reported against ``stop``.
        self.assertFilterErrors(
            {
                'start': 0,
                'stop': GetTransfersRequestFilter.MAX_INTERVAL + 1,

                'seed': Seed(self.seed),
            },

            {
                'stop': [GetTransfersRequestFilter.CODE_INTERVAL_TOO_BIG],
            },
        )

    def test_fail_inclusion_states_wrong_type(self):
        """
        ``inclusionStates`` is not a boolean.
        """
        self.assertFilterErrors(
            {
                'inclusionStates': '1',

                'seed': Seed(self.seed),
            },

            {
                'inclusionStates': [f.Type.CODE_WRONG_TYPE],
            },
        )
class GetTransfersCommandTestCase(TestCase):
    """
    Exercises GetTransfersCommand end-to-end against a MockAdapter, mocking
    out address generation and the getBundles / getInclusionStates
    sub-commands.
    """
    def setUp(self):
        super(GetTransfersCommandTestCase, self).setUp()

        self.adapter = MockAdapter()
        self.command = GetTransfersCommand(self.adapter)

        # Define some tryte sequences we can re-use between tests.
        self.addy1 =\
            Address(
                b'TESTVALUEONE9DONTUSEINPRODUCTION99999YDZ'
                b'E9TAFAJGJA9CECKDAEPHBICDR9LHFCOFRBQDHC9IG'
            )

        self.addy2 =\
            Address(
                b'TESTVALUETWO9DONTUSEINPRODUCTION99999TES'
                b'GINEIDLEEHRAOGEBMDLENFDAFCHEIHZ9EBZDD9YHL'
            )

    def test_wireup(self):
        """
        Verify that the command is wired up correctly. (sync)

        The API method indeed calls the appropriate command.
        """
        with patch('iota.commands.extended.get_transfers.GetTransfersCommand.__call__',
                   MagicMock(return_value=async_return('You found me!'))
                   ) as mocked_command:

            api = Iota(self.adapter)

            # Don't need to call with proper args here.
            response = api.get_transfers()

            self.assertTrue(mocked_command.called)

            self.assertEqual(
                response,
                'You found me!'
            )

    @async_test
    async def test_wireup_async(self):
        """
        Verify that the command is wired up correctly. (async)

        The API method indeed calls the appropriate command.
        """
        with patch('iota.commands.extended.get_transfers.GetTransfersCommand.__call__',
                   MagicMock(return_value=async_return('You found me!'))
                   ) as mocked_command:

            api = AsyncIota(self.adapter)

            # Don't need to call with proper args here.
            response = await api.get_transfers()

            self.assertTrue(mocked_command.called)

            self.assertEqual(
                response,
                'You found me!'
            )

    @async_test
    async def test_full_scan(self):
        """
        Scanning the Tangle for all transfers.
        """
        # To speed up the test, we will mock the address generator.
        # :py:class:`iota.crypto.addresses.AddressGenerator` already has
        # its own test case, so this does not impact the stability of the
        # codebase.
        def create_generator(ag, start, step=1):
            for addy in [self.addy1, self.addy2][start::step]:
                yield addy

        # NOTE(review): seeded responses appear to be consumed in the order
        # the commands request them -- confirm against MockAdapter.
        # The first address received IOTA.
        self.adapter.seed_response(
            'findTransactions',

            {
                'duration': 42,

                'hashes': [
                    'TESTVALUEFIVE9DONTUSEINPRODUCTION99999VH'
                    'YHRHJETGYCAFZGABTEUBWCWAS9WF99UHBHRHLIOFJ',
                ],
            },
        )

        # The second address is unused. It has no transactions and was not spent from.
        self.adapter.seed_response(
            'findTransactions',

            {
                'duration': 1,
                'hashes': [],
            },
        )

        self.adapter.seed_response(
            'wereAddressesSpentFrom',

            {
                'states': [False],
            },
        )

        self.adapter.seed_response(
            'getTrytes',

            {
                'duration': 99,

                # Thankfully, we do not have to seed a realistic response for
                # ``getTrytes``, as we will be mocking the ``getBundles``
                # command that uses on it.
                'trytes': [''],
            },
        )

        bundle = Bundle([
            Transaction(
                address = self.addy1,
                timestamp = 1483033814,

                # These values are not relevant to the test.
                hash_ = None,
                signature_message_fragment = None,
                value = 42,
                tag = Tag(b''),
                current_index = 0,
                last_index = 0,
                bundle_hash = None,
                trunk_transaction_hash = None,
                branch_transaction_hash = None,
                attachment_timestamp = 1483033814,
                attachment_timestamp_lower_bound = 12,
                attachment_timestamp_upper_bound = 0,
                nonce = None,
            )
        ])

        mock_get_bundles =\
            mock.Mock(return_value=async_return({
                'bundles': [bundle],
            }))

        with mock.patch(
                'iota.crypto.addresses.AddressGenerator.create_iterator',
                create_generator,
        ):
            with mock.patch(
                    'iota.commands.extended.get_bundles.GetBundlesCommand._execute',
                    mock_get_bundles,
            ):
                response = await self.command(seed=Seed.random())

        self.assertDictEqual(
            response,

            {
                'bundles': [bundle],
            },
        )

    @async_test
    async def test_no_transactions(self):
        """
        There are no transactions for the specified seed.
        """
        # To speed up the test, we will mock the address generator.
        # :py:class:`iota.crypto.addresses.AddressGenerator` already has
        # its own test case, so this does not impact the stability of the
        # codebase.
        def create_generator(ag, start, step=1):
            for addy in [self.addy1][start::step]:
                yield addy

        self.adapter.seed_response(
            'findTransactions',

            {
                'duration': 1,
                'hashes': [],
            },
        )

        self.adapter.seed_response(
            'wereAddressesSpentFrom',

            {
                'states': [False],
            },
        )

        with mock.patch(
                'iota.crypto.addresses.AddressGenerator.create_iterator',
                create_generator,
        ):
            response = await self.command(seed=Seed.random())

        self.assertDictEqual(response, {'bundles': []})

    @async_test
    async def test_start(self):
        """
        Scanning the Tangle for all transfers, with start index.
        """
        def create_generator(ag, start, step=1):
            # Inject an invalid value into the generator, to ensure it is
            # skipped.
            for addy in [None, self.addy1, self.addy2][start::step]:
                yield addy

        # The first address received IOTA.
        self.adapter.seed_response(
            'findTransactions',

            {
                'duration': 42,

                'hashes': [
                    'TESTVALUEFIVE9DONTUSEINPRODUCTION99999VH'
                    'YHRHJETGYCAFZGABTEUBWCWAS9WF99UHBHRHLIOFJ',
                ],
            },
        )

        # The second address is unused. It has no transactions and was not spent from.
        self.adapter.seed_response(
            'findTransactions',

            {
                'duration': 1,
                'hashes': [],
            },
        )

        self.adapter.seed_response(
            'wereAddressesSpentFrom',

            {
                'states': [True],
            },
        )

        self.adapter.seed_response(
            'getTrytes',

            {
                'duration': 99,
                'trytes': [''],
            },
        )

        bundle = Bundle([
            Transaction(
                address = self.addy1,
                timestamp = 1483033814,

                # These values are not relevant to the test.
                hash_ = None,
                signature_message_fragment = None,
                value = 42,
                tag = Tag(b''),
                current_index = 0,
                last_index = 0,
                bundle_hash = None,
                trunk_transaction_hash = None,
                branch_transaction_hash = None,
                attachment_timestamp = 1483033814,
                attachment_timestamp_lower_bound = 12,
                attachment_timestamp_upper_bound = 0,
                nonce = None,
            )
        ])

        mock_get_bundles = mock.Mock(return_value=async_return({
            'bundles': [bundle],
        }))

        with mock.patch(
                'iota.crypto.addresses.AddressGenerator.create_iterator',
                create_generator,
        ):
            with mock.patch(
                    'iota.commands.extended.get_bundles.GetBundlesCommand._execute',
                    mock_get_bundles,
            ):
                response = await self.command(seed=Seed.random(), start=1)

        self.assertDictEqual(
            response,

            {
                'bundles': [bundle],
            },
        )

    @async_test
    async def test_stop(self):
        """
        Scanning the Tangle for all transfers, with stop index.
        """
        def create_generator(ag, start, step=1):
            # Inject an invalid value into the generator, to ensure it is
            # skipped.
            for addy in [self.addy1, None][start::step]:
                yield addy

        # The first address received IOTA.
        self.adapter.seed_response(
            'findTransactions',

            {
                'duration': 42,

                'hashes': [
                    'TESTVALUEFIVE9DONTUSEINPRODUCTION99999VH'
                    'YHRHJETGYCAFZGABTEUBWCWAS9WF99UHBHRHLIOFJ',
                ],
            },
        )

        self.adapter.seed_response(
            'getTrytes',

            {
                'duration': 99,
                'trytes': [''],
            },
        )

        bundle = Bundle([
            Transaction(
                address = self.addy1,
                timestamp = 1483033814,

                # These values are not relevant to the test.
                hash_ = None,
                signature_message_fragment = None,
                value = 42,
                tag = Tag(b''),
                current_index = 0,
                last_index = 0,
                bundle_hash = None,
                trunk_transaction_hash = None,
                branch_transaction_hash = None,
                attachment_timestamp = 1483033814,
                attachment_timestamp_lower_bound = 12,
                attachment_timestamp_upper_bound = 0,
                nonce = None,
            )
        ])

        mock_get_bundles = mock.Mock(return_value=async_return({
            'bundles': [bundle],
        }))

        with mock.patch(
                'iota.crypto.addresses.AddressGenerator.create_iterator',
                create_generator,
        ):
            with mock.patch(
                    'iota.commands.extended.get_bundles.GetBundlesCommand._execute',
                    mock_get_bundles,
            ):
                response = await self.command(seed=Seed.random(), stop=1)

        self.assertDictEqual(
            response,

            {
                'bundles': [bundle],
            },
        )

    @async_test
    async def test_get_inclusion_states(self):
        """
        Fetching inclusion states with transactions.
        """
        def create_generator(ag, start, step=1):
            for addy in [self.addy1][start::step]:
                yield addy

        # The first address received IOTA.
        self.adapter.seed_response(
            'findTransactions',

            {
                'duration': 42,

                'hashes': [
                    'TESTVALUEFIVE9DONTUSEINPRODUCTION99999VH'
                    'YHRHJETGYCAFZGABTEUBWCWAS9WF99UHBHRHLIOFJ',
                ],
            },
        )

        # For this test, we have to generate a real TryteString.
        transaction_trytes =\
            TryteString(
                b'KMYUMNEUAYODAQSNGWTAERRRHNZBZCOLMVVOBTVWLOFYCJKYMGRAMH9RQ9MTZOSZMH'
                b'QNZFHFEJEDFQ99HSUNVOTULDJGXEDULS9ZHABVDZODJUMCNWVCPNSCUVKVYWCEXBHW'
                b'RBZBSWFPQLWZWMUPGQIGAEGOVE9DDXBVCIPKQYCFZFBELTSMVFSIXLPTACTKAFMCTK'
                b'CPYD9BWDJMLKWAOBDSJNQYAHS9GFIQKZCROLFZJVUEIVXVNBRRLEIWTYVHURUXHSCG'
                b'DKEIEGPOCXKCYWIBUG9ABYCALYJVFLBNGMS9ARHGTQXBZFLENXCJVKHPVKD9KSAEOL'
                b'FFVAJCNKLDVHOCDARWUNKARDYMVKFKRSMUTYOUXSBFFYTKRREBDJZTLVUROQFCBXQN'
                b'SXDDYTZTEBRSXOBMLXHJKSJAVOOVCXATOWNQDWHT9CCUAAJUJKDOQLMAEZACSNFKXZ'
                b'IGWDQEUEFRZYAOSDNVMSXWYLVDAUXZSHNHAIBEMNPFUGORYUETNJK9UCEMSUJYBBDK'
                b'BHIPKEINQCGOVYCPKUPJMUCUVZOJSIWYRFMFXYUVSMOUALAQBWIMXBUBXSAETGKJRP'
                b'AHVAXHQJDMEVSRFYEXUSIEBKMGYCUKFD9JPGUV9AIYUVCRUURKMYUHMVE9OJCYYWTQ'
                b'WUWFMTBZYFXASHHVCMSWXKBRQFHHQVEQMEULJRWZKLWFFSGGKEHUZZFNDNITSRAUH9'
                b'PQK9OGLYMVBSHXQLLZHOBBIM9KVUWDLHZRDKQQVLQXGWYXEEVQPDZUO9PVXMALOMRQ'
                b'VCTHGIZLILSCFKTBRESYZGBZKHXEODNDJZ9GK9ROWYXNGFHZCCBHHZEYEOGWXRGSUD'
                b'SUZFUAUBXVXZHCUVJSYBWTCYCEDYKZNGWFZYKSQLW9FUYMWDVXKZEWT9SCVMQCODZK'
                b'DRNKTINTPNOJOLGQJDAJMFWRFSWZJLYZGSTSIDSXLUJBZRZNLEDNBKAUNGTCYUPDRW'
                b'JOCEBQ9YG9IZLLRMJITISJOTLQMOGXVQIZXHMTJVMMWM9FOIOT9KFZMANEPOEOV9HX'
                b'JNEGURUKRWDGYNPVGAWMWQVABIJNL9MDXKONEPMYACOZ9BE9UZMAFTKYWPFWIQWAPK'
                b'GUXQTOQVWYYVZYGQDLBIQDVOZIWGOMGOBAUARICQZVNXD9UVEFBBAJKQBHRHXTBUOW'
                b'VBFKYQWZWTMMXVKZRIZUBVPQ9XHLJHFHWFZUIZVSNAKBDHDFGJCYQETOMEDTOXIUT9'
                b'OAJVIHWAGTCNPEZTERMMN9EZEWSJHKQAUMXPBZTNQOEQCVXIMAAYO9NIUFLTCFIMK9'
                b'9AFAGWJFA9VOFPUDJLRAMORGSUDBLWWKXEDZ9XPQUZSGANGESHKKGGQSGSYDCRLHZD'
                b'PKA9HKYBKLKKCXYRQQIPXCFETJJDZYPCLUNHGBKEJDRCIHEXKCQQNOV9QFHLGFXOCR'
                b'HPAFCUTPMY9NOZVQHROYJSCMGRSVMOBWADAZNFIAHWGIQUUZBOVODSFAUNRTXSDU9W'
                b'EIRBXQNRSJXFRAQGHA9DYOQJGLVZUJKAQ9CTUOTT9ZKQOQNNLJDUPDXZJYPRCVLRZT'
                b'UCZPNBREYCCKHK9FUWGITAJATFPUOFLZDHPNJYUTXFGNYJOBRD9BVHKZENFXIUYDTL'
                b'CE9JYIIYMXMCXMWTHOLTQFKFHDLVPGMQNITEUXSYLAQULCZOJVBIPYP9M9X9QCNKBX'
                b'W9DVJEQFFY9KQVMKNVTAHQVRXUKEM9FZOJLHAGEECZBUHOQFZOSPRXKZOCCKAOHMSV'
                b'QCFG9CWAHKVWNA9QTLYQI9NKOSHWJCNGPJBLEQPUIWJBIOAWKLBXUCERTSL9FVCLYN'
                b'ADPYTPKJOIEMAQGWBVGSRCZINXEJODUDCT9FHOUMQM9ZHRMBJYSOMPNMEAJGEHICJI'
                b'PVXRKCYX9RZVT9TDZIMXGZJAIYJRGIVMSOICSUINRBQILMJOUQYXCYNJ9WGGJFHYTU'
                b'LWOIPUXXFNTIFNOJRZFSQQNAWBQZOLHHLVGHEPWTKKQEVIPVWZUN9ZBICZ9DZZBVII'
                b'BF9EPHARZJUFJGBQXQFQIBUECAWRSEKYJNYKNSVBCOWTFBZ9NAHFSAMRBPEYGPRGKW'
                b'WTWACZOAPEOECUO9OTMGABJVAIICIPXGSXACVINSYEQFTRCQPCEJXZCY9XZWVWVJRZ'
                b'CYEYNFUUBKPWCHICGJZXKE9GSUDXZYUAPLHAKAHYHDXNPHENTERYMMBQOPSQIDENXK'
                b'LKCEYCPVTZQLEEJVYJZV9BWU999999999999999999999999999FFL999999999999'
                b'9999999999999RJQGVD99999999999A99999999USGBXHGJUEWAUAKNPPRHJXDDMQV'
                b'YDSYZJSDWFYLOQVFGBOSLE9KHFDLDYHUYTXVSFAFCOCLQUHJXTEIQRNBTLHEGJFGVF'
                b'DJCE9IKAOCSYHLCLWPVVNWNESKLYAJG9FGGZOFXCEYOTWLVIJUHGY9QCU9FMZJY999'
                b'9999HYBUYQKKRNAVDPVGYBTVDZ9SVQBLCCVLJTPEQWWOIG9CQZIFQKCROH9YHUCNJT'
                b'SYPBVZVBNESX999999D9TARGPQTNIYRZURQGVHCAWEDRBJIIEJIUZYENVE9LLJQMXH'
                b'GSUUYUCPSOWBCXVFDCHHAZUDC9LUODYWO'
            )

        self.adapter.seed_response(
            'getTrytes',

            {
                'duration': 99,
                'trytes': [bytes(transaction_trytes)],
            },
        )

        transaction = Transaction.from_tryte_string(transaction_trytes)

        mock_get_bundles = mock.Mock(return_value=async_return({
            'bundles': [Bundle([transaction])],
        }))

        mock_get_inclusion_states = mock.Mock(return_value=async_return({
            'states': [True],
        }))

        with mock.patch(
                'iota.crypto.addresses.AddressGenerator.create_iterator',
                create_generator,
        ):
            with mock.patch(
                    'iota.commands.extended.get_bundles.GetBundlesCommand._execute',
                    mock_get_bundles,
            ):
                with mock.patch(
                        'iota.commands.core.get_inclusion_states.GetInclusionStatesCommand._execute',
                        mock_get_inclusion_states,
                ):
                    response = await self.command(
                        seed = Seed.random(),
                        inclusionStates = True,

                        # To keep the test focused, only retrieve a single
                        # transaction.
                        start = 0,
                        stop = 1,
                    )

        bundle = response['bundles'][0]  # type: Bundle
        # The mocked inclusion state [True] marks the transaction confirmed.
        self.assertTrue(bundle[0].is_confirmed)
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for data processing in struct2depth readers."""
from typing import Text
import tensorflow.compat.v1 as tf
def read_image_as_float_tensor(image_filepath):
  """Returns a 3-channel float Tensor of the specified image.

  Args:
    image_filepath: A local valid filepath. Supported types BMP, GIF (only
      first image is taken from an animated GIF), JPEG, and PNG,
  """
  raw_bytes = tf.io.read_file(image_filepath)
  # decode_image dispatches to the right per-format decoder. The BMP/JPEG/PNG
  # decoders yield rank-3 [height, width, num_channels] arrays; decode_gif
  # normally yields rank-4 [num_frames, height, width, 3], so
  # expand_animations=False truncates animated GIFs to the first frame and
  # thereby enforces tensor rank 3. channels=3 drops any PNG alpha channel.
  rgb_image = tf.image.decode_image(
      raw_bytes, channels=3, expand_animations=False)
  # Pin the static shape so 'tf.keras.initializers.*' can infer it.
  rgb_image.set_shape([None, None, 3])
  # convert_image_dtype rescales to the [0.0, 1.0] range before casting.
  return tf.image.convert_image_dtype(rgb_image, dtype=tf.float32)
def read_image_grayscale(image_filepath):
  """Returns a 1-channel uint8 Tensor of the specified image.

  Args:
    image_filepath: A local valid filepath.
  """
  encoded = tf.io.read_file(image_filepath)
  # decode_image might return a 4-dimensional shape (animated GIFs), so the
  # static shape is pinned to rank 3 below.
  grayscale = tf.image.decode_image(encoded, channels=1)
  grayscale.set_shape([None, None, 1])
  return grayscale
def read_image_validity_mask(image_filepath):
  """Returns a 1-channel binary Tensor(int32) of the specified image.

  Args:
    image_filepath: A local valid filepath.
  """
  mask_uint8 = read_image_grayscale(image_filepath)
  # TPUs do not support uint8 images, so re-encode as int32.
  mask = tf.cast(mask_uint8, dtype=tf.int32)
  # Validity masks weight the loss so only valid pixels contribute:
  # valid_loss_per_pixel = loss_per_pixel * validity_mask. Clamp to {0, 1}.
  return tf.math.minimum(mask, 1)
def crop_egomotion(egomotion, offset_height, offset_width, target_height,
                   target_width):
  """Transforms camera egomotion when the image is cropped.

  Cropping does not change the motion between camera frames, so the
  egomotion matrix is returned unchanged.

  Args:
    egomotion: a 2-d transformation matrix.
    offset_height: amount of offset in y direction.
    offset_width: amount of offset in x direction.
    target_height: target height of images.
    target_width: target width of images.

  Returns:
    A 2-d transformation matrix.
  """
  # Crop parameters have no effect on egomotion.
  del offset_height, offset_width, target_height, target_width  # unused
  return egomotion
def crop_intrinsics(intrinsics, offset_height, offset_width, target_height,
                    target_width):
  """Crops camera intrinsics based on target image dimensions and offset.

  Args:
    intrinsics: 1-d array containing w, h, fx, fy, x0, y0.
    offset_height: amount of offset in y direction.
    offset_width: amount of offset in x direction.
    target_height: target height of images.
    target_width: target width of images.

  Returns:
    A 1-d tensor containing the adjusted camera intrinsics.
  """
  with tf.name_scope('crop_intrinsics'):
    width, height, focal_x, focal_y, center_x, center_y = tf.unstack(intrinsics)
    # The principal point shifts by the crop offset; focal lengths are
    # unchanged by cropping.
    center_x = center_x - tf.cast(offset_width, tf.float32)
    center_y = center_y - tf.cast(offset_height, tf.float32)
    new_width = tf.cast(target_width, tf.float32)
    new_height = tf.cast(target_height, tf.float32)
    return tf.stack(
        (new_width, new_height, focal_x, focal_y, center_x, center_y))
def crop_image(image, offset_height, offset_width, target_height, target_width):
  """Crops an image represented as a tensor.

  Args:
    image: an image represented as a (height, width, channels)-tensor.
    offset_height: amount of offset in y direction.
    offset_width: amount of offset in x direction.
    target_height: target height of images.
    target_width: target width of images.

  Returns:
    A cropped image represented as a (height, width, channels)-tensor.

  Raises:
    ValueError: Image tensor has incorrect rank.
  """
  with tf.name_scope('crop_image'):
    # Only single images (rank 3) are supported; batches must be cropped
    # element-wise by the caller.
    if image.shape.rank != 3:
      raise ValueError('Rank of endpoint is %d. Must be 3.' %
                       (image.shape.rank))
    return tf.image.crop_to_bounding_box(image, offset_height, offset_width,
                                         target_height, target_width)
def resize_egomotion(egomotion, target_size):
  """Transforms camera egomotion when the image is resized.

  Resizing only rescales the intrinsics; the relative camera motion is
  unchanged, so the egomotion is passed through as-is.

  Args:
    egomotion: a 2-d transformation matrix.
    target_size: target size, a tuple of (height, width).

  Returns:
    A 2-d transformation matrix.
  """
  del target_size  # unused
  return egomotion
def resize_intrinsics(intrinsics, target_size):
  """Transforms camera intrinsics when image is resized.

  Focal lengths and the principal point are scaled by the resize factors,
  and the stored image dimensions are replaced by the target size.

  Args:
    intrinsics: 1-d array containing w, h, fx, fy, x0, y0.
    target_size: target size, a tuple of (height, width).

  Returns:
    A 1-d tensor containing the adjusted camera intrinsics.
  """
  with tf.name_scope('resize_intrinsics'):
    w, h, fx, fy, x0, y0 = tf.unstack(intrinsics)
    def float_div(a, b):
      return tf.cast(a, tf.float32) / tf.cast(b, tf.float32)
    xfactor = float_div(target_size[1], w)
    yfactor = float_div(target_size[0], h)
    fx *= xfactor
    fy *= yfactor
    x0 *= xfactor
    y0 *= yfactor
    # Cast to float32 so every element stacked below has the same dtype.
    # Previously raw Python ints were stacked together with float32 tensors,
    # which tf.stack rejects; this also matches crop_intrinsics above.
    w = tf.cast(target_size[1], tf.float32)
    h = tf.cast(target_size[0], tf.float32)
    return tf.stack((w, h, fx, fy, x0, y0))
def resize_area(image, size):
  """Area-resizes a single image given as a rank-3 tensor.

  Args:
    image: an image represented as a (height, width, channels)-tensor.
    size: A tuple of two integers, the target (height, width).

  Returns:
    The resized image as a (height, width, channels)-tensor.
  """
  return _apply_on_one_image(tf.image.resize_area, image, size)
def resize_nearest_neighbor(image, size):
  """Nearest-neighbor-resizes a single image given as a rank-3 tensor.

  Args:
    image: an image represented as a (height, width, channels)-tensor.
    size: A tuple of two integers, the target (height, width).

  Returns:
    The resized image as a (height, width, channels)-tensor.
  """
  return _apply_on_one_image(tf.image.resize_nearest_neighbor, image, size)
def flip_egomotion(egomotion):
  """Transforms camera egomotion when the image is flipped horizontally.

  Flipping the image maps pixel column px to (w - 1 - px), where w is the
  image width. When the optical center sits exactly at the image center
  (x0 == (w - 1) / 2), this is equivalent to negating the camera-frame x
  axis, which is what conjugating the egomotion by diag(-1, 1, 1, 1) below
  does. For an off-center optical center there is an additional small,
  intrinsics-dependent translation correction, which is deliberately
  ignored here.

  Args:
    egomotion: a 2-d transformation matrix.

  Returns:
    A 2-d transformation matrix.
  """
  with tf.name_scope('flip_egomotion'):
    x_negation = tf.constant(
        [[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
        dtype=tf.float32)
    return tf.matmul(tf.matmul(x_negation, egomotion), x_negation)
def flip_intrinsics(intrinsics):
  """Flips camera intrinsics when the image is flipped horizontally.

  Args:
    intrinsics: 1-d array containing w, h, fx, fy, x0, y0.

  Returns:
    A 1-d tensor containing the adjusted camera intrinsics.
  """
  with tf.name_scope('flip_intrinsics'):
    w, h, fx, fy, x0, y0 = tf.unstack(intrinsics)
    # Mirror the principal point.
    # NOTE(review): an exact horizontal flip maps x0 to (w - 1 - x0) and
    # leaves y0 unchanged; this code uses (w - x0) and also mirrors y0 —
    # confirm this is intentional (e.g. if the same helper is reused for
    # vertical flips).
    x0 = w - x0
    y0 = h - y0
    return tf.stack((w, h, fx, fy, x0, y0))
def flip_left_right(image):
  """Horizontally mirrors a single image given as a rank-3 tensor.

  Args:
    image: an image represented as a (height, width, channels)-tensor.

  Returns:
    The flipped image as a (height, width, channels)-tensor.
  """
  return _apply_on_one_image(tf.image.flip_left_right, image)
def _apply_on_one_image(fn, image, *args, **kwargs):
  """Applies a batch-image function to a single (rank-3) image.

  Args:
    fn: A function that receives a batch of images as a first argument
      (rank 4), and other args and kwargs.
    image: A tensor of rank 3 (height, width, channels) representing an image.
    *args: Arguments to pass to fn
    **kwargs: Keyword arguments to pass to fn

  Returns:
    The result of `fn` when applied on `image`, after adding a batch
    dimension to `image` and removing it from the result.
  """
  with tf.name_scope('apply_on_one_image'):
    image = tf.convert_to_tensor(image)
    if image.shape.rank != 3:
      raise ValueError('Rank of endpoint is %d. Must be 3.' %
                       image.shape.rank)
    # Add a singleton batch dimension, run the batch op, then strip it again.
    batched = tf.expand_dims(image, axis=0)
    result = fn(batched, *args, **kwargs)
    return tf.squeeze(result, axis=0)
| |
# Lint as: python2, python3
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with
"--update_goldens" flag set to "True" to update goldens when making changes to
the public TF python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import six
from six.moves import range
import tensorflow as tf
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# pylint: disable=g-import-not-at-top,unused-import
_TENSORBOARD_AVAILABLE = True
try:
import tensorboard as _tb
except ImportError:
_TENSORBOARD_AVAILABLE = False
# pylint: enable=g-import-not-at-top,unused-import
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
# DEFINE_boolean, only_test_core_api, default False:
_ONLY_TEST_CORE_API_HELP = """
Some TF APIs are being moved outside of the tensorflow/ directory. There is
no guarantee which versions of these APIs will be present when running this
test. Therefore, do not error out on API changes in non-core TF code
if this flag is set.
"""
# DEFINE_boolean, verbose_diffs, default True:
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""
_API_GOLDEN_FOLDER_V1 = resource_loader.get_path_to_datafile('../golden/v1')
_API_GOLDEN_FOLDER_V2 = resource_loader.get_path_to_datafile('../golden/v2')
_TEST_README_FILE = resource_loader.get_path_to_datafile('README.txt')
_UPDATE_WARNING_FILE = resource_loader.get_path_to_datafile(
'API_UPDATE_WARNING.txt')
_NON_CORE_PACKAGES = ['estimator']
# TODO(annarev): remove this once we test with newer version of
# estimator that actually has compat v1 version.
if not hasattr(tf.compat.v1, 'estimator'):
tf.compat.v1.estimator = tf.estimator
tf.compat.v2.estimator = tf.estimator
def _KeyToFilePath(key, api_version):
  """From a given key, construct a filepath.

  Filepath will be inside golden folder for api_version.

  Args:
    key: a string used to determine the file path
    api_version: a number indicating the tensorflow API version, e.g. 1 or 2.

  Returns:
    A string of file path to the pbtxt file which describes the public API
  """
  def _ReplaceCapsWithDash(matchobj):
    return '-%s' % (matchobj.group(0).lower())
  # CamelCase key segments become dash-separated lowercase file names.
  case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash,
                                six.ensure_str(key))
  if api_version == 2:
    api_folder = _API_GOLDEN_FOLDER_V2
  else:
    api_folder = _API_GOLDEN_FOLDER_V1
  return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key)
def _FileNameToKey(filename):
  """From a given filename, construct a key we use for api objects."""
  def _ReplaceDashWithCaps(matchobj):
    # '-x' -> 'X': undo the dash-lowering done by _KeyToFilePath.
    return matchobj.group(0)[1].upper()
  base_without_ext = os.path.splitext(os.path.basename(filename))[0]
  return re.sub('((-[a-z]){1})', _ReplaceDashWithCaps,
                six.ensure_str(base_without_ext))
def _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children):
  """A Visitor that crashes on subclasses of generated proto classes."""
  # Only proto Message subclasses (other than Message itself) are of interest.
  is_proto_class = isinstance(parent, type) and issubclass(
      parent, message.Message)
  if not is_proto_class or parent is message.Message:
    return
  # Only direct subclasses of Message are supported by the API tools.
  if message.Message not in parent.__bases__:
    raise NotImplementedError(
        'Object tf.%s is a subclass of a generated proto Message. '
        'They are not yet supported by the API tools.' % path)
def _FilterNonCoreGoldenFiles(golden_file_list):
  """Filter out non-core API pbtxt files."""
  excluded_prefixes = ['tensorflow.%s.' % p for p in _NON_CORE_PACKAGES]
  def _IsCore(f):
    basename = six.ensure_str(f).rsplit('/')[-1]
    return not any(basename.startswith(pre) for pre in excluded_prefixes)
  return [f for f in golden_file_list if _IsCore(f)]
def _FilterGoldenProtoDict(golden_proto_dict, omit_golden_symbols_map):
  """Filter out golden proto dict symbols that should be omitted."""
  if not omit_golden_symbols_map:
    return golden_proto_dict
  filtered_proto_dict = dict(golden_proto_dict)
  for key, omitted_names in six.iteritems(omit_golden_symbols_map):
    # Deep-copy the proto so the caller's dict entries are never mutated.
    api_object = api_objects_pb2.TFAPIObject()
    api_object.CopyFrom(filtered_proto_dict[key])
    filtered_proto_dict[key] = api_object
    if api_object.HasField('tf_module'):
      module_or_class = api_object.tf_module
    elif api_object.HasField('tf_class'):
      module_or_class = api_object.tf_class
    else:
      continue
    for members in (module_or_class.member, module_or_class.member_method):
      kept = [m for m in members if m.name not in omitted_names]
      # Protobuf repeated fields disallow slice assignment, so clear the
      # field and re-extend it instead.
      del members[:]
      members.extend(kept)
  return filtered_proto_dict
class ApiCompatibilityTest(test.TestCase):
  """Compares the live public TF API against checked-in golden .pbtxt files."""
  def __init__(self, *args, **kwargs):
    super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
    # Pre-load the warning/readme texts that are logged when diffs are found.
    golden_update_warning_filename = os.path.join(
        resource_loader.get_root_dir_with_all_resources(), _UPDATE_WARNING_FILE)
    self._update_golden_warning = file_io.read_file_to_string(
        golden_update_warning_filename)
    test_readme_filename = os.path.join(
        resource_loader.get_root_dir_with_all_resources(), _TEST_README_FILE)
    self._test_readme_message = file_io.read_file_to_string(
        test_readme_filename)
  def _AssertProtoDictEquals(self,
                             expected_dict,
                             actual_dict,
                             verbose=False,
                             update_goldens=False,
                             additional_missing_object_message='',
                             api_version=2):
    """Diff given dicts of protobufs and report differences a readable way.

    Args:
      expected_dict: a dict of TFAPIObject protos constructed from golden
        files.
      actual_dict: a dict of TFAPIObject protos constructed by reading from
        the TF package linked to the test.
      verbose: Whether to log the full diffs, or simply report which files
        were different.
      update_goldens: Whether to update goldens when there are diffs found.
      additional_missing_object_message: Message to print when a symbol is
        missing.
      api_version: TensorFlow API version to test.
    """
    diffs = []
    verbose_diffs = []
    expected_keys = set(expected_dict.keys())
    actual_keys = set(actual_dict.keys())
    only_in_expected = expected_keys - actual_keys
    only_in_actual = actual_keys - expected_keys
    all_keys = expected_keys | actual_keys
    # This will be populated below.
    updated_keys = []
    for key in all_keys:
      diff_message = ''
      verbose_diff_message = ''
      # First check if the key is not found in one or the other.
      if key in only_in_expected:
        diff_message = 'Object %s expected but not found (removed). %s' % (
            key, additional_missing_object_message)
        verbose_diff_message = diff_message
      elif key in only_in_actual:
        diff_message = 'New object %s found (added).' % key
        verbose_diff_message = diff_message
      else:
        # Do not truncate diff
        self.maxDiff = None  # pylint: disable=invalid-name
        # Now we can run an actual proto diff.
        try:
          self.assertProtoEquals(expected_dict[key], actual_dict[key])
        except AssertionError as e:
          updated_keys.append(key)
          diff_message = 'Change detected in python object: %s.' % key
          verbose_diff_message = str(e)
      # All difference cases covered above. If any difference found, add to the
      # list.
      if diff_message:
        diffs.append(diff_message)
        verbose_diffs.append(verbose_diff_message)
    # If diffs are found, handle them based on flags.
    if diffs:
      diff_count = len(diffs)
      logging.error(self._test_readme_message)
      logging.error('%d differences found between API and golden.', diff_count)
      messages = verbose_diffs if verbose else diffs
      for i in range(diff_count):
        print('Issue %d\t: %s' % (i + 1, messages[i]), file=sys.stderr)
      if update_goldens:
        # Write files if requested.
        logging.warning(self._update_golden_warning)
        # If the keys are only in expected, some objects are deleted.
        # Remove files.
        for key in only_in_expected:
          filepath = _KeyToFilePath(key, api_version)
          file_io.delete_file(filepath)
        # If the files are only in actual (current library), these are new
        # modules. Write them to files. Also record all updates in files.
        for key in only_in_actual | set(updated_keys):
          filepath = _KeyToFilePath(key, api_version)
          file_io.write_string_to_file(
              filepath, text_format.MessageToString(actual_dict[key]))
      else:
        # Fail if we cannot fix the test by updating goldens.
        self.fail('%d differences found between API and golden.' % diff_count)
    else:
      logging.info('No differences found between API and golden.')
  def testNoSubclassOfMessage(self):
    # Verifies no public symbol indirectly subclasses a generated proto class.
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    # Skip compat.v1 and compat.v2 since they are validated in separate tests.
    visitor.private_map['tf.compat'] = ['v1', 'v2']
    traverse.traverse(tf, visitor)
  def testNoSubclassOfMessageV1(self):
    # Same check, restricted to the tf.compat.v1 namespace.
    if not hasattr(tf.compat, 'v1'):
      return
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    if FLAGS.only_test_core_api:
      visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    visitor.private_map['tf.compat'] = ['v1', 'v2']
    traverse.traverse(tf.compat.v1, visitor)
  def testNoSubclassOfMessageV2(self):
    # Same check, restricted to the tf.compat.v2 namespace.
    if not hasattr(tf.compat, 'v2'):
      return
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    if FLAGS.only_test_core_api:
      visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    visitor.private_map['tf.compat'] = ['v1', 'v2']
    traverse.traverse(tf.compat.v2, visitor)
  def _checkBackwardsCompatibility(self,
                                   root,
                                   golden_file_pattern,
                                   api_version,
                                   additional_private_map=None,
                                   omit_golden_symbols_map=None):
    # Traverses `root`, builds a proto per public symbol, loads the golden
    # protos matching `golden_file_pattern`, and diffs the two dicts.
    # Extract all API stuff.
    visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
    public_api_visitor = public_api.PublicAPIVisitor(visitor)
    public_api_visitor.private_map['tf'].append('contrib')
    if api_version == 2:
      public_api_visitor.private_map['tf'].append('enable_v2_behavior')
    public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
    if FLAGS.only_test_core_api:
      public_api_visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    if additional_private_map:
      public_api_visitor.private_map.update(additional_private_map)
    traverse.traverse(root, public_api_visitor)
    proto_dict = visitor.GetProtos()
    # Read all golden files.
    golden_file_list = file_io.get_matching_files(golden_file_pattern)
    if FLAGS.only_test_core_api:
      golden_file_list = _FilterNonCoreGoldenFiles(golden_file_list)
    def _ReadFileToProto(filename):
      """Read a filename, create a protobuf from its contents."""
      ret_val = api_objects_pb2.TFAPIObject()
      text_format.Merge(file_io.read_file_to_string(filename), ret_val)
      return ret_val
    golden_proto_dict = {
        _FileNameToKey(filename): _ReadFileToProto(filename)
        for filename in golden_file_list
    }
    golden_proto_dict = _FilterGoldenProtoDict(golden_proto_dict,
                                               omit_golden_symbols_map)
    # Diff them. Do not fail if called with update.
    # If the test is run to update goldens, only report diffs but do not fail.
    self._AssertProtoDictEquals(
        golden_proto_dict,
        proto_dict,
        verbose=FLAGS.verbose_diffs,
        update_goldens=FLAGS.update_goldens,
        api_version=api_version)
  def testAPIBackwardsCompatibility(self):
    # Checks the default `tf` namespace against the goldens matching the
    # linked TF major version.
    api_version = 1
    if hasattr(tf, '_major_api_version') and tf._major_api_version == 2:
      api_version = 2
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    omit_golden_symbols_map = {}
    if (api_version == 2 and FLAGS.only_test_core_api
        and not _TENSORBOARD_AVAILABLE):
      # In TF 2.0 these summary symbols are imported from TensorBoard.
      omit_golden_symbols_map['tensorflow.summary'] = [
          'audio', 'histogram', 'image', 'scalar', 'text']
    self._checkBackwardsCompatibility(
        tf,
        golden_file_pattern,
        api_version,
        # Skip compat.v1 and compat.v2 since they are validated
        # in separate tests.
        additional_private_map={'tf.compat': ['v1', 'v2']},
        omit_golden_symbols_map=omit_golden_symbols_map)
    # Check that V2 API does not have contrib
    self.assertTrue(api_version == 1 or not hasattr(tf, 'contrib'))
  def testAPIBackwardsCompatibilityV1(self):
    # Checks tf.compat.v1 against the v1 goldens.
    api_version = 1
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    self._checkBackwardsCompatibility(
        tf.compat.v1, golden_file_pattern, api_version,
        additional_private_map={
            'tf': ['pywrap_tensorflow'],
            'tf.compat': ['v1', 'v2'],
        },
        omit_golden_symbols_map={'tensorflow': ['pywrap_tensorflow']})
  def testAPIBackwardsCompatibilityV2(self):
    # Checks tf.compat.v2 against the v2 goldens.
    api_version = 2
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    omit_golden_symbols_map = {}
    if FLAGS.only_test_core_api and not _TENSORBOARD_AVAILABLE:
      # In TF 2.0 these summary symbols are imported from TensorBoard.
      omit_golden_symbols_map['tensorflow.summary'] = [
          'audio', 'histogram', 'image', 'scalar', 'text']
    self._checkBackwardsCompatibility(
        tf.compat.v2,
        golden_file_pattern,
        api_version,
        additional_private_map={'tf.compat': ['v1', 'v2']},
        omit_golden_symbols_map=omit_golden_symbols_map)
if __name__ == '__main__':
  def _str_to_bool(value):
    """Parses a boolean flag value from its string spelling.

    argparse's `type=bool` is a trap: bool('False') is True because any
    non-empty string is truthy, so e.g. `--update_goldens False` would have
    enabled golden updates. Parse the accepted spellings explicitly, keeping
    the documented `--flag True/False` CLI usage working.
    """
    return six.ensure_str(value).lower() in ('true', 't', '1', 'yes')
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--update_goldens', type=_str_to_bool, default=False,
      help=_UPDATE_GOLDENS_HELP)
  # TODO(mikecase): Create Estimator's own API compatibility test or
  # a more general API compatibility test for use for TF components.
  parser.add_argument(
      '--only_test_core_api',
      type=_str_to_bool,
      default=True,  # only_test_core_api default value
      help=_ONLY_TEST_CORE_API_HELP)
  parser.add_argument(
      '--verbose_diffs', type=_str_to_bool, default=True,
      help=_VERBOSE_DIFFS_HELP)
  FLAGS, unparsed = parser.parse_known_args()
  # Now update argv, so that unittest library does not get confused.
  sys.argv = [sys.argv[0]] + unparsed
  test.main()
| |
#---------------------------------------------------------------------------
# Copyright 2013-2019 The Open Source Electronic Health Record Alliance
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
from __future__ import division
from builtins import object
from past.utils import old_div
import argparse
import codecs
import glob
import os
import re
import shutil
import stat
import sys
import tempfile
from LoggerManager import logger, initConsoleLogging, initFileLogging
""" constants """
EXTERNAL_DATA_SIZE_THRESHOLD = 1*1024*1024 # 1 MiB
EXTERNAL_DATA_PREFIX = ".ExternalData_SHA1_"
IGNORE_FILE_LIST = ("CMakeLists.txt")
VALID_KIDS_BUILD_SUFFIX_LIST = (".KIDs", "KIDS", ".KID", ".kids", ".kid")
VALID_PATCH_INFO_SUFFIX_LIST = (".TXTs",".TXT",".txt","txts")
VALID_CSV_FILE_SUFFIX_LIST = (".csv",".CSV")
VALID_GLOBAL_FILE_SUFFIX_LIST = (".GBLs", ".GBL")
VALID_ROUTINE_IMPORT_FILE_SUFFIX_LIST = (".RSA", "rsa", ".RO", ".ro")
VALID_HEADER_FILE_SUFFIX_LIST = (".json",".JSON")
VALID_SHA1_FILE_SUFFIX_LIST = (".SHA1",".sha1")
VALID_KIDS_BUILD_HEADER_SUFFIX_LIST = tuple(
[x+y for x in VALID_KIDS_BUILD_SUFFIX_LIST for y in VALID_HEADER_FILE_SUFFIX_LIST]
)
VALID_KIDS_BUILD_SHA1_SUFFIX_LIST = tuple(
[x+y for x in VALID_KIDS_BUILD_SUFFIX_LIST for y in VALID_SHA1_FILE_SUFFIX_LIST]
)
VALID_PATCH_INFO_SHA1_SUFFIX_LIST = tuple(
[x+y for x in VALID_PATCH_INFO_SUFFIX_LIST for y in VALID_SHA1_FILE_SUFFIX_LIST]
)
VALID_GLOBAL_SHA1_SUFFIX_LIST = tuple(
[x+y for x in VALID_GLOBAL_FILE_SUFFIX_LIST for y in VALID_SHA1_FILE_SUFFIX_LIST]
)
VALID_ROUTINE_SHA1_SUFFIX_LIST = tuple(
[x+y for x in VALID_ROUTINE_IMPORT_FILE_SUFFIX_LIST for y in VALID_SHA1_FILE_SUFFIX_LIST]
)
"""
Utilities functions to check if it is valid file type extension
"""
def isValidSha1Suffix(fileName):
return fileName.endswith(VALID_SHA1_FILE_SUFFIX_LIST)
def isValidPythonSuffix(fileName):
  """Returns True if fileName names a Python source file."""
  pythonExtension = ".py"
  return fileName.endswith(pythonExtension)
def isValidKIDSBuildSuffix(fileName):
  # True if fileName ends with a recognized KIDS build extension.
  return fileName.endswith(VALID_KIDS_BUILD_SUFFIX_LIST)
def isValidPatchInfoSuffix(fileName):
  # True if fileName ends with a recognized patch-info (TXT) extension.
  return fileName.endswith(VALID_PATCH_INFO_SUFFIX_LIST)
def isValidGlobalFileSuffix(fileName):
  # True if fileName ends with a recognized global-export (.GBL) extension.
  return fileName.endswith(VALID_GLOBAL_FILE_SUFFIX_LIST)
def isValidRoutineFileSuffix(fileName):
  # True if fileName ends with a recognized routine-import (.RSA/.RO) extension.
  return fileName.endswith(VALID_ROUTINE_IMPORT_FILE_SUFFIX_LIST)
def isValidKIDSBuildHeaderSuffix(fileName):
  # True if fileName ends with a KIDS-build JSON metadata extension.
  return fileName.endswith(VALID_KIDS_BUILD_HEADER_SUFFIX_LIST)
def isValidKIDSBuildSha1Suffix(fileName):
  # True if fileName ends with a KIDS-build SHA1 side-file extension.
  return fileName.endswith(VALID_KIDS_BUILD_SHA1_SUFFIX_LIST)
def isValidPatchInfoSha1Suffix(fileName):
  # True if fileName ends with a patch-info SHA1 side-file extension.
  return fileName.endswith(VALID_PATCH_INFO_SHA1_SUFFIX_LIST)
def isValidGlobalSha1Suffix(fileName):
  # True if fileName ends with a global-export SHA1 side-file extension.
  return fileName.endswith(VALID_GLOBAL_SHA1_SUFFIX_LIST)
def isValidRoutineSha1Suffix(fileName):
  # True if fileName ends with a routine-import SHA1 side-file extension.
  return fileName.endswith(VALID_ROUTINE_SHA1_SUFFIX_LIST)
def isValidCSVSuffix(fileName):
  # True if fileName ends with a CSV extension.
  return fileName.endswith(VALID_CSV_FILE_SUFFIX_LIST)
def isValidPatchDataSuffix(fileName, includeExternalExt=False):
  """Returns True if fileName has any recognized patch-data extension.

  With includeExternalExt set, the companion files of externalized data
  (JSON headers and SHA1 side files) are accepted as well.
  """
  dataChecks = (
      isValidKIDSBuildSuffix,
      isValidPatchInfoSuffix,
      isValidGlobalFileSuffix,
      isValidRoutineFileSuffix,
      isValidCSVSuffix,
      isValidPythonSuffix,
  )
  if any(check(fileName) for check in dataChecks):
    return True
  if not includeExternalExt:
    return False
  externalChecks = (
      isValidKIDSBuildHeaderSuffix,
      isValidKIDSBuildSha1Suffix,
      isValidPatchInfoSha1Suffix,
      isValidGlobalSha1Suffix,
      isValidRoutineSha1Suffix,
  )
  return any(check(fileName) for check in externalChecks)
def isValidPatchRelatedFiles(absFileName, checkExternalExt=False):
  """Returns True if absFileName is a patch-related file worth processing.

  Hidden files (leading '.'), symlinks, already-externalized data files,
  explicitly ignored files and files with unknown extensions are rejected.

  Args:
    absFileName: absolute path of the file to check.
    checkExternalExt: if True, also accept external-data companion files
      (JSON headers and SHA1 side files).
  """
  fileName = os.path.basename(absFileName)
  # ignore files that start with .
  if fileName.startswith('.'):
    return False
  # Ignore symlinks. os.lstat (unlike os.stat) does not follow the link,
  # so S_ISLNK can actually report one; with os.stat the check never fired.
  try:
    st = os.lstat(absFileName)
    if stat.S_ISLNK(st.st_mode):
      return False
  except OSError:
    # Unreadable/vanished files are treated as invalid.
    return False
  # ignore the external data file
  if fileName.startswith(EXTERNAL_DATA_PREFIX):
    return False
  if fileName in IGNORE_FILE_LIST:
    return False
  # ignore invalid file extensions
  if not isValidPatchDataSuffix(fileName, checkExternalExt):
    return False
  return True
""" utility function to check if externalData name is valid """
def isValidExternalDataFileName(fileName):
baseName = os.path.basename(fileName)
return baseName.startswith(EXTERNAL_DATA_PREFIX)
""" retrive sha1 hash from the filename directly """
def getSha1HashFromExternalDataFileName(fileName):
baseName = os.path.basename(fileName)
return baseName[len(EXTERNAL_DATA_PREFIX):]
""" generate External Data filename """
def generateExternalDataFileName(sha1Sum):
return "%s%s" % (EXTERNAL_DATA_PREFIX, sha1Sum)
""" read the sha1Sum from sha1 file """
def readSha1SumFromSha1File(sha1File):
with open(sha1File, "r") as input:
return input.readline().rstrip('\r\n ')
""" add file to git ignore list
@fileName: absolute path of the file
"""
def addToGitIgnoreList(fileName):
rootDir = os.path.dirname(fileName)
basename = os.path.basename(fileName)
gitIgnoreFile = os.path.join(rootDir, ".gitignore")
if os.path.exists(gitIgnoreFile):
with open(gitIgnoreFile, "r") as ignoreFile:
for line in ignoreFile:
if line.strip(' \r\n') == basename:
return
with open(gitIgnoreFile, "a") as ignoreFile:
ignoreFile.write("%s\n" % basename)
else:
with open(gitIgnoreFile, "w") as ignoreFile:
ignoreFile.write("%s\n" % basename)
""" utility method to generate sha1 hash key for input file """
def generateSha1Sum(inputFilename):
assert os.path.exists(inputFilename)
fileSize = os.path.getsize(inputFilename)
MAX_READ_SIZE = 20 * 1024 * 1024 # 20 MiB
buf = old_div(fileSize,50)
if buf > MAX_READ_SIZE:
buf = MAX_READ_SIZE
with open(inputFilename, "r") as inputFile:
return generateSha1SumCommon(inputFile, buf)
""" utility method to generate sha1 hash key for file like object """
def generateSha1SumCommon(fileObject, buf=1024):
import hashlib
hashString = b''
while True:
nByte = fileObject.read(buf)
if nByte:
hashString += codecs.encode(nByte, encoding="ascii", errors='ignore')
else:
break
return hashlib.sha1(hashString).hexdigest()
""" Convert the KIDS Build, Global or TXT file to External Data format """
class ExternalDataConverter(object):
def __init__(self, externalDir, gitignore=False,
sizeLimit=EXTERNAL_DATA_SIZE_THRESHOLD):
self._externDir = None
self._gitIgnore = gitignore
if externalDir != None and os.path.exists(externalDir):
self._externDir = os.path.abspath(externalDir)
if sizeLimit <=0:
sizeLimit = EXTERNAL_DATA_SIZE_THRESHOLD
self._sizeLimit = sizeLimit
"""
Convert All the files with size > than threshold under the current
directory recursively
"""
def convertCurrentDir(self, curDir):
assert os.path.exists(curDir)
absCurDir = os.path.abspath(curDir)
for (root, dirs, files) in os.walk(absCurDir):
for fileName in files:
absFileName = os.path.join(root, fileName)
if not isValidPatchRelatedFiles(absFileName):
continue
# get the size of the file
fileSize = os.path.getsize(absFileName)
if fileSize < self._sizeLimit:
continue
if isValidKIDSBuildSuffix(fileName):
logger.info("converting KIDS file %s " % absFileName)
self.convertKIDSBuildFile(absFileName)
else:
self.convertToSha1File(absFileName)
""" """
def convertKIDSBuildFile(self, kidsFile):
from KIDSBuildParser import KIDSBuildParser, outputMetaDataInJSON
assert os.path.exists(kidsFile)
""" write KIDS metadata file """
kidsParser = KIDSBuildParser(None)
""" do not parse the routine part """
kidsParser.unregisterSectionHandler(KIDSBuildParser.ROUTINE_SECTION)
kidsParser.parseKIDSBuild(kidsFile)
logger.info("output meta data as %s" % (kidsFile+".json"))
outputMetaDataInJSON(kidsParser, kidsFile+".json")
self.convertToSha1File(kidsFile)
""" """
def convertToSha1File(self, inputFile):
assert os.path.exists(inputFile)
""" write the sha-1 hash to the .SHA1 file """
sha1Sum = generateSha1Sum(inputFile)
with open(inputFile + ".sha1", "w") as output:
output.write("%s\n" % sha1Sum)
""" add the file to ignore list """
if self._gitIgnore:
addToGitIgnoreList(inputFile)
self.__moveToExternalDir__(inputFile, sha1Sum)
def __moveToExternalDir__(self, fileName, sha1Sum):
assert os.path.exists(fileName)
destFile = generateExternalDataFileName(sha1Sum)
if self._externDir:
destFile = os.path.join(self._externDir, destFile)
else:
destFile = os.path.join(os.path.dirname(fileName), destFile)
if os.path.exists(destFile):
if generateSha1Sum(destFile) == sha1Sum:
os.remove(fileName)
logger.info("%s already exists and is valid" % destFile)
return
os.remove(destFile)
os.rename(fileName, destFile)
def main():
  """Command-line entry point: externalizes patch data under a directory."""
  initConsoleLogging()
  parser = argparse.ArgumentParser(
      description='Convert Patch Data to external data format')
  parser.add_argument('-i', '--inputDir', required=True,
                      help='path to top leve directory to convert all patch data')
  parser.add_argument('-e', '--externalDataDir', required=False, default=None,
                      help='output dir to store the external data,'
                           ' default is inplace')
  parser.add_argument('-g', '--gitignore', required=False, default=False,
                      action="store_true",
                      help='Add original file to .gitignore, default is not')
  parser.add_argument('-s', '--size', default=1, type=int,
                      help='file size threshold to be converted to external '
                           'data, unit is MiB, default is 1(MiB)')
  parser.add_argument('-l', '--logFile', default=None,
                      help='output log file, default is no')
  parsedArgs = parser.parse_args()
  logger.info(parsedArgs)
  if parsedArgs.logFile:
    initFileLogging(parsedArgs.logFile)
  converter = ExternalDataConverter(parsedArgs.externalDataDir,
                                    parsedArgs.gitignore,
                                    parsedArgs.size*EXTERNAL_DATA_SIZE_THRESHOLD)
  converter.convertCurrentDir(parsedArgs.inputDir)
# Script entry point.
if __name__ == '__main__':
  main()
| |
import unittest
import os, time
import OpTestConfiguration
from common.OpTestSystem import OpSystemState
from common.OpTestInstallUtil import InstallUtil
"""
THE PLAN:
- assert physical presence
- clears any existing keys
- gets the machine in a known state without secureboot
- enroll a set of PK, KEK, db
- pregenerated, use secvar sysfs interface
- reboot, and ensure secure boot is now enabled
- fail to regular kexec an unsigned kernel
- fail to load an unsigned kernel
- fail to load a dbx'd kernel
- fail to load a signed kernel with unenrolled key
- successfully load a signed kernel
- assert physical presence
- ensure machine is in a non-secure boot state
"""
"""
Generating physicalPresence.bin:
Create an attribute override file with the following contents (not including leading spaces):
CLEAR
target = k0:s0:
ATTR_BOOT_FLAGS 0x15000000 CONST
ATTR_PHYS_PRES_FAKE_ASSERT 0x01 CONST
# ATTR_PHYS_PRES_REQUEST_OPEN_WINDOW 0x01 CONST
Go to your op-build's <op-build>//build/hostboot-<commit#>/obj/genfiles directory
From that directory run the following:
./attributeOverride -d <path_to_attribute_override_text_file_from_above>
If it is successful, then an attrOverride.bin file will be created in that directory
"""
"""
Generating oskeys.tar:
Keys were generated via the makefile in https://git.kernel.org/pub/scm/linux/kernel/git/jejb/efitools.git.
NOTE: these tools will currently only build on x86
Running make generates and assembles a set of openssl keys, ESLs, and signed auth files following the
commands below.
Only the auth files are included in the tarball.
Generating keys (PK example):
openssl req -new -x509 -newkey rsa:2048 -subj "/CN=PK/" -keyout PK.key -out PK.crt -days 3650 -nodes -sha256
Generating ESLs (PK example):
cert-to-efi-sig-list PK.crt PK.esl
Generating Auths:
sign-efi-sig-list -k PK.key -c PK.crt PK PK.esl PK.auth
sign-efi-sig-list -k PK.key -c PK.crt KEK KEK.esl KEK.auth
sign-efi-sig-list -k KEK.key -c KEK.crt db db.esl db.auth
sign-efi-sig-list -k KEK.key -c KEK.crt dbx dbx.esl dbx.auth
NOTE: dbx.esl is currently generated using a soon-to-be-released internal tool, and will be integrated in this test/documentation
Normally, dbx.esl would be generated with hash-to-efi-sig-list, however that tool has a dependency on PECOFF which
is not compatible with POWER.
NOTE: newPK is signed by the PK to test updating the PK, and deletePK is an empty file signed by newPK
to test the removal of a PK, which exits secure boot enforcement mode.
Generating oskernels.tar:
kernel-unsigned - very stripped down kernel built with minimal config options. no signature
kernel-signed - same kernel as above, signed with the db present in oskeys.tar
kernel-unenrolled - same kernel, signed with another generated key that is NOT in oskeys.tar
kernel-dbx - same kconfig, but adjusted version name. signed with the same db key. hash present in oskeys.tar's dbx
Signing kernels:
Kernels are signed with the `sign-file` utility in the linux source tree, like so:
./scripts/sign-file sha256 db.key db.crt vmlinuz kernel-signed
"""
# Variable data after enrollment (located in /sys/firmware/secvar/vars/<variable name>/data should
# be in the ESL format, WITHOUT the signed update auth header.
# These hashes are of the ESL data prior to signing the data as an update, and should match the
# post-enrollment data
# Future work: generate the full set of key/crt->ESL->auth data as part of this test, and
# calculate the expected hashes from the generated ESL rather than hardcoding them here.
# Keys are secure-variable names; values are sha256 hex digests of the
# expected ESL contents.
esl_hashes = {
    "PK": "91f15df8fc8f80bd0a1bbf2c77a5c5a16d2b189dd6f14d7b7c1e274fedd53f47",
    "KEK": "1b6e26663bbd4bbb2b44af9e36d14258cdf700428f04388b0c689696450a9544",
    "db": "480b652075d7b52ce07577631444848fb1231d6e4da9394e6adbe734795a7eb2",
    "dbx": "2310745cd7756d9bfd8cacf0935a27a7bd1d2f1b1783da03902b5598a0928da6",
    "newPK": "9a1d186c08c18887b68fadd81be48bca06dd007fa214dfcdb0f4195b5aff996c",
}
class OsSecureBoot(unittest.TestCase):
    """Validate OS secure boot key management and enforcement on OpenPOWER.

    Walks the full key lifecycle: clearing keys via a remote physical
    presence assertion, enrolling PK/KEK/db/dbx secure variables, checking
    kexec enforcement against signed/unsigned/denylisted kernels, and
    finally replacing and deleting the PK.
    """

    def setUp(self):
        # Cache handles to the system, BMC, host, and IPMI helper objects
        conf = OpTestConfiguration.conf
        self.cv_SYSTEM = conf.system()
        self.cv_BMC = conf.bmc()
        self.cv_HOST = conf.host()
        self.cv_IPMI = conf.ipmi()
        self.OpIU = InstallUtil()
        # Base URL serving the pregenerated oskeys.tar / oskernels.tar payloads
        self.URL = conf.args.secvar_payload_url
        self.bmc_type = conf.args.bmc_type

    def getTestData(self, data="keys"):
        """Download os<data>.tar from the payload URL and extract it into /tmp.

        ``data`` selects the payload: "keys" -> oskeys.tar, "kernels" ->
        oskernels.tar (see the generation notes at the top of this file).
        """
        con = self.cv_SYSTEM.console
        self.OpIU.configure_host_ip()
        fil = "os{}.tar".format(data)
        url = self.URL + "/" + fil
        con.run_command("wget {0} -O /tmp/{1}".format(url, fil))
        con.run_command("tar xf /tmp/{} -C /tmp/".format(fil))

    def checkFirmwareSupport(self):
        """Skip the test unless the firmware stack supports secure variables."""
        if "OpenBMC" not in self.bmc_type:
            self.skipTest("Test only applies for OpenBMC-based machines")
        self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
        con = self.cv_SYSTEM.console
        # Skiboot exposes secure variable support via this device tree node
        output = con.run_command_ignore_fail("test -d /sys/firmware/devicetree/base/ibm,opal/secvar || echo no")
        if "no" in "".join(output):
            self.skipTest("Skiboot does not support secure variables")
        # Skiroot must also expose the sysfs interface to interact with them
        output = con.run_command_ignore_fail("test -d /sys/firmware/secvar || echo no")
        if "no" in "".join(output):
            self.skipTest("Skiroot does not support the secure variables sysfs interface")
        # We only support one backend for now, skip if using an unknown backend
        # NOTE: This file must exist if the previous checks pass, fail the test if not present
        output = con.run_command("cat /sys/firmware/secvar/format")
        if "ibm,edk2-compat-v1" not in "".join(output):
            self.skipTest("Test case only supports the 'ibm,edk2-compat-v1' backend")

    def cleanPhysicalPresence(self):
        """Remove any staged physical-presence overrides and reboot clean."""
        # Drop the attribute override and CFAM overrides staged on the BMC
        self.cv_BMC.run_command("rm -f /usr/local/share/pnor/ATTR_TMP")
        self.cv_BMC.run_command("rm -f /var/lib/obmc/cfam_overrides")
        # Unset the Key Clear Request sensor
        self.cv_IPMI.ipmitool.run("raw 0x04 0x30 0xE8 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00")
        # Reboot to be super sure
        self.cv_SYSTEM.goto_state(OpSystemState.OFF)
        self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)

    # Assert physical presence remotely to remove any currently installed OS secure boot keys
    # NOTE: This is NOT something an end-user should expect to do, there is a different process
    # for a physical presence assertion that actually requires physical access on production machines
    # This is included in the test to make sure the machine is in a clean initial state, and also
    # to ensure that skiboot handles physical presence key clear reset requests properly.
    def assertPhysicalPresence(self):
        """Remotely assert physical presence to clear all OS secure boot keys."""
        self.cv_SYSTEM.goto_state(OpSystemState.OFF)
        # This file was generated using the settings detailed at the top of this file
        # It should be sufficient for any version of hostboot after these attributes were added
        # This might break if something changes about these attributes, or if these attributes are not
        # used on a later platform.
        # NOTE: This override will NOT work on production firmware
        self.cv_BMC.image_transfer("test_binaries/physicalPresence.bin")
        self.cv_BMC.run_command("cp /tmp/physicalPresence.bin /usr/local/share/pnor/ATTR_TMP")
        # Disable security settings on development images, to allow remote physical presence assertion
        self.cv_BMC.run_command("echo '0 0x283a 0x15000000' > /var/lib/obmc/cfam_overrides")
        self.cv_BMC.run_command("echo '0 0x283F 0x20000000' >> /var/lib/obmc/cfam_overrides")
        # The rest of this function applies to the physical presence assertion of a production machine,
        # and should behave the same way.
        # The "ClearHostSecurityKeys" sensor is used on the OpenBMC to keep track of any Key Clear Request.
        # During the (re-)IPL, the values will be sent to Hostboot for processing.
        # This sets the sensor value to 0x40, which indicates KEY_CLEAR_OS_KEYS
        self.cv_IPMI.ipmitool.run("raw 0x04 0x30 0xE8 0x00 0x40 0x00 0x00 0x00 0x00 0x00 0x00 0x00")
        # Read back the sensor value.
        # Expected Output is 4 bytes: where the first byte (ZZ) is the sensor value: ZZ 40 00 00
        output = self.cv_IPMI.ipmitool.run("raw 0x04 0x2D 0xE8")
        self.assertTrue("40 40 00 00" in output)
        # Special case, powering on this way since there is no appropriate state
        # for the opened physical presence window
        self.cv_SYSTEM.sys_power_on()
        raw_pty = self.cv_SYSTEM.console.get_console()
        # Check for expected hostboot log output for a success physical presence assertion
        raw_pty.expect("Opened Physical Presence Detection Window", timeout=120)
        raw_pty.expect("System Will Power Off and Wait For Manual Power On", timeout=30)
        raw_pty.expect("shutdown complete", timeout=30)
        # Machine is off now, can resume using the state machine
        self.cv_SYSTEM.set_state(OpSystemState.OFF)
        # Turn it back on to complete the process
        self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
        con = self.cv_SYSTEM.console
        # After a physical presence, the firmware should NOT be enforcing secure boot as there should be
        # no PK (or other secure boot keys)
        con.run_command("test ! -f /sys/firmware/devicetree/base/ibm,secureboot/os-secureboot-enforcing")
        # After a physical presence clear, there should be device tree entries indicating that
        # 1. a physical presence was asserted, and
        con.run_command("test -f /sys/firmware/devicetree/base/ibm,secureboot/physical-presence-asserted")
        # 2. what request was made, in this case clearing of os secureboot keys
        con.run_command("test -f /sys/firmware/devicetree/base/ibm,secureboot/clear-os-keys")
        # As mentioned before, no keys should be enrolled, double check to make sure each is empty
        for k in ["PK", "KEK", "db", "dbx"]:
            # Size should be ascii "0" for each, as each should be empty
            output = con.run_command("cat /sys/firmware/secvar/vars/{}/size".format(k))
            self.assertTrue("0" in output)
            # Data should not contain anything
            output = con.run_command("cat /sys/firmware/secvar/vars/{}/data | wc -c".format(k))
            self.assertTrue("0" in output)

    # Enroll keys to enable secure boot
    # Keys are generated ahead of time, following the process outlined at the top of this file
    # See: "Generating oskeys.tar"
    def addSecureBootKeys(self):
        """Enqueue signed PK/KEK/db/dbx updates and verify they are enrolled."""
        self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
        con = self.cv_SYSTEM.console
        # Fetch the pregenerated test data containing the signed update files
        # Future work: generate these test files as part of the test case (dependent on efitools/secvarctl)
        self.getTestData()
        # Enqueue the PK update first, will enter secure mode and enforce signature
        # checking for the remaining updates below
        con.run_command("cat /tmp/PK.auth > /sys/firmware/secvar/vars/PK/update")
        # Enqueue the KEK update
        con.run_command("cat /tmp/KEK.auth > /sys/firmware/secvar/vars/KEK/update")
        # Enqueue the db update, this contains the key needed for validating signed kernels
        con.run_command("cat /tmp/db.auth > /sys/firmware/secvar/vars/db/update")
        # Enqueue the dbx update, contains a list of denylisted kernel hashes
        con.run_command("cat /tmp/dbx.auth > /sys/firmware/secvar/vars/dbx/update")
        # System needs to power fully off to process keys on next reboot
        # Key updates are only processed as skiboot initializes
        self.cv_SYSTEM.goto_state(OpSystemState.OFF)
        self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
        # If all key updates were processed successfully, then we should have entered secure mode
        # This device tree entry is created if a PK is present, and forces skiroot to only kexec
        # properly signed kernels with a key in the db variable.
        con.run_command("test -f /sys/firmware/devicetree/base/ibm,secureboot/os-secureboot-enforcing")
        # Loop through and double check that all the variables now contain data
        for k in ["PK", "KEK", "db", "dbx"]:
            # Size should return a nonzero ascii value when enrolled
            output = con.run_command("cat /sys/firmware/secvar/vars/{}/size".format(k))
            self.assertFalse("0" in output)
            # Data should contain the ESL data as generated before
            # NOTE: this is NOT the same as the .auth data, the auth header and signature are removed
            # as part of processing the update
            # Future work: compare the /data field against the generated ESL data
            output = con.run_command("cat /sys/firmware/secvar/vars/{}/data | wc -c".format(k))
            self.assertFalse("0" in output)
            # Check the integrity of the data by hashing and comparing against an expected hash
            # See top of the file for how these hashes were calculated
            output = con.run_command("sha256sum /sys/firmware/secvar/vars/{}/data".format(k))
            # output is of the form ["<hash>  <filename>"], so extract just the hash value to compare
            output = output[0].split(" ")[0]
            self.assertTrue(esl_hashes[k] == output)

    # Attempt to kexec load a set of kernels to ensure secure mode is enforced correctly
    def checkKexecKernels(self):
        """Verify kexec accepts only kernels signed by an enrolled db key."""
        self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
        con = self.cv_SYSTEM.console
        # Obtain pregenerated test kernels, see top of file for how these were generated
        self.getTestData(data="kernels")
        # Fail regular kexec_load, syscall should be disabled by skiroot when enforcing secure boot
        # Petitboot should automatically avoid using this syscall when applicable
        output = con.run_command_ignore_fail("kexec -l /tmp/kernel-unsigned")
        self.assertTrue("Permission denied" in "".join(output))
        # Fail using kexec_file_load with an unsigned kernel
        output = con.run_command_ignore_fail("kexec -s /tmp/kernel-unsigned")
        self.assertTrue("Permission denied" in "".join(output))
        # Fail loading a kernel whose hash is in the dbx denylist
        output = con.run_command_ignore_fail("kexec -s /tmp/kernel-dbx")
        self.assertTrue("Permission denied" in "".join(output))
        # Fail loading a properly signed kernel with key that is NOT enrolled in the db
        # Future work: enroll the key used to sign this kernel and try again
        output = con.run_command_ignore_fail("kexec -s /tmp/kernel-unenrolled")
        self.assertTrue("Permission denied" in "".join(output))
        # Successfully kexec_file_load a kernel signed with a key in the db
        # (run_command raises on a nonzero exit status, so no assert is needed)
        output = con.run_command("kexec -s /tmp/kernel-signed")

    # To replace the PK, sign a new PK esl with the previous PK
    # Replacing the PK will not change the secure enforcing status, nor will
    # it remove the other variables
    # To delete the PK, sign an empty file with the PK. The update processing
    # logic will interpret this as a deletion.
    # NOTE: removing the PK DISABLES OS secure boot enforcement, but will NOT
    # clear your other variables.
    def replaceAndDeletePK(self):
        """Replace the PK with newPK, then delete it to leave secure mode."""
        con = self.cv_SYSTEM.console
        # Obtain tarball containing a replacement PK
        self.getTestData(data="keys")
        # Enqueue an update to the PK
        # New PK updates must be signed with the previous PK
        con.run_command("cat /tmp/newPK.auth > /sys/firmware/secvar/vars/PK/update")
        # Reboot the system to process the update
        self.cv_SYSTEM.goto_state(OpSystemState.OFF)
        self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
        # Confirm we are still enforcing secure boot
        con.run_command("test -f /sys/firmware/devicetree/base/ibm,secureboot/os-secureboot-enforcing")
        # Check that the new PK is enrolled now
        output = con.run_command("sha256sum /sys/firmware/secvar/vars/PK/data")
        output = output[0].split(" ")[0]
        self.assertTrue(esl_hashes["newPK"] == output)
        # Obtain tarball containing a PK deletion update
        self.getTestData(data="keys")
        # Enqueue a deletion update to the PK
        # This update is a signed empty file, which is interpreted as a deletion action
        con.run_command("cat /tmp/deletePK.auth > /sys/firmware/secvar/vars/PK/update")
        # Reboot the system to process the update
        self.cv_SYSTEM.goto_state(OpSystemState.OFF)
        self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
        # Secure boot enforcement should now be DISABLED
        con.run_command("test ! -f /sys/firmware/devicetree/base/ibm,secureboot/os-secureboot-enforcing")
        # PK size should be empty now
        output = con.run_command("cat /sys/firmware/secvar/vars/PK/size")
        self.assertTrue("0" in output)
        # PK data should not contain any data
        output = con.run_command("cat /sys/firmware/secvar/vars/PK/data | wc -c")
        self.assertTrue("0" in output)
        # Loop through and double check that all the other variables still contain their data
        # This is the same logic as in .addSecureBootKeys()
        for k in ["KEK", "db", "dbx"]:
            output = con.run_command("cat /sys/firmware/secvar/vars/{}/size".format(k))
            self.assertFalse("0" in output)
            output = con.run_command("cat /sys/firmware/secvar/vars/{}/data | wc -c".format(k))
            self.assertFalse("0" in output)
            output = con.run_command("sha256sum /sys/firmware/secvar/vars/{}/data".format(k))
            output = output[0].split(" ")[0]
            self.assertTrue(esl_hashes[k] == output)

    def runTest(self):
        """Run the full secure boot lifecycle scenario end to end."""
        # skip test if the machine firmware doesn't support secure variables
        self.checkFirmwareSupport()
        # clean up any previous physical presence attempt
        self.cleanPhysicalPresence()
        # start in a clean secure boot state
        self.assertPhysicalPresence()
        self.cleanPhysicalPresence()
        # add secure boot keys
        self.addSecureBootKeys()
        # attempt to securely boot test kernels
        self.checkKexecKernels()
        # replace PK, delete PK
        self.replaceAndDeletePK()
        # clean up after, and ensure keys are properly cleared
        self.assertPhysicalPresence()
        self.cleanPhysicalPresence()
| |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAX wrapper for Pubsub API requests."""
import functools
from google.cloud.gapic.pubsub.v1.publisher_client import PublisherClient
from google.cloud.gapic.pubsub.v1.subscriber_client import SubscriberClient
from google.gax import CallOptions
from google.gax import INITIAL_PAGE
from google.gax.errors import GaxError
from google.gax.grpc import exc_to_code
from google.protobuf.json_format import MessageToDict
from google.cloud.proto.pubsub.v1.pubsub_pb2 import PubsubMessage
from google.cloud.proto.pubsub.v1.pubsub_pb2 import PushConfig
from grpc import insecure_channel
from grpc import StatusCode
from google.cloud._helpers import _to_bytes
from google.cloud._helpers import _pb_timestamp_to_rfc3339
from google.cloud._helpers import make_secure_channel
from google.cloud._http import DEFAULT_USER_AGENT
from google.cloud.exceptions import Conflict
from google.cloud.exceptions import NotFound
from google.cloud.iterator import GAXIterator
from google.cloud.pubsub import __version__
from google.cloud.pubsub._helpers import subscription_name_from_path
from google.cloud.pubsub.subscription import Subscription
from google.cloud.pubsub.topic import Topic
class _PublisherAPI(object):
    """Helper mapping publisher-related APIs.

    :type gax_api: :class:`.publisher_client.PublisherClient`
    :param gax_api: API object used to make GAX requests.

    :type client: :class:`~google.cloud.pubsub.client.Client`
    :param client: The client that owns this API object.
    """

    def __init__(self, gax_api, client):
        self._gax_api = gax_api
        self._client = client

    def list_topics(self, project, page_size=0, page_token=None):
        """List topics for the project associated with this API.

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/list

        :type project: str
        :param project: project ID

        :type page_size: int
        :param page_size: maximum number of topics to return. If not passed,
                          defaults to a value set by the API.

        :type page_token: str
        :param page_token: opaque marker for the next "page" of topics. If not
                           passed, the API will return the first page of
                           topics.

        :rtype: :class:`~google.cloud.iterator.Iterator`
        :returns: Iterator of :class:`~google.cloud.pubsub.topic.Topic`
                  accessible to the current API.
        """
        if page_token is None:
            # INITIAL_PAGE is the GAX sentinel for "start at the first page"
            page_token = INITIAL_PAGE
        options = CallOptions(page_token=page_token)
        path = 'projects/%s' % (project,)
        page_iter = self._gax_api.list_topics(
            path, page_size=page_size, options=options)
        return GAXIterator(self._client, page_iter, _item_to_topic)

    def topic_create(self, topic_path):
        """API call: create a topic

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/create

        :type topic_path: str
        :param topic_path: fully-qualified path of the new topic, in format
                           ``projects/<PROJECT>/topics/<TOPIC_NAME>``.

        :rtype: dict
        :returns: ``Topic`` resource returned from the API.
        :raises: :exc:`google.cloud.exceptions.Conflict` if the topic already
                 exists
        """
        try:
            topic_pb = self._gax_api.create_topic(topic_path)
        except GaxError as exc:
            # Translate the gRPC status for "already exists" into the
            # HTTP-style Conflict the JSON API surface raises.
            if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:
                raise Conflict(topic_path)
            raise
        return {'name': topic_pb.name}

    def topic_get(self, topic_path):
        """API call: retrieve a topic

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/get

        :type topic_path: str
        :param topic_path: fully-qualified path of the topic, in format
                           ``projects/<PROJECT>/topics/<TOPIC_NAME>``.

        :rtype: dict
        :returns: ``Topic`` resource returned from the API.
        :raises: :exc:`google.cloud.exceptions.NotFound` if the topic does not
                 exist
        """
        try:
            topic_pb = self._gax_api.get_topic(topic_path)
        except GaxError as exc:
            if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
                raise NotFound(topic_path)
            raise
        return {'name': topic_pb.name}

    def topic_delete(self, topic_path):
        """API call: delete a topic

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/delete

        :type topic_path: str
        :param topic_path: fully-qualified path of the topic, in format
                           ``projects/<PROJECT>/topics/<TOPIC_NAME>``.

        :raises: :exc:`google.cloud.exceptions.NotFound` if the topic does not
                 exist
        """
        try:
            self._gax_api.delete_topic(topic_path)
        except GaxError as exc:
            if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
                raise NotFound(topic_path)
            raise

    def topic_publish(self, topic_path, messages, timeout=30):
        """API call: publish one or more messages to a topic

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/publish

        :type topic_path: str
        :param topic_path: fully-qualified path of the topic, in format
                           ``projects/<PROJECT>/topics/<TOPIC_NAME>``.

        :type messages: list of dict
        :param messages: messages to be published.

        :type timeout: int
        :param timeout: (Optional) Timeout seconds.

        :rtype: list of string
        :returns: list of opaque IDs for published messages.
        :raises: :exc:`google.cloud.exceptions.NotFound` if the topic does not
                 exist
        """
        # Bundling is explicitly disabled for this call so each publish is
        # issued immediately rather than batched by GAX.
        options = CallOptions(is_bundling=False, timeout=timeout)
        message_pbs = [_message_pb_from_mapping(message)
                       for message in messages]
        try:
            result = self._gax_api.publish(topic_path, message_pbs,
                                           options=options)
        except GaxError as exc:
            if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
                raise NotFound(topic_path)
            raise
        return result.message_ids

    def topic_list_subscriptions(self, topic, page_size=0, page_token=None):
        """API call: list subscriptions bound to a topic

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics.subscriptions/list

        :type topic: :class:`~google.cloud.pubsub.topic.Topic`
        :param topic: The topic that owns the subscriptions.

        :type page_size: int
        :param page_size: maximum number of subscriptions to return, If not
                          passed, defaults to a value set by the API.

        :type page_token: str
        :param page_token: opaque marker for the next "page" of subscriptions.
                           If not passed, the API will return the first page
                           of subscriptions.

        :rtype: :class:`~google.cloud.iterator.Iterator`
        :returns: Iterator of
                  :class:`~google.cloud.pubsub.subscription.Subscription`
                  accessible to the current API.
        :raises: :exc:`~google.cloud.exceptions.NotFound` if the topic does
                 not exist.
        """
        if page_token is None:
            page_token = INITIAL_PAGE
        options = CallOptions(page_token=page_token)
        topic_path = topic.full_name
        try:
            page_iter = self._gax_api.list_topic_subscriptions(
                topic_path, page_size=page_size, options=options)
        except GaxError as exc:
            if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
                raise NotFound(topic_path)
            raise
        iterator = GAXIterator(self._client, page_iter,
                               _item_to_subscription_for_topic)
        # Attach the owning topic so the item converter can build
        # Subscription objects bound to it.
        iterator.topic = topic
        return iterator
class _SubscriberAPI(object):
    """Helper mapping subscriber-related APIs.

    :type gax_api: :class:`.subscriber_client.SubscriberClient`
    :param gax_api: API object used to make GAX requests.

    :type client: :class:`~google.cloud.pubsub.client.Client`
    :param client: The client that owns this API object.
    """

    def __init__(self, gax_api, client):
        self._gax_api = gax_api
        self._client = client

    def list_subscriptions(self, project, page_size=0, page_token=None):
        """List subscriptions for the project associated with this API.

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/list

        :type project: str
        :param project: project ID

        :type page_size: int
        :param page_size: maximum number of subscriptions to return, If not
                          passed, defaults to a value set by the API.

        :type page_token: str
        :param page_token: opaque marker for the next "page" of subscriptions.
                           If not passed, the API will return the first page
                           of subscriptions.

        :rtype: :class:`~google.cloud.iterator.Iterator`
        :returns: Iterator of
                  :class:`~google.cloud.pubsub.subscription.Subscription`
                  accessible to the current API.
        """
        if page_token is None:
            page_token = INITIAL_PAGE
        options = CallOptions(page_token=page_token)
        path = 'projects/%s' % (project,)
        page_iter = self._gax_api.list_subscriptions(
            path, page_size=page_size, options=options)

        # We attach a mutable topics dictionary so that as topic
        # objects are created by Subscription.from_api_repr, they
        # can be re-used by other subscriptions from the same topic.
        topics = {}
        item_to_value = functools.partial(
            _item_to_sub_for_client, topics=topics)
        return GAXIterator(self._client, page_iter, item_to_value)

    def subscription_create(self, subscription_path, topic_path,
                            ack_deadline=None, push_endpoint=None):
        """API call: create a subscription

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/create

        :type subscription_path: str
        :param subscription_path:
            the fully-qualified path of the new subscription, in format
            ``projects/<PROJECT>/subscriptions/<SUB_NAME>``.

        :type topic_path: str
        :param topic_path: the fully-qualified path of the topic being
                           subscribed, in format
                           ``projects/<PROJECT>/topics/<TOPIC_NAME>``.

        :type ack_deadline: int
        :param ack_deadline:
            (Optional) the deadline (in seconds) by which messages pulled from
            the back-end must be acknowledged.

        :type push_endpoint: str
        :param push_endpoint:
            (Optional) URL to which messages will be pushed by the back-end.
            If not set, the application must pull messages.

        :rtype: dict
        :returns: ``Subscription`` resource returned from the API.
        """
        if push_endpoint is not None:
            push_config = PushConfig(push_endpoint=push_endpoint)
        else:
            push_config = None

        if ack_deadline is None:
            # 0 is the protobuf default, meaning "let the API choose"
            ack_deadline = 0

        try:
            sub_pb = self._gax_api.create_subscription(
                subscription_path, topic_path,
                push_config=push_config, ack_deadline_seconds=ack_deadline)
        except GaxError as exc:
            if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:
                # NOTE(review): the Conflict carries the topic path, not the
                # subscription path — confirm this is the intended payload.
                raise Conflict(topic_path)
            raise
        return MessageToDict(sub_pb)

    def subscription_get(self, subscription_path):
        """API call: retrieve a subscription

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/get

        :type subscription_path: str
        :param subscription_path:
            the fully-qualified path of the subscription, in format
            ``projects/<PROJECT>/subscriptions/<SUB_NAME>``.

        :rtype: dict
        :returns: ``Subscription`` resource returned from the API.
        :raises: :exc:`google.cloud.exceptions.NotFound` if the subscription
                 does not exist
        """
        try:
            sub_pb = self._gax_api.get_subscription(subscription_path)
        except GaxError as exc:
            if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
                raise NotFound(subscription_path)
            raise
        return MessageToDict(sub_pb)

    def subscription_delete(self, subscription_path):
        """API call: delete a subscription

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/delete

        :type subscription_path: str
        :param subscription_path:
            the fully-qualified path of the subscription, in format
            ``projects/<PROJECT>/subscriptions/<SUB_NAME>``.

        :raises: :exc:`google.cloud.exceptions.NotFound` if the subscription
                 does not exist
        """
        try:
            self._gax_api.delete_subscription(subscription_path)
        except GaxError as exc:
            if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
                raise NotFound(subscription_path)
            raise

    def subscription_modify_push_config(self, subscription_path,
                                        push_endpoint):
        """API call: update push config of a subscription

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyPushConfig

        :type subscription_path: str
        :param subscription_path:
            the fully-qualified path of the subscription, in format
            ``projects/<PROJECT>/subscriptions/<SUB_NAME>``.

        :type push_endpoint: str
        :param push_endpoint:
            (Optional) URL to which messages will be pushed by the back-end.
            If not set, the application must pull messages.
        """
        push_config = PushConfig(push_endpoint=push_endpoint)
        try:
            self._gax_api.modify_push_config(subscription_path, push_config)
        except GaxError as exc:
            if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
                raise NotFound(subscription_path)
            raise

    def subscription_pull(self, subscription_path, return_immediately=False,
                          max_messages=1):
        """API call: retrieve messages for a subscription

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull

        :type subscription_path: str
        :param subscription_path:
            the fully-qualified path of the subscription, in format
            ``projects/<PROJECT>/subscriptions/<SUB_NAME>``.

        :type return_immediately: bool
        :param return_immediately: if True, the back-end returns even if no
                                   messages are available; if False, the API
                                   call blocks until one or more messages are
                                   available.

        :type max_messages: int
        :param max_messages: the maximum number of messages to return.

        :rtype: list of dict
        :returns: the ``receivedMessages`` element of the response.
        """
        try:
            response_pb = self._gax_api.pull(
                subscription_path, max_messages,
                return_immediately=return_immediately)
        except GaxError as exc:
            code = exc_to_code(exc.cause)
            if code == StatusCode.NOT_FOUND:
                raise NotFound(subscription_path)
            elif code == StatusCode.DEADLINE_EXCEEDED:
                # NOTE: The JSON-over-HTTP API returns a 200 with an empty
                # response when ``return_immediately`` is ``False``, so
                # we "mutate" the gRPC error into a non-error to conform.
                if not return_immediately:
                    return []
            raise
        return [_received_message_pb_to_mapping(rmpb)
                for rmpb in response_pb.received_messages]

    def subscription_acknowledge(self, subscription_path, ack_ids):
        """API call: acknowledge retrieved messages

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/acknowledge

        :type subscription_path: str
        :param subscription_path:
            the fully-qualified path of the subscription, in format
            ``projects/<PROJECT>/subscriptions/<SUB_NAME>``.

        :type ack_ids: list of string
        :param ack_ids: ack IDs of messages being acknowledged
        """
        try:
            self._gax_api.acknowledge(subscription_path, ack_ids)
        except GaxError as exc:
            if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
                raise NotFound(subscription_path)
            raise

    def subscription_modify_ack_deadline(self, subscription_path, ack_ids,
                                         ack_deadline):
        """API call: update ack deadline for retrieved messages

        See:
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyAckDeadline

        :type subscription_path: str
        :param subscription_path:
            the fully-qualified path of the subscription, in format
            ``projects/<PROJECT>/subscriptions/<SUB_NAME>``.

        :type ack_ids: list of string
        :param ack_ids: ack IDs of messages being acknowledged

        :type ack_deadline: int
        :param ack_deadline: the deadline (in seconds) by which messages pulled
                             from the back-end must be acknowledged.
        """
        try:
            self._gax_api.modify_ack_deadline(
                subscription_path, ack_ids, ack_deadline)
        except GaxError as exc:
            if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
                raise NotFound(subscription_path)
            raise
def _message_pb_from_mapping(message):
    """Build a ``PubsubMessage`` protobuf from an API-style mapping.

    Performs "impedance matching" between the protobuf attrs and the keys
    expected in the JSON API.
    """
    payload = _to_bytes(message['data'])
    attrs = message['attributes']
    return PubsubMessage(data=payload, attributes=attrs)
def _message_pb_to_mapping(message_pb):
    """Convert a message protobuf into an API-style mapping.

    Performs "impedance matching" between the protobuf attrs and the keys
    expected in the JSON API.
    """
    publish_time = _pb_timestamp_to_rfc3339(message_pb.publish_time)
    mapping = {
        'messageId': message_pb.message_id,
        'data': message_pb.data,
        'attributes': message_pb.attributes,
        'publishTime': publish_time,
    }
    return mapping
def _received_message_pb_to_mapping(received_message_pb):
    """Convert a received-message protobuf into an API-style mapping.

    Performs "impedance matching" between the protobuf attrs and the keys
    expected in the JSON API.
    """
    inner = _message_pb_to_mapping(received_message_pb.message)
    mapping = {
        'ackId': received_message_pb.ack_id,
        'message': inner,
    }
    return mapping
def make_gax_publisher_api(credentials=None, host=None):
    """Create an instance of the GAX Publisher API.

    If the ``credentials`` are omitted, then we create an insecure
    ``channel`` pointing at the local Pub / Sub emulator.

    :type credentials: :class:`~google.auth.credentials.Credentials`
    :param credentials: (Optional) Credentials for getting access
                        tokens.

    :type host: str
    :param host: (Optional) The host for an insecure channel. Only
                 used if ``credentials`` are omitted.

    :rtype: :class:`.publisher_client.PublisherClient`
    :returns: A publisher API instance with the proper channel.
    """
    if credentials is None:
        # Without credentials, point an insecure channel at the emulator.
        chan = insecure_channel(host)
    else:
        target = PublisherClient.SERVICE_ADDRESS
        chan = make_secure_channel(credentials, DEFAULT_USER_AGENT, target)
    return PublisherClient(
        channel=chan, lib_name='gccl', lib_version=__version__)
def make_gax_subscriber_api(credentials=None, host=None):
    """Create an instance of the GAX Subscriber API.

    If the ``credentials`` are omitted, then we create an insecure
    ``channel`` pointing at the local Pub / Sub emulator.

    :type credentials: :class:`~google.auth.credentials.Credentials`
    :param credentials: (Optional) Credentials for getting access
                        tokens.

    :type host: str
    :param host: (Optional) The host for an insecure channel. Only
                 used if ``credentials`` are omitted.

    :rtype: :class:`.subscriber_client.SubscriberClient`
    :returns: A subscriber API instance with the proper channel.
    """
    if credentials is None:
        # Without credentials, point an insecure channel at the emulator.
        chan = insecure_channel(host)
    else:
        target = SubscriberClient.SERVICE_ADDRESS
        chan = make_secure_channel(credentials, DEFAULT_USER_AGENT, target)
    return SubscriberClient(
        channel=chan, lib_name='gccl', lib_version=__version__)
def _item_to_topic(iterator, resource):
    """Convert a protobuf topic to the native object.

    :type iterator: :class:`~google.cloud.iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type resource: :class:`.pubsub_pb2.Topic`
    :param resource: A topic returned from the API.

    :rtype: :class:`~google.cloud.pubsub.topic.Topic`
    :returns: The next topic in the page.
    """
    mapping = {'name': resource.name}
    return Topic.from_api_repr(mapping, iterator.client)
def _item_to_subscription_for_topic(iterator, subscription_path):
    """Convert a subscription name to the native object.

    :type iterator: :class:`~google.cloud.iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type subscription_path: str
    :param subscription_path: Subscription path returned from the API.

    :rtype: :class:`~google.cloud.pubsub.subscription.Subscription`
    :returns: The next subscription in the page.
    """
    project = iterator.client.project
    short_name = subscription_name_from_path(subscription_path, project)
    return Subscription(short_name, iterator.topic)
def _item_to_sub_for_client(iterator, sub_pb, topics):
    """Convert a subscription protobuf to the native object.

    .. note::
        Not directly usable as the ``item_to_value`` callback of a
        :class:`~google.cloud.iterator.Iterator`: the extra ``topics``
        argument must first be bound (e.g. via :func:`functools.partial`)
        so topic objects can be shared across subscriptions from the same
        topic. See :meth:`_SubscriberAPI.list_subscriptions` for the
        intended usage.

    :type iterator: :class:`~google.cloud.iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type sub_pb: :class:`.pubsub_pb2.Subscription`
    :param sub_pb: A subscription returned from the API.

    :type topics: dict
    :param topics: A dictionary of topics to be used (and modified)
                   as new subscriptions are created bound to topics.

    :rtype: :class:`~google.cloud.pubsub.subscription.Subscription`
    :returns: The next subscription in the page.
    """
    as_dict = MessageToDict(sub_pb)
    owning_client = iterator.client
    return Subscription.from_api_repr(as_dict, owning_client, topics=topics)
| |
"""The base entity class and any non-player classes derived from it.
Defines all the objects in the game.
"""
import random as rand
import sys
import pyglet
from geometry.shape import Shape
from geometry.vector import Vector
from utils import WINDOW_WIDTH, WINDOW_HEIGHT, wrap_angle, rand_direction
class Entity(object):
    """Base class for every object in the game (ship, asteroid, etc.).

    An entity is rendered through a Shape and carries the position,
    rotation (degrees) and scale used for drawing, plus a normalized
    direction vector and linear/rotational speeds used for movement.

    Attributes:
        _shape: the Shape graphically defining the entity
        pos: the position of the entity
        rot: the rotation of the entity (in degrees)
        scale: the scale of the entity
        lin_speed: the linear speed of the entity
        rot_speed: the rotational speed of the entity
        _direction: the normalized movement direction (see `direction`)
    """

    def __init__(self, verts, direction, lin_speed=0.0, rot_speed=0.0,
                 pos=Vector(0, 0), rot=0.0, scale=1.0):
        """Creates a new Entity.

        Args:
            verts: the vertices defining the shape of the entity
            direction: the movement direction (normalized on assignment)
            lin_speed: the linear speed of the entity
            rot_speed: the rotational speed of the entity
            pos: the position of the entity
            rot: the rotation of the entity (in degrees)
            scale: the scale of the entity

        Returns:
            a new Entity
        """
        self._shape = Shape(verts, pos, rot, scale)
        self.pos = pos
        self.rot = rot
        self.scale = scale
        self.lin_speed = lin_speed
        self.rot_speed = rot_speed
        self._direction = direction.normalize()

    def update(self, dt):
        """Advance the entity one tick and refresh its shape.

        Args:
            dt: the amount of time since the last update
                (NOTE(review): not used by the movement code below --
                speeds are applied per call, confirm this is intended)
        """
        # Translate along the direction vector, then spin and wrap
        # the angle back into range.
        self.pos += self._direction * self.lin_speed
        self.rot = wrap_angle(self.rot + self.rot_speed)
        # Push the new transform into the drawable shape.
        self._shape.update(self.pos, self.rot, self.scale)
        # Wrap around the screen edges when fully off-screen.
        self._reflect_across_screen()

    def draw(self):
        """Draws the entity onto the screen (delegated to its Shape)."""
        self._shape.draw()

    def collides(self, other):
        """Checks whether this entity collides with another one.

        Args:
            other: the other entity to test collision with

        Returns:
            True if the entities collide, False if they do not
        """
        return self._shape.collides(other._shape)

    def _reflect_across_screen(self):
        """Wrap the entity to the opposite side of the window once it
        has moved completely off-screen, handling each axis on its own.
        """
        self.pos = Vector(
            self._wrapped_coord(self.pos.x, WINDOW_WIDTH),
            self._wrapped_coord(self.pos.y, WINDOW_HEIGHT))

    def _wrapped_coord(self, coord, limit):
        """Return the wrapped value of one coordinate axis.

        The shape's effective length approximates the entity's reach:
        once the whole entity is past an edge, it re-enters from the
        opposite edge at the same overshoot distance. Values still
        inside [0, limit] are returned unchanged.
        """
        reach = self._shape.effective_length
        if coord + reach < 0:
            # Fully off the low edge: re-enter past the high edge.
            return limit + abs(coord + reach)
        if coord - reach > limit:
            # Fully off the high edge: re-enter before the low edge.
            return -abs(coord - reach - limit)
        return coord

    # Direction is a property so that assignment always re-normalizes.
    @property
    def direction(self):
        return self._direction

    @direction.setter
    def direction(self, value):
        self._direction = value.normalize()
class Asteroid(Entity):
    """Defines an asteroid

    Asteroids only come in a discrete number of sizes and shapes.
    Size determines the scale and what the asteroid will break into
    when destroyed: a 'small' asteroid disappears, while larger
    asteroids break into smaller ones.

    Attributes:
        _shapes: a list of tuples with two elements,
            element 0 is a tuple of vertices defining a shape
            element 1 is the default scale of that shape
            (the scale for a medium-sized asteroid with that
            shape)
        max_lin_speed: the maximum linear speed an asteroid can have
        min_lin_speed: the minimum linear speed an asteroid can have
        max_rot_speed: the maximum rotational speed an asteroid
            can have
        effect_player: the game's EffectPlayer
    """

    class Size:
        """Defines the possible asteroid sizes"""
        SMALL = 0   # Explodes
        MEDIUM = 1  # Breaks into 2 smalls
        LARGE = 2   # Breaks into 2 mediums
        HUGE = 3    # Breaks into 3 mediums

    # Lazily populated from asteroids.txt on first instantiation
    _shapes = None
    max_lin_speed = 1.5
    max_rot_speed = 2.5
    min_lin_speed = 0.5

    # Extra scaling applied on top of a shape's default (medium) scale.
    # Replaces an if/elif chain that left scale_factor unbound for an
    # invalid size value.
    _SCALE_FACTORS = {
        Size.SMALL: 0.7,
        Size.MEDIUM: 1.0,
        Size.LARGE: 1.2,
        Size.HUGE: 1.5,
    }

    @classmethod
    def _get_shapes(cls):
        """Retrieves the asteroid shapes defined in asteroids.txt and
        populates the shapes list with them.

        Each line of asteroids.txt is a list of space-separated numbers:
        all but the last are vertex coordinates, the last is the default
        scale (scale for a medium-sized asteroid with this shape).
        Exits the program if the file is missing or contains a
        non-numeric entry.
        """
        # Ensure we actually have an asteroids.txt file to parse
        try:
            ast_file = pyglet.resource.file('asteroids.txt', 'r')
        except IOError:
            sys.exit('ERROR: res/asteroids.txt not found!')
        shapes = []
        try:
            for line in ast_file:
                verts = line.split(' ')
                # Convert each entry into a floating point number
                for i in range(0, len(verts)):
                    try:
                        verts[i] = float(verts[i])
                    except ValueError:
                        sys.exit("""ERROR: found entry in asteroids.txt which is
                        not a number!""")
                shapes.append((tuple(verts[0:-1]), verts[-1]))
        finally:
            # Bug fix: the file handle was previously never closed
            ast_file.close()
        cls._shapes = shapes

    def __init__(self, size, direction, lin_speed, rot_speed,
                 shape_index=None, pos=None, rot=0.0):
        """Creates a new Asteroid

        Args:
            size: the size of the Asteroid, which should be a value
                from Asteroid.Size
            direction: the movement direction of the asteroid
            lin_speed: the linear speed of the asteroid
            rot_speed: the rotational speed of the asteroid
            shape_index: the index of the shape to grab from _shapes.
                If None, then we'll grab a random one
            pos: the position of the asteroid; None (the default) means
                the origin. (A None sentinel avoids the shared mutable
                default Vector the old signature had.)
            rot: the rotation of the asteroid

        Raises:
            ValueError: if size is not a value from Asteroid.Size

        Returns:
            a new Asteroid
        """
        self.size = size
        # If we haven't grabbed the shapes from asteroids.txt, do so
        if Asteroid._shapes is None:
            self._get_shapes()
        # Either pick a random shape (newly generated asteroids) or use
        # the supplied index (fragments of a destroyed larger asteroid)
        if shape_index is None:
            self.shape_index = rand.randrange(0, len(Asteroid._shapes))
        else:
            self.shape_index = shape_index
        # Get the relevant data from the tuple
        self._shape, self._def_scale = Asteroid._shapes[self.shape_index]
        # Bug fix: an unknown size used to leave scale_factor unbound
        # and crash later with NameError; fail fast and clearly instead.
        try:
            scale_factor = Asteroid._SCALE_FACTORS[size]
        except KeyError:
            raise ValueError('invalid asteroid size: %r' % (size,))
        # Grab the EffectPlayer (local import; presumably avoids a
        # circular import at module load time -- TODO confirm)
        from effect import EffectPlayer
        self.effect_player = EffectPlayer.instance()
        if pos is None:
            pos = Vector(0, 0)
        Entity.__init__(self, self._shape, direction, lin_speed=lin_speed,
                        rot_speed=rot_speed, pos=pos, rot=rot,
                        scale=self._def_scale * scale_factor)

    def destroy(self):
        """Destroys an asteroid and breaks it into pieces
        (if applicable)

        Returns:
            a list containing the asteroids resulting from
            the destruction of this one (empty for a small asteroid)
        """
        # Play destroy animation
        self.effect_player.play_animation('ASTEROID_DESTROY', self.pos)
        # Depending on the size of the asteroid, create
        # new asteroids resulting from its destruction
        if self.size == Asteroid.Size.SMALL:
            return []
        if self.size == Asteroid.Size.MEDIUM:
            count, new_size = 2, Asteroid.Size.SMALL
        elif self.size == Asteroid.Size.LARGE:
            count, new_size = 2, Asteroid.Size.MEDIUM
        elif self.size == Asteroid.Size.HUGE:
            count, new_size = 3, Asteroid.Size.MEDIUM
        else:
            # Bug fix: an unknown size used to fall through and
            # implicitly return None instead of a list
            return []
        return [self._get_random_asteroid(new_size) for _ in range(count)]

    def _get_random_asteroid(self, size):
        """Creates a random asteroid that results from the destruction
        of this asteroid

        Parameters:
            size: the size that the new asteroid will have

        Returns:
            a new random asteroid at this asteroid's position, with a
            random speed, spin and direction, sharing this shape index
        """
        # Generate random speeds
        lin_speed = rand.uniform(self.min_lin_speed, self.max_lin_speed)
        rot_speed = rand.uniform(0, self.max_rot_speed)
        # Get random direction
        direction = rand_direction(self.pos)
        return Asteroid(size, direction, lin_speed, rot_speed, pos=self.pos,
                        shape_index=self.shape_index)
class Bullet(Entity):
    """A bullet that is shot by the player and can destroy an asteroid.

    Attributes:
        _lifespan: the amount of time, in seconds, that a bullet lasts
            before it is destroyed
        _current_lifespan: the current amount of time the bullet has
            existed on the screen
        expired: True once the bullet's time in play has exceeded the
            lifespan
    """

    # Seconds a bullet survives before being flagged as expired
    _lifespan = 1.6

    def __init__(self, pos, rot, direction):
        """Creates a new bullet.

        Parameters:
            pos: the position of the bullet
            rot: the rotation of the bullet, in degrees
            direction: the direction of the bullet's movement

        Returns:
            a new Bullet
        """
        self.expired = False
        self._current_lifespan = 0
        # Small square, shrunk to a quarter of its nominal size
        square = (10, 10, 10, -10, -10, -10, -10, 10)
        Entity.__init__(self, square, direction, pos=pos, rot=rot,
                        lin_speed=3.0, scale=0.25)

    def update(self, dt):
        """Updates this bullet and checks its current lifespan.

        Parameters:
            dt: the time since the last update
        """
        Entity.update(self, dt)
        # Accumulate time in play; once past the cap, flag as expired
        # so the game can remove the bullet.
        self._current_lifespan += dt
        if not self.expired and self._current_lifespan >= self._lifespan:
            self.expired = True
| |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.generic.base import View
from django.views.generic.edit import FormMixin
from itsdangerous import URLSafeTimedSerializer
from lizard_auth_server import forms
from lizard_auth_server.models import Token
from lizard_auth_server.models import UserProfile
from lizard_auth_server.views import ErrorMessageResponse
# py3 only:
from urllib.parse import urlencode
from urllib.parse import urljoin
from urllib.parse import urlparse
import datetime
import json
import logging
import pytz
logger = logging.getLogger(__name__)
TOKEN_TIMEOUT = datetime.timedelta(minutes=settings.SSO_TOKEN_TIMEOUT_MINUTES)
class ProcessGetFormView(FormMixin, View):
    """Validate a form from GET parameters instead of POST.

    Mirrors Django's ProcessFormView, but binds the form to
    ``request.GET`` so signed parameters can be carried in the URL.
    """

    def get_form(self, form_class=None):
        # Bind to GET data instead of the usual POST payload.
        klass = self.get_form_class() if form_class is None else form_class
        return klass(self.request.GET)

    @method_decorator(never_cache)
    def get(self, request, *args, **kwargs):
        form = self.get_form(self.get_form_class())
        if not form.is_valid():
            return self.form_invalid(form)
        return self.form_valid(form)
class FormInvalidMixin(object):
    """Provides a default error message (HTTP 400) for form_invalid."""

    def form_invalid(self, form):
        errors = form.errors.as_text()
        logger.error("Error while decrypting form: %s", errors)
        return ErrorMessageResponse(self.request, _(errors), 400)
class PortalActionView(FormInvalidMixin, ProcessGetFormView):
    """
    View that allows portals to do some miscellaneous actions,
    like logging out.
    """

    form_class = forms.DecryptForm

    def form_valid(self, form):
        if form.cleaned_data["action"] != "logout":
            return HttpResponseBadRequest("Unknown action")
        # After logout, redirect the user to the LogoutRedirectView,
        # which should redirect the user back to the portal again.
        inner = urlencode({
            "message": self.request.GET["message"],
            "key": self.request.GET["key"],
        })
        redirect_target = "%s?%s" % (
            reverse("lizard_auth_server.sso.logout_redirect"), inner)
        outer = urlencode({"next": redirect_target})
        return HttpResponseRedirect("%s?%s" % (reverse("logout"), outer))
class LogoutRedirectView(FormInvalidMixin, ProcessGetFormView):
    """
    View that redirects the user to the logout page of the portal.
    """

    form_class = forms.DecryptForm

    def form_valid(self, form):
        if form.cleaned_data["action"] != "logout":
            return HttpResponseBadRequest("Unknown action")
        target = urljoin(get_domain(form), "sso/local_logout") + "/"
        return HttpResponseRedirect(target)
class RequestTokenView(ProcessGetFormView):
    """
    Request Token Request view called by the portal application to obtain a
    one-time Request Token.
    """

    form_class = forms.DecryptForm

    def form_valid(self, form):
        token = Token.objects.create_for_portal(form.portal)
        # Sign the token with the portal's own shared secret.
        serializer = URLSafeTimedSerializer(token.portal.sso_secret)
        payload = serializer.dumps({"request_token": token.request_token})
        return HttpResponse(payload)

    def form_invalid(self, form):
        logger.error("Error while decrypting form: %s", form.errors.as_text())
        return HttpResponseBadRequest("Bad signature")
class AuthorizeView(FormInvalidMixin, ProcessGetFormView):
    """
    The portal get's redirected to this view with the `request_token` obtained
    by the Request Token Request by the portal application beforehand.
    This view checks if the user is logged in on the server application and if
    that user has the necessary rights.
    If the user is not logged in, the user is prompted to log in.
    """

    form_class = forms.DecryptForm

    def form_valid(self, form):
        """Validate the request token, then route to the matching flow."""
        request_token = form.cleaned_data["request_token"]
        try:
            # user__isnull=True: only a token not yet claimed by a user
            # may be authorized.
            self.token = Token.objects.get(
                request_token=request_token, portal=form.portal, user__isnull=True
            )
        except Token.DoesNotExist:
            return HttpResponseForbidden("Invalid request token")
        if self.check_token_timeout():
            self.domain = get_domain(form)
            if self.request.user.is_authenticated:
                return self.form_valid_authenticated()
            # force_sso_login defaults to True: normally force a login.
            return self.form_valid_unauthenticated(
                form.cleaned_data.get("force_sso_login", True)
            )
        return self.token_timeout()

    def check_token_timeout(self):
        """Return True while the token is still within TOKEN_TIMEOUT."""
        delta = datetime.datetime.now(tz=pytz.UTC) - self.token.created
        return delta <= TOKEN_TIMEOUT

    def token_timeout(self):
        """Invalidate the stale token and show a 403 error page."""
        self.token.delete()
        return ErrorMessageResponse(
            self.request,
            _("Token timed out. Please return to the portal " "to get a fresh token."),
            403,
        )

    def form_valid_authenticated(self):
        """
        Called then login succeeded.
        """
        if self.has_access():
            return self.success()
        return self.access_denied()

    def has_access(self):
        """
        Check whether the user has access to the portal.
        """
        # check whether the UserProfile object is related to this Portal
        try:
            # get_profile is deprecated in Django >= 1.7
            # profile = self.request.user.get_profile()
            profile = self.request.user.user_profile
        except UserProfile.DoesNotExist:
            return False
        return profile.has_access(self.token.portal)

    def success(self):
        """Bind the token to the user and send them back to the portal."""
        params = {
            "request_token": self.token.request_token,
            "auth_token": self.token.auth_token,
        }
        # encrypt the tokens with the secret key of the portal
        message = URLSafeTimedSerializer(self.token.portal.sso_secret).dumps(params)
        # link the user model to the token model, so we can return the
        # proper profile when the SSO client calls the VerifyView
        self.token.user = self.request.user
        self.token.save()
        # redirect user back to the portal
        url = urljoin(self.domain, "sso/local_login/")
        url = "%s?%s" % (url, urlencode({"message": message}))
        return HttpResponseRedirect(url)

    def access_denied(self):
        """
        Show a user-friendly access denied page.
        """
        context = {"login_url": self.build_login_url()}
        return TemplateResponse(
            self.request, "lizard_auth_server/access_denied.html", context, status=403
        )

    def build_login_url(self):
        """
        Store the authorize view (most likely the current view) as
        "next" page for a login page.
        """
        nextparams = {
            "message": self.request.GET["message"],
            "key": self.request.GET["key"],
        }
        # After login the user returns here to finish the authorize flow.
        params = urlencode(
            [
                (
                    "next",
                    "%s?%s"
                    % (
                        reverse("lizard_auth_server.sso.authorize"),
                        urlencode(nextparams),
                    ),
                )
            ]
        )
        return "%s?%s" % (reverse("login"), params)

    def build_back_to_portal_url(self):
        """Redirect user back to the portal, without logging him in."""
        return urljoin(self.domain, "sso/local_not_logged_in/")

    def form_valid_unauthenticated(self, force_sso_login):
        """
        Redirect user to login page if force_sso_login == True, else, return
        without having to log in.
        """
        if force_sso_login:
            # Typical situation -- force the user to login.
            return HttpResponseRedirect(self.build_login_url())
        else:
            # Return the unauthenticated user back to the portal.
            return HttpResponseRedirect(self.build_back_to_portal_url())
def construct_user_data(user=None, profile=None):
    """
    Build a dict describing a user: identity fields, permissions,
    organisation and creation date.

    Either ``user`` or ``profile`` may be omitted; the missing one is
    derived from the other. Older versions of this server did not send
    information about roles, and only a single organisation name.
    Older clients still expect that, so we stay backward compatible.
    """
    if user is None:
        user = profile.user
    if profile is None:
        # user.get_profile() is deprecated in Django >= 1.7
        profile = user.user_profile

    attrs = (
        "pk",
        "username",
        "first_name",
        "last_name",
        "email",
        "is_active",
        "is_staff",
        "is_superuser",
    )
    data = {key: getattr(user, key) for key in attrs}
    data["permissions"] = [
        {
            "content_type": perm.content_type.natural_key(),
            "codename": perm.codename,
        }
        for perm in user.user_permissions.select_related("content_type").all()
    ]
    # For backward compatibility, if the user has at least one
    # organisation, send the name of one of them.
    data["organisation"] = profile.organisation
    # datetimes should be serialized to an iso8601 string
    data["created_at"] = profile.created_at.isoformat()
    return data
def construct_organisation_role_dict(organisation_roles):
"""Return a dict with 3 keys: organisations, roles, and organisation_roles.
Args:
organisation_roles: an iterable of OrganisationRoles.
"""
data = {}
# Defensive programming: make sure we have a unique set of
# organisation_roles. At the moment of writing, models.
# UserProfile.all_organisation_roles() does not...
organisation_roles = set(organisation_roles)
organisations = set(obj.organisation for obj in organisation_roles)
roles = set(obj.role for obj in organisation_roles)
data["organisation_roles"] = [
[obj.organisation.unique_id, obj.role.unique_id] for obj in organisation_roles
]
data["organisations"] = [obj.as_dict() for obj in organisations]
data["roles"] = [obj.as_dict() for obj in roles]
return data
def get_domain(form):
    """Return domain for the redirect back to the site.

    Normally, the portal's ``redirect_url`` is used. If your server is
    known under several domains, you can pass a ``domain`` GET
    parameter instead.

    Note: the domain can also have an extra path element, so
    http://some.where/something is allowed, if needed.
    """
    fallback = form.portal.redirect_url
    domain = form.cleaned_data.get("domain", None)
    # BBB: previously the "next" parameter was used, but django itself
    # also uses it, leading to conflicts. Only honour "next" when it is
    # a full http(s) URL, otherwise omit it.
    next_url = form.cleaned_data.get("next", None)
    if next_url and next_url.startswith("http"):  # Includes https :-)
        domain = next_url
    if domain is None:
        return fallback
    netloc = urlparse(domain)[1]
    if not netloc:
        # Just a path: resolve it against the portal's redirect url.
        return urljoin(fallback, domain)
    if form.portal.allowed_domain != "" and domain_match(
        netloc, form.portal.allowed_domain
    ):
        return domain
    return fallback
def domain_match(domain, suffix):
    """Test if `domain` ends with `suffix`.

    Args:
        domain (str): a domain name.
        suffix (str): a string the domain name should end with. Multiple
            suffixes are possible and should be separated by whitespace,
            for example: 'lizard.net ddsc.nl'.

    Returns:
        bool: True if domain ends with the specified suffix, False otherwise.

    NOTE(review): this is a plain string suffix check, so e.g.
    'evillizard.net' also matches 'lizard.net'. Confirm whether
    allowed_domain entries are expected to start with a dot.
    """
    suffixes = tuple(suffix.split())
    return domain.endswith(suffixes)
class VerifyView(ProcessGetFormView):
    """
    View called by the portal application to verify the Auth Token passed by
    the portal request as GET parameter with the server application
    """

    form_class = forms.DecryptForm

    def get_user_json(self):
        """
        Returns the JSON string representation of the user object for a portal.
        """
        data = construct_user_data(profile=self.token.user.user_profile)
        return json.dumps(data)

    def get_organisation_roles_json(self, portal):
        """Return the user's organisation roles for *portal* as JSON."""
        profile = self.token.user.user_profile
        roles = profile.all_organisation_roles(portal)
        return json.dumps(construct_organisation_role_dict(roles))

    def form_valid(self, form):
        try:
            # user__isnull=False: only tokens already bound to a user
            # by AuthorizeView can be verified.
            self.token = Token.objects.get(
                auth_token=form.cleaned_data["auth_token"],
                user__isnull=False,
                portal=form.portal,
            )
        except Token.DoesNotExist:
            return HttpResponseForbidden("Invalid auth token")
        # Metadata the SSO client needs to construct a local user.
        params = {
            "user": self.get_user_json(),
            "roles": self.get_organisation_roles_json(form.portal),
        }
        # Sign the payload with the portal's shared secret.
        data = URLSafeTimedSerializer(self.token.portal.sso_secret).dumps(params)
        # Tokens are single-use: invalidate it now.
        self.token.delete()
        return HttpResponse(data)

    def form_invalid(self, form):
        logger.error("Error while decrypting form: %s", form.errors.as_text())
        return HttpResponseBadRequest("Bad signature")
| |
# -*- coding: utf-8 -*-
import os
#import get_colours
import breaking_levels
import theme_mapping
import mapping_utility
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib_scalebar.scalebar import ScaleBar
#import matplotlib.patches as mpatches
from matplotlib.text import TextPath
import matplotlib.lines as mlines
def map_scatter(gdf, ax, size=12, marker='.', facecolor='navy', zorder=1, extend_context=True, alpha=1.):
    """Plot every geometry in *gdf* with one uniform colour and size."""
    n = len(gdf)
    return _mapping(gdf, [facecolor] * n, ax, marker=marker,
                    sizes=[size] * n, alpha=alpha, zorder=zorder,
                    extend_context=extend_context)
def map_category(gdf, cat_column, ax, cat_order=None, marker_order=None, size_order=None, colour_order=None, size=12, facecolor='navy', zorder=1, extend_context=False, alpha=1.):
    """Plot *gdf* with one marker/size/colour per category in *cat_column*.

    cat_order/marker_order/size_order/colour_order, when given, fix the
    category order and per-category styles; missing entries are filled
    from a default marker list, *size* and *facecolor*.
    """
    default_marker_list = ['.', '*', '+', ',', '<', '>','^', '_', 'D', 'H', 'P', 'X', 'd', 'h', 'o', 'p', 's', 'v', 'x', '|', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,'1', '2', '3', '4', '8', ]
    # Category order: caller-supplied, or the (unordered) unique values.
    if cat_order is None:
        catset = list(set(gdf[cat_column].tolist()))
    else:
        catset = cat_order
    # Pad the marker list with unused defaults until there is one
    # marker per category.
    markerset = []
    if not(marker_order is None):
        markerset = marker_order
    i = 0
    while len(markerset)<len(catset):
        if default_marker_list[i] not in markerset:
            markerset.append(default_marker_list[i])
        i+=1
        if i>=len(default_marker_list):
            # Defaults exhausted: top up by repeating markers and warn.
            j = len(catset)-len(markerset)
            if j<=len(default_marker_list):
                markerset.extend(default_marker_list[:j])
            else:
                markerset.extend(default_marker_list)
            # NOTE(review): i is reset to the last index so the loop can
            # continue if still short -- with a short marker_order this
            # keeps appending the same last marker; confirm intended.
            i = len(default_marker_list)-1
            print '!!! warning: repeated markers !!!'
    # Pad sizes and colours with the uniform fallbacks.
    sizeset = []
    if not(size_order is None):
        sizeset = size_order
    while len(sizeset)<len(catset):
        sizeset.append(size)
    colourset = []
    if not(colour_order is None):
        colourset = colour_order
    while len(colourset)<len(catset):
        colourset.append(facecolor)
    # Draw each category as its own layer.
    for c,m,s,o in zip(catset, markerset, sizeset, colourset):
        temp_gdf = gdf[gdf[cat_column]==c]
        olist = [o]*len(temp_gdf)
        slist = [s]*len(temp_gdf)
        ax = _mapping(temp_gdf, olist, ax, marker=m, sizes=slist, alpha=alpha, zorder=zorder, extend_context=False,)
    # Extend the axes to the full dataset bounds once, at the end.
    if extend_context:
        minx, miny, maxx, maxy = gdf.geometry.total_bounds
        xlim = [minx,maxx]
        ylim = [miny,maxy]
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    return ax
def map_colour(gdf, colour_column, ax, size=12, marker='.', zorder=1., extend_context=True, alpha=1.):
    """Plot the geometries using per-row colours from *colour_column*."""
    colours = gdf[colour_column].tolist()
    return _mapping(gdf, colours, ax, marker=marker,
                    sizes=[size] * len(gdf), alpha=alpha, zorder=zorder,
                    extend_context=extend_context)
def map_size(gdf, size_column, ax, size_scale=1., marker='.', facecolor='navy', zorder=1, extend_context=True, alpha=1.):
    """Plot the geometries with per-row sizes from *size_column*,
    multiplied by *size_scale*."""
    scaled_sizes = [value * size_scale for value in gdf[size_column].tolist()]
    return _mapping(gdf, [facecolor] * len(gdf), ax, marker=marker,
                    sizes=scaled_sizes, alpha=alpha, zorder=zorder,
                    extend_context=extend_context)
def map_sequence(gdf, bysequence, ax, by_colour=None, by_size=None,
                 sizing='level', sizes=12, size_scale=None, marker='.', break_method='quantile',
                 break_N=6, break_cuts=[], break_vmin=None, break_vmax=None,
                 color_group='cmocean_sequential', color_name='Turbid_10', reverse=False,
                 alpha=1., zorder=1, facecolor='navy',
                 extend_context=True, add_legend=True,
                 font_path=None, legend_loc='upper left', legend_format='%.2f'):
    """Map the values in column *bysequence*, styled by colour and/or size.

    Dispatches on the by_colour/by_size flags to colour_level_points,
    size_level_points or colour_size_level_points; falls back to
    map_scatter when both flags are None, and does nothing when both
    are explicitly False.
    """
    # Colour only
    if by_colour==True and (by_size is None or by_size==False):
        ax = colour_level_points(gdf, bysequence, ax, sizes=sizes, marker=marker, break_method=break_method,
                                 break_N=break_N, break_cuts=break_cuts, break_vmin=break_vmin, break_vmax=break_vmax,
                                 color_group=color_group, color_name=color_name, reverse=reverse,
                                 legend_format=legend_format, alpha=alpha, zorder=zorder,
                                 extend_context=extend_context, add_legend=add_legend,
                                 font_path=font_path, legend_loc=legend_loc)
    # Size only
    elif by_size==True and (by_colour is None or by_colour==False):
        ax = size_level_points(gdf, bysequence, ax, sizing=sizing, sizes=sizes, marker=marker, break_method=break_method,
                               break_N=break_N, break_cuts=break_cuts, break_vmin=break_vmin, break_vmax=break_vmax,
                               facecolor=facecolor, size_scale=size_scale,
                               legend_format=legend_format, alpha=alpha, zorder=zorder,
                               extend_context=extend_context, add_legend=add_legend,
                               font_path=font_path, legend_loc=legend_loc)
    # Both colour and size
    elif by_size==True and by_colour==True:
        ax = colour_size_level_points(gdf, bysequence, ax, sizing=sizing, sizes=sizes, marker=marker, break_method=break_method,
                                      break_N=break_N, break_cuts=break_cuts, break_vmin=break_vmin, break_vmax=break_vmax,
                                      color_group=color_group, color_name=color_name, reverse=reverse,
                                      legend_format=legend_format, alpha=alpha, zorder=zorder,
                                      extend_context=extend_context, add_legend=add_legend,
                                      font_path=font_path, legend_loc=legend_loc)
    # Neither flag given: plain scatter fallback
    elif by_colour is None and by_size is None:
        print 'no styling(by_colour and/or by_size) is True, use map_scatter'
        if isinstance(sizes, (list,tuple)):
            sizes=sizes[0]
        ax = map_scatter(gdf, ax, size=sizes, marker=marker, facecolor=facecolor, zorder=zorder, extend_context=extend_context, alpha=alpha)
    else: # by_size==False and by_colour==False
        print 'nothing had been done cause all types of drawing are False'
        print 'returning empty ax'
    return ax
def colour_level_points(gdf, bysequence, ax, sizes=12, marker='.', break_method='quantile',
                        break_N=6, break_cuts=[], break_vmin=None, break_vmax=None,
                        color_group='cmocean_sequential', color_name='Turbid_10', reverse=False,
                        alpha=1., zorder=1, extend_context=True, add_legend=True,
                        font_path=None, legend_loc='upper left',
                        legend_format='%.2f'):
    """Draw *gdf* points coloured by the binned values of *bysequence*."""
    # A list of sizes collapses to its first entry: colour-levelled
    # points are drawn at one uniform size.
    base_size = sizes[0] if isinstance(sizes, list) else sizes
    size_list = [base_size] * len(gdf)
    _levels, colour_list, colour_tuples = theme_mapping.colouring_sequence(
        gdf, bysequence, break_method=break_method, break_N=break_N,
        break_cuts=break_cuts, break_vmin=break_vmin, break_vmax=break_vmax,
        color_group=color_group, color_name=color_name, reverse=reverse,
        legend_format=legend_format)
    legend_tuples = colour_tuples if add_legend else None
    return _mapping(gdf, colour_list, ax, colour_tuples=legend_tuples,
                    marker=marker, sizes=size_list, alpha=alpha, zorder=zorder,
                    extend_context=extend_context, font_path=font_path,
                    legend_loc=legend_loc)
def size_level_points(gdf, bysequence, ax, sizing='level', break_method='quantile', break_N=6, break_cuts=[], break_vmin=None, break_vmax=None,
                      facecolor='navy', sizes=12, marker='.', size_scale=None,
                      alpha=1., zorder=1, extend_context=True, add_legend=True,
                      font_path=None, legend_loc='upper left', legend_format='%.2f'):
    """Draw *gdf* points whose marker size encodes *bysequence*."""
    size_list = theme_mapping.get_sizes(gdf, bysequence, break_method, break_N,
                                        break_cuts, break_vmin, break_vmax,
                                        sizing, sizes, size_scale)
    uniform_colours = [facecolor] * len(gdf)
    return _mapping(gdf, uniform_colours, ax, marker=marker, sizes=size_list,
                    alpha=alpha, zorder=zorder, extend_context=extend_context,
                    font_path=font_path, legend_loc=legend_loc)
def colour_size_level_points(gdf, bysequence, ax, marker='.',
                             break_method='quantile', break_N=6, break_cuts=[],
                             break_vmin=None, break_vmax=None,
                             color_group='cmocean_sequential', color_name='Turbid_10', reverse=False,
                             sizing='level', sizes=12, size_scale=None,
                             alpha=1., zorder=1, extend_context=True, add_legend=True,
                             font_path=None, legend_loc='upper left',
                             legend_format='%.2f'):
    """Draw *gdf* points where both colour and size encode *bysequence*."""
    _levels, colour_list, colour_tuples = theme_mapping.colouring_sequence(
        gdf, bysequence, break_method=break_method, break_N=break_N,
        break_cuts=break_cuts, break_vmin=break_vmin, break_vmax=break_vmax,
        color_group=color_group, color_name=color_name, reverse=reverse,
        legend_format=legend_format)
    size_list = theme_mapping.get_sizes(gdf, bysequence, break_method, break_N,
                                        break_cuts, break_vmin, break_vmax,
                                        sizing, sizes, size_scale)
    legend_tuples = colour_tuples if add_legend else None
    return _mapping(gdf, colour_list, ax, colour_tuples=legend_tuples,
                    marker=marker, sizes=size_list, alpha=alpha, zorder=zorder,
                    extend_context=extend_context, font_path=font_path,
                    legend_loc=legend_loc)
"""
def get_icons(iconset, font_size=12):
mdir = os.path.dirname(__file__)
fname = os.path.join(mdir, iconset+'.ttf')
myfont = FontProperties(fname=fname, size=font_size)
return myfont
"""
def get_font(font_path=None, font_size=12, font_style='normal'):
    """Return a FontProperties for *font_path*, defaulting to the
    GenJyuuGothicL font bundled next to this module."""
    if font_path is not None:
        fname = font_path
    else:
        # Fall back to the font shipped in this package's fonts/ dir.
        fname = os.path.join(os.path.dirname(__file__), 'fonts',
                             'GenJyuuGothicL-P-Normal.ttf')
    return FontProperties(fname=fname, size=font_size, style=font_style)
def _mapping(gdf, colour_list, ax, colour_tuples=None, marker='.', xlim=None, ylim=None, ec='#FFFFFF', lw=1., sizes=12, alpha=1., zorder=1, extend_context=True, font_path=None, legend_loc='upper left'):
    """Low-level point renderer shared by the map_* helpers.

    Draws one point per geometry in *gdf*, coloured by *colour_list*
    and sized by *sizes*. *marker* is either a matplotlib marker string
    or a (character, FontProperties) tuple for icon-font glyphs. When
    *colour_tuples* is given ((value, label, colour) triples), a legend
    is added at *legend_loc*.
    """
    if extend_context:
        minx, miny, maxx, maxy = gdf.geometry.total_bounds
        if xlim is None:
            xlim = [minx, maxx]
        if ylim is None:
            ylim = [miny, maxy]
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    # Split the point geometries into x/y coordinate lists
    pts = gdf.geometry.tolist()
    points = [[], []]
    for i in range(len(pts)):
        xx, yy = pts[i].xy
        points[0].append(list(xx)[0])
        points[1].append(list(yy)[0])
    if isinstance(marker, str):
        ax.scatter(points[0], points[1], c=colour_list, marker=marker, s=sizes, alpha=alpha, zorder=zorder)
    elif isinstance(marker, tuple):
        # (character, FontProperties): draw each point as a text glyph
        w, ifont = marker
        for xx, yy, co, ss in zip(points[0], points[1], colour_list, sizes):
            ax.text(xx, yy, w, fontproperties=ifont, size=ss, color=co, alpha=alpha, zorder=zorder, ha='center', va='center')
    if not(colour_tuples is None):
        # Build a legend entry per (value, label, colour) tuple
        myfont = get_font(font_path=font_path)
        handles = []
        if isinstance(marker, tuple):
            w, ifont = marker
            tp = TextPath((0, 0), w, prop=ifont, size=12)
            for v, r, c in colour_tuples:
                # Off-canvas proxy scatter, used only for its legend entry
                patch = ax.scatter([-1000, ], [-1000, ], s=500, marker=tp, c=c, label=r.decode('utf-8'))
                handles.append(patch)
            ax.legend(handles=handles, prop=myfont, loc=legend_loc)
        elif isinstance(marker, str):
            msize = sizes
            if isinstance(sizes, (list, tuple)):
                msize = sizes[0]
            elif isinstance(sizes, (float, int)):
                msize = sizes
            else:
                msize = 8
            for v, r, c in colour_tuples:
                # Bug fix: markersize was hard-coded to 12 although
                # msize was computed above and never used; legend
                # markers now follow the requested size.
                mar = mlines.Line2D([], [], linestyle="None", color=c, marker=marker,
                                    markersize=msize, label=r.decode('utf-8'))
                handles.append(mar)
            ax.legend(handles=handles, prop=myfont, loc=legend_loc)
    return ax
def prepare_map(ax, map_context=None, background_colour=None, xlim=None, ylim=None, show_xy=False):
    """Delegate axis preparation to :mod:`mapping_utility` unchanged."""
    options = dict(map_context=map_context, background_colour=background_colour,
                   xlim=xlim, ylim=ylim, show_xy=show_xy)
    return mapping_utility.prepare_map(ax, **options)
######################3 testing area
def test_scatter():
    """Demo: plot shapefile points with an icon-font marker."""
    import geopandas as gpd
    import markerset as ms
    gdf = gpd.read_file('../testdata/points2008.shp')
    ii = ms.get_marker('weathercons', 'night-lightning')
    fig,ax = plt.subplots(figsize=(7,7))
    ax = prepare_map(ax, map_context=gdf, background_colour='royalblue', show_xy=False)
    ax = map_scatter(gdf, ax, marker=ii, extend_context=False)
    #plt.show()
def test_sequential_colour():
    """Demo: colour points sequentially by the 'time' attribute."""
    import geopandas as gpd
    import markerset as ms
    ii = ms.get_marker('brandico', 'facebook')
    #ii = ms.weathercons('horizon')
    gdf = gpd.read_file('../testdata/points2008.shp')
    fig,ax = plt.subplots(figsize=(7,7))
    ax = prepare_map(ax, map_context=gdf, background_colour='royalblue', show_xy=False)
    ax = map_sequence(gdf, 'time', ax, by_colour=True, marker=ii, extend_context=False)
    #ax = add_border(gdf, ax, lw=1.5, ec='#000000', alpha=0.3)
    #ax.set_title('areaa')
    ## colour_tuples for legend (ind, range, colorhex)
    ax.set_title('test sequential colour')
    #plt.show()
def test_sequential_size():
    """Demo: size points sequentially (levelled sizing) by 'time'."""
    import geopandas as gpd
    import markerset as ms
    ii = ms.get_marker('maki', 'airport')
    #ii = ms.weathercons('horizon')
    gdf = gpd.read_file('../testdata/points2008.shp')
    fig,ax = plt.subplots(figsize=(7,7))
    ax = prepare_map(ax, map_context=gdf, background_colour='royalblue', show_xy=False)
    ax = map_sequence(gdf, 'time', ax, by_size=True, marker=ii, extend_context=False)
    ax.set_title('test sequential size level')
    #plt.show()
def test_sequential_size2():
    """Demo: size points sequentially with graduated sizing by 'time'."""
    import geopandas as gpd
    import markerset as ms
    ii = ms.get_marker('linecons', 'paper-plane')
    #ii = ms.weathercons('horizon')
    gdf = gpd.read_file('../testdata/points2008.shp')
    fig,ax = plt.subplots(figsize=(7,7))
    ax = prepare_map(ax, map_context=gdf, background_colour='royalblue', show_xy=False)
    ax = map_sequence(gdf, 'time', ax, by_size=True, sizing='graduated', marker=ii, extend_context=False)
    ax.set_title('test sequential size graduated')
    #plt.show()
def test_sequential_both():
    """Demo: vary both colour and size sequentially by 'time'."""
    import geopandas as gpd
    import markerset as ms
    ii = ms.get_marker('fontawesome', 'user-o')
    #ii = ms.weathercons('horizon')
    gdf = gpd.read_file('../testdata/points2008.shp')
    fig,ax = plt.subplots(figsize=(7,7))
    ax = prepare_map(ax, map_context=gdf, background_colour='royalblue', show_xy=False)
    ax = map_sequence(gdf, 'time', ax, by_colour=True, by_size=True, marker=ii, extend_context=False)
    ax.set_title('test sequential colour and size')
    #plt.show()
def test_listing():
    """Smoke-test the marker-set listing helpers."""
    import markerset as ms
    # print() with a single argument is valid in both Python 2 and 3,
    # unlike the original Python-2-only print statements.
    print(ms.list_icon_sets())
    print(ms.list_icon_names('weathercons'))
    #print(ms.get_char_map('weathercons')['horizon'])
    ii = ms.get_marker('weathercons', 'night-lightning')
    #ii = ms.weathercons('horizon') # or this
if __name__ == '__main__':
    # Run every demo; all figures are displayed together at the end.
    test_listing()
    test_sequential_colour()
    test_sequential_size()
    test_sequential_size2()
    test_sequential_both()
    test_scatter()
    plt.show()
######################3 testing area
| |
import unittest
from unittest import expectedFailure, skip
import colourettu
class Test_Colour(unittest.TestCase):
    """Tests for colourettu.Colour: construction from hex strings, lists,
    tuples and normalized RGB values; channel accessors; conversions;
    validation errors; and equality semantics."""
    def test_default_colour(self):
        """Default colour is white"""
        colour1 = colourettu.Colour()
        self.assertEqual(colour1._r, 255)
        self.assertEqual(colour1._g, 255)
        self.assertEqual(colour1._b, 255)
    def test_3hex_colour(self):
        """Set the colour as like #dc8"""
        colour1 = colourettu.Colour("#dc8")
        self.assertEqual(colour1._r, 221)
        self.assertEqual(colour1._g, 204)
        self.assertEqual(colour1._b, 136)
    def test_6hex_colour(self):
        """Set the colour as like #123456"""
        colour1 = colourettu.Colour("#123456")
        self.assertEqual(colour1._r, 18)
        self.assertEqual(colour1._g, 52)
        self.assertEqual(colour1._b, 86)
    def test_red(self):
        """Get red of colour"""
        colour1 = colourettu.Colour("#D00")
        self.assertEqual(colour1.red(), 221)
    def test_green(self):
        """Get green of colour"""
        colour1 = colourettu.Colour("#0B0")
        self.assertEqual(colour1.green(), 187)
    def test_blue(self):
        """Get blue of colour"""
        colour1 = colourettu.Colour("#00A")
        self.assertEqual(colour1.blue(), 170)
    def test_rgb(self):
        """Get rgb tuple of colour"""
        colour1 = colourettu.Colour("#345")
        self.assertEqual(colour1.rgb(), (51, 68, 85))
    def test_hex(self):
        """Get hex value of colour"""
        colour1 = colourettu.Colour()
        self.assertEqual(colour1.hex(), "#FFFFFF")
    def test_hex_black(self):
        """Get hex value of colour with leading zeros"""
        colour1 = colourettu.Colour("#0102DD")
        self.assertEqual(colour1.hex(), "#0102DD")
    def test_list(self):
        """Get value of colour given as a list"""
        colour1 = colourettu.Colour([5, 10, 25])
        self.assertEqual(colour1.hex(), "#050A19")
    def test_tuple(self):
        """Get value of colour given as a tuple"""
        colour1 = colourettu.Colour((35, 60, 128))
        self.assertEqual(colour1.hex(), "#233C80")
    def test_list_normalized_rgb(self):
        """Get value of colour given as a list of normalized rgb values"""
        colour1 = colourettu.Colour(
            [0.24287275804811442, 0.4339925778684171, 0.16562176715691224],
            normalized_rgb=True,
        )
        self.assertEqual(colour1.hex(), "#3D6E2A")
    def test_tuple_normalized_rgb(self):
        """Get value of colour given as a tuple of normalized rgb values"""
        colour1 = colourettu.Colour(
            (0.5656023325553875, 0.8070789468680986, 0.8006291331865334),
            normalized_rgb=True,
        )
        self.assertEqual(colour1.hex(), "#90CDCC")
    def test_tuple_normalized_rgb_value_too_big(self):
        """Fail on invalid normalized RGB values (too big, tuple)"""
        with self.assertRaises(ValueError):
            colour1 = colourettu.Colour((1.2, 0.4, 0.378), normalized_rgb=True)
    def test_tuple_normalized_rgb_value_too_small(self):
        """Fail on invalid normalized RGB values (too small, tuple)"""
        with self.assertRaises(ValueError):
            colour1 = colourettu.Colour((0.28, -0.4, 0.378), normalized_rgb=True)
    def test_list_normalized_rgb_value_too_big(self):
        """Fail on invalid normalized RGB values (too big, list)"""
        with self.assertRaises(ValueError):
            colour1 = colourettu.Colour([0.289, 0.289, 1.33], normalized_rgb=True)
    def test_list_normalized_rgb_value_too_small(self):
        """Fail on invalid normalized RGB values (too small, list)"""
        with self.assertRaises(ValueError):
            colour1 = colourettu.Colour([-0.289, 0.289, 0.33], normalized_rgb=True)
    def test_tuple_not_normalized(self):
        """Fail if something other than `True` or `False` is passed for `normalized_rgb`"""
        with self.assertRaises(TypeError):
            colour1 = colourettu.Colour(normalized_rgb="stuff")
    @skip("Not yet defined")
    def test_list_not_normalized(self):
        pass
    @skip("Not yet defined")
    def test_tuple_for_normalized_rgb_too_long(self):
        pass
    @skip("Not yet defined")
    def test_tuple_for_normalized_rgb_too_short(self):
        pass
    def test_normalized_value_zero_float(self):
        """Pass a value of 0 for a normalized RGB value"""
        colour1 = colourettu.Colour([0.0, 0.0, 0.0], normalized_rgb=True)
        colour2 = colourettu.Colour("#000")
        self.assertEqual(colour1, colour2)
    def test_normalized_value_one_float(self):
        """Pass a value of 1 for a normalized RGB value"""
        colour1 = colourettu.Colour([1.0, 1.0, 1.0], normalized_rgb=True)
        colour2 = colourettu.Colour("#FFF")
        self.assertEqual(colour1, colour2)
    def test_normalized_value_zero_int(self):
        """Pass a value of 0 for a normalized RGB value"""
        colour1 = colourettu.Colour([0, 0, 0], normalized_rgb=True)
        colour2 = colourettu.Colour("#000")
        self.assertEqual(colour1, colour2)
    def test_normalized_value_one_int(self):
        """Pass a value of 1 for a normalized RGB value"""
        colour1 = colourettu.Colour([1, 1, 1], normalized_rgb=True)
        colour2 = colourettu.Colour("#FFF")
        self.assertEqual(colour1, colour2)
    @expectedFailure
    def test_colour_vs_color(self):
        """test alternate spelling
        Removed in v1.0.0"""
        self.assertEqual(colourettu.Color, colourettu.Colour)
    def test_bad_hex_string_length(self):
        """Invalid hex string should raise error"""
        with self.assertRaises(ValueError):
            colour1 = colourettu.Colour("#dddd")
    def test_not_hex_string(self):
        """Strings must start with '#', otherwise raise Error"""
        with self.assertRaises(ValueError):
            colour1 = colourettu.Colour("dddd")
    def test_bad_hex_chars(self):
        """Hex strings that don't contain only hex characters [0-9a-f]
        should raise error"""
        with self.assertRaises(ValueError):
            colour1 = colourettu.Colour("#asdfgh")
    def test_bad_list_legnth(self):
        """Lists must be 3 long, otherwise raise error"""
        with self.assertRaises(ValueError):
            colour1 = colourettu.Colour([1, 1, 1, 1])
    def test_bad_tuple_legnth(self):
        """Tuples must be 3 long, otherwise raise error"""
        with self.assertRaises(ValueError):
            colour1 = colourettu.Colour((1, 1, 1, 1))
    def test_bad_list_value(self):
        """Lists may contain only integers, otherwise raise error"""
        with self.assertRaises(TypeError):
            colour1 = colourettu.Colour([1, 2, "stuff"])
    def test_bad_Tuple_value(self):
        """Tuples may contain only integers, otherwise raise error"""
        with self.assertRaises(TypeError):
            colour1 = colourettu.Colour((1, 2, "stuff"))
    def test_rgb_normalized_white(self):
        """Get normalized rgb tuple of white"""
        colour1 = colourettu.Colour("#FFF")
        self.assertEqual(colour1.normalized_rgb(), (1, 1, 1))
    def test_rgb_normalized_black(self):
        """Get normalized rgb tuple of black"""
        colour1 = colourettu.Colour("#000")
        self.assertEqual(colour1.normalized_rgb(), (0, 0, 0))
    def test_repr(self):
        """Representation of `Colour` class"""
        colour1 = colourettu.Colour()
        self.assertEqual(colour1.__repr__(), "<colourettu.Colour #FFFFFF>")
    def test_str(self):
        """String representation of `Colour` class"""
        colour1 = colourettu.Colour()
        self.assertEqual(colour1.__str__(), "#FFFFFF")
    def test_eq(self):
        """Equality operator"""
        c1 = colourettu.Colour()
        c2 = colourettu.Colour()
        c3 = colourettu.Colour("#abc")
        c4 = colourettu.Colour("#abc")
        c5 = colourettu.Colour("#123456")
        c6 = c5
        c7 = colourettu.Colour("#000000")
        c8 = colourettu.Colour([0, 0, 0])
        self.assertEqual(c1, c2)
        self.assertEqual(c3, c4)
        self.assertEqual(c5, c6)
        self.assertEqual(c7, c8)
class Test_Contrast(unittest.TestCase):
    """Tests for colourettu's contrast-ratio computation."""

    def test_white_white(self):
        """The Contrast of White and White should be 1"""
        white = "#FFF"
        self.assertAlmostEqual(colourettu.contrast(white, white), 1)

    def test_black_black(self):
        """The Contrast of Black and Black should be 1"""
        black = "#000"
        self.assertAlmostEqual(colourettu.contrast(black, black), 1)

    def test_white_black(self):
        """The Contrast of White and Black should be 21"""
        self.assertAlmostEqual(colourettu.contrast("#FFF", "#000"), 21)

    def test_from_colour(self):
        """The Contrast of a provided colour against White"""
        base = colourettu.Colour("#cde")
        self.assertTrue(base.contrast("#FFF"))
class Test_Luminance(unittest.TestCase):
    """Tests for colourettu's luminance computation."""

    def test_white_hex(self):
        """The Luminance of White (as hex) should be 1"""
        self.assertAlmostEqual(colourettu.luminance("#FFFFFF"), 1)

    def test_white_tuple(self):
        """The Luminance of White (as a tuple) should be 1"""
        white = (255, 255, 255)
        self.assertAlmostEqual(colourettu.luminance(white), 1)

    def test_black(self):
        """The Luminance of Black should be 0"""
        self.assertAlmostEqual(colourettu.luminance("#000"), 0)

    def test_colour_provided(self):
        """The Luminance of a provided colour"""
        white = colourettu.Colour()
        self.assertAlmostEqual(colourettu.luminance(white), 1)

    def test_as_colour_property(self):
        """The Luminance of as a property of a colour"""
        white = colourettu.Colour()
        self.assertAlmostEqual(white.luminance(), 1)
class Test_Blend(unittest.TestCase):
    """Tests for colourettu.blend."""

    def test_white_white(self):
        """Blending White with White should still be White."""
        white_hex = colourettu.Colour("#fff")
        white_list = colourettu.Colour([255, 255, 255])
        blended = colourettu.blend(white_hex, white_list)
        self.assertEqual(blended, white_hex)

    def test_black_black(self):
        """Blending Black with Black should still be Black."""
        black_hex = colourettu.Colour("#000")
        black_list = colourettu.Colour([0, 0, 0])
        blended = colourettu.blend(black_hex, black_list)
        self.assertEqual(blended, black_hex)

    def test_white_black(self):
        """Blending White with Black should still be dark grey."""
        white = colourettu.Colour("#fff")
        black = colourettu.Colour("#000")
        expected = colourettu.Colour([180, 180, 180])
        self.assertEqual(colourettu.blend(white, black), expected)

    def test_black_white(self):
        """
        Order shouldn't matter. Blending White with Black should still be
        dark grey again.
        """
        white = colourettu.Colour("#fff")
        black = colourettu.Colour("#000")
        expected = colourettu.Colour([180, 180, 180])
        self.assertEqual(colourettu.blend(black, white), expected)
def main():
    """Entry point: run the whole suite via unittest's CLI runner."""
    unittest.main()
if __name__ == "__main__":
    main()
| |
#
# This file is part of snmpsim software.
#
# Copyright (c) 2010-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/snmpsim/license.html
#
# SNMP Agent Simulator
#
import json
import os
import re
import tempfile
import time
import uuid
from functools import wraps
from pyasn1.type import univ
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.carrier.asyncore.dgram import udp6
from pysnmp.entity import engine
from snmpsim import error
from snmpsim import log
from snmpsim.reporting.formats import base
def camel2snake(name):
    """Convert a camelCase/PascalCase identifier to snake_case."""
    # First break before each capitalised word, then before any remaining
    # single capital that follows a lowercase letter or digit.
    with_breaks = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    with_breaks = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', with_breaks)
    return with_breaks.lower()
def ensure_base_types(f):
    """Convert decorated function's kwargs to Python types.
    Also turn camel-cased keys into snake case.
    """
    def to_base_types(item):
        # SNMP engine objects are represented by their engine ID.
        if isinstance(item, engine.SnmpEngine):
            item = item.snmpEngineID
        if isinstance(item, (univ.Integer, univ.OctetString,
                             univ.ObjectIdentifier)):
            item = item.prettyPrint()
            # prettyPrint() may render octet strings as '0x...'; keep only
            # the bare hex digits.
            if item.startswith('0x'):
                item = item[2:]
            return item
        if isinstance(item, (udp.UdpTransportAddress, udp6.Udp6TransportAddress)):
            # Keep just the host part of the (host, port) transport address.
            return str(item[0])
        return item
    def to_dct(dct):
        # Recursively convert keys and values; keys additionally get
        # camelCase -> snake_case treatment.
        items = {}
        for k, v in dct.items():
            k = to_base_types(k)
            k = camel2snake(k)
            if isinstance(v, dict):
                v = to_dct(v)
            else:
                v = to_base_types(v)
            items[k] = v
        return items
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # NOTE: only keyword arguments are converted; positional arguments
        # pass through untouched.
        return f(*args, **to_dct(kwargs))
    return decorated_function
class NestingDict(dict):
    """Dict that transparently creates nested NestingDict values for
    missing keys, enabling arbitrarily deep ``d[a][b][c] = x`` writes."""

    def __getitem__(self, key):
        # Create-and-store the nested mapping on first access so later
        # lookups see the same object.
        if key not in self:
            dict.__setitem__(self, key, type(self)())
        return dict.__getitem__(self, key)
class BaseJsonReporter(base.BaseReporter):
    """Common base for JSON-backed family of reporters.
    """
    # Seconds between metric dumps (class default; may be overridden
    # per instance via the second constructor argument).
    REPORTING_PERIOD = 300
    # Subclasses set REPORTING_FORMAT to identify their report layout.
    REPORTING_FORMAT = ''
    REPORTING_VERSION = 1
    # One UUID per process so consumers can attribute reports to a producer.
    PRODUCER_UUID = str(uuid.uuid1())
    def __init__(self, *args):
        # args: (reports-dir[, dumping-period]).  Raises SnmpsimError on
        # missing/malformed parameters or if the directory cannot be created.
        if not args:
            raise error.SnmpsimError(
                'Missing %s parameter(s). Expected: '
                '<method>:<reports-dir>[:dumping-period]' % self.__class__.__name__)
        self._reports_dir = os.path.join(args[0], self.REPORTING_FORMAT)
        if len(args) > 1:
            try:
                # Instance attribute shadows the class-level default.
                self.REPORTING_PERIOD = int(args[1])
            except Exception as exc:
                raise error.SnmpsimError(
                    'Malformed reports dumping period: %s' % args[1])
        try:
            if not os.path.exists(self._reports_dir):
                os.makedirs(self._reports_dir)
        except OSError as exc:
            raise error.SnmpsimError(
                'Failed to create reports directory %s: '
                '%s' % (self._reports_dir, exc))
        self._metrics = NestingDict()
        self._next_dump = time.time() + self.REPORTING_PERIOD
        log.debug(
            'Initialized %s metrics reporter for instance %s, metrics '
            'directory %s, dumping period is %s seconds' % (
                self.__class__.__name__, self.PRODUCER_UUID, self._reports_dir,
                self.REPORTING_PERIOD))
    def flush(self):
        """Dump accumulated metrics into a JSON file.
        Reset all counters upon success.
        """
        if not self._metrics:
            return
        now = int(time.time())
        # Rate-limit: at most one dump per REPORTING_PERIOD.
        if self._next_dump > now:
            return
        self._next_dump = now + self.REPORTING_PERIOD
        self._metrics['format'] = self.REPORTING_FORMAT
        self._metrics['version'] = self.REPORTING_VERSION
        self._metrics['producer'] = self.PRODUCER_UUID
        dump_path = os.path.join(self._reports_dir, '%s.json' % now)
        log.debug('Dumping JSON metrics to %s' % dump_path)
        try:
            json_doc = json.dumps(self._metrics, indent=2)
            # Write to a temp file first, then rename into place.
            # NOTE(review): NamedTemporaryFile lives in the default temp
            # directory; os.rename can fail (EXDEV) if that is a different
            # filesystem than the reports directory -- confirm.
            with tempfile.NamedTemporaryFile(delete=False) as fl:
                fl.write(json_doc.encode('utf-8'))
            os.rename(fl.name, dump_path)
        except Exception as exc:
            log.error(
                'Failure while dumping metrics into '
                '%s: %s' % (dump_path, exc))
        # Counters restart from scratch after every dump attempt.
        self._metrics.clear()
class MinimalJsonReporter(BaseJsonReporter):
    """Collect activity metrics and dump brief report.
    Accumulates and periodically dumps activity metrics reflecting
    SNMP command responder performance.
    These counters are accumulated in memory for some time, then get
    written down as a JSON file indexed by time. Consumers are expected
    to process each of these files and are free to remove them.
    `MinimalJsonReporter` works with both SNMPv1/v2c and SNMPv3
    command responder.
    Activity metrics are arranged as a data structure like this:
    .. code-block:: python
    {
        'format': 'minimaljson',
        'version': 1,
        'producer': <UUID>,
        'first_update': '{timestamp}',
        'last_update': '{timestamp}',
        'transports': {
            'total': 0,
            'failures': 0
        },
        'agents': {
            'total': 0,
            'failures': 0
        },
        'data_files': {
            'total': 0,
            'failures': 0
        }
    }
    """
    REPORTING_FORMAT = 'minimaljson'
    def update_metrics(self, **kwargs):
        """Process activity update.
        Update internal counters based on activity update information.
        Parameters in `kwargs` serve two purposes: some are used to
        build activity scopes e.g. {transport_domain}->{snmp_engine},
        however those suffixed `*_count` are used to update corresponding
        activity counters that eventually will make their way to
        consumers.
        """
        root_metrics = self._metrics
        metrics = root_metrics
        now = int(time.time())
        if 'first_update' not in metrics:
            metrics['first_update'] = now
        metrics['last_update'] = now
        metrics = root_metrics
        try:
            # _metrics is a NestingDict, so 'transports' is auto-created;
            # the KeyError guard is purely defensive.
            metrics = metrics['transports']
            metrics['total'] = (
                metrics.get('total', 0)
                + kwargs.get('transport_call_count', 0))
            metrics['failures'] = (
                metrics.get('failures', 0)
                + kwargs.get('transport_failure_count', 0))
        except KeyError:
            pass
        metrics = root_metrics
        # NOTE(review): the 'agents' section shown in the class docstring is
        # never populated here -- confirm whether that data is still pending
        # (see TODO below).
        try:
            metrics = metrics['data_files']
            metrics['total'] = (
                metrics.get('total', 0)
                + kwargs.get('datafile_call_count', 0))
            metrics['failures'] = (
                metrics.get('failures', 0)
                + kwargs.get('datafile_failure_count', 0))
        # TODO: some data is still not coming from snmpsim v2carch core
        except KeyError:
            pass
class FullJsonReporter(BaseJsonReporter):
    """Collect activity metrics and dump detailed report.
    Accumulates and periodically dumps activity counters reflecting
    SNMP command responder performance.
    These counters are accumulated in memory for some time, then get
    written down as a JSON file indexed by time. Consumers are expected
    to process each of these files and are free to remove them.
    `FullJsonReporter` can only work within full SNMPv3 command responder.
    Activity metrics are arranged as a data structure like this:
    .. code-block:: python
    {
        'format': 'fulljson',
        'version': 1,
        'producer': <UUID>,
        'first_update': '{timestamp}',
        'last_update': '{timestamp}',
        '{transport_protocol}': {
            '{transport_endpoint}': {  # local address
                'transport_domain': '{transport_domain}',  # endpoint ID
                '{transport_address}', {  # peer address
                    'packets': 0,
                    'parse_failures': 0,  # n/a
                    'auth_failures': 0,  # n/a
                    'context_failures': 0,  # n/a
                    '{snmp_engine}': {
                        '{security_model}': {
                            '{security_level}': {
                                '{security_name}': {
                                    '{context_engine_id}': {
                                        '{context_name}': {
                                            '{pdu_type}': {
                                                '{data_file}': {
                                                    'pdus': 0,
                                                    'varbinds': 0,
                                                    'failures': 0,
                                                    '{variation_module}': {
                                                        'calls': 0,
                                                        'failures': 0
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    Where `{token}` is replaced with a concrete value taken from request.
    """
    REPORTING_FORMAT = 'fulljson'
    @ensure_base_types
    def update_metrics(self, **kwargs):
        """Process activity update.
        Update internal counters based on activity update information.
        Parameters in `kwargs` serve two purposes: some are used to
        build activity scopes e.g. {transport_domain}->{snmp_engine},
        however those suffixed `*_count` are used to update corresponding
        activity counters that eventually will make their way to
        consumers.
        """
        metrics = self._metrics
        now = int(time.time())
        if 'first_update' not in metrics:
            metrics['first_update'] = now
        metrics['last_update'] = now
        try:
            # Each lookup descends one nesting level; NestingDict creates
            # missing levels on the fly, so KeyError can only come from a
            # key absent in `kwargs` (partial update) -- in that case the
            # update is silently dropped.
            metrics = metrics[kwargs['transport_protocol']]
            metrics = metrics['%s:%s' % kwargs['transport_endpoint']]
            metrics['transport_domain'] = kwargs['transport_domain']
            metrics = metrics[kwargs['transport_address']]
            metrics['packets'] = (
                metrics.get('packets', 0)
                + kwargs.get('transport_call_count', 0))
            # TODO: collect these counters
            metrics['parse_failures'] = 0
            metrics['auth_failures'] = 0
            metrics['context_failures'] = 0
            metrics = metrics[kwargs['snmp_engine']]
            metrics = metrics[kwargs['security_model']]
            metrics = metrics[kwargs['security_level']]
            metrics = metrics[kwargs['security_name']]
            metrics = metrics[kwargs['context_engine_id']]
            # NOTE(review): the '{context_name}' level documented in the class
            # docstring is not created here -- confirm against consumers
            # before changing the report schema.
            metrics = metrics[kwargs['pdu_type']]
            metrics = metrics[kwargs['data_file']]
            metrics['pdus'] = (
                metrics.get('pdus', 0)
                + kwargs.get('datafile_call_count', 0))
            metrics['failures'] = (
                metrics.get('failures', 0)
                + kwargs.get('datafile_failure_count', 0))
            metrics['varbinds'] = (
                metrics.get('varbinds', 0)
                + kwargs.get('varbind_count', 0))
            metrics = metrics['variations']
            metrics = metrics[kwargs['variation']]
            # BUG FIX: accumulate 'calls' from the existing 'calls' counter.
            # The original read `metrics.get('pdus', 0)` here (copy-paste
            # from the data_file section), so variation call counts never
            # accumulated across updates.
            metrics['calls'] = (
                metrics.get('calls', 0)
                + kwargs.get('variation_call_count', 0))
            metrics['failures'] = (
                metrics.get('failures', 0)
                + kwargs.get('variation_failure_count', 0))
        except KeyError:
            return
| |
from __future__ import absolute_import, division, print_function
import itertools
import math
from glob import glob
import heapq
from collections import Iterable, Iterator
from toolz import (merge, concat, frequencies, merge_with, take, curry, reduce,
join, reduceby, compose, second, valmap, count, map, partition_all,
filter, pluck)
try:
from cytoolz import (curry, frequencies, merge_with, join, reduceby,
compose, second, count, pluck)
except ImportError:
pass
from ..multiprocessing import get as mpget
from ..core import istask, get_dependencies, reverse_dict
from ..optimize import fuse
# Infinite generators of unique task-graph key names for new bags and
# freshly-loaded data, respectively.
names = ('bag-%d' % i for i in itertools.count(1))
load_names = ('load-%d' % i for i in itertools.count(1))
def lazify_task(task, start=True):
    """
    Given a task, remove unnecessary calls to ``list``

    The outermost call (``start=True``) is preserved because its result is
    what the user materializes; nested ``list`` calls are no-ops for lazy
    consumers and are stripped.

    Example
    -------

    >>> task = (sum, (list, (map, inc, [1, 2, 3])))  # doctest: +SKIP
    >>> lazify_task(task)  # doctest: +SKIP
    (sum, (map, inc, [1, 2, 3]))
    """
    if not istask(task):
        return task
    head, tail = task[0], task[1:]
    if not start and head is list:
        # Strip the ``list`` wrapper and lazify its argument directly.
        # (The original assigned ``task = task[1]`` without using it and
        # unpacked ``*tail`` into a single-argument signature, which would
        # raise TypeError for a malformed multi-argument ``list`` task.)
        return lazify_task(task[1], start=False)
    return (head,) + tuple(lazify_task(arg, False) for arg in tail)
def lazify(dsk):
    """
    Remove unnecessary calls to ``list`` in tasks

    See Also:
        ``dask.bag.core.lazify_task``
    """
    # Rebuild the graph with every task lazified; keys are untouched.
    return dict((key, lazify_task(task)) for key, task in dsk.items())
# Default scheduler: multiprocessing ``get`` pre-loaded with bag-specific
# graph optimizations (task fusion and ``list``-call removal).
get = curry(mpget, optimizations=[fuse, lazify])
def list2(seq):
    """ Another list function that won't be removed by lazify """
    return [element for element in seq]
class Item(object):
    """Lazy scalar result of a bag reduction, evaluated on demand."""

    def __init__(self, dsk, key, get=get):
        self.key = key
        self.dask = dsk
        self.get = get

    def compute(self):
        """Run the scheduler over the task graph and return the value."""
        return self.get(self.dask, self.key)

    # Numeric/boolean coercions force evaluation, so int(item) etc. work.
    __int__ = __float__ = __complex__ = __bool__ = compute
class Bag(object):
    """ Unordered collection with repeats
    Computed in paritions with dask
    >>> dsk = {('x', 0): (range, 5),
    ...        ('x', 1): (range, 5),
    ...        ('x', 2): (range, 5)}
    >>> b = Bag(dsk, 'x', npartitions=3)
    >>> sorted(b.map(lambda x: x * 10))  # doctest: +SKIP
    [0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
    >>> int(b.fold(lambda x, y: x + y))  # doctest: +SKIP
    30
    """
    def __init__(self, dsk, name, npartitions, get=get):
        # dsk: task graph; partitions are keyed (name, 0..npartitions-1).
        self.dask = dsk
        self.name = name
        self.npartitions = npartitions
        self.get = get
    @classmethod
    def from_filenames(cls, filenames):
        """ Create dask by loading in lines from many files
        Provide list of filenames
        >>> b = Bag.from_filenames(['myfile.1.txt', 'myfile.2.txt'])  # doctest: +SKIP
        Or a globstring
        >>> b = Bag.from_filenames('myfiles.*.txt')  # doctest: +SKIP
        """
        if isinstance(filenames, str):
            filenames = sorted(glob(filenames))
        # One partition per file; each task reads the file's lines.
        d = dict((('load', i), (list, (open, fn)))
                 for i, fn in enumerate(filenames))
        return Bag(d, 'load', len(d))
    @classmethod
    def from_sequence(cls, seq, partition_size=None, npartitions=None):
        """ Create dask from Python sequence
        This sequence should be relatively small in memory.  Dask Bag works
        best when it handles loading your data itself.  Commonly we load a
        sequence of filenames into a Bag and then use ``.map`` to open them.
        Parameters
        ----------
        seq: Iterable
            A sequence of elements to put into the dask
        partition_size: int (optional)
            The length of each partition
        npartitions: int (optional)
            The number of desired partitions
        It is best to provide either ``partition_size`` or ``npartitions``
        (though not both.)
        Example
        -------
        >>> b = Bag.from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
        """
        seq = list(seq)
        if npartitions and not partition_size:
            partition_size = int(math.ceil(len(seq) / npartitions))
        if npartitions is None and partition_size is None:
            # Heuristic default: aim for ~100 partitions on larger inputs.
            if len(seq) < 100:
                partition_size = 1
            else:
                partition_size = int(len(seq) / 100)
        parts = list(partition_all(partition_size, seq))
        name = next(load_names)
        d = dict(((name, i), part) for i, part in enumerate(parts))
        return Bag(d, name, len(d))
    def map(self, func):
        # Apply func element-wise within each partition.
        name = next(names)
        dsk = dict(((name, i), (list, (map, func, (self.name, i))))
                   for i in range(self.npartitions))
        return Bag(merge(self.dask, dsk), name, self.npartitions)
    def filter(self, predicate):
        # Keep only elements satisfying predicate, per partition.
        name = next(names)
        dsk = dict(((name, i), (list, (filter, predicate, (self.name, i))))
                   for i in range(self.npartitions))
        return Bag(merge(self.dask, dsk), name, self.npartitions)
    def map_partitions(self, func):
        # Apply func to each whole partition rather than element-wise.
        name = next(names)
        dsk = dict(((name, i), (func, (self.name, i)))
                   for i in range(self.npartitions))
        return Bag(merge(self.dask, dsk), name, self.npartitions)
    def pluck(self, key):
        # Extract key from each element; a list of keys plucks several.
        name = next(names)
        if isinstance(key, list):
            # Wrap in list2 so lazify doesn't strip the literal list.
            key = (list2, key)
        dsk = dict(((name, i), (list, (pluck, key, (self.name, i))))
                   for i in range(self.npartitions))
        return Bag(merge(self.dask, dsk), name, self.npartitions)
    def fold(self, binop, combine=None, initial=None):
        # Two-stage reduction: binop within partitions, combine across.
        a = next(names)
        b = next(names)
        if initial:
            dsk = dict(((a, i), (reduce, binop, (self.name, i), initial))
                       for i in range(self.npartitions))
        else:
            dsk = dict(((a, i), (reduce, binop, (self.name, i)))
                       for i in range(self.npartitions))
        # Item keys are plain names; Bag keys are (name, partition) pairs.
        dsk2 = {b: (reduce, combine or binop, list(dsk.keys()))}
        return Item(merge(self.dask, dsk, dsk2), b)
    def frequencies(self):
        # Per-partition frequency dicts, merged by summation into one
        # single-partition bag of (value, count) pairs.
        a = next(names)
        b = next(names)
        dsk = dict(((a, i), (frequencies, (self.name, i)))
                   for i in range(self.npartitions))
        dsk2 = {(b, 0): (dictitems,
                         (merge_with, sum, list(sorted(dsk.keys()))))}
        return Bag(merge(self.dask, dsk, dsk2), b, 1)
    def topk(self, k, key=None):
        # k largest per partition, then k largest of the candidates.
        a = next(names)
        b = next(names)
        if key:
            topk = curry(heapq.nlargest, key=key)
        else:
            topk = heapq.nlargest
        dsk = dict(((a, i), (list, (topk, k, (self.name, i))))
                   for i in range(self.npartitions))
        dsk2 = {(b, 0): (list, (topk, k, (concat, list(dsk.keys()))))}
        return Bag(merge(self.dask, dsk, dsk2), b, 1)
    def _reduction(self, perpartition, aggregate):
        # Generic two-stage reduction returning a lazy Item.
        a = next(names)
        b = next(names)
        dsk = dict(((a, i), (perpartition, (self.name, i)))
                   for i in range(self.npartitions))
        dsk2 = {b: (aggregate, list(dsk.keys()))}
        return Item(merge(self.dask, dsk, dsk2), b)
    def sum(self):
        return self._reduction(sum, sum)
    def max(self):
        return self._reduction(max, max)
    def min(self):
        return self._reduction(min, min)
    def any(self):
        return self._reduction(any, any)
    def all(self):
        return self._reduction(all, all)
    def count(self):
        return self._reduction(count, sum)
    def mean(self):
        # Track (sum, count) per partition so the global mean is exact.
        def chunk(seq):
            total, n = 0.0, 0
            for x in seq:
                total += x
                n += 1
            return total, n
        def agg(x):
            totals, counts = list(zip(*x))
            return 1.0 * sum(totals) / sum(counts)
        return self._reduction(chunk, agg)
    def var(self, ddof=0):
        # Track (sum of squares, sum, count) per partition; combine via
        # E[x^2] - E[x]^2 with the ddof correction factor.
        def chunk(seq):
            squares, total, n = 0.0, 0.0, 0
            for x in seq:
                squares += x**2
                total += x
                n += 1
            return squares, total, n
        def agg(x):
            squares, totals, counts = list(zip(*x))
            x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
            result = (x2 / n) - (x / n)**2
            return result * n / (n - ddof)
        return self._reduction(chunk, agg)
    def std(self, ddof=0):
        # math.sqrt coerces the lazy Item via its __float__, forcing compute.
        return math.sqrt(self.var(ddof=ddof))
    def join(self, other, on_self, on_other=None):
        # other must be a concrete (non-Bag) iterable, joined against each
        # partition independently.
        assert isinstance(other, Iterable)
        assert not isinstance(other, Bag)
        if on_other is None:
            on_other = on_self
        name = next(names)
        dsk = dict(((name, i), (list, (join, on_other, other,
                                       on_self, (self.name, i))))
                   for i in range(self.npartitions))
        return Bag(merge(self.dask, dsk), name, self.npartitions)
    def product(self, other):
        """ Cartesian product between two bags """
        assert isinstance(other, Bag)
        name = next(names)
        n, m = self.npartitions, other.npartitions
        # One output partition per pair of input partitions.
        dsk = dict(((name, i*m + j),
                    (list, (itertools.product, (self.name, i),
                                               (other.name, j))))
                   for i in range(n) for j in range(m))
        return Bag(merge(self.dask, other.dask, dsk), name, n*m)
    def foldby(self, key, binop, initial=None, combine=None,
               combine_initial=None):
        # Grouped fold: reduceby within partitions, then merge the
        # per-partition {group: value} dicts across partitions.
        a = next(names)
        b = next(names)
        if combine is None:
            combine = binop
        if initial:
            dsk = dict(((a, i),
                        (reduceby, key, binop, (self.name, i), initial))
                       for i in range(self.npartitions))
        else:
            dsk = dict(((a, i),
                        (reduceby, key, binop, (self.name, i)))
                       for i in range(self.npartitions))
        combine2 = lambda acc, x: combine(acc, x[1])
        if combine_initial:
            dsk2 = {(b, 0): (dictitems,
                             (reduceby,
                               0, combine2,
                               (concat, (map, dictitems, list(dsk.keys()))),
                               combine_initial))}
        else:
            dsk2 = {(b, 0): (dictitems,
                             (merge_with,
                               (curry, reduce, combine),
                               list(dsk.keys())))}
        return Bag(merge(self.dask, dsk, dsk2), b, 1)
    def take(self, k):
        # First k elements of the first partition only.
        name = next(names)
        return Bag(merge(self.dask, {(name, 0): (list, (take, k, (self.name,
                                                                  0)))}), name, 1)
    def _keys(self):
        return [(self.name, i) for i in range(self.npartitions)]
    def __iter__(self):
        # Compute all partitions, then chain them into a single iterator.
        results = self.get(self.dask, self._keys())
        if isinstance(results[0], Iterable):
            results = concat(results)
        if not isinstance(results, Iterator):
            results = iter(results)
        return results
def dictitems(d):
    """ A pickleable version of dict.items """
    return [(key, value) for key, value in d.items()]
| |
__author__ = 'MBlaauw'
#import libraries
import os
import cv2
import numpy as np
from scipy.sparse import lil_matrix
from scipy.stats import expon
from sklearn.decomposition import RandomizedPCA
from sklearn import cross_validation
from sklearn import svm
from sklearn import metrics
from sklearn import preprocessing
from time import time
from sklearn.grid_search import GridSearchCV
from sklearn.neural_network import BernoulliRBM
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.pipeline import Pipeline
# Working/data directories (hard-coded, machine-specific paths).
wd = '/Users/mblaauw/Downloads/06_P_PROJECTS/Kaggle_CatsVsDogs/' #change this to make the code work
dataTrainDir = '/Users/mblaauw/Downloads/06_P_PROJECTS/Kaggle_CatsVsDogs/data/train/'
dataTestDir = '/Users/mblaauw/Downloads/06_P_PROJECTS/Kaggle_CatsVsDogs/data/test1/'
os.chdir(wd)
# Training image filename prefixes and target (width, height) in pixels.
labels = ['cat.', 'dog.']
desiredDimensions = [45, 45]
#define loading and pre-processing function grayscale
def preprocessImg(animal, number, dim1, dim2, dataDir):
    """Load one image, convert to zero-mean grayscale, resize and flatten.

    animal: filename prefix ('cat.', 'dog.' or '' for test images);
    number: image index in the filename; dim1/dim2: resize target;
    dataDir: directory containing the .jpg files.
    Returns a (1, dim1*dim2) array.
    NOTE(review): cv2.imread returns None for a missing/unreadable file,
    which would fail in cvtColor -- confirm inputs always exist.
    """
    imageName = '{0:s}{1:s}{2:d}{3:s}'.format(dataDir, animal, number, '.jpg')
    npImage = cv2.imread(imageName)
    npImage = cv2.cvtColor(npImage, cv2.COLOR_BGR2GRAY)
    # Subtract the global mean; scalar broadcasting gives identical results
    # to the original reshape/np.tile round-trip with less work.
    npImage = npImage - np.mean(npImage)
    npImage = cv2.resize(npImage, (dim1, dim2))
    return npImage.reshape(1, dim1 * dim2)
#m = 1000 #pet Train dataset
m = 12500 #full Train dataset
mTest = 12500 #number of images in the test set
# Shuffle the 2*m (cat + dog) training slots with one shared permutation.
indexesIm = np.random.permutation(m * len(labels))
idxImages = np.tile(range(m), len(labels))
idxImages = idxImages[indexesIm]
testIndexes = range(len(indexesIm), len(indexesIm) + mTest)
# Class labels: 0 = cat, 1 = dog, shuffled with the same permutation.
y = np.append(np.tile(0, m), np.tile(1, m))
y = y[indexesIm]
def animalInput(theNumber):
    # Map a numeric class label to its image-filename prefix; anything
    # unrecognised (e.g. the test-set sentinel) yields an empty prefix.
    prefixes = {0: 'cat.', 1: 'dog.'}
    return prefixes.get(theNumber, '')
#Build the sparse matrix with the preprocessed image data for both train and test data
bigMatrixTrain = np.empty(shape=(((len(indexesIm), desiredDimensions[0] * desiredDimensions[1]))))
bigMatrixTest = np.empty(shape=(((len(testIndexes), desiredDimensions[0] * desiredDimensions[1]))))
for i in range(len(indexesIm)):
    bigMatrixTrain[i, :] = preprocessImg(animalInput(y[i]), idxImages[i], desiredDimensions[0], desiredDimensions[1], dataTrainDir)
someNumbers = range(mTest)
for ii in someNumbers:
    # Test files are numbered 1..mTest with no class prefix: the unmatched
    # animalInput argument yields an empty prefix, selecting '<n>.jpg'.
    bigMatrixTest[ii, :] = preprocessImg(animalInput('printNothing'), ii + 1, desiredDimensions[0], desiredDimensions[1], dataTestDir)
#Divide dataset for cross validation purposes
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    bigMatrixTrain, y, test_size = 0.4, random_state = 0) #fix this
#Reduce features to main components so that they contain 99% of variance
# NOTE(review): RandomizedPCA and the cross_validation module are pre-0.18
# scikit-learn APIs (now PCA(svd_solver='randomized') and model_selection);
# confirm against the installed sklearn version.
n_components = 250
print("Extracting the top %d eigenfaces from %d faces"
      % (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components = n_components, whiten = True)
pca.fit(X_train)
print("done in %0.3fs" % (time() - t0))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
# Grid-search C and gamma for an RBF-kernel SVM.
param_grid = {'C': [1e3, 1e4, 1e5],
              'gamma': [0.0001,0.001, 0.01, 0.1], }
clf = GridSearchCV(svm.SVC(kernel='rbf', class_weight='auto', verbose = True, probability=False), param_grid)
clf = clf.fit(X_train, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
prediction = clf.predict(X_test)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, prediction))
print(confusion_matrix(y_test, prediction))
# Accuracy on the held-out split.
correctValues = sum(prediction == y_test)
percentage = float(correctValues)/len(y_test)
print(percentage)
#mmodel number 2
# Scale the raw pixel matrix into [0, 1] per column (BernoulliRBM expects
# values in [0, 1]); the +0.0001 avoids division by zero for flat columns.
bigMatrixTrain = (bigMatrixTrain - np.min(bigMatrixTrain, 0)) / (np.max(bigMatrixTrain, 0) + 0.0001) # 0-1 scaling
#Divide dataset for cross validation purposes
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    bigMatrixTrain, y, test_size = 0.4, random_state = 0) #fix this
# specify parameters and distributions to sample from
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger fitting time
rbm.n_components = 300
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, y_train)
print()
print("Logistic regression using RBM features:\n%s\n" % (
    metrics.classification_report(y_test, classifier.predict(X_test))))
print("Logistic regression using RBM features:\n%s\n" % (
    confusion_matrix(y_test, classifier.predict(X_test))))
#mmodel number 3: PCA-reduced features fed into the RBM+logistic pipeline
#Divide dataset for cross validation purposes
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    bigMatrixTrain, y, test_size = 0.4, random_state = 0) #fix this
# specify parameters and distributions to sample from
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger fitting time
rbm.n_components = 300
logistic.C = 6000.0
# Reduce to 250 principal components before scaling/RBM (unlike model 2,
# which used raw pixels).
n_components = 250
print("Extracting the top %d eigenfaces from %d faces"
      % (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components = n_components, whiten = True)
pca.fit(X_train)
print("done in %0.3fs" % (time() - t0))
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
# Jointly 0-1 scale the PCA-projected train and test features, then split
# them back apart.  Fixes: np.row_stack takes a single sequence argument
# (calling it with two positional arrays raises a TypeError), and the test
# slice must start at X_train.shape[0] -- the original "+ 1" silently
# dropped the first test row.
stacked = np.row_stack((X_train, X_test))
stacked = (stacked - np.min(stacked, 0)) / (np.max(stacked, 0) + 0.0001)  # 0-1 scaling
X_test = stacked[X_train.shape[0]:, :]
X_train = stacked[0:X_train.shape[0], :]
# Training RBM-Logistic Pipeline
classifier.fit(X_train, y_train)
print()
print("Logistic regression using RBM features:\n%s\n" % (
    metrics.classification_report(y_test, classifier.predict(X_test))))
print("Logistic regression using RBM features:\n%s\n" % (
    confusion_matrix(y_test, classifier.predict(X_test))))
#prediction probability
# NOTE(review): clf is still the SVM grid search from model 1, which was
# fitted with probability=False and on differently-scaled features --
# verify predict_proba is actually available and which model these AUC
# numbers are meant to describe.
predictionFromDataset2 = clf.predict_proba(X_test)
predictionFromDataset2 = predictionFromDataset2[:, 1]
fpr, tpr, thresholds = metrics.roc_curve(y_test, predictionFromDataset2)
predictionProbability = metrics.auc(fpr, tpr)
#Prediction
#predictionFromTest = clf.predict_proba(testMatrixReduced)
predictionFromTest = clf.predict(bigMatrixTest)
#label = predictionFromTest[:, 1]
idVector = range(1, mTest + 1)
#predictionsToCsv = np.column_stack((idVector, label))
predictionsToCsv = np.column_stack((idVector, predictionFromTest))
import csv
# 'wb' is the Python 2 idiom for csv writers (Python 3 needs newline='').
ofile = open('predictionVII.csv', "wb")
fileToBeWritten = csv.writer(ofile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
for row in predictionsToCsv:
    fileToBeWritten.writerow(row)
ofile.close()
# ---- end of concatenated source segment ----
# Copyright (c) 2008,2009 Citrix Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only. with the special
# exception on linking described in file LICENSE.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
import sys
import syslog
import os
from xml.dom.minidom import getDOMImplementation
from xml.dom.minidom import parse as parseXML
# Prefix prepended to every file path reference; tests point this at a
# scratch tree via set_root_prefix().
the_root_prefix = ""

def root_prefix():
    """Return the string prefixed to all file name references (testing aid)."""
    return the_root_prefix
def set_root_prefix(prefix):
    """Install *prefix* as the global path prefix returned by root_prefix()."""
    global the_root_prefix
    the_root_prefix = prefix
# Where log() writes: "syslog" or "stderr".
log_destination = "syslog"

def get_log_destination():
    """Return the current log destination ('syslog' or 'stderr')."""
    return log_destination
def set_log_destination(dest):
    """Select where log() writes: 'syslog' or 'stderr'."""
    global log_destination
    log_destination = dest
#
# Logging.
#
def log(s):
    """Emit message *s* to the destination chosen by set_log_destination().

    Writes to syslog by default, otherwise to stderr.
    """
    if get_log_destination() == 'syslog':
        syslog.syslog(s)
    else:
        # Equivalent to the original Python 2 ``print >>sys.stderr, s``
        # (message plus newline) but valid on Python 3 as well.
        sys.stderr.write(s + "\n")
#
# Exceptions.
#
class Error(Exception):
    """Module exception; carries a human-readable message in ``msg``."""

    def __init__(self, msg):
        Exception.__init__(self)
        self.msg = msg
#
# Run external utilities
#
def run_command(command):
    """Run *command* (an argv list) synchronously; True iff it exited 0."""
    log("Running command: " + ' '.join(command))
    status = os.spawnl(os.P_WAIT, root_prefix() + command[0], *command)
    if status == 0:
        return True
    log("Command failed %d: " % status + ' '.join(command))
    return False
#
# Configuration File Handling.
#
class ConfigurationFile(object):
    """Write a file, tracking old and new versions.
    Supports writing a new version of a file and applying and
    reverting those changes.
    """

    # Lifecycle: OPEN -> close() -> NOT-APPLIED -> apply() -> APPLIED ->
    # commit() -> COMMITTED or revert() -> REVERTED.  Every method asserts
    # the expected state before acting.
    __STATE = {"OPEN":"OPEN",
               "NOT-APPLIED":"NOT-APPLIED", "APPLIED":"APPLIED",
               "REVERTED":"REVERTED", "COMMITTED": "COMMITTED"}

    def __init__(self, path):
        # Writes go to .<name>.xapi-new; apply() hard-links it into place
        # and preserves the previous contents in .<name>.xapi-old so
        # revert() can restore them.
        dirname,basename = os.path.split(path)

        self.__state = self.__STATE['OPEN']
        self.__children = []

        self.__path = os.path.join(dirname, basename)
        self.__oldpath = os.path.join(dirname, "." + basename + ".xapi-old")
        self.__newpath = os.path.join(dirname, "." + basename + ".xapi-new")

        self.__f = open(self.__newpath, "w")

    def attach_child(self, child):
        # Children are applied/reverted/committed before this file.
        self.__children.append(child)

    def path(self):
        return self.__path

    def readlines(self):
        # Best-effort read of the currently-applied file; returns "" (not
        # []) when the file cannot be read.
        try:
            return open(self.path()).readlines()
        except:
            return ""

    def write(self, args):
        if self.__state != self.__STATE['OPEN']:
            raise Error("Attempt to write to file in state %s" % self.__state)
        self.__f.write(args)

    def close(self):
        if self.__state != self.__STATE['OPEN']:
            raise Error("Attempt to close file in state %s" % self.__state)
        self.__f.close()
        self.__state = self.__STATE['NOT-APPLIED']

    def changed(self):
        if self.__state != self.__STATE['NOT-APPLIED']:
            raise Error("Attempt to compare file in state %s" % self.__state)
        # NOTE(review): always reports a change; presumably a comparison
        # with the existing contents was intended -- confirm callers.
        return True

    def apply(self):
        if self.__state != self.__STATE['NOT-APPLIED']:
            raise Error("Attempt to apply configuration from state %s" % self.__state)

        for child in self.__children:
            child.apply()

        log("Applying changes to %s configuration" % self.__path)

        # Remove previous backup.
        if os.access(self.__oldpath, os.F_OK):
            os.unlink(self.__oldpath)

        # Save current configuration.
        if os.access(self.__path, os.F_OK):
            os.link(self.__path, self.__oldpath)
            os.unlink(self.__path)

        # Apply new configuration.
        assert(os.path.exists(self.__newpath))
        os.link(self.__newpath, self.__path)

        # Remove temporary file.
        os.unlink(self.__newpath)

        self.__state = self.__STATE['APPLIED']

    def revert(self):
        if self.__state != self.__STATE['APPLIED']:
            raise Error("Attempt to revert configuration from state %s" % self.__state)

        for child in self.__children:
            child.revert()

        log("Reverting changes to %s configuration" % self.__path)

        # Remove existing new configuration
        if os.access(self.__newpath, os.F_OK):
            os.unlink(self.__newpath)

        # Revert new configuration.
        if os.access(self.__path, os.F_OK):
            os.link(self.__path, self.__newpath)
            os.unlink(self.__path)

        # Revert to old configuration.
        if os.access(self.__oldpath, os.F_OK):
            os.link(self.__oldpath, self.__path)
            os.unlink(self.__oldpath)

        # Leave .*.xapi-new as an aid to debugging.

        self.__state = self.__STATE['REVERTED']

    def commit(self):
        if self.__state != self.__STATE['APPLIED']:
            raise Error("Attempt to commit configuration from state %s" % self.__state)

        for child in self.__children:
            child.commit()

        log("Committing changes to %s configuration" % self.__path)

        # Discard both the backup and the temporary file; the applied
        # configuration is now the only copy.
        if os.access(self.__oldpath, os.F_OK):
            os.unlink(self.__oldpath)
        if os.access(self.__newpath, os.F_OK):
            os.unlink(self.__newpath)

        self.__state = self.__STATE['COMMITTED']
#
# Helper functions for encoding/decoding database attributes to/from XML.
#
def _str_to_xml(xml, parent, tag, val):
e = xml.createElement(tag)
parent.appendChild(e)
v = xml.createTextNode(val)
e.appendChild(v)
def _str_from_xml(n):
def getText(nodelist):
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
return getText(n.childNodes).strip()
def _bool_to_xml(xml, parent, tag, val):
    """Append <tag>True</tag> or <tag>False</tag> according to *val*."""
    # str(bool(val)) yields exactly "True"/"False", matching the original
    # two-branch encoding.
    _str_to_xml(xml, parent, tag, str(bool(val)))
def _bool_from_xml(n):
    """Decode a <tag>True|False</tag> element; raise Error on anything else."""
    text = _str_from_xml(n)
    if text == "True":
        return True
    if text == "False":
        return False
    raise Error("Unknown boolean value %s" % text)
def _strlist_to_xml(xml, parent, ltag, itag, val):
e = xml.createElement(ltag)
parent.appendChild(e)
for v in val:
c = xml.createElement(itag)
e.appendChild(c)
cv = xml.createTextNode(v)
c.appendChild(cv)
def _strlist_from_xml(n, ltag, itag):
    """Decode a string-list element: the text of every <itag> child of *n*.

    *ltag* (the list element's own tag) is accepted for symmetry with
    _strlist_to_xml but is not otherwise used.
    """
    return [_str_from_xml(child) for child in n.childNodes
            if child.nodeName == itag]
def _map_to_xml(xml, parent, tag, val, attrs):
    """Append <tag> holding one child per (key, value) of dict *val* whose
    key appears in *attrs*; unknown keys are logged and skipped."""
    map_elem = xml.createElement(tag)
    parent.appendChild(map_elem)
    for key, value in val.items():
        if key in attrs:
            _str_to_xml(xml, map_elem, key, value)
        else:
            log("Unknown other-config attribute: %s" % key)
def _map_from_xml(n, attrs):
    """Decode a map element: {childTag: text} for child tags in *attrs*."""
    decoded = {}
    for child in n.childNodes:
        if child.nodeName in attrs:
            decoded[child.nodeName] = _str_from_xml(child)
    return decoded
def _otherconfig_to_xml(xml, parent, val, attrs):
    """Encode an other-config map under the fixed tag "other_config"."""
    return _map_to_xml(xml, parent, "other_config", val, attrs)
def _otherconfig_from_xml(n, attrs):
    """Decode an other-config map element (inverse of _otherconfig_to_xml)."""
    return _map_from_xml(n, attrs)
#
# Definitions of the database objects (and their attributes) used by interface-reconfigure.
#
# Each object is defined by a dictionary mapping an attribute name in
# the xapi database to a tuple containing two items:
# - a function which takes this attribute and encodes it as XML.
# - a function which takes XML and decocdes it into a value.
#
# other-config attributes are specified as a simple array of strings
# XML element names used in the cache file written by DatabaseCache.save().
_PIF_XML_TAG = "pif"
_VLAN_XML_TAG = "vlan"
_TUNNEL_XML_TAG = "tunnel"
_BOND_XML_TAG = "bond"
_NETWORK_XML_TAG = "network"
_POOL_XML_TAG = "pool"

# NOTE: the bare (unparenthesized) tuples inside these list comprehensions
# are Python 2-only syntax.
_ETHTOOL_OTHERCONFIG_ATTRS = ['ethtool-%s' % x for x in 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro' ]

_PIF_OTHERCONFIG_ATTRS = [ 'domain', 'peerdns', 'defaultroute', 'mtu', 'static-routes' ] + \
                         [ 'bond-%s' % x for x in 'mode', 'miimon', 'downdelay',
                           'updelay', 'use_carrier', 'hashing-algorithm' ] + \
                         [ 'vlan-bug-workaround' ] + \
                         _ETHTOOL_OTHERCONFIG_ATTRS

# Each attribute maps to an (encoder, decoder) pair:
#   encoder(xmldoc, parent, tag, value) appends the attribute as XML;
#   decoder(node) recovers the value from the parsed XML node.
_PIF_ATTRS = { 'uuid': (_str_to_xml,_str_from_xml),
               'management': (_bool_to_xml,_bool_from_xml),
               'network': (_str_to_xml,_str_from_xml),
               'device': (_str_to_xml,_str_from_xml),
               'bond_master_of': (lambda x, p, t, v: _strlist_to_xml(x, p, 'bond_master_of', 'slave', v),
                                  lambda n: _strlist_from_xml(n, 'bond_master_of', 'slave')),
               'bond_slave_of': (_str_to_xml,_str_from_xml),
               'VLAN': (_str_to_xml,_str_from_xml),
               'VLAN_master_of': (_str_to_xml,_str_from_xml),
               # NOTE(review): 'VLAN_slave_Of' (capital O) mismatches the tag
               # written by the encoder, but _strlist_from_xml ignores its
               # ltag argument, so decoding is unaffected.
               'VLAN_slave_of': (lambda x, p, t, v: _strlist_to_xml(x, p, 'VLAN_slave_of', 'master', v),
                                 lambda n: _strlist_from_xml(n, 'VLAN_slave_Of', 'master')),
               'tunnel_access_PIF_of': (lambda x, p, t, v: _strlist_to_xml(x, p, 'tunnel_access_PIF_of', 'pif', v),
                                        lambda n: _strlist_from_xml(n, 'tunnel_access_PIF_of', 'pif')),
               'tunnel_transport_PIF_of': (lambda x, p, t, v: _strlist_to_xml(x, p, 'tunnel_transport_PIF_of', 'pif', v),
                                           lambda n: _strlist_from_xml(n, 'tunnel_transport_PIF_of', 'pif')),
               'ip_configuration_mode': (_str_to_xml,_str_from_xml),
               'IP': (_str_to_xml,_str_from_xml),
               'netmask': (_str_to_xml,_str_from_xml),
               'gateway': (_str_to_xml,_str_from_xml),
               'DNS': (_str_to_xml,_str_from_xml),
               'MAC': (_str_to_xml,_str_from_xml),
               'other_config': (lambda x, p, t, v: _otherconfig_to_xml(x, p, v, _PIF_OTHERCONFIG_ATTRS),
                                lambda n: _otherconfig_from_xml(n, _PIF_OTHERCONFIG_ATTRS)),

               # Special case: We write the current value
               # PIF.currently-attached to the cache but since it will
               # not be valid when we come to use the cache later
               # (i.e. after a reboot) we always read it as False.
               'currently_attached': (_bool_to_xml, lambda n: False),
             }

_VLAN_ATTRS = { 'uuid': (_str_to_xml,_str_from_xml),
                'tagged_PIF': (_str_to_xml,_str_from_xml),
                'untagged_PIF': (_str_to_xml,_str_from_xml),
              }

_TUNNEL_ATTRS = { 'uuid': (_str_to_xml,_str_from_xml),
                  'access_PIF': (_str_to_xml,_str_from_xml),
                  'transport_PIF': (_str_to_xml,_str_from_xml),
                }

_BOND_ATTRS = { 'uuid': (_str_to_xml,_str_from_xml),
                'master': (_str_to_xml,_str_from_xml),
                'slaves': (lambda x, p, t, v: _strlist_to_xml(x, p, 'slaves', 'slave', v),
                           lambda n: _strlist_from_xml(n, 'slaves', 'slave')),
              }

_NETWORK_OTHERCONFIG_ATTRS = [ 'mtu',
                               'static-routes',
                               'vswitch-controller-fail-mode',
                               'vswitch-disable-in-band' ] \
                             + _ETHTOOL_OTHERCONFIG_ATTRS

_NETWORK_ATTRS = { 'uuid': (_str_to_xml,_str_from_xml),
                   'bridge': (_str_to_xml,_str_from_xml),
                   'MTU': (_str_to_xml,_str_from_xml),
                   'PIFs': (lambda x, p, t, v: _strlist_to_xml(x, p, 'PIFs', 'PIF', v),
                            lambda n: _strlist_from_xml(n, 'PIFs', 'PIF')),
                   'other_config': (lambda x, p, t, v: _otherconfig_to_xml(x, p, v, _NETWORK_OTHERCONFIG_ATTRS),
                                    lambda n: _otherconfig_from_xml(n, _NETWORK_OTHERCONFIG_ATTRS)),
                 }

_POOL_OTHERCONFIG_ATTRS = ['vswitch-controller-fail-mode']

_POOL_ATTRS = { 'other_config': (lambda x, p, t, v: _otherconfig_to_xml(x, p, v, _POOL_OTHERCONFIG_ATTRS),
                                 lambda n: _otherconfig_from_xml(n, _POOL_OTHERCONFIG_ATTRS)),
              }
#
# Database Cache object
#
# Singleton DatabaseCache instance, installed by one of the db_init_* calls.
_db = None

def db():
    """Return the installed DatabaseCache; asserts one has been installed."""
    assert _db is not None
    return _db
def db_init_from_cache(cache):
    """Install the singleton DatabaseCache, loading it from *cache* file."""
    global _db
    assert _db is None
    _db = DatabaseCache(cache_file=cache)
def db_init_from_xenapi(session):
    """Install the singleton DatabaseCache, populating it live from xapi."""
    global _db
    assert _db is None
    _db = DatabaseCache(session_ref=session)
class DatabaseCache(object):
def __read_xensource_inventory(self):
filename = root_prefix() + "/etc/xensource-inventory"
f = open(filename, "r")
lines = [x.strip("\n") for x in f.readlines()]
f.close()
defs = [ (l[:l.find("=")], l[(l.find("=") + 1):]) for l in lines ]
defs = [ (a, b.strip("'")) for (a,b) in defs ]
return dict(defs)
def __pif_on_host(self,pif):
return self.__pifs.has_key(pif)
def __get_pif_records_from_xapi(self, session, host):
self.__pifs = {}
for (p,rec) in session.xenapi.PIF.get_all_records().items():
if rec['host'] != host:
continue
self.__pifs[p] = {}
for f in _PIF_ATTRS:
self.__pifs[p][f] = rec[f]
self.__pifs[p]['other_config'] = {}
for f in _PIF_OTHERCONFIG_ATTRS:
if not rec['other_config'].has_key(f): continue
self.__pifs[p]['other_config'][f] = rec['other_config'][f]
def __get_vlan_records_from_xapi(self, session):
self.__vlans = {}
for (v,rec) in session.xenapi.VLAN.get_all_records().items():
if not self.__pif_on_host(rec['untagged_PIF']):
continue
self.__vlans[v] = {}
for f in _VLAN_ATTRS:
self.__vlans[v][f] = rec[f]
def __get_tunnel_records_from_xapi(self, session):
self.__tunnels = {}
for t in session.xenapi.tunnel.get_all():
rec = session.xenapi.tunnel.get_record(t)
if not self.__pif_on_host(rec['transport_PIF']):
continue
self.__tunnels[t] = {}
for f in _TUNNEL_ATTRS:
self.__tunnels[t][f] = rec[f]
def __get_bond_records_from_xapi(self, session):
self.__bonds = {}
for (b,rec) in session.xenapi.Bond.get_all_records().items():
if not self.__pif_on_host(rec['master']):
continue
self.__bonds[b] = {}
for f in _BOND_ATTRS:
self.__bonds[b][f] = rec[f]
def __get_network_records_from_xapi(self, session):
self.__networks = {}
for (n,rec) in session.xenapi.network.get_all_records().items():
self.__networks[n] = {}
for f in _NETWORK_ATTRS:
if f == "PIFs":
# drop PIFs on other hosts
self.__networks[n][f] = [p for p in rec[f] if self.__pif_on_host(p)]
elif f == "MTU" and f not in rec:
# XenServer 5.5 network records did not have an
# MTU field, so allow this to be missing.
pass
else:
self.__networks[n][f] = rec[f]
self.__networks[n]['other_config'] = {}
for f in _NETWORK_OTHERCONFIG_ATTRS:
if not rec['other_config'].has_key(f): continue
self.__networks[n]['other_config'][f] = rec['other_config'][f]
def __get_pool_records_from_xapi(self, session):
self.__pools = {}
for p in session.xenapi.pool.get_all():
rec = session.xenapi.pool.get_record(p)
self.__pools[p] = {}
for f in _POOL_ATTRS:
self.__pools[p][f] = rec[f]
for f in _POOL_OTHERCONFIG_ATTRS:
if rec['other_config'].has_key(f):
self.__pools[p]['other_config'][f] = rec['other_config'][f]
def __to_xml(self, xml, parent, key, ref, rec, attrs):
"""Encode a database object as XML"""
e = xml.createElement(key)
parent.appendChild(e)
if ref:
e.setAttribute('ref', ref)
for n,v in rec.items():
if attrs.has_key(n):
h,_ = attrs[n]
h(xml, e, n, v)
else:
raise Error("Unknown attribute %s" % n)
def __from_xml(self, e, attrs):
"""Decode a database object from XML"""
ref = e.attributes['ref'].value
rec = {}
for n in e.childNodes:
if n.nodeName in attrs:
_,h = attrs[n.nodeName]
rec[n.nodeName] = h(n)
return (ref,rec)
def __init__(self, session_ref=None, cache_file=None):
if session_ref and cache_file:
raise Error("can't specify session reference and cache file")
if cache_file == None:
import XenAPI
session = XenAPI.xapi_local()
if not session_ref:
log("No session ref given on command line, logging in.")
session.xenapi.login_with_password("root", "")
else:
session._session = session_ref
try:
inventory = self.__read_xensource_inventory()
assert(inventory.has_key('INSTALLATION_UUID'))
log("host uuid is %s" % inventory['INSTALLATION_UUID'])
host = session.xenapi.host.get_by_uuid(inventory['INSTALLATION_UUID'])
self.__get_pif_records_from_xapi(session, host)
self.__get_pool_records_from_xapi(session)
self.__get_tunnel_records_from_xapi(session)
self.__get_vlan_records_from_xapi(session)
self.__get_bond_records_from_xapi(session)
self.__get_network_records_from_xapi(session)
finally:
if not session_ref:
session.xenapi.session.logout()
else:
log("Loading xapi database cache from %s" % cache_file)
xml = parseXML(root_prefix() + cache_file)
self.__pifs = {}
self.__bonds = {}
self.__vlans = {}
self.__pools = {}
self.__tunnels = {}
self.__networks = {}
assert(len(xml.childNodes) == 1)
toplevel = xml.childNodes[0]
assert(toplevel.nodeName == "xenserver-network-configuration")
for n in toplevel.childNodes:
if n.nodeName == "#text":
pass
elif n.nodeName == _PIF_XML_TAG:
(ref,rec) = self.__from_xml(n, _PIF_ATTRS)
self.__pifs[ref] = rec
elif n.nodeName == _BOND_XML_TAG:
(ref,rec) = self.__from_xml(n, _BOND_ATTRS)
self.__bonds[ref] = rec
elif n.nodeName == _VLAN_XML_TAG:
(ref,rec) = self.__from_xml(n, _VLAN_ATTRS)
self.__vlans[ref] = rec
elif n.nodeName == _TUNNEL_XML_TAG:
(ref,rec) = self.__from_xml(n, _TUNNEL_ATTRS)
self.__vlans[ref] = rec
elif n.nodeName == _NETWORK_XML_TAG:
(ref,rec) = self.__from_xml(n, _NETWORK_ATTRS)
self.__networks[ref] = rec
elif n.nodeName == _POOL_XML_TAG:
(ref,rec) = self.__from_xml(n, _POOL_ATTRS)
self.__pools[ref] = rec
else:
raise Error("Unknown XML element %s" % n.nodeName)
def save(self, cache_file):
xml = getDOMImplementation().createDocument(
None, "xenserver-network-configuration", None)
for (ref,rec) in self.__pifs.items():
self.__to_xml(xml, xml.documentElement, _PIF_XML_TAG, ref, rec, _PIF_ATTRS)
for (ref,rec) in self.__bonds.items():
self.__to_xml(xml, xml.documentElement, _BOND_XML_TAG, ref, rec, _BOND_ATTRS)
for (ref,rec) in self.__vlans.items():
self.__to_xml(xml, xml.documentElement, _VLAN_XML_TAG, ref, rec, _VLAN_ATTRS)
for (ref,rec) in self.__tunnels.items():
self.__to_xml(xml, xml.documentElement, _TUNNEL_XML_TAG, ref, rec, _TUNNEL_ATTRS)
for (ref,rec) in self.__networks.items():
self.__to_xml(xml, xml.documentElement, _NETWORK_XML_TAG, ref, rec,
_NETWORK_ATTRS)
for (ref,rec) in self.__pools.items():
self.__to_xml(xml, xml.documentElement, _POOL_XML_TAG, ref, rec, _POOL_ATTRS)
temp_file = cache_file + ".%d" % os.getpid()
f = open(temp_file, 'w')
f.write(xml.toprettyxml())
f.close()
os.rename(temp_file, cache_file)
def get_pif_by_uuid(self, uuid):
pifs = map(lambda (ref,rec): ref,
filter(lambda (ref,rec): uuid == rec['uuid'],
self.__pifs.items()))
if len(pifs) == 0:
raise Error("Unknown PIF \"%s\"" % uuid)
elif len(pifs) > 1:
raise Error("Non-unique PIF \"%s\"" % uuid)
return pifs[0]
def get_pifs_by_device(self, device):
return map(lambda (ref,rec): ref,
filter(lambda (ref,rec): rec['device'] == device,
self.__pifs.items()))
def get_networks_with_bridge(self, bridge):
return map(lambda (ref,rec): ref,
filter(lambda (ref,rec): rec['bridge'] == bridge,
self.__networks.items()))
def get_network_by_bridge(self, bridge):
#Assumes one network has bridge.
try:
return self.get_networks_with_bridge(bridge)[0]
except KeyError:
return None
def get_pif_by_bridge(self, bridge):
networks = self.get_networks_with_bridge(bridge)
if len(networks) == 0:
raise Error("No matching network \"%s\"" % bridge)
answer = None
for network in networks:
nwrec = self.get_network_record(network)
for pif in nwrec['PIFs']:
pifrec = self.get_pif_record(pif)
if answer:
raise Error("Multiple PIFs on host for network %s" % (bridge))
answer = pif
if not answer:
raise Error("No PIF on host for network %s" % (bridge))
return answer
def get_pif_record(self, pif):
if self.__pifs.has_key(pif):
return self.__pifs[pif]
raise Error("Unknown PIF \"%s\"" % pif)
def get_all_pifs(self):
return self.__pifs
def pif_exists(self, pif):
return self.__pifs.has_key(pif)
def get_management_pif(self):
""" Returns the management pif on host
"""
all = self.get_all_pifs()
for pif in all:
pifrec = self.get_pif_record(pif)
if pifrec['management']: return pif
return None
def get_network_record(self, network):
if self.__networks.has_key(network):
return self.__networks[network]
raise Error("Unknown network \"%s\"" % network)
def get_bond_record(self, bond):
if self.__bonds.has_key(bond):
return self.__bonds[bond]
else:
return None
def get_vlan_record(self, vlan):
if self.__vlans.has_key(vlan):
return self.__vlans[vlan]
else:
return None
def get_pool_record(self):
if len(self.__pools) > 0:
return self.__pools.values()[0]
#
#
#
# Offload defaults applied when a PIF's other-config does not override them.
PIF_OTHERCONFIG_DEFAULTS = {'gro': 'off', 'lro': 'off'}

def ethtool_settings(oc, defaults=None):
    """Parse the ethtool-* keys of an other-config map *oc*.

    Returns (settings, offload): flat [name, value, ...] lists suitable
    for an ethtool command line.  *defaults* supplies fallback values for
    offload options absent from *oc* (e.g. PIF_OTHERCONFIG_DEFAULTS).
    Invalid values are logged and skipped.

    Fixes: dict.has_key() (removed in Python 3) replaced by the `in`
    operator; the mutable default argument {} replaced by None.
    """
    if defaults is None:
        defaults = {}
    settings = []
    if 'ethtool-speed' in oc:
        val = oc['ethtool-speed']
        if val in ["10", "100", "1000"]:
            settings += ['speed', val]
        else:
            log("Invalid value for ethtool-speed = %s. Must be 10|100|1000." % val)
    if 'ethtool-duplex' in oc:
        val = oc['ethtool-duplex']
        if val in ["half", "full"]:
            settings += ['duplex', val]
        else:
            log("Invalid value for ethtool-duplex = %s. Must be half|full." % val)
    if 'ethtool-autoneg' in oc:
        val = oc['ethtool-autoneg']
        if val in ["true", "on"]:
            settings += ['autoneg', 'on']
        elif val in ["false", "off"]:
            settings += ['autoneg', 'off']
        else:
            log("Invalid value for ethtool-autoneg = %s. Must be on|true|off|false." % val)
    offload = []
    for opt in ("rx", "tx", "sg", "tso", "ufo", "gso", "gro", "lro"):
        if "ethtool-" + opt in oc:
            val = oc["ethtool-" + opt]
            if val in ["true", "on"]:
                offload += [opt, 'on']
            elif val in ["false", "off"]:
                offload += [opt, 'off']
            else:
                log("Invalid value for ethtool-%s = %s. Must be on|true|off|false." % (opt, val))
        elif opt in defaults:
            offload += [opt, defaults[opt]]
    return settings, offload
# By default the MTU is taken from the Network.MTU setting for VIF,
# PIF and Bridge. However it is possible to override this by using
# {VIF,PIF,Network}.other-config:mtu.
#
# type parameter is a string describing the object that the oc parameter
# is from. e.g. "PIF", "Network"
def mtu_setting(nw, type, oc):
    """Return the MTU (as a string) for network *nw*, or None if invalid.

    The base value is Network.MTU, defaulting to "1500" for pre-5.6
    records that lack the field.  An 'mtu' key in the other-config map
    *oc* of the given object *type* ("PIF", "Network", ...) overrides it.

    Fixes: dict.has_key() (removed in Python 3) replaced by `in`; the
    unused bound exception variable dropped (version-neutral syntax);
    the unreachable `mtu is not None` guard removed -- both branches
    above always assign mtu.
    """
    nwrec = db().get_network_record(nw)
    if 'MTU' in nwrec:
        mtu = nwrec['MTU']
    else:
        mtu = "1500"

    if 'mtu' in oc:
        log("Override Network.MTU setting on bridge %s from %s.MTU is %s" % \
            (nwrec['bridge'], type, mtu))
        mtu = oc['mtu']

    try:
        int(mtu)  # validate that the value parses as an integer
        return mtu
    except ValueError:
        log("Invalid value for mtu = %s" % mtu)
    return None
#
# IP Network Devices -- network devices with IP configuration
#
def pif_ipdev_name(pif):
    """Return the ipdev name associated with pif"""
    pifrec = db().get_pif_record(pif)
    nwrec = db().get_network_record(pifrec['network'])

    bridge = nwrec['bridge']
    if bridge:
        # TODO: sanity check that nwrec['bridgeless'] != 'true'
        return bridge
    # TODO: sanity check that nwrec['bridgeless'] == 'true'
    return pif_netdev_name(pif)
#
# Bare Network Devices -- network devices without IP configuration
#
def netdev_exists(netdev):
    """True when *netdev* is a network device known to the kernel."""
    sysfs_path = root_prefix() + "/sys/class/net/" + netdev
    return os.path.exists(sysfs_path)
def pif_netdev_name(pif):
    """Get the netdev name for a PIF."""
    pifrec = db().get_pif_record(pif)

    if not pif_is_vlan(pif):
        return pifrec['device']
    # VLAN PIFs use the dotted <device>.<tag> convention.
    return "%(device)s.%(VLAN)s" % pifrec
#
# Bridges
#
def pif_is_bridged(pif):
    """True when the PIF's network has a bridge (is not bridgeless)."""
    pifrec = db().get_pif_record(pif)
    nwrec = db().get_network_record(pifrec['network'])
    # TODO: sanity check nwrec['bridgeless'] against this result
    return bool(nwrec['bridge'])
def pif_bridge_name(pif):
    """Return the bridge name of a pif.
    PIF must be a bridged PIF."""
    pifrec = db().get_pif_record(pif)
    nwrec = db().get_network_record(pifrec['network'])

    bridge = nwrec['bridge']
    if not bridge:
        raise Error("PIF %(uuid)s does not have a bridge name" % pifrec)
    return bridge
#
# Bonded PIFs
#
def pif_is_bond(pif):
    """True when this PIF is the master side of a bond."""
    return len(db().get_pif_record(pif)['bond_master_of']) > 0
def pif_get_bond_masters(pif):
    """Returns a list of PIFs which are bond masters of this PIF"""
    pifrec = db().get_pif_record(pif)

    bso = pifrec['bond_slave_of']

    # bond-slave-of is currently a single reference but in principle a
    # PIF could be a member of several bonds which are not
    # concurrently attached.  Be robust to this possibility.
    if not bso or bso == "OpaqueRef:NULL":
        bso = []
    elif not isinstance(bso, list):
        bso = [bso]

    records = [db().get_bond_record(bond) for bond in bso]
    return [rec['master'] for rec in records if rec]
def pif_get_bond_slaves(pif):
    """Returns a list of PIFs which make up the given bonded pif."""
    pifrec = db().get_pif_record(pif)

    masters = pifrec['bond_master_of']
    if len(masters) == 0:
        return []
    if len(masters) > 1:
        raise Error("Bond-master-of contains too many elements")

    bondrec = db().get_bond_record(masters[0])
    if not bondrec:
        raise Error("No bond record for bond master PIF")

    return bondrec['slaves']
#
# VLAN PIFs
#
def pif_is_vlan(pif):
    """True when the PIF is a VLAN PIF (its VLAN tag is not '-1')."""
    return db().get_pif_record(pif)['VLAN'] != '-1'
def pif_get_vlan_slave(pif):
    """Find the PIF which is the VLAN slave of pif.
    Returns the 'physical' PIF underneath the a VLAN PIF @pif."""
    vlan = db().get_pif_record(pif)['VLAN_master_of']
    if not vlan or vlan == "OpaqueRef:NULL":
        raise Error("PIF is not a VLAN master")

    vlanrec = db().get_vlan_record(vlan)
    if not vlanrec:
        raise Error("No VLAN record found for PIF")

    return vlanrec['tagged_PIF']
def pif_get_vlan_masters(pif):
    """Returns a list of PIFs which are VLANs on top of the given pif."""
    vlan_refs = db().get_pif_record(pif)['VLAN_slave_of']
    vlan_records = [db().get_vlan_record(v) for v in vlan_refs]
    return [rec['untagged_PIF'] for rec in vlan_records
            if rec and db().pif_exists(rec['untagged_PIF'])]
#
# Tunnel PIFs
#
def pif_is_tunnel(pif):
    """True when the PIF is the access PIF of a tunnel."""
    return len(db().get_pif_record(pif)['tunnel_access_PIF_of']) > 0
#
# Datapath base class
#
class Datapath(object):
    """Object encapsulating the actions necessary to (de)configure the
    datapath for a given PIF. Does not include configuration of the
    IP address on the ipdev.

    Abstract base: concrete backends (bridge, vswitch) are selected by
    DatapathFactory() and must override every NotImplementedError method.
    """

    def __init__(self, pif):
        # The xapi PIF reference this datapath instance configures.
        self._pif = pif

    @classmethod
    def rewrite(cls):
        """Class method called when write action is called. Can be used
        to update any backend specific configuration."""
        pass

    def configure_ipdev(self, cfg):
        """Write ifcfg TYPE field for an IPdev, plus any type specific
        fields to cfg
        """
        raise NotImplementedError

    def preconfigure(self, parent):
        """Prepare datapath configuration for PIF, but do not actually
        apply any changes.

        Any configuration files should be attached to parent.
        """
        raise NotImplementedError

    def bring_down_existing(self):
        """Tear down any existing network device configuration which
        needs to be undone in order to bring this PIF up.
        """
        raise NotImplementedError

    def configure(self):
        """Apply the configuration prepared in the preconfigure stage.

        Should assume any configuration files changed attached in
        the preconfigure stage are applied and bring up the
        necesary devices to provide the datapath for the
        PIF.

        Should not bring up the IPdev.
        """
        raise NotImplementedError

    def post(self):
        """Called after the IPdev has been brought up.

        Should do any final setup, including reinstating any
        devices which were taken down in the bring_down_existing
        hook.
        """
        raise NotImplementedError

    def bring_down(self):
        """Tear down and deconfigure the datapath. Should assume the
        IPdev has already been brought down.
        """
        raise NotImplementedError
def DatapathFactory():
# XXX Need a datapath object for bridgeless PIFs
try:
network_conf = open(root_prefix() + "/etc/xensource/network.conf", 'r')
network_backend = network_conf.readline().strip()
network_conf.close()
except Exception, e:
raise Error("failed to determine network backend:" + e)
if network_backend == "bridge":
from InterfaceReconfigureBridge import DatapathBridge
return DatapathBridge
elif network_backend in ["openvswitch", "vswitch"]:
from InterfaceReconfigureVswitch import DatapathVswitch
return DatapathVswitch
else:
raise Error("unknown network backend %s" % network_backend)
# ---- end of concatenated source segment ----
#!/usr/bin/env python
# $Id: NameMapper.py,v 1.11 2006/01/15 20:45:22 tavis_rudd Exp $
"""NameMapper Tests
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>,
Version: $Revision: 1.11 $
Start Date: 2001/10/01
Last Revision Date: $Date: 2006/01/15 20:45:22 $
"""
from __future__ import generators
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.11 $"[11:-2]
import sys
import types
import os
import os.path
import unittest_local_copy as unittest
from Cheetah.NameMapper import NotFound, valueForKey, \
valueForName, valueFromSearchList, valueFromFrame, valueFromFrameOrSearchList
##################################################
## TEST DATA FOR USE IN THE TEMPLATES ##
class DummyClass:
    """Test fixture exercised by the NameMapper lookup tests below."""
    # Looked up via 'aClass.classVar1' in the results table.
    classVar1 = 123

    def __init__(self):
        # Looked up via 'anObj.instanceVar1'.
        self.instanceVar1 = 123

    def __str__(self):
        return 'object'

    def meth(self, arg="arff"):
        return str(arg)

    def meth1(self, arg="doo"):
        return arg

    def meth2(self, arg1="a1", arg2="a2"):
        # Used to verify that exceptions from autocalled methods propagate.
        raise ValueError

    def meth3(self):
        """Tests a bug that Jeff Johnson reported on Oct 1, 2001"""
        # x is a string, so neither comparison matches; the loop exists
        # only to reproduce the reported control-flow shape.
        x = 'A string'
        try:
            for i in [1,2,3,4]:
                if x == 2:
                    pass
                if x == 'xx':
                    pass
            return x
        except:
            raise
def dummyFunc(arg="Scooby"):
    """Identity helper; autocalling with no args yields the default "Scooby"."""
    result = arg
    return result
def funcThatRaises():
    """Unconditionally raise ValueError (exception-propagation fixture)."""
    raise ValueError
# Namespace used as the lookup root / searchList entry in every test.
# Keys cover each value category NameMapper must handle: plain values,
# nested dicts, functions and bound methods (autocall), classes, None
# and the empty string.
testNamespace = {
    'aStr':'blarg',
    'anInt':1,
    'aFloat':1.5,
    'aDict': {'one':'item1',
              'two':'item2',
              'nestedDict':{'one':'nestedItem1',
                            'two':'nestedItem2',
                            'funcThatRaises':funcThatRaises,
                            'aClass': DummyClass,
                            },
              'nestedFunc':dummyFunc,
              },
    'aClass': DummyClass,
    'aFunc': dummyFunc,
    'anObj': DummyClass(),
    'aMeth': DummyClass().meth1,
    'none' : None,
    'emptyString':'',
    'funcThatRaises':funcThatRaises,
    }
# Expected values when autocalling is enabled: callables found during
# lookup are invoked with no arguments, so their defaults come back
# instead of the callable itself.
autoCallResults = {'aFunc':'Scooby',
                   'aMeth':'doo',
                   }

# Expected lookup results keyed by dotted name.
results = testNamespace.copy()
results.update({'anObj.meth1':'doo',
                'aDict.one':'item1',
                'aDict.nestedDict':testNamespace['aDict']['nestedDict'],
                'aDict.nestedDict.one':'nestedItem1',
                'aDict.nestedDict.aClass':DummyClass,
                'aDict.nestedFunc':'Scooby',
                'aClass.classVar1':123,
                'anObj.instanceVar1':123,
                'anObj.meth3':'A string',
                })

for k in testNamespace:
    # Put them in the globals for the valueFromFrame tests.
    # exec() call form is valid on both Python 2 and 3; the original
    # bare exec-statement is a SyntaxError under Python 3.
    exec('%s = testNamespace[k]' % k)
##################################################
## TEST BASE CLASSES
class NameMapperTest(unittest.TestCase):
    """Common plumbing shared by all NameMapper test case classes."""
    # NotFound must count as a test *failure*, not an error.
    failureException = (NotFound,AssertionError)
    _testNamespace = testNamespace
    _results = results
    def namespace(self):
        return self._testNamespace
    def VFN(self, name, autocall=True):
        return valueForName(self.namespace(), name, autocall)
    def VFS(self, searchList, name, autocall=True):
        return valueFromSearchList(searchList, name, autocall)
    # alias to be overriden later
    get = VFN
    def check(self, name):
        """Look up *name* and compare against the expected-results tables."""
        got = self.get(name)
        # 'name in d' replaces d.has_key(name), which was removed in
        # Python 3 (and long deprecated in 2.x); behavior is identical.
        if name in autoCallResults:
            expected = autoCallResults[name]
        else:
            expected = self._results[name]
        assert got == expected
##################################################
## TEST CASE CLASSES
class VFN(NameMapperTest):
    """valueForName lookups against a single namespace dict.

    NOTE(review): the original defined test17/test18 twice with
    identical bodies; the second definitions silently shadowed the
    first, so one copy of each was removed.
    """
    def test1(self):
        """string in dict lookup"""
        self.check('aStr')
    def test2(self):
        """string in dict lookup in a loop"""
        for i in range(10):
            self.check('aStr')
    def test3(self):
        """int in dict lookup"""
        self.check('anInt')
    def test4(self):
        """int in dict lookup in a loop"""
        for i in range(10):
            self.check('anInt')
    def test5(self):
        """float in dict lookup"""
        self.check('aFloat')
    def test6(self):
        """float in dict lookup in a loop"""
        for i in range(10):
            self.check('aFloat')
    def test7(self):
        """class in dict lookup"""
        self.check('aClass')
    def test8(self):
        """class in dict lookup in a loop"""
        for i in range(10):
            self.check('aClass')
    def test9(self):
        """aFunc in dict lookup"""
        self.check('aFunc')
    def test10(self):
        """aFunc in dict lookup in a loop"""
        for i in range(10):
            self.check('aFunc')
    def test11(self):
        """aMeth in dict lookup"""
        self.check('aMeth')
    def test12(self):
        """aMeth in dict lookup in a loop"""
        for i in range(10):
            self.check('aMeth')
    def test13(self):
        """aMeth in dict lookup"""
        self.check('aMeth')
    def test14(self):
        """aMeth in dict lookup in a loop"""
        for i in range(10):
            self.check('aMeth')
    def test15(self):
        """anObj in dict lookup"""
        self.check('anObj')
    def test16(self):
        """anObj in dict lookup in a loop"""
        for i in range(10):
            self.check('anObj')
    def test17(self):
        """aDict in dict lookup"""
        self.check('aDict')
    def test18(self):
        """aDict in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict')
    def test19(self):
        """aClass.classVar1 in dict lookup"""
        self.check('aClass.classVar1')
    def test20(self):
        """aClass.classVar1 in dict lookup in a loop"""
        for i in range(10):
            self.check('aClass.classVar1')
    def test23(self):
        """anObj.instanceVar1 in dict lookup"""
        self.check('anObj.instanceVar1')
    def test24(self):
        """anObj.instanceVar1 in dict lookup in a loop"""
        for i in range(10):
            self.check('anObj.instanceVar1')
    ## tests 22, 25, and 26 removed when the underscored lookup was removed
    def test27(self):
        """anObj.meth1 in dict lookup"""
        self.check('anObj.meth1')
    def test28(self):
        """anObj.meth1 in dict lookup in a loop"""
        for i in range(10):
            self.check('anObj.meth1')
    def test29(self):
        """aDict.one in dict lookup"""
        self.check('aDict.one')
    def test30(self):
        """aDict.one in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.one')
    def test31(self):
        """aDict.nestedDict in dict lookup"""
        self.check('aDict.nestedDict')
    def test32(self):
        """aDict.nestedDict in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedDict')
    def test33(self):
        """aDict.nestedDict.one in dict lookup"""
        self.check('aDict.nestedDict.one')
    def test34(self):
        """aDict.nestedDict.one in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedDict.one')
    def test35(self):
        """aDict.nestedFunc in dict lookup"""
        self.check('aDict.nestedFunc')
    def test36(self):
        """aDict.nestedFunc in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedFunc')
    def test37(self):
        """aDict.nestedFunc in dict lookup - without autocalling"""
        assert self.get('aDict.nestedFunc', False) == dummyFunc
    def test38(self):
        """aDict.nestedFunc in dict lookup in a loop - without autocalling"""
        for i in range(10):
            assert self.get('aDict.nestedFunc', False) == dummyFunc
    def test39(self):
        """aMeth in dict lookup - without autocalling"""
        assert self.get('aMeth', False) == self.namespace()['aMeth']
    def test40(self):
        """aMeth in dict lookup in a loop - without autocalling"""
        for i in range(10):
            assert self.get('aMeth', False) == self.namespace()['aMeth']
    def test41(self):
        """anObj.meth3 in dict lookup"""
        self.check('anObj.meth3')
    def test42(self):
        """anObj.meth3 in dict lookup in a loop"""
        # (docstring corrected: the original said "aMeth" but the body
        # checks anObj.meth3)
        for i in range(10):
            self.check('anObj.meth3')
    def test43(self):
        """NotFound test"""
        def test(self=self):
            self.get('anObj.methX')
        self.assertRaises(NotFound,test)
    def test44(self):
        """NotFound test in a loop"""
        def test(self=self):
            self.get('anObj.methX')

        for i in range(10):
            self.assertRaises(NotFound,test)
    def test45(self):
        """Other exception from meth test"""
        def test(self=self):
            self.get('anObj.meth2')
        self.assertRaises(ValueError, test)
    def test46(self):
        """Other exception from meth test in a loop"""
        def test(self=self):
            self.get('anObj.meth2')

        for i in range(10):
            self.assertRaises(ValueError,test)
    def test47(self):
        """None in dict lookup"""
        self.check('none')
    def test48(self):
        """None in dict lookup in a loop"""
        for i in range(10):
            self.check('none')
    def test49(self):
        """EmptyString in dict lookup"""
        self.check('emptyString')
    def test50(self):
        """EmptyString in dict lookup in a loop"""
        for i in range(10):
            self.check('emptyString')
    def test51(self):
        """Other exception from func test"""
        def test(self=self):
            self.get('funcThatRaises')
        self.assertRaises(ValueError, test)
    def test52(self):
        """Other exception from func test in a loop"""
        def test(self=self):
            self.get('funcThatRaises')
        for i in range(10):
            self.assertRaises(ValueError,test)
    def test53(self):
        """Other exception from func test"""
        def test(self=self):
            self.get('aDict.nestedDict.funcThatRaises')
        self.assertRaises(ValueError, test)
    def test54(self):
        """Other exception from func test in a loop"""
        def test(self=self):
            self.get('aDict.nestedDict.funcThatRaises')
        for i in range(10):
            self.assertRaises(ValueError,test)
    def test55(self):
        """aDict.nestedDict.aClass in dict lookup"""
        self.check('aDict.nestedDict.aClass')
    def test56(self):
        """aDict.nestedDict.aClass in dict lookup in a loop"""
        for i in range(10):
            self.check('aDict.nestedDict.aClass')
    def test57(self):
        """aDict.nestedDict.aClass in dict lookup - without autocalling"""
        assert self.get('aDict.nestedDict.aClass', False) == DummyClass
    def test58(self):
        """aDict.nestedDict.aClass in dict lookup in a loop - without autocalling"""
        for i in range(10):
            assert self.get('aDict.nestedDict.aClass', False) == DummyClass
    def test59(self):
        """Other exception from func test -- but without autocalling shouldn't raise"""
        self.get('aDict.nestedDict.funcThatRaises', False)
    def test60(self):
        """Other exception from func test in a loop -- but without autocalling shouldn't raise"""
        for i in range(10):
            self.get('aDict.nestedDict.funcThatRaises', False)
class VFS(VFN):
    """Run the VFN cases through valueFromSearchList instead."""
    _searchListLength = 1
    def searchList(self):
        """Build a search list of the configured length/flavour."""
        size = self._searchListLength
        if size == 1:
            return [self.namespace()]
        if size == 2:
            return [self.namespace(),{'dummy':1234}]
        if size == 3:
            # a tuple for kicks
            return ({'dummy':1234}, self.namespace(),{'dummy':1234})
        if size == 4:
            # a generator for more kicks
            return self.searchListGenerator()
    def searchListGenerator(self):
        """Yield namespaces one at a time, padded with dummy entries."""
        class Test:
            pass
        entries = [Test(),{'dummy':1234}, self.namespace(),{'dummy':1234}]
        for entry in entries:
            yield entry
    def get(self, name, autocall=True):
        return self.VFS(self.searchList(), name, autocall)
# The same cases with progressively longer / odder search lists; see
# VFS.searchList for what each length selects (list, tuple, generator).
class VFS_2namespaces(VFS):
    _searchListLength = 2
class VFS_3namespaces(VFS):
    _searchListLength = 3
class VFS_4namespaces(VFS):
    _searchListLength = 4
class VFF(VFN):
    """Run the VFN cases through valueFromFrame (caller-frame lookup)."""
    def get(self, name, autocall=True):
        ns = self._testNamespace
        # These locals are deliberately created so valueFromFrame can
        # find them in *this* frame -- do not rename or remove them.
        aStr = ns['aStr']
        aFloat = ns['aFloat']
        none = 'some'
        return valueFromFrame(name, autocall)
    def setUp(self):
        """Mod some of the data
        """
        # Copy before mutating so sibling test classes keep the pristine
        # module-level tables.
        self._testNamespace = ns = self._testNamespace.copy()
        self._results = res = self._results.copy()
        ns['aStr'] = res['aStr'] = 'BLARG'
        ns['aFloat'] = res['aFloat'] = 0.1234
        res['none'] = 'some'
        res['True'] = True
        res['False'] = False
        res['None'] = None
        res['eval'] = eval
    def test_VFF_1(self):
        """Builtins"""
        self.check('True')
        self.check('None')
        self.check('False')
        assert self.get('eval', False)==eval
        assert self.get('range', False)==range
class VFFSL(VFS):
    """valueFromFrameOrSearchList: frame locals first, then searchList."""
    _searchListLength = 1
    def setUp(self):
        """Mod some of the data
        """
        # Copy before mutating so sibling test classes keep the pristine
        # module-level tables.
        self._testNamespace = ns = self._testNamespace.copy()
        self._results = res = self._results.copy()
        ns['aStr'] = res['aStr'] = 'BLARG'
        ns['aFloat'] = res['aFloat'] = 0.1234
        res['none'] = 'some'
        del ns['anInt'] # will be picked up by globals
    def VFFSL(self, searchList, name, autocall=True):
        # 'anInt' and 'none' exist only as locals in this frame; the
        # lookup must find them before falling back to the searchList.
        # Do not rename or remove them.
        anInt = 1
        none = 'some'
        return valueFromFrameOrSearchList(searchList, name, autocall)
    def get(self, name, autocall=True):
        return self.VFFSL(self.searchList(), name, autocall)
class VFFSL_2(VFFSL):
    _searchListLength = 2
class VFFSL_3(VFFSL):
    _searchListLength = 3
class VFFSL_4(VFFSL):
    _searchListLength = 4
# Jython has no CPython-style frame introspection, so the frame-based
# lookup tests cannot run there.
if sys.platform.startswith('java'):
    del VFF, VFFSL, VFFSL_2, VFFSL_3, VFFSL_4

##################################################
## if run from the command line ##
if __name__ == '__main__':
    unittest.main()
| |
# -*- coding: utf-8 -*-
# flake8: noqa
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Configuration of Airflow Docs"""
import os
import sys
from typing import Dict
import airflow
from airflow.configuration import default_config_yaml
# Probe for the optional Airflow Sphinx theme; fall back gracefully
# when it is not installed.
airflow_theme_is_available = True
try:
    import sphinx_airflow_theme  # pylint: disable=unused-import
except ImportError:
    airflow_theme_is_available = False
# Modules autodoc must mock out: heavyweight / optional provider
# dependencies that are not installed in the docs build environment.
# (Duplicate 'pyhive' entry removed.)
autodoc_mock_imports = [
    'MySQLdb',
    'adal',
    'analytics',
    'azure',
    'azure.cosmos',
    'azure.datalake',
    'azure.mgmt',
    'boto3',
    'botocore',
    'bson',
    'cassandra',
    'celery',
    'cloudant',
    'cryptography',
    'cx_Oracle',
    'datadog',
    'distributed',
    'docker',
    'google',
    'google_auth_httplib2',
    'googleapiclient',
    'grpc',
    'hdfs',
    'httplib2',
    'jaydebeapi',
    'jenkins',
    'jira',
    'kubernetes',
    'mesos',
    'msrestazure',
    'pandas',
    'pandas_gbq',
    'paramiko',
    'pinotdb',
    'psycopg2',
    'pydruid',
    'pyhive',
    'pymongo',
    'pymssql',
    'pysftp',
    'qds_sdk',
    'redis',
    'simple_salesforce',
    'slackclient',
    'smbclient',
    'snowflake',
    'sshtunnel',
    'tenacity',
    'vertica_python',
    'winrm',
    'zdesk',
]
# Hack that allows a piece of the code to behave differently while the
# docs are being built. The main objective was to alter the behavior of
# utils.apply_default, which was hiding function headers.
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.join(os.path.dirname(__file__), 'exts'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
    'sphinx.ext.graphviz',
    'sphinxarg.ext',
    'sphinxcontrib.httpdomain',
    'sphinxcontrib.jinja',
    'sphinx.ext.intersphinx',
    'autoapi.extension',
    'exampleinclude',
    'docroles',
    'removemarktransform',
]

autodoc_default_options = {
    'show-inheritance': True,
    'members': True
}

# Jinja context passed to ``.. jinja::`` blocks (sphinxcontrib-jinja);
# exposes the default configuration-options table to the docs.
jinja_contexts = {
    'config_ctx': {"configs": default_config_yaml()}
}

viewcode_follow_imported_members = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Airflow'
# copyright = u''

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1.0.0'
version = airflow.__version__
# The full version, including alpha/beta/rc tags.
# release = '1.0.0'
release = airflow.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# NOTE(review): these keep autodoc/AutoAPI from documenting internal or
# vendored packages.
exclude_patterns = [
    '_api/airflow/_vendor',
    '_api/airflow/api',
    '_api/airflow/bin',
    '_api/airflow/config_templates',
    '_api/airflow/configuration',
    '_api/airflow/contrib/auth',
    '_api/airflow/contrib/example_dags',
    '_api/airflow/contrib/index.rst',
    '_api/airflow/contrib/kubernetes',
    '_api/airflow/contrib/task_runner',
    '_api/airflow/contrib/utils',
    '_api/airflow/dag',
    '_api/airflow/default_login',
    '_api/airflow/example_dags',
    '_api/airflow/exceptions',
    '_api/airflow/index.rst',
    '_api/airflow/jobs',
    '_api/airflow/lineage',
    '_api/airflow/logging_config',
    '_api/airflow/macros',
    '_api/airflow/migrations',
    '_api/airflow/plugins_manager',
    '_api/airflow/security',
    '_api/airflow/serialization',
    '_api/airflow/settings',
    '_api/airflow/sentry',
    '_api/airflow/stats',
    '_api/airflow/task',
    '_api/airflow/ti_deps',
    '_api/airflow/utils',
    '_api/airflow/version',
    '_api/airflow/www',
    '_api/airflow/www_rbac',
    '_api/main',
    'autoapi_templates',
    'howto/operator/gcp/_partials',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True

# Targets for sphinx.ext.intersphinx: lets the docs cross-reference
# objects in these external projects' inventories.
intersphinx_mapping = {
    'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),
    'mongodb': ('https://api.mongodb.com/python/current/', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
    'python': ('https://docs.python.org/3/', None),
    'requests': ('https://requests.readthedocs.io/en/master/', None),
    'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None),
    'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None),
    # google-cloud-python
    'google-cloud-automl': ('https://googleapis.dev/python/automl/latest', None),
    'google-cloud-bigquery': ('https://googleapis.dev/python/bigquery/latest', None),
    'google-cloud-bigquery-datatransfer': ('https://googleapis.dev/python/bigquerydatatransfer/latest', None),
    'google-cloud-bigquery-storage': ('https://googleapis.dev/python/bigquerystorage/latest', None),
    'google-cloud-bigtable': ('https://googleapis.dev/python/bigtable/latest', None),
    'google-cloud-container': ('https://googleapis.dev/python/container/latest', None),
    'google-cloud-core': ('https://googleapis.dev/python/google-cloud-core/latest', None),
    'google-cloud-datastore': ('https://googleapis.dev/python/datastore/latest', None),
    'google-cloud-dlp': ('https://googleapis.dev/python/dlp/latest', None),
    'google-cloud-kms': ('https://googleapis.dev/python/cloudkms/latest', None),
    'google-cloud-language': ('https://googleapis.dev/python/language/latest', None),
    'google-cloud-pubsub': ('https://googleapis.dev/python/pubsub/latest', None),
    'google-cloud-redis': ('https://googleapis.dev/python/redis/latest', None),
    'google-cloud-spanner': ('https://googleapis.dev/python/spanner/latest', None),
    'google-cloud-speech': ('https://googleapis.dev/python/speech/latest', None),
    'google-cloud-storage': ('https://googleapis.dev/python/storage/latest', None),
    'google-cloud-tasks': ('https://googleapis.dev/python/cloudtasks/latest', None),
    'google-cloud-texttospeech': ('https://googleapis.dev/python/texttospeech/latest', None),
    'google-cloud-translate': ('https://googleapis.dev/python/translation/latest', None),
    'google-cloud-videointelligence': ('https://googleapis.dev/python/videointelligence/latest', None),
    'google-cloud-vision': ('https://googleapis.dev/python/vision/latest', None),
}
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. The Airflow theme is preferred when installed
# (probed above).
html_theme = 'sphinx_rtd_theme'

if airflow_theme_is_available:
    html_theme = 'sphinx_airflow_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
import sphinx_rtd_theme  # pylint: disable=wrong-import-position,wrong-import-order
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
html_favicon = "../airflow/www/static/pin_32.png"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# A list of JavaScript filename. The entry must be a filename string or a
# tuple containing the filename string and the attributes dictionary. The
# filename must be relative to the html_static_path, or a full URI with
# scheme like http://example.org/script.js.
html_js_files = ['jira-links.js']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
if airflow_theme_is_available:
    html_sidebars = {
        '**': [
            'version-selector.html',
            'searchbox.html',
            'globaltoc.html',
        ]
    }

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Airflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
} # type: Dict[str,str]

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Airflow.tex', u'Airflow Documentation',
     u'Apache Airflow', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'airflow', u'Airflow Documentation',
     [u'Apache Airflow'], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
    'index', 'Airflow', u'Airflow Documentation',
    u'Apache Airflow', 'Airflow',
    'Airflow is a system to programmatically author, schedule and monitor data pipelines.',
    'Miscellaneous'
), ]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# sphinx-autoapi configuration
# See:
# https://sphinx-autoapi.readthedocs.io/en/latest/config.html

# Paths (relative or absolute) to the source code that you wish to generate
# your API documentation from.
autoapi_dirs = [
    os.path.abspath('../airflow'),
]

# A directory that has user-defined templates to override our default templates.
autoapi_template_dir = 'autoapi_templates'

# A list of patterns to ignore when finding files
autoapi_ignore = [
    # These modules are backcompat shims, don't build docs for them
    # (duplicate gcs_to_gcs_transfer_operator entry removed)
    '*/airflow/contrib/operators/s3_to_gcs_transfer_operator.py',
    '*/airflow/contrib/operators/gcs_to_gcs_transfer_operator.py',
    '*/node_modules/*',
    '*/migrations/*',
]

# Keep the AutoAPI generated files on the filesystem after the run.
# Useful for debugging.
autoapi_keep_files = True

# Relative path to output the AutoAPI files into. This can also be used to place the generated documentation
# anywhere in your documentation hierarchy.
autoapi_root = '_api'

# -- Options for example include ------------------------------------------
exampleinclude_sourceroot = os.path.abspath('..')

# -- Additional HTML Context variable
html_context = {
    # Google Analytics ID.
    # For more information look at:
    # https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.html#L222-L232
    'theme_analytics_id': 'UA-140539454-1',
}
# When the Airflow theme is installed, replace html_context with the
# variables its "suggest a change" button needs.
if airflow_theme_is_available:
    html_context = {
        # Variables used to build a button for editing the source code
        #
        # The path is created according to the following template:
        #
        # https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/
        # {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }}
        # {{ pagename }}{{ suffix }}
        #
        # More information:
        # https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L100-L103
        # https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.html#L45
        # https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.html#L36-L40
        #
        'theme_vcs_pageview_mode': 'edit',
        'conf_py_path': '/docs/',
        'github_user': 'apache',
        'github_repo': 'airflow',
        'github_version': 'master',
        'display_github': 'master',
        'suffix': '.rst',
    }
| |
from __future__ import unicode_literals
import warnings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch
from django.db.models.query import get_prefetcher
from django.test import TestCase, override_settings
from django.test.utils import CaptureQueriesContext
from django.utils import six
from django.utils.encoding import force_text
from .models import (
Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book, Bookmark,
BookReview, BookWithYear, Comment, Department, Employee, FavoriteAuthors,
House, LessonEntry, Person, Qualification, Reader, Room, TaggedItem,
Teacher, WordEntry,
)
class PrefetchRelatedTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Shared fixture: four books, four authors and two readers wired
        # through the m2m/FK relations the tests below traverse.
        cls.book1 = Book.objects.create(title='Poems')
        cls.book2 = Book.objects.create(title='Jane Eyre')
        cls.book3 = Book.objects.create(title='Wuthering Heights')
        cls.book4 = Book.objects.create(title='Sense and Sensibility')

        cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
        cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
        cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
        cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)

        cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
        cls.book2.authors.add(cls.author1)
        cls.book3.authors.add(cls.author3)
        cls.book4.authors.add(cls.author4)

        cls.reader1 = Reader.objects.create(name='Amy')
        cls.reader2 = Reader.objects.create(name='Belinda')

        cls.reader1.books_read.add(cls.book1, cls.book4)
        cls.reader2.books_read.add(cls.book2, cls.book4)

    def test_m2m_forward(self):
        # NOTE: the assertNumQueries counts throughout this class assume
        # one base query plus one batched prefetch query per relation.
        with self.assertNumQueries(2):
            lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]

        normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
        self.assertEqual(lists, normal_lists)

    def test_m2m_reverse(self):
        with self.assertNumQueries(2):
            lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]

        normal_lists = [list(a.books.all()) for a in Author.objects.all()]
        self.assertEqual(lists, normal_lists)

    def test_foreignkey_forward(self):
        with self.assertNumQueries(2):
            books = [a.first_book for a in Author.objects.prefetch_related('first_book')]

        normal_books = [a.first_book for a in Author.objects.all()]
        self.assertEqual(books, normal_books)

    def test_foreignkey_reverse(self):
        with self.assertNumQueries(2):
            [list(b.first_time_authors.all())
             for b in Book.objects.prefetch_related('first_time_authors')]

        self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])

    def test_onetoone_reverse_no_match(self):
        # Regression for #17439
        with self.assertNumQueries(2):
            book = Book.objects.prefetch_related('bookwithyear').all()[0]
        # The cached "no related object" result must be reused: no query.
        with self.assertNumQueries(0):
            with self.assertRaises(BookWithYear.DoesNotExist):
                book.bookwithyear

    def test_survives_clone(self):
        with self.assertNumQueries(2):
            [list(b.first_time_authors.all())
             for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]

    def test_len(self):
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            len(qs)
            [list(b.first_time_authors.all()) for b in qs]

    def test_bool(self):
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            bool(qs)
            [list(b.first_time_authors.all()) for b in qs]

    def test_count(self):
        # count()/exists() on a prefetched relation must use the cache,
        # not issue fresh queries.
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            [b.first_time_authors.count() for b in qs]

    def test_exists(self):
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            [b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related('first_time_authors')
self.assertIn(qs[0], qs)
def test_clear(self):
"""
Test that we can clear the behavior by calling prefetch_related()
"""
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""
Test we can follow a m2m and another m2m
"""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
    def test_overriding_prefetch(self):
        """Overlapping lookups cost no extra queries, regardless of their order."""
        with self.assertNumQueries(3):
            qs = Author.objects.prefetch_related('books', 'books__read_by')
            lists = [[[six.text_type(r) for r in b.read_by.all()]
                      for b in a.books.all()]
                     for a in qs]
            self.assertEqual(lists,
                             [
                                 [["Amy"], ["Belinda"]],  # Charlotte - Poems, Jane Eyre
                                 [["Amy"]],  # Anne - Poems
                                 [["Amy"], []],  # Emily - Poems, Wuthering Heights
                                 [["Amy", "Belinda"]],  # Jane - Sense and Sense
                             ])
        with self.assertNumQueries(3):
            qs = Author.objects.prefetch_related('books__read_by', 'books')
            lists = [[[six.text_type(r) for r in b.read_by.all()]
                      for b in a.books.all()]
                     for a in qs]
            self.assertEqual(lists,
                             [
                                 [["Amy"], ["Belinda"]],  # Charlotte - Poems, Jane Eyre
                                 [["Amy"]],  # Anne - Poems
                                 [["Amy"], []],  # Emily - Poems, Wuthering Heights
                                 [["Amy", "Belinda"]],  # Jane - Sense and Sense
                             ])
    def test_get(self):
        """
        Test that objects retrieved with .get() get the prefetch behavior.
        """
        # Charlotte has two books, so the nested prefetch is actually exercised.
        with self.assertNumQueries(3):
            author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
            lists = [[six.text_type(r) for r in b.read_by.all()]
                     for b in author.books.all()]
            self.assertEqual(lists, [["Amy"], ["Belinda"]])  # Poems, Jane Eyre
    def test_foreign_key_then_m2m(self):
        """
        Test we can follow an m2m relation after a relation like ForeignKey
        that doesn't have many objects
        """
        # 2 queries: select_related folds first_book into the main query,
        # leaving one prefetch query for read_by.
        with self.assertNumQueries(2):
            qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
            lists = [[six.text_type(r) for r in a.first_book.read_by.all()]
                     for a in qs]
            self.assertEqual(lists, [["Amy"],
                                     ["Amy"],
                                     ["Amy"],
                                     ["Amy", "Belinda"]])
    def test_reverse_one_to_one_then_m2m(self):
        """
        Test that we can follow a m2m relation after going through
        the select_related reverse of an o2o.
        """
        qs = Author.objects.prefetch_related('bio__books').select_related('bio')
        # No Bio rows exist yet, so the bio__books prefetch issues no query.
        with self.assertNumQueries(1):
            list(qs.all())
        Bio.objects.create(author=self.author1)
        # With a Bio present, one extra query for the books prefetch.
        with self.assertNumQueries(2):
            list(qs.all())
def test_attribute_error(self):
qs = Reader.objects.all().prefetch_related('books_read__xyz')
with self.assertRaises(AttributeError) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related('authors__name')
with self.assertRaises(ValueError) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
self.assertIn("name", str(cm.exception))
    def test_forward_m2m_to_attr_conflict(self):
        """A to_attr clashing with an existing field raises without mutating data."""
        msg = 'to_attr=authors conflicts with a field on the Book model.'
        authors = Author.objects.all()
        with self.assertRaisesMessage(ValueError, msg):
            list(Book.objects.prefetch_related(
                Prefetch('authors', queryset=authors, to_attr='authors'),
            ))
        # Without the ValueError, an author was deleted due to the implicit
        # save of the relation assignment.
        self.assertEqual(self.book1.authors.count(), 3)
    def test_reverse_m2m_to_attr_conflict(self):
        """Same as the forward case, but through the reverse m2m accessor."""
        msg = 'to_attr=books conflicts with a field on the Author model.'
        poems = Book.objects.filter(title='Poems')
        with self.assertRaisesMessage(ValueError, msg):
            list(Author.objects.prefetch_related(
                Prefetch('books', queryset=poems, to_attr='books'),
            ))
        # Without the ValueError, a book was deleted due to the implicit
        # save of reverse relation assignment.
        self.assertEqual(self.author1.books.count(), 2)
    def test_m2m_then_reverse_fk_object_ids(self):
        """Each related key value appears only once in the prefetch query (no duplicates)."""
        with CaptureQueriesContext(connection) as queries:
            list(Book.objects.prefetch_related('authors__addresses'))
        sql = queries[-1]['sql']
        self.assertEqual(sql.count(self.author1.name), 1)
    def test_m2m_then_m2m_object_ids(self):
        """Each related key value appears only once in the prefetch query (no duplicates)."""
        with CaptureQueriesContext(connection) as queries:
            list(Book.objects.prefetch_related('authors__favorite_authors'))
        sql = queries[-1]['sql']
        self.assertEqual(sql.count(self.author1.name), 1)
    def test_m2m_then_reverse_one_to_one_object_ids(self):
        """Each related id appears only once in the prefetch query (no duplicates)."""
        with CaptureQueriesContext(connection) as queries:
            list(Book.objects.prefetch_related('authors__authorwithage'))
        sql = queries[-1]['sql']
        self.assertEqual(sql.count(str(self.author1.id)), 1, sql)
class CustomPrefetchTests(TestCase):
    """
    Tests for prefetch_related() driven by explicit Prefetch() objects:
    custom querysets, to_attr targets, nesting, and lookup ordering.
    """
    @classmethod
    def traverse_qs(cls, obj_iter, path):
        """
        Helper method that returns a list containing a list of the objects in the
        obj_iter. Then for each object in the obj_iter, the path will be
        recursively travelled and the found objects are added to the return value.
        """
        ret_val = []
        if hasattr(obj_iter, 'all'):
            obj_iter = obj_iter.all()
        try:
            iter(obj_iter)
        except TypeError:
            obj_iter = [obj_iter]
        for obj in obj_iter:
            rel_objs = []
            for part in path:
                if not part:
                    continue
                try:
                    related = getattr(obj, part[0])
                except ObjectDoesNotExist:
                    continue
                if related is not None:
                    rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
            ret_val.append((obj, rel_objs))
        return ret_val

    @classmethod
    def setUpTestData(cls):
        cls.person1 = Person.objects.create(name='Joe')
        cls.person2 = Person.objects.create(name='Mary')

        # Set main_room for each house before creating the next one for
        # databases where supports_nullable_unique_constraints is False.
        cls.house1 = House.objects.create(name='House 1', address='123 Main St', owner=cls.person1)
        cls.room1_1 = Room.objects.create(name='Dining room', house=cls.house1)
        cls.room1_2 = Room.objects.create(name='Lounge', house=cls.house1)
        cls.room1_3 = Room.objects.create(name='Kitchen', house=cls.house1)
        cls.house1.main_room = cls.room1_1
        cls.house1.save()
        cls.person1.houses.add(cls.house1)

        cls.house2 = House.objects.create(name='House 2', address='45 Side St', owner=cls.person1)
        cls.room2_1 = Room.objects.create(name='Dining room', house=cls.house2)
        cls.room2_2 = Room.objects.create(name='Lounge', house=cls.house2)
        cls.room2_3 = Room.objects.create(name='Kitchen', house=cls.house2)
        cls.house2.main_room = cls.room2_1
        cls.house2.save()
        cls.person1.houses.add(cls.house2)

        cls.house3 = House.objects.create(name='House 3', address='6 Downing St', owner=cls.person2)
        cls.room3_1 = Room.objects.create(name='Dining room', house=cls.house3)
        cls.room3_2 = Room.objects.create(name='Lounge', house=cls.house3)
        cls.room3_3 = Room.objects.create(name='Kitchen', house=cls.house3)
        cls.house3.main_room = cls.room3_1
        cls.house3.save()
        cls.person2.houses.add(cls.house3)

        cls.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=cls.person2)
        cls.room4_1 = Room.objects.create(name='Dining room', house=cls.house4)
        cls.room4_2 = Room.objects.create(name='Lounge', house=cls.house4)
        cls.room4_3 = Room.objects.create(name='Kitchen', house=cls.house4)
        cls.house4.main_room = cls.room4_1
        cls.house4.save()
        cls.person2.houses.add(cls.house4)

    def test_traverse_qs(self):
        """Sanity-check the traverse_qs() helper against direct iteration."""
        qs = Person.objects.prefetch_related('houses')
        # Previously both sides were accidentally wrapped in one-tuples by a
        # stray trailing comma; compare the lists directly instead.
        related_objs_normal = [list(p.houses.all()) for p in qs]
        related_objs_from_traverse = [[inner[0] for inner in o[1]]
                                      for o in self.traverse_qs(qs, [['houses']])]
        self.assertEqual(related_objs_normal, related_objs_from_traverse)

    def test_ambiguous(self):
        # Ambiguous: Lookup was already seen with a different queryset.
        with self.assertRaises(ValueError):
            self.traverse_qs(
                Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all())),
                [['houses', 'rooms']]
            )

        # Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
        with self.assertRaises(AttributeError):
            self.traverse_qs(
                Person.objects.prefetch_related(
                    'houses_lst__rooms',
                    Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
                ),
                [['houses', 'rooms']]
            )

        # Not ambiguous.
        self.traverse_qs(
            Person.objects.prefetch_related('houses__rooms', 'houses'),
            [['houses', 'rooms']]
        )

        self.traverse_qs(
            Person.objects.prefetch_related(
                'houses__rooms',
                Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
            ),
            [['houses', 'rooms']]
        )

    def test_m2m(self):
        # Control lookups.
        with self.assertNumQueries(2):
            lst1 = self.traverse_qs(
                Person.objects.prefetch_related('houses'),
                [['houses']]
            )

        # Test lookups.
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses')),
                [['houses']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
                [['houses_lst']]
            )
        self.assertEqual(lst1, lst2)

    def test_reverse_m2m(self):
        # Control lookups.
        with self.assertNumQueries(2):
            lst1 = self.traverse_qs(
                House.objects.prefetch_related('occupants'),
                [['occupants']]
            )

        # Test lookups.
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                House.objects.prefetch_related(Prefetch('occupants')),
                [['occupants']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
                [['occupants_lst']]
            )
        self.assertEqual(lst1, lst2)

    def test_m2m_through_fk(self):
        # Control lookups.
        with self.assertNumQueries(3):
            lst1 = self.traverse_qs(
                Room.objects.prefetch_related('house__occupants'),
                [['house', 'occupants']]
            )

        # Test lookups.
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Room.objects.prefetch_related(Prefetch('house__occupants')),
                [['house', 'occupants']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
                [['house', 'occupants_lst']]
            )
        self.assertEqual(lst1, lst2)

    def test_m2m_through_gfk(self):
        TaggedItem.objects.create(tag="houses", content_object=self.house1)
        TaggedItem.objects.create(tag="houses", content_object=self.house2)

        # Control lookups.
        with self.assertNumQueries(3):
            lst1 = self.traverse_qs(
                TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
                [['content_object', 'rooms']]
            )

        # Test lookups.
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                TaggedItem.objects.prefetch_related(
                    Prefetch('content_object'),
                    Prefetch('content_object__rooms', to_attr='rooms_lst')
                ),
                [['content_object', 'rooms_lst']]
            )
        self.assertEqual(lst1, lst2)

    def test_o2m_through_m2m(self):
        # Control lookups.
        with self.assertNumQueries(3):
            lst1 = self.traverse_qs(
                Person.objects.prefetch_related('houses', 'houses__rooms'),
                [['houses', 'rooms']]
            )

        # Test lookups.
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
                [['houses', 'rooms']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
                [['houses', 'rooms']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
                [['houses_lst', 'rooms']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(
                    Prefetch('houses', to_attr='houses_lst'),
                    Prefetch('houses_lst__rooms', to_attr='rooms_lst')
                ),
                [['houses_lst', 'rooms_lst']]
            )
        self.assertEqual(lst1, lst2)

    def test_generic_rel(self):
        bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
        TaggedItem.objects.create(content_object=bookmark, tag='django')
        TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')

        # Control lookups.
        with self.assertNumQueries(4):
            lst1 = self.traverse_qs(
                Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
                [['tags', 'content_object'], ['favorite_tags']]
            )

        # Test lookups.
        with self.assertNumQueries(4):
            lst2 = self.traverse_qs(
                Bookmark.objects.prefetch_related(
                    Prefetch('tags', to_attr='tags_lst'),
                    Prefetch('tags_lst__content_object'),
                    Prefetch('favorite_tags'),
                ),
                [['tags_lst', 'content_object'], ['favorite_tags']]
            )
        self.assertEqual(lst1, lst2)

    def test_traverse_single_item_property(self):
        # Control lookups.
        with self.assertNumQueries(5):
            lst1 = self.traverse_qs(
                Person.objects.prefetch_related(
                    'houses__rooms',
                    'primary_house__occupants__houses',
                ),
                [['primary_house', 'occupants', 'houses']]
            )

        # Test lookups.
        with self.assertNumQueries(5):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(
                    'houses__rooms',
                    Prefetch('primary_house__occupants', to_attr='occupants_lst'),
                    'primary_house__occupants_lst__houses',
                ),
                [['primary_house', 'occupants_lst', 'houses']]
            )
        self.assertEqual(lst1, lst2)

    def test_traverse_multiple_items_property(self):
        # Control lookups.
        with self.assertNumQueries(4):
            lst1 = self.traverse_qs(
                Person.objects.prefetch_related(
                    'houses',
                    'all_houses__occupants__houses',
                ),
                [['all_houses', 'occupants', 'houses']]
            )

        # Test lookups.
        with self.assertNumQueries(4):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(
                    'houses',
                    Prefetch('all_houses__occupants', to_attr='occupants_lst'),
                    'all_houses__occupants_lst__houses',
                ),
                [['all_houses', 'occupants_lst', 'houses']]
            )
        self.assertEqual(lst1, lst2)

    def test_custom_qs(self):
        # Test basic.
        with self.assertNumQueries(2):
            lst1 = list(Person.objects.prefetch_related('houses'))
        with self.assertNumQueries(2):
            lst2 = list(Person.objects.prefetch_related(
                Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
        self.assertEqual(
            self.traverse_qs(lst1, [['houses']]),
            self.traverse_qs(lst2, [['houses_lst']])
        )

        # Test queryset filtering.
        with self.assertNumQueries(2):
            lst2 = list(
                Person.objects.prefetch_related(
                    Prefetch(
                        'houses',
                        queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]),
                        to_attr='houses_lst',
                    )
                )
            )
        self.assertEqual(len(lst2[0].houses_lst), 1)
        self.assertEqual(lst2[0].houses_lst[0], self.house1)
        self.assertEqual(len(lst2[1].houses_lst), 1)
        self.assertEqual(lst2[1].houses_lst[0], self.house3)

        # Test flattened.
        with self.assertNumQueries(3):
            lst1 = list(Person.objects.prefetch_related('houses__rooms'))
        with self.assertNumQueries(3):
            lst2 = list(Person.objects.prefetch_related(
                Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
        self.assertEqual(
            self.traverse_qs(lst1, [['houses', 'rooms']]),
            self.traverse_qs(lst2, [['houses', 'rooms_lst']])
        )

        # Test inner select_related.
        with self.assertNumQueries(3):
            lst1 = list(Person.objects.prefetch_related('houses__owner'))
        with self.assertNumQueries(2):
            lst2 = list(Person.objects.prefetch_related(
                Prefetch('houses', queryset=House.objects.select_related('owner'))))
        self.assertEqual(
            self.traverse_qs(lst1, [['houses', 'owner']]),
            self.traverse_qs(lst2, [['houses', 'owner']])
        )

        # Test inner prefetch.
        inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
        houses_qs_prf = House.objects.prefetch_related(
            Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
        with self.assertNumQueries(4):
            lst2 = list(Person.objects.prefetch_related(
                Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'),
                Prefetch('houses_lst__rooms_lst__main_room_of')
            ))

        self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
        self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
        self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
        self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
        self.assertEqual(len(lst2[1].houses_lst), 0)

        # Test ForwardManyToOneDescriptor.
        houses = House.objects.select_related('owner')
        with self.assertNumQueries(6):
            rooms = Room.objects.all().prefetch_related('house')
            lst1 = self.traverse_qs(rooms, [['house', 'owner']])
        with self.assertNumQueries(2):
            rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
            lst2 = self.traverse_qs(rooms, [['house', 'owner']])
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(2):
            houses = House.objects.select_related('owner')
            rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
            lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
        self.assertEqual(lst1, lst2)
        room = Room.objects.all().prefetch_related(
            Prefetch('house', queryset=houses.filter(address='DoesNotExist'))
        ).first()
        with self.assertRaises(ObjectDoesNotExist):
            getattr(room, 'house')
        room = Room.objects.all().prefetch_related(
            Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')
        ).first()
        self.assertIsNone(room.house_attr)
        rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
        with self.assertNumQueries(2):
            getattr(rooms.first().house, 'name')
        with self.assertNumQueries(3):
            getattr(rooms.first().house, 'address')

        # Test ReverseOneToOneDescriptor.
        houses = House.objects.select_related('owner')
        with self.assertNumQueries(6):
            rooms = Room.objects.all().prefetch_related('main_room_of')
            lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
        with self.assertNumQueries(2):
            rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
            lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(2):
            rooms = list(
                Room.objects.all().prefetch_related(
                    Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')
                )
            )
            lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
        self.assertEqual(lst1, lst2)
        room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
            Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))
        ).first()
        with self.assertRaises(ObjectDoesNotExist):
            getattr(room, 'main_room_of')
        room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
            Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')
        ).first()
        self.assertIsNone(room.main_room_of_attr)

        # The custom queryset filters should be applied to the queryset
        # instance returned by the manager.
        person = Person.objects.prefetch_related(
            Prefetch('houses', queryset=House.objects.filter(name='House 1')),
        ).get(pk=self.person1.pk)
        self.assertEqual(
            list(person.houses.all()),
            list(person.houses.all().all()),
        )

    def test_nested_prefetch_related_are_not_overwritten(self):
        # Regression test for #24873
        houses_2 = House.objects.prefetch_related(Prefetch('rooms'))
        persons = Person.objects.prefetch_related(Prefetch('houses', queryset=houses_2))
        houses = House.objects.prefetch_related(Prefetch('occupants', queryset=persons))
        list(houses)  # queryset must be evaluated once to reproduce the bug.
        self.assertEqual(
            houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
            self.room2_1
        )

    def test_apply_rel_filters_deprecation_shim(self):
        """The deprecation shim warns once per instance when _apply_rel_filters is missing."""
        # Simulate a missing `_apply_rel_filters` method.
        del Person.houses.related_manager_cls._apply_rel_filters
        # Also remove `get_queryset` as it rely on `_apply_rel_filters`.
        del Person.houses.related_manager_cls.get_queryset
        try:
            with warnings.catch_warnings(record=True) as warns:
                warnings.simplefilter('always')
                list(Person.objects.prefetch_related(
                    Prefetch('houses', queryset=House.objects.filter(name='House 1'))
                ))
        finally:
            # Deleting `related_manager_cls` will force the creation of a new
            # class since it's a `cached_property`.
            del Person.houses.related_manager_cls
        msg = (
            'The `django.db.models.fields.related_descriptors.ManyRelatedManager` class '
            'must implement a `_apply_rel_filters()` method that accepts a `QuerySet` as '
            'its single argument and returns an appropriately filtered version of it.'
        )
        self.assertEqual(len(warns), 2)  # Once per person.
        self.assertEqual(str(warns[0].message), msg)
        # Previously this line re-checked warns[0]; verify the second warning too.
        self.assertEqual(str(warns[1].message), msg)
class DefaultManagerTests(TestCase):
    """Prefetching must bypass per-object default-manager queries on access."""
    def setUp(self):
        self.qual1 = Qualification.objects.create(name="BA")
        self.qual2 = Qualification.objects.create(name="BSci")
        self.qual3 = Qualification.objects.create(name="MA")
        self.qual4 = Qualification.objects.create(name="PhD")
        self.teacher1 = Teacher.objects.create(name="Mr Cleese")
        self.teacher2 = Teacher.objects.create(name="Mr Idle")
        self.teacher3 = Teacher.objects.create(name="Mr Chapman")
        self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
        self.teacher2.qualifications.add(self.qual1)
        self.teacher3.qualifications.add(self.qual2)
        self.dept1 = Department.objects.create(name="English")
        self.dept2 = Department.objects.create(name="Physics")
        self.dept1.teachers.add(self.teacher1, self.teacher2)
        self.dept2.teachers.add(self.teacher1, self.teacher3)
    def test_m2m_then_m2m(self):
        with self.assertNumQueries(3):
            # When we prefetch the teachers, and force the query, we don't want
            # the default manager on teachers to immediately get all the related
            # qualifications, since this will do one query per teacher.
            qs = Department.objects.prefetch_related('teachers')
            depts = "".join("%s department: %s\n" %
                            (dept.name, ", ".join(six.text_type(t) for t in dept.teachers.all()))
                            for dept in qs)
            self.assertEqual(depts,
                             "English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
                             "Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
    """Prefetching across GenericForeignKey / GenericRelation lookups."""
    @classmethod
    def setUpTestData(cls):
        book1 = Book.objects.create(title="Winnie the Pooh")
        book2 = Book.objects.create(title="Do you like green eggs and spam?")
        book3 = Book.objects.create(title="Three Men In A Boat")
        reader1 = Reader.objects.create(name="me")
        reader2 = Reader.objects.create(name="you")
        reader3 = Reader.objects.create(name="someone")
        book1.read_by.add(reader1, reader2)
        book2.read_by.add(reader2)
        book3.read_by.add(reader3)
        cls.book1, cls.book2, cls.book3 = book1, book2, book3
        cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3
    def test_prefetch_GFK(self):
        """GFK prefetch batches one query per target model, not per object."""
        TaggedItem.objects.create(tag="awesome", content_object=self.book1)
        TaggedItem.objects.create(tag="great", content_object=self.reader1)
        TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
        TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
        # 1 for TaggedItem table, 1 for Book table, 1 for Reader table
        with self.assertNumQueries(3):
            qs = TaggedItem.objects.prefetch_related('content_object')
            list(qs)
    def test_prefetch_GFK_nonint_pk(self):
        """GFK prefetch works when the target model has a non-integer pk."""
        Comment.objects.create(comment="awesome", content_object=self.book1)
        # 1 for Comment table, 1 for Book table
        with self.assertNumQueries(2):
            qs = Comment.objects.prefetch_related('content_object')
            [c.content_object for c in qs]
    def test_traverse_GFK(self):
        """
        Test that we can traverse a 'content_object' with prefetch_related() and
        get to related objects on the other side (assuming it is suitably
        filtered)
        """
        TaggedItem.objects.create(tag="awesome", content_object=self.book1)
        TaggedItem.objects.create(tag="awesome", content_object=self.book2)
        TaggedItem.objects.create(tag="awesome", content_object=self.book3)
        TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
        TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
        ct = ContentType.objects.get_for_model(Book)
        # We get 3 queries - 1 for main query, 1 for content_objects since they
        # all use the same table, and 1 for the 'read_by' relation.
        with self.assertNumQueries(3):
            # If we limit to books, we know that they will have 'read_by'
            # attributes, so the following makes sense:
            qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
            readers_of_awesome_books = {r.name for tag in qs
                                        for r in tag.content_object.read_by.all()}
            self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
    def test_nullable_GFK(self):
        """A null GFK (created_by unset) doesn't break or add queries."""
        TaggedItem.objects.create(tag="awesome", content_object=self.book1,
                                  created_by=self.reader1)
        TaggedItem.objects.create(tag="great", content_object=self.book2)
        TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
        with self.assertNumQueries(2):
            result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
        self.assertEqual(result,
                         [t.created_by for t in TaggedItem.objects.all()])
    def test_generic_relation(self):
        bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
        TaggedItem.objects.create(content_object=bookmark, tag='django')
        TaggedItem.objects.create(content_object=bookmark, tag='python')
        with self.assertNumQueries(2):
            tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
                    for t in b.tags.all()]
            self.assertEqual(sorted(tags), ["django", "python"])
    def test_charfield_GFK(self):
        b = Bookmark.objects.create(url='http://www.djangoproject.com/')
        TaggedItem.objects.create(content_object=b, tag='django')
        TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
        with self.assertNumQueries(3):
            bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
            self.assertEqual(sorted([i.tag for i in bookmark.tags.all()]), ["django", "python"])
            self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
    def test_custom_queryset(self):
        """A Prefetch with a filtered queryset caches only the matching tags."""
        bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
        django_tag = TaggedItem.objects.create(content_object=bookmark, tag='django')
        TaggedItem.objects.create(content_object=bookmark, tag='python')
        with self.assertNumQueries(2):
            bookmark = Bookmark.objects.prefetch_related(
                Prefetch('tags', TaggedItem.objects.filter(tag='django')),
            ).get()
        with self.assertNumQueries(0):
            self.assertEqual(list(bookmark.tags.all()), [django_tag])
        # The custom queryset filters should be applied to the queryset
        # instance returned by the manager.
        self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))
class MultiTableInheritanceTest(TestCase):
    """Prefetching across multi-table-inheritance links (parent/child models)."""
    @classmethod
    def setUpTestData(cls):
        cls.book1 = BookWithYear.objects.create(title='Poems', published_year=2010)
        cls.book2 = BookWithYear.objects.create(title='More poems', published_year=2011)
        cls.author1 = AuthorWithAge.objects.create(name='Jane', first_book=cls.book1, age=50)
        cls.author2 = AuthorWithAge.objects.create(name='Tom', first_book=cls.book1, age=49)
        cls.author3 = AuthorWithAge.objects.create(name='Robert', first_book=cls.book2, age=48)
        cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
        cls.book2.aged_authors.add(cls.author2, cls.author3)
        cls.br1 = BookReview.objects.create(book=cls.book1, notes='review book1')
        cls.br2 = BookReview.objects.create(book=cls.book2, notes='review book2')
    def test_foreignkey(self):
        with self.assertNumQueries(2):
            qs = AuthorWithAge.objects.prefetch_related('addresses')
            addresses = [[six.text_type(address) for address in obj.addresses.all()]
                         for obj in qs]
        self.assertEqual(addresses, [[six.text_type(self.author_address)], [], []])
    def test_foreignkey_to_inherited(self):
        with self.assertNumQueries(2):
            qs = BookReview.objects.prefetch_related('book')
            titles = [obj.book.title for obj in qs]
        self.assertEqual(titles, ["Poems", "More poems"])
    def test_m2m_to_inheriting_model(self):
        """Prefetched m2m results must match the unprefetched ones, both directions."""
        qs = AuthorWithAge.objects.prefetch_related('books_with_year')
        with self.assertNumQueries(2):
            lst = [[six.text_type(book) for book in author.books_with_year.all()]
                   for author in qs]
        qs = AuthorWithAge.objects.all()
        lst2 = [[six.text_type(book) for book in author.books_with_year.all()]
                for author in qs]
        self.assertEqual(lst, lst2)
        qs = BookWithYear.objects.prefetch_related('aged_authors')
        with self.assertNumQueries(2):
            lst = [[six.text_type(author) for author in book.aged_authors.all()]
                   for book in qs]
        qs = BookWithYear.objects.all()
        lst2 = [[six.text_type(author) for author in book.aged_authors.all()]
                for book in qs]
        self.assertEqual(lst, lst2)
    def test_parent_link_prefetch(self):
        with self.assertNumQueries(2):
            [a.author for a in AuthorWithAge.objects.prefetch_related('author')]
    @override_settings(DEBUG=True)
    def test_child_link_prefetch(self):
        with self.assertNumQueries(2):
            l = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
        # Regression for #18090: the prefetching query must include an IN clause.
        # Note that on Oracle the table name is upper case in the generated SQL,
        # thus the .lower() call.
        self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
        self.assertIn(' IN ', connection.queries[-1]['sql'])
        self.assertEqual(l, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
    # NOTE(review): per the class name, these relations join on a non-pk
    # field (FK with to_field) — confirm against the model definitions.
    @classmethod
    def setUpTestData(cls):
        cls.book = Book.objects.create(title='Poems')
        cls.author1 = Author.objects.create(name='Jane', first_book=cls.book)
        cls.author2 = Author.objects.create(name='Tom', first_book=cls.book)
        cls.author3 = Author.objects.create(name='Robert', first_book=cls.book)
        cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
        FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
        FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3)
        FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1)
    def test_foreignkey(self):
        with self.assertNumQueries(2):
            qs = Author.objects.prefetch_related('addresses')
            addresses = [[six.text_type(address) for address in obj.addresses.all()]
                         for obj in qs]
        self.assertEqual(addresses, [[six.text_type(self.author_address)], [], []])
    def test_m2m(self):
        # Both directions of the m2m are prefetched in one query each.
        with self.assertNumQueries(3):
            qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
            favorites = [(
                [six.text_type(i_like) for i_like in author.favorite_authors.all()],
                [six.text_type(likes_me) for likes_me in author.favors_me.all()]
            ) for author in qs]
            self.assertEqual(
                favorites,
                [
                    ([six.text_type(self.author2)], [six.text_type(self.author3)]),
                    ([six.text_type(self.author3)], [six.text_type(self.author1)]),
                    ([six.text_type(self.author1)], [six.text_type(self.author2)])
                ]
            )
class LookupOrderingTest(TestCase):
    """
    Test cases that demonstrate that ordering of lookups is important, and
    ensure it is preserved.
    """
    def setUp(self):
        self.person1 = Person.objects.create(name="Joe")
        self.person2 = Person.objects.create(name="Mary")
        # Set main_room for each house before creating the next one for
        # databases where supports_nullable_unique_constraints is False.
        self.house1 = House.objects.create(address="123 Main St")
        self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
        self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
        self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
        self.house1.main_room = self.room1_1
        self.house1.save()
        self.person1.houses.add(self.house1)
        self.house2 = House.objects.create(address="45 Side St")
        self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
        self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
        self.house2.main_room = self.room2_1
        self.house2.save()
        self.person1.houses.add(self.house2)
        self.house3 = House.objects.create(address="6 Downing St")
        self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
        self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
        self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
        self.house3.main_room = self.room3_1
        self.house3.save()
        self.person2.houses.add(self.house3)
        self.house4 = House.objects.create(address="7 Regents St")
        self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
        self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
        self.house4.main_room = self.room4_1
        self.house4.save()
        self.person2.houses.add(self.house4)
    def test_order(self):
        """Lookups are resolved in the order given, so 'houses' feeds 'primary_house'."""
        # 4 queries: people, houses, rooms, occupants — presumably
        # 'primary_house' reuses the already-prefetched houses; confirm.
        with self.assertNumQueries(4):
            # The following two queries must be done in the same order as written,
            # otherwise 'primary_house' will cause non-prefetched lookups
            qs = Person.objects.prefetch_related('houses__rooms',
                                                 'primary_house__occupants')
            [list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
    """Prefetching across a nullable self-referential relation (Employee.boss)."""
    @classmethod
    def setUpTestData(cls):
        boss = Employee.objects.create(name="Peter")
        Employee.objects.create(name="Joe", boss=boss)
        Employee.objects.create(name="Angela", boss=boss)
    def test_traverse_nullable(self):
        # Because we use select_related() for 'boss', it doesn't need to be
        # prefetched, but we can still traverse it although it contains some nulls
        with self.assertNumQueries(2):
            qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
            co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
                        for e in qs]
        qs2 = Employee.objects.select_related('boss')
        co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
        self.assertEqual(co_serfs, co_serfs2)
    def test_prefetch_nullable(self):
        # One for main employee, one for boss, one for serfs
        with self.assertNumQueries(3):
            qs = Employee.objects.prefetch_related('boss__serfs')
            co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
                        for e in qs]
        qs2 = Employee.objects.all()
        co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
        self.assertEqual(co_serfs, co_serfs2)
    def test_in_bulk(self):
        """
        In-bulk does correctly prefetch objects by not using .iterator()
        directly.
        """
        boss1 = Employee.objects.create(name="Peter")
        boss2 = Employee.objects.create(name="Jack")
        with self.assertNumQueries(2):
            # Check that prefetch is done and it does not cause any errors.
            bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
            for b in bulk.values():
                list(b.serfs.all())
class MultiDbTests(TestCase):
    """prefetch_related() must run all of its queries on the queryset's own
    database (the one selected with .using()), not on 'default'."""

    # Tests in this class query both the 'default' and 'other' databases.
    multi_db = True

    def test_using_is_honored_m2m(self):
        B = Book.objects.using('other')
        A = Author.objects.using('other')
        book1 = B.create(title="Poems")
        book2 = B.create(title="Jane Eyre")
        book3 = B.create(title="Wuthering Heights")
        book4 = B.create(title="Sense and Sensibility")

        author1 = A.create(name="Charlotte", first_book=book1)
        author2 = A.create(name="Anne", first_book=book1)
        author3 = A.create(name="Emily", first_book=book1)
        author4 = A.create(name="Jane", first_book=book4)

        book1.authors.add(author1, author2, author3)
        book2.authors.add(author1)
        book3.authors.add(author3)
        book4.authors.add(author4)

        # Forward
        qs1 = B.prefetch_related('authors')
        with self.assertNumQueries(2, using='other'):
            books = "".join("%s (%s)\n" %
                            (book.title, ", ".join(a.name for a in book.authors.all()))
                            for book in qs1)
        self.assertEqual(books,
                         "Poems (Charlotte, Anne, Emily)\n"
                         "Jane Eyre (Charlotte)\n"
                         "Wuthering Heights (Emily)\n"
                         "Sense and Sensibility (Jane)\n")

        # Reverse
        qs2 = A.prefetch_related('books')
        with self.assertNumQueries(2, using='other'):
            authors = "".join("%s: %s\n" %
                              (author.name, ", ".join(b.title for b in author.books.all()))
                              for author in qs2)
        self.assertEqual(authors,
                         "Charlotte: Poems, Jane Eyre\n"
                         "Anne: Poems\n"
                         "Emily: Poems, Wuthering Heights\n"
                         "Jane: Sense and Sensibility\n")

    def test_using_is_honored_fkey(self):
        B = Book.objects.using('other')
        A = Author.objects.using('other')
        book1 = B.create(title="Poems")
        book2 = B.create(title="Sense and Sensibility")

        A.create(name="Charlotte Bronte", first_book=book1)
        A.create(name="Jane Austen", first_book=book2)

        # Forward
        with self.assertNumQueries(2, using='other'):
            books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
        self.assertEqual("Poems, Sense and Sensibility", books)

        # Reverse
        with self.assertNumQueries(2, using='other'):
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related('first_time_authors'))
        self.assertEqual(books,
                         "Poems (Charlotte Bronte)\n"
                         "Sense and Sensibility (Jane Austen)\n")

    def test_using_is_honored_inheritance(self):
        B = BookWithYear.objects.using('other')
        A = AuthorWithAge.objects.using('other')
        book1 = B.create(title="Poems", published_year=2010)
        B.create(title="More poems", published_year=2011)
        A.create(name='Jane', first_book=book1, age=50)
        A.create(name='Tom', first_book=book1, age=49)

        # parent link
        with self.assertNumQueries(2, using='other'):
            authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
        self.assertEqual(authors, "Jane, Tom")

        # child link
        with self.assertNumQueries(2, using='other'):
            ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
        self.assertEqual(ages, "50, 49")

    def test_using_is_honored_custom_qs(self):
        B = Book.objects.using('other')
        A = Author.objects.using('other')
        book1 = B.create(title="Poems")
        book2 = B.create(title="Sense and Sensibility")

        A.create(name="Charlotte Bronte", first_book=book1)
        A.create(name="Jane Austen", first_book=book2)

        # Implicit hinting
        with self.assertNumQueries(2, using='other'):
            prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related(prefetch))
        self.assertEqual(books,
                         "Poems (Charlotte Bronte)\n"
                         "Sense and Sensibility (Jane Austen)\n")

        # Explicit using on the same db.
        with self.assertNumQueries(2, using='other'):
            prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related(prefetch))
        self.assertEqual(books,
                         "Poems (Charlotte Bronte)\n"
                         "Sense and Sensibility (Jane Austen)\n")

        # Explicit using on a different db.
        with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
            prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related(prefetch))
        # The 'default' database holds no authors, so every prefetch list is empty.
        self.assertEqual(books,
                         "Poems ()\n"
                         "Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
    """Regression test for #19607: prefetching a FK together with its
    reverse accessor must not raise."""

    def setUp(self):
        lesson_rows = (
            (1, 'einfach', 'simple'),
            (2, 'schwierig', 'difficult'),
        )
        for pk, name1, name2 in lesson_rows:
            LessonEntry.objects.create(id=pk, name1=name1, name2=name2)

        word_rows = (
            (1, 1, 'einfach'),
            (2, 1, 'simple'),
            (3, 2, 'schwierig'),
            (4, 2, 'difficult'),
        )
        for pk, lesson_pk, word in word_rows:
            WordEntry.objects.create(id=pk, lesson_entry_id=lesson_pk, name=word)

    def test_bug(self):
        # Evaluating the queryset is the whole test: it must not raise.
        list(WordEntry.objects.prefetch_related(
            'lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
    """Regression test for #21410: prefetching a FK and an m2m in the same
    call must not raise."""

    def setUp(self):
        titles = ("Poems", "Jane Eyre", "Wuthering Heights", "Sense and Sensibility")
        self.book1, self.book2, self.book3, self.book4 = [
            Book.objects.create(title=title) for title in titles
        ]
        self.author1 = Author2.objects.create(name="Charlotte", first_book=self.book1)
        self.author2 = Author2.objects.create(name="Anne", first_book=self.book1)
        self.author3 = Author2.objects.create(name="Emily", first_book=self.book1)
        self.author4 = Author2.objects.create(name="Jane", first_book=self.book4)

        self.author1.favorite_books.add(self.book1, self.book2, self.book3)
        self.author2.favorite_books.add(self.book1)
        self.author3.favorite_books.add(self.book2)
        self.author4.favorite_books.add(self.book3)

    def test_bug(self):
        # Evaluating the queryset is the whole test: it must not raise.
        list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
    """Regression test for #21760: the inner prefetch queryset must not
    contain a JOIN."""

    def setUp(self):
        self.rooms = []
        for _house_number in range(3):
            house = House.objects.create()
            first_room = Room.objects.create(house=house)
            self.rooms.append(first_room)
            for _extra_room in range(2):
                self.rooms.append(Room.objects.create(house=house))
            # Set main_room for each house before creating the next one for
            # databases where supports_nullable_unique_constraints is False.
            house.main_room = first_room
            house.save()

    def test_bug(self):
        prefetcher = get_prefetcher(self.rooms[0], 'house')[0]
        inner_queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
        self.assertNotIn(' JOIN ', force_text(inner_queryset.query))
class Ticket25546Tests(TestCase):
    """
    Nested prefetch_related() shouldn't trigger duplicate queries for the same
    lookup.

    Before, prefetch queries were for 'addresses', 'first_time_authors', and
    'first_time_authors__addresses'. The last query is the duplicate.
    """
    @classmethod
    def setUpTestData(cls):
        cls.book1, cls.book2 = [
            Book.objects.create(title='book1'),
            Book.objects.create(title='book2'),
        ]
        cls.author11, cls.author12, cls.author21 = [
            Author.objects.create(first_book=cls.book1, name='Author11'),
            Author.objects.create(first_book=cls.book1, name='Author12'),
            Author.objects.create(first_book=cls.book2, name='Author21'),
        ]
        # Only the two 'Happy place' addresses satisfy the nested prefetch
        # filter used in the tests below.
        cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
            AuthorAddress.objects.create(author=cls.author11, address='Happy place'),
            AuthorAddress.objects.create(author=cls.author12, address='Haunted house'),
            AuthorAddress.objects.create(author=cls.author21, address='Happy place'),
        ]

    def test_prefetch(self):
        # Exactly 3 queries: books, authors, filtered addresses — no duplicate
        # 'first_time_authors__addresses' query.
        with self.assertNumQueries(3):
            books = Book.objects.filter(
                title__in=['book1', 'book2'],
            ).prefetch_related(
                Prefetch(
                    'first_time_authors',
                    Author.objects.prefetch_related(
                        Prefetch(
                            'addresses',
                            AuthorAddress.objects.filter(address='Happy place'),
                        )
                    ),
                ),
            )
            book1, book2 = list(books)

        # Everything below must be served from the prefetch caches.
        with self.assertNumQueries(0):
            self.assertListEqual(list(book1.first_time_authors.all()), [self.author11, self.author12])
            self.assertListEqual(list(book2.first_time_authors.all()), [self.author21])

            self.assertListEqual(list(book1.first_time_authors.all()[0].addresses.all()), [self.author1_address1])
            self.assertListEqual(list(book1.first_time_authors.all()[1].addresses.all()), [])
            self.assertListEqual(list(book2.first_time_authors.all()[0].addresses.all()), [self.author2_address1])

            # Chained .all().all() must reuse the cache as well.
            self.assertEqual(
                list(book1.first_time_authors.all()), list(book1.first_time_authors.all().all())
            )
            self.assertEqual(
                list(book2.first_time_authors.all()), list(book2.first_time_authors.all().all())
            )
            self.assertEqual(
                list(book1.first_time_authors.all()[0].addresses.all()),
                list(book1.first_time_authors.all()[0].addresses.all().all())
            )
            self.assertEqual(
                list(book1.first_time_authors.all()[1].addresses.all()),
                list(book1.first_time_authors.all()[1].addresses.all().all())
            )
            self.assertEqual(
                list(book2.first_time_authors.all()[0].addresses.all()),
                list(book2.first_time_authors.all()[0].addresses.all().all())
            )

    def test_prefetch_with_to_attr(self):
        # Same structure as test_prefetch, but results land on plain list
        # attributes via to_attr instead of the related managers.
        with self.assertNumQueries(3):
            books = Book.objects.filter(
                title__in=['book1', 'book2'],
            ).prefetch_related(
                Prefetch(
                    'first_time_authors',
                    Author.objects.prefetch_related(
                        Prefetch(
                            'addresses',
                            AuthorAddress.objects.filter(address='Happy place'),
                            to_attr='happy_place',
                        )
                    ),
                    to_attr='first_authors',
                ),
            )
            book1, book2 = list(books)

        with self.assertNumQueries(0):
            self.assertListEqual(book1.first_authors, [self.author11, self.author12])
            self.assertListEqual(book2.first_authors, [self.author21])

            self.assertListEqual(book1.first_authors[0].happy_place, [self.author1_address1])
            self.assertListEqual(book1.first_authors[1].happy_place, [])
            self.assertListEqual(book2.first_authors[0].happy_place, [self.author2_address1])
| |
from datetime import datetime, timedelta
import numpy as np
import time
from GpsDataAnalyzer import utils
# Cutoff on the best mean point-to-point distance (in the units returned by
# utils.calculate_distance — presumably meters, TODO confirm): offsets whose
# mean distance exceeds this are rejected as "no line-up".
MEAN_DISTANCE_THRESHOLD = 100
def find_lineup(set1, set2):
    """
    Optimized algorithm to find the approximate time offset between two GPS data sets.

    This algorithm assigns the data set that starts later as primary and the
    other one as secondary. The offset is then applied to the secondary data
    set. After mapping the timestamps to the corresponding points from each set,
    it finds the range that has the most overlapping timestamps between the two
    sets and then finds the optimal offset using the middle of that range
    as the starting point.

    Args:
        set1: GpsDataSet object
        set2: GpsDataSet object

    Returns:
        Tuple of two datetimes, the start time for set 1 and for set 2 given
        the calculated offset. If no lineup is found, returns (None, None).
    """
    # find primary data set (the one with the later start time)
    set1_start_time = set1.gps_data_list[0].time
    set2_start_time = set2.gps_data_list[0].time
    set1_end_time = set1.gps_data_list[-1].time
    set2_end_time = set2.gps_data_list[-1].time
    if set1_start_time > set2_start_time:
        later_start_time = utils.round_time(set1_start_time)
        primary_set = set1
        secondary_set = set2
    else:
        later_start_time = utils.round_time(set2_start_time)
        primary_set = set2
        secondary_set = set1

    # create dict that maps rounded times to points
    primary_time_points_mapping = create_time_to_points_mapping(primary_set)
    secondary_time_points_mapping = create_time_to_points_mapping(secondary_set)

    # how many offsets to check so, for 50, check offsets (-25,25); should be even
    offset_range_length = 50
    # points to check around each offset, for 100, check points (-50,50); should be even
    point_checking_range_length = 100
    # span length is total length of the span of points to check around the offsets
    span_length = offset_range_length + point_checking_range_length

    # Candidate window starts span_length seconds before the overlap begins
    # and ends where the two recordings stop overlapping.
    range_start = utils.round_time(max(set1_start_time, set2_start_time)) - timedelta(seconds=span_length)
    range_end = utils.round_time(min(set1_end_time, set2_end_time))
    range_optimization_size = offset_range_length
    best_range_start = find_best_data_range(primary_time_points_mapping,
                                            secondary_time_points_mapping,
                                            range_optimization_size,
                                            range_start,
                                            range_end)

    # find best offset starting at the middle of range with most valid points
    range_middle = best_range_start + timedelta(seconds=range_optimization_size//2)
    optimal_offset = find_optimal_offset(primary_time_points_mapping,
                                         secondary_time_points_mapping,
                                         range_middle,
                                         offset_range_length,
                                         point_checking_range_length)

    if optimal_offset is None:
        print("no optimal line-up for these two data sets; check if correct files are being used")
        return (None, None)

    # The offset always shifts the secondary (earlier-starting) set, so which
    # tuple slot gets the shifted time depends on which input was primary.
    if primary_set == set1:
        print("Optimal offset: set 2 is %s seconds from set 1" % optimal_offset)
        return (later_start_time, later_start_time + timedelta(seconds=optimal_offset))
    else:
        print("Optimal offset: set 1 is %s seconds from set 2" % optimal_offset)
        return (later_start_time + timedelta(seconds=optimal_offset), later_start_time)
def find_lineup_no_optimization(set1, set2):
    """
    Find the approximate offset between two GPS data sets without range start optimization.

    This algorithm first identifies a primary data set and a secondary one based
    on which starts later. The offset is then applied to the secondary data set.
    After mapping the timestamps to the corresponding points from each set,
    it finds the optimal offset using the later start time of the two sets as
    the starting point. Requires checking of more values in the set to obtain
    an accurate offset, compared to the optimized version.

    Args:
        set1: GpsDataSet object
        set2: GpsDataSet object

    Returns:
        Tuple of two datetimes, the start time for set 1 and for set 2 given
        the calculated offset. If no lineup is found, returns (None, None).
    """
    set1_start_time = set1.gps_data_list[0].time
    set2_start_time = set2.gps_data_list[0].time

    # The set that starts later is "primary"; the offset is applied to the other.
    if set1_start_time > set2_start_time:
        later_start_time = utils.round_time(set1_start_time)
        primary_set = set1
        secondary_set = set2
    else:
        later_start_time = utils.round_time(set2_start_time)
        primary_set = set2
        secondary_set = set1

    # create dicts that map rounded times to points
    primary_time_points_mapping = create_time_to_points_mapping(primary_set)
    secondary_time_points_mapping = create_time_to_points_mapping(secondary_set)

    offset_range_length = 200  # how many offsets to check so, for 200, check offsets (-100,100)
    point_checking_range_length = 200  # points to check around each offset, for 200, check points (-100,100)

    # find best offset
    optimal_offset = find_optimal_offset(primary_time_points_mapping,
                                         secondary_time_points_mapping,
                                         later_start_time,
                                         offset_range_length,
                                         point_checking_range_length)
    if optimal_offset is None:
        print("no optimal line-up for these two data sets; check if correct files are being used")
        return (None, None)

    # Identity check: primary_set is literally one of the two arguments.
    if primary_set is set1:
        print("Optimal offset: set 2 is %s seconds from set 1" % optimal_offset)
        return (later_start_time, later_start_time + timedelta(seconds=optimal_offset))
    else:
        print("Optimal offset: set 1 is %s seconds from set 2" % optimal_offset)
        return (later_start_time + timedelta(seconds=optimal_offset), later_start_time)
def find_lineup_naive(set1, set2):
    """
    Initial naive implementation to find lineup between two data sets.

    Finds the point in the secondary data set closest to the start of the
    primary data set, and then uses those starting indices to find the offset
    with the lowest mean distance in the range of points around it index-wise
    in the data set.

    Args:
        set1: GpsDataSet object
        set2: GpsDataSet object

    Returns:
        The indexes of the starting points in each data set.
    """
    sets = [set1, set2]
    starting_indexes = [0, 0]
    set1_start_time = set1.gps_data_list[0].time
    set2_start_time = set2.gps_data_list[0].time
    if set1_start_time > set2_start_time:
        later_start_time = set1_start_time
        primary_set_index = 0
        secondary_set_index = 1
    else:
        later_start_time = set2_start_time
        primary_set_index = 1
        secondary_set_index = 0

    # Index of the secondary point whose timestamp is closest to the primary
    # set's start time; used as the center of the offset search below.
    smallest_time_difference = None
    for i, data_point in enumerate(sets[secondary_set_index].gps_data_list):
        time_difference = abs((data_point.time - later_start_time).total_seconds())
        if smallest_time_difference is None or time_difference < smallest_time_difference:
            smallest_time_difference = time_difference
            starting_indexes[secondary_set_index] = i

    best_index = 0
    best_mean_distance = None
    for offset in range(-100, 100):
        moving_index = starting_indexes[secondary_set_index] + offset
        if moving_index < 0:
            # Negative slice starts would wrap around; skip them (matches the
            # original guard, which left `distances` empty in this case).
            continue
        distances = []
        primary_points = sets[primary_set_index].gps_data_list
        secondary_points = sets[secondary_set_index].gps_data_list[moving_index:]
        for primary_point, secondary_point in zip(primary_points, secondary_points):
            location1 = (primary_point.latitude, primary_point.longitude)
            location2 = (secondary_point.latitude, secondary_point.longitude)
            distances.append(utils.calculate_distance(location1, location2))
        if distances:
            # Compute the mean once (it was previously evaluated twice per
            # accepted offset) and use `is None` rather than `== None`.
            mean_distance = np.mean(distances)
            if best_mean_distance is None or mean_distance < best_mean_distance:
                best_mean_distance = mean_distance
                best_index = moving_index

    print("optimal mean distance: " + str(best_mean_distance))
    starting_indexes[secondary_set_index] = best_index
    return starting_indexes
def find_best_data_range(primary_time_points_mapping, secondary_time_points_mapping,
                         range_optimization_size, range_start, range_end):
    """
    Find range of size range_optimization_size with most overlapping points.

    This will return the start of the best range, where the best range is a
    range of size range_optimization_size that has the most number of
    overlapping points between the primary and secondary sets and that falls
    between range_start and range_end. Ties are resolved in favor of the
    later-starting range, except that a first range with zero skips is
    returned immediately.

    Args:
        primary_time_points_mapping: Dictionary, {DateTime: [GpsData,], ...}
        secondary_time_points_mapping: Dictionary, {DateTime: [GpsData,], ...}
        range_optimization_size: int, size of optimal range to find
        range_start: Datetime, beginning of range for possible best_range_start timestamp
        range_end: Datetime, end of range for possible best_range_start timestamp

    Returns:
        Datetime, starting timestamp of the range of size
        range_optimization_size that has the most number of overlapping points
        between the primary and secondary sets.
    """
    # Timestamps of the current window that are present in BOTH mappings.
    overlapping_timestamps = set()
    # Count skipped (non-overlapping) seconds in the first candidate window.
    previous_range_skip_count = 0
    for i in range(range_optimization_size):
        time = range_start + timedelta(seconds=i)
        if time not in primary_time_points_mapping or time not in secondary_time_points_mapping:
            previous_range_skip_count += 1
        else:
            overlapping_timestamps.add(time)

    best_range_start = range_start
    # BUG FIX: seed the best score with the first window's actual skip count.
    # It was previously initialized to range_optimization_size, so the first
    # window could never win even when it had the fewest skips.
    lowest_skip_count = previous_range_skip_count
    if lowest_skip_count == 0:
        # First window already has full overlap; nothing can beat it.
        return best_range_start

    total_seconds = int((range_end - range_start).total_seconds())
    # Slide the window one second at a time, updating the skip count
    # incrementally: add the newly-entered second, drop the edged-out one.
    for start_time in [range_start + timedelta(seconds=x) for x in range(1, total_seconds)]:
        current_range_skip_count = previous_range_skip_count
        # Second at the end of the current window that was just added.
        end_time = start_time + timedelta(seconds=range_optimization_size - 1)
        if end_time not in primary_time_points_mapping or end_time not in secondary_time_points_mapping:
            current_range_skip_count += 1
        else:
            overlapping_timestamps.add(end_time)
        # Second that was just edged out of the window.
        previous_start = start_time + timedelta(seconds=-1)
        if previous_start in overlapping_timestamps:
            # remove from set to keep set size of max range_optimization_size
            overlapping_timestamps.remove(previous_start)
        else:
            # The dropped second was a skip, so the count goes down.
            current_range_skip_count -= 1

        if current_range_skip_count <= lowest_skip_count:
            lowest_skip_count = current_range_skip_count
            best_range_start = start_time
            if lowest_skip_count == 0:
                break
        previous_range_skip_count = current_range_skip_count
    return best_range_start
def create_time_to_points_mapping(dataset, offset=0):
    """
    Map timestamp to points from the dataset.

    Args:
        dataset: GpsDataSet
        offset: int, seconds of offset to apply to timestamps

    Returns:
        Dictionary that groups dataset points by seconds timestamp in the
        following format: {DateTime: [GpsData,], ...}
    """
    shift = timedelta(seconds=offset)
    mapping = {}
    for data_point in dataset.gps_data_list:
        bucket_time = utils.round_time(data_point.time) + shift
        mapping.setdefault(bucket_time, []).append(data_point)
    return mapping
def find_optimal_offset(primary_set, secondary_set, start_time,
                        offset_range_length, point_checking_range_length):
    """
    Find optimal offset from offset range to shift the secondary set.

    Finds the optimal offset, meaning an offset that is in the given offset
    range to check and that also has overlap between the two sets. If the best
    mean distance exceeds MEAN_DISTANCE_THRESHOLD, it returns None, since this
    indicates sets that do not align even if their timestamps can be shifted
    to align.

    Args:
        primary_set: dictionary in format {DateTime: [GpsData,], ...}
        secondary_set: dictionary in format {DateTime: [GpsData,], ...}
        start_time: Datetime to start calculation at
        offset_range_length: int, how many offset values to check, i.e. the
            range (-offset_range_length//2, offset_range_length//2)
        point_checking_range_length: int, how many points around start_time to
            use in the mean distance calculations

    Returns:
        int, the offset in seconds that should be applied to the secondary
        data set if an optimal offset exists, None otherwise.
    """
    optimal_offset = 0
    # Seed just above the acceptance threshold so only below-threshold
    # candidates can win; if nothing beats this, we report no line-up.
    optimal_mean_distance = MEAN_DISTANCE_THRESHOLD + 1
    for offset in range(-offset_range_length//2, offset_range_length//2):
        distances = []
        for i in range(-point_checking_range_length//2, point_checking_range_length//2):
            # apply offset to secondary set
            primary_time = start_time + timedelta(seconds=i)
            secondary_time = start_time + timedelta(seconds=i+offset)
            if primary_time in primary_set and secondary_time in secondary_set:
                # if more than one point at that time, choose first one
                # TODO(ameles) consider if more complex downsampling needed
                primary_point = primary_set[primary_time][0]
                secondary_point = secondary_set[secondary_time][0]
                location1 = (primary_point.latitude, primary_point.longitude)
                location2 = (secondary_point.latitude, secondary_point.longitude)
                distances.append(utils.calculate_distance(location1, location2))
        if distances:
            # Compute the mean once (it was previously computed twice).
            mean_distance = np.mean(distances)
            if mean_distance < optimal_mean_distance:
                optimal_mean_distance = mean_distance
                optimal_offset = offset
    print("optimal mean distance: " + str(optimal_mean_distance))
    # optimal_mean_distance can never be None here (removed a dead None
    # check); the threshold comparison alone decides acceptance.
    if optimal_mean_distance > MEAN_DISTANCE_THRESHOLD:
        return None
    return optimal_offset
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2018, Raffaele Salmaso <raffaele@salmaso.org>
# Copyright (c) 2012 Omoto Kenji
# Copyright (c) 2011 Sam Stephenson
# Copyright (c) 2011 Josh Peek
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import io
import json
import re
import os
from subprocess import Popen, PIPE, STDOUT
import tempfile
from .exceptions import RuntimeError, ProgramError, RuntimeUnavailable
from .utils import json2_source, which
def encode_unicode_codepoints(str):
    r"""Replace every non-ASCII character with its ``\uXXXX`` escape sequence.

    ASCII characters (U+0000..U+007F) pass through unchanged.

    >>> encode_unicode_codepoints("a") == 'a'
    True
    >>> ascii = ''.join(chr(i) for i in range(0x80))
    >>> encode_unicode_codepoints(ascii) == ascii
    True
    >>> encode_unicode_codepoints('\u4e16\u754c') == '\\u4e16\\u754c'
    True
    """
    def escape(match):
        return '\\u{0:04x}'.format(ord(match.group(0)))

    return re.sub('[^\x00-\x7f]', escape, str)
class Runtime(object):
    """A JavaScript runtime backed by an external interpreter binary.

    ``command`` is the executable (or argv prefix) to invoke and
    ``runner_source`` is a JS template into which user code is substituted
    (see ``Context._compile``).
    """

    def __init__(self, name, command, runner_source, encoding='utf8'):
        self._name = name
        # Normalize a bare command string into an argv list.
        if isinstance(command, str):
            command = [command]
        self._command = command
        self._runner_source = runner_source
        self._encoding = encoding

    def __str__(self):
        return "{class_name}({runtime_name})".format(
            class_name=type(self).__name__,
            runtime_name=self._name,
        )

    @property
    def name(self):
        # Human-readable runtime name.
        return self._name

    def exec_(self, source):
        """Run *source* in a fresh context and return its result."""
        if not self.is_available():
            raise RuntimeUnavailable()
        return self.Context(self).exec_(source)

    def eval(self, source):
        """Evaluate *source* as a JS expression in a fresh context."""
        if not self.is_available():
            raise RuntimeUnavailable()
        return self.Context(self).eval(source)

    def compile(self, source):
        """Return a reusable Context pre-loaded with *source*."""
        if not self.is_available():
            raise RuntimeUnavailable()
        return self.Context(self, source)

    def is_available(self):
        # Available iff the interpreter binary can be located by `which`.
        return self._binary() is not None

    def runner_source(self):
        return self._runner_source

    def _binary(self):
        """protected"""
        # Cache the `which` lookup after the first call.
        if not hasattr(self, "_binary_cache"):
            self._binary_cache = which(self._command)
        return self._binary_cache

    def _execfile(self, filename):
        """protected"""
        # Run the interpreter on *filename*; stderr is merged into stdout.
        cmd = self._binary() + [filename]

        p = None
        try:
            p = Popen(cmd, stdout=PIPE, stderr=STDOUT)
            stdoutdata, stderrdata = p.communicate()
            ret = p.wait()
        finally:
            del p

        if ret == 0:
            return stdoutdata
        else:
            # Non-zero exit: surface the combined output as the error.
            raise RuntimeError(stdoutdata)

    class Context(object):
        """One JS execution context; ``source`` is prepended to every exec."""

        def __init__(self, runtime, source=''):
            self._runtime = runtime
            self._source = source

        def eval(self, source):
            # Wrap the expression in parentheses so object literals parse as
            # expressions rather than blocks inside the generated eval().
            if not source.strip():
                data = "''"
            else:
                data = "'('+" + json.dumps(source, ensure_ascii=True) + "+')'"

            code = 'return eval({data})'.format(data=data)
            return self.exec_(code)

        def exec_(self, source):
            if self._source:
                source = self._source + '\n' + source

            # Write the compiled program to a temp file and run it.
            (fd, filename) = tempfile.mkstemp(prefix='babeljs', suffix='.js')
            os.close(fd)
            try:
                with io.open(filename, "w+", encoding=self._runtime._encoding) as fp:
                    fp.write(self._compile(source))
                output = self._runtime._execfile(filename)
            finally:
                os.remove(filename)

            output = output.decode(self._runtime._encoding)
            output = output.replace("\r\n", "\n").replace("\r", "\n")
            # The result is on the last printed line; the trailing "\n" makes
            # it the second-to-last element after split.
            output = self._extract_result(output.split("\n")[-2])

            return output

        def call(self, identifier, *args):
            args = json.dumps(args)
            return self.eval("{identifier}.apply(this, {args})".format(identifier=identifier, args=args))

        def _compile(self, source):
            """protected"""
            # Substitute the user program into the runner template; all
            # placeholders are replaced in a single regex pass.
            runner_source = self._runtime.runner_source()
            replacements = {
                '#{source}': lambda: source,
                '#{encoded_source}': lambda: json.dumps(
                    "(function(){ " +
                    encode_unicode_codepoints(source) +
                    " })()"
                ),
                '#{json2_source}': json2_source,
            }

            pattern = "|".join(re.escape(k) for k in replacements)

            runner_source = re.sub(pattern, lambda m: replacements[m.group(0)](), runner_source)

            return runner_source

        def _extract_result(self, output_last_line):
            """protected"""
            # The runner is expected to emit a JSON array, e.g. ["ok", value]
            # on success -- presumably ["err", message] otherwise (TODO
            # confirm against the runner template).
            if not output_last_line:
                status = value = None
            else:
                ret = json.loads(output_last_line)
                if len(ret) == 1:
                    ret = [ret[0], None]
                status, value = ret

            if status == "ok":
                return value
            elif value and value.startswith('SyntaxError:'):
                raise RuntimeError(value)
            else:
                raise ProgramError(value)
class PyV8Runtime(object):
    """JavaScript runtime backed by the PyV8 bindings (imported lazily)."""

    def __init__(self):
        # Availability is decided once, at construction time, by whether the
        # PyV8 module can be imported.
        try:
            import PyV8
        except ImportError:
            self._is_available = False
        else:
            self._is_available = True

    @property
    def name(self):
        return "PyV8"

    def exec_(self, source):
        return self.Context().exec_(source)

    def eval(self, source):
        return self.Context().eval(source)

    def compile(self, source):
        return self.Context(source)

    def is_available(self):
        return self._is_available

    class Context:
        """One PyV8 execution context; ``source`` is prepended to every exec."""

        def __init__(self, source=""):
            self._source = source

        def exec_(self, source):
            # Wrap pre-compiled source plus the new source in an IIFE so that
            # `return` statements are legal.
            source = '''\
(function() {{
{0};
{1};
}})()'''.format(
                encode_unicode_codepoints(self._source),
                encode_unicode_codepoints(source)
            )
            source = str(source)

            import PyV8
            import contextlib
            #backward compatibility
            # NOTE(review): contextlib.nested was removed in Python 3; this
            # path presumably only works on Python 2 with PyV8 installed --
            # confirm before relying on it under Python 3.
            with contextlib.nested(PyV8.JSContext(), PyV8.JSEngine()) as (ctxt, engine):
                js_errors = (PyV8.JSError, IndexError, ReferenceError, SyntaxError, TypeError)
                try:
                    script = engine.compile(source)
                except js_errors as e:
                    raise RuntimeError(e)
                try:
                    value = script.run()
                except js_errors as e:
                    raise ProgramError(e)
                return self.convert(value)

        def eval(self, source):
            # Evaluate an expression by prefixing `return`.
            return self.exec_('return ' + encode_unicode_codepoints(source))

        def call(self, identifier, *args):
            args = json.dumps(args)
            return self.eval("{identifier}.apply(this, {args})".format(identifier=identifier, args=args))

        @classmethod
        def convert(cls, obj):
            """Recursively convert PyV8 JS values into plain Python objects."""
            from PyV8 import _PyV8
            if isinstance(obj, bytes):
                return obj.decode('utf8')
            if isinstance(obj, _PyV8.JSArray):
                return [cls.convert(v) for v in obj]
            elif isinstance(obj, _PyV8.JSFunction):
                # JS functions have no Python equivalent here; drop them.
                return None
            elif isinstance(obj, _PyV8.JSObject):
                # Convert to a dict, omitting keys whose value converts to None.
                ret = {}
                for k in obj.keys():
                    v = cls.convert(obj[k])
                    if v is not None:
                        ret[cls.convert(k)] = v
                return ret
            else:
                return obj
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sampling functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import input as input_ops
from tensorflow.python.training import queue_runner
__all__ = ['rejection_sample',
'stratified_sample',]
def rejection_sample(tensors, accept_prob_fn, batch_size, queue_threads=1,
                     enqueue_many=False, prebatch_capacity=16,
                     prebatch_threads=1, runtime_checks=False, name=None):
  """Stochastically creates batches by rejection sampling.

  Each list of non-batched tensors is evaluated by `accept_prob_fn`, to produce
  a scalar tensor between 0 and 1. This tensor corresponds to the probability of
  being accepted. When `batch_size` tensor groups have been accepted, the batch
  queue will return a mini-batch.

  Args:
    tensors: List of tensors for data. All tensors are either one item or a
      batch, according to enqueue_many.
    accept_prob_fn: A python lambda that takes a non-batch tensor from each
      item in `tensors`, and produces a scalar tensor.
    batch_size: Size of batch to be returned.
    queue_threads: The number of threads for the queue that will hold the final
      batch.
    enqueue_many: Bool. If true, interpret input tensors as having a batch
      dimension.
    prebatch_capacity: Capacity for the large queue that is used to convert
      batched tensors to single examples.
    prebatch_threads: Number of threads for the large queue that is used to
      convert batched tensors to single examples.
    runtime_checks: Bool. If true, insert runtime checks on the output of
      `accept_prob_fn`. Using `True` might have a performance impact.
    name: Optional prefix for ops created by this function.

  Raises:
    ValueError: enqueue_many is True and labels doesn't have a batch
      dimension, or if enqueue_many is False and labels isn't a scalar.
    ValueError: enqueue_many is True, and batch dimension on data and labels
      don't match.
    ValueError: if a zero initial probability class has a nonzero target
      probability.

  Returns:
    A list of tensors of the same length as `tensors`, with batch dimension
    `batch_size`.

  Example:
    # Get tensor for a single data and label example.
    data, label = data_provider.Get(['data', 'label'])

    # Get stratified batch according to data tensor.
    accept_prob_fn = lambda x: (tf.tanh(x[0]) + 1) / 2
    data_batch = tf.contrib.training.rejection_sample(
        [data, label], accept_prob_fn, 16)

    # Run batch through network.
    ...
  """
  with variable_scope.variable_scope(name, 'rejection_sample', tensors):
    tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensors)
    # Reduce the case of a batched example to that of a batch of a single
    # example by taking a batch of size one.
    if enqueue_many:
      # Validate that batch dimension of the input is consistent.
      tensor_list = _verify_data_inputs(tensor_list)
      # Make a single queue to hold input examples. Reshape output so examples
      # don't have singleton batch dimension.
      batched = input_ops.batch(tensor_list,
                                batch_size=1,
                                num_threads=prebatch_threads,
                                capacity=prebatch_capacity,
                                enqueue_many=True)
      tensor_list = [array_ops.squeeze(x, [0]) for x in batched]
    # Set up a queue containing batches that have the distribution.
    cur_prob = accept_prob_fn(tensor_list)
    if runtime_checks:
      # Attach runtime assertions that 0 <= cur_prob <= 1 before sampling.
      cur_prob = array_ops.identity(control_flow_ops.with_dependencies(
          [check_ops.assert_less_equal(0.0, cur_prob),
           check_ops.assert_less_equal(cur_prob, 1.0)],
          cur_prob), name='prob_with_checks')
    # Accept the example iff a uniform [0, 1) draw falls below its
    # acceptance probability.
    keep_input = random_ops.random_uniform([]) < cur_prob
    return _conditional_batch(
        tensor_list, keep_input, batch_size, num_threads=queue_threads)
def stratified_sample(tensors, labels, target_probs, batch_size,
                      init_probs=None, enqueue_many=False, queue_capacity=16,
                      threads_per_queue=1, name=None):
  """Stochastically creates batches based on per-class probabilities.

  This method discards examples. Internally, it creates one queue to amortize
  the cost of disk reads, and one queue to hold the properly-proportioned
  batch.

  Args:
    tensors: List of tensors for data. All tensors are either one item or a
      batch, according to enqueue_many.
    labels: Tensor for label of data. Label is a single integer or a batch,
      depending on enqueue_many. It is not a one-hot vector.
    target_probs: Target class proportions in batch. An object whose type has a
      registered Tensor conversion function.
    batch_size: Size of batch to be returned.
    init_probs: Class proportions in the data. An object whose type has a
      registered Tensor conversion function, or `None` for estimating the
      initial distribution.
    enqueue_many: Bool. If true, interpret input tensors as having a batch
      dimension.
    queue_capacity: Capacity of the large queue that holds input examples.
    threads_per_queue: Number of threads for the large queue that holds input
      examples and for the final queue with the proper class proportions.
    name: Optional prefix for ops created by this function.

  Raises:
    ValueError: enqueue_many is True and labels doesn't have a batch
      dimension, or if enqueue_many is False and labels isn't a scalar.
    ValueError: enqueue_many is True, and batch dimension on data and labels
      don't match.
    ValueError: if probs don't sum to one.
    ValueError: if a zero initial probability class has a nonzero target
      probability.
    TFAssertion: if labels aren't integers in [0, num classes).

  Returns:
    (data_batch, label_batch), where data_batch is a list of tensors of the same
    length as `tensors`

  Example:
    # Get tensor for a single data and label example.
    data, label = data_provider.Get(['data', 'label'])

    # Get stratified batch according to per-class probabilities.
    target_probs = [...distribution you want...]
    [data_batch], labels = tf.contrib.training.stratified_sample(
        [data], label, target_probs)

    # Run batch through network.
    ...
  """
  with ops.name_scope(name, 'stratified_sample', tensors + [labels]):
    tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensors)
    labels = ops.convert_to_tensor(labels)
    target_probs = ops.convert_to_tensor(target_probs, dtype=dtypes.float32)
    # Reduce the case of a single example to that of a batch of size 1.
    if not enqueue_many:
      tensor_list = [array_ops.expand_dims(tensor, 0) for tensor in tensor_list]
      labels = array_ops.expand_dims(labels, 0)
    # If `init_probs` is `None`, set up online estimation of data distribution.
    if init_probs is None:
      # We use `target_probs` to get the number of classes, so its shape must be
      # fully defined at graph construction time.
      target_probs.get_shape().assert_is_fully_defined()
      init_probs = _estimate_data_distribution(
          labels, target_probs.get_shape().num_elements())
    else:
      init_probs = ops.convert_to_tensor(init_probs, dtype=dtypes.float32)
    # Validate that input is consistent.
    tensor_list, labels, [init_probs, target_probs] = _verify_input(
        tensor_list, labels, [init_probs, target_probs])
    # Check that all zero initial probabilities also have zero target
    # probabilities.
    assert_op = control_flow_ops.Assert(
        math_ops.reduce_all(math_ops.logical_or(
            math_ops.not_equal(init_probs, 0),
            math_ops.equal(target_probs, 0))),
        ['All classes with zero initial probability must also have zero target '
         'probability: ', init_probs, target_probs])
    init_probs = control_flow_ops.with_dependencies([assert_op], init_probs)
    # Calculate acceptance sampling probabilities.
    accept_probs = _calculate_acceptance_probabilities(init_probs, target_probs)
    proportion_rejected = math_ops.reduce_sum((1 - accept_probs) * init_probs)
    # Log a warning (at most 10 times) when the sampler would reject more
    # than half of the incoming examples.
    accept_probs = control_flow_ops.cond(
        math_ops.less(proportion_rejected, .5),
        lambda: accept_probs,
        lambda: logging_ops.Print(  # pylint: disable=g-long-lambda
            accept_probs, [accept_probs],
            message='Proportion of examples rejected by sampler is high.',
            first_n=10))
    # Make a single queue to hold input examples. Reshape output so examples
    # don't have singleton batch dimension.
    batched = input_ops.batch(tensor_list + [labels],
                              batch_size=1,
                              num_threads=threads_per_queue,
                              capacity=queue_capacity,
                              enqueue_many=True)
    val_list = [array_ops.squeeze(x, [0]) for x in batched[:-1]]
    label = array_ops.squeeze(batched[-1], [0])
    # Set up second queue containing batches that have the desired class
    # proportions.
    cur_prob = array_ops.gather(accept_probs, label)
    # Accept the example iff a uniform draw falls below its class's
    # acceptance probability.
    keep_input = random_ops.random_uniform([]) < cur_prob
    batched = _conditional_batch(
        val_list + [label],
        keep_input,
        batch_size,
        num_threads=threads_per_queue)
    return batched[:-1], batched[-1]
def _estimate_data_distribution(labels, num_classes, smoothing_constant=10):
  """Estimate data distribution as labels are seen.

  Args:
    labels: 1-D integer tensor of class labels in `[0, num_classes)`.
    num_classes: Python integer, the total number of classes.
    smoothing_constant: Positive pseudo-count added to every class up front.
      Higher values provide more stability at the cost of slower convergence.

  Returns:
    A float32 tensor of shape `[num_classes]` holding the running estimate of
    the class distribution.

  Raises:
    ValueError: if `smoothing_constant` is not positive.
  """
  # Variable to track running count of classes. Smooth by a nonzero value to
  # avoid division-by-zero. Higher values provide more stability at the cost of
  # slower convergence.
  if smoothing_constant <= 0:
    raise ValueError('smoothing_constant must be nonzero.')
  num_examples_per_class_seen = variables.Variable(
      initial_value=[smoothing_constant] * num_classes, trainable=False,
      name='class_count', dtype=dtypes.int64)
  # Update the class-count based on what labels are seen in batch.
  num_examples_per_class_seen = num_examples_per_class_seen.assign_add(
      math_ops.reduce_sum(array_ops.one_hot(labels, num_classes,
                                            dtype=dtypes.int64), 0))
  # Normalize count into a probability.
  # NOTE: Without the `+= 0` line below, the test
  # `testMultiThreadedEstimateDataDistribution` fails. The reason is that
  # before this line, `num_examples_per_class_seen` is a Tensor that shares a
  # buffer with an underlying `ref` object. When the `ref` is changed by another
  # thread, `num_examples_per_class_seen` changes as well. Since this can happen
  # in the middle of the normalization computation, we get probabilities that
  # are very far from summing to one. Adding `+= 0` copies the contents of the
  # tensor to a new buffer, which will be consistent from the start to the end
  # of the normalization computation.
  num_examples_per_class_seen += 0
  init_prob_estimate = math_ops.truediv(
      num_examples_per_class_seen,
      math_ops.reduce_sum(num_examples_per_class_seen))
  # Must return float32 (not float64) to agree with downstream `_verify_input`
  # checks.
  return math_ops.cast(init_prob_estimate, dtypes.float32)
def _verify_data_inputs(tensor_list):
  """Verify that batched data inputs are well-formed."""
  for data_tensor in tensor_list:
    # Every data tensor must carry a leading batch dimension...
    shape = data_tensor.get_shape().with_rank_at_least(1)
    # ...and that batch dimension must be compatible across all tensors.
    shape[0].assert_is_compatible_with(tensor_list[0].get_shape()[0])
  return tensor_list
def _verify_input(tensor_list, labels, probs_list):
  """Verify that batched inputs are well-formed.

  Args:
    tensor_list: List of data tensors, each with a leading batch dimension.
    labels: 1-D integer label tensor (batch dimension only).
    probs_list: List of 1-D probability tensors to validate.

  Returns:
    `(tensor_list, labels, checked_probs_list)`, each with runtime validity
    checks attached as control dependencies.

  Raises:
    ValueError: if the probability tensors differ in length.
  """
  checked_probs_list = []
  for probs in probs_list:
    # Since number of classes shouldn't change at runtime, probabilities shape
    # should be fully defined.
    probs.get_shape().assert_is_fully_defined()
    # Probabilities must be 1D.
    probs.get_shape().assert_has_rank(1)
    # Probabilities must be nonnegative and sum to one (within tolerance).
    tol = 1e-6
    prob_sum = math_ops.reduce_sum(probs)
    checked_probs = control_flow_ops.with_dependencies(
        [check_ops.assert_non_negative(probs),
         check_ops.assert_less(prob_sum, 1.0 + tol),
         check_ops.assert_less(1.0 - tol, prob_sum)],
        probs)
    checked_probs_list.append(checked_probs)
  # All probabilities should be the same length.
  prob_length = checked_probs_list[0].get_shape().num_elements()
  for checked_prob in checked_probs_list:
    if checked_prob.get_shape().num_elements() != prob_length:
      raise ValueError('Probability parameters must have the same length.')
  # Labels tensor should only have batch dimension.
  labels.get_shape().assert_has_rank(1)
  for tensor in tensor_list:
    # Data tensor should have a batch dimension.
    tensor_shape = tensor.get_shape().with_rank_at_least(1)
    # Data and label batch dimensions must be compatible.
    tensor_shape[0].assert_is_compatible_with(labels.get_shape()[0])
  # Data and labels must have the same, strictly positive batch size. Since we
  # can't assume we know the batch size at graph creation, add runtime checks.
  labels_batch_size = array_ops.shape(labels)[0]
  lbl_assert = check_ops.assert_positive(labels_batch_size)
  # Make each tensor depend on its own checks.
  labels = control_flow_ops.with_dependencies([lbl_assert], labels)
  tensor_list = [control_flow_ops.with_dependencies(
      [lbl_assert,
       check_ops.assert_equal(array_ops.shape(x)[0], labels_batch_size)],
      x) for x in tensor_list]
  # Label's classes must be integers 0 <= x < num_classes.
  labels = control_flow_ops.with_dependencies(
      [check_ops.assert_integer(labels),
       check_ops.assert_non_negative(labels),
       check_ops.assert_less(labels, math_ops.cast(prob_length, labels.dtype))],
      labels)
  return tensor_list, labels, checked_probs_list
def _calculate_acceptance_probabilities(init_probs, target_probs):
  """Calculate the per-class acceptance rates.

  Args:
    init_probs: The class probabilities of the data.
    target_probs: The desired class proportion in minibatches.

  Returns:
    A list of the per-class acceptance probabilities.

  This method is based on solving the following analysis:

  Let F be the probability of a rejection (on any example).
  Let p_i be the proportion of examples in the data in class i (init_probs)
  Let a_i is the rate the rejection sampler should *accept* class i
  Let t_i is the target proportion in the minibatches for class i (target_probs)

  ```
  F = sum_i(p_i * (1-a_i))
    = 1 - sum_i(p_i * a_i)     using sum_i(p_i) = 1
  ```

  An example with class `i` will be accepted if `k` rejections occur, then an
  example with class `i` is seen by the rejector, and it is accepted. This can
  be written as follows:

  ```
  t_i = sum_k=0^inf(F^k * p_i * a_i)
      = p_i * a_i / (1 - F)    using geometric series identity, since 0 <= F < 1
      = p_i * a_i / sum_j(p_j * a_j)    using F from above
  ```

  Note that the following constraints hold:
  ```
  0 <= p_i <= 1, sum_i(p_i) = 1
  0 <= a_i <= 1
  0 <= t_i <= 1, sum_i(t_i) = 1
  ```

  A solution for a_i in terms of the other variables is the following:
    ```a_i = (t_i / p_i) / max_i[t_i / p_i]```
  """
  # Make list of t_i / p_i.
  ratio_l = target_probs / init_probs
  # Replace NaNs (arising from 0/0 when a class has zero probability in both
  # distributions) with 0s.
  ratio_l = math_ops.select(math_ops.is_nan(ratio_l),
                            array_ops.zeros_like(ratio_l),
                            ratio_l)
  # Calculate list of acceptance probabilities.
  max_ratio = math_ops.reduce_max(ratio_l)
  return ratio_l / max_ratio
def _conditional_batch(tensors, keep_input, batch_size, num_threads=10):
  """Conditionally enqueue tensors based on accept_prob.

  Specifically, enqueue the element if accept_prob > rand_unif([0, 1]).

  Args:
    tensors: List of tensors to enqueue.
    keep_input: Bool. Whether to enqueue or not.
    batch_size: Size of batch.
    num_threads: Number of enqueueing threads.

  Returns:
    List of batched tensors.

  Raises:
    ValueError: `accept_prob` isn't 0D.
  """
  keep_input.get_shape().assert_has_rank(0)
  # Collect the shape and dtype of every tensor to be enqueued; the FIFO
  # queue requires fully-defined shapes.
  shapes_list = []
  dtypes_list = []
  for candidate in tensors:
    candidate_shape = candidate.get_shape()
    candidate_shape.assert_is_fully_defined()
    shapes_list.append(candidate_shape)
    dtypes_list.append(candidate.dtype)
  final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                    shapes=shapes_list,
                                    dtypes=dtypes_list,
                                    name='batched_queue')
  summary.scalar('queue/%s/size' % final_q.name, final_q.size())
  # Enqueue only when `keep_input` is true; otherwise run a no-op.
  conditional_enqueue = control_flow_ops.cond(
      keep_input,
      lambda: final_q.enqueue(tensors),
      control_flow_ops.no_op)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      final_q, [conditional_enqueue] * num_threads))
  out_tensor = final_q.dequeue_many(batch_size)
  # A queue returns a bare tensor when only one tensor was enqueued; always
  # hand back a list so callers see a uniform type.
  if isinstance(out_tensor, ops.Tensor):
    out_tensor = [out_tensor]
  return out_tensor
| |
"""Collection of helper methods.
All containing methods are legacy helpers that should not be used by new
components. Instead call the service directly.
"""
from homeassistant.components.climate import _LOGGER
from homeassistant.components.climate.const import (
ATTR_AUX_HEAT, ATTR_FAN_MODE, ATTR_HUMIDITY, ATTR_HVAC_MODE,
ATTR_PRESET_MODE, ATTR_SWING_MODE, ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW, DOMAIN, SERVICE_SET_AUX_HEAT, SERVICE_SET_FAN_MODE,
SERVICE_SET_HUMIDITY, SERVICE_SET_HVAC_MODE, SERVICE_SET_PRESET_MODE,
SERVICE_SET_SWING_MODE, SERVICE_SET_TEMPERATURE)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, SERVICE_TURN_OFF, SERVICE_TURN_ON)
from homeassistant.loader import bind_hass
async def async_set_preset_mode(hass, preset_mode, entity_id=None):
    """Set new preset mode."""
    service_data = {ATTR_PRESET_MODE: preset_mode}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    await hass.services.async_call(
        DOMAIN, SERVICE_SET_PRESET_MODE, service_data, blocking=True)
@bind_hass
def set_preset_mode(hass, preset_mode, entity_id=None):
    """Set new preset mode."""
    service_data = {ATTR_PRESET_MODE: preset_mode}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_PRESET_MODE, service_data)
async def async_set_aux_heat(hass, aux_heat, entity_id=None):
    """Turn all or specified climate devices auxiliary heater on."""
    service_data = {ATTR_AUX_HEAT: aux_heat}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    await hass.services.async_call(
        DOMAIN, SERVICE_SET_AUX_HEAT, service_data, blocking=True)
@bind_hass
def set_aux_heat(hass, aux_heat, entity_id=None):
    """Turn all or specified climate devices auxiliary heater on."""
    service_data = {ATTR_AUX_HEAT: aux_heat}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_AUX_HEAT, service_data)
async def async_set_temperature(hass, temperature=None, entity_id=None,
                                target_temp_high=None, target_temp_low=None,
                                hvac_mode=None):
    """Set new target temperature."""
    candidates = [
        (ATTR_TEMPERATURE, temperature),
        (ATTR_TARGET_TEMP_HIGH, target_temp_high),
        (ATTR_TARGET_TEMP_LOW, target_temp_low),
        (ATTR_ENTITY_ID, entity_id),
        (ATTR_HVAC_MODE, hvac_mode),
    ]
    # Only pass along attributes the caller actually provided.
    kwargs = {key: value for key, value in candidates if value is not None}
    _LOGGER.debug("set_temperature start data=%s", kwargs)
    await hass.services.async_call(
        DOMAIN, SERVICE_SET_TEMPERATURE, kwargs, blocking=True)
@bind_hass
def set_temperature(hass, temperature=None, entity_id=None,
                    target_temp_high=None, target_temp_low=None,
                    hvac_mode=None):
    """Set new target temperature."""
    candidates = [
        (ATTR_TEMPERATURE, temperature),
        (ATTR_TARGET_TEMP_HIGH, target_temp_high),
        (ATTR_TARGET_TEMP_LOW, target_temp_low),
        (ATTR_ENTITY_ID, entity_id),
        (ATTR_HVAC_MODE, hvac_mode),
    ]
    # Only pass along attributes the caller actually provided.
    kwargs = {key: value for key, value in candidates if value is not None}
    _LOGGER.debug("set_temperature start data=%s", kwargs)
    hass.services.call(DOMAIN, SERVICE_SET_TEMPERATURE, kwargs)
async def async_set_humidity(hass, humidity, entity_id=None):
    """Set new target humidity."""
    service_data = {ATTR_HUMIDITY: humidity}
    if entity_id is not None:
        service_data[ATTR_ENTITY_ID] = entity_id
    await hass.services.async_call(
        DOMAIN, SERVICE_SET_HUMIDITY, service_data, blocking=True)
@bind_hass
def set_humidity(hass, humidity, entity_id=None):
    """Set new target humidity."""
    service_data = {ATTR_HUMIDITY: humidity}
    if entity_id is not None:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_HUMIDITY, service_data)
async def async_set_fan_mode(hass, fan, entity_id=None):
    """Set all or specified climate devices fan mode on."""
    service_data = {ATTR_FAN_MODE: fan}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    await hass.services.async_call(
        DOMAIN, SERVICE_SET_FAN_MODE, service_data, blocking=True)
@bind_hass
def set_fan_mode(hass, fan, entity_id=None):
    """Set all or specified climate devices fan mode on."""
    service_data = {ATTR_FAN_MODE: fan}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_FAN_MODE, service_data)
async def async_set_hvac_mode(hass, hvac_mode, entity_id=None):
    """Set new target operation mode."""
    service_data = {ATTR_HVAC_MODE: hvac_mode}
    if entity_id is not None:
        service_data[ATTR_ENTITY_ID] = entity_id
    await hass.services.async_call(
        DOMAIN, SERVICE_SET_HVAC_MODE, service_data, blocking=True)
@bind_hass
def set_operation_mode(hass, hvac_mode, entity_id=None):
    """Set new target operation mode."""
    service_data = {ATTR_HVAC_MODE: hvac_mode}
    if entity_id is not None:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_HVAC_MODE, service_data)
async def async_set_swing_mode(hass, swing_mode, entity_id=None):
    """Set new target swing mode."""
    service_data = {ATTR_SWING_MODE: swing_mode}
    if entity_id is not None:
        service_data[ATTR_ENTITY_ID] = entity_id
    await hass.services.async_call(
        DOMAIN, SERVICE_SET_SWING_MODE, service_data, blocking=True)
@bind_hass
def set_swing_mode(hass, swing_mode, entity_id=None):
    """Set new target swing mode."""
    service_data = {ATTR_SWING_MODE: swing_mode}
    if entity_id is not None:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_SWING_MODE, service_data)
async def async_turn_on(hass, entity_id=None):
    """Turn on device."""
    service_data = {}
    if entity_id is not None:
        service_data[ATTR_ENTITY_ID] = entity_id
    await hass.services.async_call(
        DOMAIN, SERVICE_TURN_ON, service_data, blocking=True)
async def async_turn_off(hass, entity_id=None):
    """Turn off device."""
    service_data = {}
    if entity_id is not None:
        service_data[ATTR_ENTITY_ID] = entity_id
    await hass.services.async_call(
        DOMAIN, SERVICE_TURN_OFF, service_data, blocking=True)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
######################################################################
# Copyright C 2015 Faurecia (China) Holding Co.,Ltd. #
# All rights reserved #
# Name: host.py
# Author: Canux canuxcheng@163.com #
# Version: V1.0 #
# Time: Thu 20 Aug 2015 02:27:23 AM EDT
######################################################################
# Description:
######################################################################
from base import NagiosAuto
import os
class Host(NagiosAuto):
    """Create or delete host definition files in nagios.

    This class has three options to create host files in nagios; you can
    specify the template you need. When creating many host files at one
    time, this is more efficient than doing it by hand.
    """

    def __init__(self, *args, **kwargs):
        """Set up paths to the nagios hosts directory and the config areas."""
        super(Host, self).__init__(*args, **kwargs)
        self.g_dir = self.args.path + "/hosts/"
        self.host_conf = self.conf + "/host/"
        self.area_conf = self.conf + "/area/"
        # Known geographic areas; each has a matching <area>.txt config file.
        self.area_list = ["as", "us", "eu"]
        if self.__class__.__name__ == "Host":
            self.logger.debug("==== END DEBUG ====")

    def define_options(self):
        """Define the command line options used for creating hosts."""
        super(Host, self).define_options()
        self.parser.add_argument("-t", "--types",
                                 action="append",
                                 dest="types",
                                 required=False,
                                 help="The host types, eg: ['ad', 'mii', \
'ijcore', 'mii_win-primary', 'mii_win-bck']. \
Read template from types.cfg and \
read hostname and ip address from types.txt. \
Use types@mode for normal host. \
mode=0 use dns as address. \
mode=1 use ip as address.")
        self.parser.add_argument("-v", "--vcenter",
                                 dest="vcenter",
                                 required=False,
                                 help="Vcenter for mii and ijcore vmware.")

    def get_area(self, hostname):
        """Return the area (as/us/eu) matching the hostname's 2-char prefix."""
        try:
            locate = hostname[0:2].upper()
            self.logger.debug("locate: {}".format(locate))
            for area in self.area_list:
                area_file = self.area_conf + area + ".txt"
                self.logger.debug("area_file: {}".format(area_file))
                # Use a context manager so the handle is closed promptly
                # (the previous version leaked one handle per area file).
                with open(area_file, "r") as f:
                    lines = f.readlines()
                for line in lines:
                    if locate in line:
                        self.logger.debug("area: {}".format(area))
                        return area
            self.not_exist(locate)
        except Exception as e:
            self.error("get_area: %s" % e)

    def get_vcenter(self, vcenter):
        """Return the full vcenter entry from vmware.txt matching *vcenter*."""
        try:
            vcenterfile = self.area_conf + "vmware.txt"
            self.logger.debug("vcenterfile: {}".format(vcenterfile))
            with open(vcenterfile, "r") as fr:
                lines = fr.readlines()
            for line in lines:
                if vcenter in line:
                    # Strip all whitespace from the matched line.
                    vcenter = "".join(line.split())
                    self.logger.debug("vcenter: {}".format(vcenter))
                    return vcenter
            self.not_exist("%s" % vcenter)
        except Exception as e:
            self.error("get_vcenter: %s" % e)

    def get_mii_site(self, hostname):
        """Return the _MII_SITEDATABASE token (hostname chars 2-5, uppercased)."""
        try:
            mii_site = hostname[2:5].upper()
            self.logger.debug("mii_site: {}".format(mii_site))
            return mii_site
        except Exception as e:
            self.error("get_mii_site: %s" % e)

    def get_types(self, types):
        """Split a -t argument into (types, mode).

        Known template names imply their address mode; anything else must be
        written as types@mode (mode 0 = dns, mode 1 = ip).
        """
        try:
            if types in ["ad", "mii_win-primary", "mii_win-bck"]:
                mode = 1
            elif types in ["mii", "ijcore"]:
                mode = 0
            else:
                old_type = types
                types = old_type.split("@")[0]
                mode = old_type.split("@")[1]
                if not mode:
                    self.error("Please specify address mode for normal host.")
            self.logger.debug("types: {}".format(types))
            self.logger.debug("mode: {}".format(mode))
            return types, mode
        except Exception as e:
            self.error("get_types: %s" % e)

    def write_one_host(self, hostfile, lines, vcenter,
                       area, mii_site, hostname, address):
        """Write one rendered template to *hostfile*."""
        try:
            # Context manager guarantees the file is flushed and closed
            # (previously the handle was never closed).
            with open(hostfile, "w") as fw:
                for l in lines:
                    self.logger.debug("l: {}".format(l))
                    if "ohtpl_area_%s" in l:
                        fw.write(l % area)
                    elif "ohtpl_sys_vmware_%s_%s" in l:
                        l_vcenter = l.replace("ohtpl_sys_vmware_%s_%s",
                                              str(vcenter))
                        fw.write(l_vcenter)
                    elif "host_name" in l:
                        fw.write(l % hostname)
                    elif "address" in l:
                        fw.write(l % address)
                    elif "_MII_SITEDATABASE" in l:
                        fw.write(l % mii_site)
                    elif "%s" not in l:
                        fw.write(l)
                    # If %s inside but not specified, we can not handle it.
                    else:
                        self.error("write_host: unknow argument %s inside.")
        except Exception as e:
            self.error("write_one_host: %s" % e)

    def create_host(self):
        """Get types from -t, read hostname and address, and write the \
hosts in nagios."""
        try:
            vcenter = ""
            area = ""
            mii_site = ""
            # Iterate the -t arguments directly instead of by index.
            for raw_types in self.args.types:
                self.logger.debug("types: {}".format(raw_types))
                (types, mode) = self.get_types(raw_types)
                # Get the template file.
                template = self.host_conf + types + ".cfg"
                self.logger.debug("template: {}".format(template))
                with open(template, "r") as ftr:
                    lines = ftr.readlines()
                # Get the hostname and address file.
                host = self.host_conf + types + ".txt"
                self.logger.debug("host: {}".format(host))
                des_host = self.host_conf + types + ".tmp"
                self.logger.debug("des_host: {}".format(des_host))
                self.delete_blank_line(host, des_host)
                with open(des_host, "r") as fhr:
                    h_lines = fhr.readlines()
                for line in h_lines:
                    hostname = line.split()[0].split(".")[0].strip().upper()
                    self.logger.debug("hostname: {}".format(hostname))
                    # mode selects which whitespace-separated column holds
                    # the address (0 = dns name, 1 = ip).
                    address = line.split()[int(mode)].strip().lower()
                    self.logger.debug("address: {}".format(address))
                    hostfile = self.g_dir + hostname + ".cfg"
                    self.logger.debug("hostfile: {}".format(hostfile))
                    if types in ["ad"]:
                        area = self.get_area(hostname)
                    elif types in ["mii_win-primary", "mii_win-bck"]:
                        area = self.get_area(hostname)
                        mii_site = self.get_mii_site(hostname)
                    elif types in ["mii", "ijcore"]:
                        if self.args.vcenter:
                            vcenter = self.get_vcenter(self.args.vcenter)
                        else:
                            self.error("Please use -v to specify vcenter.")
                    # Write to the host in nagios; only overwrite an
                    # existing file when --force was given.
                    if os.path.isfile(hostfile):
                        self.already_exist("%s" % hostfile)
                        if self.args.force:
                            self.write_one_host(hostfile, lines, vcenter, area,
                                                mii_site, hostname, address)
                    else:
                        self.write_one_host(hostfile, lines, vcenter, area,
                                            mii_site, hostname, address)
        except Exception as e:
            self.error("create_host: %s" % e)

    def delete_host(self):
        """Delete the host files listed in host.txt from nagios."""
        files = self.host_conf + "host.txt"
        self.logger.debug("files: {}".format(files))
        des_files = self.host_conf + "host.tmp"
        self.logger.debug("des_files: {}".format(des_files))
        self.delete_blank_line(files, des_files)
        # Keep the attributes for backward compatibility, but make sure the
        # handle is closed once the lines have been read.
        self.fr = open(des_files, "r")
        try:
            self.lines = self.fr.readlines()
        finally:
            self.fr.close()
        for line in self.lines:
            self.logger.debug("line: {}".format(line))
            hostname = line.split()[0].split(".")[0].strip().upper()
            hostfile = self.g_dir + hostname + ".cfg"
            self.logger.debug("hostfile: {}".format(hostfile))
            if not os.path.isfile(hostfile):
                self.not_exist("%s" % hostfile)
            else:
                try:
                    os.remove(hostfile)
                except Exception as e:
                    self.error("remove_host: %s" % e)
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert the fee was in range"""
    target_fee = tx_size * fee_per_kB / 1000
    if fee < target_fee:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
    # allow the wallet's estimation to be at most 2 bytes off
    upper_bound = (tx_size + 2) * fee_per_kB / 1000
    if fee > upper_bound:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
    """Raise AssertionError unless every argument compares equal."""
    everything = (thing1, thing2) + args
    if any(item != thing1 for item in everything[1:]):
        raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in everything))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 is strictly greater than thing2."""
    if thing1 <= thing2:
        raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
    """Raise AssertionError unless thing1 is greater than or equal to thing2."""
    if thing1 < thing2:
        raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises exc (without checking the message)."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert fun(*args, **kwds) raises exc, optionally containing message.

    Args:
        exc: the expected exception type (must not be a JSONRPC failure;
            use assert_raises_rpc_error for those).
        message: [a substring of] the expected exception text, or None to
            skip the message check.
        fun: the callable to invoke.
        args*, kwds**: arguments forwarded to `fun`.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        # Ordinary exceptions carry their text via str(e); the previous
        # e.error['message'] attribute only exists on JSONRPC-style errors,
        # which are explicitly excluded by the branch above.
        if message is not None and message not in str(e):
            raise AssertionError("Expected substring not found:" + str(e))
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Execute a process and asserts the process return code and output.

    Calls function `fun` with arguments `args` and `kwds`. Catches a
    CalledProcessError and verifies that the return code and output are as
    expected. Throws AssertionError if no CalledProcessError was raised or
    if the return code and output are not as expected.

    Args:
        returncode (int): the process return code.
        output (string): [a substring of] the process output.
        fun (function): the function to call. This should execute a process.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    try:
        fun(*args, **kwds)
    except CalledProcessError as e:
        if e.returncode != returncode:
            raise AssertionError("Unexpected returncode %i" % e.returncode)
        if output not in e.output:
            raise AssertionError("Expected substring not found:" + e.output)
    else:
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception code and message is raised.

    Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
    and verifies that the error code and message are as expected. Throws AssertionError if
    no JSONRPCException was raised or if the error code/message are not as expected.

    Args:
        code (int), optional: the error code returned by the RPC call (defined
            in src/rpc/protocol.h). Set to None if checking the error code is not required.
        message (string), optional: [a substring of] the error string returned by the
            RPC call. Set to None if checking the error string is not required.
        fun (function): the function to call. This should be the name of an RPC.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    if not try_rpc(code, message, fun, *args, **kwds):
        # Raise explicitly instead of using `assert`, which is stripped
        # when the interpreter runs with optimizations (python -O).
        raise AssertionError("No exception raised")
def try_rpc(code, message, fun, *args, **kwds):
    """Try to run an rpc command.

    Tests against error code and message if the rpc fails.
    Returns whether a JSONRPCException was raised.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # The expected failure happened; validate the error payload.
        if code is not None and code != e.error["code"]:
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:" + e.error['message'])
        return True
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
def assert_is_hex_string(string):
    """Raise AssertionError unless *string* parses as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as e:
        raise AssertionError(
            "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
    """Raise AssertionError unless `string` looks like a lowercase hex hash.

    length: required character count, or a falsy value to skip that check.
    """
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """
    Scan `object_array` (a list of dicts) for entries whose key/value pairs
    all match `to_match`, then check each such entry also carries every
    key/value pair in `expected`.

    With should_not_find=True, assert instead that nothing matches
    `to_match` (in that mode `expected` must be empty).
    """
    if should_not_find:
        assert_equal(expected, {})
    match_count = 0
    for entry in object_array:
        if not all(entry[key] == value for key, value in to_match.items()):
            continue
        if should_not_find:
            match_count += 1
        for key, value in expected.items():
            if entry[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(entry), str(key), str(value)))
        match_count += 1
    if match_count == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if match_count > 0 and should_not_find:
        raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    amount = Decimal("20000000.00000003")
    # Round-trip through float/JSON and re-scale to satoshis.
    satoshis = int(json.loads(json.dumps(float(amount))) * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by a hex string."""
    return len(bytes.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    """Return the lowercase hex representation of a bytes-like object."""
    return byte_str.hex()
def hash256(byte_str):
    """Return the double-SHA256 digest of `byte_str`, byte-reversed."""
    inner = hashlib.sha256(byte_str).digest()
    # Reverse to match the displayed (little-endian) hash convention.
    return hashlib.sha256(inner).digest()[::-1]
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string into the bytes it represents."""
    encoded = hex_str.encode('ascii')
    return unhexlify(encoded)
def str_to_b64str(string):
    """Encode a text string as base64 and return it as ASCII text."""
    utf8_bytes = string.encode('utf-8')
    return b64encode(utf8_bytes).decode('ascii')
def satoshi_round(amount):
    """Round `amount` down to eight decimal places (one satoshi)."""
    quantum = Decimal('0.00000001')
    return Decimal(amount).quantize(quantum, rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
    """Poll `predicate` every 0.05s until it returns true.

    Stops after `attempts` tries or `timeout` seconds (default: 60s when
    neither is given) and raises via the assert helpers on failure. If
    `lock` is given, the predicate is evaluated while holding it.
    """
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    deadline = timeout + time.time()
    tries = 0
    while tries < attempts and time.time() < deadline:
        if lock:
            with lock:
                satisfied = predicate()
        else:
            satisfied = predicate()
        if satisfied:
            return
        tries += 1
        time.sleep(0.05)
    # Report which limit was exhausted.
    assert_greater_than(attempts, tries)
    assert_greater_than(deadline, time.time())
    raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# Port allocation constants shared by p2p_port()/rpc_port() below.
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
    # Per-process seed used to spread port assignments so that multiple
    # test processes can run concurrently without colliding.
    # Must be initialized with a unique integer for each process
    n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
    """Build a coverage-tracking RPC proxy for a test node.

    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to

    Kwargs:
        timeout (int): HTTP timeout in seconds
        coveragedir (str): directory for RPC coverage logs, or None

    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    kwargs = {}
    if timeout is not None:
        kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **kwargs)
    # Remember the URL on the proxy itself for diagnostics.
    proxy.url = url
    if coveragedir:
        logfile = coverage.get_filename(coveragedir, node_number)
    else:
        logfile = None
    return coverage.AuthServiceProxyWrapper(proxy, logfile)
def p2p_port(n):
    """Return the deterministic P2P listening port for node `n`."""
    assert(n <= MAX_NODES)
    # Offset by the process seed so parallel test runs don't collide.
    offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + n + offset
def rpc_port(n):
    """Return the deterministic RPC port for node `n` (above the p2p range)."""
    offset = (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
    return PORT_MIN + PORT_RANGE + n + offset
def rpc_url(datadir, i, rpchost=None):
    """Construct the authenticated RPC URL for node `i`.

    Credentials are read from the node's datadir. `rpchost` may override
    the default host, either as "host" or "host:port".
    """
    user, password = get_auth_cookie(datadir)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        parts = rpchost.split(':')
        if len(parts) == 2:
            host, port = parts
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (user, password, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
    """Create node n's datadir and write a minimal regtest namecoin.conf."""
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    conf_lines = [
        "regtest=1\n",
        "port=" + str(p2p_port(n)) + "\n",
        "rpcport=" + str(rpc_port(n)) + "\n",
        "listenonion=0\n",
    ]
    with open(os.path.join(datadir, "namecoin.conf"), 'w', encoding='utf8') as f:
        f.writelines(conf_lines)
    return datadir
def base_node_args(i):
    """
    Return base arguments to always use for node i. These arguments
    are those that are also present for the chain cache and must thus
    be set for all runs.
    """
    # Nodes 1 and 2 keep -namehistory so that each side of a network
    # split (0/1 vs 2/3) has one node with it and one without.
    return ["-namehistory"] if i in (1, 2) else []
def get_datadir_path(dirname, n):
    """Return the datadir path for node `n` without creating it."""
    return os.path.join(dirname, "node%d" % n)
def get_auth_cookie(datadir):
    """Read RPC credentials for a node.

    Reads rpcuser/rpcpassword from namecoin.conf if present; the regtest
    .cookie file written by the running node takes precedence.

    Returns:
        (user, password) tuple of strings.

    Raises:
        ValueError: if no credentials can be found.
    """
    user = None
    password = None
    if os.path.isfile(os.path.join(datadir, "namecoin.conf")):
        with open(os.path.join(datadir, "namecoin.conf"), 'r', encoding='utf8') as f:
            for line in f:
                if line.startswith("rpcuser="):
                    assert user is None  # Ensure that there is only one rpcuser line
                    # Split on the first '=' only, so values may contain '='.
                    user = line.split("=", 1)[1].strip("\n")
                if line.startswith("rpcpassword="):
                    assert password is None  # Ensure that there is only one rpcpassword line
                    password = line.split("=", 1)[1].strip("\n")
    if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
        with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
            userpass = f.read()
            split_userpass = userpass.split(':')
            user = split_userpass[0]
            password = split_userpass[1]
    if user is None or password is None:
        raise ValueError("No RPC credentials")
    return user, password
def log_filename(dirname, n_node, logname):
    """Return the path to `logname` inside node `n_node`'s regtest dir."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def get_bip9_status(node, key):
    """Return the BIP9 softfork status entry for deployment `key` on `node`."""
    chain_info = node.getblockchaininfo()
    return chain_info['bip9_softforks'][key]
def set_node_times(nodes, t):
    """Set the mock time of every node in `nodes` to `t`."""
    for conn in nodes:
        conn.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
    """Disconnect every peer of `from_connection` that identifies as test
    node `node_num`, then poll until the disconnect takes effect."""
    def matching_peers():
        subver = "testnode%d" % node_num
        return [peer['id'] for peer in from_connection.getpeerinfo()
                if subver in peer['subver']]

    for peer_id in matching_peers():
        from_connection.disconnectnode(nodeid=peer_id)
    for _ in range(50):
        if not matching_peers():
            break
        time.sleep(0.1)
    else:
        raise AssertionError("timed out waiting for disconnect")
def connect_nodes(from_connection, node_num):
    """Connect `from_connection` to local node `node_num` and wait for the
    version handshake to finish."""
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # Poll until the handshake completes to avoid racing transaction relay.
    while True:
        pending = [p for p in from_connection.getpeerinfo() if p['version'] == 0]
        if not pending:
            break
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other (both directions)."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has least
    one node already synced to the latest, stable tip, otherwise there's a
    chance it might return before all nodes are stably synced.
    """
    # Use getblockcount() instead of waitforblockheight() to determine the
    # initial max height because the two RPCs look at different internal global
    # variables (chainActive vs latestBlock) and the former gets updated
    # earlier.
    maxheight = max(x.getblockcount() for x in rpc_connections)
    start_time = cur_time = time.time()
    while cur_time <= start_time + timeout:
        # waitforblockheight blocks for up to `wait` seconds (RPC arg is ms).
        tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
        if all(t["height"] == maxheight for t in tips):
            # Heights agree; hashes must also agree, otherwise chains forked.
            if all(t["hash"] == tips[0]["hash"] for t in tips):
                return
            raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
                "".join("\n {!r}".format(tip) for tip in tips)))
        cur_time = time.time()
    raise AssertionError("Block sync to height {} timed out:{}".format(
        maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same best block
    """
    while timeout > 0:
        tips = [conn.getbestblockhash() for conn in rpc_connections]
        reference = tips[0]
        if all(tip == reference for tip in tips):
            return
        time.sleep(wait)
        timeout -= wait
    raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while timeout > 0:
        # Compare every pool against the first node's pool.
        reference_pool = set(rpc_connections[0].getrawmempool())
        in_sync = 1
        for conn in rpc_connections[1:]:
            if set(conn.getrawmempool()) == reference_pool:
                in_sync += 1
        if in_sync == len(rpc_connections):
            return
        time.sleep(wait)
        timeout -= wait
    raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >= 0)
    utxos = from_node.listunspent(confirmations_required)
    random.shuffle(utxos)
    inputs = []
    total_in = Decimal("0.00000000")
    while utxos and total_in < amount_needed:
        utxo = utxos.pop()
        total_in += utxo["amount"]
        inputs.append({"txid": utxo["txid"], "vout": utxo["vout"], "address": utxo["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        half = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        outputs[change_address] = half
        change = amount_in - spent - half
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    recipient = random.choice(nodes)
    # Randomize the fee within [min_fee, min_fee + fee_increment*fee_variants].
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[recipient.getnewaddress()] = float(amount)
    rawtx = sender.createrawtransaction(inputs, outputs)
    signresult = sender.signrawtransaction(rawtx)
    txid = sender.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Split and mine coins until `node` owns at least `count` confirmed utxos."""
    # Mine enough blocks (in batches of 25) to have spendable coins; the
    # +101 allows the earliest coinbases to mature.
    to_generate = int(0.5 * count) + 101
    while to_generate > 0:
        node.generate(min(25, to_generate))
        to_generate -= 25
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    # Each iteration splits one utxo into two, netting one extra utxo.
    for i in range(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value / 2)
        outputs[addr2] = satoshi_round(send_value / 2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)
    # Confirm all the splits before returning.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return hex for 128 large OP_RETURN outputs, preceded by the output
    count byte 0x81 (129 = these 128 plus the change output)."""
    # One shared script_pubkey: OP_RETURN OP_PUSHDATA2 512, then 512 0x01 bytes.
    script_pubkey = "6a4d0200" + "01" * 512
    # Each txout: 8-byte zero value, script length 0x0204 LE (= 516), script.
    txout = "0000000000000000" + "fd0402" + script_pubkey
    return "81" + txout * 128
def create_tx(node, coinbase, to_address, amount):
    """Spend output 0 of `coinbase` to `to_address`; return the signed hex."""
    inputs = [{"txid": coinbase, "vout": 0}]
    outputs = {to_address: amount}
    raw = node.createrawtransaction(inputs, outputs)
    signed = node.signrawtransaction(raw)
    assert_equal(signed["complete"], True)
    return signed["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Build, sign and broadcast `num` large transactions; return their txids.

    Consumes `num` entries from `utxos` (popped in place).
    """
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = satoshi_round(change)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the big OP_RETURN outputs into the serialized tx hex.
        # NOTE(review): offsets 92/94 assume a fixed-length single-input
        # prefix with the output-count byte at that position — confirm if
        # the serialization format ever changes.
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # SIGHASH_NONE so the spliced outputs don't invalidate the signature.
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def mine_large_block(node, utxos=None):
    """Fill a block with 14 large transactions and mine it.

    # generate a 66k transaction,
    # and 14 of them is close to the 1MB block limit
    """
    tx_count = 14
    txouts = gen_return_txouts()
    # Alias the caller's list so consumed utxos are reflected back.
    spendable = utxos if utxos is not None else []
    if len(spendable) < tx_count:
        spendable.clear()
        spendable.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, spendable, tx_count, fee=fee)
    node.generate(1)
| |
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_gbm.txt'))
## Skip the three header lines of the TCGA follow-up file.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## NOTE(review): columns are positional — 9 presumably vital status, 10 days
## to last follow up, 11 days to death; confirm against the file header.
clinical=[['','','']]
for i in data:
    if clinical[-1][0]==i[0]:
        ## Same patient as the previous row: overwrite with the newer entry.
        if i[9]=='Alive':
            clinical[-1]=[i[0],int(i[10]),'Alive']
        elif i[9]=='Dead':
            clinical[-1]=[i[0],int(i[11]),'Dead']
        else:
            pass
    else:
        ## First row for this patient.
        if i[9]=='Alive':
            clinical.append([i[0],int(i[10]),'Alive'])
        elif i[9]=='Dead':
            clinical.append([i[0],int(i[11]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_patient_gbm.txt'))
## Skip the three header lines.
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## [0 placeholder, sex code, age] keyed by patient ID.
    ## NOTE(review): column 6 presumably sex, 26 age at diagnosis,
    ## 15-17 vital status / follow-up / death days — confirm vs header.
    more_clinical[i[0]]=[0,sex_dict[i[6]],int(i[26])]
    if i[15]=='Alive':
        clinical4.append([i[0],int(i[16]),'Alive'])
    elif i[15]=='Dead':
        clinical4.append([i[0],int(i[17]),'Dead'])
    else:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Build the follow-up ID list once instead of recomputing it inside the
## loop (the original rebuilt it per patient, O(n^2)).
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        ## Patient only appears in the clinical_patient file.
        new_clinical.append(i)
    else:
        ## Keep whichever record carries the longer follow-up time.
        existing=clinical[clinical_ids.index(i[0])]
        if i[1]<=existing[1]:
            new_clinical.append(existing)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## Track appended IDs in a set; this mirrors the original's membership test
## against the growing new_clinical list, in O(1) per lookup.
seen_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen_ids:
        new_clinical.append(i)
        seen_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
final_clinical=[i+more_clinical[i[0]] for i in clinical if i[0] in more_clinical]
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Rebuild the patient barcode from the first three '-' fields.
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## BUG FIX: dict.has_key() was removed in Python 3; the 'in' operator
    ## is equivalent and works on both Python 2 and 3.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
## The summary line of interest is the second-to-last line of the printed
## survfit output; fields are parsed positionally from that line.
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
## Use a context manager so the handle is flushed and closed even on error
## (the original never closed the file). The written bytes are unchanged:
## a tab-separated header line, then the tab-separated values.
header=['Average Age','Males','Females','Deaths','Median Survival']
values=[str(age),str(males),str(females),deaths,median]
with open('patient_info.txt','w') as f:
    f.write('\t'.join(header))
    f.write('\n')
    f.write('\t'.join(values))
| |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Thomas Scholtes
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
import unittest
import six
from mock import patch
from test.helper import TestHelper, capture_log, has_program
from beets import config
from beets.util import CommandOutput
from mediafile import MediaFile
from beetsplug.replaygain import (FatalGstreamerPluginReplayGainError,
GStreamerBackend)
# Probe the environment once at import time so the test classes below can
# be skipped when a given ReplayGain backend is unavailable.
try:
    import gi
    gi.require_version('Gst', '1.0')
    GST_AVAILABLE = True
except (ImportError, ValueError):
    GST_AVAILABLE = False
# mp3gain/aacgain power the "command" backend.
if any(has_program(cmd, ['-v']) for cmd in ['mp3gain', 'aacgain']):
    GAIN_PROG_AVAILABLE = True
else:
    GAIN_PROG_AVAILABLE = False
# bs1770gain powers the bs1770gain loudness backend.
if has_program('bs1770gain', ['--replaygain']):
    LOUDNESS_PROG_AVAILABLE = True
else:
    LOUDNESS_PROG_AVAILABLE = False
FFMPEG_AVAILABLE = has_program('ffmpeg', ['-version'])
def reset_replaygain(item):
    """Clear the track and album ReplayGain fields on `item` and persist it."""
    item['rg_track_peak'] = None
    item['rg_track_gain'] = None
    # BUG FIX: 'rg_album_gain' was assigned twice while 'rg_album_peak' was
    # never cleared (compare ReplayGainCliTestBase._reset_replaygain).
    item['rg_album_peak'] = None
    item['rg_album_gain'] = None
    item.write()
    item.store()
class ReplayGainCliTestBase(TestHelper):
    """Shared CLI tests executed once per ReplayGain backend.

    Concrete subclasses set the class attribute `backend` to the backend
    name to configure before the plugin is loaded.
    """

    def setUp(self):
        self.setup_beets()
        self.config['replaygain']['backend'] = self.backend

        try:
            self.load_plugins('replaygain')
        except Exception:
            import sys
            # store exception info so an error in teardown does not swallow it
            exc_info = sys.exc_info()
            try:
                self.teardown_beets()
                self.unload_plugins()
            except Exception:
                # if load_plugins() failed then setup is incomplete and
                # teardown operations may fail. In particular # {Item,Album}
                # may not have the _original_types attribute in unload_plugins
                pass
            # BUG FIX: six.reraise expects (type, value, traceback).
            # Passing the exception instance as the type with value=None made
            # six try to *call* the instance, raising a TypeError and masking
            # the original failure.
            six.reraise(exc_info[0], exc_info[1], exc_info[2])

        album = self.add_album_fixture(2)
        for item in album.items():
            reset_replaygain(item)

    def tearDown(self):
        self.teardown_beets()
        self.unload_plugins()

    def _reset_replaygain(self, item):
        """Clear every ReplayGain/R128 field on `item` and persist it."""
        item['rg_track_peak'] = None
        item['rg_track_gain'] = None
        item['rg_album_peak'] = None
        item['rg_album_gain'] = None
        item['r128_track_gain'] = None
        item['r128_album_gain'] = None
        item.write()
        item.store()

    def test_cli_saves_track_gain(self):
        """The replaygain command stores track gain/peak in db and tags."""
        for item in self.lib.items():
            self.assertIsNone(item.rg_track_peak)
            self.assertIsNone(item.rg_track_gain)
            mediafile = MediaFile(item.path)
            self.assertIsNone(mediafile.rg_track_peak)
            self.assertIsNone(mediafile.rg_track_gain)

        self.run_command('replaygain')

        # Skip the test if rg_track_peak and rg_track gain is None, assuming
        # that it could only happen if the decoder plugins are missing.
        if all(i.rg_track_peak is None and i.rg_track_gain is None
               for i in self.lib.items()):
            self.skipTest(u'decoder plugins could not be loaded.')

        for item in self.lib.items():
            self.assertIsNotNone(item.rg_track_peak)
            self.assertIsNotNone(item.rg_track_gain)
            mediafile = MediaFile(item.path)
            self.assertAlmostEqual(
                mediafile.rg_track_peak, item.rg_track_peak, places=6)
            self.assertAlmostEqual(
                mediafile.rg_track_gain, item.rg_track_gain, places=2)

    def test_cli_skips_calculated_tracks(self):
        """A second run must not recompute gain for analysed tracks."""
        self.run_command(u'replaygain')
        item = self.lib.items()[0]
        peak = item.rg_track_peak
        item.rg_track_gain = 0.0
        self.run_command(u'replaygain')
        self.assertEqual(item.rg_track_gain, 0.0)
        self.assertEqual(item.rg_track_peak, peak)

    def test_cli_saves_album_gain_to_file(self):
        """Album mode writes one shared, non-zero album gain/peak to tags."""
        for item in self.lib.items():
            mediafile = MediaFile(item.path)
            self.assertIsNone(mediafile.rg_album_peak)
            self.assertIsNone(mediafile.rg_album_gain)

        self.run_command(u'replaygain', u'-a')

        peaks = []
        gains = []
        for item in self.lib.items():
            mediafile = MediaFile(item.path)
            peaks.append(mediafile.rg_album_peak)
            gains.append(mediafile.rg_album_gain)

        # Make sure they are all the same
        self.assertEqual(max(peaks), min(peaks))
        self.assertEqual(max(gains), min(gains))

        self.assertNotEqual(max(gains), 0.0)
        self.assertNotEqual(max(peaks), 0.0)

    def test_cli_writes_only_r128_tags(self):
        """Opus files get R128_* tags and no REPLAYGAIN_* tags."""
        if self.backend == "command":
            # opus not supported by command backend
            return

        album = self.add_album_fixture(2, ext="opus")
        for item in album.items():
            self._reset_replaygain(item)

        self.run_command(u'replaygain', u'-a')

        for item in album.items():
            mediafile = MediaFile(item.path)
            # does not write REPLAYGAIN_* tags
            self.assertIsNone(mediafile.rg_track_gain)
            self.assertIsNone(mediafile.rg_album_gain)
            # writes R128_* tags
            self.assertIsNotNone(mediafile.r128_track_gain)
            self.assertIsNotNone(mediafile.r128_album_gain)

    def test_target_level_has_effect(self):
        """Changing targetlevel must change the computed track gain."""
        item = self.lib.items()[0]

        def analyse(target_level):
            # Force re-analysis at the requested reference level.
            self.config['replaygain']['targetlevel'] = target_level
            self._reset_replaygain(item)
            self.run_command(u'replaygain', '-f')
            mediafile = MediaFile(item.path)
            return mediafile.rg_track_gain

        gain_relative_to_84 = analyse(84)
        gain_relative_to_89 = analyse(89)

        # check that second calculation did work
        if gain_relative_to_84 is not None:
            self.assertIsNotNone(gain_relative_to_89)
            self.assertNotEqual(gain_relative_to_84, gain_relative_to_89)
@unittest.skipIf(not GST_AVAILABLE, u'gstreamer cannot be found')
class ReplayGainGstCliTest(ReplayGainCliTestBase, unittest.TestCase):
    # Runs the shared CLI tests against the GStreamer backend.
    backend = u'gstreamer'
    def setUp(self):
        """Skip early when the required GStreamer plugins cannot be loaded."""
        try:
            # Check if required plugins can be loaded by instantiating a
            # GStreamerBackend (via its .__init__).
            config['replaygain']['targetlevel'] = 89
            GStreamerBackend(config['replaygain'], None)
        except FatalGstreamerPluginReplayGainError as e:
            # Skip the test if plugins could not be loaded.
            self.skipTest(str(e))
        super(ReplayGainGstCliTest, self).setUp()
@unittest.skipIf(not GAIN_PROG_AVAILABLE, u'no *gain command found')
class ReplayGainCmdCliTest(ReplayGainCliTestBase, unittest.TestCase):
    # Runs the shared CLI tests against the mp3gain/aacgain command backend.
    backend = u'command'
@unittest.skipIf(not LOUDNESS_PROG_AVAILABLE, u'bs1770gain cannot be found')
class ReplayGainLdnsCliTest(ReplayGainCliTestBase, unittest.TestCase):
    # Runs the shared CLI tests against the bs1770gain backend.
    backend = u'bs1770gain'
class ReplayGainLdnsCliMalformedTest(TestHelper, unittest.TestCase):
    """Exercise the bs1770gain backend against malformed tool output."""

    @patch('beetsplug.replaygain.call')
    def setUp(self, call_patch):
        self.setup_beets()
        self.config['replaygain']['backend'] = 'bs1770gain'

        # Patch call to return nothing, bypassing the bs1770gain installation
        # check.
        call_patch.return_value = CommandOutput(stdout=b"", stderr=b"")
        try:
            self.load_plugins('replaygain')
        except Exception:
            import sys
            exc_info = sys.exc_info()
            try:
                self.tearDown()
            except Exception:
                pass
            # BUG FIX: six.reraise expects (type, value, traceback); passing
            # the instance as the type with value=None made six call the
            # instance and raise a TypeError instead of the original error.
            six.reraise(exc_info[0], exc_info[1], exc_info[2])

        for item in self.add_album_fixture(2).items():
            reset_replaygain(item)

    def tearDown(self):
        self.teardown_beets()
        self.unload_plugins()

    @patch('beetsplug.replaygain.call')
    def test_malformed_output(self, call_patch):
        """One malformed-XML warning must be logged per analysed track."""
        # Return malformed XML (the ampersand should be &amp;)
        call_patch.return_value = CommandOutput(stdout="""
            <album>
              <track total="1" number="1" file="&">
                <integrated lufs="0" lu="0" />
                <sample-peak spfs="0" factor="0" />
              </track>
            </album>
        """, stderr="")

        with capture_log('beets.replaygain') as logs:
            self.run_command('replaygain')

        # Count how many lines match the expected error.
        matching = [line for line in logs if
                    'malformed XML' in line]
        self.assertEqual(len(matching), 2)
@unittest.skipIf(not FFMPEG_AVAILABLE, u'ffmpeg cannot be found')
class ReplayGainFfmpegTest(ReplayGainCliTestBase, unittest.TestCase):
    # Runs the shared CLI tests against the ffmpeg backend.
    backend = u'ffmpeg'
def suite():
    """Collect every test defined in this module into a test suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| |
from __future__ import absolute_import
import urwid
from netlib import http
from . import common, signals
def _mkhelp():
    """Build the formatted key-binding help text for the flow list view."""
    keys = [
        ("A", "accept all intercepted flows"),
        ("a", "accept this intercepted flow"),
        ("b", "save request/response body"),
        ("C", "clear flow list or eventlog"),
        ("d", "delete flow"),
        ("D", "duplicate flow"),
        ("e", "toggle eventlog"),
        ("F", "toggle follow flow list"),
        ("l", "set limit filter pattern"),
        ("L", "load saved flows"),
        ("n", "create a new request"),
        ("P", "copy flow to clipboard"),
        ("r", "replay request"),
        ("V", "revert changes to request"),
        ("w", "save flows "),
        ("W", "stream flows to file"),
        ("X", "kill and delete flow, even if it's mid-intercept"),
        ("tab", "tab between eventlog and flow list"),
        ("enter", "view flow"),
        ("|", "run script on this flow"),
    ]
    text = []
    text.extend(common.format_keyvals(keys, key="key", val="text", indent=4))
    return text
# Help text shown by the help view for this window.
help_context = _mkhelp()
# Status-bar hint rendered at the bottom of the flow list.
footer = [
    ('heading_key', "?"), ":help ",
]
class EventListBox(urwid.ListBox):
    # Scrollable view over the master's event log.
    def __init__(self, master):
        self.master = master
        urwid.ListBox.__init__(self, master.eventlist)
    def keypress(self, size, key):
        key = common.shortcuts(key)
        if key == "C":
            # Clear the log and swallow the keystroke (pass None to super).
            self.master.clear_events()
            key = None
        elif key == "G":
            # Jump to the first (oldest) entry; key still reaches super.
            self.set_focus(0)
        elif key == "g":
            # Jump to the last (newest) entry; key still reaches super.
            self.set_focus(len(self.master.eventlist) - 1)
        return urwid.ListBox.keypress(self, size, key)
class BodyPile(urwid.Pile):
    # Vertical split: flow list on top, event log (in a Frame) below.
    def __init__(self, master):
        h = urwid.Text("Event log")
        h = urwid.Padding(h, align="left", width=("relative", 100))
        self.inactive_header = urwid.AttrWrap(h, "heading_inactive")
        self.active_header = urwid.AttrWrap(h, "heading")
        urwid.Pile.__init__(
            self,
            [
                FlowListBox(master),
                urwid.Frame(
                    EventListBox(master),
                    header = self.inactive_header
                )
            ]
        )
        self.master = master
    def keypress(self, size, key):
        if key == "tab":
            # Cycle focus between the two panes and restyle the log header
            # to show which pane is active.
            self.focus_position = (
                self.focus_position + 1) % len(self.widget_list)
            if self.focus_position == 1:
                self.widget_list[1].header = self.active_header
            else:
                self.widget_list[1].header = self.inactive_header
            key = None
        elif key == "e":
            self.master.toggle_eventlog()
            key = None

        # This is essentially a copypasta from urwid.Pile's keypress handler.
        # So much for "closed for modification, but open for extension".
        item_rows = None
        if len(size) == 2:
            item_rows = self.get_item_rows(size, focus = True)
        i = self.widget_list.index(self.focus_item)
        tsize = self.get_item_size(size, i, True, item_rows)
        return self.focus_item.keypress(tsize, key)
class ConnectionItem(urwid.WidgetWrap):
    # One row of the flow list; handles all per-flow key bindings.
    def __init__(self, master, state, flow, focus):
        self.master, self.state, self.flow = master, state, flow
        self.f = focus  # whether this row currently has focus
        w = self.get_text()
        urwid.WidgetWrap.__init__(self, w)
    def get_text(self):
        # Render this flow via the shared formatter.
        return common.format_flow(
            self.flow,
            self.f,
            hostheader = self.master.showhost
        )
    def selectable(self):
        return True
    def save_flows_prompt(self, k):
        # Callback for the "Save" one-key prompt: "a" = all flows,
        # anything else = just this flow.
        if k == "a":
            signals.status_prompt_path.send(
                prompt = "Save all flows to",
                callback = self.master.save_flows
            )
        else:
            signals.status_prompt_path.send(
                prompt = "Save this flow to",
                callback = self.master.save_one_flow,
                args = (self.flow,)
            )
    def stop_server_playback_prompt(self, a):
        # Anything but an explicit "n" stops the running server replay.
        if a != "n":
            self.master.stop_server_playback()
    def server_replay_prompt(self, k):
        # Callback for the "Server Replay" prompt: "a" = replay the whole
        # view, "t" = this flow only, otherwise prompt for a file path.
        if k == "a":
            self.master.start_server_playback(
                [i.copy() for i in self.master.state.view],
                self.master.killextra, self.master.rheaders,
                False, self.master.nopop,
                self.master.options.replay_ignore_params,
                self.master.options.replay_ignore_content,
                self.master.options.replay_ignore_payload_params,
                self.master.options.replay_ignore_host
            )
        elif k == "t":
            self.master.start_server_playback(
                [self.flow.copy()],
                self.master.killextra, self.master.rheaders,
                False, self.master.nopop,
                self.master.options.replay_ignore_params,
                self.master.options.replay_ignore_content,
                self.master.options.replay_ignore_payload_params,
                self.master.options.replay_ignore_host
            )
        else:
            signals.status_prompt_path.send(
                prompt = "Server replay path",
                callback = self.master.server_playback_path
            )
    def mouse_event(self, size, event, button, col, row, focus):
        # Left click opens the flow view (only once a request exists).
        if event == "mouse press" and button == 1:
            if self.flow.request:
                self.master.view_flow(self.flow)
                return True
    def keypress(self, xxx_todo_changeme, key):
        (maxcol,) = xxx_todo_changeme
        key = common.shortcuts(key)
        if key == "a":
            self.flow.accept_intercept(self.master)
            signals.flowlist_change.send(self)
        elif key == "d":
            self.flow.kill(self.master)
            self.state.delete_flow(self.flow)
            signals.flowlist_change.send(self)
        elif key == "D":
            f = self.master.duplicate_flow(self.flow)
            self.master.view_flow(f)
        elif key == "r":
            r = self.master.replay_request(self.flow)
            if r:
                signals.status_message.send(message=r)
            signals.flowlist_change.send(self)
        elif key == "S":
            # Toggle-style: start server replay, or offer to stop a
            # replay that is already running.
            if not self.master.server_playback:
                signals.status_prompt_onekey.send(
                    prompt = "Server Replay",
                    keys = (
                        ("all flows", "a"),
                        ("this flow", "t"),
                        ("file", "f"),
                    ),
                    callback = self.server_replay_prompt,
                )
            else:
                signals.status_prompt_onekey.send(
                    prompt = "Stop current server replay?",
                    keys = (
                        ("yes", "y"),
                        ("no", "n"),
                    ),
                    callback = self.stop_server_playback_prompt,
                )
        elif key == "V":
            if not self.flow.modified():
                signals.status_message.send(message="Flow not modified.")
                return
            self.state.revert(self.flow)
            signals.flowlist_change.send(self)
            signals.status_message.send(message="Reverted.")
        elif key == "w":
            signals.status_prompt_onekey.send(
                self,
                prompt = "Save",
                keys = (
                    ("all flows", "a"),
                    ("this flow", "t"),
                ),
                callback = self.save_flows_prompt,
            )
        elif key == "X":
            self.flow.kill(self.master)
        elif key == "enter":
            if self.flow.request:
                self.master.view_flow(self.flow)
        elif key == "|":
            signals.status_prompt_path.send(
                prompt = "Send flow to script",
                callback = self.master.run_script_once,
                args = (self.flow,)
            )
        elif key == "P":
            common.ask_copy_part("a", self.flow, self.master, self.state)
        elif key == "b":
            common.ask_save_body(None, self.master, self.state, self.flow)
        else:
            # Unhandled keys propagate to the enclosing widget.
            return key
class FlowListWalker(urwid.ListWalker):
    """Adapts the master's flow state to urwid's ListWalker interface."""
    def __init__(self, master, state):
        self.master = master
        self.state = state
        signals.flowlist_change.connect(self.sig_flowlist_change)
    def sig_flowlist_change(self, sender):
        # The flow list changed somewhere - tell urwid to redraw us.
        self._modified()
    def _wrap(self, flow, focused):
        # Build a row widget for a flow; a falsy flow stays None.
        if not flow:
            return None
        return ConnectionItem(self.master, self.state, flow, focused)
    def get_focus(self):
        flow, idx = self.state.get_focus()
        return self._wrap(flow, True), idx
    def set_focus(self, focus):
        return self.state.set_focus(focus)
    def get_next(self, pos):
        flow, idx = self.state.get_next(pos)
        return self._wrap(flow, False), idx
    def get_prev(self, pos):
        flow, idx = self.state.get_prev(pos)
        return self._wrap(flow, False), idx
class FlowListBox(urwid.ListBox):
    """The scrolling list of flows plus its list-level keyboard shortcuts."""
    def __init__(self, master):
        self.master = master
        urwid.ListBox.__init__(
            self,
            FlowListWalker(master, master.state)
        )
    def get_method_raw(self, k):
        # Free-text method prompt callback; empty input aborts.
        if k:
            self.get_url(k)
    def get_method(self, k):
        # One-key method prompt callback: "e" asks for a custom method,
        # any other key maps to its predefined HTTP method option.
        if k == "e":
            signals.status_prompt.send(
                self,
                prompt = "Method",
                text = "",
                callback = self.get_method_raw
            )
        else:
            method = ""
            for i in common.METHOD_OPTIONS:
                if i[1] == k:
                    method = i[0].upper()
            self.get_url(method)
    def get_url(self, method):
        # Ask for the URL of the new request being composed.
        signals.status_prompt.send(
            prompt = "URL",
            text = "http://www.example.com/",
            callback = self.new_request,
            args = (method,)
        )
    def new_request(self, url, method):
        # Build a fresh request flow from the prompted URL and open it.
        parts = http.parse_url(str(url))
        if not parts:
            signals.status_message.send(message="Invalid Url")
            return
        scheme, host, port, path = parts
        f = self.master.create_request(method, scheme, host, port, path)
        self.master.view_flow(f)
    def keypress(self, size, key):
        # List-level shortcuts; anything unhandled falls through to urwid.
        key = common.shortcuts(key)
        if key == "A":
            self.master.accept_all()
            signals.flowlist_change.send(self)
        elif key == "C":
            self.master.clear_flows()
        elif key == "e":
            self.master.toggle_eventlog()
        elif key == "G":
            self.master.state.set_focus(0)
            signals.flowlist_change.send(self)
        elif key == "g":
            self.master.state.set_focus(self.master.state.flow_count())
            signals.flowlist_change.send(self)
        elif key == "l":
            signals.status_prompt.send(
                prompt = "Limit",
                text = self.master.state.limit_txt,
                callback = self.master.set_limit
            )
        elif key == "L":
            signals.status_prompt_path.send(
                self,
                prompt = "Load flows",
                callback = self.master.load_flows_callback
            )
        elif key == "n":
            signals.status_prompt_onekey.send(
                prompt = "Method",
                keys = common.METHOD_OPTIONS,
                callback = self.get_method
            )
        elif key == "F":
            self.master.toggle_follow_flows()
        elif key == "W":
            # Toggle streaming flows to a file.
            if self.master.stream:
                self.master.stop_stream()
            else:
                signals.status_prompt_path.send(
                    self,
                    prompt = "Stream flows to",
                    callback = self.master.start_stream_to_path
                )
        else:
            return urwid.ListBox.keypress(self, size, key)
| |
import os
from tesserocr import PSM, PyTessBaseAPI
import cv2
import numpy as np
from PIL import Image
from typing import List, Optional
from constants import SIDES_DIR
from cv_helpers import contour_bounding_box_for_contour, extract_color, four_point_transform,\
get_center_for_contour, get_classifier_directories, get_contours, inflate_classifier,\
ls_debug, rotate_image_180, rotate_image_clockwise, rotate_image_counter_clockwise,\
show
# Directory of the trained classifier that decides whether a glyph is a zero.
SERIAL_IS_ZERO_CLASSIFIER_DIR = os.path.join(SIDES_DIR, "serial", "is_zero")
# Classifier label -> "is this glyph a zero" (labels come from training data;
# presumably used to disambiguate "0" from "O" - TODO confirm).
LABEL_TO_IS_ZERO = {
    1: False,
    2: True,
}
def get_serial_number_from_side(side):
    # type: (np.array) -> Optional[List[str]]
    """Read the serial number characters from a side image.

    Returns the recognized characters as a list of strings, or None when
    no serial-number region could be located in the image.
    """
    cleaned = _get_cleaned_up_text_subsection(side)
    if cleaned is None:
        return None
    return _get_text_from_letters(_get_letters(cleaned))
def _get_cleaned_up_text_subsection(im):
    # type: (np.array) -> Optional[np.array]
    """Locate the serial-number text region, orient it upright, binarize it.

    The red label block next to the text is used as an orientation
    anchor. Returns a white-on-black binary image of the text strip, or
    None when either the red anchor or the text region cannot be found.
    """
    # Red hue wraps around the end of the hue range, so two masks are combined.
    red1 = extract_color(im, (0, 6), (200, 255), (100, 150))
    red2 = extract_color(im, (176, 180), (200, 255), (100, 150))
    red = red1 + red2
    # NOTE: Python 2 floor division - 45 / 2 evaluates to hue 22 here.
    color = extract_color(im, 45 / 2, (20, 50), (200, 255))
    # show(red, .25)
    # show(yellow, .25)
    # im = scale(im, .25)
    # color = scale(color, .25)
    red_contour = _get_box_for_largest_rect_contour(red)
    text_contour = _get_box_for_largest_rect_contour(color)
    if red_contour is None or text_contour is None:
        # if red_contour is not None:
        #     print "RED"
        #     show(get_drawn_contours(red, [red_contour], True))
        #     show(color)
        # if text_contour is not None:
        #     print "TEXT"
        #     show(get_drawn_contours(color, [text_contour], True))
        #     show(red)
        # if not (red_contour is None and text_contour is None):
        #     show(red)
        #     show(color)
        # assert red_contour is None and text_contour is None, \
        #     "Error parsing serial number, didn't find one of the text or its label."
        return None
    red_center = get_center_for_contour(red_contour)
    text_center = get_center_for_contour(text_contour)
    text_subsection = four_point_transform(im, text_contour)
    # show(get_drawn_contours(color, text_contour, True), .25)
    # show(get_drawn_contours(red, red_contour, True), .25)
    # show(text_subsection)
    height, width = im.shape[:2]
    # Rotation logic from http://stackoverflow.com/a/5912847/3000133
    if height > width:
        # Determine if red is left or right of text
        if text_center[0] < red_center[0]:
            text_subsection = rotate_image_counter_clockwise(text_subsection)
        else:
            # Rotate clockwise 90
            text_subsection = rotate_image_clockwise(text_subsection)
    else:
        if text_center[1] > red_center[1]:
            # We're fine
            pass
        else:
            # Rotate 180
            text_subsection = rotate_image_180(text_subsection)
    # show(get_drawn_contours(im, [text_contour], True))
    # show(text_subsection)
    text_subsection_gray = cv2.cvtColor(text_subsection, cv2.COLOR_BGR2GRAY)
    # show(text_subsection_gray)
    _, text_threshold = cv2.threshold(text_subsection_gray, 50, 255, 0)
    # Invert so the text becomes white on a black background.
    text_threshold = 255 - text_threshold
    # show(text_threshold)
    height, width = text_threshold.shape[:2]
    # Blank the top and bottom 10% of the strip (presumably to remove
    # border noise - confirm). Python 2 floor division on height.
    text_threshold[:height / 10, :] = 0
    text_threshold[9 * height / 10:, :] = 0
    return text_threshold
def _get_letters(text_threshold):
    # type: (np.array) -> List[np.array]
    """Slice the binarized serial-number strip into per-letter images.

    Letters are located by contour centers (left to right); each letter
    is cut out as a full-height window extending half the average
    inter-letter spacing to each side of its center.
    """
    height, width = text_threshold.shape[:2]
    contours, _ = cv2.findContours(text_threshold.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    # Drop specks: keep only contours at least a third of the strip height.
    contours = [c for c in contours if cv2.boundingRect(c)[3] > height / 3]
    centers = [get_center_for_contour(c) for c in contours]
    centers = sorted(centers, key=lambda x: x[0])
    distances = []
    for idx in range(len(centers) - 1):
        distances.append(centers[idx + 1][0] - centers[idx][0])
    # NOTE(review): raises ZeroDivisionError when fewer than two letters
    # survive the filter (distances is empty) - confirm callers only pass
    # strips with multiple characters.
    half_avg_dist = sum(distances) / (len(distances) * 2)
    # contours = [contour_bounding_box_for_contour(c) for c in contours]
    # show(get_drawn_contours(text_threshold, contours, True))
    letters = []
    for center in centers:
        x = center[0] - half_avg_dist
        y = 0
        w = half_avg_dist * 2
        h = height
        contour = np.array([
            [x, y],
            [x + w, y],
            [x + w, y + h],
            [x, y + h],
        ]).reshape((4, 1, 2))
        letters.append(four_point_transform(text_threshold, contour))
    return letters
def _get_text_from_letters(letters):
    # type: (List[np.array]) -> List[str]
    """OCR each letter image into a single character string.

    A dedicated classifier first decides whether the glyph is a zero
    (see LABEL_TO_IS_ZERO); everything else is handed to Tesseract in
    single-character mode with an uppercase-alphanumeric whitelist.
    """
    is_zero_classifier = inflate_classifier(SERIAL_IS_ZERO_CLASSIFIER_DIR)
    text = []
    with PyTessBaseAPI() as api:
        # Turn off Tesseract's dictionaries and adaptive matching: the
        # whitelist below shows serials are arbitrary A-Z/0-9 strings, so
        # word-level priors are not wanted.
        api.SetVariable("load_system_dawg", "F")
        api.SetVariable("load_freq_dawg", "F")
        api.SetVariable("load_punc_dawg", "F")
        api.SetVariable("load_number_dawg", "F")
        api.SetVariable("load_unambig_dawg", "F")
        api.SetVariable("load_bigram_dawg", "F")
        api.SetVariable("load_fixed_length_dawgs", "F")
        api.SetVariable("classify_enable_learning", "F")
        api.SetVariable("classify_enable_adaptive_matcher", "F")
        api.SetVariable("segment_penalty_garbage", "F")
        api.SetVariable("segment_penalty_dict_nonword", "F")
        api.SetVariable("segment_penalty_dict_frequent_word", "F")
        api.SetVariable("segment_penalty_dict_case_ok", "F")
        api.SetVariable("segment_penalty_dict_case_bad", "F")
        api.SetVariable("edges_use_new_outline_complexity", "T")
        api.SetVariable("tessedit_char_whitelist", "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
        api.SetPageSegMode(PSM.SINGLE_CHAR)
        for letter in letters:
            # Zeros are decided by the dedicated classifier, not Tesseract.
            if LABEL_TO_IS_ZERO[is_zero_classifier(letter)]:
                text.append("0")
                continue
            pil_image = Image.fromarray(letter)
            api.SetImage(pil_image)
            # show(np.array(api.GetThresholdedImage()))
            text.append(api.GetUTF8Text().replace("\n\n", ""))
    return text
def _get_box_for_largest_rect_contour(color):
    # type: (np.array) -> Optional[np.array]
    """Find the largest roughly-rectangular contour in a binary mask.

    Keeps only 4-vertex polygon approximations covering more than 0.5%
    of the image area, and returns the bounding-box contour of the
    largest one, or None when no candidate qualifies.
    """
    height, width = color.shape[:2]
    total_area = height * width
    quads = []
    for raw in get_contours(color):
        approx = cv2.approxPolyDP(raw, 0.05 * cv2.arcLength(raw, True), True)
        if len(approx) == 4 and cv2.contourArea(approx) / total_area > 0.005:
            quads.append(approx)
    if not quads:
        return None
    biggest = sorted(quads, key=cv2.contourArea)[-1]
    return contour_bounding_box_for_contour(biggest)
def _test():
    """Ad-hoc debug harness: run serial extraction over sample images."""
    vocab_path, unlabelled_dir, labelled_dir, features_dir, svm_data_dir = \
        get_classifier_directories(SERIAL_IS_ZERO_CLASSIFIER_DIR)
    letter_idx = 0
    i = 0
    group = 0
    found_in_group = 0
    # for path in ls(DATA_DIR + "module_classifier/unlabelled"):
    for path in ls_debug(1294, 1297):
        # if "-left.png" not in path:
        # if "0036-edge-bottom.png" not in path:
        #     continue
        # new_group = int(os.path.basename(path).split("-")[0])
        # if new_group % 3 != 0:
        #     continue
        # if new_group != group:
        #     if found_in_group != 1:
        #         "!!!! Found {} in group {} !!!!".format(found_in_group, group)
        #     found_in_group = 0
        #     group = new_group
        i += 1
        # if i == 1:
        #     continue
        # if i > 1:
        #     break
        print path
        im = cv2.imread(path)
        # Function-local import - presumably avoids a circular import at
        # module load time; confirm before hoisting to the top of the file.
        from sides import _extract_side
        im = _extract_side(im, "-bottom" in path)
        text = get_serial_number_from_side(im)
        if text is None:
            print "NO SERIAL NUMBER"
        else:
            print "-".join(text)
        text_threshold = _get_cleaned_up_text_subsection(im)
        if text_threshold is not None:
            show(text_threshold)
        # if text_threshold is None:
        #     continue
        # found_in_group += 1
        # letters = _get_letters(text_threshold)
        # for letter in letters:
        #     cv2.imwrite(os.path.join(unlabelled_dir, "{:05}.png".format(letter_idx)), letter)
        #     letter_idx += 1
if __name__ == '__main__':
    _test()
| |
#!/usr/bin/env python
#
# 88 88 ad88 88 88
# 88 "" d8" "" "" ,d
# 88 88 88
# 88,dPPYba, 88 MM88MMM 8b,dPPYba, ,adPPYba, ,adPPYba, MM88MMM
# 88P' "8a 88 88 88P' "Y8 a8" "8a I8[ "" 88
# 88 d8 88 88 88 8b d8 `"Y8ba, 88
# 88b, ,a8" 88 88 88 "8a, ,a8" aa ]8I 88,
# 8Y"Ybbd8"' 88 88 88 `"YbbdP"' `"YbbdP"' "Y888
#
# INTELLIGENT WEB APPLICATION FIREWALL
# by: Jan Seidl <jseidl@wroot.org>
#
# Global WAF instance
waf = None
#######################################
# Constants
#######################################
CONFIG_FILE = 'bifrost.conf'
DATABASE_FILE = 'bifrost.db'
# Operating modes: learn traffic shapes, enforce the learned model, or
# pass traffic through untouched.
MODE_TRAINING = 1
MODE_OPERATIONAL = 2
MODE_BYPASS = 3
# Actions applied to URLs/methods missing from the training database.
ACTION_DROP = 1
ACTION_PASS = 2
# Terminator of a chunked HTTP body (the zero-length final chunk).
CHUNK_END = '0\r\n\r\n'
#######################################
# Imports
#######################################
import ConfigParser, magic, Cookie
import sys, pprint, sqlite3, signal, textwrap
from twisted.internet import protocol, reactor
from BaseHTTPServer import BaseHTTPRequestHandler
import cgi
from httplib import HTTPResponse as HTTPR
from StringIO import StringIO
from twisted.python import log
#######################################
# Util functions
#######################################
def in_range(minval, maxval, value, tolerance):
    """Return True when value falls inside [minval, maxval] after each
    bound has been relaxed by its own value times the tolerance fraction.

    All arguments are coerced to float, so numeric strings are accepted.
    """
    val = float(value)
    tol = float(tolerance)
    hi = float(maxval)
    lo = float(minval)
    hi += hi * tol
    lo -= lo * tol
    return lo <= val <= hi
def in_average(mean, value, tolerance):
    """Return True when value is within mean +/- mean*tolerance."""
    spread = mean * tolerance
    return in_range(mean - spread, mean + spread, value, 0.0)
# http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
    """ANSI terminal escape codes used to color console log output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset color back to the terminal default
# http://pythonwise.blogspot.com.br/2010/02/parse-http-response.html
class FakeSocket(StringIO):
    """Minimal socket stand-in so httplib can parse a raw response string."""
    def makefile(self, *args, **kw):
        # httplib calls sock.makefile() to obtain a readable file object;
        # the StringIO itself plays that role.
        return self
#######################################
# HTTP Request and Response Classes
#######################################
# http://stackoverflow.com/questions/2115410/does-python-have-a-module-for-parsing-http-requests-and-responses
class HTTPRequest(BaseHTTPRequestHandler):
    """Parse a raw HTTP request string via BaseHTTPRequestHandler.

    Parsing errors are captured in error_code/error_message instead of
    being written back to a socket; POST bodies are exposed via
    ``self.form`` (a cgi.FieldStorage for POSTs, an empty dict otherwise).
    """
    # Kept for backward compatibility with code that reads the attribute
    # off the class; instances get their own dict in __init__ below.
    form = {}
    def __init__(self, request_text):
        self.rfile = StringIO(request_text)
        self.raw_requestline = self.rfile.readline()
        self.error_code = self.error_message = None
        # BUG FIX: previously non-POST requests all aliased the single
        # class-level dict, so any mutation leaked across instances.
        # Give every instance its own form mapping.
        self.form = {}
        self.parse_request()
        if self.command == 'POST':
            # Parse the form data posted
            self.form = cgi.FieldStorage(
                fp=self.rfile,
                headers=self.headers,
                environ={'REQUEST_METHOD': 'POST',
                         'CONTENT_TYPE': self.headers['Content-Type'],
                         })
    def send_error(self, code, message):
        # Record the failure instead of emitting an HTTP error response.
        self.error_code = code
        self.error_message = message
class HTTPResponse():
    """Parse a raw HTTP response string using httplib's parser.

    Exposes the response headers as a dict plus the Content-Length and
    Transfer-Encoding header values (None when absent).
    """
    # Class-level defaults kept for backward compatibility; every
    # instance receives its own attributes in __init__.
    headers = {}
    fp = None
    length = None
    chunked = None
    def __init__(self, response_text):
        self.fp = FakeSocket(response_text)
        res = HTTPR(self.fp)
        res.begin()
        # BUG FIX: assigning into self.headers without rebinding it first
        # mutated the single class-level dict, so headers accumulated
        # across every HTTPResponse ever parsed. Use a fresh dict.
        self.headers = {}
        for name, value in res.getheaders():
            self.headers[name] = value
        self.length = res.getheader('Content-Length')
        self.chunked = res.getheader('Transfer-Encoding')
#######################################
# Endpoints
# Adapted from http://www.mostthingsweb.com/2013/08/a-basic-man-in-the-middle-proxy-with-twisted/
# Adapted from http://stackoverflow.com/a/15645169/221061
#######################################
##
# ServerProtocol = Client -> WAF | WAF -> Client
##
class ServerProtocol(protocol.Protocol):
    """Client-facing leg of the proxy: client -> WAF and WAF -> client."""
    def __init__(self):
        self.buffer = None   # data received before the backend leg exists
        self.client = None   # the ClientProtocol once its connection is up
    def connectionMade(self):
        # Open the corresponding backend connection for this client.
        factory = protocol.ClientFactory()
        factory.protocol = ClientProtocol
        factory.server = self
        reactor.connectTCP(waf.server_addr, waf.server_port, factory)
    def drop_connection(self):
        # FIX ME -- NEED TO CLOSE THIS BOTH ENDS
        print bcolors.FAIL + "Dropping connection." + bcolors.ENDC
        self.transport.loseWriteConnection()
        self.transport.loseConnection()
        self.transport.abortConnection()
    # Client => Proxy
    def dataReceived(self, data):
        # Forward to the backend leg, or hold the data until it connects.
        if self.client:
            self.client.write(data)
        else:
            self.buffer = data
    # Proxy => Client
    def write(self, data):
        self.transport.write(data)
##
# ClientProtocol = Server -> WAF | WAF -> Server
##
class ClientProtocol(protocol.Protocol):
    """Backend-facing leg of the proxy: server -> WAF and WAF -> server.

    Buffers complete requests/responses, records their shape in training
    mode, and scores them against the learned model in operational mode.
    """
    client_request = None    # HTTPRequest parsed from the buffered request
    response_buffer = None   # raw response bytes accumulated so far
    request_buffer = None    # raw request bytes accumulated so far
    chunked = False          # currently collecting a chunked response body
    request_size = 0
    def connectionMade(self):
        # Register with the client leg and flush anything the client sent
        # before this backend connection was established.
        self.factory.server.client = self
        self.write(self.factory.server.buffer)
        self.factory.server.buffer = ''
    def valid_files(self, post):
        # Check uploaded files' MIME types against the learned whitelist.
        mimes = []
        # Get uploaded files' mime
        for field in post.keys():
            if post[field].filename:
                file_data = post[field].file.read()
                mime = waf.magic.buffer(file_data)
                mimes.append(mime)
        dataset = self.fetch_set('uploads', 'mimetype', None, path_only = True)
        for mime in set(mimes):
            if mime not in dataset:
                print bcolors.WARNING + "[ANOMALY] File mimetype '%s' not allowed on requests for this URL ('%s')." % (mime, self.client_request.path) + bcolors.ENDC
                return False
        return True
    def valid_post(self, post_keys, r_type):
        # Check POST field names against the learned whitelist for this URL.
        dataset = self.fetch_set('postdata', 'field', None, path_only = True)
        for post in post_keys:
            post_name = post.strip().lower()
            if post_name not in dataset:
                print bcolors.WARNING + "[ANOMALY] POST field '%s' not allowed on %s for this URL ('%s')." % (post_name, 'requests' if r_type == 'req' else 'responses', self.client_request.path) + bcolors.ENDC
                return False
        return True
    def valid_headers(self, header_keys, r_type):
        # Check header names against the learned set for this URL/method.
        dataset = self.fetch_set('headers', 'header', r_type)
        for header in header_keys:
            header_name = header.strip().lower()
            if header_name not in dataset:
                print bcolors.WARNING + "[ANOMALY] Header '%s' not allowed on %s for this URL/method ('%s','%s')." % (header_name, 'requests' if r_type == 'req' else 'responses', self.client_request.command, self.client_request.path) + bcolors.ENDC
                return False
        return True
    def valid_cookies(self, cookie_str, r_type):
        # Check cookie names against the learned set for this URL/method.
        ck = Cookie.SimpleCookie()
        ck.load(cookie_str)
        dataset = self.fetch_set('cookies', 'cookie', r_type)
        for cookie in ck.keys():
            cookie_name = cookie.strip().lower()
            if cookie_name not in dataset:
                print bcolors.WARNING + "[ANOMALY] Cookie '%s' not allowed on %s for this URL/method ('%s','%s')." % (cookie_name, 'requests' if r_type == 'req' else 'responses', self.client_request.command, self.client_request.path) + bcolors.ENDC
                return False
        return True
    def valid_range(self, name, ranges, value):
        # Validate a metric against either the learned average or the
        # learned min/max window, per the analyzer config for "name".
        tolerance = waf.config.get('tolerance', name)
        ret = True
        if waf.config.get('analyzer', name) == 'avg':
            ret = in_average(ranges[2], value, tolerance)
        else:
            ret = in_range(ranges[0], ranges[1], value, tolerance)
        return ret
    def fetch_set(self, table, field, r_type, path_only=False):
        # Fetch the learned value set for this request's path (and, unless
        # path_only, its method and req/resp type).
        # NOTE(review): table/field are %-interpolated into the SQL; they
        # are internal constants at every call site in this file, never
        # user input, and the data values are bound parameters.
        items = []
        if path_only:
            cursor = waf.cursor.execute("SELECT %s FROM %s WHERE path = ?" % (field, table), (self.client_request.path,))
        else:
            cursor = waf.cursor.execute("SELECT %s FROM %s WHERE path = ? and method = ? AND type = ?" % (field, table), (self.client_request.path, self.client_request.command, r_type))
        for row in cursor:
            items.append(row[0])
        return set(items)
    def fetch_averages(self, path, r_type):
        # Get averages
        # Returns a 9-tuple: (min, max, avg) for header qty, header size
        # and content size, in that order.
        query = "SELECT MIN(headers_qty) as min_hqty, \
                MAX(headers_qty) as max_hqty, \
                AVG(headers_qty) as hqty, \
                MIN(headers_size) as mix_hsize, \
                MAX(headers_size) as max_hsize, \
                AVG(headers_size) as hsize, \
                MIN(content_size) as min_csize, \
                MAX(content_size) as max_csize, \
                AVG(content_size) as csize \
                FROM urls WHERE path = ? AND type = ?"
        waf.cursor.execute(query, (path,r_type))
        return waf.cursor.fetchone()
    def analyzeRequest(self):
        # Score the buffered request against the learned model; returns
        # False when the connection should be dropped.
        score = 0
        request = self.client_request
        command = request.command
        path = request.path
        # Check if page can be acessed within given method
        waf.cursor.execute("SELECT method FROM urls WHERE path = ? AND method = ? AND type = 'req' GROUP BY METHOD", (path, command) );
        methods = waf.cursor.fetchone()
        if methods is None:
            print bcolors.FAIL + "[ANOMALY] URL/method ('%s','%s') not in database." % (command, path) + bcolors.ENDC
            if waf.unknown_urls_action == ACTION_PASS:
                return True
            else:
                self.request_buffer = None
                return False
        averages = self.fetch_averages(path, 'req')
        # Content SIZE
        header_size = len(str(request.headers))
        content_size = (len(self.request_buffer)-header_size)
        if not self.valid_range('request_content_size', averages[6:9], content_size):
            print bcolors.WARNING + "[ANOMALY] URL '%s' has an unexpected request content size (%d)." % (path, content_size) + bcolors.ENDC
            score += waf.config.getint('scorer', 'request_content_size')
        # Check for valid cookies
        if waf.config.getint('analyzer', 'request_cookies') == 1 and 'Cookie' in request.headers:
            if not self.valid_cookies(request.headers['Cookie'], 'req'):
                score += waf.config.getint('scorer', 'request_cookies')
        # Header sanity
        if waf.config.getint('analyzer', 'request_headers') == 1:
            if not self.valid_headers(request.headers.keys(), 'req'):
                score += waf.config.getint('scorer', 'request_headers')
        # POST sanity
        if command == 'POST' and waf.config.getint('analyzer', 'request_postdata') == 1:
            if not self.valid_post(request.form.keys(), 'req'):
                score += waf.config.getint('scorer', 'request_postdata')
        # Uploaded File MIME type sanity
        if command == 'POST' and waf.config.getint('analyzer', 'upload_filetype') == 1:
            if not self.valid_files(request.form):
                score += waf.config.getint('scorer', 'upload_filetype')
        threshold = waf.config.getint('enforcer', 'request_threshold')
        if score > threshold:
            print bcolors.FAIL + "[THREAT] URL '%s' scored as malicious (%d/%d)." % (path, score, threshold) + bcolors.ENDC
            if waf.config.get('enforcer', 'action') == 'drop':
                return False
        return True
    def analyzeResponse(self, response):
        # Score the buffered response against the learned model; returns
        # False when the connection should be dropped.
        command = self.client_request.command
        path = self.client_request.path
        # Check if page can be acessed within given method
        waf.cursor.execute("SELECT method FROM urls WHERE path = ? AND method = ? AND type = 'resp' GROUP BY METHOD", (path, command) );
        methods = waf.cursor.fetchone()
        if methods is None:
            print bcolors.WARNING + "[ANOMALY] URL/method ('%s','%s') not in database." % (command, path) + bcolors.ENDC
            if waf.unknown_urls_action == ACTION_PASS:
                return True
            else:
                self.response_buffer = None
                return False
        averages = self.fetch_averages(path, 'resp')
        score = 0
        # Header QTY
        header_qty = len(response.headers)
        if not self.valid_range('response_header_qty', averages[0:3], header_qty):
            print bcolors.WARNING + "[ANOMALY] URL '%s' has an unexpected response header quantity (%d)." % (path, header_qty) + bcolors.ENDC
            score += waf.config.getint('scorer', 'response_header_qty')
        # Header SIZE
        header_size = len(str(response.headers))
        if not self.valid_range('response_header_size', averages[3:6], header_size):
            print bcolors.WARNING + "[ANOMALY] URL '%s' has an unexpected response header size (%d)." % (path, header_size) + bcolors.ENDC
            score += waf.config.getint('scorer', 'response_header_size')
        # Content SIZE
        content_size = (len(self.response_buffer)-header_size)
        if not self.valid_range('response_content_size', averages[6:9], content_size):
            print bcolors.WARNING + "[ANOMALY] URL '%s' has an unexpected response content size (%d)." % (path, content_size) + bcolors.ENDC
            score += waf.config.getint('scorer', 'response_content_size')
        # Cookies
        if waf.config.getint('analyzer', 'response_cookies') == 1 and 'set-cookie' in response.headers:
            if not self.valid_cookies(response.headers['set-cookie'], 'resp'):
                score += waf.config.getint('scorer', 'response_cookies')
        # Header sanity
        if waf.config.getint('analyzer', 'response_headers') == 1:
            if not self.valid_headers(response.headers.keys(), 'resp'):
                score += waf.config.getint('scorer', 'response_headers')
        threshold = waf.config.getint('enforcer', 'response_threshold')
        if score > threshold:
            print bcolors.FAIL + "[THREAT] URL '%s' scored as malicious (%d/%d)." % (path, score, threshold) + bcolors.ENDC
            if waf.config.get('enforcer', 'action') == 'drop':
                return False
        return True
    # Server => Proxy
    def dataReceived(self, data):
        # Accumulate the backend response; once complete (including all
        # chunks for chunked transfer encoding), analyze and/or learn it,
        # then relay it to the client.
        if waf.mode == MODE_BYPASS:
            self.factory.server.write(data)
            return False
        if self.response_buffer is None:
            self.response_buffer = data
        else:
            self.response_buffer += data
        # All chunks received
        if self.chunked and data.endswith(CHUNK_END):
            self.chunked = False
        elif self.chunked:
            return True
        response = HTTPResponse(self.response_buffer)
        if not hasattr(response, 'headers'):
            print bcolors.FAIL + '[ANOMALY] Malformed response.' + bcolors.ENDC
            self.factory.server.drop_connection()
            self.response_buffer = None
            return False
        # Chunked starts
        if response.chunked is not None and len(self.response_buffer) == len(data):
            self.chunked = True
            return True
        if waf.mode == MODE_OPERATIONAL:
            if not self.analyzeResponse(response):
                self.factory.server.drop_connection()
                self.response_buffer = None
                return False
        header_qty = len(response.headers)
        header_size = len(str(response.headers))
        content_size = (len(self.response_buffer)-header_size)
        print bcolors.OKGREEN + "[RESPONSE] %s %s (HEADERS: %d, HEADERSIZE: %s, CONTENTSIZE %s)" % (self.client_request.command, self.client_request.path, header_qty, header_size, content_size) + bcolors.ENDC
        if waf.mode == MODE_TRAINING:
            self.learnResponse(response)
        self.factory.server.write(self.response_buffer)
        self.response_buffer = None
    def learnResponse(self, response):
        # Record this response's size metrics, cookies and headers as
        # "normal" traffic for the URL/method.
        header_qty = len(response.headers)
        header_size = len(str(response.headers))
        content_size = (len(self.response_buffer)-header_size)
        waf.cursor.execute('INSERT INTO urls VALUES (?, ?, ?, ?, ?, ?, ?, ?)', (self.client_request.path, self.client_request.command, header_qty, header_size, content_size, None, None, 'resp'))
        # Check for cookies
        cookies = []
        if 'set-cookie' in response.headers:
            ck = Cookie.SimpleCookie()
            ck.load(response.headers['set-cookie'])
            for cookie in ck.keys():
                cookie_name = cookie.strip().lower()
                cookies.append((self.client_request.path, self.client_request.command, cookie_name, 'resp'))
        headers = []
        for header in response.headers:
            header_name = header.strip().lower()
            headers.append((self.client_request.path, self.client_request.command, header_name, 'resp'))
        waf.cursor.executemany('INSERT OR IGNORE INTO cookies VALUES (?, ?, ?, ?)', cookies)
        waf.cursor.executemany('INSERT OR IGNORE INTO headers VALUES (?, ?, ?, ?)', headers)
        waf.conn.commit()
    def learnRequest(self, data):
        # Record this request's size metrics, cookies, headers, POST
        # fields and upload MIME types as "normal" traffic.
        request = self.client_request
        header_qty = len(request.headers)
        header_size = len(str(request.headers))
        content_size = (len(data)-header_size)
        waf.cursor.execute('INSERT INTO urls VALUES (?, ?, ?, ?, ?, ?, ?, ?)', (request.path, request.command, header_qty, header_size, content_size, None, None, 'req'))
        headers = []
        cookies = []
        postdata = []
        mimes = []
        if 'Cookie' in self.client_request.headers:
            ck = Cookie.SimpleCookie()
            ck.load(self.client_request.headers['Cookie'])
            for cookie in ck.keys():
                cookie_name = cookie.strip().lower()
                cookies.append((self.client_request.path, self.client_request.command, cookie_name, 'req'))
        for header in self.client_request.headers:
            header_name = header.strip().lower()
            headers.append((self.client_request.path, self.client_request.command, header_name, 'req'))
        if request.command == "POST":
            for field in request.form.keys():
                if request.form[field].filename:
                    file_data = request.form[field].file.read()
                    mime = waf.magic.buffer(file_data)
                    mimes.append((self.client_request.path, mime))
                field_name = field.strip().lower()
                postdata.append((self.client_request.path, field_name))
        waf.cursor.executemany('INSERT OR IGNORE INTO cookies VALUES (?, ?, ?, ?)', cookies)
        waf.cursor.executemany('INSERT OR IGNORE INTO headers VALUES (?, ?, ?, ?)', headers)
        waf.cursor.executemany('INSERT OR IGNORE INTO postdata VALUES (?, ?)', postdata)
        waf.cursor.executemany('INSERT OR IGNORE INTO uploads VALUES (?, ?)', set(mimes))
        waf.conn.commit()
    # Proxy => Server
    def write(self, data):
        # Accumulate the client request until the full Content-Length has
        # arrived, then analyze and/or learn it and forward it upstream.
        if data:
            if waf.mode == MODE_BYPASS:
                self.transport.write(data)
                return True
            if self.request_buffer is None:
                self.request_buffer = data
            else:
                self.request_buffer += data
            request = HTTPRequest(self.request_buffer)
            if not hasattr(request, 'headers') or not hasattr(request, 'path') or not hasattr(request, 'command'):
                print bcolors.FAIL + '[ANOMALY] Malformed request.' + bcolors.ENDC
                self.factory.server.drop_connection()
                self.request_buffer = None
                return False
            self.client_request = request
            header_qty = len(request.headers)
            header_size = len(str(request.headers))
            content_size = (len(self.request_buffer)-header_size)
            if 'Content-Length' in request.headers:
                total_size = int(request.headers['Content-Length'])
                if content_size < total_size:
                    return True
            if waf.mode == MODE_OPERATIONAL:
                if not self.analyzeRequest():
                    self.factory.server.drop_connection()
                    self.request_buffer = None
                    return False
            print bcolors.OKBLUE + "[REQUEST] %s %s (HEADERS: %d, HEADERSIZE: %s, CONTENTSIZE %s)" % (request.command, request.path, header_qty, header_size, content_size) + bcolors.ENDC
            if waf.mode == MODE_TRAINING:
                self.learnRequest(self.request_buffer)
            self.transport.write(self.request_buffer)
            self.request_buffer = None
            return True
class WAF(object):
conn = None
config = None
mode = None
magic = None
unknown_ulrs_action = ACTION_DROP
listen_port = 0
server_addr = None
server_port = 0
def __init__(self):
self.config = ConfigParser.RawConfigParser()
self.init_config()
self.init_logging()
self.init_magic()
self.init_db()
def print_banner(self):
print textwrap.dedent("""\
88 88 ad88 88 88
88 "" d8" "" "" ,d
88 88 88
88,dPPYba, 88 MM88MMM 8b,dPPYba, ,adPPYba, ,adPPYba, MM88MMM
88P\' "8a 88 88 88P\' "Y8 a8" "8a I8[ "" 88
88 d8 88 88 88 8b d8 `"Y8ba, 88
88b, ,a8" 88 88 88 "8a, ,a8" aa ]8I 88,
8Y"Ybbd8"\' 88 88 88 `"YbbdP"\' `"YbbdP"\' "Y888
Intelligent Web Application Firewall
by: Jan Seidl <jseidl@wroot.org>
""")
def start(self):
self.print_banner()
self.init_reactor()
def init_db(self):
self.conn = sqlite3.connect(DATABASE_FILE)
self.cursor = self.conn.cursor()
def init_magic(self):
self.magic = magic.open(magic.MAGIC_MIME)
self.magic.load()
def init_config(self):
try:
self.config.read(CONFIG_FILE)
# Mode
_mode = self.config.get('general', 'mode')
if _mode == 'training':
self.mode = MODE_TRAINING
elif _mode == 'operational':
self.mode = MODE_OPERATIONAL
else:
self.mode = MODE_BYPASS
# Unknown URLs
if self.config.get('general', 'unknown_urls') == 'drop':
self.unknown_urls_action = ACTION_DROP
else:
self.unknown_urls_action = ACTION_PASS
except Exception, e:
sys.stderr.write("No config file present %s" % str(e))
sys.exit(1)
def init_reactor(self):
factory = protocol.ServerFactory()
factory.protocol = ServerProtocol
self.listen_port = self.config.getint('general', 'listen_port')
self.server_addr = self.config.get('general', 'backend_ip')
self.server_port = self.config.getint('general', 'backend_port')
reactor.listenTCP(self.listen_port, factory)
print bcolors.HEADER + "BWAF listening at port %d (backend: %s:%d) [%s]" % (self.listen_port, self.server_addr, self.server_port, 'operational' if self.mode == MODE_OPERATIONAL else 'training') + bcolors.ENDC
reactor.run()
def init_logging(self):
log.startLogging(sys.stdout)
def __del__(self):
if self.conn is not None:
self.conn.close()
# Instantiated at import time: reads the config (exits when missing) and
# sets up logging, libmagic and the sqlite database.
waf = WAF()
def main():
    waf.start()
def reload_waf(signum, frame):
    # SIGHUP handler: re-read the configuration without restarting.
    print bcolors.WARNING + "Received Signal: %s at frame: %s" % (signum, frame) + bcolors.ENDC
    print bcolors.HEADER + "Reloading WAF configuration." + bcolors.ENDC
    waf.init_config()
# SIGHUP Reload Config trap
signal.signal(signal.SIGHUP, reload_waf)
if __name__ == '__main__':
    main()
| |
"""
Task abstract class
A Task is a unit of work, it has associated source code and
a product (a persistent object such as a table in a database),
it has a name and lives in a DAG
[WIP] On subclassing Tasks
Implementation details:
* params (dict), upstream (Param object)
* params vs constructor parameters
* params on render vs params on run
* Implementing Task.run (using the source object, product, TaskBuildError)
Optional:
* Validating PRODUCT_CLASSES_ALLOWED
* Validating upstream, product and params in code
* Using a client parameter
NOTE: Params trigger different data output (and should make tasks outdated),
Tasks constructor args (such as chunksize in SQLDump) should not change
the output, hence should not make tasks outdated
"""
import inspect
import abc
import traceback
from copy import copy
import logging
from datetime import datetime
from dstools.pipeline.products import Product, MetaProduct
from dstools.pipeline.dag import DAG
from dstools.exceptions import TaskBuildError
from dstools.pipeline.tasks.TaskGroup import TaskGroup
from dstools.pipeline.constants import TaskStatus
from dstools.pipeline.tasks.Upstream import Upstream
from dstools.pipeline.Table import Row
from dstools.pipeline.sources.sources import Source
from dstools.util import isiterable
import humanize
class Task(abc.ABC):
    """A task represents a unit of work

    A Task has associated source code and a Product (a persistent object
    such as a table in a database), it has a name and lives in a DAG
    """
    # subclasses may restrict which Product classes they accept (a class or
    # tuple of classes for isinstance checks); None means any Product
    PRODUCT_CLASSES_ALLOWED = None
    @abc.abstractmethod
    def run(self):
        """This is the only required method Task subclasses must implement
        """
        pass
    @abc.abstractmethod
    def _init_source(self, source):
        """Wrap `source` in a Source object; must return a Source subclass
        """
        pass
    def __init__(self, source, product, dag, name, params=None):
        """
        All subclasses must implement the same constructor to keep the API
        consistent, optional parameters after "params" are ok
        Parameters
        ----------
        source: str or pathlib.Path
            Source code for the task, for tasks that do not take source code
            as input (such as PostgresCopy), this can be other thing. The
            source can be a template and can make references to any parameter
            in "params", "upstream" parameters or its own "product", not all
            Tasks have templated source (templating code is mostly used by
            Tasks that take SQL source code as input)
        product: Product
            The product that this task will create upon completion
        dag: DAG
            The DAG holding this task
        name: str
            A name for this task, if None a default will be assigned
        params: dict
            Extra parameters passed to the task on rendering (if templated
            source) or during execution (if not templated source)
        """
        self._params = params or {}
        self._name = name
        self._source = self._init_source(source)
        if dag is None:
            raise TypeError('DAG cannot be None')
        self.dag = dag
        # register this task with the DAG (the DAG tracks upstream relations)
        dag._add_task(self)
        if self._source is None:
            raise TypeError('_init_source must return a value, got None')
        if not isinstance(self._source, Source):
            raise TypeError('_init_source must return a subclass of Source')
        if isinstance(product, Product):
            self._product = product
            if self.PRODUCT_CLASSES_ALLOWED is not None:
                if not isinstance(self._product, self.PRODUCT_CLASSES_ALLOWED):
                    raise TypeError('{} only supports the following product '
                                    'classes: {}, got {}'
                                    .format(type(self).__name__,
                                            self.PRODUCT_CLASSES_ALLOWED,
                                            type(self._product).__name__))
        else:
            # if assigned a tuple/list of products, create a MetaProduct
            self._product = MetaProduct(product)
            if self.PRODUCT_CLASSES_ALLOWED is not None:
                if not all(isinstance(p, self.PRODUCT_CLASSES_ALLOWED)
                           for p in self._product):
                    raise TypeError('{} only supports the following product '
                                    'classes: {}, got {}'
                                    .format(type(self).__name__,
                                            self.PRODUCT_CLASSES_ALLOWED,
                                            type(self._product).__name__))
        self._logger = logging.getLogger('{}.{}'.format(__name__,
                                                        type(self).__name__))
        # back-reference so the product knows which task generates it
        self.product.task = self
        self.client = None
        self._status = TaskStatus.WaitingRender
        self.build_report = None
        self._on_finish = None
        self._on_failure = None
    @property
    def name(self):
        """A str that represents the name of the task
        """
        return self._name
    @property
    def source(self):
        """
        A code object which represents what will be run upon task execution,
        for tasks that do not take source code as parameter (such as
        PostgresCopy), the source object will be a different thing
        """
        return self._source
    @property
    def product(self):
        """The product this task will create upon execution
        """
        return self._product
    @property
    def source_code(self):
        """
        A str with the source that this task will run on execution, if
        templated, it is only available after rendering
        """
        return str(self.source)
    @property
    def upstream(self):
        """{task names} -> [task objects] mapping for upstream dependencies
        """
        # this is just syntactic sugar, upstream relations are tracked by the
        # DAG object
        # this always return a copy to prevent global state if contents
        # are modified (e.g. by using pop)
        return self.dag._get_upstream(self.name)
    @property
    def params(self):
        """
        dict that holds the parameter that will be passed to the task upon
        execution. Before rendering, this will only hold parameters passed
        in the Task constructor. After rendering, this will hold new keys:
        "product" contains the rendered product and "upstream" holding
        upstream parameters if there is any
        """
        return self._params
    @property
    def _lineage(self):
        """
        Set with task names of all the dependencies for this task
        (including dependencies of dependencies)
        """
        # if no upstream deps, there is no lineage
        if not len(self.upstream):
            return None
        else:
            # retrieve lineage: upstream tasks + lineage from upstream tasks
            up = list(self.upstream.keys())
            lineage_up = [up._lineage for up in self.upstream.values() if
                          up._lineage]
            # flatten the per-upstream lineages into a single set
            lineage = up + [task for lineage in lineage_up for task in lineage]
            return set(lineage)
    @property
    def on_finish(self):
        """
        Callable to be executed after this task is built successfully
        (passes Task as first parameter)
        """
        return self._on_finish
    @on_finish.setter
    def on_finish(self, value):
        self._on_finish = value
    @property
    def on_failure(self):
        """
        Callable to be executed if task fails (passes Task as first parameter
        and the exception as second parameter)
        """
        return self._on_failure
    @on_failure.setter
    def on_failure(self, value):
        self._on_failure = value
    def build(self, force=False):
        """Run the task if needed by checking its dependencies
        Parameters
        ----------
        force: bool
            If True, run unconditionally and skip all dependency checks
        Returns
        -------
        Task
            self, with the `build_report` attribute set to a Row holding
            'name', 'Ran?' and 'Elapsed (s)' keys
        """
        # TODO: if this is run in a task that has upstream dependencies
        # it will fail with a useless error since self.params does not have
        # upstream yet (added after rendering)
        # NOTE: should i fetch metadata here? I need to make sure I have
        # the latest before building
        self._logger.info(f'-----\nChecking {repr(self)}....')
        # do not run unless some of the conditions below match...
        run = False
        elapsed = 0
        if force:
            self._logger.info('Forcing run, skipping checks...')
            run = True
        else:
            # not forcing, need to check dependencies...
            p_exists = self.product.exists()
            # check dependencies only if the product exists and there is
            # metadata
            if p_exists and self.product.metadata is not None:
                outdated_data_deps = self.product._outdated_data_dependencies()
                outdated_code_dep = self.product._outdated_code_dependency()
                self._logger.info('Checking dependencies...')
                if outdated_data_deps:
                    run = True
                    self._logger.info('Outdated data deps...')
                else:
                    self._logger.info('Up-to-date data deps...')
                if outdated_code_dep:
                    run = True
                    self._logger.info('Outdated code dep...')
                else:
                    self._logger.info('Up-to-date code dep...')
            else:
                run = True
                # just log why it will run
                if not p_exists:
                    self._logger.info('Product does not exist...')
                if self.product.metadata is None:
                    self._logger.info('Product metadata is None...')
                self._logger.info('Running...')
        if run:
            self._logger.info(f'Starting execution: {repr(self)}')
            then = datetime.now()
            try:
                self.run()
            except Exception as e:
                tb = traceback.format_exc()
                # give the on_failure hook a chance to react, but never let
                # a broken callback mask the original error
                if self.on_failure:
                    try:
                        self.on_failure(self, tb)
                    except Exception:
                        self._logger.exception('Error executing on_failure '
                                               'callback')
                raise e
            now = datetime.now()
            elapsed = (now - then).total_seconds()
            self._logger.info(f'Done. Operation took {elapsed:.1f} seconds')
            # update metadata
            self.product.timestamp = datetime.now().timestamp()
            self.product.stored_source_code = self.source_code
            self.product.save_metadata()
            # TODO: also check that the Products were updated:
            # if they did not exist, they must exist now, if they already
            # exist, timestamp must be recent equal to the datetime.now()
            # used. maybe run fetch metadata again and validate?
            if not self.product.exists():
                raise TaskBuildError(f'Error building task "{self}": '
                                     'the task ran successfully but product '
                                     f'"{self.product}" does not exist yet '
                                     '(task.product.exist() returned False)')
            if self.on_finish:
                try:
                    # pass the client only if the callback signature asks
                    # for it
                    if 'client' in inspect.getfullargspec(self.on_finish).args:
                        self.on_finish(self, client=self.client)
                    else:
                        self.on_finish(self)
                except Exception as e:
                    raise TaskBuildError('Exception when running on_finish '
                                         'for task {}: {}'.format(self, e))
        else:
            self._logger.info(f'No need to run {repr(self)}')
        self._logger.info('-----\n')
        self._status = TaskStatus.Executed
        # downstream tasks may now be ready to execute
        for t in self._get_downstream():
            t._update_status()
        self.build_report = Row({'name': self.name, 'Ran?': run,
                                 'Elapsed (s)': elapsed, })
        return self
    def render(self):
        """
        Renders code and product, all upstream tasks must have been rendered
        first, for that reason, this method will usually not be called
        directly but via DAG.render(), which renders in the right order
        """
        self._render_product()
        # the rendered product is exposed to the source template as "product"
        self.params['product'] = self.product
        params = copy(self.params)
        try:
            if self.source.needs_render:
                # if this task has upstream dependencies, render using the
                # context manager, which will raise a warning if any of the
                # dependencies is not used, otherwise just render
                if params.get('upstream'):
                    with params.get('upstream'):
                        self.source.render(params)
                else:
                    self.source.render(params)
        except Exception as e:
            raise type(e)('Error rendering code from Task "{}", '
                          ' check the full traceback above for details'
                          .format(repr(self), self.params)) from e
        self._status = (TaskStatus.WaitingExecution if not self.upstream
                        else TaskStatus.WaitingUpstream)
    def set_upstream(self, other):
        """Declare `other` as an upstream dependency of this task
        """
        self.dag._add_edge(other, self)
    def plan(self):
        """Shows a text summary of what this task will execute
        """
        plan = f"""
Input parameters: {self.params}
Product: {self.product}
Source code:
{self.source_code}
"""
        print(plan)
    def status(self, return_code_diff=False):
        """Returns a Row summarizing the current task status
        """
        p = self.product
        data = {}
        data['name'] = self.name
        if p.timestamp is not None:
            dt = datetime.fromtimestamp(p.timestamp)
            date_h = dt.strftime('%b %d, %y at %H:%M')
            time_h = humanize.naturaltime(dt)
            data['Last updated'] = '{} ({})'.format(time_h, date_h)
        else:
            data['Last updated'] = 'Has not been run'
        data['Outdated dependencies'] = p._outdated_data_dependencies()
        outd_code = p._outdated_code_dependency()
        data['Outdated code'] = outd_code
        if outd_code and return_code_diff:
            data['Code diff'] = (self.dag
                                 .differ
                                 .get_diff(p.stored_source_code,
                                           self.source_code,
                                           language=self.source.language))
        else:
            # NOTE(review): this assignment is never read afterwards
            outd_code = ''
        data['Product'] = str(self.product)
        data['Doc (short)'] = self.source.doc_short
        data['Location'] = self.source.loc
        return Row(data)
    def to_dict(self):
        """
        Returns a dict representation of the Task, only includes a few
        attributes
        """
        return dict(name=self.name, product=str(self.product),
                    source_code=self.source_code)
    def _render_product(self):
        """Render the product, making upstream products available to it
        """
        params_names = list(self.params)
        # add upstream product identifiers to params, if any
        if self.upstream:
            self.params['upstream'] = Upstream({n: t.product for n, t
                                                in self.upstream.items()})
        # render the current product
        try:
            # using the upstream products to define the current product
            # is optional, using the parameters passed in params is also
            # optional
            self.product.render(copy(self.params),
                                optional=set(params_names + ['upstream']))
        except Exception as e:
            raise type(e)('Error rendering Product from Task "{}", '
                          ' check the full traceback above for details'
                          .format(repr(self), self.params)) from e
    def _get_downstream(self):
        # tasks that list self among their upstream dependencies
        downstream = []
        for t in self.dag.values():
            if self in t.upstream.values():
                downstream.append(t)
        return downstream
    def _update_status(self):
        # promote WaitingUpstream -> WaitingExecution once every upstream
        # task has been executed
        if self._status == TaskStatus.WaitingUpstream:
            all_upstream_executed = all([t._status == TaskStatus.Executed
                                         for t in self.upstream.values()])
            if all_upstream_executed:
                self._status = TaskStatus.WaitingExecution
    def __rshift__(self, other):
        """ a >> b is the same as b.set_upstream(a)
        """
        other.set_upstream(self)
        # return other so a >> b >> c works
        return other
    def __add__(self, other):
        """ a + b means TaskGroup([a, b])
        """
        if isiterable(other) and not isinstance(other, DAG):
            return TaskGroup([self] + list(other))
        else:
            return TaskGroup((self, other))
    def __repr__(self):
        return f'{type(self).__name__}: {self.name} -> {repr(self.product)}'
    def __str__(self):
        return str(self.product)
    def _short_repr(self):
        # truncated representation used when rendering large DAGs
        def short(s):
            max_l = 30
            return s if len(s) <= max_l else s[:max_l - 3] + '...'
        return f'{short(self.name)} -> \n{self.product._short_repr()}'
    # __getstate__ and __setstate__ are needed to make this picklable
    def __getstate__(self):
        state = self.__dict__.copy()
        # _logger is not pickable, so we remove them and build
        # them again in __setstate__
        del state['_logger']
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)
        self._logger = logging.getLogger('{}.{}'.format(__name__,
                                                        type(self).__name__))
| |
"""
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the feature space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from html.parser import HTMLParser
from urllib.request import urlretrieve
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return "__file__" in globals()
# %%
# Reuters Dataset related routines
# --------------------------------
#
# The dataset used in this example is Reuters-21578 as provided by the UCI ML
# repository. It will be automatically downloaded and uncompressed on first
# run.
class ReutersParser(HTMLParser):
    """Utility class to parse a SGML file and yield documents one at a time."""

    def __init__(self, encoding="latin-1"):
        HTMLParser.__init__(self)
        self._reset()
        self.encoding = encoding

    def handle_starttag(self, tag, attrs):
        # dispatch to start_<tag> when such a handler exists
        handler = getattr(self, "start_%s" % tag, None)
        if handler is not None:
            handler(attrs)

    def handle_endtag(self, tag):
        # dispatch to end_<tag> when such a handler exists
        handler = getattr(self, "end_%s" % tag, None)
        if handler is not None:
            handler()

    def _reset(self):
        # per-document state: a flag is 1 while we are inside the
        # corresponding SGML element
        self.in_title = self.in_body = 0
        self.in_topics = self.in_topic_d = 0
        self.title = self.body = self.topic_d = ""
        self.topics = []

    def parse(self, fd):
        """Feed chunks read from `fd` and yield completed documents."""
        self.docs = []
        for chunk in fd:
            self.feed(chunk.decode(self.encoding))
            # flush every document completed by this chunk
            while self.docs:
                yield self.docs.pop(0)
        self.close()

    def handle_data(self, data):
        # accumulate character data into whichever element is open
        if self.in_body:
            self.body = self.body + data
            return
        if self.in_title:
            self.title = self.title + data
            return
        if self.in_topic_d:
            self.topic_d = self.topic_d + data

    def start_reuters(self, attributes):
        pass

    def end_reuters(self):
        # collapse whitespace runs, emit the document and reset state
        self.body = re.sub(r"\s+", r" ", self.body)
        doc = {"title": self.title, "body": self.body, "topics": self.topics}
        self.docs.append(doc)
        self._reset()

    def start_title(self, attributes):
        self.in_title = 1

    def end_title(self):
        self.in_title = 0

    def start_body(self, attributes):
        self.in_body = 1

    def end_body(self):
        self.in_body = 0

    def start_topics(self, attributes):
        self.in_topics = 1

    def end_topics(self):
        self.in_topics = 0

    def start_d(self, attributes):
        self.in_topic_d = 1

    def end_d(self):
        self.in_topic_d = 0
        self.topics.append(self.topic_d)
        self.topic_d = ""
def stream_reuters_documents(data_path=None):
    """Iterate over documents of the Reuters dataset.

    The Reuters archive will automatically be downloaded and uncompressed if
    the `data_path` directory does not exist.

    Documents are represented as dictionaries with 'body' (str),
    'title' (str), 'topics' (list(str)) keys.
    """
    DOWNLOAD_URL = (
        "http://archive.ics.uci.edu/ml/machine-learning-databases/"
        "reuters21578-mld/reuters21578.tar.gz"
    )
    ARCHIVE_FILENAME = "reuters21578.tar.gz"

    if data_path is None:
        data_path = os.path.join(get_data_home(), "reuters")
    if not os.path.exists(data_path):
        # Download the dataset (first run only).
        print("downloading dataset (once and for all) into %s" % data_path)
        os.mkdir(data_path)

        def progress(blocknum, bs, size):
            # urlretrieve reporthook: show download progress on a tty
            total_sz_mb = "%.2f MB" % (size / 1e6)
            current_sz_mb = "%.2f MB" % ((blocknum * bs) / 1e6)
            if _not_in_sphinx():
                sys.stdout.write("\rdownloaded %s / %s" % (current_sz_mb, total_sz_mb))

        archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
        urlretrieve(DOWNLOAD_URL, filename=archive_path, reporthook=progress)
        if _not_in_sphinx():
            sys.stdout.write("\r")
        print("untarring Reuters dataset...")
        # Close the tarfile deterministically (the original leaked the
        # handle).  NOTE(review): extractall trusts member paths inside the
        # downloaded archive; a hardened version should validate members
        # (or pass the `filter` argument available on Python 3.12+).
        with tarfile.open(archive_path, "r:gz") as archive:
            archive.extractall(data_path)
        print("done.")

    parser = ReutersParser()
    for filename in glob(os.path.join(data_path, "*.sgm")):
        # Close each SGML file once fully parsed (the original passed an
        # open() result that was never closed).
        with open(filename, "rb") as sgml_file:
            yield from parser.parse(sgml_file)
# %%
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
# Hashing keeps the feature space fixed across batches: there is no
# vocabulary to fit, so unseen words in later batches are handled for free.
vectorizer = HashingVectorizer(
    decode_error="ignore", n_features=2**18, alternate_sign=False
)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = "acq"
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
    "SGD": SGDClassifier(max_iter=5),
    "Perceptron": Perceptron(),
    "NB Multinomial": MultinomialNB(alpha=0.01),
    "Passive-Aggressive": PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
    """Extract a minibatch of examples, return a tuple X_text, y.

    Note: size is before excluding invalid docs with no topics assigned.
    """
    data = []
    for doc in itertools.islice(doc_iter, size):
        # skip invalid documents that carry no topic labels
        if not doc["topics"]:
            continue
        text = "{title}\n\n{body}".format(**doc)
        data.append((text, pos_class in doc["topics"]))
    if not data:
        # empty sentinel pair so callers can test len() uniformly
        return np.asarray([], dtype=int), np.asarray([], dtype=int)
    X_text, y = zip(*data)
    return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
    """Generator of minibatches."""
    while True:
        X_text, y = get_minibatch(doc_iter, minibatch_size)
        if not len(X_text):
            # the document stream is exhausted
            break
        yield X_text, y
# test data statistics
test_stats = {"n_test": 0, "n_test_pos": 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
# parsing_time / vectorizing_time are kept for the timing plots below
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats["n_test"] += len(y_test)
test_stats["n_test_pos"] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
    """Report progress information, return a string."""
    duration = time.time() - stats["t0"]
    parts = ["%20s classifier : \t" % cls_name]
    parts.append("%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats)
    # test counts come from the module-level test_stats dict
    parts.append("%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats)
    parts.append("accuracy: %(accuracy).3f " % stats)
    parts.append("in %.2fs (%5d docs/s)" % (duration, stats["n_train"] / duration))
    return "".join(parts)
# per-classifier bookkeeping used for reporting and for the plots below
cls_stats = {}
for cls_name in partial_fit_classifiers:
    stats = {
        "n_train": 0,
        "n_train_pos": 0,
        "accuracy": 0.0,
        "accuracy_history": [(0, 0)],
        "t0": time.time(),
        "runtime_history": [(0, 0)],
        "total_fit_time": 0.0,
    }
    cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
    tick = time.time()
    X_train = vectorizer.transform(X_train_text)
    total_vect_time += time.time() - tick
    for cls_name, cls in partial_fit_classifiers.items():
        tick = time.time()
        # update estimator with examples in the current mini-batch
        cls.partial_fit(X_train, y_train, classes=all_classes)
        # accumulate test accuracy stats
        cls_stats[cls_name]["total_fit_time"] += time.time() - tick
        cls_stats[cls_name]["n_train"] += X_train.shape[0]
        cls_stats[cls_name]["n_train_pos"] += sum(y_train)
        tick = time.time()
        cls_stats[cls_name]["accuracy"] = cls.score(X_test, y_test)
        cls_stats[cls_name]["prediction_time"] = time.time() - tick
        acc_history = (cls_stats[cls_name]["accuracy"], cls_stats[cls_name]["n_train"])
        cls_stats[cls_name]["accuracy_history"].append(acc_history)
        run_history = (
            cls_stats[cls_name]["accuracy"],
            total_vect_time + cls_stats[cls_name]["total_fit_time"],
        )
        cls_stats[cls_name]["runtime_history"].append(run_history)
        # print a progress line every third mini-batch to limit output
        if i % 3 == 0:
            print(progress(cls_name, cls_stats[cls_name]))
    if i % 3 == 0:
        print("\n")
# %%
# Plot results
# ------------
#
# The plot represents the learning curve of the classifier: the evolution
# of classification accuracy over the course of the mini-batches. Accuracy is
# measured on the first 1000 samples, held out as a validation set.
#
# To limit the memory consumption, we queue examples up to a fixed amount
# before feeding them to the learner.
def plot_accuracy(x, y, x_legend):
    """Plot accuracy as a function of x."""
    plt.title("Classification accuracy as a function of %s" % x_legend)
    plt.xlabel("%s" % x_legend)
    plt.ylabel("Accuracy")
    plt.grid(True)
    # coerce to arrays so plain lists/tuples of history points work too
    plt.plot(np.array(x), np.array(y))
rcParams["legend.fontsize"] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with #examples
    accuracy, n_examples = zip(*stats["accuracy_history"])
    plot_accuracy(n_examples, accuracy, "training examples (#)")
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc="best")
plt.figure()
for _, stats in sorted(cls_stats.items()):
    # Plot accuracy evolution with runtime
    accuracy, runtime = zip(*stats["runtime_history"])
    plot_accuracy(runtime, accuracy, "runtime (s)")
    ax = plt.gca()
    ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc="best")
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = [stats["total_fit_time"] for cls_name, stats in sorted(cls_stats.items())]
cls_runtime.append(total_vect_time)
cls_names.append("Vectorization")
bar_colors = ["b", "g", "r", "c", "m", "y"]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors)
ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel("runtime (s)")
ax.set_title("Training Times")
def autolabel(rectangles):
    """Attach a text label above each bar showing its height."""
    for rect in rectangles:
        height = rect.get_height()
        ax.text(
            rect.get_x() + rect.get_width() / 2.0,
            1.05 * height,
            "%.4f" % height,
            ha="center",
            va="bottom",
        )
plt.setp(plt.xticks()[1], rotation=30)
autolabel(rectangles)
plt.tight_layout()
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
    cls_runtime.append(stats["prediction_time"])
cls_runtime.append(parsing_time)
cls_names.append("Read/Parse\n+Feat.Extr.")
cls_runtime.append(vectorizing_time)
cls_names.append("Hashing\n+Vect.")
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors)
ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel("runtime (s)")
ax.set_title("Prediction Times (%d instances)" % n_test_documents)
autolabel(rectangles)
plt.tight_layout()
plt.show()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
from django.utils.translation import ugettext as _
from horizon import exceptions
from horizon import forms
from horizon.utils import fields
from horizon import workflows
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class AddPoolAction(workflows.Action):
    """Workflow action gathering the parameters for a new LBaaS pool."""
    name = forms.CharField(max_length=80, label=_("Name"))
    description = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Description"))
    subnet_id = forms.ChoiceField(label=_("Subnet"))
    protocol = forms.ChoiceField(label=_("Protocol"))
    lb_method = forms.ChoiceField(label=_("Load Balancing Method"))
    admin_state_up = forms.BooleanField(label=_("Admin State"),
                                        initial=True, required=False)

    def __init__(self, request, *args, **kwargs):
        super(AddPoolAction, self).__init__(request, *args, **kwargs)
        tenant_id = request.user.tenant_id
        subnet_id_choices = [('', _("Select a Subnet"))]
        try:
            networks = api.quantum.network_list_for_tenant(request, tenant_id)
        except Exception:
            # was a bare `except:`, which would also swallow SystemExit
            # and KeyboardInterrupt
            exceptions.handle(request,
                              _('Unable to retrieve networks list.'))
            networks = []
        for n in networks:
            for s in n['subnets']:
                subnet_id_choices.append((s.id, s.cidr))
        self.fields['subnet_id'].choices = subnet_id_choices
        protocol_choices = [('', _("Select a Protocol"))]
        protocol_choices.append(('HTTP', 'HTTP'))
        protocol_choices.append(('HTTPS', 'HTTPS'))
        self.fields['protocol'].choices = protocol_choices
        # bug fix: the placeholder previously read "Select a Protocol",
        # copied from the protocol choices above
        lb_method_choices = [('', _("Select a Method"))]
        lb_method_choices.append(('ROUND_ROBIN', 'ROUND_ROBIN'))
        lb_method_choices.append(('LEAST_CONNECTIONS', 'LEAST_CONNECTIONS'))
        lb_method_choices.append(('SOURCE_IP', 'SOURCE_IP'))
        self.fields['lb_method'].choices = lb_method_choices

    class Meta:
        name = _("PoolDetails")
        permissions = ('openstack.services.network',)
        # bug fix: the last sentence previously ended with "by defaul.t"
        help_text = _("Create Pool for current tenant.\n\n"
                      "Assign a name and description for the pool. "
                      "Choose one subnet where all members of this "
                      "pool must be on. "
                      "Select the protocol and load balancing method "
                      "for this pool. "
                      "Admin State is UP (checked) by default.")
class AddPoolStep(workflows.Step):
    """Workflow step wrapping AddPoolAction."""
    action_class = AddPoolAction
    contributes = ("name", "description", "subnet_id",
                   "protocol", "lb_method", "admin_state_up")

    def contribute(self, data, context):
        context = super(AddPoolStep, self).contribute(data, context)
        # propagate the merged context only when the form posted data
        return context if data else None
class AddPool(workflows.Workflow):
    """Workflow that creates a load balancer pool for the current tenant."""
    slug = "addpool"
    name = _("Add Pool")
    finalize_button_name = _("Add")
    success_message = _('Added Pool "%s".')
    failure_message = _('Unable to add Pool "%s".')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddPoolStep,)

    def format_status_message(self, message):
        # interpolate the pool name into the success/failure templates
        name = self.context.get('name')
        return message % name

    def handle(self, request, context):
        """Create the pool; return True on success, False on failure."""
        try:
            # the unused `pool = ...` assignment was dropped
            api.lbaas.pool_create(request, **context)
            return True
        except Exception:
            # was a bare `except:`; catch only real errors
            msg = self.format_status_message(self.failure_message)
            exceptions.handle(request, msg)
            return False
class AddVipAction(workflows.Action):
    """Workflow action gathering the parameters for a new pool VIP."""
    name = forms.CharField(max_length=80, label=_("Name"))
    description = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Description"))
    floatip_address = forms.ChoiceField(
        label=_("Vip Address from Floating IPs"),
        widget=forms.Select(attrs={'disabled': 'disabled'}),
        required=False)
    other_address = fields.IPField(required=False,
                                   initial="",
                                   version=fields.IPv4,
                                   mask=False)
    protocol_port = forms.CharField(max_length=80, label=_("Protocol Port"))
    protocol = forms.ChoiceField(label=_("Protocol"))
    # NOTE(review): `initial={}` on a ChoiceField looks odd (choices are
    # strings); kept for compatibility -- confirm intended value.
    session_persistence = forms.ChoiceField(
        required=False, initial={}, label=_("Session Persistence"))
    cookie_name = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Cookie Name"),
        help_text=_("Required for APP_COOKIE persistence;"
                    " Ignored otherwise."))
    connection_limit = forms.CharField(
        max_length=80, label=_("Connection Limit"))
    admin_state_up = forms.BooleanField(
        label=_("Admin State"), initial=True, required=False)

    def __init__(self, request, *args, **kwargs):
        super(AddVipAction, self).__init__(request, *args, **kwargs)
        # i18n bug fix: the subnet was previously interpolated *before* the
        # translation lookup, so the formatted string could never match a
        # message catalog entry; translate first, then interpolate.
        self.fields['other_address'].label = (_("Specify a free IP address"
                                                " from %s")
                                              % args[0]['subnet'])
        protocol_choices = [('', _("Select a Protocol"))]
        protocol_choices.append(('HTTP', 'HTTP'))
        protocol_choices.append(('HTTPS', 'HTTPS'))
        self.fields['protocol'].choices = protocol_choices
        session_persistence_choices = [('', _("Set Session Persistence"))]
        for mode in ('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'):
            session_persistence_choices.append((mode, mode))
        self.fields[
            'session_persistence'].choices = session_persistence_choices
        floatip_address_choices = [('', _("Currently Not Supported"))]
        self.fields['floatip_address'].choices = floatip_address_choices

    class Meta:
        name = _("AddVip")
        permissions = ('openstack.services.network',)
        # bug fix: the sentences previously ran together ("vip.Specify")
        help_text = _("Create a vip (virtual IP) for this pool. "
                      "Assign a name and description for the vip. "
                      "Specify an IP address and port for the vip. "
                      "Choose the protocol and session persistence "
                      "method for the vip. "
                      "Specify the max connections allowed. "
                      "Admin State is UP (checked) by default.")
class AddVipStep(workflows.Step):
    """Workflow step wrapping AddVipAction."""
    action_class = AddVipAction
    depends_on = ("pool_id", "subnet")
    contributes = ("name", "description", "floatip_address",
                   "other_address", "protocol_port", "protocol",
                   "session_persistence", "cookie_name",
                   "connection_limit", "admin_state_up")

    def contribute(self, data, context):
        # no extra processing beyond the default merge of action data
        return super(AddVipStep, self).contribute(data, context)
class AddVip(workflows.Workflow):
    """Workflow that creates a VIP (virtual IP) for an existing pool."""
    slug = "addvip"
    name = _("Add Vip")
    finalize_button_name = _("Add")
    success_message = _('Added Vip "%s".')
    failure_message = _('Unable to add Vip "%s".')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddVipStep,)

    def format_status_message(self, message):
        # interpolate the vip name into the success/failure templates
        name = self.context.get('name')
        return message % name

    def handle(self, request, context):
        """Resolve the vip address and persistence, then create the vip.

        Returns True on success, False otherwise (failures are reported
        through horizon's exceptions.handle).
        """
        # exactly one of floatip_address / other_address may be supplied
        if context['other_address'] == '':
            context['address'] = context['floatip_address']
        else:
            if not context['floatip_address'] == '':
                # bug fix: the two literals previously concatenated without
                # a separating space ("specified.Unable")
                self.failure_message = _('Only one address can be specified. '
                                         'Unable to add Vip %s.')
                return False
            else:
                context['address'] = context['other_address']
        try:
            pool = api.lbaas.pool_get(request, context['pool_id'])
            context['subnet_id'] = pool['subnet_id']
        except Exception:
            # was a bare `except:`; catch only real errors
            context['subnet_id'] = None
            exceptions.handle(request,
                              _('Unable to retrieve pool.'))
            return False
        if context['session_persistence']:
            stype = context['session_persistence']
            if stype == 'APP_COOKIE':
                # APP_COOKIE persistence requires a cookie name
                if context['cookie_name'] == "":
                    self.failure_message = _('Cookie name must be specified '
                                             'with APP_COOKIE persistence.')
                    return False
                else:
                    cookie = context['cookie_name']
                    context['session_persistence'] = {'type': stype,
                                                      'cookie_name': cookie}
            else:
                context['session_persistence'] = {'type': stype}
        else:
            context['session_persistence'] = {}
        try:
            api.lbaas.vip_create(request, **context)
            return True
        except Exception:
            # was a bare `except:`; catch only real errors
            msg = self.format_status_message(self.failure_message)
            exceptions.handle(request, msg)
            return False
class AddMemberAction(workflows.Action):
    """Workflow action selecting instances to add to a pool as members."""
    pool_id = forms.ChoiceField(label=_("Pool"))
    members = forms.MultipleChoiceField(
        label=_("Member(s)"),
        required=True,
        initial=["default"],
        widget=forms.CheckboxSelectMultiple(),
        help_text=_("Select members for this pool "))
    weight = forms.CharField(max_length=80, label=_("Weight"))
    protocol_port = forms.CharField(max_length=80, label=_("Protocol Port"))
    admin_state_up = forms.BooleanField(label=_("Admin State"),
                                        initial=True, required=False)

    def __init__(self, request, *args, **kwargs):
        super(AddMemberAction, self).__init__(request, *args, **kwargs)
        pool_id_choices = [('', _("Select a Pool"))]
        try:
            pools = api.lbaas.pools_get(request)
        except Exception:
            # was a bare `except:`; catch only real errors
            pools = []
            exceptions.handle(request,
                              _('Unable to retrieve pools list.'))
        pools = sorted(pools,
                       key=lambda pool: pool.name)
        for p in pools:
            pool_id_choices.append((p.id, p.name))
        self.fields['pool_id'].choices = pool_id_choices
        members_choices = []
        try:
            servers = api.nova.server_list(request)
        except Exception:
            # was a bare `except:`; catch only real errors
            servers = []
            exceptions.handle(request,
                              _('Unable to retrieve instances list.'))
        if len(servers) == 0:
            # no instances to offer: neutralize all required fields so the
            # dialog can still be dismissed via the "Add" button
            self.fields['members'].label = _("No servers available. "
                                             "Click Add to cancel.")
            self.fields['members'].required = False
            self.fields['members'].help_text = _("Select members "
                                                 "for this pool ")
            self.fields['pool_id'].required = False
            self.fields['weight'].required = False
            self.fields['protocol_port'].required = False
            return
        for m in servers:
            members_choices.append((m.id, m.name))
        self.fields['members'].choices = sorted(
            members_choices,
            key=lambda member: member[1])

    class Meta:
        name = _("MemberDetails")
        permissions = ('openstack.services.network',)
        # bug fix: the sentences previously ran together ("member Specify")
        help_text = _("Add member to selected pool.\n\n"
                      "Choose one or more listed instances to be "
                      "added to the pool as member(s). "
                      "Assign a numeric weight for this member. "
                      "Specify the port number the member(s) "
                      "operate on; e.g., 80.")
class AddMemberStep(workflows.Step):
    """Workflow step wrapping :class:`AddMemberAction`."""
    action_class = AddMemberAction
    contributes = ("pool_id", "members", "protocol_port", "weight",
                   "admin_state_up")

    def contribute(self, data, context):
        # Delegate entirely to the base implementation.
        return super(AddMemberStep, self).contribute(data, context)
class AddMember(workflows.Workflow):
    """Workflow that adds one or more instances to a pool as members."""
    slug = "addmember"
    name = _("Add Member")
    finalize_button_name = _("Add")
    success_message = _('Added Member "%s".')
    failure_message = _('Unable to add Member %s.')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddMemberStep,)

    def handle(self, request, context):
        """Create one member per selected instance.

        Returns True on success, False to abort the workflow.
        """
        if context['members'] == []:
            self.failure_message = _('No instances available.%s')
            context['member_id'] = ''
            return False
        for m in context['members']:
            params = {'device_id': m}
            try:
                plist = api.quantum.port_list(request, **params)
            except Exception:
                # Narrowed from a bare except so SystemExit and
                # KeyboardInterrupt still propagate.
                plist = []
                exceptions.handle(request,
                                  _('Unable to retrieve ports list.'))
                return False
            if plist:
                # Use the first fixed IP of the instance's first port.
                context['address'] = plist[0].fixed_ips[0]['ip_address']
            try:
                context['member_id'] = api.lbaas.member_create(
                    request, **context).id
            except Exception:
                exceptions.handle(request, _("Unable to add member."))
                return False
        return True
class AddMonitorAction(workflows.Action):
    """Form action collecting the parameters of a pool health monitor."""
    pool_id = forms.ChoiceField(label=_("Pool"))
    type = forms.ChoiceField(label=_("Type"))
    delay = forms.CharField(max_length=80, label=_("Delay"))
    timeout = forms.CharField(max_length=80, label=_("Timeout"))
    max_retries = forms.CharField(max_length=80,
                                  label=_("Max Retries (1~10)"))
    http_method = forms.ChoiceField(
        initial="GET", required=False, label=_("HTTP Method"))
    url_path = forms.CharField(
        initial="/", required=False, max_length=80, label=_("URL"))
    expected_codes = forms.CharField(
        initial="200", required=False, max_length=80,
        label=_("Expected HTTP Status Codes"))
    admin_state_up = forms.BooleanField(label=_("Admin State"),
                                        initial=True, required=False)

    def __init__(self, request, *args, **kwargs):
        """Populate pool, probe type and HTTP method choices."""
        super(AddMonitorAction, self).__init__(request, *args, **kwargs)
        pool_id_choices = [('', _("Select a Pool"))]
        try:
            pools = api.lbaas.pools_get(request)
            for p in pools:
                pool_id_choices.append((p.id, p.name))
        except Exception:
            # Narrowed from a bare except: keep SystemExit and
            # KeyboardInterrupt propagating.
            exceptions.handle(request,
                              _('Unable to retrieve pools list.'))
        self.fields['pool_id'].choices = pool_id_choices

        # Supported probe types; built as literals instead of repeated
        # append calls.
        type_choices = [('', _("Select Type")),
                        ('PING', 'PING'),
                        ('TCP', 'TCP'),
                        ('HTTP', 'HTTP'),
                        ('HTTPS', 'HTTPS')]
        self.fields['type'].choices = type_choices
        http_method_choices = [('', _("Select HTTP Method")),
                               ('GET', 'GET')]
        self.fields['http_method'].choices = http_method_choices

    class Meta:
        name = _("MonitorDetails")
        permissions = ('openstack.services.network',)
        help_text = _("Create a monitor for a pool.\n\n"
                      "Select target pool and type of monitoring. "
                      "Specify delay, timeout, and retry limits "
                      "required by the monitor. "
                      "Specify method, URL path, and expected "
                      "HTTP codes upon success.")
class AddMonitorStep(workflows.Step):
    """Workflow step wrapping AddMonitorAction."""
    action_class = AddMonitorAction
    contributes = ("pool_id", "type", "delay", "timeout", "max_retries",
                   "http_method", "url_path", "expected_codes",
                   "admin_state_up")

    def contribute(self, data, context):
        context = super(AddMonitorStep, self).contribute(data, context)
        # NOTE(review): when ``data`` is falsy this implicitly returns None
        # rather than ``context`` (unlike AddMemberStep above) — confirm the
        # workflow framework tolerates a None return here.
        if data:
            return context
class AddMonitor(workflows.Workflow):
    """Workflow that creates a health monitor for a pool."""
    slug = "addmonitor"
    name = _("Add Monitor")
    finalize_button_name = _("Add")
    success_message = _('Added Monitor "%s".')
    failure_message = _('Unable to add Monitor "%s".')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddMonitorStep,)

    def handle(self, request, context):
        """Create the monitor; return True on success, False otherwise."""
        try:
            context['monitor_id'] = api.lbaas.pool_health_monitor_create(
                request, **context).get('id')
            return True
        except Exception:
            # Narrowed from a bare except so SystemExit and
            # KeyboardInterrupt still propagate.
            exceptions.handle(request, _("Unable to add monitor."))
            return False
| |
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
__version__ = '2015-07-01'
# See zc.buildout's changelog if this version is up to date.

# Scratch directory that temporarily holds the eggs downloaded below.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')

# Reusable ./eggs cache next to this script; tolerate it already existing.
eggsdir = os.path.join(os.path.dirname(__file__), 'eggs')
try:
    os.mkdir(eggsdir)
except OSError as e:
    if e.errno != 17:  # 17 == errno.EEXIST
        raise
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''

# Command-line options accepted by the bootstrap script.
parser = OptionParser(usage=usage)
parser.add_option("--version",
                  action="store_true", default=False,
                  help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
                  action="store_true", default=False,
                  help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
                  default='2.5.3',
                  help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
                  default='33.1.1',
                  help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
                  default=eggsdir,
                  help=("Allow for re-use of existing directory of "
                        "setuptools versions"))

options, args = parser.parse_args()

# --version only reports and exits; nothing is installed.
if options.version:
    print("bootstrap.py version %s" % __version__)
    sys.exit(0)
######################################################################
# load/install setuptools

try:
    from urllib.request import urlopen
except ImportError:
    # Python 2 fallback.
    from urllib2 import urlopen

# Execute ez_setup.py (local copy preferred, otherwise fetched from PyPA)
# into the ``ez`` namespace to obtain its ``use_setuptools`` helper.
ez = {}
if os.path.exists('ez_setup.py'):
    exec(open('ez_setup.py').read(), ez)
else:
    exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)

if not options.allow_site_packages:
    # ez_setup imports site, which adds site packages
    # this will remove them from the path to ensure that incompatible versions
    # of setuptools are not in the path
    import site
    # inside a virtualenv, there is no 'getsitepackages'.
    # We can't remove these reliably
    if hasattr(site, 'getsitepackages'):
        for sitepackage_path in site.getsitepackages():
            # Strip all site-packages directories from sys.path that
            # are not sys.prefix; this is because on Windows
            # sys.prefix is a site-package directory.
            if sitepackage_path != sys.prefix:
                sys.path[:] = [x for x in sys.path
                               if sitepackage_path not in x]

setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
    setup_args['version'] = options.setuptools_version
if options.setuptools_to_dir is not None:
    setup_args['to_dir'] = options.setuptools_to_dir

ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources

# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
    if path not in pkg_resources.working_set.entries:
        pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout

ws = pkg_resources.working_set

setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location

# Fix sys.path here as easy_install.pth added before PYTHONPATH
cmd = [sys.executable, '-c',
       'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]

# The environment variable is a test hook; otherwise use --find-links,
# falling back to downloads.buildout.org only for test releases.
find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )

if find_links:
    cmd.extend(['-f', find_links])

requirement = 'zc.buildout'
version = options.buildout_version
# NOTE(review): --buildout-version defaults to '2.5.3' above, so this
# "find the newest final release" branch only runs if that default is
# explicitly overridden to None — confirm that is intended.
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'
def _final_version(parsed_version):
try:
return not parsed_version.is_prerelease
except AttributeError:
# Older setuptools
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        # Scan every candidate distribution and keep the newest final one.
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version

# Pin the requirement when a concrete version was chosen or supplied.
if version:
    requirement = '=='.join((requirement, version))

cmd.append(requirement)

# Run easy_install in a subprocess to install zc.buildout into tmpeggs.
import subprocess
if subprocess.call(cmd) != 0:
    raise Exception(
        "Failed to execute command:\n%s" % repr(cmd)[1:-1])

######################################################################
# Import and run buildout

ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout

# Default to the 'bootstrap' command when no command argument was given
# (arguments containing '=' are option assignments, not commands).
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')

# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]

zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| |
from __future__ import division
import copy
import unittest
import numpy
import six
from chainer import iterators
from chainer import serializer
from chainer import testing
class DummySerializer(serializer.Serializer):
    """Minimal serializer that records every serialized value in a dict."""

    def __init__(self, target):
        super(DummySerializer, self).__init__()
        self.target = target

    def __getitem__(self, key):
        # Nested serializers are not needed for these tests.
        raise NotImplementedError

    def __call__(self, key, value):
        # Record the value under ``key`` and echo the stored entry back.
        self.target[key] = value
        return self.target[key]
class DummyDeserializer(serializer.Deserializer):
    """Minimal deserializer that restores values from a dict."""

    def __init__(self, target):
        super(DummyDeserializer, self).__init__()
        self.target = target

    def __getitem__(self, key):
        # Nested deserializers are not needed for these tests.
        raise NotImplementedError

    def __call__(self, key, value):
        stored = self.target[key]
        if value is None:
            # No destination given: hand back the stored object itself.
            return stored
        if isinstance(value, numpy.ndarray):
            # Restore in place into the provided array.
            numpy.copyto(value, stored)
            return value
        # Scalars and the like: rebuild with the destination's own type.
        return type(value)(numpy.asarray(stored))
return value
@testing.parameterize(*testing.product({
    'n_prefetch': [1, 2],
    'shared_mem': [None, 1000000],
}))
class TestMultiprocessIterator(unittest.TestCase):
    """Exercises MultiprocessIterator across prefetch/shared-mem configs.

    ``testing.parameterize`` injects ``n_prefetch`` and ``shared_mem`` as
    instance attributes on each generated test case.
    """

    def setUp(self):
        self.n_processes = 2
        self.options = {'n_processes': self.n_processes,
                        'n_prefetch': self.n_prefetch,
                        'shared_mem': self.shared_mem}

    def test_iterator_repeat(self):
        # Three full epochs over six elements with batch size 2, checking
        # epoch counters and the epoch_detail bookkeeping at every step.
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        for i in range(3):
            self.assertEqual(it.epoch, i)
            self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
            if i == 0:
                self.assertIsNone(it.previous_epoch_detail)
            else:
                self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
            batch1 = it.next()
            self.assertEqual(len(batch1), 2)
            self.assertIsInstance(batch1, list)
            self.assertFalse(it.is_new_epoch)
            self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
            self.assertAlmostEqual(it.previous_epoch_detail, i + 0 / 6)
            batch2 = it.next()
            self.assertEqual(len(batch2), 2)
            self.assertIsInstance(batch2, list)
            self.assertFalse(it.is_new_epoch)
            self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
            self.assertAlmostEqual(it.previous_epoch_detail, i + 2 / 6)
            batch3 = it.next()
            self.assertEqual(len(batch3), 2)
            self.assertIsInstance(batch3, list)
            self.assertTrue(it.is_new_epoch)
            self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
            self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
            self.assertAlmostEqual(it.previous_epoch_detail, i + 4 / 6)

    def test_iterator_list_type(self):
        # Each example is a [int, ndarray] pair; verify the arrays survive
        # the worker round-trip intact.
        dataset = [[i, numpy.zeros((10,)) + i] for i in range(6)]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        for i in range(3):
            self.assertEqual(it.epoch, i)
            self.assertAlmostEqual(it.epoch_detail, i)
            if i == 0:
                self.assertIsNone(it.previous_epoch_detail)
            else:
                self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
            batches = {}
            for j in range(3):
                batch = it.next()
                self.assertEqual(len(batch), 2)
                if j != 2:
                    self.assertFalse(it.is_new_epoch)
                else:
                    self.assertTrue(it.is_new_epoch)
                self.assertAlmostEqual(
                    it.epoch_detail, (3 * i + j + 1) * 2 / 6)
                self.assertAlmostEqual(
                    it.previous_epoch_detail, (3 * i + j) * 2 / 6)
                for x in batch:
                    self.assertIsInstance(x, list)
                    self.assertIsInstance(x[1], numpy.ndarray)
                    batches[x[0]] = x[1]
            self.assertEqual(len(batches), len(dataset))
            for k, v in six.iteritems(batches):
                numpy.testing.assert_allclose(dataset[k][1], v)

    def test_iterator_tuple_type(self):
        # Same as the list-type test but with (int, ndarray) tuples.
        dataset = [(i, numpy.zeros((10,)) + i) for i in range(6)]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        for i in range(3):
            self.assertEqual(it.epoch, i)
            self.assertAlmostEqual(it.epoch_detail, i)
            if i == 0:
                self.assertIsNone(it.previous_epoch_detail)
            else:
                self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
            batches = {}
            for j in range(3):
                batch = it.next()
                self.assertEqual(len(batch), 2)
                if j != 2:
                    self.assertFalse(it.is_new_epoch)
                else:
                    self.assertTrue(it.is_new_epoch)
                self.assertAlmostEqual(
                    it.epoch_detail, (3 * i + j + 1) * 2 / 6)
                self.assertAlmostEqual(
                    it.previous_epoch_detail, (3 * i + j) * 2 / 6)
                for x in batch:
                    self.assertIsInstance(x, tuple)
                    self.assertIsInstance(x[1], numpy.ndarray)
                    batches[x[0]] = x[1]
            self.assertEqual(len(batches), len(dataset))
            for k, v in six.iteritems(batches):
                numpy.testing.assert_allclose(dataset[k][1], v)

    def test_iterator_dict_type(self):
        # Same again with single-entry {int: ndarray} dict examples.
        dataset = [{i: numpy.zeros((10,)) + i} for i in range(6)]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        for i in range(3):
            self.assertEqual(it.epoch, i)
            self.assertAlmostEqual(it.epoch_detail, i)
            if i == 0:
                self.assertIsNone(it.previous_epoch_detail)
            else:
                self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
            batches = {}
            for j in range(3):
                batch = it.next()
                self.assertEqual(len(batch), 2)
                if j != 2:
                    self.assertFalse(it.is_new_epoch)
                else:
                    self.assertTrue(it.is_new_epoch)
                self.assertAlmostEqual(
                    it.epoch_detail, (3 * i + j + 1) * 2 / 6)
                self.assertAlmostEqual(
                    it.previous_epoch_detail, (3 * i + j) * 2 / 6)
                for x in batch:
                    self.assertIsInstance(x, dict)
                    # tuple(x)[0] extracts the dict's single key.
                    k = tuple(x)[0]
                    v = x[k]
                    self.assertIsInstance(v, numpy.ndarray)
                    batches[k] = v
            self.assertEqual(len(batches), len(dataset))
            for k, v in six.iteritems(batches):
                x = dataset[k][tuple(dataset[k])[0]]
                numpy.testing.assert_allclose(x, v)

    def test_iterator_repeat_not_even(self):
        # Dataset size not divisible by batch size: 5 batches cover the
        # dataset exactly twice when repeating.
        dataset = [1, 2, 3, 4, 5]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        batches = sum([it.next() for _ in range(5)], [])
        self.assertEqual(sorted(batches), sorted(dataset * 2))

    def test_iterator_not_repeat(self):
        dataset = [1, 2, 3, 4, 5]
        it = iterators.MultiprocessIterator(
            dataset, 2, repeat=False, **self.options)
        batches = sum([it.next() for _ in range(3)], [])
        self.assertEqual(sorted(batches), dataset)
        # After exhaustion every next() must raise StopIteration.
        for _ in range(2):
            self.assertRaises(StopIteration, it.next)

    def test_iterator_not_repeat_not_even(self):
        dataset = [1, 2, 3, 4, 5]
        it = iterators.MultiprocessIterator(
            dataset, 2, repeat=False, **self.options)
        self.assertAlmostEqual(it.epoch_detail, 0 / 5)
        self.assertIsNone(it.previous_epoch_detail)
        batch1 = it.next()
        self.assertAlmostEqual(it.epoch_detail, 2 / 5)
        self.assertAlmostEqual(it.previous_epoch_detail, 0 / 5)
        batch2 = it.next()
        self.assertAlmostEqual(it.epoch_detail, 4 / 5)
        self.assertAlmostEqual(it.previous_epoch_detail, 2 / 5)
        batch3 = it.next()
        self.assertAlmostEqual(it.epoch_detail, 5 / 5)
        self.assertAlmostEqual(it.previous_epoch_detail, 4 / 5)
        self.assertRaises(StopIteration, it.next)
        # Last batch holds only the one leftover element.
        self.assertEqual(len(batch3), 1)
        self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)

    def test_iterator_shuffle_divisible(self):
        dataset = list(range(10))
        it = iterators.MultiprocessIterator(
            dataset, 10, **self.options)
        # Two consecutive whole-dataset batches should be shuffled
        # differently.
        self.assertNotEqual(it.next(), it.next())

    def test_iterator_shuffle_nondivisible(self):
        dataset = list(range(10))
        it = iterators.MultiprocessIterator(
            dataset, 3, **self.options)
        out = sum([it.next() for _ in range(7)], [])
        self.assertNotEqual(out[0:10], out[10:20])

    def test_copy_not_repeat(self):
        # A shallow copy must iterate independently of the original.
        dataset = [1, 2, 3, 4, 5]
        it = iterators.MultiprocessIterator(
            dataset, 2, repeat=False, **self.options)
        copy_it = copy.copy(it)
        batches = sum([it.next() for _ in range(3)], [])
        self.assertEqual(sorted(batches), dataset)
        for _ in range(2):
            self.assertRaises(StopIteration, it.next)
        it = None
        batches = sum([copy_it.next() for _ in range(3)], [])
        self.assertEqual(sorted(batches), dataset)
        for _ in range(2):
            self.assertRaises(StopIteration, copy_it.next)

    def test_reset(self):
        # reset() at an epoch boundary restarts iteration from scratch.
        dataset = [1, 2, 3, 4, 5]
        it = iterators.MultiprocessIterator(
            dataset, 2, repeat=False, **self.options)
        for trial in range(4):
            batches = sum([it.next() for _ in range(3)], [])
            self.assertEqual(sorted(batches), dataset)
            for _ in range(2):
                self.assertRaises(StopIteration, it.next)
            it.reset()

    def test_unsupported_reset_middle(self):
        # reset() in the middle of an epoch is not supported.
        dataset = [1, 2, 3, 4, 5]
        it = iterators.MultiprocessIterator(
            dataset, 2, repeat=False, **self.options)
        it.next()
        self.assertRaises(NotImplementedError, it.reset)

    def test_unsupported_reset_repeat(self):
        # reset() is not supported for repeating iterators.
        dataset = [1, 2, 3, 4]
        it = iterators.MultiprocessIterator(
            dataset, 2, repeat=True, **self.options)
        it.next()
        it.next()
        self.assertRaises(NotImplementedError, it.reset)

    def test_unsupported_reset_finalized(self):
        # reset() after finalize() is not supported.
        dataset = [1, 2, 3, 4]
        it = iterators.MultiprocessIterator(
            dataset, 2, repeat=False, **self.options)
        it.next()
        it.next()
        it.finalize()
        self.assertRaises(NotImplementedError, it.reset)
@testing.parameterize(*testing.product({
    'n_prefetch': [1, 2],
    'shared_mem': [None, 1000000],
}))
class TestMultiprocessIteratorSerialize(unittest.TestCase):
    """Checks that MultiprocessIterator state survives (de)serialization."""

    def setUp(self):
        self.n_processes = 2
        self.options = {'n_processes': self.n_processes,
                        'n_prefetch': self.n_prefetch,
                        'shared_mem': self.shared_mem}

    def test_iterator_serialize(self):
        # Iterate two batches, snapshot the state, restore it into a fresh
        # iterator, and verify the third batch completes the epoch.
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        self.assertEqual(it.epoch, 0)
        self.assertAlmostEqual(it.epoch_detail, 0 / 6)
        self.assertIsNone(it.previous_epoch_detail)
        batch1 = it.next()
        self.assertEqual(len(batch1), 2)
        self.assertIsInstance(batch1, list)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 2 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
        batch2 = it.next()
        self.assertEqual(len(batch2), 2)
        self.assertIsInstance(batch2, list)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 4 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)

        target = dict()
        it.serialize(DummySerializer(target))

        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        it.serialize(DummyDeserializer(target))
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 4 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)

        batch3 = it.next()
        self.assertEqual(len(batch3), 2)
        self.assertIsInstance(batch3, list)
        self.assertTrue(it.is_new_epoch)
        self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
        self.assertAlmostEqual(it.epoch_detail, 6 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)

    def test_iterator_serialize_backward_compat(self):
        # Same as above, but the snapshot mimics an old serialized state
        # that lacks 'previous_epoch_detail'; it must be reconstructed.
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        self.assertEqual(it.epoch, 0)
        self.assertAlmostEqual(it.epoch_detail, 0 / 6)
        self.assertIsNone(it.previous_epoch_detail)
        batch1 = it.next()
        self.assertEqual(len(batch1), 2)
        self.assertIsInstance(batch1, list)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 2 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
        batch2 = it.next()
        self.assertEqual(len(batch2), 2)
        self.assertIsInstance(batch2, list)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 4 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)

        target = dict()
        it.serialize(DummySerializer(target))
        # older version does not have previous_epoch_detail
        del target['previous_epoch_detail']

        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        it.serialize(DummyDeserializer(target))
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, 4 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)

        batch3 = it.next()
        self.assertEqual(len(batch3), 2)
        self.assertIsInstance(batch3, list)
        self.assertTrue(it.is_new_epoch)
        self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
        self.assertAlmostEqual(it.epoch_detail, 6 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
testing.run_module(__name__, __file__)
| |
# Copyright 2012 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import argparse
import logging
from neutronclient.neutron import v2_0 as neutronV20
class ListSecurityGroup(neutronV20.ListCommand):
    """List security groups that belong to a given tenant."""

    resource = 'security_group'
    log = logging.getLogger(__name__ + '.ListSecurityGroup')
    # Columns shown by default in the table output.
    list_columns = ['id', 'name', 'description']
    pagination_support = True
    sorting_support = True
class ShowSecurityGroup(neutronV20.ShowCommand):
    """Show information of a given security group."""

    resource = 'security_group'
    log = logging.getLogger(__name__ + '.ShowSecurityGroup')
    # The positional argument may be either a name or an ID.
    allow_names = True
class CreateSecurityGroup(neutronV20.CreateCommand):
    """Create a security group."""

    resource = 'security_group'
    log = logging.getLogger(__name__ + '.CreateSecurityGroup')

    def add_known_arguments(self, parser):
        """Register the command-line arguments specific to this command."""
        parser.add_argument(
            'name', metavar='NAME',
            help='Name of security group')
        parser.add_argument(
            '--description',
            help='description of security group')

    def args2body(self, parsed_args):
        """Build the API request body from the parsed arguments."""
        group = {'name': parsed_args.name}
        if parsed_args.description:
            group['description'] = parsed_args.description
        if parsed_args.tenant_id:
            group['tenant_id'] = parsed_args.tenant_id
        return {'security_group': group}
class DeleteSecurityGroup(neutronV20.DeleteCommand):
    """Delete a given security group."""

    log = logging.getLogger(__name__ + '.DeleteSecurityGroup')
    resource = 'security_group'
    # The positional argument may be either a name or an ID.
    allow_names = True
class UpdateSecurityGroup(neutronV20.UpdateCommand):
    """Update a given security group."""

    log = logging.getLogger(__name__ + '.UpdateSecurityGroup')
    resource = 'security_group'

    def add_known_arguments(self, parser):
        """Register the command-line arguments specific to this command."""
        parser.add_argument(
            '--name',
            help='Name of security group')
        parser.add_argument(
            '--description',
            help='description of security group')

    def args2body(self, parsed_args):
        """Build the API request body from whichever fields were supplied."""
        group = {}
        if parsed_args.name:
            group['name'] = parsed_args.name
        if parsed_args.description:
            group['description'] = parsed_args.description
        return {'security_group': group}
class ListSecurityGroupRule(neutronV20.ListCommand):
    """List security group rules that belong to a given tenant."""

    resource = 'security_group_rule'
    log = logging.getLogger(__name__ + '.ListSecurityGroupRule')
    list_columns = ['id', 'security_group_id', 'direction', 'protocol',
                    'remote_ip_prefix', 'remote_group_id']
    # Column renames applied for display: ID columns are shown under
    # friendlier names (and their values replaced by group names below).
    replace_rules = {'security_group_id': 'security_group',
                     'remote_group_id': 'remote_group'}
    pagination_support = True
    sorting_support = True

    def get_parser(self, prog_name):
        """Extend the base parser with the --no-nameconv flag."""
        parser = super(ListSecurityGroupRule, self).get_parser(prog_name)
        parser.add_argument(
            '--no-nameconv', action='store_true',
            help='Do not convert security group ID to its name')
        return parser

    @staticmethod
    def replace_columns(cols, rules, reverse=False):
        """Map column names through ``rules`` (inverted when reverse=True)."""
        if reverse:
            rules = dict((rules[k], k) for k in rules.keys())
        return [rules.get(col, col) for col in cols]

    def retrieve_list(self, parsed_args):
        """Translate display column names back to API fields before listing."""
        parsed_args.fields = self.replace_columns(parsed_args.fields,
                                                  self.replace_rules,
                                                  reverse=True)
        return super(ListSecurityGroupRule, self).retrieve_list(parsed_args)

    def extend_list(self, data, parsed_args):
        """Replace security group IDs in the rows with group names."""
        if parsed_args.no_nameconv:
            return
        neutron_client = self.get_client()
        search_opts = {'fields': ['id', 'name']}
        if self.pagination_support:
            page_size = parsed_args.page_size
            if page_size:
                search_opts.update({'limit': page_size})
        # Collect every group ID referenced by the rules, then fetch the
        # corresponding names in a single list call.
        sec_group_ids = set()
        for rule in data:
            for key in self.replace_rules:
                sec_group_ids.add(rule[key])
        search_opts.update({"id": sec_group_ids})
        secgroups = neutron_client.list_security_groups(**search_opts)
        secgroups = secgroups.get('security_groups', [])
        sg_dict = dict([(sg['id'], sg['name'])
                        for sg in secgroups if sg['name']])
        # Fall back to the raw ID when a group has no (or an empty) name.
        for rule in data:
            for key in self.replace_rules:
                rule[key] = sg_dict.get(rule[key], rule[key])

    def setup_columns(self, info, parsed_args):
        """Rename ID columns for display unless --no-nameconv was given."""
        parsed_args.columns = self.replace_columns(parsed_args.columns,
                                                   self.replace_rules,
                                                   reverse=True)
        # NOTE(amotoki): 2nd element of the tuple returned by setup_columns()
        # is a generator, so if you need to create a look using the generator
        # object, you need to recreate a generator to show a list expectedly.
        info = super(ListSecurityGroupRule, self).setup_columns(info,
                                                                parsed_args)
        cols = info[0]
        if not parsed_args.no_nameconv:
            cols = self.replace_columns(info[0], self.replace_rules)
            parsed_args.columns = cols
        return (cols, info[1])
class ShowSecurityGroupRule(neutronV20.ShowCommand):
    """Show information of a given security group rule."""

    resource = 'security_group_rule'
    log = logging.getLogger(__name__ + '.ShowSecurityGroupRule')
    # Rules have no names; only IDs are accepted.
    allow_names = False
class CreateSecurityGroupRule(neutronV20.CreateCommand):
    """Create a security group rule."""

    resource = 'security_group_rule'
    log = logging.getLogger(__name__ + '.CreateSecurityGroupRule')

    def add_known_arguments(self, parser):
        """Register the rule-specific command-line arguments.

        Each dashed option also has a hidden underscore-spelled alias
        (help=argparse.SUPPRESS) kept for backward compatibility.
        """
        parser.add_argument(
            'security_group_id', metavar='SECURITY_GROUP',
            help='Security group name or id to add rule.')
        parser.add_argument(
            '--direction',
            default='ingress', choices=['ingress', 'egress'],
            help='direction of traffic: ingress/egress')
        parser.add_argument(
            '--ethertype',
            default='IPv4',
            help='IPv4/IPv6')
        parser.add_argument(
            '--protocol',
            help='protocol of packet')
        parser.add_argument(
            '--port-range-min',
            help='starting port range')
        parser.add_argument(
            '--port_range_min',
            help=argparse.SUPPRESS)
        parser.add_argument(
            '--port-range-max',
            help='ending port range')
        parser.add_argument(
            '--port_range_max',
            help=argparse.SUPPRESS)
        parser.add_argument(
            '--remote-ip-prefix',
            help='cidr to match on')
        parser.add_argument(
            '--remote_ip_prefix',
            help=argparse.SUPPRESS)
        parser.add_argument(
            '--remote-group-id', metavar='REMOTE_GROUP',
            help='remote security group name or id to apply rule')
        parser.add_argument(
            '--remote_group_id',
            help=argparse.SUPPRESS)

    def args2body(self, parsed_args):
        """Build the API request body, resolving group names to IDs."""
        _security_group_id = neutronV20.find_resourceid_by_name_or_id(
            self.get_client(), 'security_group', parsed_args.security_group_id)
        body = {'security_group_rule': {
            'security_group_id': _security_group_id,
            'direction': parsed_args.direction,
            'ethertype': parsed_args.ethertype}}
        # Optional attributes are only included when supplied.
        if parsed_args.protocol:
            body['security_group_rule'].update(
                {'protocol': parsed_args.protocol})
        if parsed_args.port_range_min:
            body['security_group_rule'].update(
                {'port_range_min': parsed_args.port_range_min})
        if parsed_args.port_range_max:
            body['security_group_rule'].update(
                {'port_range_max': parsed_args.port_range_max})
        if parsed_args.remote_ip_prefix:
            body['security_group_rule'].update(
                {'remote_ip_prefix': parsed_args.remote_ip_prefix})
        if parsed_args.remote_group_id:
            # The remote group may also be given by name; resolve it too.
            _remote_group_id = neutronV20.find_resourceid_by_name_or_id(
                self.get_client(), 'security_group',
                parsed_args.remote_group_id)
            body['security_group_rule'].update(
                {'remote_group_id': _remote_group_id})
        if parsed_args.tenant_id:
            body['security_group_rule'].update(
                {'tenant_id': parsed_args.tenant_id})
        return body
class DeleteSecurityGroupRule(neutronV20.DeleteCommand):
    """Delete a given security group rule."""

    log = logging.getLogger(__name__ + '.DeleteSecurityGroupRule')
    resource = 'security_group_rule'
    # Rules have no names; only IDs are accepted.
    allow_names = False
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
def setup(app):
    """Sphinx extension hook: register config values and build tags."""
    # 'env' means a change to link_root triggers a rebuild of affected docs.
    app.add_config_value('link_root', '', 'env')
    # ``tags`` is injected into conf.py's namespace by Sphinx at build time.
    tags.add('dev')
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# 'div' and 'link' are project-local extensions living in ../common/ext,
# which is put on sys.path here so Sphinx can import them.
sys.path.append(os.path.abspath('../common/ext'))
extensions = [
    'sphinx.ext.intersphinx',
    'sphinx.ext.ifconfig',
    'div',
    'link'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['../common/_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'DNN'
copyright = '2015, DNN Corp.'
author = 'DNN Corp.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '8.0'
# The full version, including alpha/beta/rc tags.
release = '8.0.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'samples/*']

# No default domain: plain `text` roles are not resolved as Python objects.
primary_domain = None

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# Substitutions available on every page: U+1F527 is the wrench emoji,
# U+1F441 is the eye emoji (work-status markers).
rst_epilog = """
.. |InProgress| unicode:: U+1F527
.. |InReview| unicode:: U+1F441
"""

# -- Options for Custom Link directive -----------------------------------------
link_root = '/docs'

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'dnn'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'navbar_links': [
        ("Documentation", "#", True, [
            ("Developer", "~/dev/index.html", True),
            ("Admin", "~/admin/index.html", True),
            ("Designer", "~/design/index.html", True)
        ]),
        ("Blogs", "http://www.dnnsoftware.com/community-blog", False),
        ("Download", "http://www.dnnsoftware.com/community/download", False),
        ("Support", "#", True, [
            ("Forums", "http://www.dnnsoftware.com/forums", False),
            ("Evoq Success Network", "http://www.dnnsoftware.com/services/customer-support/success-network", False),
        ])
    ],
    'source_link_position': "none",
    'navbar_title': 'Development Center',
    'navbar_version': False,
    'globaltoc_depth': 3,
    'site_home': 'http://dnnsoftware.com/docs',
    'hide_sidebar': [],
    'link_root': link_root
}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["../common/themes"]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../common/img/DNN_logo_28px.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../common/img/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [
    '../common/img',
    '../common/js'
]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['searchbox.html', 'sidebartoc.html']}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'dnndoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'dnn.tex', 'DNN Documentation',
     'DNN Corp', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'dnn', 'DNN Documentation',
     ['DNN Corp.'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'dnn', 'DNN Documentation',
     'DNN Corp', 'dnn', 'DNN Documentation',
     'DNN Development'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# The basename for the epub file. It defaults to the project name.
#epub_basename = project

# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'

# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'

# Fix unsupported image types using the Pillow.
#epub_fix_images = False

# Scale large images.
#epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'

# If false, no index is generated.
#epub_use_index = True

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
}

# -- Options for Breathe output -------------------------------------------
# Breathe bridges doxygen XML output (under ./api-source/xml) into Sphinx.
breathe_projects = { "dnn-platform": "./api-source/xml" }
breathe_default_project = "dnn-platform"
# --- file boundary (concatenation artifact) ---
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for UnifiedLSTM layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
# Global config for grappler setting that is used for graph mode test.
# Turning the implementation selector ON lets grappler swap between the
# generic and cuDNN LSTM kernels; min_graph_nodes=-1 disables the
# "graph too small to optimize" skip so the rewrite always runs.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
_rewrites.min_graph_nodes = -1
_graph_options = config_pb2.GraphOptions(rewrite_options=_rewrites)
# Session config shared by the graph-mode tests below.
_config = config_pb2.ConfigProto(graph_options=_graph_options)
@keras_parameterized.run_all_keras_modes(config=_config)
class LSTMV2Test(keras_parameterized.TestCase):
  """Tests for the v2 (cuDNN-compatible) Keras LSTM layer."""

  @parameterized.named_parameters(
      ('non_tan_activation', 'relu', 'sigmoid', 0, False, True),
      ('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True),
      ('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True),
      ('unroll', 'tanh', 'sigmoid', 0, True, True),
      ('not_use_bias', 'tanh', 'sigmoid', 0, False, False),
  )
  def test_could_use_defun_backend(self, activation, recurrent_activation,
                                   recurrent_dropout, unroll, use_bias):
    # Each parameter combination deviates from the cuDNN-compatible
    # configuration in exactly one way, so the layer must report that it
    # cannot use the cuDNN kernel.
    layer = rnn.LSTM(
        1,
        activation=activation,
        recurrent_activation=recurrent_activation,
        recurrent_dropout=recurrent_dropout,
        unroll=unroll,
        use_bias=use_bias)
    self.assertFalse(layer.could_use_cudnn)
  def test_static_shape_inference_LSTM(self):
    """Output shape is statically inferred with return_sequences=True."""
    # Github issue: 15165
    timesteps = 3
    embedding_dim = 4
    units = 2
    model = keras.models.Sequential()
    inputs = keras.layers.Dense(
        embedding_dim, input_shape=(timesteps, embedding_dim))
    model.add(inputs)
    layer = rnn.LSTM(units, return_sequences=True)
    model.add(layer)
    outputs = model.layers[-1].output
    # Batch dim unknown (None); time dim must be preserved.
    self.assertEqual(outputs.shape.as_list(), [None, timesteps, units])
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = rnn.LSTM(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(gradient_descent.GradientDescentOptimizer(0.001), 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
  def test_stacking_LSTM(self):
    """Two stacked v2 LSTMs train end to end."""
    inputs = np.random.random((2, 3, 4))
    targets = np.abs(np.random.random((2, 3, 5)))
    # Normalize targets into a probability distribution per timestep.
    targets /= targets.sum(axis=-1, keepdims=True)
    model = keras.models.Sequential()
    model.add(rnn.LSTM(10, return_sequences=True, unroll=False))
    model.add(rnn.LSTM(5, return_sequences=True, unroll=False))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
layer_class = rnn.LSTM
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
  def test_specify_initial_state_keras_tensor(self):
    """Initial states given as Keras tensors become extra model inputs."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2

    # Test with Keras tensor
    inputs = keras.Input((timesteps, embedding_dim))
    initial_state = [keras.Input((units,)) for _ in range(num_states)]
    layer = rnn.LSTM(units)
    if len(initial_state) == 1:
      output = layer(inputs, initial_state=initial_state[0])
    else:
      output = layer(inputs, initial_state=initial_state)
    # The state tensors must be wired into the layer's inbound node.
    assert initial_state[0] in layer._inbound_nodes[0].input_tensors

    model = keras.models.Model([inputs] + initial_state, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([inputs] + initial_state, targets)
  def DISABLED_test_specify_initial_state_non_keras_tensor(self):
    """Initial states given as backend variables (DISABLED_ prefix keeps
    the test runner from collecting this test)."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2

    # Test with non-Keras tensor
    inputs = keras.Input((timesteps, embedding_dim))
    initial_state = [
        keras.backend.random_normal_variable((num_samples, units), 0, 1)
        for _ in range(num_states)
    ]
    layer = rnn.LSTM(units)
    output = layer(inputs, initial_state=initial_state)

    model = keras.models.Model(inputs, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    targets = np.random.random((num_samples, units))
    model.train_on_batch(inputs, targets)
  def test_reset_states_with_values(self):
    """reset_states() zeroes states, accepts explicit values, and
    rejects value lists of the wrong length."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2

    layer = rnn.LSTM(units, stateful=True)
    layer.build((num_samples, timesteps, embedding_dim))
    initial_weight_count = len(layer.weights)
    layer.reset_states()
    assert len(layer.states) == num_states
    assert layer.states[0] is not None
    # Default reset: states are all zeros.
    self.assertAllClose(
        keras.backend.eval(layer.states[0]),
        np.zeros(keras.backend.int_shape(layer.states[0])),
        atol=1e-4)
    state_shapes = [keras.backend.int_shape(state) for state in layer.states]
    values = [np.ones(shape) for shape in state_shapes]
    if len(values) == 1:
      values = values[0]
    # Reset with explicit values: states take those values.
    layer.reset_states(values)
    self.assertAllClose(
        keras.backend.eval(layer.states[0]),
        np.ones(keras.backend.int_shape(layer.states[0])),
        atol=1e-4)

    # Test with invalid data
    with self.assertRaises(ValueError):
      layer.reset_states([1] * (len(layer.states) + 1))

    self.assertEqual(initial_weight_count, len(layer.weights))
    # Variables in "states" shouldn't show up in .weights
    layer.states = nest.map_structure(variables.Variable, values)
    layer.reset_states()
    self.assertEqual(initial_weight_count, len(layer.weights))
  def test_specify_state_with_masking(self):
    """Initial states can be combined with a Masking layer in the graph."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2

    inputs = keras.Input((timesteps, embedding_dim))
    # NOTE(review): the masked tensor is discarded and the raw `inputs`
    # is fed to the LSTM below — confirm whether the masked output was
    # meant to be used here.
    _ = keras.layers.Masking()(inputs)
    initial_state = [keras.Input((units,)) for _ in range(num_states)]
    output = rnn.LSTM(units)(
        inputs, initial_state=initial_state)

    model = keras.models.Model([inputs] + initial_state, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([inputs] + initial_state, targets)
  def test_return_state(self):
    """With return_state=True the layer emits (output, h, c)."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2

    inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
    masked = keras.layers.Masking()(inputs)
    layer = rnn.LSTM(units, return_state=True, stateful=True)
    outputs = layer(masked)
    state = outputs[1:]
    assert len(state) == num_states
    model = keras.models.Model(inputs, state[0])

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    state = model.predict(inputs)
    # The returned state must match the layer's stored stateful state.
    self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4)
  def test_state_reuse(self):
    """States returned by one LSTM can seed a downstream LSTM."""
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2

    inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
    layer = rnn.LSTM(
        units, return_state=True, return_sequences=True)
    outputs = layer(inputs)
    output, state = outputs[0], outputs[1:]
    # Feed the first layer's final states as the second layer's initial state.
    output = rnn.LSTM(units)(output, initial_state=state)
    model = keras.models.Model(inputs, output)

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    model.predict(inputs)
  def test_initial_states_as_other_inputs(self):
    """Initial states may be appended to the layer's input list itself."""
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    num_states = 2
    layer_class = rnn.LSTM

    # Test with Keras tensor
    main_inputs = keras.Input((timesteps, embedding_dim))
    initial_state = [keras.Input((units,)) for _ in range(num_states)]
    inputs = [main_inputs] + initial_state

    layer = layer_class(units)
    output = layer(inputs)
    assert initial_state[0] in layer._inbound_nodes[0].input_tensors

    model = keras.models.Model(inputs, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))

    main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([main_inputs] + initial_state, targets)
  def test_unified_lstm_feature_parity_with_canonical_lstm(self):
    """v2 LSTM predictions/training match the v1 LSTM given equal weights."""
    with context.eager_mode():
      # Run this test under eager only due to b/120160788 for model.set_weights.
      input_shape = 10
      rnn_state_size = 8
      timestep = 4
      batch = 20

      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=rnn_state_size)
      y_train = keras.utils.to_categorical(y_train, rnn_state_size)
      # For the last batch item of the test data, we filter out the last
      # timestep to simulate the variable length sequence and masking test.
      x_train[-2:, -1, :] = 0.0
      y_train[-2:] = 0

      inputs = keras.layers.Input(
          shape=[timestep, input_shape], dtype=dtypes.float32)
      masked_input = keras.layers.Masking()(inputs)
      lstm_layer = rnn_v1.LSTM(rnn_state_size,
                               recurrent_activation='sigmoid')
      output = lstm_layer(masked_input)
      lstm_model = keras.models.Model(inputs, output)
      weights = lstm_model.get_weights()
      y_1 = lstm_model.predict(x_train)
      lstm_model.compile('rmsprop', 'mse')
      lstm_model.fit(x_train, y_train)
      y_2 = lstm_model.predict(x_train)

      with test_util.device(use_gpu=True):
        cudnn_layer = rnn.LSTM(rnn_state_size)
        cudnn_model = keras.models.Model(inputs, cudnn_layer(masked_input))
      # Reuse the v1 layer's weights so outputs are directly comparable.
      cudnn_model.set_weights(weights)
      y_3 = cudnn_model.predict(x_train)
      cudnn_model.compile('rmsprop', 'mse')
      cudnn_model.fit(x_train, y_train)
      y_4 = cudnn_model.predict(x_train)

      self.assertAllClose(y_1, y_3, rtol=1e-5, atol=2e-5)
      self.assertAllClose(y_2, y_4, rtol=1e-5, atol=2e-5)
  @parameterized.named_parameters(('v0', 0), ('v1', 1), ('v2', 2))
  def DISABLED_test_implementation_mode_LSTM(self, implementation_mode):
    """Generic layer_test plus constraint and masking checks.
    (DISABLED_ prefix keeps the runner from collecting this test.)"""
    # Phase 1: generic layer checks with the given implementation mode.
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        rnn.LSTM,
        kwargs={
            'units': units,
            'implementation': implementation_mode
        },
        input_shape=(num_samples, timesteps, embedding_dim))

    # Phase 2: constraints are stored on the built cell's variables.
    layer_class = rnn.LSTM
    k_constraint = keras.constraints.max_norm(0.01)
    r_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_constraint=k_constraint,
        recurrent_constraint=r_constraint,
        bias_constraint=b_constraint)
    layer.build((None, None, embedding_dim))
    self.assertEqual(layer.cell.kernel.constraint, k_constraint)
    self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
    self.assertEqual(layer.cell.bias.constraint, b_constraint)

    # Phase 3: masking + training smoke test.
    layer_class = rnn.LSTM
    inputs = np.random.random((2, 3, 4))
    targets = np.abs(np.random.random((2, 3, 5)))
    targets /= targets.sum(axis=-1, keepdims=True)
    model = keras.models.Sequential()
    model.add(keras.layers.Masking(input_shape=(3, 4)))
    model.add(layer_class(units=5, return_sequences=True, unroll=False))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
  def test_masking_with_stacking_LSTM(self):
    """Masking propagates through two stacked v2 LSTMs during training."""
    inputs = np.random.random((2, 3, 4))
    targets = np.abs(np.random.random((2, 3, 5)))
    # Normalize targets into a probability distribution per timestep.
    targets /= targets.sum(axis=-1, keepdims=True)
    model = keras.models.Sequential()
    model.add(keras.layers.Masking(input_shape=(3, 4)))
    model.add(rnn.LSTM(10, return_sequences=True, unroll=False))
    model.add(rnn.LSTM(5, return_sequences=True, unroll=False))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=gradient_descent.GradientDescentOptimizer(0.01))
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
  @parameterized.named_parameters(
      # test_name, time_major, go_backwards
      ('normal', False, False),
      ('time_major', True, False),
      ('go_backwards', False, True),
      ('both', True, True),
  )
  def test_time_major_and_go_backward(self, time_major, go_backwards):
    """v2 LSTM matches v1 LSTM under time_major/go_backwards options."""
    input_shape = 10
    rnn_state_size = 8
    timestep = 4
    batch = 100

    x_train = np.random.random((batch, timestep, input_shape))

    def build_model(layer_cls):
      # Builds a single-layer model; for time_major the batch-major input
      # is transposed in, and the output transposed back.
      inputs = keras.layers.Input(
          shape=[timestep, input_shape], dtype=dtypes.float32)
      layer = layer_cls(rnn_state_size,
                        recurrent_activation='sigmoid',
                        time_major=time_major,
                        return_sequences=True,
                        go_backwards=go_backwards)
      if time_major:
        converted_input = keras.layers.Lambda(
            lambda t: array_ops.transpose(t, [1, 0, 2]))(inputs)
        outputs = layer(converted_input)
        outputs = keras.layers.Lambda(
            lambda t: array_ops.transpose(t, [1, 0, 2]))(outputs)
      else:
        outputs = layer(inputs)
      return keras.models.Model(inputs, outputs)

    lstm_model = build_model(rnn_v1.LSTM)
    y_ref = lstm_model.predict(x_train)
    weights = lstm_model.get_weights()

    unified_lstm_model = build_model(rnn.LSTM)
    unified_lstm_model.set_weights(weights)
    y = unified_lstm_model.predict(x_train)

    self.assertAllClose(y, y_ref)

    # NOTE(review): the section below re-initializes its own data and
    # model and does not use the parameters above — it looks like an
    # independent end-to-end training check whose `def` line may have
    # been lost in this copy; confirm against upstream.
    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 10

    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = keras.utils.to_categorical(y_train, output_shape)

    layer = rnn.LSTM(rnn_state_size)

    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)

    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('rmsprop', loss='mse')
    model.fit(x_train, y_train, epochs=epoch)
    model.evaluate(x_train, y_train)
    model.predict(x_train)
  @parameterized.named_parameters(
      # test_name, use_bias, bias_initializer, activation
      ('normal', True, 'zeros'),
      ('no_bias', False, 'zeros'),
      ('random_bias', True, 'random_uniform'),
  )
  def test_unified_lstm_model_save_load(self, use_bias, bias_initializer):
    """Saved weights restore identical predictions in a rebuilt model."""
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    h5_path = os.path.join(temp_dir, 'test.h5')

    batch = 10
    timestep = 3
    input_dim = 5
    units = 2

    x = np.random.random((batch, timestep, input_dim))

    def build_model():
      # Fresh model each call so save/load is exercised across instances.
      inputs = keras.layers.Input(
          shape=[timestep, input_dim], dtype=dtypes.float32)
      layer = rnn.LSTM(
          units,
          use_bias=use_bias,
          bias_initializer=bias_initializer)
      output = layer(inputs)
      return keras.models.Model(inputs, output), layer

    model, layer = build_model()
    y_ref = model.predict(x)
    model.save_weights(h5_path)

    cloned_model, new_layer = build_model()
    cloned_model.load_weights(h5_path)
    y = cloned_model.predict(x)
    self.assertAllClose(y, y_ref)
    self.assertAllClose(layer.get_weights(), new_layer.get_weights())
  def test_unified_lstm_output_on_multiple_kernel(self):
    """CPU, GPU, and canonical-LSTM paths agree on the same weights."""
    input_shape = 10
    rnn_state_size = 8
    timestep = 4
    batch = 100

    x_train = np.random.random((batch, timestep, input_shape))

    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)
    with test_util.device(use_gpu=False):
      layer = rnn.LSTM(rnn_state_size)
      output = layer(inputs)
      cpu_model = keras.models.Model(inputs, output)
      weights = cpu_model.get_weights()
      y_1 = cpu_model.predict(x_train)

    with test_util.device(use_gpu=True):
      layer = rnn.LSTM(rnn_state_size)
      output = layer(inputs)
      gpu_model = keras.models.Model(inputs, output)
      gpu_model.set_weights(weights)
      y_2 = gpu_model.predict(x_train)

    # Note that CuDNN uses 'sigmoid' as activation, so the unified LSTM uses
    # 'sigmoid' as default. Construct the canonical LSTM with sigmoid to achieve
    # the same output.
    with test_util.device(use_gpu=True):
      layer = rnn_v1.LSTM(rnn_state_size, recurrent_activation='sigmoid')
      output = layer(inputs)
      canonical_model = keras.models.Model(inputs, output)
      # Remove the extra cudnn bias since canonical lstm will not use it.
      canonical_model.set_weights(weights[:3])
      y_3 = canonical_model.predict(x_train)

    self.assertAllClose(y_1, y_2)
    self.assertAllClose(y_2, y_3)
  def DISABLED_test_return_sequences_LSTM(self):
    """Generic layer checks with return_sequences=True.
    (DISABLED_ prefix keeps the runner from collecting this test.)"""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        rnn.LSTM,
        kwargs={
            'units': units,
            'return_sequences': True
        },
        input_shape=(num_samples, timesteps, embedding_dim))
  def test_regularizers_LSTM(self):
    """Weight regularizers register 3 losses; activity adds a 4th on call."""
    embedding_dim = 4
    layer_class = rnn.LSTM
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_regularizer=keras.regularizers.l1(0.01),
        recurrent_regularizer=keras.regularizers.l1(0.01),
        bias_regularizer='l2',
        activity_regularizer='l1')
    layer.build((None, None, 2))
    # Kernel, recurrent and bias regularizers each add one loss at build.
    self.assertEqual(len(layer.losses), 3)
    x = keras.backend.variable(np.ones((2, 3, 2)))
    layer(x)
    if context.executing_eagerly():
      self.assertEqual(len(layer.losses), 4)
    else:
      self.assertEqual(len(layer.get_losses_for(x)), 1)
  # Run in V2 only due to b/120160788.
  @test_util.run_v2_only
  def test_statefulness_LSTM(self):
    """Stateful LSTM keeps state across predicts, resets, and honors masks."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    layer_class = rnn.LSTM
    model = keras.models.Sequential()
    model.add(
        keras.layers.Embedding(
            4,
            embedding_dim,
            mask_zero=True,
            input_length=timesteps,
            batch_input_shape=(num_samples, timesteps)))
    layer = layer_class(
        units, return_sequences=False, stateful=True, weights=None)
    model.add(layer)
    model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
                  loss='mse', run_eagerly=testing_utils.should_run_eagerly())
    out1 = model.predict(np.ones((num_samples, timesteps)))
    self.assertEqual(out1.shape, (num_samples, units))

    # train once so that the states change
    model.train_on_batch(
        np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
    out2 = model.predict(np.ones((num_samples, timesteps)))

    # if the state is not reset, output should be different
    self.assertNotEqual(out1.max(), out2.max())

    # check that output changes after states are reset
    # (even though the model itself didn't change)
    layer.reset_states()
    out3 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out2.max(), out3.max())

    # check that container-level reset_states() works
    model.reset_states()
    out4 = model.predict(np.ones((num_samples, timesteps)))
    self.assertAllClose(out3, out4, atol=1e-5)

    # check that the call to `predict` updated the states
    out5 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out4.max(), out5.max())

    # Check masking: with mask_zero=True, zero timesteps are skipped, so
    # left-, right-, and mixed-padded inputs must produce the same output.
    layer.reset_states()

    left_padded_input = np.ones((num_samples, timesteps))
    left_padded_input[0, :1] = 0
    left_padded_input[1, :2] = 0
    out6 = model.predict(left_padded_input)

    layer.reset_states()

    right_padded_input = np.ones((num_samples, timesteps))
    right_padded_input[0, -1:] = 0
    right_padded_input[1, -2:] = 0
    out7 = model.predict(right_padded_input)

    layer.reset_states()

    mix_padded_input = np.ones((num_samples, timesteps))
    mix_padded_input[0, 1] = 0
    mix_padded_input[1, 0] = 0
    mix_padded_input[1, 2] = 0
    out8 = model.predict(mix_padded_input)

    self.assertAllClose(out7, out6, atol=1e-5)
    self.assertAllClose(out8, out7, atol=1e-5)
  def test_stateful_LSTM_training(self):
    """Stateful LSTM in a Sequential model trains without error."""
    # See b/123587692 for more context.
    vocab_size = 20
    embedding_dim = 10
    batch_size = 8
    timestep = 12
    units = 5
    x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
    y = np.random.randint(0, vocab_size, size=(batch_size, timestep))

    model = keras.Sequential([
        keras.layers.Embedding(vocab_size, embedding_dim,
                               batch_input_shape=[batch_size, timestep]),
        rnn.LSTM(units, return_sequences=True, stateful=True),
        keras.layers.Dense(vocab_size)
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  run_eagerly=testing_utils.should_run_eagerly())
    # shuffle=False keeps batch order fixed, as stateful layers require.
    model.fit(x, y, epochs=1, shuffle=False)
  def test_dropout_LSTM(self):
    """Generic layer checks with input and recurrent dropout enabled."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        rnn.LSTM,
        kwargs={
            'units': units,
            'dropout': 0.1,
            'recurrent_dropout': 0.1
        },
        input_shape=(num_samples, timesteps, embedding_dim))
class LSTMLayerGraphOnlyTest(test.TestCase):
  """Graph-mode-only tests (need an explicit session)."""

  # Need session for test
  @test_util.run_deprecated_v1
  def test_unifiedLSTM(self):
    """With return_runtime=True the layer reports which kernel ran."""
    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 1

    # _config enables the grappler implementation selector (module global).
    with self.cached_session(config=_config, use_gpu=True) as sess:
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=output_shape)
      y_train = keras.utils.to_categorical(y_train, output_shape)

      layer = rnn.LSTM(rnn_state_size, return_runtime=True)

      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape), name='inputs')
      predict = array_ops.placeholder(
          dtypes.float32, shape=(None, output_shape), name='predict')

      outputs, runtime = layer(inputs)
      loss = losses.softmax_cross_entropy(predict, outputs)
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      train_op = optimizer.minimize(loss)

      sess.run([variables.global_variables_initializer()])
      existing_loss = 0
      for _ in range(epoch):
        loss_value, _, runtime_value = sess.run([loss, train_op, runtime], {
            inputs: x_train,
            predict: y_train
        })
        # The runtime tag must match the available hardware.
        if test.is_gpu_available():
          self.assertEqual(runtime_value, b'cudnn')
        else:
          self.assertEqual(runtime_value, b'cpu')
        # Make sure the loss is updated for every epoch
        # (layer weights properly updated).
        self.assertNotEqual(existing_loss, loss_value)
        existing_loss = loss_value
# Need session for test
@test_util.run_deprecated_v1
def test_unifiedLSTM_with_cond(self):
# This test is to demonstrate the graph rewrite of grappler plugin under
# the condition that the function returns different number of internal
# states.
input_shape = 10
rnn_state_size = 8
output_shape = 8
timestep = 4
batch = 100
epoch = 1
with self.cached_session(config=_config, use_gpu=True) as sess:
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = keras.utils.to_categorical(y_train, output_shape)
layer = rnn.LSTM(rnn_state_size, return_runtime=True)
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape), name='inputs')
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape), name='predict')
zeros = array_ops.zeros([batch, output_shape])
dummy_runtime = constant_op.constant(
'unknown', dtype=dtypes.string, name='runtime')
a = constant_op.constant(0)
b = constant_op.constant(1)
# Will always run the lstm layer.
outputs, runtime = control_flow_ops.cond(
gen_math_ops.less(a, b),
lambda: layer(inputs),
lambda: (zeros, dummy_runtime))
loss = losses.softmax_cross_entropy(predict, outputs)
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
train_op = optimizer.minimize(loss)
sess.run([variables.global_variables_initializer()])
existing_loss = 0
for _ in range(epoch):
loss_value, _, runtime_value = sess.run([loss, train_op, runtime], {
inputs: x_train,
predict: y_train
})
if test.is_gpu_available():
self.assertEqual(runtime_value, b'cudnn')
else:
self.assertEqual(runtime_value, b'cpu')
# Make sure the loss is updated for every epoch
# (layer weights properly updated).
self.assertNotEqual(existing_loss, loss_value)
existing_loss = loss_value
class UnifiedLSTMPerformanceTest(test.Benchmark):
  """Benchmarks comparing CuDNN LSTM, unified LSTM and canonical LSTM."""

  def _measure_performance(self, test_config, model, x_train, y_train):
    """Returns average wall-clock seconds per epoch, excluding warmup."""
    batch = test_config['batch']
    epoch = test_config['epoch']
    warmup_epoch = test_config['warmup_epoch']

    # warm up the model
    model.fit(x_train, y_train, batch_size=batch, epochs=warmup_epoch)
    start_time = time.time()
    model.fit(x_train, y_train, batch_size=batch, epochs=epoch - warmup_epoch)
    end_time = time.time()
    return (end_time - start_time) / (epoch - warmup_epoch)

  def _time_performance_run_cudnn_lstm(self, test_config, x_train, y_train):
    # Get the performance number for standard Cudnn LSTM
    input_shape = test_config['input_shape']
    rnn_state_size = test_config['rnn_state_size']
    timestep = test_config['timestep']

    cudnn_lstm_layer = keras.layers.CuDNNLSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)

    outputs = cudnn_lstm_layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('sgd', 'mse')

    sec_per_epoch = self._measure_performance(
        test_config, model, x_train, y_train)
    logging.info('Average performance for %s per epoch is: %s',
                 'CuDNN LSTM', sec_per_epoch)
    return sec_per_epoch

  def _time_performance_run_unifed_lstm_gpu(
      self, test_config, x_train, y_train):
    # Get performance number for Unified_LSTM with grappler swap the impl
    input_shape = test_config['input_shape']
    rnn_state_size = test_config['rnn_state_size']
    timestep = test_config['timestep']

    layer = rnn.LSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)

    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('sgd', 'mse')

    sec_per_epoch = self._measure_performance(
        test_config, model, x_train, y_train)
    logging.info('Average performance for %s per epoch is: %s',
                 'Unified LSTM', sec_per_epoch)
    return sec_per_epoch

  def _time_performance_run_normal_lstm(
      self, test_config, x_train, y_train):
    # Get performance number for standard LSTM on GPU.
    input_shape = test_config['input_shape']
    rnn_state_size = test_config['rnn_state_size']
    timestep = test_config['timestep']

    layer = rnn_v1.LSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)

    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('sgd', 'mse')

    sec_per_epoch = self._measure_performance(
        test_config, model, x_train, y_train)
    logging.info('Average performance for %s per epoch is: %s',
                 'Normal LSTM', sec_per_epoch)
    return sec_per_epoch

  def _benchmark_performance_with_standard_cudnn_impl(self):
    """Runs all three LSTM variants on identical data and reports timings."""
    if not test.is_gpu_available():
      self.skipTest('performance test will only run on GPU')

    mode = 'eager' if context.executing_eagerly() else 'graph'
    batch = 64
    num_batch = 10
    test_config = {
        'input_shape': 128,
        'rnn_state_size': 64,
        'output_shape': 64,
        'timestep': 50,
        'batch': batch,
        'epoch': 20,
        # The performance for warmup epoch is ignored.
        'warmup_epoch': 1,
    }
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=(batch * num_batch),
        test_samples=0,
        input_shape=(test_config['timestep'], test_config['input_shape']),
        num_classes=test_config['output_shape'])
    y_train = keras.utils.to_categorical(y_train, test_config['output_shape'])

    cudnn_sec_per_epoch = self._time_performance_run_cudnn_lstm(
        test_config, x_train, y_train)
    unified_lstm_sec_per_epoch = self._time_performance_run_unifed_lstm_gpu(
        test_config, x_train, y_train)
    normal_lstm_sec_per_epoch = self._time_performance_run_normal_lstm(
        test_config, x_train, y_train)

    cudnn_vs_unified = cudnn_sec_per_epoch / unified_lstm_sec_per_epoch
    unified_vs_normal = normal_lstm_sec_per_epoch / unified_lstm_sec_per_epoch

    self.report_benchmark(name='keras_cudnn_lstm_' + mode,
                          wall_time=cudnn_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)
    self.report_benchmark(name='keras_unified_lstm_' + mode,
                          wall_time=unified_lstm_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)
    self.report_benchmark(name='keras_canonical_lstm_' + mode,
                          wall_time=normal_lstm_sec_per_epoch,
                          iters=test_config['epoch'],
                          extras=test_config)

    logging.info('Expect the performance of Unified LSTM is within 80% of '
                 'CuDNN LSTM, got {0:.2f}%'.format(cudnn_vs_unified * 100))
    logging.info('Expect the performance of Unified LSTM is more than 5 times'
                 ' of normal LSTM, got {0:.2f}'.format(unified_vs_normal))

  def benchmark_performance_graph(self):
    with context.graph_mode(), session_lib.Session(config=_config):
      self._benchmark_performance_with_standard_cudnn_impl()

  def benchmark_performance_eager(self):
    with context.eager_mode():
      self._benchmark_performance_with_standard_cudnn_impl()
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
# FIXME: corrupted region — a stray "|" character and two duplicated
# "<<<<<<< HEAD" merge-conflict markers appeared here, splicing an unrelated
# asyncio test module onto this file. The conflict below must be resolved
# (keep exactly one copy of the asyncio tests, in their own file).
"""Tests for lock.py"""
import unittest
from unittest import mock
import re
import asyncio
from asyncio import test_utils
# Pattern matching the repr() of the primitives under test, e.g.
# "<asyncio.locks.Lock object at 0x... [unlocked]>".  The "extras" group
# captures the state word plus optional ",value:N" and ",waiters:N" suffixes.
STR_RGX_REPR = (
    r'^<(?P<class>.*?) object at (?P<address>.*?)'
    r'\[(?P<extras>'
    r'(set|unset|locked|unlocked)(,value:\d)?(,waiters:\d+)?'
    r')\]>\Z'
)
RGX_REPR = re.compile(STR_RGX_REPR)
class LockTests(test_utils.TestCase):
    """Tests for asyncio.Lock."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        lock = asyncio.Lock(loop=loop)
        self.assertIs(lock._loop, loop)

        lock = asyncio.Lock(loop=self.loop)
        self.assertIs(lock._loop, self.loop)

    def test_ctor_noloop(self):
        # With no explicit loop, the lock binds to the current event loop.
        asyncio.set_event_loop(self.loop)
        lock = asyncio.Lock()
        self.assertIs(lock._loop, self.loop)

    def test_repr(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(repr(lock).endswith('[unlocked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))

        @asyncio.coroutine
        def acquire_lock():
            yield from lock

        self.loop.run_until_complete(acquire_lock())
        self.assertTrue(repr(lock).endswith('[locked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))

    def test_lock(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        res = self.loop.run_until_complete(acquire_lock())

        self.assertTrue(res)
        self.assertTrue(lock.locked())

        lock.release()
        self.assertFalse(lock.locked())

    def test_acquire(self):
        # Three tasks contend for the lock; each release() wakes exactly
        # one waiter, in FIFO order (asserted by the growing result list).
        lock = asyncio.Lock(loop=self.loop)
        result = []

        self.assertTrue(self.loop.run_until_complete(lock.acquire()))

        @asyncio.coroutine
        def c1(result):
            if (yield from lock.acquire()):
                result.append(1)
            return True

        @asyncio.coroutine
        def c2(result):
            if (yield from lock.acquire()):
                result.append(2)
            return True

        @asyncio.coroutine
        def c3(result):
            if (yield from lock.acquire()):
                result.append(3)
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        t3 = asyncio.Task(c3(result), loop=self.loop)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_acquire_cancel(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(self.loop.run_until_complete(lock.acquire()))

        task = asyncio.Task(lock.acquire(), loop=self.loop)
        self.loop.call_soon(task.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, task)
        # A cancelled acquire() must not leave a stale waiter behind.
        self.assertFalse(lock._waiters)

    def test_cancel_race(self):
        # Several tasks:
        # - A acquires the lock
        # - B is blocked in aqcuire()
        # - C is blocked in aqcuire()
        #
        # Now, concurrently:
        # - B is cancelled
        # - A releases the lock
        #
        # If B's waiter is marked cancelled but not yet removed from
        # _waiters, A's release() call will crash when trying to set
        # B's waiter; instead, it should move on to C's waiter.

        # Setup: A has the lock, b and c are waiting.
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def lockit(name, blocker):
            yield from lock.acquire()
            try:
                if blocker is not None:
                    yield from blocker
            finally:
                lock.release()

        fa = asyncio.Future(loop=self.loop)
        ta = asyncio.Task(lockit('A', fa), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertTrue(lock.locked())
        tb = asyncio.Task(lockit('B', None), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual(len(lock._waiters), 1)
        tc = asyncio.Task(lockit('C', None), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual(len(lock._waiters), 2)

        # Create the race and check.
        # Without the fix this failed at the last assert.
        fa.set_result(None)
        tb.cancel()
        self.assertTrue(lock._waiters[0].cancelled())
        test_utils.run_briefly(self.loop)
        self.assertFalse(lock.locked())
        self.assertTrue(ta.done())
        self.assertTrue(tb.cancelled())
        self.assertTrue(tc.done())

    def test_release_not_acquired(self):
        lock = asyncio.Lock(loop=self.loop)

        self.assertRaises(RuntimeError, lock.release)

    def test_release_no_waiters(self):
        lock = asyncio.Lock(loop=self.loop)
        self.loop.run_until_complete(lock.acquire())
        self.assertTrue(lock.locked())

        lock.release()
        self.assertFalse(lock.locked())

    def test_context_manager(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        with self.loop.run_until_complete(acquire_lock()):
            self.assertTrue(lock.locked())

        self.assertFalse(lock.locked())

    def test_context_manager_cant_reuse(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        # This spells "yield from lock" outside a generator.
        cm = self.loop.run_until_complete(acquire_lock())
        with cm:
            self.assertTrue(lock.locked())

        self.assertFalse(lock.locked())

        # The one-shot context manager cannot be entered a second time.
        with self.assertRaises(AttributeError):
            with cm:
                pass

    def test_context_manager_no_yield(self):
        lock = asyncio.Lock(loop=self.loop)

        try:
            with lock:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')

        self.assertFalse(lock.locked())
class EventTests(test_utils.TestCase):
    """Tests for asyncio.Event."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        ev = asyncio.Event(loop=loop)
        self.assertIs(ev._loop, loop)

        ev = asyncio.Event(loop=self.loop)
        self.assertIs(ev._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        ev = asyncio.Event()
        self.assertIs(ev._loop, self.loop)

    def test_repr(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertTrue(repr(ev).endswith('[unset]>'))
        match = RGX_REPR.match(repr(ev))
        self.assertEqual(match.group('extras'), 'unset')

        ev.set()
        self.assertTrue(repr(ev).endswith('[set]>'))
        self.assertTrue(RGX_REPR.match(repr(ev)))

        ev._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(ev))
        self.assertTrue(RGX_REPR.match(repr(ev)))

    def test_wait(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())

        result = []

        @asyncio.coroutine
        def c1(result):
            if (yield from ev.wait()):
                result.append(1)

        @asyncio.coroutine
        def c2(result):
            if (yield from ev.wait()):
                result.append(2)

        @asyncio.coroutine
        def c3(result):
            if (yield from ev.wait()):
                result.append(3)

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        t3 = asyncio.Task(c3(result), loop=self.loop)

        ev.set()
        test_utils.run_briefly(self.loop)
        # c3 first ran after the event was already set, so it never
        # suspended and finishes first; c1 and c2 wake in wait order.
        self.assertEqual([3, 1, 2], result)

        self.assertTrue(t1.done())
        self.assertIsNone(t1.result())
        self.assertTrue(t2.done())
        self.assertIsNone(t2.result())
        self.assertTrue(t3.done())
        self.assertIsNone(t3.result())

    def test_wait_on_set(self):
        ev = asyncio.Event(loop=self.loop)
        ev.set()

        res = self.loop.run_until_complete(ev.wait())
        self.assertTrue(res)

    def test_wait_cancel(self):
        ev = asyncio.Event(loop=self.loop)

        wait = asyncio.Task(ev.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        # A cancelled wait() must not leave a stale waiter behind.
        self.assertFalse(ev._waiters)

    def test_clear(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())

        ev.set()
        self.assertTrue(ev.is_set())

        ev.clear()
        self.assertFalse(ev.is_set())

    def test_clear_with_waiters(self):
        ev = asyncio.Event(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            if (yield from ev.wait()):
                result.append(1)
            return True

        t = asyncio.Task(c1(result), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        # set() then clear() before the waiter task resumes: the waiter
        # stays queued and only completes once the event is set again.
        ev.set()
        ev.clear()
        self.assertFalse(ev.is_set())

        ev.set()
        ev.set()
        self.assertEqual(1, len(ev._waiters))

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertEqual(0, len(ev._waiters))

        self.assertTrue(t.done())
        self.assertTrue(t.result())
class ConditionTests(test_utils.TestCase):
    """Tests for asyncio.Condition."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        cond = asyncio.Condition(loop=loop)
        self.assertIs(cond._loop, loop)

        cond = asyncio.Condition(loop=self.loop)
        self.assertIs(cond._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        cond = asyncio.Condition()
        self.assertIs(cond._loop, self.loop)

    def test_wait(self):
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
            return True

        @asyncio.coroutine
        def c3(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(3)
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        # wait() released the underlying lock while the tasks block.
        self.assertFalse(cond.locked())

        self.assertTrue(self.loop.run_until_complete(cond.acquire()))
        cond.notify()
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.assertTrue(cond.locked())

        # A notified waiter only resumes once it can re-acquire the lock.
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())

        cond.notify(2)
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(cond.locked())

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_wait_cancel(self):
        cond = asyncio.Condition(loop=self.loop)
        self.loop.run_until_complete(cond.acquire())

        wait = asyncio.Task(cond.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        self.assertFalse(cond._waiters)
        # A cancelled wait() re-acquires the lock before propagating.
        self.assertTrue(cond.locked())

    def test_wait_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete, cond.wait())

    def test_wait_for(self):
        cond = asyncio.Condition(loop=self.loop)
        presult = False

        def predicate():
            return presult

        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait_for(predicate)):
                result.append(1)
            cond.release()
            return True

        t = asyncio.Task(c1(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        # Notify while the predicate is still false: the waiter re-blocks.
        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        presult = True
        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        self.assertTrue(t.done())
        self.assertTrue(t.result())

    def test_wait_for_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)

        # predicate can return true immediately
        res = self.loop.run_until_complete(cond.wait_for(lambda: [1, 2, 3]))
        self.assertEqual([1, 2, 3], res)

        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete,
            cond.wait_for(lambda: False))

    def test_notify(self):
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
                cond.release()
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
                cond.release()
            return True

        @asyncio.coroutine
        def c3(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(3)
                cond.release()
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify(1)
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify(1)
        # Notifying more waiters than exist is allowed; extras are ignored.
        cond.notify(2048)
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_notify_all(self):
        cond = asyncio.Condition(loop=self.loop)

        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
                cond.release()
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
                cond.release()
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify_all()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())

    def test_notify_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify)

    def test_notify_all_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify_all)

    def test_repr(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertTrue('unlocked' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

        self.loop.run_until_complete(cond.acquire())
        self.assertTrue('locked' in repr(cond))

        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:2' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

    def test_context_manager(self):
        cond = asyncio.Condition(loop=self.loop)

        @asyncio.coroutine
        def acquire_cond():
            return (yield from cond)

        with self.loop.run_until_complete(acquire_cond()):
            self.assertTrue(cond.locked())

        self.assertFalse(cond.locked())

    def test_context_manager_no_yield(self):
        cond = asyncio.Condition(loop=self.loop)

        try:
            with cond:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')

        self.assertFalse(cond.locked())

    def test_explicit_lock(self):
        # A Condition can wrap a caller-supplied Lock and adopts its loop.
        lock = asyncio.Lock(loop=self.loop)
        cond = asyncio.Condition(lock, loop=self.loop)

        self.assertIs(cond._lock, lock)
        self.assertIs(cond._loop, lock._loop)

    def test_ambiguous_loops(self):
        # The explicit lock and the condition must agree on the event loop.
        loop = self.new_test_loop()
        self.addCleanup(loop.close)

        lock = asyncio.Lock(loop=self.loop)
        with self.assertRaises(ValueError):
            asyncio.Condition(lock, loop=loop)
class SemaphoreTests(test_utils.TestCase):
    """Tests for asyncio.Semaphore and asyncio.BoundedSemaphore."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        sem = asyncio.Semaphore(loop=loop)
        self.assertIs(sem._loop, loop)

        sem = asyncio.Semaphore(loop=self.loop)
        self.assertIs(sem._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        sem = asyncio.Semaphore()
        self.assertIs(sem._loop, self.loop)

    def test_initial_value_zero(self):
        sem = asyncio.Semaphore(0, loop=self.loop)
        self.assertTrue(sem.locked())

    def test_repr(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.assertTrue(repr(sem).endswith('[unlocked,value:1]>'))
        self.assertTrue(RGX_REPR.match(repr(sem)))

        self.loop.run_until_complete(sem.acquire())
        self.assertTrue(repr(sem).endswith('[locked]>'))
        self.assertTrue('waiters' not in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))

        sem._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))

        sem._waiters.append(mock.Mock())
        self.assertTrue('waiters:2' in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))

    def test_semaphore(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.assertEqual(1, sem._value)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from sem)

        res = self.loop.run_until_complete(acquire_lock())

        self.assertTrue(res)
        self.assertTrue(sem.locked())
        self.assertEqual(0, sem._value)

        sem.release()
        self.assertFalse(sem.locked())
        self.assertEqual(1, sem._value)

    def test_semaphore_value(self):
        # A negative initial value is rejected.
        self.assertRaises(ValueError, asyncio.Semaphore, -1)

    def test_acquire(self):
        sem = asyncio.Semaphore(3, loop=self.loop)
        result = []

        self.assertTrue(self.loop.run_until_complete(sem.acquire()))
        self.assertTrue(self.loop.run_until_complete(sem.acquire()))
        self.assertFalse(sem.locked())

        @asyncio.coroutine
        def c1(result):
            yield from sem.acquire()
            result.append(1)
            return True

        @asyncio.coroutine
        def c2(result):
            yield from sem.acquire()
            result.append(2)
            return True

        @asyncio.coroutine
        def c3(result):
            yield from sem.acquire()
            result.append(3)
            return True

        @asyncio.coroutine
        def c4(result):
            yield from sem.acquire()
            result.append(4)
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        # Only one slot was left (value 3, two taken above), so c1 got it
        # and c2/c3 remain queued.
        self.assertEqual([1], result)
        self.assertTrue(sem.locked())
        self.assertEqual(2, len(sem._waiters))
        self.assertEqual(0, sem._value)

        t4 = asyncio.Task(c4(result), loop=self.loop)

        sem.release()
        sem.release()
        self.assertEqual(2, sem._value)

        test_utils.run_briefly(self.loop)
        self.assertEqual(0, sem._value)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(sem.locked())
        self.assertEqual(1, len(sem._waiters))
        self.assertEqual(0, sem._value)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())
        self.assertFalse(t4.done())

        # cleanup locked semaphore
        sem.release()
        self.loop.run_until_complete(t4)

    def test_acquire_cancel(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.loop.run_until_complete(sem.acquire())

        acquire = asyncio.Task(sem.acquire(), loop=self.loop)
        self.loop.call_soon(acquire.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, acquire)
        # A cancelled acquire() must not leave a stale waiter behind.
        self.assertFalse(sem._waiters)

    def test_release_not_acquired(self):
        # Only BoundedSemaphore refuses release() above its initial value.
        sem = asyncio.BoundedSemaphore(loop=self.loop)

        self.assertRaises(ValueError, sem.release)

    def test_release_no_waiters(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.loop.run_until_complete(sem.acquire())
        self.assertTrue(sem.locked())

        sem.release()
        self.assertFalse(sem.locked())

    def test_context_manager(self):
        sem = asyncio.Semaphore(2, loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from sem)

        with self.loop.run_until_complete(acquire_lock()):
            self.assertFalse(sem.locked())
            self.assertEqual(1, sem._value)

            with self.loop.run_until_complete(acquire_lock()):
                self.assertTrue(sem.locked())

        self.assertEqual(2, sem._value)
# unittest entry point for the asyncio lock tests.
if __name__ == '__main__':
    unittest.main()
# FIXME: "=======" merge-conflict separator. Everything below this point is a
# duplicate copy of the asyncio lock-test module above (its second half is
# truncated). Resolve the conflict by keeping exactly one copy and deleting
# this residue.
"""Tests for lock.py"""
import unittest
from unittest import mock
import re
import asyncio
from asyncio import test_utils
STR_RGX_REPR = (
r'^<(?P<class>.*?) object at (?P<address>.*?)'
r'\[(?P<extras>'
r'(set|unset|locked|unlocked)(,value:\d)?(,waiters:\d+)?'
r')\]>\Z'
)
RGX_REPR = re.compile(STR_RGX_REPR)
class LockTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
def test_ctor_loop(self):
loop = mock.Mock()
lock = asyncio.Lock(loop=loop)
self.assertIs(lock._loop, loop)
lock = asyncio.Lock(loop=self.loop)
self.assertIs(lock._loop, self.loop)
def test_ctor_noloop(self):
asyncio.set_event_loop(self.loop)
lock = asyncio.Lock()
self.assertIs(lock._loop, self.loop)
def test_repr(self):
lock = asyncio.Lock(loop=self.loop)
self.assertTrue(repr(lock).endswith('[unlocked]>'))
self.assertTrue(RGX_REPR.match(repr(lock)))
@asyncio.coroutine
def acquire_lock():
yield from lock
self.loop.run_until_complete(acquire_lock())
self.assertTrue(repr(lock).endswith('[locked]>'))
self.assertTrue(RGX_REPR.match(repr(lock)))
def test_lock(self):
lock = asyncio.Lock(loop=self.loop)
@asyncio.coroutine
def acquire_lock():
return (yield from lock)
res = self.loop.run_until_complete(acquire_lock())
self.assertTrue(res)
self.assertTrue(lock.locked())
lock.release()
self.assertFalse(lock.locked())
def test_acquire(self):
lock = asyncio.Lock(loop=self.loop)
result = []
self.assertTrue(self.loop.run_until_complete(lock.acquire()))
@asyncio.coroutine
def c1(result):
if (yield from lock.acquire()):
result.append(1)
return True
@asyncio.coroutine
def c2(result):
if (yield from lock.acquire()):
result.append(2)
return True
@asyncio.coroutine
def c3(result):
if (yield from lock.acquire()):
result.append(3)
return True
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual([], result)
lock.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1], result)
test_utils.run_briefly(self.loop)
self.assertEqual([1], result)
t3 = asyncio.Task(c3(result), loop=self.loop)
lock.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1, 2], result)
lock.release()
test_utils.run_briefly(self.loop)
self.assertEqual([1, 2, 3], result)
self.assertTrue(t1.done())
self.assertTrue(t1.result())
self.assertTrue(t2.done())
self.assertTrue(t2.result())
self.assertTrue(t3.done())
self.assertTrue(t3.result())
def test_acquire_cancel(self):
lock = asyncio.Lock(loop=self.loop)
self.assertTrue(self.loop.run_until_complete(lock.acquire()))
task = asyncio.Task(lock.acquire(), loop=self.loop)
self.loop.call_soon(task.cancel)
self.assertRaises(
asyncio.CancelledError,
self.loop.run_until_complete, task)
self.assertFalse(lock._waiters)
def test_cancel_race(self):
# Several tasks:
# - A acquires the lock
# - B is blocked in aqcuire()
# - C is blocked in aqcuire()
#
# Now, concurrently:
# - B is cancelled
# - A releases the lock
#
# If B's waiter is marked cancelled but not yet removed from
# _waiters, A's release() call will crash when trying to set
# B's waiter; instead, it should move on to C's waiter.
# Setup: A has the lock, b and c are waiting.
lock = asyncio.Lock(loop=self.loop)
@asyncio.coroutine
def lockit(name, blocker):
yield from lock.acquire()
try:
if blocker is not None:
yield from blocker
finally:
lock.release()
fa = asyncio.Future(loop=self.loop)
ta = asyncio.Task(lockit('A', fa), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertTrue(lock.locked())
tb = asyncio.Task(lockit('B', None), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual(len(lock._waiters), 1)
tc = asyncio.Task(lockit('C', None), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual(len(lock._waiters), 2)
# Create the race and check.
# Without the fix this failed at the last assert.
fa.set_result(None)
tb.cancel()
self.assertTrue(lock._waiters[0].cancelled())
test_utils.run_briefly(self.loop)
self.assertFalse(lock.locked())
self.assertTrue(ta.done())
self.assertTrue(tb.cancelled())
self.assertTrue(tc.done())
def test_release_not_acquired(self):
lock = asyncio.Lock(loop=self.loop)
self.assertRaises(RuntimeError, lock.release)
def test_release_no_waiters(self):
lock = asyncio.Lock(loop=self.loop)
self.loop.run_until_complete(lock.acquire())
self.assertTrue(lock.locked())
lock.release()
self.assertFalse(lock.locked())
def test_context_manager(self):
lock = asyncio.Lock(loop=self.loop)
@asyncio.coroutine
def acquire_lock():
return (yield from lock)
with self.loop.run_until_complete(acquire_lock()):
self.assertTrue(lock.locked())
self.assertFalse(lock.locked())
def test_context_manager_cant_reuse(self):
lock = asyncio.Lock(loop=self.loop)
@asyncio.coroutine
def acquire_lock():
return (yield from lock)
# This spells "yield from lock" outside a generator.
cm = self.loop.run_until_complete(acquire_lock())
with cm:
self.assertTrue(lock.locked())
self.assertFalse(lock.locked())
with self.assertRaises(AttributeError):
with cm:
pass
def test_context_manager_no_yield(self):
    """Using the bare lock in a with-statement must raise RuntimeError."""
    lock = asyncio.Lock(loop=self.loop)

    try:
        with lock:
            self.fail('RuntimeError is not raised in with expression')
    except RuntimeError as err:
        self.assertEqual(
            str(err),
            '"yield from" should be used as context manager expression')

    self.assertFalse(lock.locked())
class EventTests(test_utils.TestCase):
    """Tests for asyncio.Event."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        ev = asyncio.Event(loop=loop)
        self.assertIs(ev._loop, loop)

        ev = asyncio.Event(loop=self.loop)
        self.assertIs(ev._loop, self.loop)

    def test_ctor_noloop(self):
        # Without an explicit loop, the event binds to the current one.
        asyncio.set_event_loop(self.loop)
        ev = asyncio.Event()
        self.assertIs(ev._loop, self.loop)

    def test_repr(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertTrue(repr(ev).endswith('[unset]>'))
        match = RGX_REPR.match(repr(ev))
        self.assertEqual(match.group('extras'), 'unset')

        ev.set()
        self.assertTrue(repr(ev).endswith('[set]>'))
        self.assertTrue(RGX_REPR.match(repr(ev)))

        ev._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(ev))
        self.assertTrue(RGX_REPR.match(repr(ev)))

    def test_wait(self):
        """set() releases all waiters; wait() returns True."""
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())

        result = []

        @asyncio.coroutine
        def c1(result):
            if (yield from ev.wait()):
                result.append(1)

        @asyncio.coroutine
        def c2(result):
            if (yield from ev.wait()):
                result.append(2)

        @asyncio.coroutine
        def c3(result):
            if (yield from ev.wait()):
                result.append(3)

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        # t3 first runs after the event is already set, so it appends
        # before the woken-up t1 and t2.
        t3 = asyncio.Task(c3(result), loop=self.loop)

        ev.set()
        test_utils.run_briefly(self.loop)
        self.assertEqual([3, 1, 2], result)

        self.assertTrue(t1.done())
        self.assertIsNone(t1.result())
        self.assertTrue(t2.done())
        self.assertIsNone(t2.result())
        self.assertTrue(t3.done())
        self.assertIsNone(t3.result())

    def test_wait_on_set(self):
        """wait() on an already-set event returns immediately."""
        ev = asyncio.Event(loop=self.loop)
        ev.set()

        res = self.loop.run_until_complete(ev.wait())
        self.assertTrue(res)

    def test_wait_cancel(self):
        """Cancelling a waiter removes it from the event's waiter queue."""
        ev = asyncio.Event(loop=self.loop)

        wait = asyncio.Task(ev.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        self.assertFalse(ev._waiters)

    def test_clear(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())

        ev.set()
        self.assertTrue(ev.is_set())

        ev.clear()
        self.assertFalse(ev.is_set())

    def test_clear_with_waiters(self):
        """clear() between set() calls; redundant set() adds no waiters."""
        ev = asyncio.Event(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            if (yield from ev.wait()):
                result.append(1)
            return True

        t = asyncio.Task(c1(result), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        ev.set()
        ev.clear()
        self.assertFalse(ev.is_set())

        ev.set()
        ev.set()
        self.assertEqual(1, len(ev._waiters))

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertEqual(0, len(ev._waiters))

        self.assertTrue(t.done())
        self.assertTrue(t.result())
class ConditionTests(test_utils.TestCase):
    """Tests for asyncio.Condition."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        cond = asyncio.Condition(loop=loop)
        self.assertIs(cond._loop, loop)

        cond = asyncio.Condition(loop=self.loop)
        self.assertIs(cond._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        cond = asyncio.Condition()
        self.assertIs(cond._loop, self.loop)

    def test_wait(self):
        """Waiters wake one at a time, re-acquiring the lock in turn."""
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
            return True

        @asyncio.coroutine
        def c3(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(3)
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.assertFalse(cond.locked())

        self.assertTrue(self.loop.run_until_complete(cond.acquire()))
        cond.notify()
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())

        cond.notify(2)
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(cond.locked())

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_wait_cancel(self):
        """A cancelled wait() leaves the lock held and no stale waiters."""
        cond = asyncio.Condition(loop=self.loop)
        self.loop.run_until_complete(cond.acquire())

        wait = asyncio.Task(cond.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        self.assertFalse(cond._waiters)
        self.assertTrue(cond.locked())

    def test_wait_unacquired(self):
        """wait() without holding the lock raises RuntimeError."""
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete, cond.wait())

    def test_wait_for(self):
        """wait_for() only returns once the predicate becomes true."""
        cond = asyncio.Condition(loop=self.loop)
        presult = False

        def predicate():
            return presult

        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait_for(predicate)):
                result.append(1)
                cond.release()
            return True

        t = asyncio.Task(c1(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        # Notify while the predicate is still false: waiter stays asleep.
        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        presult = True
        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        self.assertTrue(t.done())
        self.assertTrue(t.result())

    def test_wait_for_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)

        # predicate can return true immediately
        res = self.loop.run_until_complete(cond.wait_for(lambda: [1, 2, 3]))
        self.assertEqual([1, 2, 3], res)

        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete,
            cond.wait_for(lambda: False))

    def test_notify(self):
        """notify(n) wakes at most n waiters; an oversized n wakes all."""
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
                cond.release()
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
                cond.release()
            return True

        @asyncio.coroutine
        def c3(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(3)
                cond.release()
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify(1)
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify(1)
        cond.notify(2048)
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_notify_all(self):
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
                cond.release()
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
                cond.release()
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify_all()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())

    def test_notify_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify)

    def test_notify_all_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify_all)

    def test_repr(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertTrue('unlocked' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

        self.loop.run_until_complete(cond.acquire())
        self.assertTrue('locked' in repr(cond))

        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:2' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

    def test_context_manager(self):
        cond = asyncio.Condition(loop=self.loop)

        @asyncio.coroutine
        def acquire_cond():
            return (yield from cond)

        with self.loop.run_until_complete(acquire_cond()):
            self.assertTrue(cond.locked())

        self.assertFalse(cond.locked())

    def test_context_manager_no_yield(self):
        cond = asyncio.Condition(loop=self.loop)

        try:
            with cond:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')

        self.assertFalse(cond.locked())

    def test_explicit_lock(self):
        """A Condition built around a caller-supplied Lock shares its loop."""
        lock = asyncio.Lock(loop=self.loop)
        cond = asyncio.Condition(lock, loop=self.loop)

        self.assertIs(cond._lock, lock)
        self.assertIs(cond._loop, lock._loop)

    def test_ambiguous_loops(self):
        """Lock and Condition on different loops must be rejected."""
        loop = self.new_test_loop()
        self.addCleanup(loop.close)

        lock = asyncio.Lock(loop=self.loop)
        with self.assertRaises(ValueError):
            asyncio.Condition(lock, loop=loop)
class SemaphoreTests(test_utils.TestCase):
    """Tests for asyncio.Semaphore and asyncio.BoundedSemaphore."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        sem = asyncio.Semaphore(loop=loop)
        self.assertIs(sem._loop, loop)

        sem = asyncio.Semaphore(loop=self.loop)
        self.assertIs(sem._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        sem = asyncio.Semaphore()
        self.assertIs(sem._loop, self.loop)

    def test_initial_value_zero(self):
        # A zero-valued semaphore starts out locked.
        sem = asyncio.Semaphore(0, loop=self.loop)
        self.assertTrue(sem.locked())

    def test_repr(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.assertTrue(repr(sem).endswith('[unlocked,value:1]>'))
        self.assertTrue(RGX_REPR.match(repr(sem)))

        self.loop.run_until_complete(sem.acquire())
        self.assertTrue(repr(sem).endswith('[locked]>'))
        self.assertTrue('waiters' not in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))

        sem._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))

        sem._waiters.append(mock.Mock())
        self.assertTrue('waiters:2' in repr(sem))
        self.assertTrue(RGX_REPR.match(repr(sem)))

    def test_semaphore(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.assertEqual(1, sem._value)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from sem)

        res = self.loop.run_until_complete(acquire_lock())

        self.assertTrue(res)
        self.assertTrue(sem.locked())
        self.assertEqual(0, sem._value)

        sem.release()
        self.assertFalse(sem.locked())
        self.assertEqual(1, sem._value)

    def test_semaphore_value(self):
        self.assertRaises(ValueError, asyncio.Semaphore, -1)

    def test_acquire(self):
        """Waiters are served in FIFO order as permits are released."""
        sem = asyncio.Semaphore(3, loop=self.loop)
        result = []

        self.assertTrue(self.loop.run_until_complete(sem.acquire()))
        self.assertTrue(self.loop.run_until_complete(sem.acquire()))
        self.assertFalse(sem.locked())

        @asyncio.coroutine
        def c1(result):
            yield from sem.acquire()
            result.append(1)
            return True

        @asyncio.coroutine
        def c2(result):
            yield from sem.acquire()
            result.append(2)
            return True

        @asyncio.coroutine
        def c3(result):
            yield from sem.acquire()
            result.append(3)
            return True

        @asyncio.coroutine
        def c4(result):
            yield from sem.acquire()
            result.append(4)
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(sem.locked())
        self.assertEqual(2, len(sem._waiters))
        self.assertEqual(0, sem._value)

        t4 = asyncio.Task(c4(result), loop=self.loop)

        sem.release()
        sem.release()
        self.assertEqual(2, sem._value)

        test_utils.run_briefly(self.loop)
        self.assertEqual(0, sem._value)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(sem.locked())
        self.assertEqual(1, len(sem._waiters))
        self.assertEqual(0, sem._value)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())
        self.assertFalse(t4.done())

        # cleanup locked semaphore
        sem.release()
        self.loop.run_until_complete(t4)

    def test_acquire_cancel(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.loop.run_until_complete(sem.acquire())

        acquire = asyncio.Task(sem.acquire(), loop=self.loop)
        self.loop.call_soon(acquire.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, acquire)
        self.assertFalse(sem._waiters)

    def test_release_not_acquired(self):
        # Only BoundedSemaphore rejects releasing above the initial value.
        sem = asyncio.BoundedSemaphore(loop=self.loop)
        self.assertRaises(ValueError, sem.release)

    def test_release_no_waiters(self):
        sem = asyncio.Semaphore(loop=self.loop)
        self.loop.run_until_complete(sem.acquire())
        self.assertTrue(sem.locked())

        sem.release()
        self.assertFalse(sem.locked())

    def test_context_manager(self):
        sem = asyncio.Semaphore(2, loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from sem)

        with self.loop.run_until_complete(acquire_lock()):
            self.assertFalse(sem.locked())
            self.assertEqual(1, sem._value)

            with self.loop.run_until_complete(acquire_lock()):
                self.assertTrue(sem.locked())

        self.assertEqual(2, sem._value)

    def test_context_manager_no_yield(self):
        sem = asyncio.Semaphore(2, loop=self.loop)

        try:
            with sem:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')

        self.assertEqual(2, sem._value)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
# >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
# ======= (NOTE(review): unresolved merge-conflict markers, commented out so
# they no longer break parsing — resolve the duplicated test module that
# follows against the copy above, then delete these lines)
"""Tests for lock.py"""
import unittest
from unittest import mock
import re
import asyncio
from asyncio import test_utils
# Expected shape of the repr() of asyncio synchronization primitives, e.g.
#   <asyncio.locks.Lock object at 0x...[locked,waiters:2]>
STR_RGX_REPR = (r'^<(?P<class>.*?) object at (?P<address>.*?)'
                r'\[(?P<extras>(set|unset|locked|unlocked)'
                r'(,value:\d)?(,waiters:\d+)?)\]>\Z')
RGX_REPR = re.compile(STR_RGX_REPR)
class LockTests(test_utils.TestCase):
    """Tests for asyncio.Lock."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        lock = asyncio.Lock(loop=loop)
        self.assertIs(lock._loop, loop)

        lock = asyncio.Lock(loop=self.loop)
        self.assertIs(lock._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        lock = asyncio.Lock()
        self.assertIs(lock._loop, self.loop)

    def test_repr(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(repr(lock).endswith('[unlocked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))

        @asyncio.coroutine
        def acquire_lock():
            yield from lock

        self.loop.run_until_complete(acquire_lock())
        self.assertTrue(repr(lock).endswith('[locked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))

    def test_lock(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        res = self.loop.run_until_complete(acquire_lock())

        self.assertTrue(res)
        self.assertTrue(lock.locked())

        lock.release()
        self.assertFalse(lock.locked())

    def test_acquire(self):
        """Blocked acquirers are woken one per release(), in FIFO order."""
        lock = asyncio.Lock(loop=self.loop)
        result = []

        self.assertTrue(self.loop.run_until_complete(lock.acquire()))

        @asyncio.coroutine
        def c1(result):
            if (yield from lock.acquire()):
                result.append(1)
            return True

        @asyncio.coroutine
        def c2(result):
            if (yield from lock.acquire()):
                result.append(2)
            return True

        @asyncio.coroutine
        def c3(result):
            if (yield from lock.acquire()):
                result.append(3)
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        t3 = asyncio.Task(c3(result), loop=self.loop)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)

        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_acquire_cancel(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(self.loop.run_until_complete(lock.acquire()))

        task = asyncio.Task(lock.acquire(), loop=self.loop)
        self.loop.call_soon(task.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, task)
        self.assertFalse(lock._waiters)

    def test_cancel_race(self):
        # Several tasks:
        # - A acquires the lock
        # - B is blocked in acquire()
        # - C is blocked in acquire()
        #
        # Now, concurrently:
        # - B is cancelled
        # - A releases the lock
        #
        # If B's waiter is marked cancelled but not yet removed from
        # _waiters, A's release() call will crash when trying to set
        # B's waiter; instead, it should move on to C's waiter.

        # Setup: A has the lock, b and c are waiting.
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def lockit(name, blocker):
            yield from lock.acquire()
            try:
                if blocker is not None:
                    yield from blocker
            finally:
                lock.release()

        fa = asyncio.Future(loop=self.loop)
        ta = asyncio.Task(lockit('A', fa), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertTrue(lock.locked())
        tb = asyncio.Task(lockit('B', None), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual(len(lock._waiters), 1)
        tc = asyncio.Task(lockit('C', None), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual(len(lock._waiters), 2)

        # Create the race and check.
        # Without the fix this failed at the last assert.
        fa.set_result(None)
        tb.cancel()
        self.assertTrue(lock._waiters[0].cancelled())
        test_utils.run_briefly(self.loop)
        self.assertFalse(lock.locked())
        self.assertTrue(ta.done())
        self.assertTrue(tb.cancelled())
        self.assertTrue(tc.done())

    def test_release_not_acquired(self):
        """release() on a lock that is not held must raise RuntimeError."""
        lock = asyncio.Lock(loop=self.loop)
        self.assertRaises(RuntimeError, lock.release)

    def test_release_no_waiters(self):
        lock = asyncio.Lock(loop=self.loop)
        self.loop.run_until_complete(lock.acquire())
        self.assertTrue(lock.locked())

        lock.release()
        self.assertFalse(lock.locked())

    def test_context_manager(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        with self.loop.run_until_complete(acquire_lock()):
            self.assertTrue(lock.locked())

        self.assertFalse(lock.locked())

    def test_context_manager_cant_reuse(self):
        lock = asyncio.Lock(loop=self.loop)

        @asyncio.coroutine
        def acquire_lock():
            return (yield from lock)

        # This spells "yield from lock" outside a generator.
        cm = self.loop.run_until_complete(acquire_lock())
        with cm:
            self.assertTrue(lock.locked())

        self.assertFalse(lock.locked())

        # Entering the same (consumed) context manager again must fail.
        with self.assertRaises(AttributeError):
            with cm:
                pass

    def test_context_manager_no_yield(self):
        lock = asyncio.Lock(loop=self.loop)

        try:
            with lock:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')

        self.assertFalse(lock.locked())
class EventTests(test_utils.TestCase):
    """Tests for asyncio.Event."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        ev = asyncio.Event(loop=loop)
        self.assertIs(ev._loop, loop)

        ev = asyncio.Event(loop=self.loop)
        self.assertIs(ev._loop, self.loop)

    def test_ctor_noloop(self):
        # Without an explicit loop, the event binds to the current one.
        asyncio.set_event_loop(self.loop)
        ev = asyncio.Event()
        self.assertIs(ev._loop, self.loop)

    def test_repr(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertTrue(repr(ev).endswith('[unset]>'))
        match = RGX_REPR.match(repr(ev))
        self.assertEqual(match.group('extras'), 'unset')

        ev.set()
        self.assertTrue(repr(ev).endswith('[set]>'))
        self.assertTrue(RGX_REPR.match(repr(ev)))

        ev._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(ev))
        self.assertTrue(RGX_REPR.match(repr(ev)))

    def test_wait(self):
        """set() releases all waiters; wait() returns True."""
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())

        result = []

        @asyncio.coroutine
        def c1(result):
            if (yield from ev.wait()):
                result.append(1)

        @asyncio.coroutine
        def c2(result):
            if (yield from ev.wait()):
                result.append(2)

        @asyncio.coroutine
        def c3(result):
            if (yield from ev.wait()):
                result.append(3)

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        # t3 first runs after the event is already set, so it appends
        # before the woken-up t1 and t2.
        t3 = asyncio.Task(c3(result), loop=self.loop)

        ev.set()
        test_utils.run_briefly(self.loop)
        self.assertEqual([3, 1, 2], result)

        self.assertTrue(t1.done())
        self.assertIsNone(t1.result())
        self.assertTrue(t2.done())
        self.assertIsNone(t2.result())
        self.assertTrue(t3.done())
        self.assertIsNone(t3.result())

    def test_wait_on_set(self):
        """wait() on an already-set event returns immediately."""
        ev = asyncio.Event(loop=self.loop)
        ev.set()

        res = self.loop.run_until_complete(ev.wait())
        self.assertTrue(res)

    def test_wait_cancel(self):
        """Cancelling a waiter removes it from the event's waiter queue."""
        ev = asyncio.Event(loop=self.loop)

        wait = asyncio.Task(ev.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        self.assertFalse(ev._waiters)

    def test_clear(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())

        ev.set()
        self.assertTrue(ev.is_set())

        ev.clear()
        self.assertFalse(ev.is_set())

    def test_clear_with_waiters(self):
        """clear() between set() calls; redundant set() adds no waiters."""
        ev = asyncio.Event(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            if (yield from ev.wait()):
                result.append(1)
            return True

        t = asyncio.Task(c1(result), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        ev.set()
        ev.clear()
        self.assertFalse(ev.is_set())

        ev.set()
        ev.set()
        self.assertEqual(1, len(ev._waiters))

        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertEqual(0, len(ev._waiters))

        self.assertTrue(t.done())
        self.assertTrue(t.result())
class ConditionTests(test_utils.TestCase):
    """Tests for asyncio.Condition."""

    def setUp(self):
        self.loop = self.new_test_loop()

    def test_ctor_loop(self):
        loop = mock.Mock()
        cond = asyncio.Condition(loop=loop)
        self.assertIs(cond._loop, loop)

        cond = asyncio.Condition(loop=self.loop)
        self.assertIs(cond._loop, self.loop)

    def test_ctor_noloop(self):
        asyncio.set_event_loop(self.loop)
        cond = asyncio.Condition()
        self.assertIs(cond._loop, self.loop)

    def test_wait(self):
        """Waiters wake one at a time, re-acquiring the lock in turn."""
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
            return True

        @asyncio.coroutine
        def c3(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(3)
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.assertFalse(cond.locked())

        self.assertTrue(self.loop.run_until_complete(cond.acquire()))
        cond.notify()
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())

        cond.notify(2)
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)
        self.assertTrue(cond.locked())

        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(cond.locked())

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_wait_cancel(self):
        """A cancelled wait() leaves the lock held and no stale waiters."""
        cond = asyncio.Condition(loop=self.loop)
        self.loop.run_until_complete(cond.acquire())

        wait = asyncio.Task(cond.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        self.assertFalse(cond._waiters)
        self.assertTrue(cond.locked())

    def test_wait_unacquired(self):
        """wait() without holding the lock raises RuntimeError."""
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete, cond.wait())

    def test_wait_for(self):
        """wait_for() only returns once the predicate becomes true."""
        cond = asyncio.Condition(loop=self.loop)
        presult = False

        def predicate():
            return presult

        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait_for(predicate)):
                result.append(1)
                cond.release()
            return True

        t = asyncio.Task(c1(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        # Notify while the predicate is still false: waiter stays asleep.
        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        presult = True
        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        self.assertTrue(t.done())
        self.assertTrue(t.result())

    def test_wait_for_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)

        # predicate can return true immediately
        res = self.loop.run_until_complete(cond.wait_for(lambda: [1, 2, 3]))
        self.assertEqual([1, 2, 3], res)

        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete,
            cond.wait_for(lambda: False))

    def test_notify(self):
        """notify(n) wakes at most n waiters; an oversized n wakes all."""
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
                cond.release()
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
                cond.release()
            return True

        @asyncio.coroutine
        def c3(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(3)
                cond.release()
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify(1)
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify(1)
        cond.notify(2048)
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())

    def test_notify_all(self):
        cond = asyncio.Condition(loop=self.loop)
        result = []

        @asyncio.coroutine
        def c1(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(1)
                cond.release()
            return True

        @asyncio.coroutine
        def c2(result):
            yield from cond.acquire()
            if (yield from cond.wait()):
                result.append(2)
                cond.release()
            return True

        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)

        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)

        self.loop.run_until_complete(cond.acquire())
        cond.notify_all()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)

        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())

    def test_notify_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify)

    def test_notify_all_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify_all)

    def test_repr(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertTrue('unlocked' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

        self.loop.run_until_complete(cond.acquire())
        self.assertTrue('locked' in repr(cond))

        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:2' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))

    def test_context_manager(self):
        cond = asyncio.Condition(loop=self.loop)

        @asyncio.coroutine
        def acquire_cond():
            return (yield from cond)

        with self.loop.run_until_complete(acquire_cond()):
            self.assertTrue(cond.locked())

        self.assertFalse(cond.locked())

    def test_context_manager_no_yield(self):
        cond = asyncio.Condition(loop=self.loop)

        try:
            with cond:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield from" should be used as context manager expression')

        self.assertFalse(cond.locked())

    def test_explicit_lock(self):
        """A Condition built around a caller-supplied Lock shares its loop."""
        lock = asyncio.Lock(loop=self.loop)
        cond = asyncio.Condition(lock, loop=self.loop)

        self.assertIs(cond._lock, lock)
        self.assertIs(cond._loop, lock._loop)

    def test_ambiguous_loops(self):
        """Lock and Condition on different loops must be rejected."""
        loop = self.new_test_loop()
        self.addCleanup(loop.close)

        lock = asyncio.Lock(loop=self.loop)
        with self.assertRaises(ValueError):
            asyncio.Condition(lock, loop=loop)
class SemaphoreTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
def test_ctor_loop(self):
loop = mock.Mock()
sem = asyncio.Semaphore(loop=loop)
self.assertIs(sem._loop, loop)
sem = asyncio.Semaphore(loop=self.loop)
self.assertIs(sem._loop, self.loop)
def test_ctor_noloop(self):
asyncio.set_event_loop(self.loop)
sem = asyncio.Semaphore()
self.assertIs(sem._loop, self.loop)
def test_initial_value_zero(self):
sem = asyncio.Semaphore(0, loop=self.loop)
self.assertTrue(sem.locked())
def test_repr(self):
sem = asyncio.Semaphore(loop=self.loop)
self.assertTrue(repr(sem).endswith('[unlocked,value:1]>'))
self.assertTrue(RGX_REPR.match(repr(sem)))
self.loop.run_until_complete(sem.acquire())
self.assertTrue(repr(sem).endswith('[locked]>'))
self.assertTrue('waiters' not in repr(sem))
self.assertTrue(RGX_REPR.match(repr(sem)))
sem._waiters.append(mock.Mock())
self.assertTrue('waiters:1' in repr(sem))
self.assertTrue(RGX_REPR.match(repr(sem)))
sem._waiters.append(mock.Mock())
self.assertTrue('waiters:2' in repr(sem))
self.assertTrue(RGX_REPR.match(repr(sem)))
def test_semaphore(self):
sem = asyncio.Semaphore(loop=self.loop)
self.assertEqual(1, sem._value)
@asyncio.coroutine
def acquire_lock():
return (yield from sem)
res = self.loop.run_until_complete(acquire_lock())
self.assertTrue(res)
self.assertTrue(sem.locked())
self.assertEqual(0, sem._value)
sem.release()
self.assertFalse(sem.locked())
self.assertEqual(1, sem._value)
def test_semaphore_value(self):
self.assertRaises(ValueError, asyncio.Semaphore, -1)
def test_acquire(self):
sem = asyncio.Semaphore(3, loop=self.loop)
result = []
self.assertTrue(self.loop.run_until_complete(sem.acquire()))
self.assertTrue(self.loop.run_until_complete(sem.acquire()))
self.assertFalse(sem.locked())
@asyncio.coroutine
def c1(result):
yield from sem.acquire()
result.append(1)
return True
@asyncio.coroutine
def c2(result):
yield from sem.acquire()
result.append(2)
return True
@asyncio.coroutine
def c3(result):
yield from sem.acquire()
result.append(3)
return True
@asyncio.coroutine
def c4(result):
yield from sem.acquire()
result.append(4)
return True
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
t3 = asyncio.Task(c3(result), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual([1], result)
self.assertTrue(sem.locked())
self.assertEqual(2, len(sem._waiters))
self.assertEqual(0, sem._value)
t4 = asyncio.Task(c4(result), loop=self.loop)
sem.release()
sem.release()
self.assertEqual(2, sem._value)
test_utils.run_briefly(self.loop)
self.assertEqual(0, sem._value)
self.assertEqual([1, 2, 3], result)
self.assertTrue(sem.locked())
self.assertEqual(1, len(sem._waiters))
self.assertEqual(0, sem._value)
self.assertTrue(t1.done())
self.assertTrue(t1.result())
self.assertTrue(t2.done())
self.assertTrue(t2.result())
self.assertTrue(t3.done())
self.assertTrue(t3.result())
self.assertFalse(t4.done())
# cleanup locked semaphore
sem.release()
self.loop.run_until_complete(t4)
def test_acquire_cancel(self):
    sem = asyncio.Semaphore(loop=self.loop)
    self.loop.run_until_complete(sem.acquire())
    acquire = asyncio.Task(sem.acquire(), loop=self.loop)
    # Cancel the pending acquire before it can complete.
    self.loop.call_soon(acquire.cancel)
    self.assertRaises(
        asyncio.CancelledError,
        self.loop.run_until_complete, acquire)
    # The cancelled waiter must not linger in the internal queue.
    self.assertFalse(sem._waiters)
def test_release_not_acquired(self):
    # BoundedSemaphore forbids releasing above its initial value.
    sem = asyncio.BoundedSemaphore(loop=self.loop)
    self.assertRaises(ValueError, sem.release)
def test_release_no_waiters(self):
    sem = asyncio.Semaphore(loop=self.loop)
    self.loop.run_until_complete(sem.acquire())
    self.assertTrue(sem.locked())
    # With no queued waiters, release simply unlocks the semaphore.
    sem.release()
    self.assertFalse(sem.locked())
def test_context_manager(self):
    sem = asyncio.Semaphore(2, loop=self.loop)

    @asyncio.coroutine
    def acquire_lock():
        return (yield from sem)

    with self.loop.run_until_complete(acquire_lock()):
        self.assertFalse(sem.locked())
        self.assertEqual(1, sem._value)
        with self.loop.run_until_complete(acquire_lock()):
            self.assertTrue(sem.locked())
    # Both context managers released their slot on exit.
    self.assertEqual(2, sem._value)
def test_context_manager_no_yield(self):
    sem = asyncio.Semaphore(2, loop=self.loop)
    try:
        # Using the semaphore directly in ``with`` (without first
        # acquiring via ``yield from``) must raise RuntimeError.
        with sem:
            self.fail('RuntimeError is not raised in with expression')
    except RuntimeError as err:
        self.assertEqual(
            str(err),
            '"yield from" should be used as context manager expression')
    # The failed ``with`` must not have consumed a slot.
    self.assertEqual(2, sem._value)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import sys
from keystoneclient.common import cms
from oslo.utils import timeutils
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
from keystone.token import provider
# Module-level configuration and logger handles.
CONF = config.CONF
LOG = log.getLogger(__name__)
class ExternalAuthNotApplicable(Exception):
    """External authentication is not applicable.

    Raised to signal the caller that REMOTE_USER-based authentication
    cannot be used, so it should fall back to local authentication.
    """
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
                     'token_api', 'token_provider_api', 'trust_api')
class Auth(controller.V2Controller):
    """Deprecated v2 authentication controller.

    Issues, validates and revokes v2 tokens, and exposes the signing
    certificates and the CMS-signed token revocation list.
    """

    @controller.v2_deprecated
    def ca_cert(self, context, auth=None):
        """Return the CA certificate bundle used for token signing."""
        # Context manager guarantees the handle is closed even if read()
        # raises (the original leaked the handle on failure).
        with open(CONF.signing.ca_certs, 'r') as ca_file:
            data = ca_file.read()
        return data

    @controller.v2_deprecated
    def signing_cert(self, context, auth=None):
        """Return the token signing certificate."""
        with open(CONF.signing.certfile, 'r') as cert_file:
            data = cert_file.read()
        return data

    @controller.v2_deprecated
    def authenticate(self, context, auth=None):
        """Authenticate credentials and return a token.

        Accept auth as a dict that looks like::

            {
                "auth":{
                    "passwordCredentials":{
                        "username":"test_user",
                        "password":"mypass"
                    },
                    "tenantName":"customer-x"
                }
            }

        In this case, tenant is optional, if not provided the token will be
        considered "unscoped" and can later be used to get a scoped token.

        Alternatively, this call accepts auth with only a token and tenant
        that will return a token that is scoped to that tenant.
        """
        if auth is None:
            raise exception.ValidationError(attribute='auth',
                                            target='request body')

        if "token" in auth:
            # Try to authenticate using a token
            auth_info = self._authenticate_token(
                context, auth)
        else:
            # Try external authentication
            try:
                auth_info = self._authenticate_external(
                    context, auth)
            except ExternalAuthNotApplicable:
                # Try local authentication
                auth_info = self._authenticate_local(
                    context, auth)

        user_ref, tenant_ref, metadata_ref, expiry, bind = auth_info
        # Validate that the auth info is valid and nothing is disabled
        try:
            self.identity_api.assert_user_enabled(
                user_id=user_ref['id'], user=user_ref)
            self.assignment_api.assert_domain_enabled(
                domain_id=user_ref['domain_id'])
            if tenant_ref:
                self.assignment_api.assert_project_enabled(
                    project_id=tenant_ref['id'], project=tenant_ref)
        except AssertionError as e:
            six.reraise(exception.Unauthorized, exception.Unauthorized(e),
                        sys.exc_info()[2])

        # NOTE(morganfainberg): Make sure the data is in correct form since it
        # might be consumed external to Keystone and this is a v2.0 controller.
        # The user_ref is encoded into the auth_token_data which is returned as
        # part of the token data. The token provider doesn't care about the
        # format.
        user_ref = self.v3_to_v2_user(user_ref)
        if tenant_ref:
            tenant_ref = self.filter_domain_id(tenant_ref)
        auth_token_data = self._get_auth_token_data(user_ref,
                                                    tenant_ref,
                                                    metadata_ref,
                                                    expiry)

        if tenant_ref:
            catalog_ref = self.catalog_api.get_catalog(
                user_ref['id'], tenant_ref['id'], metadata_ref)
        else:
            catalog_ref = {}

        auth_token_data['id'] = 'placeholder'
        if bind:
            auth_token_data['bind'] = bind

        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            role_ref = self.assignment_api.get_role(role_id)
            roles_ref.append(dict(name=role_ref['name']))

        (token_id, token_data) = self.token_provider_api.issue_v2_token(
            auth_token_data, roles_ref=roles_ref, catalog_ref=catalog_ref)

        # NOTE(wanghong): We consume a trust use only when we are using trusts
        # and have successfully issued a token.
        if CONF.trust.enabled and 'trust_id' in auth:
            self.trust_api.consume_use(auth['trust_id'])

        return token_data

    def _authenticate_token(self, context, auth):
        """Try to authenticate using an already existing token.

        Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
        """
        if 'token' not in auth:
            raise exception.ValidationError(
                attribute='token', target='auth')

        if "id" not in auth['token']:
            raise exception.ValidationError(
                attribute="id", target="token")

        old_token = auth['token']['id']
        if len(old_token) > CONF.max_token_size:
            raise exception.ValidationSizeError(attribute='token',
                                                size=CONF.max_token_size)

        try:
            old_token_ref = self.token_api.get_token(old_token)
        except exception.NotFound as e:
            raise exception.Unauthorized(e)

        wsgi.validate_token_bind(context, old_token_ref)

        # A trust token cannot be used to get another token
        if 'trust' in old_token_ref:
            raise exception.Forbidden()
        if 'trust_id' in old_token_ref['metadata']:
            raise exception.Forbidden()

        user_ref = old_token_ref['user']
        user_id = user_ref['id']
        tenant_id = self._get_project_id_from_auth(auth)

        if not CONF.trust.enabled and 'trust_id' in auth:
            raise exception.Forbidden('Trusts are disabled.')
        elif CONF.trust.enabled and 'trust_id' in auth:
            trust_ref = self.trust_api.get_trust(auth['trust_id'])
            if trust_ref is None:
                raise exception.Forbidden()
            if user_id != trust_ref['trustee_user_id']:
                raise exception.Forbidden()
            if (trust_ref['project_id'] and
                    tenant_id != trust_ref['project_id']):
                raise exception.Forbidden()
            if ('expires' in trust_ref) and (trust_ref['expires']):
                expiry = trust_ref['expires']
                if expiry < timeutils.parse_isotime(timeutils.isotime()):
                    # BUG FIX: was ``raise exception.Forbidden()()`` which
                    # called the exception instance and raised TypeError
                    # instead of Forbidden (same fix applied below).
                    raise exception.Forbidden()
            user_id = trust_ref['trustor_user_id']
            trustor_user_ref = self.identity_api.get_user(
                trust_ref['trustor_user_id'])
            if not trustor_user_ref['enabled']:
                raise exception.Forbidden()
            trustee_user_ref = self.identity_api.get_user(
                trust_ref['trustee_user_id'])
            if not trustee_user_ref['enabled']:
                raise exception.Forbidden()
            if trust_ref['impersonation'] is True:
                current_user_ref = trustor_user_ref
            else:
                current_user_ref = trustee_user_ref
        else:
            current_user_ref = self.identity_api.get_user(user_id)

        metadata_ref = {}
        tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
            user_id, tenant_id)

        expiry = old_token_ref['expires']
        if CONF.trust.enabled and 'trust_id' in auth:
            trust_id = auth['trust_id']
            trust_roles = []
            for role in trust_ref['roles']:
                if 'roles' not in metadata_ref:
                    raise exception.Forbidden()
                if role['id'] in metadata_ref['roles']:
                    trust_roles.append(role['id'])
                else:
                    raise exception.Forbidden()
            if 'expiry' in trust_ref and trust_ref['expiry']:
                trust_expiry = timeutils.parse_isotime(trust_ref['expiry'])
                if trust_expiry < expiry:
                    expiry = trust_expiry
            metadata_ref['roles'] = trust_roles
            metadata_ref['trustee_user_id'] = trust_ref['trustee_user_id']
            metadata_ref['trust_id'] = trust_id

        bind = old_token_ref.get('bind')
        return (current_user_ref, tenant_ref, metadata_ref, expiry, bind)

    def _authenticate_local(self, context, auth):
        """Try to authenticate against the identity backend.

        Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
        """
        if 'passwordCredentials' not in auth:
            raise exception.ValidationError(
                attribute='passwordCredentials', target='auth')

        if "password" not in auth['passwordCredentials']:
            raise exception.ValidationError(
                attribute='password', target='passwordCredentials')

        password = auth['passwordCredentials']['password']
        if password and len(password) > CONF.identity.max_password_length:
            raise exception.ValidationSizeError(
                attribute='password', size=CONF.identity.max_password_length)

        if ("userId" not in auth['passwordCredentials'] and
                "username" not in auth['passwordCredentials']):
            raise exception.ValidationError(
                attribute='username or userId',
                target='passwordCredentials')

        user_id = auth['passwordCredentials'].get('userId')
        if user_id and len(user_id) > CONF.max_param_size:
            raise exception.ValidationSizeError(attribute='userId',
                                                size=CONF.max_param_size)

        username = auth['passwordCredentials'].get('username', '')
        if username:
            if len(username) > CONF.max_param_size:
                raise exception.ValidationSizeError(attribute='username',
                                                    size=CONF.max_param_size)
            try:
                user_ref = self.identity_api.get_user_by_name(
                    username, CONF.identity.default_domain_id)
                user_id = user_ref['id']
            except exception.UserNotFound as e:
                raise exception.Unauthorized(e)

        try:
            user_ref = self.identity_api.authenticate(
                context,
                user_id=user_id,
                password=password)
        except AssertionError as e:
            raise exception.Unauthorized(e.args[0])

        metadata_ref = {}
        tenant_id = self._get_project_id_from_auth(auth)
        tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
            user_id, tenant_id)
        expiry = provider.default_expire_time()
        return (user_ref, tenant_ref, metadata_ref, expiry, None)

    def _authenticate_external(self, context, auth):
        """Try to authenticate an external user via REMOTE_USER variable.

        Returns auth_token_data, (user_ref, tenant_ref, metadata_ref)
        """
        environment = context.get('environment', {})
        if not environment.get('REMOTE_USER'):
            raise ExternalAuthNotApplicable()

        # NOTE(jamielennox): xml and json differ and get confused about what
        # empty auth should look like so just reset it.
        if not auth:
            auth = {}

        username = environment['REMOTE_USER']
        try:
            user_ref = self.identity_api.get_user_by_name(
                username, CONF.identity.default_domain_id)
            user_id = user_ref['id']
        except exception.UserNotFound as e:
            raise exception.Unauthorized(e)

        metadata_ref = {}
        tenant_id = self._get_project_id_from_auth(auth)
        tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
            user_id, tenant_id)
        expiry = provider.default_expire_time()
        bind = None
        if ('kerberos' in CONF.token.bind and
                environment.get('AUTH_TYPE', '').lower() == 'negotiate'):
            bind = {'kerberos': username}

        return (user_ref, tenant_ref, metadata_ref, expiry, bind)

    def _get_auth_token_data(self, user, tenant, metadata, expiry):
        """Assemble the dict handed to the token provider."""
        return dict(user=user,
                    tenant=tenant,
                    metadata=metadata,
                    expires=expiry)

    def _get_project_id_from_auth(self, auth):
        """Extract tenant information from auth dict.

        Returns a valid tenant_id if it exists, or None if not specified.
        """
        tenant_id = auth.get('tenantId')
        if tenant_id and len(tenant_id) > CONF.max_param_size:
            raise exception.ValidationSizeError(attribute='tenantId',
                                                size=CONF.max_param_size)

        tenant_name = auth.get('tenantName')
        if tenant_name and len(tenant_name) > CONF.max_param_size:
            raise exception.ValidationSizeError(attribute='tenantName',
                                                size=CONF.max_param_size)

        if tenant_name:
            try:
                tenant_ref = self.assignment_api.get_project_by_name(
                    tenant_name, CONF.identity.default_domain_id)
                tenant_id = tenant_ref['id']
            except exception.ProjectNotFound as e:
                raise exception.Unauthorized(e)
        return tenant_id

    def _get_project_roles_and_ref(self, user_id, tenant_id):
        """Returns the project roles for this user, and the project ref."""
        tenant_ref = None
        role_list = []
        if tenant_id:
            try:
                tenant_ref = self.assignment_api.get_project(tenant_id)
                role_list = self.assignment_api.get_roles_for_user_and_project(
                    user_id, tenant_id)
            except exception.ProjectNotFound:
                # Unknown project is reported as "unauthorized" below so we
                # do not leak project existence.
                pass

            if not role_list:
                msg = _('User %(u_id)s is unauthorized for tenant %(t_id)s')
                msg = msg % {'u_id': user_id, 't_id': tenant_id}
                LOG.warning(msg)
                raise exception.Unauthorized(msg)

        return (tenant_ref, role_list)

    def _get_token_ref(self, token_id, belongs_to=None):
        """Returns a token if a valid one exists.

        Optionally, limited to a token owned by a specific tenant.
        """
        data = self.token_api.get_token(token_id)
        if belongs_to:
            if data.get('tenant') is None:
                raise exception.Unauthorized(
                    _('Token does not belong to specified tenant.'))
            if data['tenant'].get('id') != belongs_to:
                raise exception.Unauthorized(
                    _('Token does not belong to specified tenant.'))
        return data

    @controller.v2_deprecated
    @controller.protected()
    def validate_token_head(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Identical to ``validate_token``, except does not return a response.

        The code in ``keystone.common.wsgi.render_response`` will remove
        the content body.
        """
        # TODO(ayoung) validate against revocation API
        belongs_to = context['query_string'].get('belongsTo')
        return self.token_provider_api.validate_v2_token(token_id, belongs_to)

    @controller.v2_deprecated
    @controller.protected()
    def validate_token(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Returns metadata about the token along any associated roles.
        """
        belongs_to = context['query_string'].get('belongsTo')
        # TODO(ayoung) validate against revocation API
        return self.token_provider_api.validate_v2_token(token_id, belongs_to)

    @controller.v2_deprecated
    def delete_token(self, context, token_id):
        """Delete a token, effectively invalidating it for authz."""
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        self.token_provider_api.revoke_token(token_id)

    @controller.v2_deprecated
    @controller.protected()
    def revocation_list(self, context, auth=None):
        """Return the CMS-signed list of revoked tokens."""
        if not CONF.token.revoke_by_id:
            raise exception.Gone()
        tokens = self.token_provider_api.list_revoked_tokens()

        for t in tokens:
            expires = t['expires']
            # Serialize datetimes so the payload is JSON-encodable.
            if expires and isinstance(expires, datetime.datetime):
                t['expires'] = timeutils.isotime(expires)
        data = {'revoked': tokens}
        json_data = jsonutils.dumps(data)
        signed_text = cms.cms_sign_text(json_data,
                                        CONF.signing.certfile,
                                        CONF.signing.keyfile)

        return {'signed': signed_text}

    @controller.v2_deprecated
    def endpoints(self, context, token_id):
        """Return a list of endpoints available to the token."""
        self.assert_admin(context)

        token_ref = self._get_token_ref(token_id)

        catalog_ref = None
        if token_ref.get('tenant'):
            catalog_ref = self.catalog_api.get_catalog(
                token_ref['user']['id'],
                token_ref['tenant']['id'],
                token_ref['metadata'])

        return Auth.format_endpoint_list(catalog_ref)

    @classmethod
    def format_endpoint_list(cls, catalog_ref):
        """Formats a list of endpoints according to Identity API v2.

        The v2.0 API wants an endpoint list to look like::

            {
                'endpoints': [
                    {
                        'id': $endpoint_id,
                        'name': $SERVICE[name],
                        'type': $SERVICE,
                        'tenantId': $tenant_id,
                        'region': $REGION,
                    }
                ],
                'endpoints_links': [],
            }

        """
        if not catalog_ref:
            return {}

        endpoints = []
        for region_name, region_ref in six.iteritems(catalog_ref):
            for service_type, service_ref in six.iteritems(region_ref):
                endpoints.append({
                    'id': service_ref.get('id'),
                    'name': service_ref.get('name'),
                    'type': service_type,
                    'region': region_name,
                    'publicURL': service_ref.get('publicURL'),
                    'internalURL': service_ref.get('internalURL'),
                    'adminURL': service_ref.get('adminURL'),
                })

        return {'endpoints': endpoints, 'endpoints_links': []}
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpacetoDepth op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class SpaceToDepthTest(test.TestCase):
    """Functional tests for the SpaceToDepth op."""

    def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
        # Runs space_to_depth on CPU in NHWC and, when a GPU is available,
        # on GPU in both NHWC and NCHW layouts, comparing each result to
        # `outputs`.
        input_nhwc = math_ops.cast(inputs, dtype)
        with self.session(use_gpu=False):
            # test NHWC (default) on CPU
            x_tf = array_ops.space_to_depth(input_nhwc, block_size)
            self.assertAllEqual(x_tf.eval(), outputs)
        if test.is_gpu_available():
            with self.session(force_gpu=True):
                # test NHWC (default) on GPU
                x_tf = array_ops.space_to_depth(input_nhwc, block_size)
                self.assertAllEqual(x_tf.eval(), outputs)
                # test NCHW on GPU
                input_nchw = test_util.NHWCToNCHW(input_nhwc)
                output_nchw = array_ops.space_to_depth(
                    input_nchw, block_size, data_format="NCHW")
                output_nhwc = test_util.NCHWToNHWC(output_nchw)
                self.assertAllEqual(output_nhwc.eval(), outputs)

    def testBasic(self):
        x_np = [[[[1], [2]], [[3], [4]]]]
        block_size = 2
        x_out = [[[[1, 2, 3, 4]]]]
        for dtype in [dtypes.float32, dtypes.float16, dtypes.uint8]:
            self._testOne(x_np, block_size, x_out, dtype=dtype)

    # Tests for larger input dimensions. To make sure elements are
    # correctly ordered spatially.
    def testLargerInput2x2(self):
        x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
                 [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
        block_size = 2
        x_out = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
                                                 [13, 14, 15, 16]]]]
        self._testOne(x_np, block_size, x_out)

    # Tests for larger input dimensions. To make sure elements are
    # correctly ordered in depth. Here, larger block size.
    def testLargerInput4x4(self):
        x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
                 [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
        block_size = 4
        x_out = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
        self._testOne(x_np, block_size, x_out)

    # Tests for larger input depths.
    # To make sure elements are properly interleaved in depth.
    def testDepthInterleaved(self):
        x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]]]]
        block_size = 2
        x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
        self._testOne(x_np, block_size, x_out)

    # Tests for larger input depths. Here an odd depth.
    # To make sure elements are properly interleaved in depth.
    def testDepthInterleavedDepth3(self):
        x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
        block_size = 2
        x_out = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
        self._testOne(x_np, block_size, x_out)

    # Tests for larger input dimensions AND for larger input depths.
    # To make sure elements are properly interleaved in depth and ordered
    # spatially.
    def testDepthInterleavedLarge(self):
        x_np = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
                 [[3, 30], [4, 40], [7, 70], [8, 80]],
                 [[9, 90], [10, 100], [13, 130], [14, 140]],
                 [[11, 110], [12, 120], [15, 150], [16, 160]]]]
        block_size = 2
        x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40], [5, 50, 6, 60, 7, 70, 8, 80]],
                  [[9, 90, 10, 100, 11, 110, 12, 120],
                   [13, 130, 14, 140, 15, 150, 16, 160]]]]
        self._testOne(x_np, block_size, x_out)

    def testBlockSize2Batch10(self):
        block_size = 2

        def batch_input_elt(i):
            return [[[1 * i], [2 * i], [5 * i], [6 * i]],
                    [[3 * i], [4 * i], [7 * i], [8 * i]],
                    [[9 * i], [10 * i], [13 * i], [14 * i]],
                    [[11 * i], [12 * i], [15 * i], [16 * i]]]

        def batch_output_elt(i):
            return [[[1 * i, 2 * i, 3 * i, 4 * i], [5 * i, 6 * i, 7 * i, 8 * i]],
                    [[9 * i, 10 * i, 11 * i, 12 * i],
                     [13 * i, 14 * i, 15 * i, 16 * i]]]

        batch_size = 10
        x_np = [batch_input_elt(i) for i in range(batch_size)]
        x_out = [batch_output_elt(i) for i in range(batch_size)]
        self._testOne(x_np, block_size, x_out)

    def testBatchSize0(self):
        # A zero-sized batch must still produce a correctly-shaped output.
        block_size = 2
        batch_size = 0
        input_nhwc = array_ops.ones([batch_size, 4, 6, 3])
        x_out = array_ops.ones([batch_size, 2, 3, 12])
        with self.session(use_gpu=False):
            # test NHWC (default) on CPU
            x_tf = array_ops.space_to_depth(input_nhwc, block_size)
            self.assertAllEqual(x_tf.shape, x_out.shape)
            x_tf.eval()
        if test.is_gpu_available():
            with self.session(use_gpu=True):
                # test NHWC (default) on GPU
                x_tf = array_ops.space_to_depth(input_nhwc, block_size)
                self.assertAllEqual(x_tf.shape, x_out.shape)
                x_tf.eval()

    # Tests for different width and height.
    def testNonSquare(self):
        x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]], [[5, 50], [6, 60]],
                 [[7, 70], [8, 80]], [[9, 90], [10, 100]], [[11, 110], [12, 120]]]]
        block_size = 2
        x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]], [[5, 50, 6, 60, 7, 70, 8, 80]],
                  [[9, 90, 10, 100, 11, 110, 12, 120]]]]
        self._testOne(x_np, block_size, x_out)

    # Error handling:

    def testInputWrongDimMissingDepth(self):
        # The input is missing the last dimension ("depth")
        x_np = [[[1, 2], [3, 4]]]
        block_size = 2
        with self.assertRaises(ValueError):
            out_tf = array_ops.space_to_depth(x_np, block_size)
            out_tf.eval()

    def testInputWrongDimMissingBatch(self):
        # The input is missing the first dimension ("batch")
        x_np = [[[1], [2]], [[3], [4]]]
        block_size = 2
        with self.assertRaises(ValueError):
            _ = array_ops.space_to_depth(x_np, block_size)

    def testBlockSize0(self):
        # The block size is 0.
        x_np = [[[[1], [2]], [[3], [4]]]]
        block_size = 0
        with self.assertRaises(ValueError):
            out_tf = array_ops.space_to_depth(x_np, block_size)
            out_tf.eval()

    def testBlockSizeOne(self):
        # The block size is 1. The block size needs to be > 1.
        x_np = [[[[1], [2]], [[3], [4]]]]
        block_size = 1
        with self.assertRaises(ValueError):
            out_tf = array_ops.space_to_depth(x_np, block_size)
            out_tf.eval()

    def testBlockSizeLarger(self):
        # The block size is too large for this input.
        x_np = [[[[1], [2]], [[3], [4]]]]
        block_size = 10
        with self.assertRaises(ValueError):
            out_tf = array_ops.space_to_depth(x_np, block_size)
            out_tf.eval()

    def testBlockSizeNotDivisibleWidth(self):
        # The block size divides width but not height.
        x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
        block_size = 3
        with self.assertRaises(ValueError):
            _ = array_ops.space_to_depth(x_np, block_size)

    def testBlockSizeNotDivisibleHeight(self):
        # The block size divides height but not width.
        x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
        block_size = 3
        with self.assertRaises(ValueError):
            _ = array_ops.space_to_depth(x_np, block_size)

    def testBlockSizeNotDivisibleBoth(self):
        # The block size divides neither width nor height.
        x_np = [[[[1], [2]], [[3], [4]]]]
        block_size = 3
        with self.assertRaises(ValueError):
            _ = array_ops.space_to_depth(x_np, block_size)

    def testUnknownShape(self):
        # Shape inference must still fix the rank (4) for unknown input shapes.
        t = array_ops.space_to_depth(
            array_ops.placeholder(dtypes.float32), block_size=4)
        self.assertEqual(4, t.get_shape().ndims)

    def spaceToDepthUsingTranspose(self, tensor, block_size, data_format):
        # Reference implementation built from reshape+transpose; used by
        # compareToTranspose to cross-check the fused op.
        block_size_sq = block_size * block_size
        if data_format == "NHWC":
            b, ih, iw, ic = tensor.shape.as_list()
            assert ih % block_size == 0, (ih, block_size)
            assert iw % block_size == 0, (iw, block_size)
            ow, oh, oc = iw // block_size, ih // block_size, ic * block_size_sq
            tensor = array_ops.reshape(tensor,
                                       [b, oh, block_size, ow, block_size, ic])
            tensor = array_ops.transpose(tensor, [0, 1, 3, 2, 4, 5])
            tensor = array_ops.reshape(tensor, [b, oh, ow, oc])
        elif data_format == "NCHW":
            b, ic, ih, iw = tensor.shape.as_list()
            assert ih % block_size == 0, (ih, block_size)
            assert iw % block_size == 0, (iw, block_size)
            ow, oh, oc = iw // block_size, ih // block_size, ic * block_size_sq
            tensor = array_ops.reshape(tensor,
                                       [b, ic, oh, block_size, ow, block_size])
            tensor = array_ops.transpose(tensor, [0, 3, 5, 1, 2, 4])
            tensor = array_ops.reshape(tensor, [b, oc, oh, ow])
        return tensor

    def compareToTranspose(self, batch_size, out_height, out_width, in_channels,
                           block_size, data_format, use_gpu):
        # Compares the op against the transpose-based reference for one
        # configuration (shape, block size, layout, device).
        in_height = out_height * block_size
        in_width = out_width * block_size
        nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
        nchw_input_shape = [batch_size, in_channels, in_height, in_width]
        total_size = np.prod(nhwc_input_shape)

        if data_format == "NCHW_VECT_C":
            # Initialize the input tensor with qint8 values that circle -127..127.
            x = [((f + 128) % 255) - 127 for f in range(total_size)]
            t = constant_op.constant(x, shape=nhwc_input_shape, dtype=dtypes.float32)
            expected = self.spaceToDepthUsingTranspose(t, block_size, "NHWC")
            t = test_util.NHWCToNCHW_VECT_C(t)
            t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
            t = array_ops.space_to_depth(t, block_size, data_format="NCHW_VECT_C")
            t = gen_array_ops.dequantize(t, -128, 127)
            actual = test_util.NCHW_VECT_CToNHWC(t)
        else:
            # Initialize the input tensor with ascending whole numbers as floats.
            x = [f * 1.0 for f in range(total_size)]
            shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
            t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)
            expected = self.spaceToDepthUsingTranspose(t, block_size, data_format)
            actual = array_ops.space_to_depth(t, block_size, data_format=data_format)

        with self.cached_session(use_gpu=use_gpu) as sess:
            actual_vals, expected_vals = sess.run([actual, expected])
            self.assertTrue(np.array_equal(actual_vals, expected_vals))

    def testAgainstTranspose(self):
        self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", False)
        self.compareToTranspose(1, 2, 3, 2, 2, "NHWC", False)
        self.compareToTranspose(1, 2, 3, 2, 3, "NHWC", False)

        if not test.is_gpu_available():
            tf_logging.info("skipping gpu tests since gpu not available")
            return

        self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", True)
        self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", True)
        self.compareToTranspose(3, 2, 3, 1, 2, "NCHW", True)
        self.compareToTranspose(3, 2, 3, 2, 3, "NCHW", True)
        self.compareToTranspose(5, 7, 11, 3, 2, "NCHW", True)
        self.compareToTranspose(3, 2, 3, 4, 2, "NCHW_VECT_C", True)
        self.compareToTranspose(3, 2, 3, 8, 3, "NCHW_VECT_C", True)
        self.compareToTranspose(5, 7, 11, 12, 2, "NCHW_VECT_C", True)
class SpaceToDepthGradientTest(test.TestCase):
    """Gradient checks for the SpaceToDepth op."""

    # Check the gradients.
    def _checkGrad(self, x, block_size, data_format):
        # NCHW is implemented for only GPU.
        if data_format == "NCHW" and not test.is_gpu_available():
            return

        assert 4 == x.ndim
        with self.cached_session(use_gpu=True):
            tf_x = ops.convert_to_tensor(x)
            tf_y = array_ops.space_to_depth(tf_x, block_size, data_format=data_format)
            epsilon = 1e-2
            # Compare the analytic Jacobian against a numeric estimate.
            ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
                tf_x,
                x.shape,
                tf_y,
                tf_y.get_shape().as_list(),
                x_init_value=x,
                delta=epsilon)
            self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)

    # Tests a gradient for space_to_depth of x which is a four dimensional
    # tensor of shape [b, h * block_size, w * block_size, d].
    def _compare(self, b, h, w, d, block_size, data_format):
        block_size_sq = block_size * block_size
        data = np.random.normal(0, 1, b * h * w * d * block_size_sq).astype(
            np.float32)
        if data_format == "NHWC":
            x = data.reshape([b, h * block_size, w * block_size, d])
        else:
            x = data.reshape([b, d, h * block_size, w * block_size])

        self._checkGrad(x, block_size, data_format)

    # Don't use very large numbers as dimensions here as the result is tensor
    # with cartesian product of the dimensions.
    def testSmall(self):
        block_size = 2
        self._compare(1, 2, 3, 5, block_size, "NHWC")
        self._compare(1, 2, 3, 5, block_size, "NCHW")

    def testSmall2(self):
        block_size = 2
        self._compare(2, 4, 3, 2, block_size, "NHWC")
        self._compare(2, 4, 3, 2, block_size, "NCHW")
# Allow running this test module directly.
if __name__ == "__main__":
    test.main()
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Ckluster Technologies
# All Rights Reserved.
#
# This software is subject to the provision stipulated in
# http://www.ckluster.com/OPEN_LICENSE.txt.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This module contains all the predicates related to x.509 authorization.
"""
from repoze.what.predicates import Predicate
from repoze.who.plugins.x509.utils import *
import re
__all__ = ['is_subject', 'is_issuer', 'X509Predicate', 'X509DNPredicate']
class X509Predicate(Predicate):
    """
    Represents a predicate based on the X.509 protocol. It can be evaluated,
    although it can only check that is a valid client certificate.

    Users must use a subclass or inherit from it.
    """

    def __init__(self, **kwargs):
        """
        :param verify_key: The WSGI environment key that specify if the client
            certificate is valid or not. A value of 'SUCCESS' will make it
            valid. If you don't specify a key, then the value that it will take
            is by default ``SSL_CLIENT_VERIFY``
        :param validity_start_key: The WSGI environment key that specifies the
            encoded datetime that indicates the start of the validity range.
            If the timezone is not UTC (or GMT), it will fail.
        :param validity_end_key: The WSGI environment key that specifies the
            encoded datetime that indicates the end of the validity range.
            If the timezone is not UTC (or GMT), it will fail.
        """
        # Fall back to the module-level default keys when none are given.
        self.verify_key = kwargs.pop('verify_key', None) or VERIFY_KEY
        self.validity_start_key = kwargs.pop('validity_start_key', None) or \
            VALIDITY_START_KEY
        self.validity_end_key = kwargs.pop('validity_end_key', None) or \
            VALIDITY_END_KEY
        super(X509Predicate, self).__init__(msg=kwargs.get('msg'))

    def evaluate(self, environ, credentials):
        """
        Evaluates the predicate. A subclass should override this method however
        call it before doing its custom code.

        :param environ: The WSGI environment.
        :param credentials: The user credentials. These will not be used
        :raise NotAuthorizedError: If the predicate is not met.
        """
        # Cannot assume every environment will have all mod_ssl CGI vars.
        if not verify_certificate(
            environ,
            self.verify_key,
            self.validity_start_key,
            self.validity_end_key
        ):
            # unmet() is provided by the repoze.what Predicate base class
            # and raises the "not authorized" error for this predicate.
            self.unmet()
class X509DNPredicate(X509Predicate):
    """
    Represents a predicate that evaluates a distinguished name encoded in a
    OpenSSL X.509 DN string. It evaluates according to the properties
    specified.
    """
    def __init__(self, common_name=None, organization=None,
                 organizational_unit=None, country=None,
                 state=None, locality=None, environ_key=None, **kwargs):
        """
        :param common_name: The common name of the distinguished name.
        :param organization: The organization of the distinguished name.
        :param organizational_unit: The organization unit of the distinguished
            name.
        :param country: ISO-3166-1 alpha-2 encoding of the country of the
            distinguished name.
        :param state: The state within the country of the distinguished name.
        :param locality: The locality or city of the distinguished name.
        :param environ_key: The WSGI environment key of where the distinguished
            name is located.
        :param kwargs: You can specify a custom attribute type. The name of the
            key will count as the type, and the value is what is going to be
            checked against.
        :raise ValueError: When you don't specify at least one value for the
            parameters, including any custom one; or, when you don't specify an
            ``environ_key``.
        """
        if common_name is None and organizational_unit is None and \
                organization is None and country is None and state is None \
                and locality is None and len(kwargs) == 0:
            raise ValueError(('At least one of common_name, organizational_unit,'
                              ' organization, country, state, locality, or one '
                              'custom parameter must have a value'))
        # Note: **kwargs is re-packed into a fresh dict inside the superclass
        # call, so the pops done there do not affect our local mapping; the
        # framework keys are stripped again below.
        super(X509DNPredicate, self).__init__(**kwargs)
        field_and_values = (
            ('O', organization, 'organization'),
            ('CN', common_name, 'common_name'),
            ('OU', organizational_unit, 'organizational_unit'),
            ('C', country, 'country'),
            ('ST', state, 'state'),
            ('L', locality, 'locality')
        )
        self.log = kwargs.get('log')
        self._prepare_dn_params_with_consistency(
            field_and_values,
            kwargs
        )
        if environ_key is None or len(environ_key) == 0:
            raise ValueError('This predicate requires a WSGI environ key')
        self.environ_key = environ_key
    def _prepare_dn_params_with_consistency(self, check_params, kwargs):
        # We prefer common_name over CN, for example
        # It receives a 3-tuple:
        # * The DN attribute type
        # * The value of the constructor parameter
        # * The name of the constructor parameter
        self.dn_params = []
        for param in check_params:
            if param[0] in kwargs and param[1] is not None:
                self.log and self.log.warn(
                    'Choosing %s over "%s"' % (param[0], param[1])
                )
                del kwargs[param[0]]
            if param[1] is not None:
                self.dn_params.append((param[0], param[1]))
        # Strip every framework keyword so it is not mistaken for a custom DN
        # attribute. Bug fix: 'msg' and 'log' previously leaked into
        # dn_params, making the predicate check bogus 'msg'/'log' attributes.
        for param in ('validity_start_key', 'validity_end_key', 'verify_key',
                      'msg', 'log'):
            kwargs.pop(param, None)
        # items() (instead of the Python-2-only iteritems()) keeps Python 2
        # behavior while adding Python 3 compatibility.
        self.dn_params.extend(kwargs.items())
    def evaluate(self, environ, credentials):
        """
        Evaluates a distinguished name or the server variables that represents
        it, already parsed. First it checks for the server variables, and then
        it tries to parse the distinguished name. See the documentation for
        more information.
        :param environ: The WSGI environment.
        :param credentials: The user credentials. This parameter is not used.
        :raise NotAuthorizedError: When the evaluation fails.
        """
        super(X509DNPredicate, self).evaluate(environ, credentials)
        # First let's try with Apache-like server variables, and last rely on
        # the parsing of the DN itself.
        try:
            for suffix, value in self.dn_params:
                self._check_server_variable(environ, '_' + suffix, value)
        except KeyError:
            # A server variable was missing: fall back to parsing the DN.
            pass
        else:
            # Every environ variable is valid
            return
        dn = environ.get(self.environ_key)
        if dn is None:
            self.unmet()
        try:
            parsed_dn = parse_dn(dn)
        except Exception:
            # A malformed DN counts as an unmet predicate (narrowed from a
            # bare except so KeyboardInterrupt/SystemExit pass through).
            self.unmet()
        try:
            for key, value in self.dn_params:
                self._check_parsed_dict(parsed_dn, key, value)
        except KeyError:
            self.unmet()
    def _check_parsed_dict(self, parsed, key, value):
        # When the expected value is a collection, every member must appear
        # among the parsed attribute values.
        parsed_value = parsed[key]
        if isinstance(value, (list, tuple)):
            for v in value:
                if v not in parsed_value:
                    self.unmet()
        elif value not in parsed_value:
            self.unmet()
    def _check_server_variable(self, environ, suffix, value):
        key = self.environ_key + suffix
        if isinstance(value, (list, tuple)):
            # Multi-valued attributes are exposed as KEY_0, KEY_1, ...
            environ_values = []
            for n in range(len(value)):
                environ_values.append(environ[key + '_' + str(n)])
            for v in value:
                if v not in environ_values:
                    self.unmet()
        elif environ[key] != value:
            self.unmet()
class is_issuer(X509DNPredicate):
    """
    Represents a predicate that evaluates the issuer distinguished name.
    """
    ISSUER_KEY_DN = 'SSL_CLIENT_I_DN'
    message = 'Invalid SSL client issuer.'
    def __init__(self, common_name=None, organization=None,
                 organizational_unit=None, country=None, state=None,
                 locality=None, issuer_key=None, **kwargs):
        """Check the given DN attributes against the certificate issuer."""
        # Keyword arguments make the mapping to the base constructor explicit.
        super(is_issuer, self).__init__(
            common_name=common_name,
            organization=organization,
            organizational_unit=organizational_unit,
            country=country,
            state=state,
            locality=locality,
            environ_key=issuer_key or self.ISSUER_KEY_DN,
            **kwargs
        )
class is_subject(X509DNPredicate):
    """
    Represents a predicate that evaluates the subject distinguished name.
    """
    SUBJECT_KEY_DN = 'SSL_CLIENT_S_DN'
    message = 'Invalid SSL client subject.'
    def __init__(self, common_name=None, organization=None,
                 organizational_unit=None, country=None, state=None,
                 locality=None, subject_key=None, **kwargs):
        """Check the given DN attributes against the certificate subject."""
        # Keyword arguments make the mapping to the base constructor explicit.
        super(is_subject, self).__init__(
            common_name=common_name,
            organization=organization,
            organizational_unit=organizational_unit,
            country=country,
            state=state,
            locality=locality,
            environ_key=subject_key or self.SUBJECT_KEY_DN,
            **kwargs
        )
| |
"""Support for PlayStation 4 consoles."""
import logging
import asyncio
import pyps4_homeassistant.ps4 as pyps4
from pyps4_homeassistant.errors import NotReady
from homeassistant.core import callback
from homeassistant.components.media_player import (
ENTITY_IMAGE_URL, MediaPlayerDevice)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_GAME, MEDIA_TYPE_APP, SUPPORT_SELECT_SOURCE,
SUPPORT_PAUSE, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON)
from homeassistant.components.ps4 import (
format_unique_id, load_games, save_games)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_REGION,
CONF_TOKEN, STATE_IDLE, STATE_OFF, STATE_PLAYING)
from homeassistant.helpers import device_registry, entity_registry
from .const import (DEFAULT_ALIAS, DOMAIN as PS4_DOMAIN, PS4_DATA,
REGIONS as deprecated_regions)
_LOGGER = logging.getLogger(__name__)
SUPPORT_PS4 = SUPPORT_TURN_OFF | SUPPORT_TURN_ON | \
SUPPORT_PAUSE | SUPPORT_STOP | SUPPORT_SELECT_SOURCE
ICON = 'mdi:playstation'
MEDIA_IMAGE_DEFAULT = None
DEFAULT_RETRIES = 2
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up PS4 from a config entry.

    Thin shim that forwards the config entry to the platform setup.
    """
    await async_setup_platform(
        hass, config_entry, async_add_entities, discovery_info=None)
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up PS4 Platform.

    Creates one PS4Device entity per console found in the config entry.
    """
    creds = config.data[CONF_TOKEN]
    entities = []
    for entry in config.data['devices']:
        console = pyps4.Ps4Async(
            entry[CONF_HOST], creds, device_name=DEFAULT_ALIAS)
        entities.append(PS4Device(
            config, entry[CONF_NAME], entry[CONF_HOST],
            entry[CONF_REGION], console, creds))
    async_add_entities(entities, update_before_add=True)
class PS4Device(MediaPlayerDevice):
    """Representation of a PS4."""
    def __init__(self, config, name, host, region, ps4, creds):
        """Initialize the ps4 device.

        :param config: config entry; entry_id links registry entries
        :param name: friendly entity name
        :param host: console host address
        :param region: PS Store region used for title lookups
        :param ps4: pyps4 Ps4Async client instance
        :param creds: PSN credential token (part of the unique ID)
        """
        self._entry_id = config.entry_id
        self._ps4 = ps4
        self._host = host
        self._name = name
        self._region = region
        self._creds = creds
        self._state = None
        self._media_content_id = None
        self._media_title = None
        self._media_image = None
        self._media_type = None
        self._source = None
        self._games = {}
        self._source_list = []
        # Count of consecutive status polls with no response; the state is
        # marked unknown once it exceeds DEFAULT_RETRIES.
        self._retry = 0
        self._disconnected = False
        self._info = None
        self._unique_id = None
    @callback
    def status_callback(self):
        """Handle status callback. Parse status."""
        self._parse_status()
    @callback
    def schedule_update(self):
        """Schedules update with HA."""
        self.async_schedule_update_ha_state()
    @callback
    def subscribe_to_protocol(self):
        """Notify protocol to callback with update changes."""
        self.hass.data[PS4_DATA].protocol.add_callback(
            self._ps4, self.status_callback)
    @callback
    def unsubscribe_to_protocol(self):
        """Notify protocol to remove callback."""
        self.hass.data[PS4_DATA].protocol.remove_callback(
            self._ps4, self.status_callback)
    def check_region(self):
        """Display logger msg if region is deprecated."""
        # Non-Breaking although data returned may be inaccurate.
        if self._region in deprecated_regions:
            _LOGGER.info("""Region: %s has been deprecated.
                            Please remove PS4 integration
                            and Re-configure again to utilize
                            current regions""", self._region)
    async def async_added_to_hass(self):
        """Subscribe PS4 events."""
        self.hass.data[PS4_DATA].devices.append(self)
        self.check_region()
    async def async_update(self):
        """Retrieve the latest data."""
        if self._ps4.ddp_protocol is not None:
            # Request Status with asyncio transport.
            self._ps4.get_status()
            # Don't attempt to connect if entity is connected or if,
            # PS4 is in standby or disconnected from LAN or powered off.
            if not self._ps4.connected and not self._ps4.is_standby and\
                    self._ps4.is_available:
                try:
                    await self._ps4.async_connect()
                except NotReady:
                    # Console not ready for a session yet; try again on the
                    # next update cycle.
                    pass
        # Try to ensure correct status is set on startup for device info.
        if self._ps4.ddp_protocol is None:
            # Use socket.socket.
            await self.hass.async_add_executor_job(self._ps4.get_status)
            if self._info is None:
                # Add entity to registry.
                await self.async_get_device_info(self._ps4.status)
            self._ps4.ddp_protocol = self.hass.data[PS4_DATA].protocol
            self.subscribe_to_protocol()
        self._parse_status()
    def _parse_status(self):
        """Parse status."""
        status = self._ps4.status
        if status is not None:
            self._games = load_games(self.hass)
            if self._games is not None:
                self._source_list = list(sorted(self._games.values()))
            self._retry = 0
            self._disconnected = False
            if status.get('status') == 'Ok':
                title_id = status.get('running-app-titleid')
                name = status.get('running-app-name')
                # NOTE(review): parses as `title_id and (name is not None)`;
                # a falsy title_id falls through to idle — confirm intended.
                if title_id and name is not None:
                    self._state = STATE_PLAYING
                    if self._media_content_id != title_id:
                        self._media_content_id = title_id
                        self._media_title = name
                        self._source = self._media_title
                        self._media_type = None
                        # Fetch cover art and media type in the background.
                        asyncio.ensure_future(
                            self.async_get_title_data(title_id, name))
                else:
                    if self._state != STATE_IDLE:
                        self.idle()
            else:
                if self._state != STATE_OFF:
                    self.state_off()
        elif self._retry > DEFAULT_RETRIES:
            self.state_unknown()
        else:
            self._retry += 1
    def idle(self):
        """Set states for state idle."""
        self.reset_title()
        self._state = STATE_IDLE
        self.schedule_update()
    def state_off(self):
        """Set states for state off."""
        self.reset_title()
        self._state = STATE_OFF
        self.schedule_update()
    def state_unknown(self):
        """Set states for state unknown."""
        self.reset_title()
        self._state = None
        # Warn only once per disconnection, not on every failed poll.
        if self._disconnected is False:
            _LOGGER.warning("PS4 could not be reached")
            self._disconnected = True
            self._retry = 0
    def reset_title(self):
        """Update if there is no title."""
        self._media_title = None
        self._media_content_id = None
        self._media_type = None
        self._source = None
    async def async_get_title_data(self, title_id, name):
        """Get PS Store Data.

        Looks up cover art, display name and media type for the running
        title; falls back to the raw values reported by the console.
        """
        from pyps4_homeassistant.errors import PSDataIncomplete
        app_name = None
        art = None
        media_type = None
        try:
            title = await self._ps4.async_get_ps_store_data(
                name, title_id, self._region)
        except PSDataIncomplete:
            title = None
        except asyncio.TimeoutError:
            title = None
            _LOGGER.error("PS Store Search Timed out")
        else:
            if title is not None:
                app_name = title.name
                art = title.cover_art
                # Assume media type is game if not app.
                if title.game_type != 'App':
                    media_type = MEDIA_TYPE_GAME
                else:
                    media_type = MEDIA_TYPE_APP
            else:
                _LOGGER.error(
                    "Could not find data in region: %s for PS ID: %s",
                    self._region, title_id)
        finally:
            # Always publish something, even when the store lookup failed.
            self._media_title = app_name or name
            self._source = self._media_title
            self._media_image = art or None
            self._media_type = media_type
            self.update_list()
            self.schedule_update()
    def update_list(self):
        """Update Game List, Correct data if different."""
        if self._media_content_id in self._games:
            store = self._games[self._media_content_id]
            if store != self._media_title:
                # Stored name disagrees with the latest data; replace it.
                self._games.pop(self._media_content_id)
        if self._media_content_id not in self._games:
            self.add_games(self._media_content_id, self._media_title)
            self._games = load_games(self.hass)
        self._source_list = list(sorted(self._games.values()))
    def add_games(self, title_id, app_name):
        """Add games to list."""
        games = self._games
        if title_id is not None and title_id not in games:
            game = {title_id: app_name}
            games.update(game)
            save_games(self.hass, games)
    async def async_get_device_info(self, status):
        """Set device info for registry."""
        # If cannot get status on startup, assume info from registry.
        if status is None:
            _LOGGER.info("Assuming status from registry")
            e_registry = await entity_registry.async_get_registry(self.hass)
            d_registry = await device_registry.async_get_registry(self.hass)
            for entity_id, entry in e_registry.entities.items():
                if entry.config_entry_id == self._entry_id:
                    self._unique_id = entry.unique_id
                    self.entity_id = entity_id
                    break
            for device in d_registry.devices.values():
                if self._entry_id in device.config_entries:
                    self._info = {
                        'name': device.name,
                        'model': device.model,
                        'identifiers': device.identifiers,
                        'manufacturer': device.manufacturer,
                        'sw_version': device.sw_version
                    }
                    break
        else:
            # NOTE(review): assumes system-version is a digit string where
            # chars 1-3 encode e.g. '650' -> '6.50' — confirm format.
            _sw_version = status['system-version']
            _sw_version = _sw_version[1:4]
            sw_version = "{}.{}".format(_sw_version[0], _sw_version[1:])
            self._info = {
                'name': status['host-name'],
                'model': 'PlayStation 4',
                'identifiers': {
                    (PS4_DOMAIN, status['host-id'])
                },
                'manufacturer': 'Sony Interactive Entertainment Inc.',
                'sw_version': sw_version
            }
            self._unique_id = format_unique_id(self._creds, status['host-id'])
    async def async_will_remove_from_hass(self):
        """Remove Entity from Hass."""
        # Close TCP Transport.
        if self._ps4.connected:
            await self._ps4.close()
        self.hass.data[PS4_DATA].devices.remove(self)
    @property
    def device_info(self):
        """Return information about the device."""
        return self._info
    @property
    def unique_id(self):
        """Return Unique ID for entity."""
        return self._unique_id
    @property
    def entity_picture(self):
        """Return picture."""
        # Only expose cover art while a title is actively running.
        if self._state == STATE_PLAYING and self._media_content_id is not None:
            image_hash = self.media_image_hash
            if image_hash is not None:
                return ENTITY_IMAGE_URL.format(
                    self.entity_id, self.access_token, image_hash)
        return MEDIA_IMAGE_DEFAULT
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def icon(self):
        """Icon."""
        return ICON
    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        return self._media_content_id
    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return self._media_type
    @property
    def media_image_url(self):
        """Image url of current playing media."""
        if self._media_content_id is None:
            return MEDIA_IMAGE_DEFAULT
        return self._media_image
    @property
    def media_title(self):
        """Title of current playing media."""
        return self._media_title
    @property
    def supported_features(self):
        """Media player features that are supported."""
        return SUPPORT_PS4
    @property
    def source(self):
        """Return the current input source."""
        return self._source
    @property
    def source_list(self):
        """List of available input sources."""
        return self._source_list
    async def async_turn_off(self):
        """Turn off media player."""
        await self._ps4.standby()
    async def async_turn_on(self):
        """Turn on the media player."""
        self._ps4.wakeup()
    async def async_media_pause(self):
        """Send keypress ps to return to menu."""
        await self.async_send_remote_control('ps')
    async def async_media_stop(self):
        """Send keypress ps to return to menu."""
        await self.async_send_remote_control('ps')
    async def async_select_source(self, source):
        """Select input source."""
        # Match the requested source against either the title name
        # (case-insensitive) or the raw title id.
        for title_id, game in self._games.items():
            if source.lower().encode(encoding='utf-8') == \
               game.lower().encode(encoding='utf-8') \
                    or source == title_id:
                _LOGGER.debug(
                    "Starting PS4 game %s (%s) using source %s",
                    game, title_id, source)
                await self._ps4.start_title(title_id, self._media_content_id)
                return
        _LOGGER.warning(
            "Could not start title. '%s' is not in source list", source)
        return
    async def async_send_command(self, command):
        """Send Button Command."""
        await self.async_send_remote_control(command)
    async def async_send_remote_control(self, command):
        """Send RC command."""
        await self._ps4.remote_control(command)
| |
"""
Copyright 2015 Paul T. Grogan, Massachusetts Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Based on node-lp_solve by Stephen Remde released under an MIT license
https://github.com/smremde/node-lp_solve
"""
from lpsolve55 import *
class LinearProgram(object):
    """
    Object-oriented wrapper around the lp_solve library. Tracks column names,
    constraint descriptions, and the objective so the program and its
    solution can be dumped in lp-format text.
    """
    # lp_solve constraint type codes and their lp-format text.
    ConstraintTypes = {'LE':1, 'GE':2, 'EQ':3}
    ConstraintText = {'LE':'<=', 'GE':'>=', 'EQ':'='}
    # Solver return codes (keyed by their string form) to symbolic names.
    SolveResult = {
        '-5': 'UNKNOWNERROR',
        '-4': 'DATAIGNORED',
        '-3': 'NOBFP',
        '-2': 'NOMEMORY',
        '-1': 'NOTRUN',
        '0': 'OPTIMAL',
        '1': 'SUBOPTIMAL',
        '2': 'INFEASIBLE',
        '3': 'UNBOUNDED',
        '4': 'DEGENERATE',
        '5': 'NUMFAILURE',
        '6': 'USERABORT',
        '7': 'TIMEOUT',
        '8': 'RUNNING',
        '9': 'PRESOLVED'
    }
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Release the native lp structure when used as a context manager
        # (parameter renamed from `type` to avoid shadowing the builtin).
        lpsolve('delete_lp', self.lp)
    def __init__(self, name=None):
        """
        Initializes this linear program.
        @param name: the program name
        @type name: L{str}
        """
        self.lp = lpsolve('make_lp', 0, 0)
        if name is not None:
            lpsolve('set_lp_name', self.lp, name)
        lpsolve('set_verbose', self.lp, 'IMPORTANT')
        lpsolve('set_outputfile', self.lp, '')
        self.version = lpsolve('lp_solve_version')
        self.columns = {}
        self.constraints = []
        self.objective = Row()
        self.solution = []
    def setOutputFile(self, fname):
        """
        Sets the output file for this linear program.
        @param fname: the file name (None or '' restores the default)
        @type fname: L{str}
        """
        fileName = fname if fname is not None else ''
        if not lpsolve('set_outputfile', self.lp, fileName):
            raise Exception('error writing to file {0}'.format(fileName))
    def addColumn(self, name=None, isInteger=False, isBinary=False):
        """
        Adds a column to this linear program.
        @param name: the column name (auto-generated when omitted)
        @type name: L{str}
        @param isInteger: true, if this is an integer column
        @type isInteger: L{bool}
        @param isBinary: true, if this is a binary column
        @type isBinary: L{bool}
        @return: L{str}
        """
        colId = len(self.columns)+1
        if name is None:
            # Bug fix: the generated name previously referenced an undefined
            # variable (cId), raising NameError when name was omitted.
            name = 'col_{}'.format(colId)
        self.columns[name] = colId
        colValues = [0]*(lpsolve('get_Nrows', self.lp)+1)
        if not lpsolve('add_column', self.lp, colValues):
            raise Exception('error adding column {}'.format(name))
        #if not lpsolve('set_col_name', self.lp, colId, name):
        #    raise Exception('error setting column name {}'.format(name))
        if isInteger and not lpsolve('set_int', self.lp, colId, isInteger):
            raise Exception('error setting integer type for column {}'.format(name))
        if isBinary and not lpsolve('set_binary', self.lp, colId, isBinary):
            raise Exception('error setting binary type for column {}'.format(name))
        return name
    def addConstraint(self, row, constraintType, constant, name=None):
        """
        Adds a constraint to this linear program.
        @param row: the constraint row
        @type row: L{Row}
        @param constraintType: the constraint type ('LE', 'GE', or 'EQ')
        @type constraintType: L{str}
        @param constant: the constraint constant
        @type constant: L{float}
        @param name: the constraint name (auto-generated when omitted)
        @type name: L{str}
        """
        # Column 0 is unused; lp_solve columns are 1-indexed.
        colValues = [0]*(lpsolve('get_Ncolumns', self.lp)+1)
        for key in row.raw:
            colId = self.columns[key]
            colValues[colId] += row.raw[key]
        if name is None:
            name = 'con_{}'.format(len(self.constraints)+1)
        self.constraints.append({'name': name,
                                 'row': row.toText(),
                                 'constraint':constraintType,
                                 'constant': constant})
        if not lpsolve('add_constraint', self.lp, colValues[1:],
                       LinearProgram.ConstraintTypes[constraintType],
                       constant):
            raise Exception('error adding constraint {}'.format(name))
        # nRows = lpsolve('get_Nrows', self.lp)
        # lpsolve('set_row_name', self.lp, nRows, name)
    def setObjective(self, row, minimize=True):
        """
        Sets the objective for this linear program.
        @param row: the objective row
        @type row: L{Row}
        @param minimize: true, if the objective is minimized
        @type minimize: L{bool}
        """
        colValues = [0]*(lpsolve('get_Ncolumns', self.lp)+1)
        for key in row.raw:
            colId = self.columns[key]
            colValues[colId] += row.raw[key]
        self.objective = {'minimize': minimize,
                          'row': row}
        if minimize:
            lpsolve('set_minim', self.lp)
        else:
            lpsolve('set_maxim', self.lp)
        if not lpsolve('set_obj_fn', self.lp, colValues[1:]):
            raise Exception('error setting objective function')
    def solve(self):
        """
        Solves the linear program.
        @return: the numeric solver code and its symbolic name
        """
        code = lpsolve('solve', self.lp)
        # Variable values are only meaningful for OPTIMAL (0),
        # SUBOPTIMAL (1), and PRESOLVED (9) outcomes.
        if code in (0, 1, 9):
            self.solution = lpsolve('get_variables', self.lp)[0]
        return code, LinearProgram.SolveResult[str(code)]
    def get(self, column):
        """
        Gets the solved value for a column, or None if no solution holds it.
        @param column: the column
        @type column: L{str}
        @return: L{float}
        """
        colId = self.columns[column]
        # Bug fix: the previous bound (colId - 1) allowed indexing one past
        # the end of the solution list, raising IndexError.
        return (None if len(self.solution) < colId
                else self.solution[colId - 1])
    def dumpProgram(self):
        """
        Dumps the program to a string format.
        Requires setObjective to have been called first.
        @return: L{str}
        """
        string = ('minimize' if self.objective['minimize']
                  else 'maximize') + ':' + self.objective['row'].toText() + ';\n'
        for c in self.constraints:
            string += '{}: {} {} {};\n'.format(
                c['name'], c['row'],
                LinearProgram.ConstraintText[c['constraint']],
                c['constant'])
        return string
    def dumpSolution(self):
        """
        Dumps the solution to a string format.
        @return: L{str}
        """
        string = ''
        for col in self.columns:
            string += '{} = {};\n'.format(
                col, self.solution[self.columns[col]-1])
        return string
class Row(object):
    """
    A sparse linear expression mapping column names to coefficients.
    All mutators return self so calls can be chained.
    """
    def __init__(self, clone=None):
        """
        Initializes this row.
        @param clone: an optional row whose coefficients are copied
        @type clone: L{Row}
        """
        self.raw = {}
        if clone is not None:
            # Bug fix: copy from the source row's mapping; a Row itself is
            # not subscriptable, so clone[key] raised TypeError.
            for key in clone.raw:
                self.raw[key] = clone.raw[key]
    def add(self, key=None, value=None, row=None):
        """
        Adds a key-value pair or row to this row.
        @param key: the key to add
        @type key: L{str}
        @param value: the value to add
        @type value: L{float}
        @param row: the row to add
        @type row: L{Row}
        @return: L{Row}
        """
        if key is not None and value is not None:
            self.raw[key] = ((self.raw[key]
                              if key in self.raw
                              else 0) + value)
        if row is not None and hasattr(row, 'raw'):
            for key in row.raw:
                self.raw[key] = ((self.raw[key]
                                  if key in self.raw
                                  else 0) + row.raw[key])
        return self
    def subtract(self, key=None, value=None, row=None):
        """
        Subtracts a key-value pair or row from this row.
        @param key: the key to subtract
        @type key: L{str}
        @param value: the value to subtract
        @type value: L{float}
        @param row: the row to subtract
        @type row: L{Row}
        @return: L{Row}
        """
        if key is not None and value is not None:
            self.raw[key] = ((self.raw[key]
                              if key in self.raw
                              else 0) - value)
        if row is not None and hasattr(row, 'raw'):
            for key in row.raw:
                self.raw[key] = ((self.raw[key]
                                  if key in self.raw
                                  else 0) - row.raw[key])
        return self
    def multiply(self, value):
        """
        Multiplies this row by a scalar value.
        @param value: the scalar value
        @type value: L{float}
        @return: L{Row}
        """
        if value == 0:
            self.raw = {}
        else:
            for key in self.raw:
                self.raw[key] = self.raw[key] * value
        return self
    def toText(self):
        """
        Gets the lp_solve-compatible text format for this row.
        Zero coefficients are omitted; an empty row renders as '0'.
        @return: L{str}
        """
        string = ''
        for key in self.raw:
            coefficient = self.raw[key]
            if coefficient == 0:
                continue
            # Bug fix: emit the sign from a local copy; the previous code
            # negated self.raw[key] in place, so rendering a row mutated it.
            if coefficient < 0:
                string += ' -'
                coefficient = -coefficient
            else:
                string += ' +'
            string += '{} {}'.format(coefficient, key)
        return string if string != '' else '0'
| |
import json
from copy import deepcopy
from .controller_test_case import ControllerTestCase
class PostsControllerTestCase(ControllerTestCase):
    """Integration tests for the /posts endpoints (JSON:API documents)."""
    def default_post_json(self, post_id=1, author_id=1):
        """Return a canonical JSON:API post document for the given ids."""
        return {
            'data': {
                'id': str(post_id),
                'type': 'post',
                'attributes': {
                    'title': 'An Inspirational Blog Post',
                    'body': 'Be yourself, but also you can change for the better.',
                    'type': 'text',
                },
                'relationships': {
                    'author': {
                        'data': {
                            'id': str(author_id),
                            'type': 'user'
                        }
                    }
                }
            }
        }
    def default_author_json(self, author_id=1):
        """Return a canonical JSON:API user document for the given id."""
        return {
            'data': {
                'type': 'user',
                'id': str(author_id),
                'attributes': {
                    'name': 'Jane Doe'
                }
            }
        }
    def make_an_author(self, author_id=1):
        """POST the default author fixture; returns the response."""
        return self.app.post('/users/{0}'.format(author_id),
                             data=json.dumps(self.default_author_json(author_id)),
                             content_type='application/json')
    def make_a_post(self, post_id=1, author_id=1):
        """POST the default post fixture; returns the response."""
        return self.app.post('/posts/{0}'.format(post_id),
                             data=json.dumps(self.default_post_json(post_id=post_id, author_id=author_id)),
                             content_type='application/json')
    def make_an_author_and_post(self, post_id=1, author_id=1):
        """Create an author and a post that references it."""
        self.make_an_author(author_id=author_id)
        self.make_a_post(post_id=post_id, author_id=author_id)
    def test_get_post(self):
        author_id = 1
        post_id = 1
        self.make_an_author_and_post(post_id=post_id, author_id=author_id)
        # get made post
        get_response = self.app.get('/posts/{0}'.format(post_id))
        expected_response = self.default_post_json(post_id=post_id, author_id=author_id)
        expected_response.update({'included': [self.default_author_json()['data']]})
        self.check_jsonapi_response(get_response, 200, expected_response)
    def test_get_nonexistant_post(self):
        response = self.app.get('/posts/999')
        self.check_response(response, 404,
                            {
                                'error': 'The requested URL was not found on the server. If you entered the URL manually please check your spelling and try again.'})
    def test_create_valid_post(self):
        author_id = 1
        self.make_an_author(author_id)
        post_id = 1
        post_data = self.default_post_json(post_id)
        create_response = self.app.post('/posts/{0}'.format(post_id), data=json.dumps(post_data),
                                        content_type='application/json')
        expected_response = deepcopy(post_data)
        expected_response.update({'included': [self.default_author_json()['data']]})
        self.check_jsonapi_response(create_response, 201, expected_response)
    def test_create_invalid_post(self):
        # missing author
        post_id = 1
        post_data = self.default_post_json(post_id=post_id)
        del post_data['data']['relationships']['author']['data']['id']
        create_response = self.app.post('/posts/{0}'.format(post_id), data=json.dumps(post_data),
                                        content_type='application/json')
        self.check_response(create_response, 400,
                            {'error': 'Provided post object was missing the author id field'})
        author_id = 1
        self.make_an_author(author_id)
        post_data = self.default_post_json(post_id=post_id, author_id=author_id)
        del post_data['data']['attributes']['body']
        create_response = self.app.post('/posts/{0}'.format(post_id), data=json.dumps(post_data),
                                        content_type='application/json')
        self.check_response(create_response, 400,
                            {'error': 'Provided post object was missing the body field'})
    def test_create_best_effort_post_id(self):
        post_id = 1
        post_data_no_id = self.default_post_json(post_id=post_id)
        del post_data_no_id['data']['id']
        self.make_an_author()
        create_response = self.app.post('/posts/{0}'.format(post_id), data=json.dumps(post_data_no_id),
                                        content_type='application/json')
        expected_response = deepcopy(post_data_no_id)
        expected_response['data'].update({'id': str(post_id)})
        expected_response.update({'included': [self.default_author_json()['data']]})
        self.check_jsonapi_response(create_response, 201, expected_response)
    def test_create_duplicate_post_id(self):
        author_id = 1
        post_id = 1
        self.make_an_author_and_post(author_id=author_id, post_id=post_id)
        post_data = self.default_post_json(post_id)
        create_response = self.app.post('/posts/{0}'.format(post_id), data=json.dumps(post_data),
                                        content_type='application/json')
        self.check_response(create_response, 409,
                            {'error': 'post with id {0} already exists'.format(post_id)})
    def test_delete_post(self):
        author_id = 1
        post_id = 1
        self.make_an_author_and_post(author_id=author_id, post_id=post_id)
        post_data = self.default_post_json(post_id)
        # delete made user
        delete_response = self.app.delete('/posts/{0}'.format(post_id))
        expected_response = deepcopy(post_data)
        expected_response.update({'included': [self.default_author_json()['data']]})
        self.check_jsonapi_response(delete_response, 200, expected_response)
        # check that get now fails
        response = self.app.get('/posts/{0}'.format(post_id))
        self.check_response(response, 404,
                            {
                                'error': 'The requested URL was not found on the server. If you entered the URL manually please check your spelling and try again.'})
    def test_delete_nonexistant_post(self):
        response = self.app.delete('/posts/999')
        self.check_response(response, 404,
                            {
                                'error': 'The requested URL was not found on the server. If you entered the URL manually please check your spelling and try again.'})
    def test_update_post(self):
        author_id = 1
        post_id = 1
        self.make_an_author_and_post(author_id=author_id, post_id=post_id)
        # modify the post
        post_data = self.default_post_json(post_id)
        # Bug fix: mutate the JSON:API title attribute; the previous code set
        # a stray top-level 'title' key, so the title was never changed.
        post_data['data']['attributes']['title'] = 'Now for Something Different'
        update_response = self.app.put('/posts/{0}'.format(post_id), data=json.dumps(post_data),
                                       content_type='application/json')
        expected_response = deepcopy(post_data)
        expected_response.update({'included': [self.default_author_json()['data']]})
        self.check_jsonapi_response(update_response, 200, expected_response)
        # check that get now has the new data too
        get_response = self.app.get('/posts/{0}'.format(post_id))
        self.check_jsonapi_response(get_response, 200, expected_response)
    def test_update_invalid_user(self):
        author_id = 1
        post_id = 1
        self.make_an_author_and_post(author_id=author_id, post_id=post_id)
        # modify the user in an invalid way (missing author id)
        post_data = self.default_post_json(post_id)
        del post_data['data']['relationships']['author']['data']['id']
        update_response = self.app.put('/posts/{0}'.format(post_id), data=json.dumps(post_data),
                                       content_type='application/json')
        self.check_response(update_response, 400,
                            {'error': 'Provided post object was missing the author id field'})
        # check that get still has the old data
        get_response = self.app.get('/posts/{0}'.format(post_id))
        expected_response = self.default_post_json(post_id)
        expected_response.update({'included': [self.default_author_json()['data']]})
        self.check_jsonapi_response(get_response, 200, expected_response)
    def test_update_nonexistant_user(self):
        # without post body data
        response = self.app.put('/posts/999')
        self.check_response(response, 400, None)
        # with post body data
        post_id = 987
        post_data = self.default_post_json(post_id)
        response = self.app.put('/posts/{0}'.format(post_id), data=json.dumps(post_data),
                                content_type='application/json')
        self.check_response(response, 404,
                            {
                                'error': 'The requested URL was not found on the server. If you entered the URL manually please check your spelling and try again.'})
suite = PostsControllerTestCase.suite()
| |
# -*- coding: utf-8 -*-
"""
sources.py ---
Copyright (C) 2017, Midraal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import random
import nanscrapers
import requests
import xbmcaddon
import xbmcgui
import xbmc
import koding
from koding import route
import sys
import xbmcplugin
from resources.lib.util.xml import JenItem, JenList
from resources.lib.util.messages import get_link_message, get_searching_message
from resources.lib.util.info import get_info
from resources.lib.player import JenPlayer
from resources.lib.plugin import run_hook
from language import get_string as _
ADDON = xbmcaddon.Addon()
DIALOG = xbmcgui.Dialog()
class Sources(object):
    """interface to NaN scraper library and helper functions"""
    def __init__(self):
        """initialise class"""
        pass
    @staticmethod
    def get_sources(title,
                    year,
                    imdb,
                    tvdb,
                    season,
                    episode,
                    tvshowtitle,
                    premiered,
                    timeout=30,
                    preset="search",
                    dialog=None,
                    exclude=None,
                    scraper_title=False,
                    listitem=None,
                    output_function=koding.Play_Video,
                    skip_selector=False,
                    player=None):
        """
        scrapes for video sources using NaN scraper library
        Args:
            title: movie or episode title
            year: year movie/episode came out
            imdb: imdb identifier
            tvdb: tvdb identifier
            season: season number
            episode: episode number
            tvshowtitle: title of tv show
            premiered: year tv show premiered
            timeout: timeout for scraping link
            preset: preferred quality of stream
            dialog: dialog to use for displaying messages
            exclude: list of scrapers to exclude
            scraper_title: extra movie/tv show title to search first.
                required if scrapers use an alternate spelling
        Returns:
            Boolean indicating playback success
        """
        year = str(year)
        # A missing tvshowtitle means we are scraping a movie.
        content = 'movie' if tvshowtitle is None else 'episode'
        allow_debrid = ADDON.getSetting('allow_debrid') == "true"
        if ADDON.getSetting('use_link_dialog') == 'true' and not skip_selector:
            # use link selector
            if content == 'movie':
                scraper = nanscrapers.scrape_movie_with_dialog
                link, rest = scraper(
                    title,
                    year,
                    imdb,
                    timeout=timeout,
                    exclude=exclude,
                    extended=True,
                    sort_function=Sources.sort_function,
                    enable_debrid=allow_debrid)
            elif content == "episode":
                scraper = nanscrapers.scrape_episode_with_dialog
                link, rest = scraper(
                    tvshowtitle,
                    year,
                    premiered,
                    season,
                    episode,
                    imdb,
                    tvdb,
                    timeout=timeout,
                    exclude=exclude,
                    extended=True,
                    sort_function=Sources.sort_function,
                    enable_debrid=allow_debrid)
            else:
                return
            # Some scrapers wrap the chosen link in a {"path": ...} envelope.
            if type(link) == dict and "path" in link:
                link = link["path"]
            if link is None:
                return False
            url = link['url']
            if ADDON.getSetting('link_fallthrough') == 'true':
                # Keep trying the remaining dialog results until one plays.
                played = False
                index = 0
                links = []
                for item in rest:
                    if type(item) == dict and "path" in item:
                        links.extend(item["path"][1])
                    else:
                        links.extend(item[1])
                index = links.index(link)
                # Only links AFTER the user's chosen one are fallbacks.
                links = links[index + 1:]
                num_results = len(rest) + 1
                while not played:
                    try:
                        if dialog is not None and dialog.iscanceled():
                            return False
                        if dialog is not None:
                            index = index + 1
                            percent = int((index * 100) / num_results)
                            line = "%s - %s (%s)" % (link['scraper'],
                                                     link['source'],
                                                     link['quality'])
                            dialog.update(percent, line)
                    except:
                        # Progress reporting is best-effort only.
                        pass
                    try:
                        played = output_function(
                            link["url"],
                            showbusy=False,
                            ignore_dp=True,
                            item=listitem,
                            player=player)
                        # Advance to the next fallback; IndexError on
                        # exhaustion is caught below and aborts.
                        link = links[0]
                        links = links[1:]
                    except:
                        return False
                return played
            else:
                return output_function(
                    url,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player)
        else:
            # No selection dialog: scrape everything and auto-play.
            if content == 'movie':
                title = title
                scraper = nanscrapers.scrape_movie
                links_scraper = scraper(
                    title,
                    year,
                    imdb,
                    timeout=timeout,
                    exclude=exclude,
                    enable_debrid=allow_debrid)
            elif content == 'episode':
                if scraper_title:
                    tvshowtitle = title
                tvshowtitle = tvshowtitle
                scraper = nanscrapers.scrape_episode
                links_scraper = scraper(
                    tvshowtitle,
                    year,
                    premiered,
                    season,
                    episode,
                    imdb,
                    tvdb,
                    timeout=timeout,
                    exclude=exclude,
                    enable_debrid=allow_debrid)
            else:
                return
            # Buckets for deferred links, tried in order of preference:
            # direct HD first (inline), then non-direct, then SD.
            sd_links = []
            non_direct_links = []
            non_direct_sd_links = []
            num_scrapers = len(nanscrapers.relevant_scrapers())
            index = 0
            try:
                for scraper_links in links_scraper():
                    if dialog is not None and dialog.iscanceled():
                        return
                    if dialog is not None:
                        index = index + 1
                        percent = int((index * 100) / num_scrapers)
                        dialog.update(percent)
                    if scraper_links is not None:
                        # Shuffle so no single host is hammered first.
                        random.shuffle(scraper_links)
                        for scraper_link in scraper_links:
                            if dialog is not None and dialog.iscanceled():
                                return False
                            if Sources().__check_skip_pairing(scraper_link):
                                continue
                            quality = Sources.__determine_quality(
                                scraper_link["quality"])
                            preset = preset.lower()
                            if preset == 'searchsd':
                                if quality == "HD":
                                    continue
                            elif preset == "search":
                                if quality == "SD":
                                    sd_links.append(scraper_link)
                            # NOTE(review): an SD link under preset "search"
                            # is queued above AND still attempted below —
                            # a `continue` after the append may be missing;
                            # confirm against upstream before changing.
                            if scraper_link["direct"]:
                                result = output_function(
                                    scraper_link["url"],
                                    showbusy=False,
                                    ignore_dp=True,
                                    item=listitem,
                                    player=player)
                                if result:
                                    return result
                            else:
                                non_direct_links.append(scraper_link)
                # Fallback passes over the deferred buckets.
                for scraper_link in non_direct_links:
                    if dialog is not None and dialog.iscanceled():
                        return False
                    result = output_function(
                        scraper_link["url"],
                        showbusy=False,
                        ignore_dp=True,
                        item=listitem,
                        player=player)
                    if result:
                        return result
                for scraper_link in sd_links:
                    if dialog is not None and dialog.iscanceled():
                        return
                    if scraper_link['direct']:
                        result = output_function(
                            scraper_link["url"],
                            showbusy=False,
                            ignore_dp=True,
                            item=listitem,
                            player=player)
                        if result:
                            return result
                    else:
                        non_direct_sd_links.append(scraper_link)
                for scraper_link in non_direct_sd_links:
                    if dialog is not None and dialog.iscanceled():
                        return
                    result = output_function(
                        scraper_link["url"],
                        showbusy=False,
                        ignore_dp=True,
                        item=listitem,
                        player=player)
                    if result:
                        return result
                return False
            except:
                return False
    @staticmethod
    def get_music_sources(title,
                          artist,
                          timeout=30,
                          preset="search",
                          dialog=None,
                          exclude=None,
                          listitem=None,
                          output_function=koding.Play_Video,
                          skip_selector=False,
                          player=None):
        """
        scrapes for music sources using NaN scraper library
        Args:
            title: song title
            artist: song artist
            timeout: timeout for scraping link
            preset: preferred quality of stream
            dialog: dialog to use for displaying messages
            exclude: list of scrapers to exclude
        Returns:
            Boolean indicating playback success
        """
        title = title
        allow_debrid = ADDON.getSetting('allow_debrid') == "true"
        if ADDON.getSetting('use_link_dialog') == 'true' and not skip_selector:
            link, rest = nanscrapers.scrape_song_with_dialog(
                title,
                artist,
                timeout=timeout,
                exclude=exclude,
                enable_debrid=allow_debrid,
                extended=True)
            if type(link) == dict and "path" in link:
                link = link["path"]
            if link is None:
                return False
            url = link['url']
            if ADDON.getSetting('link_fallthrough') == 'true':
                played = False
                index = 0
                links = []
                for item in rest:
                    if type(item) == dict and "path" in item:
                        links.extend(item["path"][1])
                    else:
                        links.extend(item[1])
                index = links.index(link)
                links = links[index + 1:]
                num_results = len(rest) + 1
                while not played:
                    try:
                        if dialog is not None and dialog.iscanceled():
                            return
                        if dialog is not None:
                            index = index + 1
                            percent = int((index * 100) / num_results)
                            line = "%s - %s (%s)" % (link['scraper'],
                                                     link['source'],
                                                     link['quality'])
                            dialog.update(percent, line)
                    except:
                        pass
                    try:
                        # NOTE(review): unlike the video path, this always
                        # replays the ORIGINAL `url` rather than the advanced
                        # `link["url"]` — looks like a fallthrough bug;
                        # confirm before relying on it.
                        played = output_function(
                            url,
                            showbusy=False,
                            ignore_dp=True,
                            item=listitem,
                            player=player)
                        link = links[0]
                        links = links[1:]
                    except:
                        return False
                return played
            else:
                return output_function(
                    url,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player)
        links_scraper = nanscrapers.scrape_song(
            title,
            artist,
            timeout=timeout,
            exclude=exclude,
            enable_debrid=allow_debrid)
        sd_links = []
        num_scrapers = len(nanscrapers.relevant_scrapers())
        index = 0
        try:
            for scraper_links in links_scraper():
                if dialog is not None and dialog.iscanceled():
                    return
                if dialog is not None:
                    index = index + 1
                    percent = int((index * 100) / num_scrapers)
                    dialog.update(percent)
                if scraper_links is not None:
                    random.shuffle(scraper_links)
                    for scraper_link in scraper_links:
                        if dialog is not None and dialog.iscanceled():
                            return
                        if Sources().__check_skip_pairing(scraper_link):
                            continue
                        quality = Sources.__determine_quality(
                            scraper_link["quality"])
                        preset = preset.lower()
                        if preset == 'searchsd':
                            if quality == "HD":
                                continue
                        elif preset == "search":
                            if quality == "SD":
                                sd_links.append(scraper_link)
                        result = output_function(
                            scraper_link["url"],
                            showbusy=False,
                            ignore_dp=True,
                            item=listitem,
                            player=player)
                        if result:
                            return result
            # Second pass over the SD links that were set aside.
            for scraper_link in sd_links:
                if dialog is not None and dialog.iscanceled():
                    return
                result = output_function(
                    scraper_link["url"],
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player)
                if result:
                    return result
        except:
            pass
        return False
    @staticmethod
    def youtube_resolve(url):
        """
        transform youtube url to link to youtube add-on
        Args:
            url: youtube url
        Returns:
            playable url
        """
        try:
            # Strip everything around the video id (handles watch?v=,
            # short urls, and trailing query params).
            youtube_id = url.split('?v=')[-1].split('/')[-1].split('?')[
                0].split('&')[0]
            result = requests.head(
                'http://www.youtube.com/watch?v=%s' % youtube_id)
            if result:
                return 'plugin://plugin.video.youtube/play/?video_id=%s' % (
                    youtube_id)
        except:
            return
    @staticmethod
    def sort_function(item):
        """
        transform items quality into a string that's sort-able
        Args:
            item: scraper link
        Returns:
            sortable quality string
        """
        if 'quality' in item[1][0]:
            quality = item[1][0]["quality"]
        else:
            quality = item[1][0]["path"]["quality"]
        # Map qualities onto lexicographically sortable buckets:
        # HDa..HDe beat SDa..SDc; unknown qualities sort last ("Z").
        if quality.startswith("1080"):
            quality = "HDa"
        elif quality.startswith("720"):
            quality = "HDb"
        elif quality.startswith("560"):
            quality = "HDc"
        elif quality == "DVD":
            quality = "HDd"
        elif quality == "HD":
            quality = "HDe"
        elif quality.startswith("480"):
            quality = "SDa"
        elif quality.startswith("360"):
            quality = "SDb"
        elif quality.startswith("SD"):
            quality = "SDc"
        else:
            quality = "Z"
        return quality
    @staticmethod
    def __determine_quality(quality_string):
        # Classify a scraper quality value as "HD" or "SD". Numeric values
        # above 576 lines count as HD; non-numeric values are HD unless they
        # are one of the known SD markers.
        try:
            quality = int(quality_string)
            if quality > 576:
                return "HD"
            else:
                return "SD"
        except ValueError:
            if quality_string not in ["SD", "CAM", "SCR"]:
                return "HD"
            else:
                return "SD"
    @staticmethod
    def __check_skip_pairing(scraper_link):
        # Return True when the link's host is disabled in addon settings
        # (hosts that require device pairing).
        if not ADDON.getSetting('allow_openload') == 'true' and\
                'openload' in scraper_link['url']:
            return True
        if not ADDON.getSetting('allow_the_video_me') == 'true' and\
                'thevideo.me' in scraper_link['url']:
            return True
        if not ADDON.getSetting('allow_the_vidup_me') == 'true' and\
                'vidup.me' in scraper_link['url']:
            return True
        return False
def choose_quality(link, name=None, selected_link=None):
    """
    choose quality for scraping
    Keyword Arguments:
    link -- Jenitem link with sublinks
    name -- Name to display in dialog (default None)
    selected_link -- preselected quality; overrides the addon's
        "default_link" setting when given (default None)

    Returns the chosen sublink string, or False if the user cancels
    the selection dialog.
    """
    import re
    if name is None:
        name = xbmc.getInfoLabel('listitem.label')
    # Bare URLs / plugin paths have no <sublink> wrapper.
    if link.startswith("http") or link.startswith("plugin"):
        sublinks = [link]
    else:
        jen_link = JenItem(link)
        sublinks = jen_link.getAll("sublink")
        if not sublinks:
            sublinks = [jen_link]
    links = []
    message = get_link_message()
    if selected_link is None:
        default_link = ADDON.getSetting("default_link")
    else:
        default_link = selected_link
    link_dialog = ADDON.getSetting("use_link_dialog") == "true"
    direct_links = False
    for sublink in sublinks:
        # When the link-selector dialog is on, scraper "search" entries are
        # added as a single trailing "Search" item instead.
        if link_dialog and "search" in sublink:
            continue
        if "searchsd" in sublink:
            # "searchsd" must be tested before "search" (it contains it).
            if default_link == "SD":
                return sublink
            label = 'SD'
            if message['SD'] != '':
                label += ' (%s)' % message['SD']
            new_item = (label, sublink)
        elif "search" in sublink:
            if default_link == "HD":
                return sublink
            label = 'HD'
            if message['HD'] != '':
                label += ' (%s)' % message['HD']
            new_item = (label, sublink)
        else:
            direct_links = True
            # "label(url)" pairs; otherwise fall back to "Link N".
            match = re.findall("(.*?)\((.*?)\)", sublink)
            if match:
                new_item = ('%s' % match[0][1], match[0][0])
            else:
                new_item = ('Link %s' % (int(sublinks.index(sublink)) + 1),
                            sublink)
        links.append(new_item)
    if link_dialog and (not direct_links or len(sublinks) > 1):
        links.append(("Search", "search"))
    # Single candidate: no dialog needed.
    if len(links) == 1:
        url = links[0][1]
        return url
    select = xbmcgui.Dialog().select(name, [i[0] for i in links])
    if select == -1:
        return False
    else:
        url = links[select][1]
        return url
@route(mode="get_sources", args=["url"])
def get_sources(item):
    """
    get video_link and try to play it
    Keyword Arguments:
    item -- JenItem to try playing
    """
    # Plugins may take over source resolution entirely via hook.
    result = run_hook("get_sources", item)
    if result:
        return
    if item.startswith("<plugin>"):
        # link to plugin: navigate instead of playing.
        link = JenItem(item)["link"]
        sublinks = JenItem(link).getAll("sublink")
        if sublinks:
            if len(sublinks) > 1:
                link = choose_quality(link)
            else:
                link = sublinks[0]
        # NOTE(review): this replace looks like it should unescape an
        # HTML-escaped ampersand; as written it is a no-op — confirm
        # against the original source.
        link = link.replace("&", "&")
        xbmc.executebuiltin('Container.update(' + link + ')')
        return
    item = JenItem(item)
    link = item["link"]
    if not link or link.replace("\t", "") == "":
        return
    # Pull playback metadata out of the embedded <meta> item.
    meta = JenItem(item["meta"])
    title = meta["title"]
    year = meta.get("year", '').split("-")[0].strip()
    imdb = meta.get("imdb", "")
    tvdb = meta.get("tvdb", "")
    season = meta.get("season", "")
    episode = meta.get("episode", "")
    tvshowtitle = meta.get("tvshowtitle", None)
    premiered = meta.get("premiered", "")
    try:
        # Reduce "YYYY-MM-DD" to just the year.
        premiered = premiered.split("-")[0].strip()
    except:
        if len(premiered) == 4:
            pass
        elif not premiered:
            pass
        else:
            koding.dolog("wrong premiered format")
    busy_dialog = xbmcgui.DialogProgress()
    dialog = xbmcgui.Dialog()
    icon = ADDON.getAddonInfo('icon')
    jenplayer = JenPlayer(resume=False)
    try:
        # Offer resume if a saved position exists in the "watched" table.
        spec = {
            "identifier": imdb,
            "season": season or "0",
            "episode": episode or "0"
        }
        match = koding.Get_From_Table("watched", spec)
        if match:
            match = match[0]
            if match["currentTime"] and not match["currentTime"] == "0":
                if dialog.yesno(ADDON.getAddonInfo("name"),
                                _("Previous playback detected"),
                                yeslabel=_("Resume"),
                                nolabel=_("Restart")):
                    jenplayer = JenPlayer(resume=True)
    except:
        pass
    jenplayer.setItem(item)
    busy_dialog.create(xbmcaddon.Addon().getAddonInfo('name'),
                       _("Processing Link"))
    preset = choose_quality(link)
    message = get_searching_message(preset)
    played = False
    infolabels = {}
    if preset:
        # NOTE(review): same apparently no-op ampersand replace as above.
        preset = preset.replace("&", "&")
        busy_dialog.update(0, message)
        listitem = None
        fetch_meta = ADDON.getSetting("metadata") == "true"
        listitem = xbmcgui.ListItem(
            path=link,
            iconImage=item.get("thumbnail", icon),
            thumbnailImage=item.get("thumbnail", icon))
        infolabels = {}
        if fetch_meta and imdb != "0":  # only try valid items with imdb
            try:
                info, created = get_info([item.item_string])
                if info and type(info) == dict:
                    infolabels = info
            except:
                pass
        else:
            infolabels["title"] = title
            infolabels["name"] = title
        if "plotoutline" not in infolabels:
            infolabels["plotoutline"] = infolabels.get("plot", "")
        if item.get("content", "") == "song":
            listitem.setInfo(type='Music',
                             infoLabels={'title': meta.get("title", ""),
                                         'artist': meta.get("artist", "")})
        else:
            listitem.setInfo(type="video", infoLabels=infolabels)
        listitem.setLabel(item.get("title", item.get("name", "")))
        if "search" in preset:
            exclude_scrapers_content = item.get("exclude_scrapers", "")
            if exclude_scrapers_content:
                exclude_scrapers = exclude_scrapers_content.split(";")
            else:
                exclude_scrapers = None
            # nanscraper link
            if item.get("content", "") == "song":
                artist = item.get("artist", "")
                played = Sources.get_music_sources(title, artist,
                                                   preset=preset,
                                                   dialog=busy_dialog,
                                                   exclude=exclude_scrapers,
                                                   listitem=listitem,
                                                   player=jenplayer)
            else:
                played = Sources.get_sources(
                    title,
                    year,
                    imdb,
                    tvdb,
                    season,
                    episode,
                    tvshowtitle,
                    premiered,
                    preset=preset,
                    dialog=busy_dialog,
                    listitem=listitem,
                    exclude=exclude_scrapers,
                    player=jenplayer)
        elif preset.startswith("http") or preset.startswith("plugin"):
            # direct link
            if "/playlist" in preset and "youtube" in preset:
                busy_dialog.close()
                xbmc.executebuiltin('Container.update(' + preset + ')')
                return
            elif "plugin://plugin.video.youtube/play/?video_id=" in preset:
                xbmc.executebuiltin("PlayMedia(%s)" % preset)
                played = True
            elif item["content"] == "image":
                busy_dialog.close()
                xbmc.executebuiltin("ShowPicture(%s)" % preset)
                played = True
            else:
                played = koding.Play_Video(
                    preset,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=jenplayer)
        else:
            # who knows
            busy_dialog.close()
            koding.dolog("unknown link type: " + repr(preset))
            raise Exception()
    busy_dialog.close()
    if played:
        # Keep the resume-position tracker running while playback lasts.
        jenplayer.keep_alive()
@route(mode="queue", args=["url"])
def queue_source(item, depth=0):
    """
    queue item
    Keyword Arguments:
    item -- JenItem to try playing
    depth -- recursion depth; 0 means this is the user-initiated call,
        so the "Finished Queueing" notification fires only once
    """
    from resources.lib.util.url import get_addon_url
    jen_item = JenItem(item)
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    if "<item>" in str(jen_item):
        # Leaf item: append it to the Kodi video playlist.
        play = False
        if xbmcaddon.Addon().getSetting("autostart_queue") == "true":
            # Auto-start only when the queue was previously empty.
            if playlist.size() == 0:
                play = True
        playlist.add(
            get_addon_url("get_sources", str(item)),
            xbmcgui.ListItem(
                jen_item["title"], iconImage=jen_item.get("thumbnail", "")))
        if play:
            play_queue()
    else:
        # Directory item: expand it and queue each child recursively.
        link = jen_item.get("url", jen_item.get("link", ""))
        jenlist = JenList(link).get_raw_list()
        for list_item in jenlist:
            queue_source(str(list_item), depth + 1)
    if depth == 0:
        xbmcgui.Dialog().notification(
            ADDON.getAddonInfo("name"), _("Finished Queueing").encode('utf-8'),
            ADDON.getAddonInfo("icon"))
        xbmc.executebuiltin("Container.Refresh")
@route(mode="clear_queue")
def clear_queue():
    """Empty the addon's video queue playlist, notify the user, refresh."""
    xbmc.PlayList(xbmc.PLAYLIST_VIDEO).clear()
    addon_name = ADDON.getAddonInfo("name")
    addon_icon = ADDON.getAddonInfo("icon")
    xbmcgui.Dialog().notification(
        addon_name, _("Queue cleared").encode('utf-8'), addon_icon)
    xbmc.executebuiltin('Container.Refresh')
@route(mode="play_queue")
def play_queue():
    """Start playback of the queued playlist, or report that it is empty."""
    queue = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    if queue.size() == 0:
        # Nothing queued: just tell the user.
        xbmcgui.Dialog().notification(
            ADDON.getAddonInfo("name"), _("Queue is empty").encode('utf-8'),
            ADDON.getAddonInfo("icon"))
        return
    first_item = queue[0]
    xbmc.Player().play(queue, first_item)
    xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, first_item)
# LocalWords: searchsd HD
| |
"""
Manage the information in the hosts file
"""
import errno
import logging
import os
import salt.utils.files
import salt.utils.odict as odict
import salt.utils.stringutils
log = logging.getLogger(__name__)
# pylint: disable=C0103
def __get_hosts_filename():
    """
    Return the path to the appropriate hosts file
    """
    cache_key = "hosts.__get_hosts_filename"
    # Resolve from minion config once, then serve the cached value.
    if cache_key not in __context__:
        __context__[cache_key] = __salt__["config.option"]("hosts.file")
    return __context__[cache_key]
def _get_or_create_hostfile():
    """
    Return the configured hosts file path, creating an empty file at that
    path when it does not already exist.
    """
    hosts_path = __get_hosts_filename()
    hosts_path = "" if hosts_path is None else hosts_path
    if not os.path.exists(hosts_path):
        # Touch: create an empty hosts file so later reads/writes succeed.
        with salt.utils.files.fopen(hosts_path, "w"):
            pass
    return hosts_path
def _list_hosts():
    """
    Return the hosts found in the hosts file in as an OrderedDict

    Shape of the returned mapping:
        - full-line comments are stored as {"comment-N": ["# ..."]}
        - host entries are {ip: {"aliases": [...], "comment": "..."}}
          (the "comment" key is present only for lines with a trailing
          inline comment)
    The parsed result is cached in __context__ until a writer pops it.
    """
    try:
        # Serve the cached parse if a previous call populated it.
        return __context__["hosts._list_hosts"]
    except KeyError:
        count = 0
        hfn = __get_hosts_filename()
        ret = odict.OrderedDict()
        try:
            with salt.utils.files.fopen(hfn) as ifile:
                for line in ifile:
                    line = salt.utils.stringutils.to_unicode(line).strip()
                    if not line:
                        continue
                    if line.startswith("#"):
                        # Whole-line comment: keyed "comment-N" to preserve
                        # file order on rewrite.
                        ret.setdefault("comment-{}".format(count), []).append(line)
                        count += 1
                        continue
                    comment = None
                    if "#" in line:
                        # Split off a trailing inline comment.
                        comment = line[line.index("#") + 1 :].lstrip()
                        line = line[: line.index("#")].strip()
                    comps = line.split()
                    ip = comps.pop(0)
                    if comment:
                        ret.setdefault(ip, {}).setdefault("aliases", []).extend(comps)
                        ret.setdefault(ip, {}).update({"comment": comment})
                    else:
                        ret.setdefault(ip, {}).setdefault("aliases", []).extend(comps)
        except OSError as exc:
            salt.utils.files.process_read_exception(exc, hfn, ignore=errno.ENOENT)
            # Don't set __context__ since we weren't able to read from the
            # hosts file.
            return ret
        __context__["hosts._list_hosts"] = ret
        return ret
def list_hosts():
    """
    Return the hosts found in the hosts file in this format::

        {'<ip addr>': ['alias1', 'alias2', ...]}

    CLI Example:

    .. code-block:: bash

        salt '*' hosts.list_hosts
    """
    # Convert the OrderedDict to a plain dict; msgpack cannot serialize
    # OrderedDict instances.
    return {addr: info for addr, info in _list_hosts().items()}
def get_ip(host):
    """
    Return the ip associated with the named host

    CLI Example:

    .. code-block:: bash

        salt '*' hosts.get_ip <hostname>
    """
    hosts = _list_hosts()
    if not hosts:
        return ""
    # Scan every host entry for the alias; comment entries (lists) are
    # filtered out by the isinstance check.
    for addr, info in hosts.items():
        if isinstance(info, dict) and "aliases" in info:
            if host in info["aliases"]:
                return addr
    # ip not found
    return ""
def get_alias(ip):
    """
    Return the list of aliases associated with an ip

    Aliases (host names) are returned in the order in which they
    appear in the hosts file. If there are no aliases associated with
    the IP, an empty list is returned.

    CLI Example:

    .. code-block:: bash

        salt '*' hosts.get_alias <ip addr>
    """
    entries = _list_hosts()
    return entries[ip]["aliases"] if ip in entries else []
def has_pair(ip, alias):
    """
    Return true if the alias is set

    CLI Example:

    .. code-block:: bash

        salt '*' hosts.has_pair <ip> <alias>
    """
    try:
        current_aliases = _list_hosts()[ip]["aliases"]
    except KeyError:
        # Unknown ip (or an entry with no aliases recorded).
        return False
    if isinstance(alias, list):
        # Every requested alias must already be present.
        return set(alias).issubset(current_aliases)
    return alias in current_aliases
def set_host(ip, alias, comment=None):
    """
    Set the host entry in the hosts file for the given ip, this will overwrite
    any previous entry for the given ip

    .. versionchanged:: 2016.3.0
        If ``alias`` does not include any host names (it is the empty
        string or contains only whitespace), all entries for the given
        IP address are removed.

    CLI Example:

    .. code-block:: bash

        salt '*' hosts.set_host <ip> <alias>
    """
    hfn = _get_or_create_hostfile()
    ovr = False
    if not os.path.isfile(hfn):
        return False
    # Make sure future calls to _list_hosts() will re-read the file
    __context__.pop("hosts._list_hosts", None)
    if comment:
        line_to_add = salt.utils.stringutils.to_bytes(
            ip + "\t\t" + alias + "\t\t# " + comment + os.linesep
        )
    else:
        line_to_add = salt.utils.stringutils.to_bytes(ip + "\t\t" + alias + os.linesep)
    # support removing a host entry by providing an empty string
    if not alias.strip():
        line_to_add = b""
    # Rewrite in place: the first matching line is replaced, any further
    # lines for the same ip are blanked out.
    with salt.utils.files.fopen(hfn, "rb") as fp_:
        lines = fp_.readlines()
    for ind, _ in enumerate(lines):
        tmpline = lines[ind].strip()
        if not tmpline:
            continue
        if tmpline.startswith(b"#"):
            continue
        comps = tmpline.split()
        if comps[0] == salt.utils.stringutils.to_bytes(ip):
            if not ovr:
                lines[ind] = line_to_add
                ovr = True
            else:  # remove other entries
                lines[ind] = b""
    linesep_bytes = salt.utils.stringutils.to_bytes(os.linesep)
    if not ovr:
        # No existing entry was replaced: append a new one.
        # make sure there is a newline
        if lines and not lines[-1].endswith(linesep_bytes):
            lines[-1] += linesep_bytes
        line = line_to_add
        lines.append(line)
    with salt.utils.files.fopen(hfn, "wb") as ofile:
        ofile.writelines(lines)
    return True
def rm_host(ip, alias):
    """
    Remove a host entry from the hosts file

    Removes only the given alias from the matching ip's line; the line
    itself is deleted when no aliases remain. Returns True even when the
    pair did not exist (nothing to do).

    CLI Example:

    .. code-block:: bash

        salt '*' hosts.rm_host <ip> <alias>
    """
    if not has_pair(ip, alias):
        return True
    # Make sure future calls to _list_hosts() will re-read the file
    __context__.pop("hosts._list_hosts", None)
    hfn = _get_or_create_hostfile()
    with salt.utils.files.fopen(hfn, "rb") as fp_:
        lines = fp_.readlines()
    for ind, _ in enumerate(lines):
        tmpline = lines[ind].strip()
        if not tmpline:
            continue
        if tmpline.startswith(b"#"):
            continue
        comps = tmpline.split()
        comment = None
        if b"#" in tmpline:
            # Preserve an inline comment so it can be re-attached below.
            host_info, comment = tmpline.split(b"#")
            comment = salt.utils.stringutils.to_bytes(comment).lstrip()
        else:
            host_info = tmpline
        host_info = salt.utils.stringutils.to_bytes(host_info)
        comps = host_info.split()
        b_ip = salt.utils.stringutils.to_bytes(ip)
        b_alias = salt.utils.stringutils.to_bytes(alias)
        if comps[0] == b_ip:
            # Rebuild the line with every alias except the one removed.
            newline = comps[0] + b"\t\t"
            for existing in comps[1:]:
                if existing == b_alias:
                    continue
                newline += existing + b" "
            if newline.strip() == b_ip:
                # No aliases exist for the line, make it empty
                lines[ind] = b""
            else:
                # Only an alias was removed
                if comment:
                    lines[ind] = (
                        newline
                        + b"# "
                        + comment
                        + salt.utils.stringutils.to_bytes(os.linesep)
                    )
                else:
                    lines[ind] = newline + salt.utils.stringutils.to_bytes(os.linesep)
    with salt.utils.files.fopen(hfn, "wb") as ofile:
        ofile.writelines(lines)
    return True
def add_host(ip, alias):
    """
    Add a host to an existing entry, if the entry is not in place then create
    it with the given host

    CLI Example:

    .. code-block:: bash

        salt '*' hosts.add_host <ip> <alias>
    """
    hfn = _get_or_create_hostfile()
    if not os.path.isfile(hfn):
        return False
    if has_pair(ip, alias):
        return True
    hosts = _list_hosts()
    # Make sure future calls to _list_hosts() will re-read the file
    __context__.pop("hosts._list_hosts", None)
    inserted = False
    # NOTE(review): host entries are dicts keyed by ip and comment entries
    # are lists keyed "comment-N", so `isinstance(h, list)` together with
    # `i == ip` looks unreachable — this loop appears to be dead code;
    # confirm against _list_hosts() output before changing.
    for i, h in hosts.items():
        for num, host in enumerate(h):
            if isinstance(h, list):
                if host.startswith("#") and i == ip:
                    h.insert(num, alias)
                    inserted = True
    if not inserted:
        # Append the alias to the ip's entry, creating the entry if needed.
        hosts.setdefault(ip, {}).setdefault("aliases", []).append(alias)
    _write_hosts(hosts)
    return True
def set_comment(ip, comment):
    """
    Set the comment for a host to an existing entry,
    if the entry is not in place then return False

    CLI Example:

    .. code-block:: bash

        salt '*' hosts.set_comment <ip> <comment>
    """
    hosts_file = _get_or_create_hostfile()
    if not os.path.isfile(hosts_file):
        return False
    hosts = _list_hosts()
    # Make sure future calls to _list_hosts() will re-read the file
    __context__.pop("hosts._list_hosts", None)
    if ip not in hosts:
        return False
    if "comment" in hosts[ip] and hosts[ip]["comment"] == comment:
        # Comment is already up to date; nothing to write.
        return True
    hosts[ip]["comment"] = comment
    _write_hosts(hosts)
    return True
def _write_hosts(hosts):
    """
    Serialize the parsed hosts mapping (as produced by _list_hosts) back
    to the hosts file, preserving comment entries in order.
    """
    rendered = []
    for addr, info in hosts.items():
        if not addr:
            continue
        if addr.startswith("comment"):
            # Whole-line comment entry: the stored lines are written as-is.
            rendered.append("".join(info))
        elif "comment" in info:
            rendered.append(
                "{}\t\t{}\t\t# {}".format(
                    addr, " ".join(info["aliases"]), info["comment"]
                )
            )
        else:
            rendered.append("{}\t\t{}".format(addr, " ".join(info["aliases"])))
    hfn = _get_or_create_hostfile()
    with salt.utils.files.fopen(hfn, "w+") as ofile:
        for entry in rendered:
            if not entry.strip():
                continue
            # /etc/hosts needs to end with a newline so that some utils
            # that read it do not break
            ofile.write(
                salt.utils.stringutils.to_str(entry.strip() + str(os.linesep))
            )
| |
import requests
from bs4 import BeautifulSoup
import sys
import numpy as np
# then add this function lower down
from memory_profiler import profile
import pandas as pd
from sortedcontainers import SortedDict
import datetime
import bs4
# TODO
# http://www.meilleursagents.com/immobilier/recherche/?item_types%5B%5D=369681781&item_types%5B%5D=369681782&transaction_type=369681778&place_ids%5B%5D=32696
# http://www.seloger.com/list.htm?idtt=1&idtypebien=1&cp=75&tri=initial
def parse_source(html, encoding='utf-8'):
    """Parse raw HTML bytes into a BeautifulSoup tree.

    Args:
        html: raw response body (bytes or str)
        encoding: encoding hint passed through to bs4

    Returns:
        BeautifulSoup document tree.

    An explicit parser ("html.parser", stdlib-backed) is named so bs4 does
    not emit its "no parser was explicitly specified" warning and so the
    result does not silently change depending on whether lxml/html5lib
    happens to be installed on the machine.
    """
    return BeautifulSoup(html, "html.parser", from_encoding=encoding)
def fetch_meilleursagents():
    """Fetch the MeilleursAgents rental-search page and return its parse tree.

    Returns:
        BeautifulSoup tree of the listings page.

    Raises:
        requests.HTTPError: on a non-2xx response (via raise_for_status).

    Fix: the original parsed the page into a local and then fell off the end
    of the function, implicitly returning None — the parsed document is now
    returned so callers can actually use it (callers that ignored the old
    None return are unaffected).
    """
    base = 'http://www.meilleursagents.com/immobilier/recherche/?redirect_url=&view_mode=list&sort_mode=ma_contract%7Cdesc&transaction_type=369681778&buyer_search_id=&user_email=&place_ids%5B%5D=138724240&place_title=&item_types%5B%5D=369681781&item_types%5B%5D=369681782&item_area_min=&item_area_max=&budget_min=&budget_max='
    resp = requests.get(base, timeout=150)
    resp.raise_for_status()  # <- no-op if status==200
    return parse_source(resp.content, resp.encoding)
def fetch_solger():
    """Fetch the SeLoger listings page and return its parse tree.

    Returns:
        BeautifulSoup tree of the listings page.

    Raises:
        requests.HTTPError: on a non-2xx response (via raise_for_status).

    Fix: the original parsed the page and then discarded the result
    (implicitly returning None); the parsed document is now returned.
    """
    base = 'http://www.seloger.com/list.htm?idtt=1&idtypebien=1&cp=75&tri=initial'
    resp = requests.get(base, timeout=150)
    resp.raise_for_status()  # <- no-op if status==200
    return parse_source(resp.content, resp.encoding)
def fetch_pap():
    """Crawl pap.fr rental pages per arrondissement and collect listings.

    Returns a list of per-page results from extract_listings_pap().
    NOTE(review): np.arange(2, 20) stops at 19, so the 20e entry in the
    slug table is never fetched — confirm whether that is intentional.
    """
    base = 'http://www.pap.fr/annonce/locations-appartement-paris-14e-g37781'
    try:
        resp = requests.get(base, timeout=150)
        resp.raise_for_status()  # <- no-op if status==200
        resp_comb = resp.content
    except:
        # Best-effort warm-up request; failures are ignored.
        pass
    listing = []
    # Arrondissement number -> pap.fr URL slug.
    string = {}
    string[15] = '15e-g37782'
    string[13] = '13e-g37780'
    string[14] = '14e-g37781'
    string[2] = '2e-g37769'
    string[3] = '3e-g37770'
    string[4] = '4e-g37771'
    string[5] = '5e-g37772'
    string[6] = '6e-g37773'
    string[7] = '7e-g37774'
    string[8] = '8e-g37775'
    string[9] = '9e-g37776'
    string[10] = '10e-g37777'
    string[11] = '11e-g37778'
    string[12] = '12e-g37779'
    string[16] = '16e-g37783'
    string[17] = '17e-g37784'
    string[18] = '18e-g37785'
    string[19] = '19e-g37786'
    string[20] = '20e-g37787'
    for i in np.arange(2, 20):
        print(i)
        # First results page for this arrondissement.
        base2 = 'http://www.pap.fr/annonce/locations-appartement-paris-{}'.format(string[i])
        try:
            resp_ = requests.get(base2, timeout=200)
        except:
            break
        # resp_.raise_for_status()  # <- no-op if status==200
        if resp_.status_code == 404:
            break
        parsed = parse_source(resp_.content, resp_.encoding)
        listing.append(extract_listings_pap(parsed))
        # print(listing)
        # resp_comb += resp_.content + resp_comb
        # Paginated follow-up pages (suffix -1 .. -6).
        for j in np.arange(1, 7):
            print(j)
            base2 = 'http://www.pap.fr/annonce/locations-appartement-paris-{}-{}'.format(
                string[i], j)
            try:
                resp_ = requests.get(base2, timeout=200)
            except:
                break
            # resp_.raise_for_status()  # <- no-op if status==200
            if resp_.status_code == 404:
                break
            # resp_comb += resp_.content + resp_comb
            parsed = parse_source(resp_.content, resp_.encoding)
            listing.append(extract_listings_pap(parsed))
    # return resp_comb, resp.encoding
    return listing
def fetch_fusac():
    """Crawl the FUSAC housing-offers pages and collect listings.

    Returns a list of per-page results from extract_listings_fusac().
    """
    base = 'http://ads.fusac.fr/ad-category/housing/'
    listing = []
    try:
        resp = requests.get(base, timeout=100)
        resp.raise_for_status()  # <- no-op if status==200
        resp_comb = resp.content
        parsed = parse_source(resp.content, resp.encoding)
        listing.append(extract_listings_fusac(parsed))
    except:
        # Best-effort: skip the landing page on any failure.
        pass
    # Paginated result pages 2..5.
    for i in np.arange(2, 6):
        base2 = 'http://ads.fusac.fr/ad-category/housing/housing-offers/page/{}/'.format(i)
        try:
            resp_ = requests.get(base2, timeout=100)
        except:
            continue
        # resp_.raise_for_status()  # <- no-op if status==200
        if resp_.status_code == 404:
            break
        # resp_comb += resp_.content + resp_comb
        parsed = parse_source(resp_.content, resp_.encoding)
        listing.append(extract_listings_fusac(parsed))
    # return resp_comb, resp.encoding
    return listing
# handle response 200
def fetch_search_results(
    query=None, minAsk=600, maxAsk=1450, bedrooms=None, bundleDuplicates=1,
    pets_cat=1
):
    """Search paris.craigslist.fr apartment listings.

    Args:
        query: free-text search term (omitted from the query string if None)
        minAsk/maxAsk: price bounds
        bedrooms: bedroom count filter (omitted if None)
        bundleDuplicates, pets_cat: craigslist boolean flags

    Returns:
        A one-element list of extract_listings() results, or None on any
        request/parse failure (preserves the original best-effort contract).
    """
    # Build the query params FIRST, while locals() contains only the
    # function's parameters. The original defined `listing = []` before
    # sampling locals(), which leaked {'listing': []} into the HTTP params.
    search_params = {
        key: val for key, val in locals().items() if val is not None
    }
    if not search_params:
        raise ValueError("No valid keywords")
    listing = []
    base = 'https://paris.craigslist.fr/search/apa'
    try:
        resp_ = requests.get(base, params=search_params, timeout=100)
        resp_.raise_for_status()  # <- no-op if status==200
        parsed = parse_source(resp_.content, resp_.encoding)
        listing.append(extract_listings(parsed))
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate; any request/parse error yields None as before.
        return None
    return listing
# def extract_listings(parsed):
# listings = parsed.find_all("li", {"class": "result-row"})
# return listings
def extract_listings_fusac(parsed):
    """Extract listing dicts from a parsed FUSAC results page.

    For each "Just listed" product box, scrapes the price and description,
    then fetches the detail page to read the arrondissement from the
    zipcode field. Returns a list of SortedDicts with keys
    price/desc/pieces/meters/chambre/ars/link (the last four mostly None —
    FUSAC pages do not carry that data).
    """
    # location_attrs = {'data-latitude': True, 'data-longitude': True}
    listings = parsed.find_all(
        'div', {'class': "prod-cnt prod-box shadow Just-listed"})
    extracted = []
    for j, listing in enumerate(listings[0:]):
        # hood = listing.find('span', {'class': 'result-hood'})
        # # print(hood)
        # # location = {key: listing.attrs.get(key, '') for key in location_attrs}
        # link = listing.find('a', {'class': 'result-title hdrlnk'})  # add this
        # if link is not None:
        #     descr = link.string.strip()
        #     link_href = link.attrs['href']
        price = listing.find('p', {'class': 'post-price'})
        if price is not None:
            # e.g. "1,200 EUR" -> 1200.0 (comma is a thousands separator)
            price = float(price.string.split()[0].replace(',', ''))
        # link = listing.find('div', {'class': 'listos'}).find('a',href=True)['href']
        # resp = requests.get(link, timeout=10)
        # resp.raise_for_status()  # <- no-op if status==200
        desc = listing.find('p', {'class': 'post-desc'}
                            )
        if price is not None:
            desc = desc.string
        # Follow the detail-page link to read the zipcode's last two digits
        # (the arrondissement).
        url = listing.find('div', {'class': "post-left"}).find('div', {'class': "grido"}).find('a', href=True).get('href')
        resp = requests.get(url, timeout=100)
        resp.raise_for_status()  # <- no-op if status==200
        parse = parse_source(resp.content, resp.encoding)
        try:
            ars = int(parse.find('div', {'class': "single-main"}).find('li', {'class': "acf-details-item"}, id="acf-cp_zipcode").find('span', {'class': 'acf-details-val'}).string[-2:])
        except:
            ars = None
        this_listing = {
            # 'location': location,
            # 'link': link_href,  # add this too
            'price': price,
            'desc': desc,
            # ====
            # 'description': descr,
            'pieces': None,
            'meters': None,
            'chambre': None,
            'ars': ars,
            'link': None
        }
        extracted.append(SortedDict(this_listing))
    return extracted
def extract_listings_pap(parsed):
    """Extract apartment listings from a parsed pap.fr results page.

    Fetches each listing's detail page to collect the description,
    arrondissement and the Pieces/Chambre/Surface summary values.

    Parameters
    ----------
    parsed : BeautifulSoup document for a pap.fr search-results page.

    Returns
    -------
    list of SortedDict with keys 'price', 'desc', 'pieces', 'meters',
    'chambre', 'ars', 'link'.
    """
    listings = parsed.find_all('div', {'class': "box search-results-item"})
    extracted = []
    for listing in listings:
        price = listing.find('span', {'class': 'price'})
        if price is not None:
            # e.g. "1.234 EUR" -> 1234.0 (dots are thousands separators)
            price = float(price.string.split()[0].replace('.', ''))
        ref = listing.find('div', {'class': 'float-right'}).find(
            'a', href=True)['href']
        base = 'http://www.pap.fr/' + ref
        try:
            resp = requests.get(base, timeout=100)
        except requests.RequestException:
            # Was a bare `except:`; narrowed to network/HTTP failures.
            # Original behaviour kept: give up and return what we have.
            break
        link = base
        resp.raise_for_status()  # raise on HTTP error; no-op on 200
        resp_comb = parse_source(resp.content, resp.encoding)
        descr = resp_comb.find_all('p', {'class': 'item-description'})[0]
        desc = ' '
        for line in descr.contents:
            if isinstance(line, bs4.element.NavigableString):
                # NOTE(review): '<\br>' is the character set
                # {'<', '\x08', 'r', '>'}, not the literal tag '<br>';
                # kept as-is to preserve the original stripping.
                desc += ' ' + line.string.strip('<\br>').strip('\n')
        try:
            # Arrondissement: digits before the 'e' in e.g. "Paris 11e".
            ars = int(resp_comb.find('div', {'class': 'item-geoloc'})
                      .find('h2').string.split('e')[0][-2:])
        except (AttributeError, TypeError, ValueError):
            # Was a bare `except:`. Original behaviour kept: abandon the
            # remaining listings as well.
            break
        temp_dict_ = {}
        for lines in resp_comb.find_all(
                'ul', {'class': 'item-summary'})[0].find_all('li'):
            tag = lines.contents[0].split()[0]
            value = int(lines.find_all('strong')[0].string.split()[0])
            temp_dict_[tag] = value
        # dict.get replaces the previous try/except KeyError blocks.
        pieces = temp_dict_.get(u'Pi\xe8ces')
        chambre = temp_dict_.get(u'Chambre')
        square_meters = temp_dict_.get('Surface')
        this_listing = {
            'price': price,
            'desc': desc,
            'pieces': pieces,
            'meters': square_meters,
            'chambre': chambre,
            'ars': ars,
            'link': link,
        }
        extracted.append(SortedDict(this_listing))
    return extracted
def extract_listings_solger(parsed):
    """Extract listings from a parsed solger results page.

    NOTE(review): this extractor is unfinished -- unlike the other
    extract_listings_* functions it returns the raw bs4 Tag objects,
    not normalized SortedDict records.
    """
    # Removed the unused `extracted = []` local from the original.
    return parsed.find_all('article', {'class': "listing life_annuity gold"})
# for listing in listings[0:]:
# # hood = listing.find('span', {'class': 'result-hood'})
# # # print(hood)
# # # location = {key: listing.attrs.get(key, '') for key in location_attrs}
# # link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this
# # if link is not None:
# # descr = link.string.strip()
# # link_href = link.attrs['href']
# price = listing.find('span', {'class': 'price'})
# if price is not None:
# price = float(price.string.split()[0].replace('.', ''))
# ref = listing.find('div', {'class': 'float-right'}).find('a', href=True)['href']
# base = 'http://www.pap.fr/' + ref
# resp = requests.get(base, timeout=20)
# link = base
# resp.raise_for_status() # <- no-op if status==200
# resp_comb = parse_source(resp.content, resp.encoding)
# descr = resp_comb.find_all('p', {'class': 'item-description'})[0]
# desc = ' '
# for line in descr.contents:
# if isinstance(line, bs4.element.NavigableString):
# desc += ' ' + line.string.strip('<\br>').strip('\n')
# # return resp_comb.find_all(
# # 'ul', {'class': 'item-summary'})
# try:
# ars = int(resp_comb.find(
# 'div', {'class': 'item-geoloc'}).find('h2').string.split('e')[0][-2:])
# except:
# break
# # return resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li')
# # print(resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li'))
# temp_dict_ = {}
# for lines in resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li'):
# tag = lines.contents[0].split()[0]
# value = int(lines.find_all('strong')[0].string.split()[0])
# temp_dict_[tag] = value
# try:
# pieces = temp_dict_[u'Pi\xe8ces']
# except:
# pieces = None
# try:
# chambre = temp_dict_[u'Chambre']
# except:
# chambre = None
# try:
# square_meters = temp_dict_['Surface']
# except:
# square_meters = None
# # meters = resp_comb.find_all('ul', {'class': 'item-summary'}
# # )[0].find_all('strong').string.split()[0]
# # link = listing.find('div', {'class': 'listos'}).find('a',href=True)['href']
# # resp = requests.get(link, timeout=10)
# # resp.raise_for_status() # <- no-op if status==200
# # desc = listing.find('p', {'class': 'post-desc'}
# # )
# # if price is not None:
# # desc = desc.string
# # housing = listing.find('span', {'class': 'housing'})
# # if housing is not None:
# # beds = housing.decode_contents().split('br')[0][-1]
# # rm = housing.decode_contents().split('m<sup>2</sup>')[0]
# # sqm = [int(s) for s in rm.split() if s.isdigit()]
# # if len(sqm) == 0:
# # sqm = None
# # else:
# # sqm = int(sqm[0])
# this_listing = {
# # 'location': location,
# # 'link': link_href, # add this too
# # 'description': descr, # and this
# 'price': price,
# 'desc': desc,
# 'pieces': pieces,
# 'meters': square_meters,
# 'chambre': chambre,
# 'ars': ars,
# # 'meters': sqm,
# # 'beds': beds
# 'link': link
# }
# extracted.append(SortedDict(this_listing))
# return extracted
# parsed.find_all(
# ...: 'div', {'class': "box search-results-item"})[0].find('div',{'class':'float-right'}).find('a',href=True)['href']
def extract_listings(parsed):
    """Extract apartment listings from a parsed craigslist results page.

    Parameters
    ----------
    parsed : BeautifulSoup document for a craigslist search-results page.

    Returns
    -------
    list of SortedDict with keys 'link', 'desc', 'price', 'meters',
    'chambre', 'pieces', 'ars'.
    """
    listings = parsed.find_all("li", {"class": "result-row"})
    extracted = []
    # First two rows are skipped (site boilerplate, per original code).
    for listing in listings[2:]:
        # BUG FIX: these were only assigned inside conditional blocks, so a
        # listing missing a field either raised NameError (first iteration)
        # or silently reused the previous listing's values.
        descr = None
        link_href = None
        beds = None
        sqm = None
        link = listing.find('a', {'class': 'result-title hdrlnk'})
        if link is not None:
            descr = link.string.strip()
            link_href = link.attrs['href']
        price = listing.find('span', {'class': 'result-price'})
        if price is not None and price.string is not None:
            price = int(price.string[1:])  # drop leading currency symbol
        housing = listing.find('span', {'class': 'housing'})
        if housing is not None:
            beds = housing.decode_contents().split('br')[0][-1]
            rm = housing.decode_contents().split('m<sup>2</sup>')[0]
            digits = [int(s) for s in rm.split() if s.isdigit()]
            sqm = int(digits[0]) if digits else None
        this_listing = {
            'link': link_href,
            'desc': descr,
            'price': price,
            'meters': sqm,
            'chambre': beds,
            'pieces': None,
            'ars': None,
        }
        extracted.append(SortedDict(this_listing))
    return extracted
if __name__ == '__main__':
    # BUG FIX: `pd.DataFrame` bound the class object, not an instance.
    # (Currently unused because the concat step below is disabled.)
    df = pd.DataFrame()
    resu = []
    print('loading fusac')
    resu.append(fetch_fusac())
    print('loading pap')
    resu.append(fetch_pap())
    print('loading craig')
    resu.append(fetch_search_results())
    # Each fetch_* returns a list of per-page lists of listing dicts;
    # flatten both levels into one flat list of dicts.
    flat = [item for lis in resu for lis1 in lis for item in lis1]
    df_new = pd.DataFrame(flat)
    print('saving..')
    # df = pd.concat([df, df_new])
    df_new.to_pickle('./apartment_paris.pk')
    print('Done.')
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""Top-level display functions for displaying object in different formats.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os
import struct
from IPython.utils.py3compat import string_types, cast_bytes_py2, cast_unicode
from .displaypub import publish_display_data
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def _safe_exists(path):
"""Check path, but don't let exceptions raise"""
try:
return os.path.exists(path)
except Exception:
return False
def _merge(d1, d2):
"""Like update, but merges sub-dicts instead of clobbering at the top level.
Updates d1 in-place
"""
if not isinstance(d2, dict) or not isinstance(d1, dict):
return d2
for key, value in d2.items():
d1[key] = _merge(d1.get(key), value)
return d1
def _display_mimetype(mimetype, objs, raw=False, metadata=None):
    """Publish *objs* under a single mimetype via display().

    Parameters
    ----------
    mimetype : str
        The mimetype to be published (e.g. 'image/png').
    objs : tuple of objects
        The Python objects to display, or if raw=True raw data to display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    md = {mimetype: metadata} if metadata else metadata
    # Raw payloads are wrapped as single-mimetype bundles before publishing.
    payload = [{mimetype: obj} for obj in objs] if raw else list(objs)
    display(*payload, raw=raw, metadata=md, include=[mimetype])
#-----------------------------------------------------------------------------
# Main functions
#-----------------------------------------------------------------------------
def display(*objs, **kwargs):
    """Display Python objects in all frontends.

    By default all representations are computed and sent to the frontends,
    which decide what to show and how.

    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display.
    raw : bool, optional
        Are the objects already mimetype-keyed dicts of raw display data,
        or Python objects that need formatting first? [default: False]
    include : list or tuple, optional
        Format type strings (MIME types) to include; if set, *only* these
        are computed.
    exclude : list or tuple, optional
        Format type strings (MIME types) to exclude; all others computed.
    metadata : dict, optional
        Metadata to associate with the output; mime-type keys apply to the
        matching representation formats, if present.
    """
    from IPython.core.interactiveshell import InteractiveShell
    raw = kwargs.get('raw', False)
    include = kwargs.get('include')
    exclude = kwargs.get('exclude')
    metadata = kwargs.get('metadata')
    if raw:
        for obj in objs:
            publish_display_data('display', obj, metadata)
        return
    fmt = InteractiveShell.instance().display_formatter.format
    for obj in objs:
        format_dict, md_dict = fmt(obj, include=include, exclude=exclude)
        if metadata:
            # kwarg-specified metadata gets precedence
            _merge(md_dict, metadata)
        publish_display_data('display', format_dict, md_dict)
def display_pretty(*objs, **kwargs):
    """Display the pretty (text/plain) representation of *objs*.

    Forwards raw/metadata keywords to _display_mimetype.
    """
    _display_mimetype('text/plain', objs, **kwargs)


def display_html(*objs, **kwargs):
    """Display the HTML (text/html) representation of *objs*."""
    _display_mimetype('text/html', objs, **kwargs)


def display_svg(*objs, **kwargs):
    """Display the SVG (image/svg+xml) representation of *objs*."""
    _display_mimetype('image/svg+xml', objs, **kwargs)


def display_png(*objs, **kwargs):
    """Display the PNG (image/png) representation of *objs*."""
    _display_mimetype('image/png', objs, **kwargs)


def display_jpeg(*objs, **kwargs):
    """Display the JPEG (image/jpeg) representation of *objs*."""
    _display_mimetype('image/jpeg', objs, **kwargs)


def display_latex(*objs, **kwargs):
    """Display the LaTeX (text/latex) representation of *objs*."""
    _display_mimetype('text/latex', objs, **kwargs)


def display_json(*objs, **kwargs):
    """Display the JSON (application/json) representation of *objs*.

    Note that not many frontends support displaying JSON.
    """
    _display_mimetype('application/json', objs, **kwargs)


def display_javascript(*objs, **kwargs):
    """Display the Javascript (application/javascript) repr of *objs*."""
    _display_mimetype('application/javascript', objs, **kwargs)
#-----------------------------------------------------------------------------
# Smart classes
#-----------------------------------------------------------------------------
class DisplayObject(object):
    """An object that wraps data to be displayed."""
    # Mode used when reading `filename`; binary subclasses (Image)
    # override this with 'rb'.
    _read_flags = 'r'
    def __init__(self, data=None, url=None, filename=None):
        """Create a display object given raw data.

        When this object is returned by an expression or passed to the
        display function, it will result in the data being displayed
        in the frontend. The MIME type of the data should match the
        subclasses used, so the Png subclass should be used for 'image/png'
        data. If the data is a URL, the data will first be downloaded
        and then displayed.

        Parameters
        ----------
        data : unicode, str or bytes
            The raw data, or a URL or file path to load the data from.
        url : unicode
            A URL to download the data from.
        filename : unicode
            Path to a local file to load the data from.
        """
        # A string `data` may really be a URL or local path: shift it into
        # the matching keyword and clear `data` so reload() fetches it.
        if data is not None and isinstance(data, string_types):
            if data.startswith('http') and url is None:
                url = data
                filename = None
                data = None
            elif _safe_exists(data) and filename is None:
                url = None
                filename = data
                data = None
        self.data = data
        self.url = url
        # NOTE(review): `unicode` is Python 2 only -- this module predates
        # Python 3 support.
        self.filename = None if filename is None else unicode(filename)
        self.reload()
    def reload(self):
        """Reload the raw data from file or URL."""
        if self.filename is not None:
            with open(self.filename, self._read_flags) as f:
                self.data = f.read()
        elif self.url is not None:
            try:
                import urllib2
                response = urllib2.urlopen(self.url)
                self.data = response.read()
                # extract encoding from header, if there is one:
                encoding = None
                for sub in response.headers['content-type'].split(';'):
                    sub = sub.strip()
                    if sub.startswith('charset'):
                        encoding = sub.split('=')[-1].strip()
                        break
                # decode data, if an encoding was specified
                if encoding:
                    self.data = self.data.decode(encoding, 'replace')
            except:
                # Any download/decode failure leaves data as None rather
                # than raising at display time.
                self.data = None
class Pretty(DisplayObject):
    # text/plain display object: data is shown via the pretty (default) repr.
    def _repr_pretty_(self):
        return self.data
class HTML(DisplayObject):
    # text/html display object: data is passed through as raw HTML.
    def _repr_html_(self):
        return self.data
    def __html__(self):
        """
        This method exists to inform other HTML-using modules (e.g. Markupsafe,
        htmltag, etc) that this object is HTML and does not need things like
        special characters (<>&) escaped.
        """
        return self._repr_html_()
class Math(DisplayObject):
    def _repr_latex_(self):
        """Return the data as display math, normalizing any '$' fences."""
        return "$$%s$$" % self.data.strip('$')
class Latex(DisplayObject):
    # text/latex display object: data is passed through verbatim.
    def _repr_latex_(self):
        return self.data
class SVG(DisplayObject):
    # wrap data in a property, which extracts the <svg> tag, discarding
    # document headers
    _data = None
    @property
    def data(self):
        return self._data
    @data.setter
    def data(self, svg):
        # data=None is allowed (set by DisplayObject.__init__ before reload).
        if svg is None:
            self._data = None
            return
        # parse into dom object
        from xml.dom import minidom
        svg = cast_bytes_py2(svg)
        x = minidom.parseString(svg)
        # get svg tag (should be 1)
        found_svg = x.getElementsByTagName('svg')
        if found_svg:
            svg = found_svg[0].toxml()
        else:
            # fallback on the input, trust the user
            # but this is probably an error.
            pass
        svg = cast_unicode(svg)
        self._data = svg
    def _repr_svg_(self):
        return self.data
class JSON(DisplayObject):
    # application/json display object; note: few frontends render JSON.
    def _repr_json_(self):
        return self.data
# Javascript templates used by Javascript._repr_javascript_:
# css_t injects a stylesheet <link> into the page head; lib_t1/lib_t2 wrap
# user code in $.getScript callbacks so libraries load before it runs.
css_t = """$("head").append($("<link/>").attr({
rel: "stylesheet",
type: "text/css",
href: "%s"
}));
"""
lib_t1 = """$.getScript("%s", function () {
"""
lib_t2 = """});
"""
class Javascript(DisplayObject):
    def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
        """Create a Javascript display object given raw data.

        When this object is returned by an expression or passed to the
        display function, it will result in the data being displayed
        in the frontend. If the data is a URL, the data will first be
        downloaded and then displayed.

        In the Notebook, the containing element will be available as `element`,
        and jQuery will be available. The output area starts hidden, so if
        the js appends content to `element` that should be visible, then
        it must call `container.show()` to unhide the area.

        Parameters
        ----------
        data : unicode, str or bytes
            The Javascript source code or a URL to download it from.
        url : unicode
            A URL to download the data from.
        filename : unicode
            Path to a local file to load the data from.
        lib : list or str
            A sequence of Javascript library URLs to load asynchronously before
            running the source code. The full URLs of the libraries should
            be given. A single Javascript library URL can also be given as a
            string.
        css : list or str
            A sequence of css files to load before running the source code.
            The full URLs of the css files should be given. A single css URL
            can also be given as a string.
        """
        # Normalize lib/css: a single URL string becomes a one-element list.
        # NOTE(review): `basestring` is Python 2 only -- module predates py3.
        if isinstance(lib, basestring):
            lib = [lib]
        elif lib is None:
            lib = []
        if isinstance(css, basestring):
            css = [css]
        elif css is None:
            css = []
        if not isinstance(lib, (list,tuple)):
            raise TypeError('expected sequence, got: %r' % lib)
        if not isinstance(css, (list,tuple)):
            raise TypeError('expected sequence, got: %r' % css)
        self.lib = lib
        self.css = css
        super(Javascript, self).__init__(data=data, url=url, filename=filename)
    def _repr_javascript_(self):
        # Emit CSS loaders first, then open one $.getScript callback per
        # library, the user source, and one closing '});' per library.
        r = ''
        for c in self.css:
            r += css_t % c
        for l in self.lib:
            r += lib_t1 % l
        r += self.data
        r += lib_t2*len(self.lib)
        return r
# constants for identifying png/jpeg data
_PNG = b'\x89PNG\r\n\x1a\n'
_JPEG = b'\xff\xd8'
def _pngxy(data):
"""read the (width, height) from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
def _jpegxy(data):
    """read the (width, height) from a JPEG header"""
    # adapted from http://www.64lines.com/jpeg-width-height
    # Walks marker segments until the baseline Start-of-Frame (0xFFC0)
    # segment, which carries the image dimensions.
    idx = 4
    while True:
        block_size = struct.unpack('>H', data[idx:idx+2])[0]
        idx = idx + block_size
        if data[idx:idx+2] == b'\xFF\xC0':
            # found Start of Frame
            iSOF = idx
            break
        else:
            # read another block
            idx += 2
    # NOTE(review): only handles baseline (0xFFC0) frames, and malformed
    # input without an SOF0 marker raises struct.error mid-scan -- confirm
    # acceptable for the data this receives.
    # SOF payload layout: marker(2) + length(2) + precision(1), then
    # height(2) and width(2), both big-endian.
    h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
    return w, h
class Image(DisplayObject):
    # Images are binary payloads: read files in binary mode.
    _read_flags = 'rb'
    _FMT_JPEG = u'jpeg'
    _FMT_PNG = u'png'
    # Only these formats may be embedded as inline data in the frontend.
    _ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
    def __init__(self, data=None, url=None, filename=None, format=u'png', embed=None, width=None, height=None, retina=False):
        """Create a display object for a PNG/JPEG image given raw data.

        When this object is returned by an expression or passed to the
        display function, it will result in the image being displayed
        in the frontend.

        Parameters
        ----------
        data : unicode, str or bytes
            The raw image data or a URL or filename to load the data from.
            This always results in embedded image data.
        url : unicode
            A URL to download the data from. If you specify `url=`,
            the image data will not be embedded unless you also specify `embed=True`.
        filename : unicode
            Path to a local file to load the data from.
            Images from a file are always embedded.
        format : unicode
            The format of the image data (png/jpeg/jpg). If a filename or URL is given
            the format will be inferred from the filename extension.
        embed : bool
            Should the image data be embedded using a data URI (True) or be
            loaded using an <img> tag. Set this to True if you want the image
            to be viewable later with no internet connection in the notebook.
            Default is `True`, unless the keyword argument `url` is set, then
            default value is `False`.
            Note that QtConsole is not able to display images if `embed` is set to `False`
        width : int
            Width to which to constrain the image in html
        height : int
            Height to which to constrain the image in html
        retina : bool
            Automatically set the width and height to half of the measured
            width and height.
            This only works for embedded images because it reads the width/height
            from image data.
            For non-embedded images, you can just set the desired display width
            and height directly.

        Examples
        --------
        # embedded image data, works in qtconsole and notebook
        # when passed positionally, the first arg can be any of raw image data,
        # a URL, or a filename from which to load image data.
        # The result is always embedding image data for inline images.
        Image('http://www.google.fr/images/srpr/logo3w.png')
        Image('/path/to/image.jpg')
        Image(b'RAW_PNG_DATA...')

        # Specifying Image(url=...) does not embed the image data,
        # it only generates `<img>` tag with a link to the source.
        # This will not work in the qtconsole or offline.
        Image(url='http://www.google.fr/images/srpr/logo3w.png')
        """
        # Infer the extension from whichever source was provided.
        if filename is not None:
            ext = self._find_ext(filename)
        elif url is not None:
            ext = self._find_ext(url)
        elif data is None:
            raise ValueError("No image data found. Expecting filename, url, or data.")
        elif isinstance(data, string_types) and (
            data.startswith('http') or _safe_exists(data)
        ):
            ext = self._find_ext(data)
        else:
            ext = None
        if ext is not None:
            format = ext.lower()
            if ext == u'jpg' or ext == u'jpeg':
                format = self._FMT_JPEG
            if ext == u'png':
                format = self._FMT_PNG
        elif isinstance(data, bytes) and format == 'png':
            # infer image type from image data header,
            # only if format might not have been specified.
            if data[:2] == _JPEG:
                format = 'jpeg'
        # NOTE(review): `unicode` is Python 2 only -- module predates py3.
        self.format = unicode(format).lower()
        # Default: embed unless the image came from a URL.
        self.embed = embed if embed is not None else (url is None)
        if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
            raise ValueError("Cannot embed the '%s' image format" % (self.format))
        self.width = width
        self.height = height
        self.retina = retina
        super(Image, self).__init__(data=data, url=url, filename=filename)
        if retina:
            self._retina_shape()
    def _retina_shape(self):
        """load pixel-doubled width and height from image data"""
        if not self.embed:
            return
        if self.format == 'png':
            w, h = _pngxy(self.data)
        elif self.format == 'jpeg':
            w, h = _jpegxy(self.data)
        else:
            # retina only supports png
            return
        # Display at half the intrinsic size (2x pixel density).
        self.width = w // 2
        self.height = h // 2
    def reload(self):
        """Reload the raw data from file or URL."""
        if self.embed:
            super(Image,self).reload()
        if self.retina:
            self._retina_shape()
    def _repr_html_(self):
        # Only non-embedded images render via an <img> tag; embedded data
        # is published through _repr_png_/_repr_jpeg_ instead.
        if not self.embed:
            width = height = ''
            if self.width:
                width = ' width="%d"' % self.width
            if self.height:
                height = ' height="%d"' % self.height
            return u'<img src="%s"%s%s/>' % (self.url, width, height)
    def _data_and_metadata(self):
        """shortcut for returning metadata with shape information, if defined"""
        md = {}
        if self.width:
            md['width'] = self.width
        if self.height:
            md['height'] = self.height
        if md:
            return self.data, md
        else:
            return self.data
    def _repr_png_(self):
        if self.embed and self.format == u'png':
            return self._data_and_metadata()
    def _repr_jpeg_(self):
        if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
            return self._data_and_metadata()
    def _find_ext(self, s):
        # Lowercased text after the final '.' (e.g. 'photo.PNG' -> u'png').
        return unicode(s.split('.')[-1].lower())
def clear_output(stdout=True, stderr=True, other=True):
    """Clear the output of the current cell receiving output.

    Each of stdout, stderr, and other non-stream data (anything produced
    by display()) can be excluded from the clear event; by default all
    three are cleared.

    Parameters
    ----------
    stdout : bool [default: True]
        Whether to clear stdout.
    stderr : bool [default: True]
        Whether to clear stderr.
    other : bool [default: True]
        Whether to clear everything else that is not stdout/stderr
        (e.g. figures, images, HTML, any result of display()).
    """
    from IPython.core.interactiveshell import InteractiveShell
    if not InteractiveShell.initialized():
        # No shell available: erase the current terminal line in place.
        from IPython.utils import io
        for enabled, stream in ((stdout, io.stdout), (stderr, io.stderr)):
            if enabled:
                print('\033[2K\r', file=stream, end='')
                stream.flush()
        return
    InteractiveShell.instance().display_pub.clear_output(
        stdout=stdout, stderr=stderr, other=other,
    )
# ---------------------------------------------------------------------------
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.subscribe."""
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import subscribe
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SubscribeTest(test_util.TensorFlowTestCase):
def _ExpectSubscribedIdentities(self, container):
"""Convenience function to test a container of subscribed identities."""
self.assertTrue(
all(subscribe._is_subscribed_identity(x) for x in container))
  @test_util.run_deprecated_v1
  def testSideEffect(self):
    """Subscribing a tensor rewires control deps and fires per evaluation."""
    a = constant_op.constant(1)
    b = constant_op.constant(1)
    c = math_ops.add(a, b)
    with ops.control_dependencies([c]):
      d = constant_op.constant(42)
    n = math_ops.negative(c)
    shared = []
    def sub(t):
      # Side effect: record every value flowing through the subscription.
      shared.append(t)
      return t
    c0 = c
    self.assertTrue(c0.op in d.op.control_inputs)
    c = subscribe.subscribe(c,
                            lambda t: script_ops.py_func(sub, [t], [t.dtype]))
    # Verify that control dependencies are correctly moved to the subscription.
    self.assertFalse(c0.op in d.op.control_inputs)
    self.assertTrue(c.op in d.op.control_inputs)
    with self.cached_session() as sess:
      c_out = self.evaluate([c])
      n_out = self.evaluate([n])
      d_out = self.evaluate([d])
    self.assertEqual(n_out, [-2])
    self.assertEqual(c_out, [2])
    self.assertEqual(d_out, [42])
    # c flowed through its subscription once per each of the three evals.
    self.assertEqual(shared, [2, 2, 2])
  @test_util.run_deprecated_v1
  def testSupportedTypes(self):
    """Confirm that supported types are correctly detected and handled."""
    a = constant_op.constant(1)
    b = constant_op.constant(1)
    c = math_ops.add(a, b)
    def sub(t):
      return t
    # Each supported container type must round-trip through subscribe()
    # with the same container type and subscribed-identity elements.
    # Tuples.
    subscribed = subscribe.subscribe(
        (a, b), lambda t: script_ops.py_func(sub, [t], [t.dtype]))
    self.assertIsInstance(subscribed, tuple)
    self._ExpectSubscribedIdentities(subscribed)
    # Lists.
    subscribed = subscribe.subscribe(
        [a, b], lambda t: script_ops.py_func(sub, [t], [t.dtype]))
    self.assertIsInstance(subscribed, list)
    self._ExpectSubscribedIdentities(subscribed)
    # Dictionaries.
    subscribed = subscribe.subscribe({
        'first': a,
        'second': b
    }, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
    self.assertIsInstance(subscribed, dict)
    self._ExpectSubscribedIdentities(subscribed.values())
    # Namedtuples.
    # pylint: disable=invalid-name
    TensorPair = collections.namedtuple('TensorPair', ['first', 'second'])
    # pylint: enable=invalid-name
    pair = TensorPair(a, b)
    subscribed = subscribe.subscribe(
        pair, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
    self.assertIsInstance(subscribed, TensorPair)
    self._ExpectSubscribedIdentities(subscribed)
    # Expect an exception to be raised for unsupported types.
    with self.assertRaisesRegex(TypeError, 'has invalid type'):
      subscribe.subscribe(c.name,
                          lambda t: script_ops.py_func(sub, [t], [t.dtype]))
  @test_util.run_deprecated_v1
  def testCaching(self):
    """Confirm caching of control output is recalculated between calls."""
    a = constant_op.constant(1)
    b = constant_op.constant(2)
    with ops.control_dependencies([a]):
      c = constant_op.constant(42)
    shared = {}
    def sub(t):
      # Count how many times each value flows through a subscription.
      shared[t] = shared.get(t, 0) + 1
      return t
    a = subscribe.subscribe(a,
                            lambda t: script_ops.py_func(sub, [t], [t.dtype]))
    with ops.control_dependencies([b]):
      d = constant_op.constant(11)
    # If it was using outdated cached control_outputs then
    # evaling would not trigger the new subscription.
    b = subscribe.subscribe(b,
                            lambda t: script_ops.py_func(sub, [t], [t.dtype]))
    with self.cached_session() as sess:
      c_out = self.evaluate([c])
      d_out = self.evaluate([d])
    self.assertEqual(c_out, [42])
    self.assertEqual(d_out, [11])
    # Both subscriptions fired exactly once (keys are the tensor values).
    self.assertEqual(shared, {2: 1, 1: 1})
  @test_util.run_deprecated_v1
  def testIsSubscribedIdentity(self):
    """Confirm subscribed identity ops are correctly detected."""
    a = constant_op.constant(1)
    b = constant_op.constant(2)
    c = math_ops.add(a, b)
    idop = array_ops.identity(c)
    c_sub = subscribe.subscribe(c, [])
    # Plain tensors and an ordinary identity op must not be mistaken for
    # subscription identities; only the tensor returned by subscribe() is.
    self.assertFalse(subscribe._is_subscribed_identity(a))
    self.assertFalse(subscribe._is_subscribed_identity(c))
    self.assertFalse(subscribe._is_subscribed_identity(idop))
    self.assertTrue(subscribe._is_subscribed_identity(c_sub))
@test_util.run_deprecated_v1
def testSubscribeExtend(self):
  """Confirm side effect are correctly added for different input types."""
  a = constant_op.constant(1)
  b = constant_op.constant(2)
  c = math_ops.add(a, b)

  shared = {}

  def sub(t, name):
    # Record one hit per named side-effect graph.
    shared[name] = shared.get(name, 0) + 1
    return t

  # Subscribe with a first side effect graph, passing an unsubscribed tensor.
  sub_graph1 = lambda t: sub(t, 'graph1')
  c_sub = subscribe.subscribe(
      c, lambda t: script_ops.py_func(sub_graph1, [t], [t.dtype]))

  # Add a second side effect graph, passing the tensor returned by the
  # previous call to subscribe().
  sub_graph2 = lambda t: sub(t, 'graph2')
  c_sub2 = subscribe.subscribe(
      c_sub, lambda t: script_ops.py_func(sub_graph2, [t], [t.dtype]))

  # Add a third side effect graph, passing the original tensor.
  sub_graph3 = lambda t: sub(t, 'graph3')
  c_sub3 = subscribe.subscribe(
      c, lambda t: script_ops.py_func(sub_graph3, [t], [t.dtype]))

  # Make sure there's only one identity op matching the source tensor's name.
  graph_ops = ops.get_default_graph().get_operations()
  name_prefix = c.op.name + '/subscription/Identity'
  identity_ops = [op for op in graph_ops if op.name.startswith(name_prefix)]
  self.assertEqual(1, len(identity_ops))

  # Expect the objects returned by subscribe() to reference the same tensor.
  self.assertIs(c_sub, c_sub2)
  self.assertIs(c_sub, c_sub3)

  # Expect the three side effect graphs to have been evaluated.
  with self.cached_session() as sess:
    self.evaluate([c_sub])
    self.assertIn('graph1', shared)
    self.assertIn('graph2', shared)
    self.assertIn('graph3', shared)
@test_util.run_v1_only('b/120545219')
def testSubscribeVariable(self):
  """Confirm that variables can be subscribed."""
  v1 = variables.VariableV1(0.0)
  v2 = variables.VariableV1(4.0)
  add = math_ops.add(v1, v2)
  assign_v1 = v1.assign(3.0)

  shared = []

  def sub(t):
    # Record every value read from the subscribed variable.
    shared.append(t)
    return t

  v1_sub = subscribe.subscribe(
      v1, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
  self.assertTrue(subscribe._is_subscribed_identity(v1_sub))

  with self.cached_session() as sess:
    # Initialize the variables first.
    self.evaluate([v1.initializer])
    self.evaluate([v2.initializer])

    # Expect the side effects to be triggered when evaluating the add op as
    # it will read the value of the variable.
    self.evaluate([add])
    self.assertEqual(1, len(shared))

    # Expect the side effect not to be triggered when evaluating the assign
    # op as it will not access the 'read' output of the variable.
    self.evaluate([assign_v1])
    self.assertEqual(1, len(shared))

    self.evaluate([add])
    self.assertEqual(2, len(shared))

    # Make sure the values read from the variable match the expected ones.
    self.assertEqual([0.0, 3.0], shared)
@test_util.run_v1_only('b/120545219')
def testResourceType(self):
  """Confirm that subscribe correctly handles tensors with 'resource' type."""
  tensor_array = tensor_array_ops.TensorArray(
      dtype=dtypes.float32,
      tensor_array_name='test',
      size=3,
      infer_shape=False)
  writer = tensor_array.write(0, [[4.0, 5.0]])
  reader = writer.read(0)

  shared = []

  def sub(t):
    shared.append(t)
    return t

  # TensorArray's handle output tensor has a 'resource' type and cannot be
  # subscribed as it's not 'numpy compatible' (see dtypes.py).
  # Expect that the original tensor is returned when subscribing to it.
  tensor_array_sub = subscribe.subscribe(
      tensor_array.handle, lambda t: script_ops.py_func(sub, [t], [t.dtype]))
  self.assertIs(tensor_array_sub, tensor_array.handle)
  self.assertFalse(subscribe._is_subscribed_identity(tensor_array.handle))

  with self.cached_session() as sess:
    self.evaluate([reader])
  # The side-effect fn must never have fired for the resource tensor.
  self.assertEqual(0, len(shared))
@test_util.run_deprecated_v1
def testMultipleOutputs(self):
  """Handle subscriptions to multiple outputs from the same op."""
  sparse_tensor_1 = sparse_tensor.SparseTensor(
      indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
  sparse_tensor_2 = sparse_tensor.SparseTensor(
      indices=[[0, 0], [1, 2]], values=[2, 3], dense_shape=[3, 4])

  # This op has three outputs.
  sparse_add = sparse_ops.sparse_add(sparse_tensor_1, sparse_tensor_2)
  self.assertEqual(3, len(sparse_add.op.outputs))

  c1 = constant_op.constant(1)

  with ops.control_dependencies(sparse_add.op.outputs):
    # This op depends on all the three outputs.
    neg = -c1

  shared = []

  def sub(t):
    shared.append(t)
    return t

  # Subscribe the three outputs at once.
  subscribe.subscribe(sparse_add.op.outputs,
                      lambda t: script_ops.py_func(sub, [t], [t.dtype]))

  with self.cached_session() as sess:
    self.evaluate([neg])

  # All three ops have been processed.
  self.assertEqual(3, len(shared))
@test_util.run_deprecated_v1
def test_subscribe_tensors_on_different_devices(self):
  """Side effect ops are added with the same device of the subscribed op."""
  c1 = constant_op.constant(10)
  c2 = constant_op.constant(20)

  with ops.device('cpu:0'):
    add = math_ops.add(c1, c2)

  with ops.device('cpu:1'):
    mul = math_ops.multiply(c1, c2)

  def sub(t):
    # Identity side effect; only op placement is under test here.
    return t

  add_sub = subscribe.subscribe(
      add, lambda t: script_ops.py_func(sub, [t], [t.dtype]))

  mul_sub = subscribe.subscribe(
      mul, lambda t: script_ops.py_func(sub, [t], [t.dtype]))

  # Expect the identity tensors injected by subscribe to have been created
  # on the same device as their original tensors.
  self.assertNotEqual(add_sub.device, mul_sub.device)
  self.assertEqual(add.device, add_sub.device)
  self.assertEqual(mul.device, mul_sub.device)
@test_util.run_v1_only('b/120545219')
def test_subscribe_tensors_within_control_flow_context(self):
  """Side effect ops are added with the same control flow context."""
  c1 = constant_op.constant(10)
  c2 = constant_op.constant(20)
  x1 = math_ops.add(c1, c2)
  x2 = math_ops.multiply(c1, c2)

  cond = control_flow_ops.cond(
      x1 < x2,
      lambda: math_ops.add(c1, c2, name='then'),
      lambda: math_ops.subtract(c1, c2, name='else'),
      name='cond')

  # Grab the tensor created inside the cond's 'then' branch so one of the
  # subscribed tensors lives in a different control flow context.
  branch = ops.get_default_graph().get_tensor_by_name('cond/then:0')

  def context(tensor):
    # Private accessor for the control flow context of the tensor's op.
    return tensor.op._get_control_flow_context()

  self.assertIs(context(x1), context(x2))
  self.assertIsNot(context(x1), context(branch))

  results = []

  def sub(tensor):
    results.append(tensor)
    return tensor

  tensors = [x1, branch, x2]

  # Each injected side-effect op must land in its own subscribed tensor's
  # control flow context.
  subscriptions = subscribe.subscribe(
      tensors, lambda t: script_ops.py_func(sub, [t], [t.dtype]))

  for tensor, subscription in zip(tensors, subscriptions):
    self.assertIs(context(tensor), context(subscription))

  # Verify that sub(x1) and sub(x2) are in the same context.
  self.assertIs(context(subscriptions[0]), context(subscriptions[2]))

  # Verify that sub(x1) and sub(branch) are not.
  self.assertIsNot(context(subscriptions[0]), context(subscriptions[1]))

  with self.cached_session() as sess:
    self.evaluate(cond)

  self.assertEqual(3, len(results))
if __name__ == '__main__':
  # Allow running this test module directly.
  googletest.main()
| |
import os # os.path.isfile()
import re
from PresentationObject import PresentationObject
from font import font # lineBreak()
from qt import *
from qtcanvas import *
# This file predates Python's bool builtins; lowercase truthy/falsey
# aliases are used throughout the presenter codebase.
true = success = 1
false = failure = 0
class img( PresentationObject ):
    """
    <img> is used to draw an image.
    <p>
    <b>Properties:</b>
    <ul>
    <li>
    <i>align</i>: how to horizontally align the image. Currently, the
    only value that will work is "center"/"centre"
    </li>
    <li>
    <i>border</i>: number of pixel border to draw around the image.
    The default value is 0, which means no border.
    </li>
    <li>
    <i>bordercolor</i>/<i>bordercolour</i>: colour of a border, if
    it exists.
    </li>
    <li>
    <i>width</i>/<i>height</i>: dimensions of the image. Possible
    values are a pixel size, a percentage of the image on disk's
    dimensions, or a percentage of the current slide/table cell
    width. i.e., "55", "i50%", "75%". If either of the dimensions
    aren't defined, the image on disk's actual dimensions are used.
    </li>
    <li>
    <i>src</i>: location of the image file on disk. It can be relative
    to the location of the XML file or absolute to the current file
    system. i.e., "imgName.gif", "img/imgName.gif", "../imgName.gif",
    "/home/dmacd/presenter/examples/1/imgName.gif"
    </li>
    </ul>
    """

    def __init__( self, *args ):
        """
        Initiate the container, contents, and properties.
        -*args, arguments for the PresentationObject constructor.
        """
        apply( PresentationObject.__init__, (self,) + args )
        #
        # Ensure this tag doesn't try to take a border="" setting
        # from a parent <table> or <td> instance.
        #
        if not self.hasProperty("border"):
            self.setProperty( "border", 0 )

    def render( self, app, x, y ):
        """
        Draw the image to the app's QCanvas.
        -app, SlideApplication object
        -x, x coordinate to start drawing at
        -y, y coordinate to start drawing at
        Returns x, y coordinates where the rendering left off.
        """
        # Load the Qt objects on first use; resize() is a no-op when the
        # requested dimensions haven't changed.
        if not self.isCached():
            self.cacheImage( app )
        self.resize( app )
        #
        # Align the image as requested.
        #
        if self.getProperty("align") == "centre" or \
           self.getProperty("align") == "center":
            #
            # Skip to the next line if there's text to the left of
            # the current position of the render cursor.
            #
            if x > self.getProperty("marginleft"):
                x, y = font( self ).lineBreak( app, y )
            # Centre on the enclosing table cell if there is one,
            # otherwise on the slide itself.
            from td import td
            if self.hasContainerOfType(td):
                centre = self.getContainerOfType( td ).getCentre()
            else:
                centre = self.getSlide().getCentre()
            x = centre - self.getProperty( "border" ) - \
                self.getImageSprite().boundingRect().width()/2
            self.move( x, y )
            y = y + self.getHeight()
            x, y = font( self ).lineBreak( app, y )
        else:
            #
            # If the image will go over the right margin if drawn
            # at the current location, do a line break and draw
            # the image on the next line.
            #
            width = self.getImageSprite().boundingRect().width()
            if self.beyondMargin(x, width):
                x, y = font( self ).lineBreak( app, y )
            self.move( x, y )
            # Leave a 16-pixel gap after an inline image.
            x = x + self.getWidth() + 16
        # Grow the slide's render height if this image is the tallest
        # object rendered on the current line.
        height = self.getHeight()
        if self.getSlide().getRenderHeight() < height:
            self.getSlide().setRenderHeight( height )
        return x, y

    def move( self, x, y ):
        """
        Move an image to the coordinates x, y. If the image includes a
        border, the border will be drawn at x, y and the image at
        x + borderWidth, y + borderWidth. If the image hasn't been
        attached to a canvas yet (loaded) or x/y are negative
        coordinates, the image will not be moved.
        """
        if x < 0 or y < 0 or not self.isCached():
            return
        if self.hasBorder():
            # Place the four border sprites around the image, then the
            # image itself inset by the border size.
            self.getBorderSprite( "left" ).move( x, y )
            borderX = x + \
                self.getProperty( "border" ) + \
                self.getImageSprite().width()
            self.getBorderSprite( "right" ).move( borderX, y )
            borderX = x + self.getProperty( "border" )
            self.getBorderSprite( "top" ).move( borderX, y )
            borderY = y + \
                self.getProperty( "border" ) + \
                self.getImageSprite().height()
            self.getBorderSprite( "bottom" ).move( borderX,
                                                   borderY )
            borderY = y + self.getProperty( "border" )
            self.getImageSprite().move( borderX, borderY )
        else:
            self.getImageSprite().move( x, y )

    def x( self ):
        """
        The upper-left x-coord corner of the image on the canvas. If
        the image hasn't been attached to a canvas yet (loaded), -1
        will be returned.
        """
        if not self.isCached():
            return -1
        elif self.hasBorder():
            return self.getBorderSprite( "left" ).x()
        else:
            return self.getImageSprite().x()

    def y( self ):
        """
        The upper-left y-coord corner of the image on the canvas. If
        the image hasn't been attached to a canvas yet (loaded), -1
        will be returned.
        """
        if not self.isCached():
            return -1
        elif self.hasBorder():
            return self.getBorderSprite( "left" ).y()
        else:
            return self.getImageSprite().y()

    def getWidth( self ):
        """
        The pixel width that this image takes up on the canvas. If the
        image hasn't been attached to a canvas yet (loaded), -1 will
        be returned.
        """
        if not self.isCached():
            return -1
        elif self.hasBorder():
            width = self.getProperty("border") * 2 + \
                self.getImageSprite().boundingRect().width()
            return width
        else:
            return self.getImageSprite().boundingRect().width()

    def getHeight( self ):
        """
        The pixel height that this image takes up on the canvas. If
        the image hasn't been attached to a canvas yet (loaded), -1
        will be returned.
        """
        if not self.isCached():
            return -1
        elif self.hasBorder():
            height = self.getProperty("border") * 2 + \
                self.getImageSprite().boundingRect().height()
            return height
        else:
            return self.getImageSprite().boundingRect().height()

    def resize( self, app ):
        """
        Re-scale the cached image (and any borders) to the currently
        requested dimensions. Does nothing if the size is unchanged.
        -app, SlideApplication object
        """
        image = self.getImage()
        width, height = self.getSize( image["QImageOriginal"] )
        #
        # Don't re-scale if the requested image dimensions are
        # the same as the current size.
        #
        if width == image["QImage"].width() and \
           height == image["QImage"].height():
            return
        # Drop the old pixmap before scaling a fresh copy from the
        # original so repeated resizes don't degrade quality.
        image["QCanvasPixmapArray"].setImage( 0, None )
        image["QCanvasPixmap"] = image["QImage"] = None
        image["QImage"] = image["QImageOriginal"].smoothScale( width, height )
        image["QCanvasPixmap"] = QCanvasPixmap( image["QImage"] )
        image["QCanvasPixmapArray"].setImage( 0, image["QCanvasPixmap"] )
        image["QCanvasSprite"].show()
        if not self.hasBorder():
            return
        #
        # Resize the borders: vertical (left/right), then horizontal
        # (top/bottom).
        #
        borderSize = self.getProperty( "border" )
        width = borderSize
        height = self.getImageSprite().height() + (borderSize * 2)
        self.__resizeBorder( "left", width, height )
        self.__resizeBorder( "right", width, height )
        width = self.getImageSprite().width()
        height = borderSize
        self.__resizeBorder( "top", width, height )
        self.__resizeBorder( "bottom", width, height )

    def __resizeBorder( self, side, width, height ):
        """
        Re-scale one border side's pixmap to width x height and re-fill
        it with the border colour. (Factored out of resize(), which
        previously repeated this stanza four times with duplicated
        fill() calls.)
        """
        border = self.getBorder( side )
        border["QCanvasPixmapArray"].setImage( 0, None )
        border["QImage"] = border["QImage"].smoothScale( width, height )
        border["QCanvasPixmap"] = QCanvasPixmap( border["QImage"] )
        border["QCanvasPixmap"].fill( self.getBorderColor() )
        border["QCanvasPixmapArray"].setImage( 0, border["QCanvasPixmap"] )

    def getSize( self, qImage ):
        """
        Work out the pixel dimensions this image should be scaled to,
        from the width/height properties and qImage's real size.
        Returns a (width, height) tuple.
        """
        #
        # If the document specifies to resize the image to a certain
        # width and/or height, get these values as integers.
        #
        # When a dimension is given as a percentage, the pixel size of
        # the image is decided its dimensions.
        #
        # i.e., if the image is 200 pixels wide and the property
        # width="50%" is given, the image will be displayed with the
        # width scaled down to 100 pixels.
        #
        width = self.getProperty( "width" )
        height = self.getProperty( "height" )
        maxWidth = self.getMaxWidth()
        try:
            width = int( width )
        except( ValueError, TypeError ):
            if width != None:
                #
                # Percentage of the canvas size.
                # i.e., if the canvas is 1024 pixels wide
                # and width="50%" is defined, the image width
                # will be scaled to 512 pixels, no matter
                # what the image size is.
                #
                if re.search("^([1]?[0-9])?[0-9]%$", width):
                    width = ( float(width[:-1]) / 100 ) * \
                        self.getSlide().getWidth()
                    width = int( width )
                    if width > maxWidth:
                        width = maxWidth
                #
                # Percentage of the image size:
                # i.e., if the image is 200 pixels wide and
                # width="i50%" is defined, the image width will
                # be 100 pixels.
                #
                elif re.search("^i[0-9]+%$", width):
                    width = ( float(width[1:-1]) / 100 ) * \
                        qImage.width()
                    width = int( width )
                    if width > maxWidth:
                        width = maxWidth
                else:
                    width = 0
            else:
                width = 0
        #
        # Same again for the height, relative to the slide height.
        #
        maxHeight = self.getSlide().getHeight()
        try:
            height = int( height )
        except( ValueError, TypeError ):
            if height != None:
                #
                # Percentage of the canvas size.
                # i.e., if the canvas is 768 pixels height
                # and height="50%" is defined, the image height
                # will be scaled to 384 pixels, no matter
                # what the image size is.
                #
                if re.search("^([1]?[0-9])?[0-9]%$", height):
                    height = ( float(height[:-1]) / 100 ) * \
                        maxHeight
                    height = int( height )
                    if height > maxHeight:
                        height = maxHeight
                #
                # Percentage of the image size:
                # i.e., if the image is 200 pixels high and
                # height="i50%" is defined, the image height
                # will be 100 pixels.
                #
                elif re.search("^i[0-9]+%$", height):
                    height = ( float(height[1:-1]) / 100 ) * \
                        qImage.height()
                    height = int( height )
                    if height > maxHeight:
                        height = maxHeight
                else:
                    height = 0
            else:
                height = 0
        #
        # If a dimension isn't being changed, just use the current
        # dimensions of the image. (I thank myself for stating the
        # obvious.)
        #
        if width > 0 and height == 0:
            height = qImage.height()
        elif width == 0 and height > 0:
            width = qImage.width()
        elif width == 0 and height == 0:
            width = qImage.width()
            height = qImage.height()
        #
        # Find the maximum width for the image. It the image is
        # contained within a table cell, the maximum width is the
        # cell's width. Otherwise, the maximum width is the width
        # of the current slide. Leave room for a border on each side.
        #
        maxWidth = maxWidth - ( self.getProperty("border") * 2 )
        if width > maxWidth:
            width = maxWidth
        return width, height

    def readImage( self ):
        """
        Read the image for this object from disk.
        Returns a QImage, or None if nothing at all could be loaded.
        """
        qImage = QImage( self.getImageName() )
        #
        # If the image file couldn't be loaded, try to load the broken
        # image icon and if that can't be read, create a blank image.
        #
        if qImage.isNull() and \
           not qImage.load("images/broken_image.png"):
            if qImage.create(16, 16, 8, 256):
                qImage.fill(0)
            else:
                qImage = None
        return qImage

    def cacheImage( self, app ):
        """
        Load the image from disk, scale it, and build the Qt canvas
        objects needed to display it (plus borders, if any). Marks the
        object cached on success.
        -app, SlideApplication object
        """
        image = self.getImage()
        #
        # Load the image from disk.
        #
        if image["QImageOriginal"] == None:
            image["QImageOriginal"] = self.readImage()
        if image["QImageOriginal"] == None:
            image["QImage"] = None
            image["QCanvasPixmap"] = None
            image["QCanvasPixmapArray"] = None
            image["QCanvasSprite"] = None
            return
        width, height = self.getSize( image["QImageOriginal"] )
        image["QImage"] = image["QImageOriginal"].smoothScale( width,
                                                               height )
        if image["QImage"] == None or image["QImage"].isNull():
            image["QImageOriginal"] = None
            image["QImage"] = None
            image["QCanvasPixmap"] = None
            image["QCanvasPixmapArray"] = None
            image["QCanvasSprite"] = None
            return
        image["QCanvasPixmap"] = QCanvasPixmap( image["QImage"] )
        #
        # Constructor for Qt 2.*
        #
        try:
            image["QCanvasPixmapArray"] = \
                QCanvasPixmapArray( [image["QCanvasPixmap"]],
                                    [QPoint(0, 0)] )
        #
        # Constructor for Qt 3.*+
        #
        except TypeError:
            image["QCanvasPixmapArray"] = QCanvasPixmapArray()
            pixmap = image[ "QCanvasPixmap" ]
            image["QCanvasPixmapArray"].setImage( 0, pixmap )
        canvas = app.getSlide().getCanvas()
        image["QCanvasSprite"] = \
            QCanvasSprite( image["QCanvasPixmapArray"], canvas )
        self.setImage( image )
        #
        # Cache the border QCanvasSprite objects if this image must
        # have a border around it.
        #
        if self.hasBorder():
            self.cacheBorders( app )
        #
        # If the image is being animated onto the page, ensure it
        # displays above the other QCanvasItems.
        #
        if self.isAnimated():
            self.getImageSprite().setZ( self.getImageSprite().z() + 1 )
            if self.hasBorder():
                for sprite in self.getBorderSprites():
                    sprite.setZ( sprite.z() + 1 )
        self.setCached( true )

    def cacheBorders( self, app ):
        """
        Create the four border sprites sized to the cached image.
        -app, SlideApplication object
        """
        borderSize = self.getProperty( "border" )
        #
        # Vertical borders, left & right
        #
        width = borderSize
        height = self.getImageSprite().height() + (borderSize * 2)
        self.createBorder( app, "left", width, height )
        self.createBorder( app, "right", width, height )
        #
        # Horizontal borders, top & bottom
        #
        width = self.getImageSprite().width()
        height = borderSize
        self.createBorder( app, "top", width, height )
        self.createBorder( app, "bottom", width, height )

    def getBorderColor( self ):
        """
        The QColor for the image border: the bordercolor property if
        set, otherwise the inherited color property.
        """
        borderColor = self.getProperty( "bordercolor" )
        if len(borderColor) == 0:
            borderColor = self.getProperty( "color" )
        return QColor( borderColor )

    def createBorder( self, app, side, width, height ):
        """
        Build the Qt objects for one border side and register them on
        this img instance.
        -app, SlideApplication object
        -side, one of "left"/"right"/"top"/"bottom"
        -width/-height, border pixmap dimensions in pixels
        """
        canvas = app.getSlide().getCanvas()
        border = {} # Holds Qt objects for one border side.
        border["QImage"] = QImage( width, height, 8, 256 )
        #
        # Fill the border Pixmap with the border color.
        #
        border["QCanvasPixmap"] = QCanvasPixmap( border["QImage"] )
        border["QCanvasPixmap"].fill( self.getBorderColor() )
        #
        # QCanvasPixmapArray() constructor for Qt < 3.0
        #
        try:
            border["QCanvasPixmapArray"] = \
                QCanvasPixmapArray( [border["QCanvasPixmap"]],
                                    [QPoint(0, 0)] )
        #
        # QCanvasPixmapArray() constructor for Qt >= 3.0
        #
        except TypeError:
            border["QCanvasPixmapArray"] = QCanvasPixmapArray()
            pixmap = border["QCanvasPixmap"]
            border["QCanvasPixmapArray"].setImage( 0, pixmap )
        #
        # Add the border to this img object.
        #
        border["QCanvasSprite"] = \
            QCanvasSprite( border["QCanvasPixmapArray"], canvas )
        self.addBorder( side, border )

    def hasBorder( self ):
        """
        Returns true if this image has a border property.
        """
        return self.getProperty( "border" ) != 0

    def getImageName( self ):
        """
        Resolve the src property to a file path: relative to the
        slideshow's directory first, then as given, falling back to
        the broken-image icon.
        """
        fileName = self.getProperty( "src" )
        dirName = self.getSlideshow().getDirName()
        path = os.path.join( dirName, fileName )
        if not os.path.isabs(fileName) and os.path.isfile(path):
            return path
        elif os.path.isfile(fileName):
            return fileName
        else:
            return "images/broken_image.png"

    def getCanvasItems( self ):
        """
        Return all QCanvasItem objects for this image (the sprite plus
        any border sprites); an empty list if nothing is cached yet.
        """
        items = []
        try:
            items.append( self.getImageSprite() )
            if self.hasBorder():
                items = items + self.getBorderSprites()
        except AttributeError:
            pass
        return items

    def getImageSprite( self ):
        """
        The QCanvasSprite displaying this image (None until cached).
        """
        return self.getImage()["QCanvasSprite"]

    def getImage( self ):
        """
        Lazily create and return the dictionary holding this image's
        Qt objects (original QImage, scaled QImage, pixmap, pixmap
        array, canvas sprite).
        """
        try:
            return self.__imageObjects
        except AttributeError:
            self.__imageObjects = { "QImageOriginal" : None,
                                    "QImage" : None,
                                    "QCanvasPixmap" : None,
                                    "QCanvasPixmapArray" : None,
                                    "QCanvasSprite" : None }
            return self.__imageObjects

    def setImage( self, imageObjects ):
        """
        Replace the dictionary of Qt objects for this image.
        """
        self.__imageObjects = imageObjects

    def addBorder( self, side, border ):
        """
        Register the Qt objects for one border side.
        """
        #
        # Ensure the border dictionary to hold QObject instances for
        # the image borders has been created.
        #
        try:
            self.__borders
        except AttributeError:
            self.__borders = {}
        self.__borders[side] = border

    def getBorder( self, side ):
        """
        The Qt-object dictionary for one border side, or None if that
        side hasn't been created.
        """
        if not self.__borders.has_key(side):
            return None
        else:
            return self.__borders[side]

    def getBorderSprite( self, side ):
        """
        The QCanvasSprite for one border side, or None if absent.
        """
        borderObjects = self.getBorder( side )
        if borderObjects == None:
            return None
        else:
            return borderObjects["QCanvasSprite"]

    def getBorderSprites( self ):
        """
        Return a list of the image border QCanvasSprite objects.
        """
        items = []
        for side in self.__borders.keys():
            items.append( self.__borders[side]["QCanvasSprite"] )
        return items

    def cacheObjects( self ):
        """
        Pre-load the on-disk image so later rendering is faster.
        """
        self.getImage()["QImageOriginal"] = self.readImage()

    def getHtml( self ):
        """
        Get the HTML associated with this object.
        Returns a list of html strings, with each entry being a line
        in a html file.
        """
        width = ""
        if self.hasProperty("width"):
            width = " width=\"%s\"" % \
                self.getProperty( "width" )
            # Image-relative sizes ("i50%") have no HTML equivalent.
            if len(width) > 8 and width[8] == "i":
                width = ""
        height = ""
        if self.hasProperty("height"):
            height = " height=\"%s\"" % \
                self.getProperty( "height" )
            if len(height) > 9 and height[9] == "i":
                height = ""
        border = ""
        if self.hasProperty("border"):
            border = " border=\"%s\"" % \
                self.getProperty( "border" )
        borderColor = ""
        if self.hasProperty("bordercolor"):
            borderColor = " bordercolor=\"%s\"" % \
                self.getProperty( "bordercolor" )
        src = self.getProperty( "src" )
        imgHtml = "<img src=\"%s\"%s%s%s%s alt=\"%s\">" % \
            ( src, width, height, border, borderColor, src )
        if self.getProperty("align") == "center" or \
           self.getProperty("align") == "centre":
            return [ "<center>", imgHtml, "</center>" ]
        else:
            return [ imgHtml ]
| |
# Copyright 2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from botocore.exceptions import ClientError
import boto3
import click
import json
from c7n.credentials import assumed_session
from c7n.utils import get_retry, dumps, chunks
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timedelta
from dateutil.tz import tzutc, tzlocal
from dateutil.parser import parse
import fnmatch
import functools
import jsonschema
import logging
import sys
import time
import os
import operator
from tabulate import tabulate
import yaml
from c7n.executor import MainThreadExecutor
# NOTE(review): presumably makes MainThreadExecutor run submissions inline
# (synchronously) when used as the --debug executor — confirm against
# c7n.executor.
MainThreadExecutor.c7n_async = False

# Module logging: verbose c7n workers, quiet botocore wire noise.
logging.basicConfig(level=logging.INFO)
logging.getLogger('c7n.worker').setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.WARNING)

# Logger shared by every command in this tool.
log = logging.getLogger('c7n-log-exporter')

# JSON Schema for the exporter's YAML config file (checked by `validate`).
# Top level requires 'accounts' and 'destination'; 'subscription' is only
# used by the `subscribe` command.
CONFIG_SCHEMA = {
    '$schema': 'http://json-schema.org/draft-07/schema',
    'id': 'http://schema.cloudcustodian.io/v0/logexporter.json',
    'definitions': {
        # CloudWatch Logs destination that log groups get subscribed to.
        'subscription': {
            'type': 'object',
            'additionalProperties': False,
            'required': ['destination-arn'],
            'properties': {
                'destination-arn': {'type': 'string'},
                'destination-role': {'type': 'string'},
                'managed-policy': {'type': 'boolean'},
                'name': {'type': 'string'},
            },
        },
        # S3 bucket/prefix exports are written to.
        'destination': {
            'type': 'object',
            'additionalProperties': False,
            'required': ['bucket'],
            'properties': {
                'bucket': {'type': 'string'},
                'prefix': {'type': 'string'},
            },
        },
        # One AWS account: a role (or chain of roles to assume in order)
        # plus the log-group name patterns to process.
        'account': {
            'type': 'object',
            'additionalProperties': False,
            'required': ['role', 'groups'],
            'properties': {
                'name': {'type': 'string'},
                'role': {'oneOf': [
                    {'type': 'array', 'items': {'type': 'string'}},
                    {'type': 'string'}]},
                'groups': {
                    'type': 'array', 'items': {'type': 'string'}
                }
            }
        }
    },
    'type': 'object',
    'additionalProperties': False,
    'required': ['accounts', 'destination'],
    'properties': {
        'accounts': {
            'type': 'array',
            'items': {'$ref': '#/definitions/account'}
        },
        'destination': {'$ref': '#/definitions/destination'},
        'subscription': {'$ref': '#/definitions/subscription'}
    }
}
def debug(func):
    """Decorator dropping into pdb post-mortem on unexpected exceptions.

    SystemExit passes straight through so normal CLI exits still work;
    anything else prints a traceback, opens the debugger at the failure
    point, and then re-raises.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except SystemExit:
            raise
        except Exception:
            import pdb
            import sys
            import traceback
            traceback.print_exc()
            pdb.post_mortem(sys.exc_info()[-1])
            raise
    return wrapper
@click.group()
def cli():
    # Root click group; subcommands register themselves via @cli.command().
    # NOTE: the docstring doubles as the CLI help text shown by --help.
    """c7n cloudwatch log group exporter"""
@cli.command()
@click.option('--config', type=click.Path())
def validate(config):
    """validate config file"""
    # Read the raw text first so YAML errors and schema errors can each be
    # reported with the offending file name.
    with open(config) as fh:
        raw = fh.read()

    try:
        data = yaml.safe_load(raw)
    except Exception:
        log.error("config file: %s is not valid yaml", config)
        raise

    try:
        jsonschema.validate(data, CONFIG_SCHEMA)
    except Exception:
        log.error("config file: %s is not valid", config)
        raise

    log.info("config file valid, accounts:%d", len(data['accounts']))
    # Returned so other commands can reuse this via validate.callback().
    return data
def _process_subscribe_group(client, group_name, subscription, distribution):
sub_name = subscription.get('name', 'FlowLogStream')
filters = client.describe_subscription_filters(
logGroupName=group_name).get('subscriptionFilters', ())
if filters:
f = filters.pop()
if (f['filterName'] == sub_name and
f['destinationArn'] == subscription['destination-arn'] and
f['distribution'] == distribution):
return
client.delete_subscription_filter(
logGroupName=group_name, filterName=sub_name)
client.put_subscription_filter(
logGroupName=group_name,
destinationArn=subscription['destination-arn'],
filterName=sub_name,
filterPattern="",
distribution=distribution)
@cli.command()
@click.option('--config', type=click.Path(), required=True)
@click.option('-a', '--accounts', multiple=True)
@click.option('-r', '--region', multiple=False)
@click.option('--merge', is_flag=True, default=False)
@click.option('--debug', is_flag=True, default=False)
def subscribe(config, accounts, region, merge, debug):
    """subscribe accounts log groups to target account log group destination"""
    config = validate.callback(config)
    subscription = config.get('subscription')

    if subscription is None:
        log.error("config file: logs subscription missing")
        sys.exit(1)

    def converge_destination_policy(client, config):
        # Grant every configured account permission to put subscription
        # filters on the destination; with --merge, keep principals already
        # present on the destination's access policy.
        destination_name = subscription['destination-arn'].rsplit(':', 1)[-1]
        try:
            extant_destinations = client.describe_destinations(
                DestinationNamePrefix=destination_name).get('destinations')
        except ClientError:
            log.error("Log group destination not found: %s",
                      subscription['destination-arn'])
            sys.exit(1)

        account_ids = set()
        # BUG FIX: collect account ids from the config's account entries
        # (dicts with a 'role'), not from the CLI `accounts` tuple of
        # account *names* (strings, which have no ['role']).
        for a in config.get('accounts', ()):
            if isinstance(a['role'], list):
                account_ids.add(a['role'][-1].split(':')[4])
            else:
                account_ids.add(a['role'].split(':')[4])

        if merge:
            for d in extant_destinations:
                if d['destinationName'] == destination_name:
                    # BUG FIX: the access policy is a policy *document*;
                    # iterate its Statement list, not the dict's keys.
                    policy = json.loads(d['accessPolicy'])
                    for s in policy.get('Statement', ()):
                        if s['Sid'] == 'CrossAccountDelivery':
                            account_ids.update(s['Principal']['AWS'])

        client.put_destination_policy(
            destinationName=destination_name,
            accessPolicy=json.dumps({
                'Statement': [{
                    'Action': 'logs:PutSubscriptionFilter',
                    'Effect': 'Allow',
                    'Principal': {'AWS': list(account_ids)},
                    'Resource': subscription['destination-arn'],
                    'Sid': 'CrossAccountDelivery'}]}))

    def subscribe_account(t_account, subscription, region):
        session = get_session(t_account['role'], region)
        client = session.client('logs')
        distribution = subscription.get('distribution', 'ByLogStream')

        # BUG FIX: iterate the account handed to this worker (t_account),
        # not the enclosing loop variable `account` — with the thread pool
        # every worker would otherwise process whichever account happened
        # to be submitted last.
        for g in t_account.get('groups'):
            if g.endswith('*'):
                g = g.replace('*', '')
                paginator = client.get_paginator('describe_log_groups')
                results = paginator.paginate(
                    logGroupNamePrefix=g).build_full_result()
                # BUG FIX: build_full_result() returns a dict; the group
                # records live under 'logGroups' (iterating the dict itself
                # yields only its keys).
                for group in results.get('logGroups', ()):
                    _process_subscribe_group(
                        client, group['logGroupName'],
                        subscription, distribution)
            else:
                _process_subscribe_group(client, g, subscription, distribution)

    if subscription.get('managed-policy'):
        if subscription.get('destination-role'):
            session = get_session(subscription['destination-role'], region)
        else:
            session = boto3.Session()
        converge_destination_policy(session.client('logs'), config)

    executor = debug and MainThreadExecutor or ThreadPoolExecutor
    with executor(max_workers=32) as w:
        futures = {}
        for account in config.get('accounts', ()):
            # -a/--accounts limits the run to the named accounts.
            if accounts and account['name'] not in accounts:
                continue
            futures[w.submit(
                subscribe_account, account, subscription, region)] = account

        for f in as_completed(futures):
            account = futures[f]
            if f.exception():
                log.error("Error on account %s err: %s",
                          account['name'], f.exception())
            log.info("Completed %s", account['name'])
@cli.command()
@click.option('--config', type=click.Path(), required=True)
@click.option('--start', required=True)
@click.option('--end')
@click.option('-a', '--accounts', multiple=True)
@click.option('-r', '--region', multiple=False)
@click.option('--debug', is_flag=True, default=False)
def run(config, start, end, accounts, region, debug):
    """run export across accounts and log groups specified in config."""
    config = validate.callback(config)
    destination = config.get('destination')
    # Parse the time bounds; a missing end defaults to "now".
    start = parse(start) if start else start
    end = parse(end) if end else datetime.now()
    # --debug runs everything on the main thread for easier debugging.
    executor = MainThreadExecutor if debug else ThreadPoolExecutor
    with executor(max_workers=32) as pool:
        futures = {}
        for account in config.get('accounts', ()):
            # -a/--accounts limits the run to the named accounts.
            if accounts and account['name'] not in accounts:
                continue
            fut = pool.submit(
                process_account, account, start, end, destination, region)
            futures[fut] = account
        for fut in as_completed(futures):
            account = futures[fut]
            if fut.exception():
                log.error("Error on account %s err: %s",
                          account['name'], fut.exception())
            log.info("Completed %s", account['name'])
def lambdafan(func):
    """simple decorator that will auto fan out async style in lambda.

    outside of lambda, this will invoke synchrously.
    """
    if 'AWS_LAMBDA_FUNCTION_NAME' not in os.environ:
        # Not running inside AWS Lambda: leave the function untouched.
        return func

    @functools.wraps(func)
    def fanout(*args, **kwargs):
        # Re-invoke this same lambda asynchronously ('Event') with a payload
        # naming the target function and carrying its arguments.
        client = boto3.client('lambda')
        client.invoke(
            FunctionName=os.environ['AWS_LAMBDA_FUNCTION_NAME'],
            InvocationType='Event',
            Payload=dumps({
                'event': 'fanout',
                'function': func.__name__,
                'args': args,
                'kwargs': kwargs}),
            Qualifier=os.environ['AWS_LAMBDA_FUNCTION_VERSION'])
    return fanout
@lambdafan
def process_account(account, start, end, destination, region, incremental=True):
    """Export one account's matching log groups to the s3 destination."""
    session = get_session(account['role'], region)
    client = session.client('logs')

    # Enumerate every log group in the account.
    paginator = client.get_paginator('describe_log_groups')
    all_groups = []
    for page in paginator.paginate():
        all_groups.extend(page.get('logGroups', ()))
    group_count = len(all_groups)

    # Narrow to the configured name patterns and the requested date window.
    groups = filter_creation_date(
        filter_group_names(all_groups, account['groups']),
        start, end)
    if incremental:
        # Skip groups with no writes since the export start.
        groups = filter_last_write(client, groups, start)

    account_id = session.client('sts').get_caller_identity()['Account']
    prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id

    log.info("account:%s matched %d groups of %d",
             account.get('name', account_id), len(groups), group_count)

    if not groups:
        log.warning("account:%s no groups matched, all groups \n %s",
                    account.get('name', account_id), "\n ".join(
                        [g['logGroupName'] for g in all_groups]))

    started = time.time()
    for group in groups:
        export.callback(
            group,
            destination['bucket'], prefix,
            group['exportStart'], end, account['role'],
            name=account['name'])
    log.info("account:%s exported %d log groups in time:%0.2f",
             account.get('name') or account_id,
             len(groups), time.time() - started)
def get_session(role, region, session_name="c7n-log-exporter", session=None):
    """Resolve *role* to a boto3 session.

    'self' (or any unrecognized value) yields the default session; a role
    arn string is assumed directly; a list of arns is chained, assuming
    each role in order from the previous session.
    """
    if role == 'self':
        return boto3.Session()
    if isinstance(role, str):
        return assumed_session(role, session_name, region=region)
    if isinstance(role, list):
        chained = None
        for arn in role:
            chained = assumed_session(
                arn, session_name, session=chained, region=region)
        return chained
    return boto3.Session()
def filter_group_names(groups, patterns):
    """Return the subset of *groups* whose log group name matches any of
    the given shell-style wildcard *patterns*.
    """
    def matches(name):
        return any(fnmatch.fnmatch(name, pattern) for pattern in patterns)
    return [g for g in groups if matches(g['logGroupName'])]
def filter_creation_date(groups, start, end):
    """Keep only groups created at or before *end*.

    Each kept group gets an 'exportStart' key set to the later of its
    creation time and *start* (mutates the group dicts in place).
    """
    kept = []
    for group in groups:
        created = datetime.fromtimestamp(group['creationTime'] / 1000.0)
        if created > end:
            continue
        group['exportStart'] = created if created > start else start
        kept.append(group)
    return kept
def filter_last_write(client, groups, start):
    """Filter out log groups whose last write predates *start*.

    Inspects each group's most recently written stream (orderBy
    LastEventTime, descending) and keeps the group when either the stream
    is still empty but was created after *start*, or its last ingestion
    happened after *start*. Groups with no streams are dropped.
    """
    retry = get_retry(('ThrottlingException',))

    def process_group(group_set):
        # Return members of group_set with recent-enough activity.
        matched = []
        for g in group_set:
            streams = retry(
                client.describe_log_streams,
                logGroupName=g['logGroupName'],
                orderBy='LastEventTime',
                limit=1, descending=True)
            if not streams.get('logStreams'):
                continue
            stream = streams['logStreams'][0]
            # Empty stream created after start: nothing ingested yet but
            # the group is active, so keep it.
            if stream['storedBytes'] == 0 and datetime.fromtimestamp(
                    stream['creationTime'] / 1000) > start:
                matched.append(g)
            elif 'lastIngestionTime' in stream and datetime.fromtimestamp(
                    stream['lastIngestionTime'] / 1000) > start:
                matched.append(g)
        return matched

    results = []
    with ThreadPoolExecutor(max_workers=3) as w:
        futures = {}
        for group_set in chunks(groups, 10):
            futures[w.submit(process_group, group_set)] = group_set
        for f in as_completed(futures):
            if f.exception():
                # Bug fix: report the group set that actually failed
                # (futures[f]) instead of the submit loop's last chunk,
                # and skip it (log-and-continue, matching the other
                # fan-out loops in this module) rather than re-raising
                # via f.result().
                log.error(
                    "Error processing groupset:%s error:%s",
                    futures[f],
                    f.exception())
                continue
            results.extend(f.result())
    return results
def filter_extant_exports(client, bucket, prefix, days, start, end=None):
    """Filter *days* down to those not already exported to s3.

    The marker object at *prefix* carries a 'LastExport' tag; any day at
    or before that timestamp has already been exported.

    :param client: boto3 s3 client
    :param bucket: destination bucket name
    :param prefix: marker object key for the log group
    :param days: candidate day datetimes
    :param start: unused, kept for interface compatibility
    :param end: unused, kept for interface compatibility
    :return: sorted list of days still needing export
    """
    # Note: the previous revision computed `end or datetime.now()` here
    # but never used the value; the dead assignment has been removed.
    try:
        tag_set = client.get_object_tagging(
            Bucket=bucket, Key=prefix).get('TagSet', [])
    except ClientError as e:
        # A missing marker object just means nothing was exported yet.
        if e.response['Error']['Code'] != 'NoSuchKey':
            raise
        tag_set = []
    tags = {t['Key']: t['Value'] for t in tag_set}
    if 'LastExport' not in tags:
        return sorted(days)
    last_export = parse(tags['LastExport'])
    if last_export.tzinfo is None:
        # Tag values are written in isoformat UTC; normalize naive parses.
        last_export = last_export.replace(tzinfo=tzutc())
    return [d for d in sorted(days) if d > last_export]
@cli.command()
@click.option('--config', type=click.Path(), required=True)
@click.option('-a', '--accounts', multiple=True)
@click.option('-r', '--region', multiple=False)
def access(config, region, accounts=()):
    """Check iam permissions for log export access in each account"""
    config = validate.callback(config)
    accounts_report = []
    # Worker: resolve the account's principal arn and simulate its IAM
    # policy to see whether it may call logs:CreateExportTask. Results are
    # recorded by mutating the account dict already in accounts_report.
    def check_access(account):
        accounts_report.append(account)
        session = get_session(account['role'], region)
        identity = session.client('sts').get_caller_identity()
        account['account_id'] = identity['Account']
        account.pop('groups')
        account.pop('role')
        client = session.client('iam')
        policy_arn = identity['Arn']
        # Normalize an assumed-role session arn back to the iam role arn.
        if policy_arn.count('/') > 1:
            policy_arn = policy_arn.rsplit('/', 1)[0]
        if ':sts:' in policy_arn:
            policy_arn = policy_arn.replace(':sts', ':iam')
        if ':assumed-role' in policy_arn:
            policy_arn = policy_arn.replace(':assumed-role', ':role')
        evaluation = client.simulate_principal_policy(
            PolicySourceArn=policy_arn,
            ActionNames=['logs:CreateExportTask'])['EvaluationResults']
        account['access'] = evaluation[0]['EvalDecision']
    with ThreadPoolExecutor(max_workers=16) as w:
        futures = {}
        for account in config.get('accounts', ()):
            if accounts and account['name'] not in accounts:
                continue
            futures[w.submit(check_access, account)] = None
        # Drain futures; results accumulate via accounts_report above.
        for f in as_completed(futures):
            pass
    accounts_report.sort(key=operator.itemgetter('access'), reverse=True)
    print(tabulate(accounts_report, headers='keys'))
def GetHumanSize(size, precision=2):
    """Render a byte count as a human-readable string (base 1024).

    :param size: number of bytes
    :param precision: decimal places in the formatted value
    :return: e.g. "2.00 KB"
    """
    # interesting discussion on 1024 vs 1000 as base
    # https://en.wikipedia.org/wiki/Binary_prefix
    suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    suffixIndex = 0
    # Bug fix: cap at the largest known suffix so absurdly large values
    # render as "... PB" instead of raising IndexError.
    while size > 1024 and suffixIndex < len(suffixes) - 1:
        suffixIndex += 1
        size = size / 1024.0
    return "%.*f %s" % (precision, size, suffixes[suffixIndex])
@cli.command()
@click.option('--config', type=click.Path(), required=True)
@click.option('-a', '--accounts', multiple=True)
@click.option('--day', required=True, help="calculate sizes for this day")
@click.option('--group', required=True)
@click.option('--human/--no-human', default=True)
@click.option('-r', '--region', multiple=False)
def size(config, accounts=(), day=None, group=None, human=True, region=None):
    """size of exported records for a given day."""
    config = validate.callback(config)
    destination = config.get('destination')
    client = boto3.Session().client('s3')
    day = parse(day)
    # Worker: total object count and byte size under the account's
    # group/day prefix in the destination bucket.
    def export_size(client, account):
        paginator = client.get_paginator('list_objects_v2')
        count = 0
        size = 0
        session = get_session(account['role'], region)
        account_id = session.client('sts').get_caller_identity()['Account']
        prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id
        prefix = "%s/%s/%s" % (prefix, group, day.strftime("%Y/%m/%d"))
        account['account_id'] = account_id
        for page in paginator.paginate(
            Bucket=destination['bucket'],
            Prefix=prefix):
            for k in page.get('Contents', ()):
                size += k['Size']
                count += 1
        return (count, size)
    total_size = 0
    accounts_report = []
    logging.getLogger('botocore').setLevel(logging.ERROR)
    with ThreadPoolExecutor(max_workers=16) as w:
        futures = {}
        for account in config.get('accounts'):
            if accounts and account['name'] not in accounts:
                continue
            futures[w.submit(export_size, client, account)] = account
        for f in as_completed(futures):
            account = futures[f]
            count, size = f.result()
            # Drop config-only keys so the tabulated report stays compact.
            account.pop('role')
            account.pop('groups')
            total_size += size
            if human:
                account['size'] = GetHumanSize(size)
            else:
                account['size'] = size
            account['count'] = count
            accounts_report.append(account)
    accounts_report.sort(key=operator.itemgetter('count'), reverse=True)
    print(tabulate(accounts_report, headers='keys'))
    log.info("total size:%s", GetHumanSize(total_size))
@cli.command()
@click.option('--config', type=click.Path(), required=True)
@click.option('-g', '--group', required=True)
@click.option('-a', '--accounts', multiple=True)
@click.option('-r', '--region', multiple=False)
@click.option('--dryrun/--no-dryrun', is_flag=True, default=False)
def sync(config, group, accounts=(), dryrun=False, region=None):
    """sync last recorded export to actual
    Use --dryrun to check status.
    """
    config = validate.callback(config)
    destination = config.get('destination')
    client = boto3.Session().client('s3')
    for account in config.get('accounts', ()):
        if accounts and account['name'] not in accounts:
            continue
        session = get_session(account['role'], region)
        account_id = session.client('sts').get_caller_identity()['Account']
        prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id
        prefix = "%s/%s" % (prefix, group)
        # Actual state: most recent (y, m, d) partition present in s3
        # (get_exports returns only the latest by default).
        exports = get_exports(client, destination['bucket'], prefix + "/")
        role = account.pop('role')
        # Derive the account id from the (last) role arn field.
        if isinstance(role, str):
            account['account_id'] = role.split(':')[4]
        else:
            account['account_id'] = role[-1].split(':')[4]
        account.pop('groups')
        if exports:
            last_export = exports.pop()
            account['export'] = last_export
        else:
            account['export'] = 'missing'
            last_export = None
        # Recorded state: the LastExport tag on the marker object.
        try:
            tag_set = client.get_object_tagging(
                Bucket=destination['bucket'], Key=prefix).get('TagSet', [])
        except ClientError:
            tag_set = []
        tags = {t['Key']: t['Value'] for t in tag_set}
        tagged_last_export = None
        if 'LastExport' in tags:
            le = parse(tags['LastExport'])
            tagged_last_export = (le.year, le.month, le.day)
            account['sync'] = tagged_last_export
        else:
            account['sync'] = account['export'] != 'missing' and 'sync' or 'missing'
        # Nothing to reconcile, already in sync, or dry run: skip writes.
        if last_export is None:
            continue
        if tagged_last_export == last_export or account['export'] == 'missing':
            continue
        if dryrun:
            continue
        # Rewrite the marker object and stamp it with the actual last
        # exported day found in s3.
        client.put_object(
            Bucket=destination['bucket'],
            Key=prefix,
            Body=json.dumps({}),
            ACL="bucket-owner-full-control",
            ServerSideEncryption="AES256")
        export_time = datetime.now().replace(tzinfo=tzlocal()).astimezone(tzutc())
        export_time = export_time.replace(
            year=last_export[0], month=last_export[1], day=last_export[2],
            minute=0, second=0, microsecond=0, hour=0)
        client.put_object_tagging(
            Bucket=destination['bucket'], Key=prefix,
            Tagging={
                'TagSet': [{
                    'Key': 'LastExport',
                    'Value': export_time.isoformat()}]})
    accounts_report = []
    for a in config.get('accounts'):
        if accounts and a['name'] not in accounts:
            continue
        # Render (y, m, d) tuples as y/m/d strings for tabulation.
        if isinstance(a['sync'], tuple):
            a['sync'] = "%s/%s/%s" % (a['sync'])
        if isinstance(a['export'], tuple):
            a['export'] = "%s/%s/%s" % (a['export'])
        accounts_report.append(a)
    accounts_report.sort(key=operator.itemgetter('export'), reverse=True)
    print(tabulate(accounts_report, headers='keys'))
@cli.command()
@click.option('--config', type=click.Path(), required=True)
@click.option('-g', '--group', required=True)
@click.option('-a', '--accounts', multiple=True)
@click.option('-r', '--region', multiple=False)
def status(config, group, accounts=(), region=None):
    """report current export state status"""
    # NOTE(review): the --group option is accepted but the prefix below is
    # hard-coded to 'flow-log' — confirm whether it should use `group`.
    config = validate.callback(config)
    destination = config.get('destination')
    client = boto3.Session().client('s3')
    for account in config.get('accounts', ()):
        if accounts and account['name'] not in accounts:
            continue
        session = get_session(account['role'], region)
        account_id = session.client('sts').get_caller_identity()['Account']
        prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id
        prefix = "%s/flow-log" % prefix
        role = account.pop('role')
        # Derive the account id from the (last) role arn field.
        if isinstance(role, str):
            account['account_id'] = role.split(':')[4]
        else:
            account['account_id'] = role[-1].split(':')[4]
        account.pop('groups')
        try:
            tag_set = client.get_object_tagging(
                Bucket=destination['bucket'], Key=prefix).get('TagSet', [])
        except ClientError:
            # No marker object: nothing has been exported for this account.
            account['export'] = 'missing'
            continue
        tags = {t['Key']: t['Value'] for t in tag_set}
        if 'LastExport' not in tags:
            account['export'] = 'empty'
        else:
            last_export = parse(tags['LastExport'])
            account['export'] = last_export.strftime('%Y/%m/%d')
    # Bug fix: compare account *names* against the -a/--accounts filter.
    # The previous `a in accounts` compared account dicts against name
    # strings, so the report was always empty whenever -a was supplied.
    accounts = [a for a in config.get('accounts')
                if a['name'] in accounts or not accounts]
    accounts.sort(key=operator.itemgetter('export'), reverse=True)
    print(tabulate(accounts, headers='keys'))
def get_exports(client, bucket, prefix, latest=True):
    """Find exported day partitions for a given account.

    Walks the s3 "directory" hierarchy <prefix>/<year>/<month>/<day>/ via
    delimiter listings, ignoring non-numeric path components.

    :param client: boto3 s3 client
    :param bucket: bucket to inspect
    :param prefix: key prefix for the account/group
    :param latest: when True, descend only the newest year/month and
        return at most the single newest (year, month, day)
    :return: list of (year, month, day) tuples, newest first
    """
    keys = client.list_objects_v2(
        Bucket=bucket, Prefix=prefix, Delimiter='/').get('CommonPrefixes', [])
    found = []
    years = []
    for y in keys:
        part = y['Prefix'].rsplit('/', 2)[-2]
        if not part.isdigit():
            continue
        years.append(int(part))
    if not years:
        return []
    years.sort(reverse=True)
    if latest:
        years = [years[0]]
    for y in years:
        keys = client.list_objects_v2(
            Bucket=bucket, Prefix="%s/%d/" % (prefix.strip('/'), y),
            Delimiter='/').get('CommonPrefixes', [])
        months = []
        for m in keys:
            part = m['Prefix'].rsplit('/', 2)[-2]
            if not part.isdigit():
                continue
            # (a dead `date_key = (y, month)` assignment was removed here)
            months.append(int(part))
        months.sort(reverse=True)
        if not months:
            continue
        if latest:
            months = [months[0]]
        for m in months:
            # Month directories are zero-padded to two digits on disk.
            keys = client.list_objects_v2(
                Bucket=bucket, Prefix="%s/%d/%s/" % (
                    prefix.strip('/'), y, ('%d' % m).rjust(2, '0')),
                Delimiter='/').get('CommonPrefixes', [])
            for d in keys:
                part = d['Prefix'].rsplit('/', 2)[-2]
                if not part.isdigit():
                    continue
                day = int(part)
                date_key = (y, m, day)
                found.append(date_key)
    found.sort(reverse=True)
    # Bug fix: guard against an empty result (years exist but contain no
    # parseable month/day partitions) before taking the newest entry;
    # previously this raised IndexError.
    if latest and found:
        found = [found[0]]
    return found
@cli.command()
@click.option('--group', required=True, help="log group to export to s3.")
@click.option('--bucket', required=True, help="s3 bucket name export to.")
@click.option('--prefix', help="name of the tag to filter with using get_object_tagging API.")
@click.option('--start', required=True, help="export logs from this date")
@click.option('--end', help="export logs before this date")
@click.option('--role', help="sts role to assume for log group access")
@click.option('--poll-period', type=float, default=300)
@click.option('-r', '--region', multiple=False, help='aws region to use.')
# @click.option('--bucket-role', help="role to scan destination bucket")
# @click.option('--stream-prefix)
@lambdafan
def export(group, bucket, prefix, start, end, role, poll_period=120,
           session=None, name="", region=None):
    """export a given log group to s3"""
    # Accept either datetimes (library callers) or strings (CLI).
    start = start and isinstance(start, str) and parse(start) or start
    # Bug fix: this previously tested isinstance(start, str), so a string
    # --end was never parsed when start was already a datetime.
    end = (end and isinstance(end, str) and
           parse(end) or end or datetime.now())
    start = start.replace(tzinfo=tzlocal()).astimezone(tzutc())
    end = end.replace(tzinfo=tzlocal()).astimezone(tzutc())
    if session is None:
        session = get_session(role, region)
    client = session.client('logs')
    paginator = client.get_paginator('describe_log_groups')
    # Resolve the group name to its full describe_log_groups record.
    for p in paginator.paginate():
        found = False
        for _group in p['logGroups']:
            if _group['logGroupName'] == group:
                group = _group
                found = True
                break
        if found:
            break
    if not found:
        raise ValueError("Log group %s not found." % group)
    if prefix:
        prefix = "%s/%s" % (prefix.rstrip('/'), group['logGroupName'].strip('/'))
    else:
        prefix = group['logGroupName']
    named_group = "%s:%s" % (name, group['logGroupName'])
    log.info(
        "Log exporting group:%s start:%s end:%s bucket:%s prefix:%s size:%s",
        named_group,
        start.strftime('%Y/%m/%d'),
        end.strftime('%Y/%m/%d'),
        bucket,
        prefix,
        group['storedBytes'])
    t = time.time()
    days = [(
        start + timedelta(i)).replace(minute=0, hour=0, second=0, microsecond=0)
        for i in range((end - start).days)]
    day_count = len(days)
    s3 = boto3.Session().client('s3')
    # Skip days already exported per the s3 marker object's tag.
    days = filter_extant_exports(s3, bucket, prefix, days, start, end)
    log.info("Group:%s filtering s3 extant keys from %d to %d start:%s end:%s",
             named_group, day_count, len(days),
             days[0] if days else '', days[-1] if days else '')
    t = time.time()
    retry = get_retry(('SlowDown',))
    for idx, d in enumerate(days):
        date = d.replace(minute=0, microsecond=0, hour=0)
        export_prefix = "%s%s" % (prefix, date.strftime("/%Y/%m/%d"))
        params = {
            'taskName': "%s-%s" % ("c7n-log-exporter",
                                   date.strftime("%Y-%m-%d")),
            'logGroupName': group['logGroupName'],
            'fromTime': int(time.mktime(
                date.replace(
                    minute=0, microsecond=0, hour=0).timetuple()) * 1000),
            'to': int(time.mktime(
                date.replace(
                    minute=59, hour=23, microsecond=0).timetuple()) * 1000),
            'destination': bucket,
            'destinationPrefix': export_prefix
        }
        # if stream_prefix:
        #    params['logStreamPrefix'] = stream_prefix
        # Ensure the marker object exists so tags can be attached to it.
        try:
            s3.head_object(Bucket=bucket, Key=prefix)
        except ClientError as e:
            if e.response['Error']['Code'] != '404':  # Not Found
                raise
            s3.put_object(
                Bucket=bucket,
                Key=prefix,
                Body=json.dumps({}),
                ACL="bucket-owner-full-control",
                ServerSideEncryption="AES256")
        t = time.time()
        counter = 0
        while True:
            counter += 1
            try:
                result = client.create_export_task(**params)
            except ClientError as e:
                # Only a limited number of export tasks may run at once;
                # poll until a slot frees up.
                if e.response['Error']['Code'] == 'LimitExceededException':
                    time.sleep(poll_period)
                    # log every 30m of export waiting
                    if counter % 6 == 0:
                        log.debug(
                            "group:%s day:%s waiting for %0.2f minutes",
                            named_group, d.strftime('%Y-%m-%d'),
                            (counter * poll_period) / 60.0)
                    continue
                raise
            # Record progress on the marker so re-runs can resume.
            retry(
                s3.put_object_tagging,
                Bucket=bucket, Key=prefix,
                Tagging={
                    'TagSet': [{
                        'Key': 'LastExport',
                        'Value': d.isoformat()}]})
            break
        log.info(
            "Log export time:%0.2f group:%s day:%s bucket:%s prefix:%s task:%s",
            time.time() - t,
            named_group,
            d.strftime("%Y-%m-%d"),
            bucket,
            params['destinationPrefix'],
            result['taskId'])
    log.info(
        ("Exported log group:%s time:%0.2f days:%d start:%s"
         " end:%s bucket:%s prefix:%s"),
        named_group,
        time.time() - t,
        len(days),
        start.strftime('%Y/%m/%d'),
        end.strftime('%Y/%m/%d'),
        bucket,
        prefix)
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
| |
"""
Django settings for voteswap project.
Generated by 'django-admin startproject' using Django 1.9.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
from voteswap.cloud_settings import CloudSettings
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = CloudSettings.get('secret_key')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = CloudSettings.get('debug') == "True"
SOCIAL_AUTH_FACEBOOK_KEY = CloudSettings.get('facebook_key')
SOCIAL_AUTH_FACEBOOK_SECRET = CloudSettings.get('facebook_secret')
SOCIAL_AUTH_FACEBOOK_SCOPE = [
"public_profile",
"email",
"user_friends",
]
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id,name,email,friends',
}
# Sendgrid email
SENDGRID_API_KEY = CloudSettings.get('sendgrid_api_key')
if CloudSettings.is_local:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
else:
EMAIL_BACKEND = 'voteswap.mail.backends.sendgrid.SendGridBackend'
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = 'login'
ALLOWED_HOSTS = [
'voteswap.us',
'2016-10-08-dot-voteswap-142902.appspot.com',
'2016-10-09-dot-voteswap-142902.appspot.com',
'2016-10-11-dot-voteswap-142902.appspot.com',
'2016-10-13-dot-voteswap-142902.appspot.com',
'2016-10-15-dot-voteswap-142902.appspot.com',
'2016-10-16-dot-voteswap-142902.appspot.com',
'2016-10-18-dot-voteswap-142902.appspot.com',
'2016-10-20-dot-voteswap-142902.appspot.com',
'2016-11-06-dot-voteswap-142902.appspot.com',
]
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
APPS_DIR = os.path.join(ROOT_DIR, 'voteswap')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social.apps.django_app.default',
'voteswap',
'polling',
'users',
'bootstrap3',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',
]
# See http://psa.matiasaguirre.net/docs/configuration/django.html#django-admin
SOCIAL_AUTH_ADMIN_USER_SEARCH_FIELDS = ['username', 'first_name', 'email']
AUTHENTICATION_BACKENDS = [
'social.backends.facebook.Facebook2OAuth2', # FaceBook OAuth2 Graph2.0
'django.contrib.auth.backends.ModelBackend',
]
ROOT_URLCONF = 'voteswap.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
str(os.path.join(APPS_DIR, 'templates')),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'voteswap.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
if os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine'):
# Running on production App Engine, so use a Google Cloud SQL database.
DATABASES = {
'default': {
'ENGINE': CloudSettings.get('database_engine'),
'HOST': CloudSettings.get('database_host'),
'NAME': CloudSettings.get('database_name'),
'USER': CloudSettings.get('database_user'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': CloudSettings.get('database_engine'),
'HOST': CloudSettings.get('database_host'),
'PORT': CloudSettings.get('database_port'),
'NAME': CloudSettings.get('database_name'),
'USER': CloudSettings.get('database_user'),
'PASSWORD': CloudSettings.get('database_password'),
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[%(asctime)s][%(levelname)s::%(module)s] %(message)s',
'datefmt': '%d/%b/%Y %H:%M:%S',
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': CloudSettings.get('log_level').upper(),
'propagate': True,
},
'voteswap': {
'handlers': ['console'],
'level': CloudSettings.get('log_level').upper(),
'propagate': True,
},
'users': {
'handlers': ['console'],
'level': CloudSettings.get('log_level').upper(),
'propagate': True,
},
'polling': {
'handlers': ['console'],
'level': CloudSettings.get('log_level').upper(),
'propagate': True,
},
},
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # NOQA
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # NOQA
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # NOQA
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # NOQA
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'vendor_static'),
os.path.join(BASE_DIR, 'voteswap', 'static'),
os.path.join(BASE_DIR, 'social', 'exported'),
]
STATIC_ROOT = 'static'
STATIC_URL = '/static/'
| |
from flask import request
from flask_restplus import Namespace, Resource, fields as rest_fields
from cerebralcortex.apiserver import CC
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
from cerebralcortex.kernel.datatypes.datastream import Stream
api = Namespace('stream', description='Data and annotation streams')
# Swagger/marshalling models describing stream metadata and data payloads.
data_descriptor = api.model('DataDescriptor', {
    'type': rest_fields.String(required=True),
    'unit': rest_fields.String(required=True),
    'descriptive_statistic': rest_fields.String(required=False)
})
parameter = api.model('Parameter', {
    'name': rest_fields.String(required=True),
    'value': rest_fields.Arbitrary(required=True)
})
stream_entry = api.model('Stream Entry', {
    'name': rest_fields.String(required=True),
    'identifier': rest_fields.String(required=True)
    # "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
})
execution_context = api.model('Execution Context', {
    'input_parameters': rest_fields.List(rest_fields.Nested(parameter)),
    'input_streams': rest_fields.List(rest_fields.Nested(stream_entry))
})
annotations = api.model('Annotation', {
    'name': rest_fields.String(required=True),
    'identifier': rest_fields.String(required=True)
    # "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
})
stream = api.model('Stream', {
    'identifier': rest_fields.String(required=True),
    # "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
    'owner': rest_fields.String(required=True),
    # "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
    'name': rest_fields.String(required=True),
    'data_descriptor': rest_fields.List(rest_fields.Nested(data_descriptor), required=True),
    'execution_context': rest_fields.Nested(execution_context, required=True),
    'annotations': rest_fields.List(rest_fields.Nested(annotations))
})
# One datapoint: end_time is optional for instantaneous samples.
data_element = api.model('Data Element', {
    'start_time': rest_fields.DateTime(required=True),
    'end_time': rest_fields.DateTime(required=False),
    'sample': rest_fields.List(rest_fields.Raw(required=True))
})
stream_data = api.model('Stream Data', {
    'identifier': rest_fields.String(required=True),
    # "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
    'data': rest_fields.List(rest_fields.Nested(data_element), required=True)
})
# class AnnotationItem(Schema):
# name = fields.String(required=True)
# identifier = fields.UUID(required=True)
#
#
# class StreamEntry(Schema):
# name = fields.String()
# identifier = fields.UUID(required=True)
#
#
# class Parameter(Schema):
# name = fields.String(required=True)
# value = fields.Raw(required=True)
#
#
# class DataDescriptorItem(Schema):
# type = fields.String(required=True)
# unit = fields.String(required=True)
# descriptive_statistic = fields.String()
#
#
# class ExecutionContext(Schema):
# identifier = fields.UUID(required=True)
# input_parameters = fields.List(fields.Nested(Parameter))
# input_streams = fields.List(fields.Nested(StreamEntry))
#
#
# class StreamSchema(Schema):
# identifier = fields.UUID(required=True)
# owner = fields.UUID(required=True)
# name = fields.String(required=True)
# data_descriptor = fields.List(fields.Nested(DataDescriptorItem), required=True)
# execution_context = fields.Dict(fields.Nested(ExecutionContext), required=True)
# annotations = fields.List(fields.Nested(AnnotationItem))
#
# @post_load
# def make_stream(self, data):
# return CC_Stream(**data)
# Static in-memory sample stream metadata served by the endpoints below
# (stand-in until backed by CerebralCortex storage).
STREAMS = [
    {
        "identifier": "31a84a5d-549b-480b-8b6d-9faa898894f0",
        "owner": "a970e186-e960-11e6-bf0e-fe55135034f3",
        "name": "ecg",
        "description": "RAW ecg from AutoSense",
        "data_descriptor": [
            {
                "type": "number",
                "unit": "none"
            }
        ],
        "execution_context": {
        },
        "annotations": [
            {
                "name": "study",
                "identifier": "5b7fb6f3-7bf6-4031-881c-a25faf112dd9"
            },
            {
                "name": "privacy",
                "identifier": "01dd3847-4bae-418b-8fcd-03efc4607df0"
            },
            {
                "name": "access control",
                "identifier": "d1108a2c-fe86-4adc-8d95-f8bcf379955b"
            },
            {
                "name": "platform",
                "identifier": "aec29183-3a45-4ab4-9beb-72475b3cf38a"
            },
            {
                "name": "informed consent",
                "identifier": "aec29183-3a45-4ab4-9beb-72475b3cf38b"
            }
        ]
    },
    {
        "identifier": "8405dc31-fca9-4390-840e-5c888c3dbba0",
        "owner": "a970e186-e960-11e6-bf0e-fe55135034f3",
        "name": "80th_percentile_rr_variance",
        "description": "80th percentile",
        "data_descriptor": [
            {
                "type": "number",
                "unit": "milliseconds",
                "descriptive_statistic": "80th_percentile"
            }
        ],
        "execution_context": {
            "input_parameters": [
                {
                    "name": "window_size",
                    "value": 60.0
                },
                {
                    "name": "window_offset",
                    "value": 60.0
                }
            ],
            "input_streams": [
                {
                    "name": "ecg_rr_interval",
                    "identifier": "5b7fb6f3-7bf6-4031-881c-a25faf112dd1"
                }
            ]
        },
        "annotations": [
            {
                "name": "study",
                "identifier": "5b7fb6f3-7bf6-4031-881c-a25faf112dd9"
            },
            {
                "name": "privacy",
                "identifier": "01dd3847-4bae-418b-8fcd-03efc4607df0"
            },
            {
                "name": "access control",
                "identifier": "d1108a2c-fe86-4adc-8d95-f8bcf379955b"
            },
            {
                "name": "data_source",
                "identifier": "d7cfab9d-c5c1-436f-a145-b03a7e3e1704"
            },
            {
                "name": "platform",
                "identifier": "aec29183-3a45-4ab4-9beb-72475b3cf38a"
            }
        ]
    }
]
# stream_schema = StreamSchema()
@api.route('/')
class APIStreamList(Resource):
    # Collection endpoint for stream metadata.
    @api.doc('list_streams')
    @api.marshal_list_with(stream)
    def get(self):
        """Return metadata for all known streams."""
        return STREAMS
@api.route('/<uuid:identifier>')
@api.param('identifier', 'Stream Details')
@api.response(403, 'Stream parameter error')
@api.response(404, 'Stream not found')
class APIStream(Resource):
    @api.doc('get_stream_by_identifier')
    @api.marshal_with(stream)
    def get(self, identifier):
        """Return metadata for a single stream by uuid; 404 if unknown."""
        for u in STREAMS:
            if u['identifier'] == str(identifier):
                return u
        api.abort(404)
    @api.doc('create_stream_by_identifier')
    @api.marshal_with(stream)
    def post(self, identifier):
        """Create or update a stream from the request's JSON document.

        The url identifier must match the document's own identifier.
        """
        parameters = request.json
        if str(identifier) != parameters['identifier']:
            return api.abort(403, "URL identifier mismatch with JSON document request")
        try:
            new_stream = Stream(identifier=parameters['identifier'],
                                owner=parameters['owner'],
                                name=parameters['name'],
                                description=parameters['description'],
                                data_descriptor=parameters['data_descriptor'],
                                execution_context=parameters['execution_context'],
                                annotations=parameters['annotations'])
            result = CC.update_or_create(new_stream)
            CC.save_stream(result) # TODO: Should the semantics be here or automatically within CC
            return parameters, 201
        except KeyError as e:
            # A required field was missing from the JSON document.
            return api.abort(404, "Named parameter not found: " + str(e))
@api.route('/<uuid:identifier>/data')
@api.param('identifier', 'stream identifier')
@api.response(403, 'Stream parameter error')
@api.response(404, 'Stream not found')
class APIStreamData(Resource):
    @api.doc('put data into stream')
    @api.marshal_with(stream_data)
    def put(self, identifier):
        """Append datapoints from the request JSON to the stream.

        NOTE(review): identifiers are currently checked against a
        hard-coded allow-list — presumably a development stub; confirm
        before production use.
        """
        parameters = request.json
        if str(identifier) not in ['5b7fb6f3-7bf6-4031-881c-a25faf112dd9', '5b7fb6f3-7bf6-4031-881c-a25faf112ddf']:
            return api.abort(404, "Identifier not found")
        if str(identifier) != parameters['identifier']:
            return api.abort(403, "URL identifier mismatch with JSON document request")
        ds = CC.get_stream(identifier)
        try:
            # end_time is optional; omit it for instantaneous samples.
            for di in parameters['data']:  # TODO: Test this loop's performance
                if 'end_time' in di:
                    ds.data.append(DataPoint(start_time=di['start_time'], end_time=di['end_time'], sample=di['sample']))
                else:
                    ds.data.append(DataPoint(start_time=di['start_time'], sample=di['sample']))
        except KeyError as e:
            return api.abort(404, "Named parameter not found: " + str(e))
        return parameters, 201
| |
## @file
# This file is used to define common static strings used by INF/DEC/DSC files
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
##
# Common Definitions
#
# Separator and marker characters shared by the INF/DEC/DSC meta-file parsers.
TAB_SPLIT = '.'
TAB_COMMENT_EDK_START = '/*'
TAB_COMMENT_EDK_END = '*/'
TAB_COMMENT_EDK_SPLIT = '//'
TAB_COMMENT_SPLIT = '#'
TAB_SPECIAL_COMMENT = '##'
TAB_EQUAL_SPLIT = '='
TAB_VALUE_SPLIT = '|'
TAB_COMMA_SPLIT = ','
TAB_SPACE_SPLIT = ' '
TAB_SEMI_COLON_SPLIT = ';'
TAB_SECTION_START = '['
TAB_SECTION_END = ']'
TAB_OPTION_START = '<'
TAB_OPTION_END = '>'
# NOTE(review): TAB_SLASH holds the backslash and TAB_BACK_SLASH the forward
# slash — historical naming, kept because callers depend on these names.
TAB_SLASH = '\\'
TAB_BACK_SLASH = '/'
TAB_STAR = '*'
TAB_LINE_BREAK = '\n'
TAB_PRINTCHAR_VT = '\x0b'
TAB_PRINTCHAR_BS = '\b'
TAB_PRINTCHAR_NUL = '\0'
# PCD datum type names as they appear in meta files.
TAB_UINT8 = 'UINT8'
TAB_UINT16 = 'UINT16'
TAB_UINT32 = 'UINT32'
TAB_UINT64 = 'UINT64'
TAB_VOID = 'VOID*'
TAB_GUID = 'GUID'
TAB_PCD_CLEAN_NUMERIC_TYPES = {TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64}
TAB_PCD_NUMERIC_TYPES = {TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64, 'BOOLEAN'}
TAB_PCD_NUMERIC_TYPES_VOID = {TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64, 'BOOLEAN', TAB_VOID}
TAB_WORKSPACE = '$(WORKSPACE)'
TAB_FV_DIRECTORY = 'FV'
# Architecture identifiers recognized in section headers.
TAB_ARCH_NULL = ''
TAB_ARCH_COMMON = 'COMMON'
TAB_ARCH_IA32 = 'IA32'
TAB_ARCH_X64 = 'X64'
TAB_ARCH_ARM = 'ARM'
TAB_ARCH_EBC = 'EBC'
TAB_ARCH_AARCH64 = 'AARCH64'
ARCH_SET_FULL = {TAB_ARCH_IA32, TAB_ARCH_X64, TAB_ARCH_ARM, TAB_ARCH_EBC, TAB_ARCH_AARCH64, TAB_ARCH_COMMON}
# Supported MODULE_TYPE values for EDK II modules.
SUP_MODULE_BASE = 'BASE'
SUP_MODULE_SEC = 'SEC'
SUP_MODULE_PEI_CORE = 'PEI_CORE'
SUP_MODULE_PEIM = 'PEIM'
SUP_MODULE_DXE_CORE = 'DXE_CORE'
SUP_MODULE_DXE_DRIVER = 'DXE_DRIVER'
SUP_MODULE_DXE_RUNTIME_DRIVER = 'DXE_RUNTIME_DRIVER'
SUP_MODULE_DXE_SAL_DRIVER = 'DXE_SAL_DRIVER'
SUP_MODULE_DXE_SMM_DRIVER = 'DXE_SMM_DRIVER'
SUP_MODULE_UEFI_DRIVER = 'UEFI_DRIVER'
SUP_MODULE_UEFI_APPLICATION = 'UEFI_APPLICATION'
SUP_MODULE_USER_DEFINED = 'USER_DEFINED'
SUP_MODULE_SMM_CORE = 'SMM_CORE'
SUP_MODULE_MM_STANDALONE = 'MM_STANDALONE'
SUP_MODULE_MM_CORE_STANDALONE = 'MM_CORE_STANDALONE'
SUP_MODULE_LIST = [SUP_MODULE_BASE, SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, SUP_MODULE_DXE_CORE, SUP_MODULE_DXE_DRIVER, \
SUP_MODULE_DXE_RUNTIME_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_UEFI_DRIVER, \
SUP_MODULE_UEFI_APPLICATION, SUP_MODULE_USER_DEFINED, SUP_MODULE_SMM_CORE, SUP_MODULE_MM_STANDALONE, SUP_MODULE_MM_CORE_STANDALONE]
# '|'-joined form used when printing the list of valid module types.
SUP_MODULE_LIST_STRING = TAB_VALUE_SPLIT.join(SUP_MODULE_LIST)
SUP_MODULE_SET_PEI = {SUP_MODULE_PEIM, SUP_MODULE_PEI_CORE}
# Legacy EDK (pre-EDK II) COMPONENT_TYPE values.
EDK_COMPONENT_TYPE_LIBRARY = 'LIBRARY'
EDK_COMPONENT_TYPE_SECURITY_CORE = 'SECURITY_CORE'
EDK_COMPONENT_TYPE_PEI_CORE = SUP_MODULE_PEI_CORE
EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER = 'COMBINED_PEIM_DRIVER'
EDK_COMPONENT_TYPE_PIC_PEIM = 'PIC_PEIM'
EDK_COMPONENT_TYPE_RELOCATABLE_PEIM = 'RELOCATABLE_PEIM'
EDK_COMPONENT_TYPE_BS_DRIVER = 'BS_DRIVER'
EDK_COMPONENT_TYPE_RT_DRIVER = 'RT_DRIVER'
EDK_COMPONENT_TYPE_SAL_RT_DRIVER = 'SAL_RT_DRIVER'
EDK_COMPONENT_TYPE_APPLICATION = 'APPLICATION'
EDK_NAME = 'EDK'
EDKII_NAME = 'EDKII'
# Mapping from legacy EDK component types to the EDK II module types above.
COMPONENT_TO_MODULE_MAP_DICT = {
    EDK_COMPONENT_TYPE_LIBRARY : SUP_MODULE_BASE,
    EDK_COMPONENT_TYPE_SECURITY_CORE : SUP_MODULE_SEC,
    EDK_COMPONENT_TYPE_PEI_CORE : SUP_MODULE_PEI_CORE,
    EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER : SUP_MODULE_PEIM,
    EDK_COMPONENT_TYPE_PIC_PEIM : SUP_MODULE_PEIM,
    EDK_COMPONENT_TYPE_RELOCATABLE_PEIM : SUP_MODULE_PEIM,
    "PE32_PEIM" : SUP_MODULE_PEIM,
    EDK_COMPONENT_TYPE_BS_DRIVER : SUP_MODULE_DXE_DRIVER,
    EDK_COMPONENT_TYPE_RT_DRIVER : SUP_MODULE_DXE_RUNTIME_DRIVER,
    EDK_COMPONENT_TYPE_SAL_RT_DRIVER : SUP_MODULE_DXE_SAL_DRIVER,
    EDK_COMPONENT_TYPE_APPLICATION : SUP_MODULE_UEFI_APPLICATION,
    "LOGO" : SUP_MODULE_BASE,
}
# Binary file types as used in [Binaries] sections.
BINARY_FILE_TYPE_FW = 'FW'
BINARY_FILE_TYPE_GUID = 'GUID'
BINARY_FILE_TYPE_PREEFORM = 'PREEFORM'
BINARY_FILE_TYPE_UEFI_APP = 'UEFI_APP'
BINARY_FILE_TYPE_UNI_UI = 'UNI_UI'
BINARY_FILE_TYPE_UNI_VER = 'UNI_VER'
BINARY_FILE_TYPE_LIB = 'LIB'
BINARY_FILE_TYPE_PE32 = 'PE32'
BINARY_FILE_TYPE_PIC = 'PIC'
BINARY_FILE_TYPE_PEI_DEPEX = 'PEI_DEPEX'
BINARY_FILE_TYPE_DXE_DEPEX = 'DXE_DEPEX'
BINARY_FILE_TYPE_SMM_DEPEX = 'SMM_DEPEX'
BINARY_FILE_TYPE_TE = 'TE'
BINARY_FILE_TYPE_VER = 'VER'
BINARY_FILE_TYPE_UI = 'UI'
BINARY_FILE_TYPE_BIN = 'BIN'
BINARY_FILE_TYPE_FV = 'FV'
PLATFORM_COMPONENT_TYPE_LIBRARY_CLASS = 'LIBRARY_CLASS'
PLATFORM_COMPONENT_TYPE_MODULE = 'MODULE'
# Section names with their per-arch variants ('<Name>.<ARCH>').
TAB_SOURCES = 'Sources'
TAB_SOURCES_COMMON = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_SOURCES_IA32 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_IA32
TAB_SOURCES_X64 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_X64
TAB_SOURCES_ARM = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_ARM
TAB_SOURCES_EBC = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_EBC
TAB_SOURCES_AARCH64 = TAB_SOURCES + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_BINARIES = 'Binaries'
TAB_BINARIES_COMMON = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_BINARIES_IA32 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_IA32
TAB_BINARIES_X64 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_X64
TAB_BINARIES_ARM = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_ARM
TAB_BINARIES_EBC = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_EBC
TAB_BINARIES_AARCH64 = TAB_BINARIES + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_INCLUDES = 'Includes'
TAB_INCLUDES_COMMON = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_INCLUDES_IA32 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_IA32
TAB_INCLUDES_X64 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_X64
TAB_INCLUDES_ARM = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_ARM
TAB_INCLUDES_EBC = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_EBC
TAB_INCLUDES_AARCH64 = TAB_INCLUDES + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_GUIDS = 'Guids'
TAB_GUIDS_COMMON = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_GUIDS_IA32 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_IA32
TAB_GUIDS_X64 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_X64
TAB_GUIDS_ARM = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_ARM
TAB_GUIDS_EBC = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_EBC
TAB_GUIDS_AARCH64 = TAB_GUIDS + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PROTOCOLS = 'Protocols'
TAB_PROTOCOLS_COMMON = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PROTOCOLS_IA32 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_IA32
TAB_PROTOCOLS_X64 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_X64
TAB_PROTOCOLS_ARM = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_ARM
TAB_PROTOCOLS_EBC = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_EBC
TAB_PROTOCOLS_AARCH64 = TAB_PROTOCOLS + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PPIS = 'Ppis'
TAB_PPIS_COMMON = TAB_PPIS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PPIS_IA32 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_IA32
TAB_PPIS_X64 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_X64
TAB_PPIS_ARM = TAB_PPIS + TAB_SPLIT + TAB_ARCH_ARM
TAB_PPIS_EBC = TAB_PPIS + TAB_SPLIT + TAB_ARCH_EBC
TAB_PPIS_AARCH64 = TAB_PPIS + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_LIBRARY_CLASSES = 'LibraryClasses'
TAB_LIBRARY_CLASSES_COMMON = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_LIBRARY_CLASSES_IA32 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_IA32
TAB_LIBRARY_CLASSES_X64 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_X64
TAB_LIBRARY_CLASSES_ARM = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_ARM
TAB_LIBRARY_CLASSES_EBC = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_EBC
TAB_LIBRARY_CLASSES_AARCH64 = TAB_LIBRARY_CLASSES + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PACKAGES = 'Packages'
TAB_PACKAGES_COMMON = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PACKAGES_IA32 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_IA32
TAB_PACKAGES_X64 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_X64
TAB_PACKAGES_ARM = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_ARM
TAB_PACKAGES_EBC = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_EBC
TAB_PACKAGES_AARCH64 = TAB_PACKAGES + TAB_SPLIT + TAB_ARCH_AARCH64
# PCD (Platform Configuration Database) section names and type sets.
TAB_PCDS = 'Pcds'
TAB_PCDS_FIXED_AT_BUILD = 'FixedAtBuild'
TAB_PCDS_PATCHABLE_IN_MODULE = 'PatchableInModule'
TAB_PCDS_FEATURE_FLAG = 'FeatureFlag'
TAB_PCDS_DYNAMIC_EX = 'DynamicEx'
TAB_PCDS_DYNAMIC_EX_DEFAULT = 'DynamicExDefault'
TAB_PCDS_DYNAMIC_EX_VPD = 'DynamicExVpd'
TAB_PCDS_DYNAMIC_EX_HII = 'DynamicExHii'
TAB_PCDS_DYNAMIC = 'Dynamic'
TAB_PCDS_DYNAMIC_DEFAULT = 'DynamicDefault'
TAB_PCDS_DYNAMIC_VPD = 'DynamicVpd'
TAB_PCDS_DYNAMIC_HII = 'DynamicHii'
PCD_DYNAMIC_TYPE_SET = {TAB_PCDS_DYNAMIC, TAB_PCDS_DYNAMIC_DEFAULT, TAB_PCDS_DYNAMIC_VPD, TAB_PCDS_DYNAMIC_HII}
PCD_DYNAMIC_EX_TYPE_SET = {TAB_PCDS_DYNAMIC_EX, TAB_PCDS_DYNAMIC_EX_DEFAULT, TAB_PCDS_DYNAMIC_EX_VPD, TAB_PCDS_DYNAMIC_EX_HII}
# leave as a list for order
PCD_TYPE_LIST = [TAB_PCDS_FIXED_AT_BUILD, TAB_PCDS_PATCHABLE_IN_MODULE, TAB_PCDS_FEATURE_FLAG, TAB_PCDS_DYNAMIC, TAB_PCDS_DYNAMIC_EX]
# Full section names: 'Pcds' + storage type (+ '.' + arch for arch variants).
TAB_PCDS_FIXED_AT_BUILD_NULL = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD
TAB_PCDS_FIXED_AT_BUILD_COMMON = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PCDS_FIXED_AT_BUILD_IA32 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_IA32
TAB_PCDS_FIXED_AT_BUILD_X64 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_FIXED_AT_BUILD_ARM = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_FIXED_AT_BUILD_EBC = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_EBC
TAB_PCDS_FIXED_AT_BUILD_AARCH64 = TAB_PCDS + TAB_PCDS_FIXED_AT_BUILD + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PCDS_PATCHABLE_IN_MODULE_NULL = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE
TAB_PCDS_PATCHABLE_IN_MODULE_COMMON = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PCDS_PATCHABLE_IN_MODULE_IA32 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_IA32
TAB_PCDS_PATCHABLE_IN_MODULE_X64 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_PATCHABLE_IN_MODULE_ARM = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_PATCHABLE_IN_MODULE_EBC = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_EBC
TAB_PCDS_PATCHABLE_IN_MODULE_AARCH64 = TAB_PCDS + TAB_PCDS_PATCHABLE_IN_MODULE + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PCDS_FEATURE_FLAG_NULL = TAB_PCDS + TAB_PCDS_FEATURE_FLAG
TAB_PCDS_FEATURE_FLAG_COMMON = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PCDS_FEATURE_FLAG_IA32 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_IA32
TAB_PCDS_FEATURE_FLAG_X64 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_FEATURE_FLAG_ARM = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_FEATURE_FLAG_EBC = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_EBC
TAB_PCDS_FEATURE_FLAG_AARCH64 = TAB_PCDS + TAB_PCDS_FEATURE_FLAG + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PCDS_DYNAMIC_EX_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX
TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_DEFAULT
TAB_PCDS_DYNAMIC_EX_HII_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_HII
TAB_PCDS_DYNAMIC_EX_VPD_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_EX_VPD
TAB_PCDS_DYNAMIC_EX_COMMON = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PCDS_DYNAMIC_EX_IA32 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_IA32
TAB_PCDS_DYNAMIC_EX_X64 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_DYNAMIC_EX_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_DYNAMIC_EX_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_EBC
TAB_PCDS_DYNAMIC_EX_AARCH64 = TAB_PCDS + TAB_PCDS_DYNAMIC_EX + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_PCDS_DYNAMIC_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC
TAB_PCDS_DYNAMIC_DEFAULT_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_DEFAULT
TAB_PCDS_DYNAMIC_HII_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_HII
TAB_PCDS_DYNAMIC_VPD_NULL = TAB_PCDS + TAB_PCDS_DYNAMIC_VPD
TAB_PCDS_DYNAMIC_COMMON = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_COMMON
TAB_PCDS_DYNAMIC_IA32 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_IA32
TAB_PCDS_DYNAMIC_X64 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_X64
TAB_PCDS_DYNAMIC_ARM = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_ARM
TAB_PCDS_DYNAMIC_EBC = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_EBC
TAB_PCDS_DYNAMIC_AARCH64 = TAB_PCDS + TAB_PCDS_DYNAMIC + TAB_SPLIT + TAB_ARCH_AARCH64
# PCD names controlling the fixed-address loading feature.
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE = 'PcdLoadFixAddressPeiCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE = 'PcdLoadFixAddressBootTimeCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE = 'PcdLoadFixAddressRuntimeCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE = 'PcdLoadFixAddressSmmCodePageNumber'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE = 'UINT32'
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET = {TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE, \
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE, \
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE, \
TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE}
## The mapping dictionary from datum type to its maximum number.
MAX_VAL_TYPE = {"BOOLEAN":0x01, TAB_UINT8:0xFF, TAB_UINT16:0xFFFF, TAB_UINT32:0xFFFFFFFF, TAB_UINT64:0xFFFFFFFFFFFFFFFF}
## The mapping dictionary from datum type to size string.
MAX_SIZE_TYPE = {"BOOLEAN":1, TAB_UINT8:1, TAB_UINT16:2, TAB_UINT32:4, TAB_UINT64:8}
# [Depex] section names and remaining DSC/INF section names.
TAB_DEPEX = 'Depex'
TAB_DEPEX_COMMON = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_COMMON
TAB_DEPEX_IA32 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_IA32
TAB_DEPEX_X64 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_X64
TAB_DEPEX_ARM = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_ARM
TAB_DEPEX_EBC = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_EBC
TAB_DEPEX_AARCH64 = TAB_DEPEX + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_SKUIDS = 'SkuIds'
TAB_DEFAULT_STORES = 'DefaultStores'
TAB_DEFAULT_STORES_DEFAULT = 'STANDARD'
TAB_LIBRARIES = 'Libraries'
TAB_LIBRARIES_COMMON = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_COMMON
TAB_LIBRARIES_IA32 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_IA32
TAB_LIBRARIES_X64 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_X64
TAB_LIBRARIES_ARM = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_ARM
TAB_LIBRARIES_EBC = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_EBC
TAB_LIBRARIES_AARCH64 = TAB_LIBRARIES + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_COMPONENTS = 'Components'
TAB_COMPONENTS_COMMON = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_COMMON
TAB_COMPONENTS_IA32 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_IA32
TAB_COMPONENTS_X64 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_X64
TAB_COMPONENTS_ARM = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_ARM
TAB_COMPONENTS_EBC = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_EBC
TAB_COMPONENTS_AARCH64 = TAB_COMPONENTS + TAB_SPLIT + TAB_ARCH_AARCH64
TAB_COMPONENTS_SOURCE_OVERRIDE_PATH = 'SOURCE_OVERRIDE_PATH'
TAB_BUILD_OPTIONS = 'BuildOptions'
TAB_DEFINE = 'DEFINE'
TAB_NMAKE = 'Nmake'
TAB_USER_EXTENSIONS = 'UserExtensions'
TAB_INCLUDE = '!include'
TAB_DEFAULT = 'DEFAULT'
TAB_COMMON = 'COMMON'
#
# Common Define
#
TAB_COMMON_DEFINES = 'Defines'
#
# Inf Definitions
#
TAB_INF_DEFINES = TAB_COMMON_DEFINES
TAB_INF_DEFINES_INF_VERSION = 'INF_VERSION'
TAB_INF_DEFINES_BASE_NAME = 'BASE_NAME'
TAB_INF_DEFINES_FILE_GUID = 'FILE_GUID'
TAB_INF_DEFINES_MODULE_TYPE = 'MODULE_TYPE'
TAB_INF_DEFINES_EFI_SPECIFICATION_VERSION = 'EFI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_UEFI_SPECIFICATION_VERSION = 'UEFI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_PI_SPECIFICATION_VERSION = 'PI_SPECIFICATION_VERSION'
TAB_INF_DEFINES_EDK_RELEASE_VERSION = 'EDK_RELEASE_VERSION'
TAB_INF_DEFINES_BINARY_MODULE = 'BINARY_MODULE'
TAB_INF_DEFINES_LIBRARY_CLASS = 'LIBRARY_CLASS'
TAB_INF_DEFINES_COMPONENT_TYPE = 'COMPONENT_TYPE'
TAB_INF_DEFINES_MAKEFILE_NAME = 'MAKEFILE_NAME'
TAB_INF_DEFINES_DPX_SOURCE = 'DPX_SOURCE'
TAB_INF_DEFINES_BUILD_NUMBER = 'BUILD_NUMBER'
TAB_INF_DEFINES_BUILD_TYPE = 'BUILD_TYPE'
TAB_INF_DEFINES_FFS_EXT = 'FFS_EXT'
TAB_INF_DEFINES_FV_EXT = 'FV_EXT'
TAB_INF_DEFINES_SOURCE_FV = 'SOURCE_FV'
TAB_INF_DEFINES_VERSION_NUMBER = 'VERSION_NUMBER'
TAB_INF_DEFINES_VERSION = 'VERSION' # for Edk inf, the same as VERSION_NUMBER
TAB_INF_DEFINES_VERSION_STRING = 'VERSION_STRING'
TAB_INF_DEFINES_PCD_IS_DRIVER = 'PCD_IS_DRIVER'
TAB_INF_DEFINES_TIANO_EDK_FLASHMAP_H = 'TIANO_EDK_FLASHMAP_H'
TAB_INF_DEFINES_ENTRY_POINT = 'ENTRY_POINT'
TAB_INF_DEFINES_UNLOAD_IMAGE = 'UNLOAD_IMAGE'
TAB_INF_DEFINES_CONSTRUCTOR = 'CONSTRUCTOR'
TAB_INF_DEFINES_DESTRUCTOR = 'DESTRUCTOR'
TAB_INF_DEFINES_DEFINE = 'DEFINE'
TAB_INF_DEFINES_SPEC = 'SPEC'
TAB_INF_DEFINES_CUSTOM_MAKEFILE = 'CUSTOM_MAKEFILE'
TAB_INF_DEFINES_MACRO = '__MACROS__'
TAB_INF_DEFINES_SHADOW = 'SHADOW'
# PCD section names used inside INF files.
TAB_INF_FIXED_PCD = 'FixedPcd'
TAB_INF_FEATURE_PCD = 'FeaturePcd'
TAB_INF_PATCH_PCD = 'PatchPcd'
TAB_INF_PCD = 'Pcd'
TAB_INF_PCD_EX = 'PcdEx'
# Usage keywords from INF special comments.
TAB_INF_USAGE_PRO = 'PRODUCES'
TAB_INF_USAGE_SOME_PRO = 'SOMETIMES_PRODUCES'
TAB_INF_USAGE_CON = 'CONSUMES'
TAB_INF_USAGE_SOME_CON = 'SOMETIMES_CONSUMES'
TAB_INF_USAGE_NOTIFY = 'NOTIFY'
TAB_INF_USAGE_TO_START = 'TO_START'
TAB_INF_USAGE_BY_START = 'BY_START'
TAB_INF_GUIDTYPE_EVENT = 'Event'
TAB_INF_GUIDTYPE_FILE = 'File'
TAB_INF_GUIDTYPE_FV = 'FV'
TAB_INF_GUIDTYPE_GUID = 'GUID'
TAB_INF_GUIDTYPE_HII = 'HII'
TAB_INF_GUIDTYPE_HOB = 'HOB'
TAB_INF_GUIDTYPE_ST = 'SystemTable'
TAB_INF_GUIDTYPE_TSG = 'TokenSpaceGuid'
TAB_INF_GUIDTYPE_VAR = 'Variable'
TAB_INF_GUIDTYPE_PROTOCOL = 'PROTOCOL'
TAB_INF_GUIDTYPE_PPI = 'PPI'
TAB_INF_USAGE_UNDEFINED = 'UNDEFINED'
#
# Dec Definitions
#
TAB_DEC_DEFINES = TAB_COMMON_DEFINES
TAB_DEC_DEFINES_DEC_SPECIFICATION = 'DEC_SPECIFICATION'
TAB_DEC_DEFINES_PACKAGE_NAME = 'PACKAGE_NAME'
TAB_DEC_DEFINES_PACKAGE_GUID = 'PACKAGE_GUID'
TAB_DEC_DEFINES_PACKAGE_VERSION = 'PACKAGE_VERSION'
TAB_DEC_DEFINES_PKG_UNI_FILE = 'PKG_UNI_FILE'
#
# Dsc Definitions
#
TAB_DSC_DEFINES = TAB_COMMON_DEFINES
TAB_DSC_DEFINES_PLATFORM_NAME = 'PLATFORM_NAME'
TAB_DSC_DEFINES_PLATFORM_GUID = 'PLATFORM_GUID'
TAB_DSC_DEFINES_PLATFORM_VERSION = 'PLATFORM_VERSION'
TAB_DSC_DEFINES_DSC_SPECIFICATION = 'DSC_SPECIFICATION'
TAB_DSC_DEFINES_OUTPUT_DIRECTORY = 'OUTPUT_DIRECTORY'
TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES = 'SUPPORTED_ARCHITECTURES'
TAB_DSC_DEFINES_BUILD_TARGETS = 'BUILD_TARGETS'
TAB_DSC_DEFINES_SKUID_IDENTIFIER = 'SKUID_IDENTIFIER'
TAB_DSC_DEFINES_PCD_INFO_GENERATION = 'PCD_INFO_GENERATION'
TAB_DSC_DEFINES_PCD_VAR_CHECK_GENERATION = 'PCD_VAR_CHECK_GENERATION'
TAB_DSC_DEFINES_FLASH_DEFINITION = 'FLASH_DEFINITION'
TAB_DSC_DEFINES_BUILD_NUMBER = 'BUILD_NUMBER'
TAB_DSC_DEFINES_MAKEFILE_NAME = 'MAKEFILE_NAME'
TAB_DSC_DEFINES_BS_BASE_ADDRESS = 'BsBaseAddress'
TAB_DSC_DEFINES_RT_BASE_ADDRESS = 'RtBaseAddress'
TAB_DSC_DEFINES_RFC_LANGUAGES = 'RFC_LANGUAGES'
TAB_DSC_DEFINES_ISO_LANGUAGES = 'ISO_LANGUAGES'
TAB_DSC_DEFINES_DEFINE = 'DEFINE'
TAB_DSC_DEFINES_VPD_TOOL_GUID = 'VPD_TOOL_GUID'
TAB_FIX_LOAD_TOP_MEMORY_ADDRESS = 'FIX_LOAD_TOP_MEMORY_ADDRESS'
TAB_DSC_DEFINES_EDKGLOBAL = 'EDK_GLOBAL'
TAB_DSC_PREBUILD = 'PREBUILD'
TAB_DSC_POSTBUILD = 'POSTBUILD'
#
# TargetTxt Definitions
#
TAB_TAT_DEFINES_ACTIVE_PLATFORM = 'ACTIVE_PLATFORM'
TAB_TAT_DEFINES_ACTIVE_MODULE = 'ACTIVE_MODULE'
TAB_TAT_DEFINES_TOOL_CHAIN_CONF = 'TOOL_CHAIN_CONF'
TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER = 'MAX_CONCURRENT_THREAD_NUMBER'
TAB_TAT_DEFINES_TARGET = 'TARGET'
TAB_TAT_DEFINES_TOOL_CHAIN_TAG = 'TOOL_CHAIN_TAG'
TAB_TAT_DEFINES_TARGET_ARCH = 'TARGET_ARCH'
TAB_TAT_DEFINES_BUILD_RULE_CONF = "BUILD_RULE_CONF"
#
# ToolDef Definitions
#
TAB_TOD_DEFINES_TARGET = 'TARGET'
TAB_TOD_DEFINES_TOOL_CHAIN_TAG = 'TOOL_CHAIN_TAG'
TAB_TOD_DEFINES_TARGET_ARCH = 'TARGET_ARCH'
TAB_TOD_DEFINES_COMMAND_TYPE = 'COMMAND_TYPE'
TAB_TOD_DEFINES_FAMILY = 'FAMILY'
TAB_TOD_DEFINES_BUILDRULEFAMILY = 'BUILDRULEFAMILY'
TAB_TOD_DEFINES_BUILDRULEORDER = 'BUILDRULEORDER'
#
# Conditional Statements
#
TAB_IF = '!if'
TAB_END_IF = '!endif'
TAB_ELSE_IF = '!elseif'
TAB_ELSE = '!else'
TAB_IF_DEF = '!ifdef'
TAB_IF_N_DEF = '!ifndef'
TAB_IF_EXIST = '!if exist'
TAB_ERROR = '!error'
#
# Unknown section
#
TAB_UNKNOWN = 'UNKNOWN'
#
# Build database path
#
DATABASE_PATH = ":memory:" #"BuildDatabase.db"
# used by ECC
MODIFIER_SET = {'IN', 'OUT', 'OPTIONAL', 'UNALIGNED', 'EFI_RUNTIMESERVICE', 'EFI_BOOTSERVICE', 'EFIAPI'}
# Dependency Opcodes
DEPEX_OPCODE_BEFORE = "BEFORE"
DEPEX_OPCODE_AFTER = "AFTER"
DEPEX_OPCODE_PUSH = "PUSH"
DEPEX_OPCODE_AND = "AND"
DEPEX_OPCODE_OR = "OR"
DEPEX_OPCODE_NOT = "NOT"
DEPEX_OPCODE_END = "END"
DEPEX_OPCODE_SOR = "SOR"
DEPEX_OPCODE_TRUE = "TRUE"
DEPEX_OPCODE_FALSE = "FALSE"
# Dependency Expression
DEPEX_SUPPORTED_OPCODE_SET = {"BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "END", "SOR", "TRUE", "FALSE", '(', ')'}
# File-class names used by the build rule machinery.
TAB_STATIC_LIBRARY = "STATIC-LIBRARY-FILE"
TAB_DYNAMIC_LIBRARY = "DYNAMIC-LIBRARY-FILE"
TAB_FRAMEWORK_IMAGE = "EFI-IMAGE-FILE"
TAB_C_CODE_FILE = "C-CODE-FILE"
TAB_C_HEADER_FILE = "C-HEADER-FILE"
TAB_UNICODE_FILE = "UNICODE-TEXT-FILE"
TAB_IMAGE_FILE = "IMAGE-DEFINITION-FILE"
TAB_DEPENDENCY_EXPRESSION_FILE = "DEPENDENCY-EXPRESSION-FILE"
TAB_UNKNOWN_FILE = "UNKNOWN-TYPE-FILE"
TAB_DEFAULT_BINARY_FILE = "_BINARY_FILE_"
TAB_OBJECT_FILE = "OBJECT-FILE"
TAB_VFR_FILE = 'VISUAL-FORM-REPRESENTATION-FILE'
# used by BRG
TAB_BRG_PCD = 'PCD'
TAB_BRG_LIBRARY = 'Library'
#
# Build Rule File Version Definition
#
TAB_BUILD_RULE_VERSION = "build_rule_version"
# section name for PCDs
PCDS_DYNAMIC_DEFAULT = "PcdsDynamicDefault"
PCDS_DYNAMIC_VPD = "PcdsDynamicVpd"
PCDS_DYNAMIC_HII = "PcdsDynamicHii"
PCDS_DYNAMICEX_DEFAULT = "PcdsDynamicExDefault"
PCDS_DYNAMICEX_VPD = "PcdsDynamicExVpd"
PCDS_DYNAMICEX_HII = "PcdsDynamicExHii"
SECTIONS_HAVE_ITEM_PCD_SET = {PCDS_DYNAMIC_DEFAULT.upper(), PCDS_DYNAMIC_VPD.upper(), PCDS_DYNAMIC_HII.upper(), \
PCDS_DYNAMICEX_DEFAULT.upper(), PCDS_DYNAMICEX_VPD.upper(), PCDS_DYNAMICEX_HII.upper()}
# Section allowed to have items after arch
SECTIONS_HAVE_ITEM_AFTER_ARCH_SET = {TAB_LIBRARY_CLASSES.upper(), TAB_DEPEX.upper(), TAB_USER_EXTENSIONS.upper(),
                                     PCDS_DYNAMIC_DEFAULT.upper(),
                                     PCDS_DYNAMIC_VPD.upper(),
                                     PCDS_DYNAMIC_HII.upper(),
                                     PCDS_DYNAMICEX_DEFAULT.upper(),
                                     PCDS_DYNAMICEX_VPD.upper(),
                                     PCDS_DYNAMICEX_HII.upper(),
                                     TAB_BUILD_OPTIONS.upper(),
                                     TAB_INCLUDES.upper()}
#
# pack codes as used in PcdDb and elsewhere
#
PACK_PATTERN_GUID = '=LHHBBBBBBBB'
# struct format codes by byte size; 16 (a GUID) is packed via PACK_PATTERN_GUID,
# hence the empty string.
PACK_CODE_BY_SIZE = {8:'=Q',
                     4:'=L',
                     2:'=H',
                     1:'=B',
                     0:'=B',
                     16:""}
TAB_COMPILER_MSFT = 'MSFT'
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
# Generic return type for operations; ClsType is the optional callback that
# receives (pipeline response, deserialized model, response headers).
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ScopeRoleAssignmentApprovalStepOperations:
    """ScopeRoleAssignmentApprovalStepOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.authorization.v2021_01_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def get_by_id(
        self,
        approval_id: str,
        stage_id: str,
        scope: str,
        **kwargs: Any
    ) -> "_models.RoleAssignmentApprovalStep":
        """Get role assignment approval.
        :param approval_id: The id of the role assignment approval.
        :type approval_id: str
        :param stage_id: The id of the role assignment approval stage.
        :type stage_id: str
        :param scope: The scope of the resource.
        :type scope: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RoleAssignmentApprovalStep, or the result of cls(response)
        :rtype: ~azure.mgmt.authorization.v2021_01_01_preview.models.RoleAssignmentApprovalStep
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RoleAssignmentApprovalStep"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.get_by_id.metadata['url']  # type: ignore
        path_format_arguments = {
            'approvalId': self._serialize.url("approval_id", approval_id, 'str'),
            'stageId': self._serialize.url("stage_id", stage_id, 'str'),
            # skip_quote=True: scope is an ARM resource path containing '/' separators.
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only success status for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('RoleAssignmentApprovalStep', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_by_id.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleAssignmentApprovals/{approvalId}/stages/{stageId}'}  # type: ignore
    async def patch(
        self,
        approval_id: str,
        stage_id: str,
        scope: str,
        properties: "_models.RoleAssignmentApprovalStepProperties",
        **kwargs: Any
    ) -> "_models.RoleAssignmentApprovalStep":
        """Record a decision.
        :param approval_id: The id of the role assignment approval.
        :type approval_id: str
        :param stage_id: The id of the role assignment approval stage.
        :type stage_id: str
        :param scope: The scope of the resource.
        :type scope: str
        :param properties: Role Assignment Approval stage properties to patch.
        :type properties: ~azure.mgmt.authorization.v2021_01_01_preview.models.RoleAssignmentApprovalStepProperties
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RoleAssignmentApprovalStep, or the result of cls(response)
        :rtype: ~azure.mgmt.authorization.v2021_01_01_preview.models.RoleAssignmentApprovalStep
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RoleAssignmentApprovalStep"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.patch.metadata['url']  # type: ignore
        path_format_arguments = {
            'approvalId': self._serialize.url("approval_id", approval_id, 'str'),
            'stageId': self._serialize.url("stage_id", stage_id, 'str'),
            # skip_quote=True: scope is an ARM resource path containing '/' separators.
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(properties, 'RoleAssignmentApprovalStepProperties')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('RoleAssignmentApprovalStep', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    patch.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleAssignmentApprovals/{approvalId}/stages/{stageId}'}  # type: ignore
    async def put(
        self,
        approval_id: str,
        stage_id: str,
        scope: str,
        properties: "_models.RoleAssignmentApprovalStepProperties",
        **kwargs: Any
    ) -> "_models.RoleAssignmentApprovalStep":
        """Record a decision.
        :param approval_id: The id of the role assignment approval.
        :type approval_id: str
        :param stage_id: The id of the role assignment approval stage.
        :type stage_id: str
        :param scope: The scope of the resource.
        :type scope: str
        :param properties: Role Assignment Approval stage properties to put.
        :type properties: ~azure.mgmt.authorization.v2021_01_01_preview.models.RoleAssignmentApprovalStepProperties
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RoleAssignmentApprovalStep, or the result of cls(response)
        :rtype: ~azure.mgmt.authorization.v2021_01_01_preview.models.RoleAssignmentApprovalStep
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RoleAssignmentApprovalStep"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.put.metadata['url']  # type: ignore
        path_format_arguments = {
            'approvalId': self._serialize.url("approval_id", approval_id, 'str'),
            'stageId': self._serialize.url("stage_id", stage_id, 'str'),
            # skip_quote=True: scope is an ARM resource path containing '/' separators.
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(properties, 'RoleAssignmentApprovalStepProperties')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('RoleAssignmentApprovalStep', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    put.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/roleAssignmentApprovals/{approvalId}/stages/{stageId}'}  # type: ignore
| |
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that pymongo is thread safe."""
import threading
from test import (client_context,
db_user,
db_pwd,
IntegrationTest,
unittest)
from test.utils import rs_or_single_client_noauth
from test.utils import frequent_thread_switches, joinall
from pymongo.errors import OperationFailure
@client_context.require_connection
def setUpModule():
    # Module-level gate: the decorator skips this whole test module when no
    # MongoDB server is reachable; the body itself has nothing to do.
    pass
class AutoAuthenticateThreads(threading.Thread):
    """Thread that performs ``num`` insert/find round-trips on a collection.

    ``success`` flips to True only after every operation completed, so the
    test can verify that each thread authenticated automatically.
    """

    def __init__(self, collection, num):
        threading.Thread.__init__(self)
        self.coll = collection
        self.num = num
        self.success = False
        # Daemonize so a hung thread cannot keep the test process alive.
        # Attribute assignment replaces the deprecated setDaemon() call
        # (DeprecationWarning since Python 3.10).
        self.daemon = True

    def run(self):
        for i in range(self.num):
            self.coll.insert_one({'num': i})
            self.coll.find_one({'num': i})
        self.success = True
class SaveAndFind(threading.Thread):
    """Thread that sums the ``x`` field over a full collection scan.

    Expects the fixture of 1000 documents {"x": 0..999}, whose sum is
    499500; sets ``passed`` only when the scan saw all of them.
    """

    def __init__(self, collection):
        threading.Thread.__init__(self)
        self.collection = collection
        # Attribute assignment replaces the deprecated setDaemon() call.
        self.daemon = True
        self.passed = False

    def run(self):
        # Renamed from `sum`, which shadowed the builtin of the same name.
        total = 0
        for document in self.collection.find():
            total += document["x"]
        assert total == 499500, "sum was %d not 499500" % total
        self.passed = True
class Insert(threading.Thread):
    """Thread that inserts ``n`` times and checks error expectations.

    When ``expect_exception`` is True every insert must fail (e.g. due to
    a unique index); when False no insert may fail.
    """

    def __init__(self, collection, n, expect_exception):
        threading.Thread.__init__(self)
        self.collection = collection
        self.n = n
        self.expect_exception = expect_exception
        # Attribute assignment replaces the deprecated setDaemon() call.
        self.daemon = True

    def run(self):
        for _ in range(self.n):
            error = True
            try:
                self.collection.insert_one({"test": "insert"})
                error = False
            except Exception:
                # Narrowed from a bare ``except:`` so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                if not self.expect_exception:
                    raise
            if self.expect_exception:
                assert error
class Update(threading.Thread):
    """Thread that updates ``n`` times and checks error expectations.

    When ``expect_exception`` is True every update must fail (e.g. due to
    a unique index); when False no update may fail.
    """

    def __init__(self, collection, n, expect_exception):
        threading.Thread.__init__(self)
        self.collection = collection
        self.n = n
        self.expect_exception = expect_exception
        # Attribute assignment replaces the deprecated setDaemon() call.
        self.daemon = True

    def run(self):
        for _ in range(self.n):
            error = True
            try:
                self.collection.update_one({"test": "unique"},
                                           {"$set": {"test": "update"}})
                error = False
            except Exception:
                # Narrowed from a bare ``except:`` so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                if not self.expect_exception:
                    raise
            if self.expect_exception:
                assert error
class Disconnect(threading.Thread):
    """Thread that calls ``client.close()`` ``n`` times in a row."""

    def __init__(self, client, n):
        super(Disconnect, self).__init__()
        self.client = client
        self.n = n
        self.passed = False

    def run(self):
        remaining = self.n
        while remaining > 0:
            self.client.close()
            remaining -= 1
        self.passed = True
class TestThreads(IntegrationTest):
    """Exercise concurrent collection access from many worker threads."""

    def setUp(self):
        # Fresh handle on the shared test database for each test.
        self.db = client_context.rs_or_standalone_client.pymongo_test

    def test_threading(self):
        # Ten readers scanning the same 1000-document collection in parallel.
        self.db.drop_collection("test")
        self.db.test.insert_many([{"x": i} for i in range(1000)])

        threads = []
        for i in range(10):
            t = SaveAndFind(self.db.test)
            t.start()
            threads.append(t)

        joinall(threads)

    def test_safe_insert(self):
        # test1 accepts duplicates; test2 has a unique index so every
        # duplicate insert must raise.
        self.db.drop_collection("test1")
        self.db.test1.insert_one({"test": "insert"})
        self.db.drop_collection("test2")
        self.db.test2.insert_one({"test": "insert"})
        self.db.test2.create_index("test", unique=True)
        self.db.test2.find_one()

        okay = Insert(self.db.test1, 2000, False)
        error = Insert(self.db.test2, 2000, True)

        error.start()
        okay.start()

        error.join()
        okay.join()

    def test_safe_update(self):
        # Same idea as test_safe_insert, but via update_one: updating the
        # "unique" doc to "update" collides with the unique index on test2.
        self.db.drop_collection("test1")
        self.db.test1.insert_one({"test": "update"})
        self.db.test1.insert_one({"test": "unique"})
        self.db.drop_collection("test2")
        self.db.test2.insert_one({"test": "update"})
        self.db.test2.insert_one({"test": "unique"})
        self.db.test2.create_index("test", unique=True)
        self.db.test2.find_one()

        okay = Update(self.db.test1, 2000, False)
        error = Update(self.db.test2, 2000, True)

        error.start()
        okay.start()

        error.join()
        okay.join()

    def test_client_disconnect(self):
        self.db.drop_collection("test")
        self.db.test.insert_many([{"x": i} for i in range(1000)])

        # Start 10 threads that execute a query, and 10 threads that call
        # client.close() 10 times in a row.
        threads = [SaveAndFind(self.db.test) for _ in range(10)]
        threads.extend(Disconnect(self.db.client, 10) for _ in range(10))

        with frequent_thread_switches():
            # Frequent thread switches hurt performance badly enough to
            # prevent reconnection within 5 seconds, especially in Python 2
            # on a Windows build slave.
            for t in threads:
                t.start()

            for t in threads:
                t.join(30)

        # Every reader and every disconnector must have finished cleanly.
        for t in threads:
            self.assertTrue(t.passed)
class TestThreadsAuth(IntegrationTest):
    """Verify automatic re-authentication across many threads."""

    @classmethod
    @client_context.require_auth
    def setUpClass(cls):
        # Only meaningful against a server with auth enabled.
        super(TestThreadsAuth, cls).setUpClass()

    def test_auto_auth_login(self):
        client = rs_or_single_client_noauth()
        # Unauthenticated access must be rejected.
        self.assertRaises(OperationFailure, client.auth_test.test.find_one)

        # Admin auth
        client.admin.authenticate(db_user, db_pwd)

        nthreads = 10
        threads = []
        for _ in range(nthreads):
            t = AutoAuthenticateThreads(client.auth_test.test, 10)
            t.start()
            threads.append(t)

        joinall(threads)

        # Each thread must have completed all its operations, i.e. the
        # credentials were picked up automatically on every connection.
        for t in threads:
            self.assertTrue(t.success)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from lxml import etree
import mock
import webob
from nova.api.openstack.compute.contrib import quotas as quotas_v2
from nova.api.openstack.compute.plugins.v3 import quota_sets as quotas_v21
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import context as context_maker
from nova import exception
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
def quota_set(id, include_server_group_quotas=True):
    """Build the reference quota_set response body for project *id*.

    When *include_server_group_quotas* is True the two server-group
    limits are present as well (matching the os-server-group-quotas
    extension being loaded).
    """
    limits = {
        'id': id,
        'metadata_items': 128,
        'ram': 51200,
        'floating_ips': 10,
        'fixed_ips': -1,
        'instances': 10,
        'injected_files': 5,
        'cores': 20,
        'injected_file_content_bytes': 10240,
        'security_groups': 10,
        'security_group_rules': 20,
        'key_pairs': 100,
        'injected_file_path_bytes': 255,
    }
    if include_server_group_quotas:
        limits['server_groups'] = 10
        limits['server_group_members'] = 10
    return {'quota_set': limits}
class BaseQuotaSetsTest(test.TestCase):
    """Shared helpers for the v2.0 and v2.1 quota-sets API test cases."""

    def _is_v20_api_test(self):
        # NOTE(oomichi): If a test is for v2.0 API, this method returns
        # True. Otherwise(v2.1 API test), returns False.
        return (self.plugin == quotas_v2)

    def get_update_expected_response(self, base_body):
        # NOTE(oomichi): "id" parameter is added to a response of
        # "update quota" API since v2.1 API, because it makes the
        # API consistent and it is not backwards incompatible change.
        # This method adds "id" for an expected body of a response.
        if self._is_v20_api_test():
            expected_body = base_body
        else:
            expected_body = copy.deepcopy(base_body)
            expected_body['quota_set'].update({'id': 'update_me'})
        return expected_body

    def setup_mock_for_show(self):
        # v2.0 consults the extension manager before "show"; v2.1 does not.
        if self._is_v20_api_test():
            self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()

    def setup_mock_for_update(self):
        # v2.0 consults the extension manager before "update"; v2.1 does not.
        if self._is_v20_api_test():
            self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
            self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()

    def get_delete_status_int(self, res):
        if self._is_v20_api_test():
            return res.status_int
        else:
            # NOTE: on v2.1, http status code is set as wsgi_code of API
            # method instead of status_int in a response object.
            return self.controller.delete.wsgi_code
class QuotaSetsTestV21(BaseQuotaSetsTest):
    """Quota-sets API tests run against the v2.1 plugin by default.

    Subclasses override ``plugin``/``validation_error`` to replay the
    same scenarios against the v2.0 extension.
    """
    plugin = quotas_v21
    validation_error = exception.ValidationError
    include_server_group_quotas = True

    def setUp(self):
        super(QuotaSetsTestV21, self).setUp()
        self._setup_controller()
        # Baseline quota limits used by most tests below.
        self.default_quotas = {
            'instances': 10,
            'cores': 20,
            'ram': 51200,
            'floating_ips': 10,
            'fixed_ips': -1,
            'metadata_items': 128,
            'injected_files': 5,
            'injected_file_path_bytes': 255,
            'injected_file_content_bytes': 10240,
            'security_groups': 10,
            'security_group_rules': 20,
            'key_pairs': 100,
        }
        if self.include_server_group_quotas:
            self.default_quotas['server_groups'] = 10
            self.default_quotas['server_group_members'] = 10

    def _setup_controller(self):
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)

    def test_format_quota_set(self):
        quota_set = self.controller._format_quota_set('1234',
                                                      self.default_quotas)
        qs = quota_set['quota_set']

        self.assertEqual(qs['id'], '1234')
        self.assertEqual(qs['instances'], 10)
        self.assertEqual(qs['cores'], 20)
        self.assertEqual(qs['ram'], 51200)
        self.assertEqual(qs['floating_ips'], 10)
        self.assertEqual(qs['fixed_ips'], -1)
        self.assertEqual(qs['metadata_items'], 128)
        self.assertEqual(qs['injected_files'], 5)
        self.assertEqual(qs['injected_file_path_bytes'], 255)
        self.assertEqual(qs['injected_file_content_bytes'], 10240)
        self.assertEqual(qs['security_groups'], 10)
        self.assertEqual(qs['security_group_rules'], 20)
        self.assertEqual(qs['key_pairs'], 100)
        if self.include_server_group_quotas:
            self.assertEqual(qs['server_groups'], 10)
            self.assertEqual(qs['server_group_members'], 10)

    def test_quotas_defaults(self):
        uri = '/v2/fake_tenant/os-quota-sets/fake_tenant/defaults'

        req = fakes.HTTPRequest.blank(uri)
        res_dict = self.controller.defaults(req, 'fake_tenant')
        self.default_quotas.update({'id': 'fake_tenant'})
        expected = {'quota_set': self.default_quotas}

        self.assertEqual(res_dict, expected)

    def test_quotas_show_as_admin(self):
        self.setup_mock_for_show()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234',
                                      use_admin_context=True)
        res_dict = self.controller.show(req, 1234)
        ref_quota_set = quota_set('1234', self.include_server_group_quotas)
        self.assertEqual(res_dict, ref_quota_set)

    def test_quotas_show_as_unauthorized_user(self):
        self.setup_mock_for_show()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
                          req, 1234)

    def test_quotas_update_as_admin(self):
        self.setup_mock_for_update()
        self.default_quotas.update({
            'instances': 50,
            'cores': 50
        })
        body = {'quota_set': self.default_quotas}
        expected_body = self.get_update_expected_response(body)

        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body=body)
        self.assertEqual(expected_body, res_dict)

    def test_quotas_update_zero_value_as_admin(self):
        self.setup_mock_for_update()
        # Zero is a legal limit for everything except key_pairs/fixed_ips here.
        body = {'quota_set': {'instances': 0, 'cores': 0,
                              'ram': 0, 'floating_ips': 0,
                              'metadata_items': 0,
                              'injected_files': 0,
                              'injected_file_content_bytes': 0,
                              'injected_file_path_bytes': 0,
                              'security_groups': 0,
                              'security_group_rules': 0,
                              'key_pairs': 100, 'fixed_ips': -1}}
        if self.include_server_group_quotas:
            body['quota_set']['server_groups'] = 10
            body['quota_set']['server_group_members'] = 10
        expected_body = self.get_update_expected_response(body)

        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body=body)
        self.assertEqual(expected_body, res_dict)

    def test_quotas_update_as_user(self):
        self.setup_mock_for_update()
        self.default_quotas.update({
            'instances': 50,
            'cores': 50
        })
        body = {'quota_set': self.default_quotas}

        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          req, 'update_me', body=body)

    def _quotas_update_bad_request_case(self, body):
        # Helper: an admin update with *body* must fail input validation.
        self.setup_mock_for_update()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.assertRaises(self.validation_error, self.controller.update,
                          req, 'update_me', body=body)

    def test_quotas_update_invalid_key(self):
        # 'instances2' is not a known quota key.
        body = {'quota_set': {'instances2': -2, 'cores': -2,
                              'ram': -2, 'floating_ips': -2,
                              'metadata_items': -2, 'injected_files': -2,
                              'injected_file_content_bytes': -2}}
        self._quotas_update_bad_request_case(body)

    def test_quotas_update_invalid_limit(self):
        # Values below -1 are rejected.
        body = {'quota_set': {'instances': -2, 'cores': -2,
                              'ram': -2, 'floating_ips': -2, 'fixed_ips': -2,
                              'metadata_items': -2, 'injected_files': -2,
                              'injected_file_content_bytes': -2}}
        self._quotas_update_bad_request_case(body)

    def test_quotas_update_empty_body(self):
        body = {}
        self._quotas_update_bad_request_case(body)

    def test_quotas_update_invalid_value_non_int(self):
        # when PUT non integer value
        self.default_quotas.update({
            'instances': 'test'
        })
        body = {'quota_set': self.default_quotas}
        self._quotas_update_bad_request_case(body)

    def test_quotas_update_invalid_value_with_float(self):
        # when PUT non integer value
        self.default_quotas.update({
            'instances': 50.5
        })
        body = {'quota_set': self.default_quotas}
        self._quotas_update_bad_request_case(body)

    def test_quotas_update_invalid_value_with_unicode(self):
        # when PUT non integer value
        self.default_quotas.update({
            'instances': u'\u30aa\u30fc\u30d7\u30f3'
        })
        body = {'quota_set': self.default_quotas}
        self._quotas_update_bad_request_case(body)

    def test_quotas_delete_as_unauthorized_user(self):
        if self._is_v20_api_test():
            self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          req, 1234)

    def test_quotas_delete_as_admin(self):
        if self._is_v20_api_test():
            self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        context = context_maker.get_admin_context()
        self.req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.req.environ['nova.context'] = context
        # Expect exactly one project-wide quota wipe.
        self.mox.StubOutWithMock(quota.QUOTAS,
                                 "destroy_all_by_project")
        quota.QUOTAS.destroy_all_by_project(context, 1234)
        self.mox.ReplayAll()
        res = self.controller.delete(self.req, 1234)
        self.mox.VerifyAll()
        self.assertEqual(202, self.get_delete_status_int(res))
class QuotaXMLSerializerTest(test.TestCase):
    """Round-trip tests for the v2.0 XML (de)serializer of quota sets."""

    def setUp(self):
        super(QuotaXMLSerializerTest, self).setUp()
        self.serializer = quotas_v2.QuotaTemplate()
        self.deserializer = wsgi.XMLDeserializer()

    def test_serializer(self):
        exemplar = dict(quota_set=dict(
                id='project_id',
                metadata_items=10,
                injected_file_path_bytes=255,
                injected_file_content_bytes=20,
                ram=50,
                floating_ips=60,
                fixed_ips=-1,
                instances=70,
                injected_files=80,
                security_groups=10,
                security_group_rules=20,
                key_pairs=100,
                cores=90))
        text = self.serializer.serialize(exemplar)

        tree = etree.fromstring(text)

        self.assertEqual('quota_set', tree.tag)
        self.assertEqual('project_id', tree.get('id'))
        # 'id' is rendered as an XML attribute, hence "- 1" child elements.
        self.assertEqual(len(exemplar['quota_set']) - 1, len(tree))
        for child in tree:
            self.assertIn(child.tag, exemplar['quota_set'])
            self.assertEqual(int(child.text), exemplar['quota_set'][child.tag])

    def test_deserializer(self):
        # Deserialized values stay strings; conversion happens later.
        exemplar = dict(quota_set=dict(
                metadata_items='10',
                injected_file_content_bytes='20',
                ram='50',
                floating_ips='60',
                fixed_ips='-1',
                instances='70',
                injected_files='80',
                security_groups='10',
                security_group_rules='20',
                key_pairs='100',
                cores='90'))
        intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                  '<quota_set>'
                  '<metadata_items>10</metadata_items>'
                  '<injected_file_content_bytes>20'
                  '</injected_file_content_bytes>'
                  '<ram>50</ram>'
                  '<floating_ips>60</floating_ips>'
                  '<fixed_ips>-1</fixed_ips>'
                  '<instances>70</instances>'
                  '<injected_files>80</injected_files>'
                  '<security_groups>10</security_groups>'
                  '<security_group_rules>20</security_group_rules>'
                  '<key_pairs>100</key_pairs>'
                  '<cores>90</cores>'
                  '</quota_set>')

        result = self.deserializer.deserialize(intext)['body']
        self.assertEqual(result, exemplar)
class ExtendedQuotasTestV21(BaseQuotaSetsTest):
    """Tests for quota updates constrained by current usage/reservations."""
    plugin = quotas_v21

    def setUp(self):
        super(ExtendedQuotasTestV21, self).setUp()
        self._setup_controller()
        self.setup_mock_for_update()

    # Class-level fixture consumed by the fake getters below.
    fake_quotas = {'ram': {'limit': 51200,
                           'in_use': 12800,
                           'reserved': 12800},
                   'cores': {'limit': 20,
                             'in_use': 10,
                             'reserved': 5},
                   'instances': {'limit': 100,
                                 'in_use': 0,
                                 'reserved': 0}}

    def _setup_controller(self):
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)

    def fake_get_quotas(self, context, id, user_id=None, usages=False):
        # Mirrors quota.QUOTAS.get_*_quotas: full dicts with usages,
        # bare limits otherwise.
        if usages:
            return self.fake_quotas
        else:
            return dict((k, v['limit']) for k, v in self.fake_quotas.items())

    def fake_get_settable_quotas(self, context, project_id, user_id=None):
        # The minimum settable limit is current usage plus reservations.
        return {
            'ram': {'minimum': self.fake_quotas['ram']['in_use'] +
                               self.fake_quotas['ram']['reserved'],
                    'maximum': -1},
            'cores': {'minimum': self.fake_quotas['cores']['in_use'] +
                                 self.fake_quotas['cores']['reserved'],
                      'maximum': -1},
            'instances': {'minimum': self.fake_quotas['instances']['in_use'] +
                                     self.fake_quotas['instances']['reserved'],
                          'maximum': -1},
        }

    def test_quotas_update_exceed_in_used(self):
        # cores=10 is below in_use + reserved (15), so the update must fail.
        patcher = mock.patch.object(quota.QUOTAS, 'get_settable_quotas')
        get_settable_quotas = patcher.start()

        body = {'quota_set': {'cores': 10}}

        get_settable_quotas.side_effect = self.fake_get_settable_quotas
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body=body)
        mock.patch.stopall()

    def test_quotas_force_update_exceed_in_used(self):
        # With force=True the same below-usage update is accepted.
        patcher = mock.patch.object(quota.QUOTAS, 'get_settable_quotas')
        get_settable_quotas = patcher.start()
        patcher = mock.patch.object(self.plugin.QuotaSetsController,
                                    '_get_quotas')
        _get_quotas = patcher.start()

        body = {'quota_set': {'cores': 10, 'force': 'True'}}

        get_settable_quotas.side_effect = self.fake_get_settable_quotas
        _get_quotas.side_effect = self.fake_get_quotas
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.controller.update(req, 'update_me', body=body)
        mock.patch.stopall()
class UserQuotasTestV21(BaseQuotaSetsTest):
    """Per-user quota-set operations (the ``user_id`` query parameter)."""
    plugin = quotas_v21
    include_server_group_quotas = True

    def setUp(self):
        super(UserQuotasTestV21, self).setUp()
        self._setup_controller()

    def _setup_controller(self):
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)

    def test_user_quotas_show_as_admin(self):
        self.setup_mock_for_show()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1',
                                      use_admin_context=True)
        res_dict = self.controller.show(req, 1234)
        ref_quota_set = quota_set('1234', self.include_server_group_quotas)
        self.assertEqual(res_dict, ref_quota_set)

    def test_user_quotas_show_as_unauthorized_user(self):
        self.setup_mock_for_show()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
                          req, 1234)

    def test_user_quotas_update_as_admin(self):
        self.setup_mock_for_update()
        body = {'quota_set': {'instances': 10, 'cores': 20,
                              'ram': 51200, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'injected_file_path_bytes': 255,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100}}
        if self.include_server_group_quotas:
            body['quota_set']['server_groups'] = 10
            body['quota_set']['server_group_members'] = 10
        expected_body = self.get_update_expected_response(body)

        url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequest.blank(url, use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body=body)

        self.assertEqual(expected_body, res_dict)

    def test_user_quotas_update_as_user(self):
        # A non-admin caller is always forbidden.
        self.setup_mock_for_update()
        body = {'quota_set': {'instances': 10, 'cores': 20,
                              'ram': 51200, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100,
                              'server_groups': 10,
                              'server_group_members': 10}}

        url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequest.blank(url)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          req, 'update_me', body=body)

    def test_user_quotas_update_exceed_project(self):
        # A per-user limit may not exceed the project-wide limit (10).
        self.setup_mock_for_update()
        body = {'quota_set': {'instances': 20}}

        url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequest.blank(url, use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body=body)

    def test_user_quotas_delete_as_unauthorized_user(self):
        self.setup_mock_for_update()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          req, 1234)

    def test_user_quotas_delete_as_admin(self):
        if self._is_v20_api_test():
            self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
            self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        context = context_maker.get_admin_context()
        url = '/v2/fake4/os-quota-sets/1234?user_id=1'
        self.req = fakes.HTTPRequest.blank(url)
        self.req.environ['nova.context'] = context
        # Expect exactly one project+user quota wipe.
        self.mox.StubOutWithMock(quota.QUOTAS,
                                 "destroy_all_by_project_and_user")
        quota.QUOTAS.destroy_all_by_project_and_user(context, 1234, '1')
        self.mox.ReplayAll()
        res = self.controller.delete(self.req, 1234)
        self.mox.VerifyAll()
        self.assertEqual(202, self.get_delete_status_int(res))
class QuotaSetsTestV2(QuotaSetsTestV21):
    """Re-run the v2.1 scenarios against the legacy v2.0 extension."""
    plugin = quotas_v2
    validation_error = webob.exc.HTTPBadRequest

    def _setup_controller(self):
        # v2.0 consults the extension manager at construction time, so the
        # mock must already be in replay mode, then reset for the tests.
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
            AndReturn(self.include_server_group_quotas)
        self.mox.ReplayAll()
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
        self.mox.ResetAll()

    # NOTE: The following tests are tricky and v2.1 API does not allow
    # this kind of input by strong input validation. Just for test coverage,
    # we keep them now.
    def test_quotas_update_invalid_value_json_fromat_empty_string(self):
        self.setup_mock_for_update()
        self.default_quotas.update({
            'instances': 50,
            'cores': 50
        })
        expected_resp = {'quota_set': self.default_quotas}

        # when PUT JSON format with empty string for quota
        body = copy.deepcopy(expected_resp)
        body['quota_set']['ram'] = ''
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body)
        self.assertEqual(res_dict, expected_resp)

    def test_quotas_update_invalid_value_xml_fromat_empty_string(self):
        self.default_quotas.update({
            'instances': 50,
            'cores': 50
        })
        expected_resp = {'quota_set': self.default_quotas}

        # when PUT XML format with empty string for quota
        body = copy.deepcopy(expected_resp)
        body['quota_set']['ram'] = {}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.setup_mock_for_update()
        res_dict = self.controller.update(req, 'update_me', body)
        self.assertEqual(res_dict, expected_resp)

    # NOTE: os-extended-quotas and os-user-quotas are only for v2.0.
    # On v2.1, these features are always enable. So we need the following
    # tests only for v2.0.
    def test_delete_quotas_when_extension_not_loaded(self):
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(False)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, 1234)

    def test_delete_user_quotas_when_extension_not_loaded(self):
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(False)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, 1234)
class QuotaSetsTestV2WithoutServerGroupQuotas(QuotaSetsTestV2):
    """v2.0 quota tests with the os-server-group-quotas extension disabled."""
    include_server_group_quotas = False

    # NOTE: os-server-group-quotas is only for v2.0. On v2.1 this feature
    # is always enabled, so this test is only needed for v2.0
    def test_quotas_update_without_server_group_quotas_extenstion(self):
        self.setup_mock_for_update()
        # BUG FIX: the second key used to be the misspelled
        # 'sever_group_members', so the request was rejected as an unknown
        # key instead of exercising the disabled-extension code path for
        # server group quotas.
        self.default_quotas.update({
            'server_groups': 50,
            'server_group_members': 50
        })
        body = {'quota_set': self.default_quotas}

        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        # With the extension unloaded, server-group keys must be rejected.
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body=body)
class ExtendedQuotasTestV2(ExtendedQuotasTestV21):
    """Usage-constrained update tests against the v2.0 extension."""
    plugin = quotas_v2

    def _setup_controller(self):
        # The v2.0 controller queries the extension manager while being
        # constructed, so replay first, then reset for the tests proper.
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
            AndReturn(False)
        self.mox.ReplayAll()
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
        self.mox.ResetAll()
class UserQuotasTestV2(UserQuotasTestV21):
    """Per-user quota tests against the v2.0 extension."""
    plugin = quotas_v2

    def _setup_controller(self):
        # The v2.0 controller queries the extension manager while being
        # constructed, so replay first, then reset for the tests proper.
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
            AndReturn(self.include_server_group_quotas)
        self.mox.ReplayAll()
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
        self.mox.ResetAll()
class UserQuotasTestV2WithoutServerGroupQuotas(UserQuotasTestV2):
    """Per-user v2.0 quota tests with os-server-group-quotas disabled."""
    include_server_group_quotas = False

    # NOTE: os-server-group-quotas is only for v2.0. On v2.1 this feature
    # is always enabled, so this test is only needed for v2.0
    def test_user_quotas_update_as_admin_without_sg_quota_extension(self):
        self.setup_mock_for_update()
        body = {'quota_set': {'instances': 10, 'cores': 20,
                              'ram': 51200, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'injected_file_path_bytes': 255,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100,
                              'server_groups': 100,
                              'server_group_members': 200}}

        url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequest.blank(url, use_admin_context=True)
        # With the extension unloaded, the server-group keys are rejected.
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body=body)
| |
# TODO: implement destructive version of BatchedInvOp
# TODO: implement optimization to replace batched_inv{destructive=False} with batched_inv{destructive=True} if applicable.
import numpy as np
import theano
import theano.tensor as T
import theano.sandbox.cuda as cuda
from theano.misc.pycuda_utils import to_gpuarray
import scikits.cuda
from scikits.cuda import linalg
from scikits.cuda import cublas
import pycuda.gpuarray
import theano.misc.pycuda_init
import string
linalg.init()
class ScikitsCudaOp(cuda.GpuOp):
    """Base class sharing equality/hash/naming plumbing and node creation
    for ops backed by scikits.cuda."""

    def __eq__(self, other):
        return type(self) == type(other)

    def __hash__(self):
        return hash(type(self))

    def __str__(self):
        return self.__class__.__name__

    def output_type(self, inp):
        # Subclasses must describe the output variable type.
        raise NotImplementedError

    def make_node(self, inp):
        var = cuda.basic_ops.as_cuda_ndarray_variable(inp)
        var = cuda.basic_ops.gpu_contiguous(var)
        assert var.dtype == "float32"
        return theano.Apply(self, [var], [self.output_type(var)()])
def bptrs(a):
    """
    Pointer array when input represents a batch of matrices.
    taken from scikits.cuda tests/test_cublas.py

    Builds a GPU array of device pointers, one per matrix in the batch:
    base pointer plus one leading-stride step per batch element. This is
    the A[]/B[]/C[] argument format cublas*Batched routines expect.
    """
    return pycuda.gpuarray.arange(a.ptr,a.ptr+a.shape[0]*a.strides[0],a.strides[0],
                                  dtype=cublas.ctypes.c_void_p)
def gpu_dot_batched(bx_gpu, by_gpu, bc_gpu, transa='N', transb='N', handle=None):
    """
    uses cublasSgemmBatched to compute a bunch of dot products in parallel

    All three arrays must be 3D float32 batches with the same leading
    (batch) dimension; the result is written into ``bc_gpu``.
    """
    if handle is None:
        handle = scikits.cuda.misc._global_cublas_handle

    assert len(bx_gpu.shape) == 3
    assert len(by_gpu.shape) == 3
    assert len(bc_gpu.shape) == 3
    assert bx_gpu.dtype == np.float32
    assert by_gpu.dtype == np.float32
    assert bc_gpu.dtype == np.float32

    # Get the shapes of the arguments
    bx_shape = bx_gpu.shape
    by_shape = by_gpu.shape

    # Perform matrix multiplication for 2D arrays:
    alpha = np.float32(1.0)
    beta = np.float32(0.0)

    # BUG FIX: use the str method instead of string.lower(), which was
    # removed from the `string` module in Python 3.
    transa = transa.lower()
    transb = transb.lower()

    if transb in ['t', 'c']:
        N, m, k = by_shape
    elif transb in ['n']:
        N, k, m = by_shape
    else:
        raise ValueError('invalid value for transb')

    if transa in ['t', 'c']:
        N2, l, n = bx_shape
    elif transa in ['n']:
        N2, n, l = bx_shape
    else:
        raise ValueError('invalid value for transa')

    if l != k:
        raise ValueError('objects are not aligned')

    if N != N2:
        raise ValueError('batch sizes are not the same')

    # cuBLAS is column-major, so the roles of x and y are swapped below
    # and the leading dimensions follow the transpose flags.
    if transb == 'n':
        lda = max(1, m)
    else:
        lda = max(1, k)

    if transa == 'n':
        ldb = max(1, k)
    else:
        ldb = max(1, n)

    ldc = max(1, m)

    # construct pointer arrays needed for cublasSgemmBatched
    bx_arr = bptrs(bx_gpu)
    by_arr = bptrs(by_gpu)
    bc_arr = bptrs(bc_gpu)

    cublas.cublasSgemmBatched(handle, transb, transa, m, n, k, alpha, by_arr.gpudata,
                              lda, bx_arr.gpudata, ldb, beta, bc_arr.gpudata, ldc, N)
class BatchedDotOp(ScikitsCudaOp):
    """Theano op computing (batch, a, b) @ (batch, b, c) -> (batch, a, c)
    on the GPU via cublasSgemmBatched."""

    def make_node(self, inp1, inp2):
        inp1 = cuda.basic_ops.gpu_contiguous(
            cuda.basic_ops.as_cuda_ndarray_variable(inp1))
        inp2 = cuda.basic_ops.gpu_contiguous(
            cuda.basic_ops.as_cuda_ndarray_variable(inp2))

        assert inp1.dtype == "float32"
        assert inp2.dtype == "float32"
        assert inp1.ndim == 3  # (batch, a, b)
        assert inp2.ndim == 3

        return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])

    def output_type(self, inp):
        # Same rank as the inputs, nothing broadcastable.
        return cuda.CudaNdarrayType(broadcastable=[False] * inp.type.ndim)

    def make_thunk(self, node, storage_map, _, _2):
        # storage_map cells are one-element lists; [0] holds the value.
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]

        def thunk():
            bx = inputs[0]
            by = inputs[1]

            input_shape_x = bx[0].shape  # (batch, a, b)
            input_shape_y = by[0].shape  # (batch, b, c)

            output_shape = (input_shape_x[0], input_shape_x[1], input_shape_y[2])  # (batch, a, c)

            bz = outputs[0]

            # only allocate if there is no previous allocation of the right size.
            if bz[0] is None or bz[0].shape != output_shape:
                bz[0] = cuda.CudaNdarray.zeros(output_shape)

            input_bx_pycuda = to_gpuarray(bx[0])
            input_by_pycuda = to_gpuarray(by[0])
            output_b_pycuda = to_gpuarray(bz[0])

            # fancy native batched version
            gpu_dot_batched(input_bx_pycuda, input_by_pycuda, output_b_pycuda)

        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False

        return thunk


batched_dot = BatchedDotOp()
class BatchedInvOp(ScikitsCudaOp):
    """Batched matrix inverse on the GPU via cuBLAS LU factorization
    (cublasSgetrfBatched followed by cublasSgetriBatched)."""

    def __init__(self, destructive=False):
        super(BatchedInvOp, self).__init__()
        assert destructive == False  # TODO: destructive op not supported for now (need to add destroy_map and optimization)
        self.destructive = destructive

    def __eq__(self, other):
        return (type(self) == type(other) and
                self.destructive == other.destructive)

    def __hash__(self):
        return (hash(type(self)) ^
                hash(self.destructive))

    def __str__(self):
        return "%s{destructive=%s}" % (self.__class__.__name__, self.destructive)

    def output_type(self, inp):
        return cuda.CudaNdarrayType(broadcastable=[False] * inp.type.ndim)

    def make_thunk(self, node, storage_map, _, _2):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]

        # reusable allocations (rebuilt only when the input shape changes)
        pivot_alloc = [None]
        info_alloc = [None]

        def thunk():
            input_shape = inputs[0][0].shape
            size = input_shape[1]  # matrices to invert are (size x size)
            batch_size = input_shape[0]

            z = outputs[0]

            # only allocate if there is no previous allocation of the right size.
            if z[0] is None or z[0].shape != input_shape:
                z[0] = cuda.CudaNdarray.zeros(input_shape)
                pivot_alloc[0] = pycuda.gpuarray.empty((batch_size, size), np.int32)
                info_alloc[0] = pycuda.gpuarray.zeros(batch_size, np.int32)

            input_pycuda = to_gpuarray(inputs[0][0])
            output_pycuda = to_gpuarray(z[0])
            pivot = pivot_alloc[0]
            info = info_alloc[0]

            # BUG FIX: copy the input *before* building the pointer array.
            # Previously bptrs() was called first, so the pointer array kept
            # referencing the original buffer and cublasSgetrfBatched
            # clobbered the input even with destructive=False.
            if not self.destructive:
                input_pycuda = input_pycuda.copy()  # to prevent destruction of the input

            # construct pointer arrays needed for batched operations
            input_arr = bptrs(input_pycuda)
            output_arr = bptrs(output_pycuda)

            handle = scikits.cuda.misc._global_cublas_handle

            # perform LU factorization (in place in input_pycuda!)
            cublas.cublasSgetrfBatched(handle, size, input_arr.gpudata, size, pivot.gpudata, info.gpudata, batch_size)

            # use factorization to perform inversion into output_pycuda
            cublas.cublasSgetriBatched(handle, size, input_arr.gpudata, size, pivot.gpudata, output_arr.gpudata, size, info.gpudata, batch_size)

        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False

        return thunk


batched_inv = BatchedInvOp()
# batched_inv_destructive = BatchedInvOp(destroy_input=True)
| |
# Copyright (c) 2013, Preferred Infrastructure, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
# These two lines are necessary for desktop-enabled environment.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
import maflib.util
class PlotData:
    """Result of experimentation collected through a meta node to plot.

    Result of experiments is represented by a meta node consisted by a set of
    physical nodes each of which contains a dictionary or an array of
    dictionaries. This class is used to collect all dictionaries through the
    meta node and to extract point sequences to plot.
    """

    # Sentinel returned by _key_value when a data point lacks one of the
    # requested key(s); cannot collide with any user value.
    _MISSING = object()

    def __init__(self, inputs):
        """Constructs a plot data from a list of values to be plotted.

        :param inputs: A list of values to be plotted. The first argument of
            callback body function passed to
            :py:func:`maflib.util.aggregator` can be used for this
            argument.
        """
        self._inputs = inputs

    def _key_value(self, value, key):
        """Computes the sequence key for one data point.

        :param value: One input dictionary.
        :param key: A string key or a tuple of string keys.
        :return: ``value[key]`` for a string key, or a tuple of the values
            for a tuple key. Returns ``PlotData._MISSING`` when any of the
            requested keys is absent from ``value``.
        """
        if isinstance(key, str):
            if key not in value:
                return self._MISSING
            return value[key]
        key_value = tuple(value[k] for k in key if k in value)
        if len(key_value) != len(key):
            # At least one of the requested keys was missing.
            return self._MISSING
        return key_value

    def get_data_1d(self, x, key=None, sort=True):
        """Extracts a sequence of one-dimensional data points.

        This function extracts x coordinate of each result value and creates a
        list of them. If sort == True, then the list is sorted. User can
        extract different sequences for varying values corresponding to given
        key(s).

        :param x: A key string corresponding to x coordinate.
        :type x: ``str``
        :param key: Key strings that define distinct sequences of data points.
            It can be either of None, a string value or a tuple of string
            values.
        :type key: None, ``str`` or tuple of strings
        :param sort: Flag for sorting the sequence(s).
        :type sort: ``bool``
        :return: If ``key`` is None, then it returns a list of x values.
            Otherwise, it returns a dictionary from key(s) to a sequence of x
            values. Each sequence consists of values matched to the key(s).
        :rtype: ``dict`` or ``list``
        """
        if key is None:
            xs = [value[x] for value in self._inputs if x in value]
            if sort:
                xs.sort()
            return xs

        data = {}
        for value in self._inputs:
            if x not in value:
                continue
            key_value = self._key_value(value, key)
            if key_value is self._MISSING:
                continue
            data.setdefault(key_value, []).append(value[x])
        if sort:
            for k in data:
                data[k].sort()
        return data

    def get_data_2d(self, x, y, key=None, sort=True):
        """Extracts a sequence of two-dimensional data points.

        See get_data_1d for detail. Difference from get_data_1d is that the
        values are represented by pairs.

        :param x: A key string corresponding to x (first) coordinate.
        :type x: ``str``
        :param y: A key string corresponding to y (second) coordinate.
        :type y: ``str``
        :param key: Key strings that define distinct sequences of data points.
            It can be either of None, a string value or a tuple of string
            values.
        :type key: None, ``str`` or tuple of strings
        :param sort: Flag for sorting the sequence(s).
        :type sort: ``bool``
        :return: If ``key`` is None, then it returns a pair of x value sequence
            and y value sequence. Otherwise, it returns a dictionary from a key
            to a pair of x value sequence and y value sequence. Each sequence
            consists of values matched to the key(s).
        :rtype: ``dict`` or ``tuple`` of two ``list`` s
        """
        if key is None:
            vals = [(value[x], value[y])
                    for value in self._inputs if x in value and y in value]
            if sort:
                vals.sort()
            return ([v[0] for v in vals], [v[1] for v in vals])

        data = {}
        for value in self._inputs:
            if x not in value or y not in value:
                continue
            key_value = self._key_value(value, key)
            if key_value is self._MISSING:
                continue
            data.setdefault(key_value, []).append((value[x], value[y]))
        for k in data:
            if sort:
                data[k].sort()
            # Transpose the list of pairs into a pair of lists.
            data[k] = ([v[0] for v in data[k]], [v[1] for v in data[k]])
        return data

    def get_data_3d(self, x, y, z, key=None, sort=True):
        """Extracts a sequence of three-dimensional data points.

        See get_data_1d for detail. Difference from get_data_1d is that the
        values are represented by triples.

        :param x: A key string corresponding to x (first) coordinate.
        :type x: ``str``
        :param y: A key string corresponding to y (second) coordinate.
        :type y: ``str``
        :param z: A key string corresponding to z (third) coordinate.
        :type z: ``str``
        :param key: Key strings that define distinct sequences of data points.
            It can be either of None, a string value or a tuple of string
            values.
        :type key: None, ``str`` or tuple of strings
        :param sort: Flag for sorting the sequence(s).
        :type sort: ``bool``
        :return: If ``key`` is None, then it returns a triple of x value
            sequence, y value sequence and z value sequence. Otherwise, it
            returns a dictionary from a key to a triple of x value sequence, y
            value sequence and z value sequence. Each sequence consists of
            values matched to the key(s).
        :rtype: ``dict`` or ``tuple`` of three ``list`` s.
        """
        if key is None:
            vals = [(value[x], value[y], value[z])
                    for value in self._inputs
                    if x in value and y in value and z in value]
            if sort:
                vals.sort()
            return (
                [v[0] for v in vals],
                [v[1] for v in vals],
                [v[2] for v in vals])

        data = {}
        for value in self._inputs:
            if not (x in value and y in value and z in value):
                continue
            key_value = self._key_value(value, key)
            if key_value is self._MISSING:
                continue
            data.setdefault(key_value, []).append(
                (value[x], value[y], value[z]))
        for k in data:
            if sort:
                data[k].sort()
            # Transpose the list of triples into a triple of lists.
            data[k] = (
                [v[0] for v in data[k]],
                [v[1] for v in data[k]],
                [v[2] for v in data[k]])
        return data
def plot_by(callback_body):
    """Creates an aggregator to plot data using matplotlib and PlotData.

    :param callback_body: Callable object or function that plots data. It takes
        three parameters: :py:class:`matplotlib.figure.Figure` object,
        :py:class:`maflib.plot.PlotData` object and a parameter of class
        :py:class:`maflib.core.Parameter`. User must define a callback function
        that plots given data to given figure.
    :type callback_body: ``function`` or callable object, whose signature is
        (:py:class:`matplotlib.figure.Figure`, :py:class:`PlotData`).
    """
    def callback(values, abspath, parameter):
        # Draw on a fresh figure and persist it at the aggregate output path.
        figure = matplotlib.pyplot.figure()
        callback_body(figure, PlotData(values), parameter)
        figure.savefig(abspath)
        return None

    # Apply the decorators explicitly, innermost first: wrap with the maf
    # aggregator, then copy the user callback's metadata onto the result.
    callback = maflib.util.aggregator(callback)
    return functools.wraps(callback_body)(callback)
def plot_line(x, y, legend=None):
    """Creates an aggregator that draws a line plot."""
    # TODO(beam2d): Write a document.
    def _as_axis_config(axis):
        # A bare string is shorthand for {'key': <string>}.
        return {'key': axis} if isinstance(axis, str) else axis

    x = _as_axis_config(x)
    y = _as_axis_config(y)

    def callback(figure, data, parameter):
        axes = figure.add_subplot(111)
        if 'scale' in x:
            axes.set_xscale(x['scale'])
        if 'scale' in y:
            axes.set_yscale(y['scale'])
        axes.set_xlabel(x['key'])
        axes.set_ylabel(y['key'])

        if not legend:
            # Single unnamed sequence: plot it directly.
            xs, ys = data.get_data_2d(x['key'], y['key'])
            axes.plot(xs, ys)
            return

        # One line per distinct value of the legend key.
        legend_key = legend['key']
        labels = legend.get('labels', {})
        key_to_xys = data.get_data_2d(x['key'], y['key'], key=legend_key)
        for key in sorted(key_to_xys.keys()):
            xs, ys = key_to_xys[key]
            label = labels.get(key, '%s=%s' % (legend_key, key))
            # TODO(beam2d): Support marker.
            axes.plot(xs, ys, label=label)
        axes.legend(loc=legend.get('loc', 'best'))

    return plot_by(callback)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using kombu
"""
from nova import context
from nova import flags
from nova import log as logging
from nova import test
from nova.rpc import amqp as rpc_amqp
from nova.rpc import impl_kombu
from nova.tests.rpc import common
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class MyException(Exception):
    """Marker exception raised by the stubs installed in these tests."""
    pass
def _raise_exc_stub(stubs, times, obj, method, exc_msg):
info = {'called': 0}
orig_method = getattr(obj, method)
def _raise_stub(*args, **kwargs):
info['called'] += 1
if info['called'] <= times:
raise MyException(exc_msg)
orig_method(*args, **kwargs)
stubs.Set(obj, method, _raise_stub)
return info
class RpcKombuTestCase(common.BaseRpcAMQPTestCase):
    """Tests for the kombu RPC implementation (nova.rpc.impl_kombu)."""

    def setUp(self):
        # Point the shared base-class tests at the kombu implementation.
        self.rpc = impl_kombu
        super(RpcKombuTestCase, self).setUp()

    def tearDown(self):
        # Drop any pooled connections so tests remain independent.
        impl_kombu.cleanup()
        super(RpcKombuTestCase, self).tearDown()

    def test_reusing_connection(self):
        """Test that reusing a connection returns same one."""
        conn_context = self.rpc.create_connection(new=False)
        conn1 = conn_context.connection
        conn_context.close()
        conn_context = self.rpc.create_connection(new=False)
        conn2 = conn_context.connection
        conn_context.close()
        self.assertEqual(conn1, conn2)

    def test_topic_send_receive(self):
        """Test sending to a topic exchange/queue"""
        conn = self.rpc.create_connection()
        message = 'topic test message'
        self.received_message = None

        # The consumer callback stores the message on the test case so we
        # can assert on it after consume() returns.
        def _callback(message):
            self.received_message = message

        conn.declare_topic_consumer('a_topic', _callback)
        conn.topic_send('a_topic', message)
        conn.consume(limit=1)
        conn.close()
        self.assertEqual(self.received_message, message)

    def test_direct_send_receive(self):
        """Test sending to a direct exchange/queue"""
        conn = self.rpc.create_connection()
        message = 'direct test message'
        self.received_message = None

        def _callback(message):
            self.received_message = message

        conn.declare_direct_consumer('a_direct', _callback)
        conn.direct_send('a_direct', message)
        conn.consume(limit=1)
        conn.close()
        self.assertEqual(self.received_message, message)

    def test_cast_interface_uses_default_options(self):
        """Test kombu rpc.cast"""
        ctxt = context.RequestContext('fake_user', 'fake_project')

        # Subclass Connection so __init__ can assert that cast() builds the
        # connection from the default (flag-derived) parameters.  The first
        # argument is named 'myself' so the enclosing test case remains
        # reachable through the closed-over 'self'.
        class MyConnection(impl_kombu.Connection):
            def __init__(myself, *args, **kwargs):
                super(MyConnection, myself).__init__(*args, **kwargs)
                self.assertEqual(myself.params,
                        {'hostname': FLAGS.rabbit_host,
                         'userid': FLAGS.rabbit_userid,
                         'password': FLAGS.rabbit_password,
                         'port': FLAGS.rabbit_port,
                         'virtual_host': FLAGS.rabbit_virtual_host,
                         'transport': 'memory'})

            # Sending itself is irrelevant to this test; make it a no-op.
            def topic_send(_context, topic, msg):
                pass

        MyConnection.pool = rpc_amqp.Pool(connection_cls=MyConnection)
        self.stubs.Set(impl_kombu, 'Connection', MyConnection)

        impl_kombu.cast(ctxt, 'fake_topic', {'msg': 'fake'})

    def test_cast_to_server_uses_server_params(self):
        """Test kombu rpc.cast"""
        ctxt = context.RequestContext('fake_user', 'fake_project')

        server_params = {'username': 'fake_username',
                         'password': 'fake_password',
                         'hostname': 'fake_hostname',
                         'port': 31337,
                         'virtual_host': 'fake_virtual_host'}

        # As above, but asserts the explicit server_params take precedence
        # over the flag defaults when using cast_to_server().
        class MyConnection(impl_kombu.Connection):
            def __init__(myself, *args, **kwargs):
                super(MyConnection, myself).__init__(*args, **kwargs)
                self.assertEqual(myself.params,
                        {'hostname': server_params['hostname'],
                         'userid': server_params['username'],
                         'password': server_params['password'],
                         'port': server_params['port'],
                         'virtual_host': server_params['virtual_host'],
                         'transport': 'memory'})

            def topic_send(_context, topic, msg):
                pass

        MyConnection.pool = rpc_amqp.Pool(connection_cls=MyConnection)
        self.stubs.Set(impl_kombu, 'Connection', MyConnection)

        impl_kombu.cast_to_server(ctxt, server_params,
                'fake_topic', {'msg': 'fake'})

    @test.skip_test("kombu memory transport seems buggy with fanout queues "
                    "as this test passes when you use rabbit (fake_rabbit=False)")
    def test_fanout_send_receive(self):
        """Test sending to a fanout exchange and consuming from 2 queues"""
        conn = self.rpc.create_connection()
        conn2 = self.rpc.create_connection()
        message = 'fanout test message'

        self.received_message = None

        def _callback(message):
            self.received_message = message

        conn.declare_fanout_consumer('a_fanout', _callback)
        conn2.declare_fanout_consumer('a_fanout', _callback)
        conn.fanout_send('a_fanout', message)

        # Both connections should receive their own copy of the message.
        conn.consume(limit=1)
        conn.close()
        self.assertEqual(self.received_message, message)

        self.received_message = None
        conn2.consume(limit=1)
        conn2.close()
        self.assertEqual(self.received_message, message)

    def test_declare_consumer_errors_will_reconnect(self):
        # Test that any exception with 'timeout' in it causes a
        # reconnection
        info = _raise_exc_stub(self.stubs, 2, self.rpc.DirectConsumer,
                '__init__', 'foo timeout foo')

        conn = self.rpc.Connection()
        result = conn.declare_consumer(self.rpc.DirectConsumer,
                'test_topic', None)

        # Two failures plus the final successful attempt.
        self.assertEqual(info['called'], 3)
        self.assertTrue(isinstance(result, self.rpc.DirectConsumer))

        # Test that any exception in transport.connection_errors causes
        # a reconnection
        self.stubs.UnsetAll()

        info = _raise_exc_stub(self.stubs, 1, self.rpc.DirectConsumer,
                '__init__', 'meow')

        conn = self.rpc.Connection()
        conn.connection_errors = (MyException, )

        result = conn.declare_consumer(self.rpc.DirectConsumer,
                'test_topic', None)

        self.assertEqual(info['called'], 2)
        self.assertTrue(isinstance(result, self.rpc.DirectConsumer))

    def test_publishing_errors_will_reconnect(self):
        # Test that any exception with 'timeout' in it causes a
        # reconnection when declaring the publisher class and when
        # calling send()
        info = _raise_exc_stub(self.stubs, 2, self.rpc.DirectPublisher,
                '__init__', 'foo timeout foo')

        conn = self.rpc.Connection()
        conn.publisher_send(self.rpc.DirectPublisher, 'test_topic', 'msg')

        self.assertEqual(info['called'], 3)
        self.stubs.UnsetAll()

        info = _raise_exc_stub(self.stubs, 2, self.rpc.DirectPublisher,
                'send', 'foo timeout foo')

        conn = self.rpc.Connection()
        conn.publisher_send(self.rpc.DirectPublisher, 'test_topic', 'msg')

        self.assertEqual(info['called'], 3)

        # Test that any exception in transport.connection_errors causes
        # a reconnection when declaring the publisher class and when
        # calling send()
        self.stubs.UnsetAll()

        info = _raise_exc_stub(self.stubs, 1, self.rpc.DirectPublisher,
                '__init__', 'meow')

        conn = self.rpc.Connection()
        conn.connection_errors = (MyException, )

        conn.publisher_send(self.rpc.DirectPublisher, 'test_topic', 'msg')

        self.assertEqual(info['called'], 2)
        self.stubs.UnsetAll()

        info = _raise_exc_stub(self.stubs, 1, self.rpc.DirectPublisher,
                'send', 'meow')

        conn = self.rpc.Connection()
        conn.connection_errors = (MyException, )

        conn.publisher_send(self.rpc.DirectPublisher, 'test_topic', 'msg')

        self.assertEqual(info['called'], 2)

    def test_iterconsume_errors_will_reconnect(self):
        conn = self.rpc.Connection()
        message = 'reconnect test message'

        self.received_message = None

        def _callback(message):
            self.received_message = message

        conn.declare_direct_consumer('a_direct', _callback)
        conn.direct_send('a_direct', message)

        info = _raise_exc_stub(self.stubs, 1, conn.connection,
                'drain_events', 'foo timeout foo')
        conn.consume(limit=1)
        conn.close()

        self.assertEqual(self.received_message, message)
        # Only called once, because our stub goes away during reconnection
        self.assertEqual(info['called'], 1)
| |
import collections
import math
import cv2
import scipy.weave
import numpy as np
import time
import os
import random
class Atan(object):
    """Arctangent activation function."""

    @classmethod
    def fwd(cls, x):
        """Applies arctan elementwise."""
        return np.arctan(x)

    @classmethod
    def derivative(cls, x):
        """Elementwise derivative of arctan: 1 / (1 + x**2)."""
        return 1.0 / (1.0 + x ** 2.0)
class Tanh(object):
    """Hyperbolic-tangent activation function."""

    @classmethod
    def fwd(cls, x):
        """Applies tanh elementwise."""
        return np.tanh(x)

    @classmethod
    def derivative(cls, x):
        """Elementwise derivative of tanh: 1 - tanh(x)**2."""
        t = np.tanh(x)
        return 1 - t ** 2
class Relu(object):
    """Rectified linear activation function."""

    @classmethod
    def fwd(cls, x):
        """Elementwise max(x, 0), implemented via a boolean mask."""
        return x * (x > 0)

    @classmethod
    def derivative(cls, x):
        """Elementwise derivative: 1.0 where x > 0, else 0.0.

        Uses the builtin ``float`` rather than ``np.float``: that alias was
        deprecated in NumPy 1.20 and removed in 1.24, and both spell the
        same dtype (float64).
        """
        return (x > 0).astype(float)
class NN(object):
    """Simple fully-connected feed-forward neural network.

    Layer widths are given by ``sizes`` (input layer first) and ``types``
    holds one activation class (e.g. Atan/Tanh/Relu) per non-input layer.
    Training is plain batch backpropagation (mean gradient over the batch)
    with an optional momentum term on the parameter updates.
    """
    def __init__(self, sizes, types, init_w_scale = 0.1, init_b_scale = 0.1,
                 eta = 0.1, momentum = 0.):
        # sizes: unit counts per layer; types: activation class per bridge.
        # init_w_scale / init_b_scale: scale of the random initial weights
        # and biases.  eta: learning rate.  momentum: exponential smoothing
        # factor applied to the parameter updates (0 disables it).
        assert len(types) == len(sizes) - 1
        self._sizes = sizes
        self._types = types
        self._init_w_scale = init_w_scale
        self._init_b_scale = init_b_scale
        self._eta = eta
        self._momentum = momentum
        self._initialize()
    def _initialize(self):
        """Randomly initializes parameters and zeroes the update buffers."""
        # Weights are additionally scaled by 1/sqrt(fan_in) so activation
        # magnitudes stay comparable regardless of layer width.
        self._w = [self._init_w_scale * np.random.randn(self._sizes[i], self._sizes[i + 1]) / np.sqrt(self._sizes[i])
                   for i in range(len(self._sizes) - 1)]
        self._b = [self._init_b_scale * np.random.randn(self._sizes[i + 1])
                   for i in range(len(self._sizes) - 1)]
        # Momentum-smoothed updates, same shapes as _w / _b.
        self._dw = [np.zeros((self._sizes[i], self._sizes[i + 1]))
                    for i in range(len(self._sizes) - 1)]
        self._db = [np.zeros(self._sizes[i + 1])
                    for i in range(len(self._sizes) - 1)]
    def train(self, batch, teacher):
        """Runs one forward/backward pass and updates the parameters.

        batch is a 2-d array of shape (n_samples, sizes[0]); teacher holds
        the target outputs, broadcast-compatible with (n_samples, sizes[-1]).
        """
        assert batch.shape[1:] == tuple(self._sizes[0:1])
        i = [] # layer inputs
        o = [batch] # layer outputs
        for k in range(len(self._w)):
            i.append(np.dot(o[-1], self._w[k]) + self._b[k])
            o.append(self._types[k].fwd(i[-1]))
        # e = (teacher - output); the stored gradients therefore already
        # point in the error-decreasing direction, so updates below *add*
        # eta times the (smoothed) gradient.
        e = teacher - o[-1]
        # Gradients of the most recent batch are kept for external
        # inspection (see test_gradients).
        self._last_gb = []
        self._last_gw = []
        for k in range(len(self._w))[::-1]:
            e *= self._types[k].derivative(i[k]) # backprop the error across the neurons
            self._last_gb = [np.mean(e, axis=0)] + self._last_gb # prepending, not adding
            self._last_gw = [np.mean(o[k][..., None] * e[:, None, :], axis=0)] + self._last_gw # prepending, not adding
            e = np.dot(e, self._w[k].T) # backprop the error across the bridge
        for k, (db, dw) in enumerate(zip(self._last_gb, self._last_gw)):
            # Exponentially smooth the updates (momentum), then apply them.
            self._db[k] = self._momentum * self._db[k] + (1 - self._momentum) * db
            self._dw[k] = self._momentum * self._dw[k] + (1 - self._momentum) * dw
            self._b[k] += self._eta * self._db[k]
            self._w[k] += self._eta * self._dw[k]
    def predict(self, batch):
        """Returns the network output for batch, shape (n_samples, sizes[-1])."""
        assert batch.shape[1:] == tuple(self._sizes[0:1])
        o = batch # layer outputs
        for k in range(len(self._w)):
            i = np.dot(o, self._w[k]) + self._b[k]
            o = self._types[k].fwd(i)
        return o
    def copy(self):
        """Returns an independent deep copy (parameters and update buffers)."""
        c = NN([s for s in self._sizes], [t for t in self._types],
               self._init_w_scale, self._init_b_scale, self._eta, self._momentum)
        c._w = [w.copy() for w in self._w]
        c._b = [b.copy() for b in self._b]
        c._dw = [dw.copy() for dw in self._dw]
        c._db = [db.copy() for db in self._db]
        return c
def test_gradients():
    """Finite-difference check of the analytic gradients stored by NN.train."""
    sizes = [5, 3, 2]
    eps = 1e-9
    net = NN(sizes, [Atan] * (len(sizes) - 1), init_w_scale=0.1)
    # Snapshot the pre-training parameters; the perturbation loops below
    # restore the network to these before each finite-difference probe.
    weights = [x.copy() for x in net._w]
    biases = [x.copy() for x in net._b]
    sample = np.random.random((1, sizes[0]))
    target = np.ones((1, sizes[-1]))

    def _squared_error():
        return 0.5 * np.sum((net.predict(sample) - target) ** 2)

    base_err = _squared_error()
    net.train(sample, target)
    grad_b = [x.copy() for x in net._last_gb]
    grad_w = [x.copy() for x in net._last_gw]
    for layer in range(len(sizes) - 1):
        # Check every weight gradient of this layer.
        for row in range(sizes[layer]):
            for col in range(sizes[layer + 1]):
                perturbed = weights[layer].copy()
                perturbed[row, col] += eps
                net._w = [x for x in weights]
                net._w[layer] = perturbed
                net._b = biases
                err = _squared_error()
                assert np.allclose(grad_w[layer][row, col],
                                   (base_err - err) / eps, rtol=1e-3)
        # Check every bias gradient of this layer.
        for col in range(sizes[layer + 1]):
            perturbed = biases[layer].copy()
            perturbed[col] += eps
            net._b = [x for x in biases]
            net._b[layer] = perturbed
            net._w = weights
            err = _squared_error()
            assert np.allclose(grad_b[layer][col],
                               (base_err - err) / eps, rtol=1e-3)
def test_train():
    """Checks that training drives the prediction error down.

    We learn to predict two outputs, each latched to one particular input
    feature, with opposite signs.
    """
    sizes = [10, 5, 2]
    n_samples = 200
    batch_size = 50
    training_input = np.random.random((n_samples, sizes[0]))
    for j in range(sizes[0]):
        n = NN(sizes, [Tanh] * (len(sizes) - 1), init_w_scale=0.1, eta=0.5, momentum=0.95)
        # Floor division keeps the column index an int under Python 3 as
        # well ('/' on ints yields a float there, breaking the indexing).
        training_output = np.vstack((0.999 * ((training_input[:, j] > 0.5) * 2 - 1),
                                     0.999 * ((training_input[:, (j + sizes[0] // 2) % sizes[0]] < 0.5) * 2 - 1))).T
        e0 = np.sqrt(np.mean((n.predict(training_input) - training_output) ** 2, axis=0))
        for _ in range(1000):
            for begin in range(0, n_samples, batch_size):
                end = min(n_samples, begin + batch_size)
                n.train(training_input[begin:end], training_output[begin:end])
        e1 = np.sqrt(np.mean((n.predict(training_input) - training_output) ** 2, axis=0))
        # with these parameters (batching and momentum are particularly useful),
        # training should reduce error to about 5% of the initial
        assert np.all(e1 < e0 * 0.1)
def test_copy():
    """Checks that NN.copy() yields an independent but equivalent network."""
    sizes = [10, 5, 2]
    n_samples = 200
    batch_size = 50
    training_input = np.random.random((n_samples, sizes[0]))
    n = NN(sizes, [Tanh] * (len(sizes) - 1), init_w_scale=0.1, eta=0.5, momentum=0.95)
    # Floor division keeps the column index an int under Python 3 as well
    # ('/' on ints yields a float there, breaking the indexing).
    training_output = np.vstack((0.999 * ((training_input[:, 0] > 0.5) * 2 - 1),
                                 0.999 * ((training_input[:, sizes[0] // 2] < 0.5) * 2 - 1))).T
    n_out_0 = n.predict(training_input)
    cn = n.copy()
    cn_out_0 = cn.predict(training_input)
    assert np.array_equal(n_out_0, cn_out_0)
    # do some training of the original network
    for _ in range(10):
        for begin in range(0, n_samples, batch_size):
            end = min(n_samples, begin + batch_size)
            n.train(training_input[begin:end], training_output[begin:end])
    # the original network has changed
    n_out_1 = n.predict(training_input)
    assert not np.array_equal(n_out_0, n_out_1)
    # but the copy is intact
    assert np.array_equal(cn_out_0, cn.predict(training_input))
    # Now do the exact same training on the copy
    for _ in range(10):
        for begin in range(0, n_samples, batch_size):
            end = min(n_samples, begin + batch_size)
            cn.train(training_input[begin:end], training_output[begin:end])
    cn_out_1 = cn.predict(training_input)
    # now the output of the copy is the same as the output of the original
    assert np.array_equal(n_out_1, cn_out_1)
    # Train some more and check they still go together
    for _ in range(10):
        for begin in range(0, n_samples, batch_size):
            end = min(n_samples, begin + batch_size)
            n.train(training_input[begin:end], training_output[begin:end])
            cn.train(training_input[begin:end], training_output[begin:end])
    assert np.array_equal(n.predict(training_input), cn.predict(training_input))
if __name__ == "__main__":
    test = True
    if test:
        test_gradients()
        test_train()
        test_copy()
        # print() call syntax works for a single argument under both
        # Python 2 and 3, unlike the Python-2-only print statement.
        print("Passed all tests!")
| |
# -*- coding: utf-8 -*-
# pylint: disable=star-args, too-many-arguments, fixme
""" This module contains classes for handling DIDL-Lite metadata.
This is the XML schema used by Sonos for carrying metadata representing many
items such as tracks, playlists, composers, albums etc.
"""
# It tries to follow the class hierarchy provided by the DIDL-Lite schema
# described in the UPnP Spec, especially that for the ContentDirectory Service
# Although Sonos uses ContentDirectory v1, the document for v2 is more helpful:
# http://upnp.org/specs/av/UPnP-av-ContentDirectory-v2-Service.pdf
from __future__ import unicode_literals
import sys
import warnings
warnings.simplefilter('always', DeprecationWarning)
import textwrap
from .xml import XML, ns_tag
from .exceptions import DIDLMetadataError
from .utils import really_unicode
###############################################################################
# MISC HELPER FUNCTIONS #
###############################################################################
def to_didl_string(*args):
    """ Convert any number of DIDLObjects to a unicode xml string.

    Args:
        *args (DidlObject): One or more DidlObject (or subclass) instances

    Returns:
        str: A unicode string of the form <DIDL-Lite ...>...</DIDL-Lite>
            representing the instances
    """
    root = XML.Element(
        'DIDL-Lite',
        {
            'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
            'xmlns:dc': "http://purl.org/dc/elements/1.1/",
            'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
        })
    for didl_object in args:
        root.append(didl_object.to_element())
    # Python 2's ElementTree does not accept encoding='unicode'.
    if sys.version_info[0] == 2:
        return XML.tostring(root)
    return XML.tostring(root, encoding='unicode')
def from_didl_string(string):
    """ Convert a unicode xml string to a list of DIDLObjects.

    Arg:
        string (str): A unicode string containing an xml representation of one
            or more DIDL-Lite items (in the form <DIDL-Lite ...>
            ...</DIDL-Lite> )

    Returns:
        list: A list of one or more instances of DIDLObject or a subclass
    """
    root = XML.fromstring(string.encode('utf-8'))
    result = []
    for child in root:
        # <desc> elements are allowed as an immediate child of <DIDL-Lite>
        # according to the spec, but I have not seen one there in Sonos, so
        # we treat them as illegal. May need to fix this if this
        # causes problems.
        if not (child.tag.endswith('item') or child.tag.endswith('container')):
            raise DIDLMetadataError("Illegal child of DIDL element: <%s>"
                                    % child.tag)
        item_class = child.findtext(ns_tag('upnp', 'class'))
        try:
            cls = _DIDL_CLASS_TO_CLASS[item_class]
        except KeyError:
            raise DIDLMetadataError("Unknown UPnP class: %s" % item_class)
        result.append(cls.from_element(child))
    return result
###############################################################################
# DIDL RESOURCE #
###############################################################################
class DidlResource(object):
    """ Identifies a resource, typically some type of a binary asset, such as
    a song.

    A 'res' element contains a uri that identifies the resource.
    """

    # Adapted from a class taken from the Python Brisa project - MIT licence.

    # pylint: disable=too-many-instance-attributes
    def __init__(self, uri, protocol_info, import_uri=None, size=None,
                 duration=None, bitrate=None, sample_frequency=None,
                 bits_per_sample=None, nr_audio_channels=None, resolution=None,
                 color_depth=None, protection=None):
        """ Constructor for the Resource class.

        Args:
            uri (str): value of the res tag, typically a URI. It MUST be
                properly escaped URIs as described in RFC 239
            protocol_info (str):  A string in the form a:b:c:d that
                identifies the streaming or transport protocol for
                transmitting the resource. A value is required. For more
                information see section 2.5.2 at
                http://upnp.org/specs/av/UPnP-av-ConnectionManager-v1-Service.pdf
            import_uri (str, optional): uri locator for resource update
            size (int, optional): size in bytes
            duration (str, optional): duration of the playback of the res
                at normal speed (H*:MM:SS:F* or H*:MM:SS:F0/F1)
            bitrate (int, optional): bitrate in bytes/second
            sample_frequency (int, optional): sample frequency in Hz
            bits_per_sample (int, optional): bits per sample
            nr_audio_channels (int, optional): number of audio channels
            resolution (str, optional): resolution of the resource (X*Y)
            color_depth (int, optional): color depth in bits
            protection (str, optional): statement of protection type
        """
        # Of these attributes, only uri, protocol_info and duration have been
        # spotted 'in the wild'
        self.uri = uri
        # Protocol info is in the form a:b:c:d - see
        # sec 2.5.2 at
        # http://upnp.org/specs/av/UPnP-av-ConnectionManager-v1-Service.pdf
        self.protocol_info = protocol_info
        self.import_uri = import_uri
        self.size = size
        self.duration = duration
        self.bitrate = bitrate
        self.sample_frequency = sample_frequency
        self.bits_per_sample = bits_per_sample
        self.nr_audio_channels = nr_audio_channels
        self.resolution = resolution
        self.color_depth = color_depth
        self.protection = protection

    @classmethod
    def from_element(cls, element):
        """ Set the resource properties from a <res> element.

        Arg:
            element (Element): An ElementTree Element

        Raises:
            DIDLMetadataError: if the mandatory protocolInfo attribute is
                missing.
            ValueError: if a numeric attribute cannot be parsed as an int.
        """
        def _int_helper(name):
            """Try to convert the name attribute to an int, or None."""
            result = element.get(name)
            if result is not None:
                try:
                    return int(result)
                except ValueError:
                    raise ValueError(
                        'Could not convert {0} to an integer'.format(name))
            else:
                return None

        content = {}
        # required
        content['protocol_info'] = element.get('protocolInfo')
        if content['protocol_info'] is None:
            # Use the module's metadata error (a subclass of Exception, so
            # existing callers catching Exception still work) rather than a
            # bare Exception, consistent with from_didl_string.
            raise DIDLMetadataError('Could not create Resource from Element: '
                                    'protocolInfo not found (required).')
        # Optional
        content['import_uri'] = element.get('importUri')
        content['size'] = _int_helper('size')
        content['duration'] = element.get('duration')
        content['bitrate'] = _int_helper('bitrate')
        content['sample_frequency'] = _int_helper('sampleFrequency')
        content['bits_per_sample'] = _int_helper('bitsPerSample')
        content['nr_audio_channels'] = _int_helper('nrAudioChannels')
        content['resolution'] = element.get('resolution')
        content['color_depth'] = _int_helper('colorDepth')
        content['protection'] = element.get('protection')
        content['uri'] = element.text
        return cls(**content)

    def __repr__(self):
        return '<{0} \'{1}\' at {2}>'.format(self.__class__.__name__,
                                             self.uri,
                                             hex(id(self)))

    def __str__(self):
        return self.__repr__()

    def to_element(self):
        """ Return an ElementTree Element based on this resource.

        Raises:
            DIDLMetadataError: if protocol_info is not set.
        """
        if not self.protocol_info:
            # See the note in from_element about using DIDLMetadataError.
            raise DIDLMetadataError('Could not create Element for this '
                                    'resource: protocolInfo not set '
                                    '(required).')
        root = XML.Element('res')

        # Required
        root.attrib['protocolInfo'] = self.protocol_info
        # Optional
        if self.import_uri is not None:
            root.attrib['importUri'] = self.import_uri
        if self.size is not None:
            root.attrib['size'] = str(self.size)
        if self.duration is not None:
            root.attrib['duration'] = self.duration
        if self.bitrate is not None:
            root.attrib['bitrate'] = str(self.bitrate)
        if self.sample_frequency is not None:
            root.attrib['sampleFrequency'] = str(self.sample_frequency)
        if self.bits_per_sample is not None:
            root.attrib['bitsPerSample'] = str(self.bits_per_sample)
        if self.nr_audio_channels is not None:
            root.attrib['nrAudioChannels'] = str(self.nr_audio_channels)
        if self.resolution is not None:
            root.attrib['resolution'] = self.resolution
        if self.color_depth is not None:
            root.attrib['colorDepth'] = str(self.color_depth)
        if self.protection is not None:
            root.attrib['protection'] = self.protection

        root.text = self.uri
        return root
###############################################################################
# BASE OBJECTS #
###############################################################################
# a mapping which will be used to look up the relevant class from the
# DIDL item class
_DIDL_CLASS_TO_CLASS = {}
class DidlMetaClass(type):
"""Meta class for all Didl objects."""
def __new__(mcs, name, bases, attrs):
"""Create a new instance.
Args:
name: Name of the class
bases: Base classes (tuple)
attrs: Attributes defined for the class
"""
new_cls = super(DidlMetaClass, mcs).__new__(mcs, name, bases, attrs)
# Register all subclasses with the global _DIDL_CLASS_TO_CLASS mapping
item_class = attrs.get('item_class', None)
if item_class is not None:
_DIDL_CLASS_TO_CLASS[item_class] = new_cls
return new_cls
# Py2/3 compatible way of declaring the metaclass
class DidlObject(DidlMetaClass(str('DidlMetaClass'), (object,), {})):
"""Abstract base class for all DIDL-Lite items.
You should not need to instantiate this.
Attributes:
item_class (str): The DIDL Lite class for this object
tag (str): The XML element tag name used for this instance
_translation (dict): A dict used to translate between instance
attribute names and XML tags/namespaces. It also serves to define
the allowed tags/attributes for this instance. Overridden and
extended by subclasses.
"""
item_class = 'object'
tag = 'item'
# key: attribute_name: (ns, tag)
_translation = {
'creator': ('dc', 'creator'),
'write_status': ('upnp', 'writeStatus'),
}
    def __init__(self, title, parent_id, item_id, restricted=True,
                 resources=None, desc='RINCON_AssociatedZPUDN', **kwargs):
        r"""Construct and initialize a DidlObject.

        Args:
            title (str): The title for the item
            parent_id (str): The parent ID for the item
            item_id (str): The ID for the item
            restricted (bool): Whether the item can be modified
            resources (list): A list of resources for this object
            desc (str): A didl descriptor, default RINCON_AssociatedZPUDN. This
                is not the same as "description"! It is used for identifying
                the relevant music service
            **kwargs: Extra metadata. What is allowed depends on the
                _translation class attribute, which in turn depends on the DIDL
                class

        Raises:
            ValueError: if a keyword argument is not declared in _translation.
        """
        # All didl objects *must* have a title, a parent_id and an item_id
        # so we specify these as required args in the constructor signature
        # to ensure that we get them. Other didl object properties are
        # optional, so can be passed as kwargs.
        # The content of _translation is adapted from the list in table C at
        # http://upnp.org/specs/av/UPnP-av-ContentDirectory-v2-Service.pdf
        # Not all properties referred to there are catered for, since Sonos
        # does not use some of them.

        # pylint: disable=super-on-old-class
        super(DidlObject, self).__init__()
        self.title = title
        self.parent_id = parent_id
        self.item_id = item_id
        # Restricted is a compulsory attribute, but is almost always True for
        # Sonos. (Only seen it 'false' when browsing favorites)
        self.restricted = restricted
        # Resources is multi-valued, and dealt with separately
        self.resources = [] if resources is None else resources
        # According to the spec, there may be one or more desc values. Sonos
        # only seems to use one, so we won't bother with a list
        self.desc = desc
        # Any remaining metadata must be declared in _translation; everything
        # else is rejected so typos do not silently create dead attributes.
        for key, value in kwargs.items():
            # For each attribute, check to see if this class allows it
            if key not in self._translation:
                raise ValueError(
                    'The key \'{0}\' is not allowed as an argument. Only '
                    'these keys are allowed: parent_id, item_id, title, '
                    'restricted, resources, desc'
                    ' {1}'.format(key, ', '.join(self._translation.keys())))
            # It is an allowed attribute. Set it as an attribute on self, so
            # that it can be accessed as Classname.attribute in the normal
            # way.
            setattr(self, key, value)
@classmethod
def from_element(cls, element):
"""Create an instance of this class from an ElementTree xml Element.
An alternative constructor. The element must be a DIDL-Lite <item> or
<container> element, and must be properly namespaced.
Arg:
xml (Element): An :py:class:`xml.etree.ElementTree.Element` object.
"""
# Check we have the right sort of element. tag can be an empty string
# which indicates that any tag is allowed (see eg the musicAlbum DIDL
# class)
if not element.tag.endswith(cls.tag):
raise DIDLMetadataError(
"Wrong element. Expected '<{0}>',"
" got '<{1}>'".format(cls.tag, element.tag))
# and that the upnp matches what we are expecting
item_class = element.find(ns_tag('upnp', 'class')).text
if item_class != cls.item_class:
raise DIDLMetadataError(
"UPnP class is incorrect. Expected '{0}',"
" got '{1}'".format(cls.item_class, item_class))
# parent_id, item_id and restricted are stored as attibutes on the
# element
item_id = really_unicode(element.get('id', None))
if item_id is None:
raise DIDLMetadataError("Missing id attribute")
parent_id = really_unicode(element.get('parentID', None))
if parent_id is None:
raise DIDLMetadataError("Missing parentID attribute")
restricted = element.get('restricted', None)
if restricted is None:
raise DIDLMetadataError("Missing restricted attribute")
restricted = True if restricted in [1, 'true', 'True'] else False
# There must be a title. According to spec, it should be the first
# child, but Sonos does not abide by this
title_elt = element.find(ns_tag('dc', 'title'))
if title_elt is None:
raise DIDLMetadataError(
"Missing title element")
title = really_unicode(title_elt.text)
# Deal with any resource elements
resources = []
for res_elt in element.findall(ns_tag('', 'res')):
resources.append(
DidlResource.from_element(res_elt))
# and the desc element (There is only one in Sonos)
desc = element.findtext(ns_tag('', 'desc'))
# Get values of the elements listed in _translation and add them to
# the content dict
content = {}
for key, value in cls._translation.items():
result = element.findtext(ns_tag(*value))
if result is not None:
# We store info as unicode internally.
content[key] = really_unicode(result)
# Convert type for original track number
if content.get('original_track_number') is not None:
content['original_track_number'] = \
int(content['original_track_number'])
# Now pass the content dict we have just built to the main
# constructor, as kwargs, to create the object
return cls(title=title, parent_id=parent_id, item_id=item_id,
restricted=restricted, resources=resources, desc=desc,
**content)
@classmethod
def from_dict(cls, content):
"""Create an instance from a dict.
An alternative constructor. Equivalent to DidlObject(**content).
Arg:
content (dict): Dict containing metadata information.Required and
valid arguments are the same as for the ``__init__`` method.
"""
# Do we really need this constructor? Could use DidlObject(**content)
# instead.
return cls(**content)
def __eq__(self, playable_item):
"""Compare with another ``playable_item``.
Returns:
(bool): True if items are equal, else False
"""
if not isinstance(playable_item, DidlObject):
return False
return self.to_dict() == playable_item.to_dict()
def __ne__(self, playable_item):
"""Compare with another ``playable_item``.
Returns:
(bool): True if items are unequal, else False
"""
if not isinstance(playable_item, DidlObject):
return True
return self.to_dict() != playable_item.to_dict()
def __repr__(self):
"""Return the repr value for the item.
The repr is of the form::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set,
or ``str(content)``. The output is also cleared of non-ascii
characters.
"""
# 40 originates from terminal width (78) - (15) for address part and
# (19) for the longest class name and a little left for buffer
if self.title is not None:
middle = self.title.encode('ascii', 'replace')[0:40]
else:
middle = str(self.to_dict).encode('ascii', 'replace')[0:40]
return '<{0} \'{1}\' at {2}>'.format(self.__class__.__name__,
middle,
hex(id(self)))
def __str__(self):
"""Return the str value for the item::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set, or
``str(content)``. The output is also cleared of non-ascii characters.
"""
return self.__repr__()
def to_dict(self):
"""Return the dict representation of the instance."""
content = {}
# Get the value of each attribute listed in _translation, and add it
# to the content dict
for key in self._translation:
if hasattr(self, key):
content[key] = getattr(self, key)
# also add parent_id, item_id, restricted, title and resources because
# they are not listed in _translation
content['parent_id'] = self.parent_id
content['item_id'] = self.item_id
content['restricted'] = self.restricted
content['title'] = self.title
if self.resources != []:
content['resources'] = self.resources
content['desc'] = self.desc
return content
    def to_element(self, include_namespaces=False):
        """Return an ElementTree Element representing this instance.

        Arg:
            include_namespaces (bool, optional): If True, include xml
                namespace attributes on the root element

        Return:
            An ElementTree Element

        .. code :: xml

         <DIDL-Lite ..NS_INFO..>
           <item id="...self.item_id..."
             parentID="...cls.parent_id..." restricted="true">
             <dc:title>...self.title...</dc:title>
             <upnp:class>...self.item_class...</upnp:class>
             <desc id="cdudn"
               nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
               RINCON_AssociatedZPUDN
             </desc>
           </item>
         </DIDL-Lite>
        """
        elt_attrib = {}
        if include_namespaces:
            # Namespace declarations belong only on the root element of the
            # DIDL-Lite document.
            elt_attrib.update({
                'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
                'xmlns:dc': "http://purl.org/dc/elements/1.1/",
                'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
            })
        # id, parentID and restricted are XML *attributes*, not sub-elements.
        elt_attrib.update({
            'parentID': self.parent_id,
            'restricted': 'true' if self.restricted else 'false',
            'id': self.item_id
        })
        elt = XML.Element(self.tag, elt_attrib)
        # Add the title, which should always come first, according to the spec
        XML.SubElement(elt, 'dc:title').text = self.title
        # Add in any resources
        for resource in self.resources:
            elt.append(resource.to_element())
        # Add the rest of the metadata attributes (i.e all those listed in
        # _translation) as sub-elements of the item element.
        for key, value in self._translation.items():
            if hasattr(self, key):
                # Some attributes have a namespace of '', which means they
                # are in the default namespace. We need to handle those
                # carefully
                tag = "%s:%s" % value if value[0] else "%s" % value[1]
                XML.SubElement(elt, tag).text = ("%s" % getattr(self, key))
        # Now add in the item class
        XML.SubElement(elt, 'upnp:class').text = self.item_class
        # And the desc element (Sonos uses a single desc element identifying
        # the relevant music service).
        desc_attrib = {'id': 'cdudn', 'nameSpace':
                       'urn:schemas-rinconnetworks-com:metadata-1-0/'}
        desc_elt = XML.SubElement(elt, 'desc', desc_attrib)
        desc_elt.text = self.desc
        return elt
###############################################################################
# OBJECT.ITEM HIERARCHY #
###############################################################################
class DidlItem(DidlObject):
    """A basic content directory item."""

    # The spec allows for an optional 'refID' attribute, but we do not
    # handle it.
    item_class = 'object.item'
    # Extend the parent's element-name translation table with the
    # item-specific metadata fields (building a fresh dict, so the parent's
    # table is not mutated).
    _translation = dict(
        DidlObject._translation,
        stream_content=('r', 'streamContent'),
        radio_show=('r', 'radioShowMd'),
        album_art_uri=('upnp', 'albumArtURI'),
    )
class DidlAudioItem(DidlItem):
    """An audio item."""

    item_class = 'object.item.audioitem'
    # Parent table plus the audio-item metadata fields.
    _translation = dict(
        DidlItem._translation,
        genre=('upnp', 'genre'),
        description=('dc', 'description'),
        long_description=('upnp', 'longDescription'),
        publisher=('dc', 'publisher'),
        language=('dc', 'language'),
        relation=('dc', 'relation'),
        rights=('dc', 'rights'),
    )
# Browsing Sonos Favorites produces some odd looking DIDL-Lite. The object
# class is 'object.itemobject.item.sonos-favorite', which is probably a typo
# in Sonos' code somewhere.
# Here is an example:
# <?xml version="1.0" ?>
# <DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
# xmlns:dc="http://purl.org/dc/elements/1.1/"
# xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
# xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/">
# <item id="FV:2/13" parentID="FV:2" restricted="false">
# <dc:title>Shake It Off</dc:title>
# <upnp:class>object.itemobject.item.sonos-favorite</upnp:class>
# <r:ordinal>4</r:ordinal>
# <res protocolInfo="sonos.com-spotify:*:audio/x-spotify:*">
# x-sonos-spotify:spotify%3atrack%3a7n.......?sid=9&flags=32</res>
# <upnp:albumArtURI>http://o.scd.....</upnp:albumArtURI>
# <r:type>instantPlay</r:type>
# <r:description>By Taylor Swift</r:description>
# <r:resMD><DIDL-Lite xmlns:dc="
# http://purl.org/dc/elements/1.1/"
# xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
# xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
# xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">
# <item id="00030020spotify%3atrack%3a7n9Q6b...74uCtajkddPt"
# parentID="0006006ctoplist%2ftracks%2fregion%2fGB"
# restricted="true"><dc:title>Shake It Off
# </dc:title><upnp:class>object.item.audioItem.musicTrack
# </upnp:class><desc id="cdudn"
# nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
# SA_RINCON2311_XXXXX</desc>
# </item>
# </DIDL-Lite>
# </r:resMD>
# </item>
# </DIDL-Lite>
# Note the r:ordinal, r:type; r:description, r:resMD elements which are not
# seen (?) anywhere else
# We're ignoring this for the moment!
class DidlMusicTrack(DidlAudioItem):
    """Class that represents a music library track."""

    item_class = 'object.item.audioItem.musicTrack'
    # name: (ns, tag)
    _translation = dict(
        DidlAudioItem._translation,
        artist=('upnp', 'artist'),
        album=('upnp', 'album'),
        original_track_number=('upnp', 'originalTrackNumber'),
        playlist=('upnp', 'playlist'),
        contributor=('dc', 'contributor'),
        date=('dc', 'date'),
    )
class DidlAudioBroadcast(DidlAudioItem):
    """Class that represents an audio broadcast."""

    item_class = 'object.item.audioItem.audioBroadcast'
    # Broadcast-specific metadata fields, merged over the parent table.
    _translation = dict(
        DidlAudioItem._translation,
        region=('upnp', 'region'),
        radio_call_sign=('upnp', 'radioCallSign'),
        radio_station_id=('upnp', 'radioStationID'),
        channel_nr=('upnp', 'channelNr'),
    )
class DidlAudioBroadcastFavorite(DidlAudioBroadcast):
    """Class that represents an audio broadcast sonos favorite."""

    # Note: The sonos-favorite part of the class spec obviously isn't part of
    # the DIDL spec, so just assume that it has the same definition as the
    # regular object.item.audioItem.audioBroadcast
    item_class = 'object.item.audioItem.audioBroadcast.sonos-favorite'
###############################################################################
# OBJECT.CONTAINER HIERARCHY #
###############################################################################
class DidlContainer(DidlObject):
    """Class that represents a music library container."""

    item_class = 'object.container'
    # Containers are serialized with a <container> tag rather than <item>.
    tag = 'container'
    # We do not implement createClass or searchClass. Not used by Sonos??
    # TODO: handle the 'childCount' element.
class DidlAlbum(DidlContainer):
    """A content directory album."""

    item_class = 'object.container.album'
    # name: (ns, tag)
    _translation = dict(
        DidlContainer._translation,
        description=('dc', 'description'),
        long_description=('upnp', 'longDescription'),
        publisher=('dc', 'publisher'),
        contributor=('dc', 'contributor'),
        date=('dc', 'date'),
        relation=('dc', 'relation'),
        rights=('dc', 'rights'),
    )
class DidlMusicAlbum(DidlAlbum):
    """Class that represents a music library album."""

    item_class = 'object.container.album.musicAlbum'
    # According to the spec, all musicAlbums should be represented in
    # XML by a <container> tag. Sonos sometimes uses <container> and
    # sometimes uses <item>. Set the tag type to '' to indicate that
    # either is allowed.
    tag = ''
    # name: (ns, tag)
    # NB: deliberately extends the *DidlAudioItem* table (not DidlAlbum's).
    # pylint: disable=protected-access
    _translation = dict(
        DidlAudioItem._translation,
        artist=('upnp', 'artist'),
        genre=('upnp', 'genre'),
        producer=('upnp', 'producer'),
        toc=('upnp', 'toc'),
        album_art_uri=('upnp', 'albumArtURI'),
    )
class DidlMusicAlbumFavorite(DidlAlbum):
    """Class that represents a Sonos favorite music library album.

    This class is not part of the DIDL spec and is Sonos specific.
    """

    item_class = 'object.container.album.musicAlbum.sonos-favorite'
    # Despite the fact that the item derives from object.container, its
    # XML does not include a <container> tag, but an <item> tag. This seems
    # to be an error by Sonos.
    tag = 'item'
class DidlMusicAlbumCompilation(DidlAlbum):
    """Class that represents a Sonos favorite music library compilation.

    This class is not part of the DIDL spec and is Sonos specific.
    """

    # These classes appear when browsing the library and Sonos has been set
    # to group albums using compilations.
    # See https://github.com/SoCo/SoCo/issues/280
    item_class = 'object.container.album.musicAlbum.compilation'
    tag = 'container'
class DidlPerson(DidlContainer):
    """A content directory class representing a person."""

    item_class = 'object.container.person'
    # Person containers add only the language field.
    _translation = dict(
        DidlContainer._translation,
        language=('dc', 'language'),
    )
class DidlComposer(DidlPerson):
    """Class that represents a music library composer."""

    # Not in the DIDL-Lite spec. Sonos specific??
    item_class = 'object.container.person.composer'
class DidlMusicArtist(DidlPerson):
    """Class that represents a music library artist."""

    item_class = 'object.container.person.musicArtist'
    # name: (ns, tag)
    _translation = dict(
        DidlPerson._translation,
        genre=('upnp', 'genre'),
        artist_discography_uri=('upnp', 'artistDiscographyURI'),
    )
class DidlAlbumList(DidlContainer):
    """Class that represents a music library album list."""

    # This does not appear (that I can find) in the DIDL-Lite specs.
    # Presumably Sonos specific
    item_class = 'object.container.albumlist'
class DidlPlaylistContainer(DidlContainer):
    """Class that represents a music library play list."""

    item_class = 'object.container.playlistContainer'
    # name: (ns, tag)
    _translation = dict(
        DidlContainer._translation,
        artist=('upnp', 'artist'),
        genre=('upnp', 'genre'),
        long_description=('upnp', 'longDescription'),
        producer=('dc', 'producer'),
        contributor=('dc', 'contributor'),
        description=('dc', 'description'),
        date=('dc', 'date'),
        language=('dc', 'language'),
        rights=('dc', 'rights'),
    )
class DidlSameArtist(DidlPlaylistContainer):
    """Class that represents all tracks by a single artist.

    This type is returned by browsing an artist or a composer
    """

    # Not in the DIDL-Lite spec. Sonos specific?
    item_class = 'object.container.playlistContainer.sameArtist'
class DidlGenre(DidlContainer):
    """A content directory class representing a general genre."""

    item_class = 'object.container.genre'
    # name: (ns, tag)
    _translation = dict(
        DidlContainer._translation,
        genre=('upnp', 'genre'),
        long_description=('upnp', 'longDescription'),
        description=('dc', 'description'),
    )
class DidlMusicGenre(DidlGenre):
    """Class that represents a music genre."""

    # Inherits the full translation table from DidlGenre unchanged.
    item_class = 'object.container.genre.musicGenre'
###############################################################################
# SPECIAL LISTS #
###############################################################################
class ListOfMusicInfoItems(list):
    """Abstract container class for a list of music information items.

    Behaves as a plain list of the items while also carrying the
    browse/search metadata (number returned, total matches, update id),
    which is exposed via the named properties below.
    """

    def __init__(self, items, number_returned, total_matches, update_id):
        """
        Args:
            items (list): The music info items.
            number_returned: The number of matches returned.
            total_matches: The total number of matches.
            update_id: The content directory update id.
        """
        super(ListOfMusicInfoItems, self).__init__(items)
        # Keep the metadata (plus a copy of the items for the deprecated
        # ['item_list'] access) in a separate dict.
        self._metadata = {
            'item_list': list(items),
            'number_returned': number_returned,
            'total_matches': total_matches,
            'update_id': update_id,
        }

    def __getitem__(self, key):
        """Legacy get metadata by string key or list item(s) by index.

        DEPRECATION: This overriding form of __getitem__ will be removed in
        the 3rd release after 0.8. The metadata can be fetched via the named
        attributes
        """
        # ``key`` may be an int index, a slice, or (deprecated) a metadata
        # string. Bug fix: slices are unhashable, so testing
        # ``key in self._metadata`` directly raised TypeError for e.g.
        # ``result[0:2]`` (on Python 3, where list.__getslice__ no longer
        # intercepts slicing); guard the membership test.
        try:
            is_metadata_key = key in self._metadata
        except TypeError:
            is_metadata_key = False
        if is_metadata_key:
            if key == 'item_list':
                message = """
                Calling ['item_list'] on search results to obtain the objects
                is no longer necessary, since the object returned from searches
                now is a list. This deprecated way of getting the items will
                be removed from the third release after 0.8."""
            else:
                message = """
                Getting metadata items by indexing the search result like a
                dictionary ['{0}'] is deprecated. Please use the named
                attribute {1}.{0} instead. The deprecated way of retrieving the
                metadata will be removed from the third release after
                0.8""".format(key, self.__class__.__name__)
            message = textwrap.dedent(message).replace('\n', ' ').lstrip()
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return self._metadata[key]
        else:
            return super(ListOfMusicInfoItems, self).__getitem__(key)

    @property
    def number_returned(self):
        """The number of returned matches."""
        return self._metadata['number_returned']

    @property
    def total_matches(self):
        """The number of total matches."""
        return self._metadata['total_matches']

    @property
    def update_id(self):
        """The update ID."""
        return self._metadata['update_id']
class SearchResult(ListOfMusicInfoItems):
    """Container class that represents a search or browse result.

    (browse is just a special case of search)
    """

    def __init__(self, items, search_type, number_returned,
                 total_matches, update_id):
        """Initialise the result and record which search produced it."""
        super(SearchResult, self).__init__(
            items, number_returned, total_matches, update_id)
        self._metadata['search_type'] = search_type

    def __repr__(self):
        """Return a repr including the items and the search type."""
        items_repr = super(SearchResult, self).__repr__()
        return '{0}(items={1}, search_type=\'{2}\')'.format(
            self.__class__.__name__, items_repr, self.search_type)

    @property
    def search_type(self):
        """The search type."""
        return self._metadata['search_type']
class Queue(ListOfMusicInfoItems):
    """Container class that represents a queue."""

    def __init__(self, items, number_returned, total_matches, update_id):
        """Initialise the queue contents and the browse metadata."""
        super(Queue, self).__init__(
            items, number_returned, total_matches, update_id)

    def __repr__(self):
        """Return a repr of the form ``Queue(items=[...])``."""
        items_repr = super(Queue, self).__repr__()
        return '{0}(items={1})'.format(self.__class__.__name__, items_repr)
| |
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@PydevCodeAnalysisIgnore
from __future__ import absolute_import
from stubs.stubcollector import stubgenerator
@stubgenerator
def makeFloat(collector):
    """Register interpreter stubs that model the builtin ``float`` type.

    Declares opaque "primitive" stubs for the float operations (each just
    returns a fresh float/bool allocation), attaches constant-folding rules
    to them, and then wires the corresponding ``__dunder__`` methods onto
    ``float`` so analyses can model float arithmetic and comparisons.
    """
    # Local shorthands for the collector's registration decorators.
    llfunc = collector.llfunc
    export = collector.export
    replaceAttr = collector.replaceAttr  # NOTE(review): bound but unused here
    fold = collector.fold
    staticFold = collector.staticFold
    attachPtr = collector.attachPtr

    #############################
    ### Primitive conversions ###
    #############################

    # int -> float widening; staticFold lets the analysis evaluate the
    # conversion with the real float() when the argument is a
    # compile-time constant.
    @staticFold(lambda i: float(i))
    @llfunc(primitive=True)
    def prim_int_to_float(i):
        return allocate(float)

    ##################################
    ### Primitive float operations ###
    ##################################

    # Unary ops: the result is simply an opaque float allocation.
    @export
    @staticFold(lambda a: +a)
    @llfunc(primitive=True)
    def prim_float_pos(a):
        return allocate(float)

    @export
    @staticFold(lambda a: -a)
    @llfunc(primitive=True)
    def prim_float_neg(a):
        return allocate(float)

    # Binary arithmetic ops: each folds with the corresponding Python
    # operator when both operands are compile-time constants.
    @export
    @staticFold(lambda a, b: a+b)
    @llfunc(primitive=True)
    def prim_float_add(a, b):
        return allocate(float)

    @export
    @staticFold(lambda a, b: a-b)
    @llfunc(primitive=True)
    def prim_float_sub(a, b):
        return allocate(float)

    @export
    @staticFold(lambda a, b: a*b)
    @llfunc(primitive=True)
    def prim_float_mul(a, b):
        return allocate(float)

    @export  # HACK
    @staticFold(lambda a, b: a/b)
    @llfunc(primitive=True)
    def prim_float_div(a, b):
        return allocate(float)

    @export
    @staticFold(lambda a, b: a%b)
    @llfunc(primitive=True)
    def prim_float_mod(a, b):
        return allocate(float)

    @export  # HACK
    @staticFold(lambda a, b: a**b)
    @llfunc(primitive=True)
    def prim_float_pow(a, b):
        return allocate(float)

    # Comparisons: registered with @fold as well as @staticFold, and the
    # result is an opaque bool allocation.
    @export
    @staticFold(lambda a, b: a==b)
    @fold(lambda a, b: a==b)
    @llfunc(primitive=True)
    def prim_float_eq(a, b):
        return allocate(bool)

    @export
    @staticFold(lambda a, b: a!=b)
    @fold(lambda a, b: a!=b)
    @llfunc(primitive=True)
    def prim_float_ne(a, b):
        return allocate(bool)

    @export
    @staticFold(lambda a, b: a<b)
    @fold(lambda a, b: a<b)
    @llfunc(primitive=True)
    def prim_float_lt(a, b):
        return allocate(bool)

    @export
    @staticFold(lambda a, b: a<=b)
    @fold(lambda a, b: a<=b)
    @llfunc(primitive=True)
    def prim_float_le(a, b):
        return allocate(bool)

    @export
    @staticFold(lambda a, b: a>b)
    @fold(lambda a, b: a>b)
    @llfunc(primitive=True)
    def prim_float_gt(a, b):
        return allocate(bool)

    @export
    @staticFold(lambda a, b: a>=b)
    @fold(lambda a, b: a>=b)
    @llfunc(primitive=True)
    def prim_float_ge(a, b):
        return allocate(bool)

    ##############################
    ### Float object functions ###
    ##############################

    # Unary dunders delegate straight to the primitives.
    @attachPtr(float, '__pos__')
    @llfunc
    def float__pos__(self):
        return prim_float_pos(self)

    @attachPtr(float, '__neg__')
    @llfunc
    def float__neg__(self):
        return prim_float_neg(self)

    # TODO longs? booleans?
    # Coerce int operands to float; any other type passes through
    # unchanged (and then fails the isinstance(..., float) checks below).
    @llfunc
    def coerce_to_float(value):
        if isinstance(value, int):
            return prim_int_to_float(value)
        else:
            return value

    # Binary dunders follow the standard Python binary-operator protocol:
    # coerce, then either call the primitive or return NotImplemented.
    @attachPtr(float, '__add__')
    @llfunc
    def float__add__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_add(self, other)
        else:
            return NotImplemented

    @attachPtr(float, '__sub__')
    @llfunc
    def float__sub__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_sub(self, other)
        else:
            return NotImplemented

    @attachPtr(float, '__mul__')
    @llfunc
    def float__mul__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_mul(self, other)
        else:
            return NotImplemented

    @attachPtr(float, '__div__')
    @llfunc
    def float__div__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_div(self, other)
        else:
            return NotImplemented

    @attachPtr(float, '__mod__')
    @llfunc
    def float__mod__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_mod(self, other)
        else:
            return NotImplemented

    @attachPtr(float, '__pow__')
    @llfunc
    def float__pow__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_pow(self, other)
        else:
            return NotImplemented

    @attachPtr(float, '__eq__')
    @llfunc
    def float__eq__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_eq(self, other)
        else:
            return NotImplemented

    @attachPtr(float, '__ne__')
    @llfunc
    def float__ne__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_ne(self, other)
        else:
            return NotImplemented

    @attachPtr(float, '__lt__')
    @llfunc
    def float__lt__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_lt(self, other)
        else:
            return NotImplemented

    @attachPtr(float, '__le__')
    @llfunc
    def float__le__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_le(self, other)
        else:
            return NotImplemented

    @attachPtr(float, '__gt__')
    @llfunc
    def float__gt__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_gt(self, other)
        else:
            return NotImplemented

    @attachPtr(float, '__ge__')
    @llfunc
    def float__ge__(self, other):
        other = coerce_to_float(other)
        if isinstance(other, float):
            return prim_float_ge(self, other)
        else:
            return NotImplemented
| |
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
import json
import webob
from webob import exc
from xml.dom import minidom
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import api as compute_api
from nova import exception
from nova.network.security_group import openstack_driver
from nova.network.security_group import quantum_driver
from nova.openstack.common import log as logging
from nova.virt import netutils
# Module-level logger.
LOG = logging.getLogger(__name__)
# Policy checks for the 'security_groups' extension: `authorize` enforces
# the policy (raising on failure), `softauth` is the non-raising variant.
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
def make_rule(elem):
    """Populate *elem* with the XML template for a security group rule."""
    # id and parent_group_id are attributes of the rule element itself.
    elem.set('id')
    elem.set('parent_group_id')
    # Scalar sub-elements, selected by the same-named dict key.
    protocol_elem = xmlutil.SubTemplateElement(elem, 'ip_protocol')
    protocol_elem.text = 'ip_protocol'
    from_elem = xmlutil.SubTemplateElement(elem, 'from_port')
    from_elem.text = 'from_port'
    to_elem = xmlutil.SubTemplateElement(elem, 'to_port')
    to_elem.text = 'to_port'
    # Source-group sub-tree (for group-based rules).
    group_elem = xmlutil.SubTemplateElement(elem, 'group', selector='group')
    name_elem = xmlutil.SubTemplateElement(group_elem, 'name')
    name_elem.text = 'name'
    tenant_elem = xmlutil.SubTemplateElement(group_elem, 'tenant_id')
    tenant_elem.text = 'tenant_id'
    # CIDR sub-tree (for ip-range based rules).
    range_elem = xmlutil.SubTemplateElement(elem, 'ip_range',
                                            selector='ip_range')
    cidr_elem = xmlutil.SubTemplateElement(range_elem, 'cidr')
    cidr_elem.text = 'cidr'
def make_sg(elem):
    """Populate *elem* with the XML template for a security group."""
    # id, tenant_id and name are attributes of the group element.
    elem.set('id')
    elem.set('tenant_id')
    elem.set('name')
    desc_elem = xmlutil.SubTemplateElement(elem, 'description')
    desc_elem.text = 'description'
    # One <rule> per entry in the group's 'rules' list.
    rules_elem = xmlutil.SubTemplateElement(elem, 'rules')
    rule_elem = xmlutil.SubTemplateElement(rules_elem, 'rule',
                                           selector='rules')
    make_rule(rule_elem)
def _authorize_context(req):
    """Extract the nova request context and enforce the extension policy."""
    ctxt = req.environ['nova.context']
    authorize(ctxt)
    return ctxt
# Default XML namespace map used by the security group templates below.
sg_nsmap = {None: wsgi.XMLNS_V11}
class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
    """Serializer template for a single security group rule."""

    def construct(self):
        """Build and return the master template for one rule."""
        rule_elem = xmlutil.TemplateElement('security_group_rule',
                                            selector='security_group_rule')
        make_rule(rule_elem)
        return xmlutil.MasterTemplate(rule_elem, 1, nsmap=sg_nsmap)
class SecurityGroupTemplate(xmlutil.TemplateBuilder):
    """Serializer template for a single security group."""

    def construct(self):
        """Build and return the master template for one group."""
        group_elem = xmlutil.TemplateElement('security_group',
                                             selector='security_group')
        make_sg(group_elem)
        return xmlutil.MasterTemplate(group_elem, 1, nsmap=sg_nsmap)
class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
    """Serializer template for a list of security groups."""

    def construct(self):
        """Build and return the master template for the group list."""
        root = xmlutil.TemplateElement('security_groups')
        group_elem = xmlutil.SubTemplateElement(root, 'security_group',
                                                selector='security_groups')
        make_sg(group_elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """

    def default(self, string):
        """Deserialize an xml-formatted security group create request.

        Returns a dict of the form
        ``{'body': {'security_group': {...}}}`` with the optional 'name'
        attribute and 'description' child element copied across.
        """
        dom = xmlutil.safe_minidom_parse_string(string)
        security_group = {}
        sg_node = self.find_first_child_named(dom,
                                              'security_group')
        if sg_node is not None:
            if sg_node.hasAttribute('name'):
                security_group['name'] = sg_node.getAttribute('name')
            desc_node = self.find_first_child_named(sg_node,
                                                    "description")
            # Use an explicit `is not None` test, consistent with every
            # other node check in this module; relying on node truthiness
            # is fragile (some DOM implementations treat childless nodes
            # as falsy).
            if desc_node is not None:
                security_group['description'] = self.extract_text(desc_node)
        return {'body': {'security_group': security_group}}
class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """

    # Child elements whose text is copied verbatim from the request XML
    # into the rule dict, in extraction order.
    _rule_fields = ('ip_protocol', 'from_port', 'to_port',
                    'parent_group_id', 'group_id', 'cidr')

    def default(self, string):
        """Deserialize an xml-formatted security group create request."""
        dom = xmlutil.safe_minidom_parse_string(string)
        security_group_rule = self._extract_security_group_rule(dom)
        return {'body': {'security_group_rule': security_group_rule}}

    def _extract_security_group_rule(self, node):
        """Marshal the security group rule attribute of a parsed request."""
        sg_rule = {}
        sg_rule_node = self.find_first_child_named(node,
                                                   'security_group_rule')
        if sg_rule_node is not None:
            # Copy each known field across only when present in the XML.
            for field in self._rule_fields:
                child = self.find_first_child_named(sg_rule_node, field)
                if child is not None:
                    sg_rule[field] = self.extract_text(child)
        return sg_rule
class SecurityGroupControllerBase(object):
    """Base class for Security Group controllers."""

    def __init__(self):
        # Security group driver in use (nova-network or quantum backed).
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        self.compute_api = compute.API(
            security_group_api=self.security_group_api)

    def _format_security_group_rule(self, context, rule):
        """Return the API dict representation of a single rule."""
        formatted = {
            'id': rule['id'],
            'parent_group_id': rule['parent_group_id'],
            'ip_protocol': rule['protocol'],
            'from_port': rule['from_port'],
            'to_port': rule['to_port'],
            'group': {},
            'ip_range': {},
        }
        if rule['group_id']:
            # Rule grants access from another security group.
            source_group = self.security_group_api.get(context,
                                                       id=rule['group_id'])
            formatted['group'] = {
                'name': source_group.get('name'),
                'tenant_id': source_group.get('project_id'),
            }
        else:
            # Rule grants access from a CIDR.
            formatted['ip_range'] = {'cidr': rule['cidr']}
        return formatted

    def _format_security_group(self, context, group):
        """Return the API dict representation of a security group."""
        rules = [self._format_security_group_rule(context, rule)
                 for rule in group['rules']]
        return {
            'id': group['id'],
            'description': group['description'],
            'name': group['name'],
            'tenant_id': group['project_id'],
            'rules': rules,
        }

    def _from_body(self, body, key):
        """Extract ``key`` from a request body or raise 422."""
        if not body:
            raise exc.HTTPUnprocessableEntity()
        value = body.get(key, None)
        if value is None:
            raise exc.HTTPUnprocessableEntity()
        return value
class SecurityGroupController(SecurityGroupControllerBase):
    """The Security group API controller for the OpenStack API."""

    @wsgi.serializers(xml=SecurityGroupTemplate)
    def show(self, req, id):
        """Return data about the given security group."""
        context = _authorize_context(req)
        id = self.security_group_api.validate_id(id)
        group = self.security_group_api.get(context, None, id,
                                            map_exception=True)
        formatted = self._format_security_group(context, group)
        return {'security_group': formatted}

    def delete(self, req, id):
        """Delete a security group."""
        context = _authorize_context(req)
        id = self.security_group_api.validate_id(id)
        group = self.security_group_api.get(context, None, id,
                                            map_exception=True)
        self.security_group_api.destroy(context, group)
        # 202: deletion accepted, may complete asynchronously.
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req):
        """Returns a list of security groups."""
        context = _authorize_context(req)
        search_opts = {}
        search_opts.update(req.GET)
        raw_groups = self.security_group_api.list(context,
                                                  project=context.project_id,
                                                  search_opts=search_opts)
        limited_list = common.limited(raw_groups, req)
        result = [self._format_security_group(context, group)
                  for group in limited_list]
        # Present groups in a stable (tenant, name) order.
        result.sort(key=lambda k: (k['tenant_id'], k['name']))
        return {'security_groups': result}

    @wsgi.serializers(xml=SecurityGroupTemplate)
    @wsgi.deserializers(xml=SecurityGroupXMLDeserializer)
    def create(self, req, body):
        """Creates a new security group."""
        context = _authorize_context(req)
        security_group = self._from_body(body, 'security_group')
        group_name = security_group.get('name', None)
        group_description = security_group.get('description', None)
        # Validate both properties before attempting creation.
        self.security_group_api.validate_property(group_name, 'name', None)
        self.security_group_api.validate_property(group_description,
                                                  'description', None)
        group_ref = self.security_group_api.create_security_group(
            context, group_name, group_description)
        formatted = self._format_security_group(context, group_ref)
        return {'security_group': formatted}
class SecurityGroupRulesController(SecurityGroupControllerBase):
    """API controller for creating and deleting security group rules."""

    @wsgi.serializers(xml=SecurityGroupRuleTemplate)
    @wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer)
    def create(self, req, body):
        """Create a rule inside the parent group named in the request body."""
        context = _authorize_context(req)
        sg_rule = self._from_body(body, 'security_group_rule')
        parent_group_id = self.security_group_api.validate_id(
            sg_rule.get('parent_group_id', None))
        security_group = self.security_group_api.get(context, None,
            parent_group_id, map_exception=True)
        try:
            new_rule = self._rule_args_to_dict(context,
                to_port=sg_rule.get('to_port'),
                from_port=sg_rule.get('from_port'),
                ip_protocol=sg_rule.get('ip_protocol'),
                cidr=sg_rule.get('cidr'),
                group_id=sg_rule.get('group_id'))
        except Exception as exp:
            # NOTE(review): ``unicode`` is Python 2 only; this module appears
            # to target py2 throughout — confirm before porting.
            raise exc.HTTPBadRequest(explanation=unicode(exp))
        if new_rule is None:
            msg = _("Not enough parameters to build a valid rule.")
            raise exc.HTTPBadRequest(explanation=msg)
        new_rule['parent_group_id'] = security_group['id']
        if 'cidr' in new_rule:
            # Reject a /0 prefix unless the network really is 0.0.0.0
            # (the "allow everything" wildcard).
            net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
            if net != '0.0.0.0' and prefixlen == '0':
                msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
                raise exc.HTTPBadRequest(explanation=msg)
        security_group_rule = (
            self.security_group_api.create_security_group_rule(
                context, security_group, new_rule))
        return {"security_group_rule": self._format_security_group_rule(
            context,
            security_group_rule)}

    def _rule_args_to_dict(self, context, to_port=None, from_port=None,
                           ip_protocol=None, cidr=None, group_id=None):
        """Build an ingress-rule dict from either a source group or a CIDR.

        A supplied group_id takes precedence; otherwise the CIDR (or its
        default, via parse_cidr) is used.
        """
        if group_id is not None:
            group_id = self.security_group_api.validate_id(group_id)
            # check if groupId exists
            self.security_group_api.get(context, id=group_id)
            return self.security_group_api.new_group_ingress_rule(
                group_id, ip_protocol, from_port, to_port)
        else:
            cidr = self.security_group_api.parse_cidr(cidr)
            return self.security_group_api.new_cidr_ingress_rule(
                cidr, ip_protocol, from_port, to_port)

    def delete(self, req, id):
        """Delete the rule with the given id from its parent group."""
        context = _authorize_context(req)
        id = self.security_group_api.validate_id(id)
        rule = self.security_group_api.get_rule(context, id)
        group_id = rule['parent_group_id']
        # Fetch the parent group so removal is authorized against it.
        security_group = self.security_group_api.get(context, None, group_id,
                                                     map_exception=True)
        self.security_group_api.remove_rules(context, security_group,
                                             [rule['id']])
        return webob.Response(status_int=202)
class ServerSecurityGroupController(SecurityGroupControllerBase):
    """Lists the security groups attached to a single server."""

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req, server_id):
        """Returns a list of security groups for the given instance."""
        context = _authorize_context(req)
        self.security_group_api.ensure_default(context)
        try:
            instance = self.compute_api.get(context, server_id)
        except exception.InstanceNotFound as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        groups = self.security_group_api.get_instance_security_groups(
            context, instance['id'], instance['uuid'], True)
        formatted = [self._format_security_group(context, group)
                     for group in groups]
        formatted.sort(key=lambda item: (item['tenant_id'], item['name']))
        return {'security_groups': formatted}
class SecurityGroupActionController(wsgi.Controller):
    """Handles the add/removeSecurityGroup server actions."""

    def __init__(self, *args, **kwargs):
        super(SecurityGroupActionController, self).__init__(*args, **kwargs)
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())
        self.compute_api = compute.API(
            security_group_api=self.security_group_api)

    def _parse(self, body, action):
        """Extract and validate the group name from an action body."""
        try:
            group_name = body[action]['name']
        except TypeError:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Missing parameter dict"))
        except KeyError:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Security group not specified"))
        if not group_name or not group_name.strip():
            raise webob.exc.HTTPBadRequest(
                explanation=_("Security group name cannot be empty"))
        return group_name

    def _invoke(self, method, context, id, group_name):
        """Apply *method* to the instance, mapping domain errors to HTTP."""
        try:
            instance = self.compute_api.get(context, id)
            method(context, instance, group_name)
        except (exception.SecurityGroupNotFound,
                exception.InstanceNotFound) as exp:
            raise exc.HTTPNotFound(explanation=exp.format_message())
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=exp.format_message())
        return webob.Response(status_int=202)

    @wsgi.action('addSecurityGroup')
    def _addSecurityGroup(self, req, id, body):
        context = req.environ['nova.context']
        authorize(context)
        group_name = self._parse(body, 'addSecurityGroup')
        return self._invoke(self.security_group_api.add_to_instance,
                            context, id, group_name)

    @wsgi.action('removeSecurityGroup')
    def _removeSecurityGroup(self, req, id, body):
        context = req.environ['nova.context']
        authorize(context)
        group_name = self._parse(body, 'removeSecurityGroup')
        return self._invoke(self.security_group_api.remove_from_instance,
                            context, id, group_name)
class SecurityGroupsOutputController(wsgi.Controller):
    """Response extension that adds 'security_groups' to server payloads."""

    def __init__(self, *args, **kwargs):
        super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.security_group_api = (
            openstack_driver.get_openstack_security_group_driver())

    def _extend_servers(self, req, servers):
        """Attach security group names to each server dict in *servers*."""
        # TODO(arosen) this function should be refactored to reduce duplicate
        # code and use get_instance_security_groups instead of get_db_instance.
        key = "security_groups"
        context = _authorize_context(req)
        if not openstack_driver.is_quantum_security_groups():
            # Nova-native groups: read them off the request's cached DB
            # instance.
            for server in servers:
                instance = req.get_db_instance(server['id'])
                groups = instance.get(key)
                if groups:
                    server[key] = [{"name": group["name"]} for group in groups]
        else:
            # If method is a POST we get the security groups intended for an
            # instance from the request. The reason for this is if using
            # quantum security groups the requested security groups for the
            # instance are not in the db and have not been sent to quantum yet.
            if req.method != 'POST':
                sg_instance_bindings = (
                    self.security_group_api
                    .get_instances_security_groups_bindings(context))
                for server in servers:
                    groups = sg_instance_bindings.get(server['id'])
                    if groups:
                        server[key] = groups
            # In this section of code len(servers) == 1 as you can only POST
            # one server in an API request.
            else:
                try:
                    # try converting to json
                    req_obj = json.loads(req.body)
                    # Add security group to server, if no security group was in
                    # request add default since that is the group it is part of
                    servers[0][key] = req_obj['server'].get(
                        key, [{'name': 'default'}])
                except ValueError:
                    # Body was not JSON; fall back to parsing it as XML.
                    root = minidom.parseString(req.body)
                    sg_root = root.getElementsByTagName(key)
                    groups = []
                    if sg_root:
                        security_groups = sg_root[0].getElementsByTagName(
                            'security_group')
                        for security_group in security_groups:
                            groups.append(
                                {'name': security_group.getAttribute('name')})
                    if not groups:
                        groups = [{'name': 'default'}]
                    servers[0][key] = groups

    def _show(self, req, resp_obj):
        # Soft authorization: quietly skip the extension when not allowed.
        if not softauth(req.environ['nova.context']):
            return
        if 'server' in resp_obj.obj:
            resp_obj.attach(xml=SecurityGroupServerTemplate())
            self._extend_servers(req, [resp_obj.obj['server']])

    @wsgi.extends
    def show(self, req, resp_obj, id):
        return self._show(req, resp_obj)

    @wsgi.extends
    def create(self, req, resp_obj, body):
        return self._show(req, resp_obj)

    @wsgi.extends
    def detail(self, req, resp_obj):
        if not softauth(req.environ['nova.context']):
            return
        resp_obj.attach(xml=SecurityGroupServersTemplate())
        self._extend_servers(req, list(resp_obj.obj['servers']))
class SecurityGroupsTemplateElement(xmlutil.TemplateElement):
    def will_render(self, datum):
        """Render the element only when the datum has security group data."""
        has_groups = "security_groups" in datum
        return has_groups
def make_server(elem):
    """Attach the security_groups XML sub-template to a server element."""
    groups_elem = SecurityGroupsTemplateElement('security_groups')
    elem.append(groups_elem)
    group_elem = xmlutil.SubTemplateElement(groups_elem, 'security_group',
                                            selector="security_groups")
    group_elem.set('name')
class SecurityGroupServerTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the slave template for a single-server response."""
        server_elem = xmlutil.TemplateElement('server')
        make_server(server_elem)
        return xmlutil.SlaveTemplate(server_elem, 1)
class SecurityGroupServersTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the slave template for a server-list response."""
        servers_elem = xmlutil.TemplateElement('servers')
        server_elem = xmlutil.SubTemplateElement(servers_elem, 'server',
                                                 selector='servers')
        make_server(server_elem)
        return xmlutil.SlaveTemplate(servers_elem, 1)
class Security_groups(extensions.ExtensionDescriptor):
    """Security group support."""
    name = "SecurityGroups"
    alias = "os-security-groups"
    namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
    updated = "2011-07-21T00:00:00+00:00"

    def get_controller_extensions(self):
        """Return the action and output extensions for the servers resource."""
        return [
            extensions.ControllerExtension(
                self, 'servers', SecurityGroupActionController()),
            extensions.ControllerExtension(
                self, 'servers', SecurityGroupsOutputController()),
        ]

    def get_resources(self):
        """Return the top-level and server-scoped resource extensions."""
        return [
            extensions.ResourceExtension(
                'os-security-groups',
                controller=SecurityGroupController()),
            extensions.ResourceExtension(
                'os-security-group-rules',
                controller=SecurityGroupRulesController()),
            extensions.ResourceExtension(
                'os-security-groups',
                controller=ServerSecurityGroupController(),
                parent=dict(member_name='server',
                            collection_name='servers')),
        ]
class NativeSecurityGroupExceptions(object):
    """Maps security-group API error callbacks onto HTTP/nova exceptions."""

    @staticmethod
    def raise_invalid_property(msg):
        raise exc.HTTPBadRequest(explanation=msg)

    @staticmethod
    def raise_group_already_exists(msg):
        raise exc.HTTPBadRequest(explanation=msg)

    @staticmethod
    def raise_invalid_group(msg):
        raise exc.HTTPBadRequest(explanation=msg)

    @staticmethod
    def raise_invalid_cidr(cidr, decoding_exception=None):
        # decoding_exception is accepted for interface compatibility but is
        # intentionally not chained into the raised error.
        raise exception.InvalidCidr(cidr=cidr)

    @staticmethod
    def raise_over_quota(msg):
        raise exception.SecurityGroupLimitExceeded(msg)

    @staticmethod
    def raise_not_found(msg):
        raise exc.HTTPNotFound(explanation=msg)
class NativeNovaSecurityGroupAPI(NativeSecurityGroupExceptions,
                                 compute_api.SecurityGroupAPI):
    """Nova-native security group API raising HTTP-flavored exceptions."""
    pass
class NativeQuantumSecurityGroupAPI(NativeSecurityGroupExceptions,
                                    quantum_driver.SecurityGroupAPI):
    """Quantum-backed security group API raising HTTP-flavored exceptions."""
    pass
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Lu."""
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
@test_util.with_eager_op_as_function
class LuOpTest(test.TestCase):
  """Tests for linalg_ops.lu: verifies that P @ x == L @ U."""

  @property
  def float_types(self):
    # Dtypes exercised by testInvalidMatrix.
    return set((np.float64, np.float32, np.complex64, np.complex128))

  def _verifyLuBase(self, x, lower, upper, perm, verification,
                    output_idx_type):
    """Checks factors/permutation of ``x`` against the reconstruction.

    ``verification`` is the row-permuted L @ U product; it must match
    ``x``, and each permutation vector must be a permutation of 0..n-1.
    """
    lower_np, upper_np, perm_np, verification_np = self.evaluate(
        [lower, upper, perm, verification])
    self.assertAllClose(x, verification_np)
    self.assertShapeEqual(x, lower)
    self.assertShapeEqual(x, upper)
    self.assertAllEqual(x.shape[:-1], perm.shape.as_list())
    # Check dtypes are as expected.
    self.assertEqual(x.dtype, lower_np.dtype)
    self.assertEqual(x.dtype, upper_np.dtype)
    self.assertEqual(output_idx_type.as_numpy_dtype, perm_np.dtype)
    # Check that the permutation is valid.
    if perm_np.shape[-1] > 0:
      perm_reshaped = np.reshape(perm_np, (-1, perm_np.shape[-1]))
      for perm_vector in perm_reshaped:
        self.assertAllClose(np.arange(len(perm_vector)), np.sort(perm_vector))

  def _verifyLu(self, x, output_idx_type=dtypes.int64):
    """Factorizes ``x`` with linalg_ops.lu and checks the reconstruction."""
    # Verify that Px = LU.
    lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)
    # Prepare the lower factor of shape num_rows x num_rows
    lu_shape = np.array(lu.shape.as_list())
    batch_shape = lu_shape[:-2]
    num_rows = lu_shape[-2]
    num_cols = lu_shape[-1]
    lower = array_ops.matrix_band_part(lu, -1, 0)
    if num_rows > num_cols:
      # Tall matrix: pad L on the right with identity columns.
      eye = linalg_ops.eye(
          num_rows, batch_shape=batch_shape, dtype=lower.dtype)
      lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
    elif num_rows < num_cols:
      # Wide matrix: L is only the leading square part.
      lower = lower[..., :num_rows]
    # Fill the diagonal with ones.
    ones_diag = array_ops.ones(
        np.append(batch_shape, num_rows), dtype=lower.dtype)
    lower = array_ops.matrix_set_diag(lower, ones_diag)
    # Prepare the upper factor.
    upper = array_ops.matrix_band_part(lu, 0, -1)
    verification = test_util.matmul_without_tf32(lower, upper)
    # Permute the rows of the product of the triangular factors.
    if num_rows > 0:
      # Reshape the product of the triangular factors and permutation indices
      # to a single batch dimension. This makes it easy to apply
      # invert_permutation and gather_nd ops.
      perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
      verification_reshaped = array_ops.reshape(verification,
                                                [-1, num_rows, num_cols])
      # Invert the permutation in each batch.
      inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
                                        perm_reshaped)
      batch_size = perm_reshaped.shape.as_list()[0]
      # Prepare the batch indices with the same shape as the permutation.
      # The corresponding batch index is paired with each of the `num_rows`
      # permutation indices.
      batch_indices = math_ops.cast(
          array_ops.broadcast_to(
              math_ops.range(batch_size)[:, None], perm_reshaped.shape),
          dtype=output_idx_type)
      permuted_verification_reshaped = array_ops.gather_nd(
          verification_reshaped,
          array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))
      # Reshape the verification matrix back to the original shape.
      verification = array_ops.reshape(permuted_verification_reshaped,
                                       lu_shape)
    self._verifyLuBase(x, lower, upper, perm, verification,
                       output_idx_type)

  def testBasic(self):
    """Round-trips a fixed 3x3 matrix in every supported dtype."""
    data = np.array([[4., -1., 2.], [-1., 6., 0], [10., 0., 5.]])
    for dtype in (np.float32, np.float64):
      for output_idx_type in (dtypes.int32, dtypes.int64):
        with self.subTest(dtype=dtype, output_idx_type=output_idx_type):
          self._verifyLu(data.astype(dtype), output_idx_type=output_idx_type)
    for dtype in (np.complex64, np.complex128):
      for output_idx_type in (dtypes.int32, dtypes.int64):
        with self.subTest(dtype=dtype, output_idx_type=output_idx_type):
          # Make a Hermitian-ish complex matrix from the real data.
          complex_data = np.tril(1j * data, -1).astype(dtype)
          complex_data += np.triu(-1j * data, 1).astype(dtype)
          complex_data += data
          self._verifyLu(complex_data, output_idx_type=output_idx_type)

  def testPivoting(self):
    # This matrix triggers partial pivoting because the first diagonal entry
    # is small.
    data = np.array([[1e-9, 1., 0.], [1., 0., 0], [0., 1., 5]])
    self._verifyLu(data.astype(np.float32))
    for dtype in (np.float32, np.float64):
      with self.subTest(dtype=dtype):
        self._verifyLu(data.astype(dtype))
        _, p = linalg_ops.lu(data)
        p_val = self.evaluate([p])
        # Make sure p_val is not the identity permutation.
        self.assertNotAllClose(np.arange(3), p_val)
    for dtype in (np.complex64, np.complex128):
      with self.subTest(dtype=dtype):
        complex_data = np.tril(1j * data, -1).astype(dtype)
        complex_data += np.triu(-1j * data, 1).astype(dtype)
        complex_data += data
        self._verifyLu(complex_data)
        # NOTE(review): this factors ``data`` rather than ``complex_data``
        # — looks like a copy/paste slip from the float loop; confirm intent.
        _, p = linalg_ops.lu(data)
        p_val = self.evaluate([p])
        # Make sure p_val is not the identity permutation.
        self.assertNotAllClose(np.arange(3), p_val)

  def testInvalidMatrix(self):
    # LU factorization gives an error when the input is singular.
    # Note: A singular matrix may return without error but it won't be a valid
    # factorization.
    for dtype in self.float_types:
      with self.subTest(dtype=dtype):
        with self.assertRaises(errors.InvalidArgumentError):
          self.evaluate(
              linalg_ops.lu(
                  np.array([[1., 2., 3.], [2., 4., 6.], [2., 3., 4.]],
                           dtype=dtype)))
        with self.assertRaises(errors.InvalidArgumentError):
          self.evaluate(
              linalg_ops.lu(
                  np.array([[[1., 2., 3.], [2., 4., 6.], [1., 2., 3.]],
                            [[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]]],
                           dtype=dtype)))

  def testBatch(self):
    """Verifies batched inputs, including random real and complex batches."""
    simple_array = np.array([[[1., -1.], [2., 5.]]])  # shape (1, 2, 2)
    self._verifyLu(simple_array)
    self._verifyLu(np.vstack((simple_array, simple_array)))
    odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
    self._verifyLu(np.vstack((odd_sized_array, odd_sized_array)))
    batch_size = 200
    # Generate random matrices.
    np.random.seed(42)
    matrices = np.random.rand(batch_size, 5, 5)
    self._verifyLu(matrices)
    # Generate random complex valued matrices.
    np.random.seed(52)
    matrices = np.random.rand(batch_size, 5,
                              5) + 1j * np.random.rand(batch_size, 5, 5)
    self._verifyLu(matrices)

  def testLargeMatrix(self):
    """Exercises a 500x500 real and complex factorization."""
    # Generate random matrices.
    n = 500
    np.random.seed(64)
    data = np.random.rand(n, n)
    self._verifyLu(data)
    # Generate random complex valued matrices.
    np.random.seed(129)
    data = np.random.rand(n, n) + 1j * np.random.rand(n, n)
    self._verifyLu(data)

  @test_util.disable_xla("b/206106619")
  @test_util.run_in_graph_and_eager_modes(use_gpu=True)
  def testEmpty(self):
    """Degenerate shapes: empty batch and zero-sized matrices."""
    self._verifyLu(np.empty([0, 2, 2]))
    self._verifyLu(np.empty([2, 0, 0]))

  @test_util.run_in_graph_and_eager_modes(use_gpu=True)
  def testConcurrentExecutesWithoutError(self):
    """Two identical factorizations evaluated together must agree."""
    matrix_shape = [5, 5]
    seed = [42, 24]
    matrix1 = stateless_random_ops.stateless_random_normal(
        shape=matrix_shape, seed=seed)
    matrix2 = stateless_random_ops.stateless_random_normal(
        shape=matrix_shape, seed=seed)
    self.assertAllEqual(matrix1, matrix2)
    lu1, p1 = linalg_ops.lu(matrix1)
    lu2, p2 = linalg_ops.lu(matrix2)
    lu1_val, p1_val, lu2_val, p2_val = self.evaluate([lu1, p1, lu2, p2])
    self.assertAllEqual(lu1_val, lu2_val)
    self.assertAllEqual(p1_val, p2_val)
class LuBenchmark(test.Benchmark):
  """Benchmarks linalg_ops.lu on CPU and (when available) GPU."""

  # Square and batched square shapes; trailing two dims are the matrix.
  shapes = [
      (4, 4),
      (10, 10),
      (16, 16),
      (101, 101),
      (256, 256),
      (1000, 1000),
      (1024, 1024),
      (2048, 2048),
      (4096, 4096),
      (513, 2, 2),
      (513, 8, 8),
      (513, 256, 256),
      (4, 513, 2, 2),
  ]

  def _GenerateMatrix(self, shape):
    """Returns a diagonally dominant float32 matrix tiled to *shape*."""
    batch_shape = shape[:-2]
    shape = shape[-2:]
    assert shape[0] == shape[1]
    n = shape[0]
    # ones/(2n) off-diagonal plus unit diagonal keeps it well conditioned.
    matrix = np.ones(shape).astype(np.float32) / (2.0 * n) + np.diag(
        np.ones(n).astype(np.float32))
    return np.tile(matrix, batch_shape + (1, 1))

  def benchmarkLuOp(self):
    """Times the LU op for every shape, on CPU and then GPU if present."""
    for shape in self.shapes:
      with ops.Graph().as_default(), \
          session.Session(config=benchmark.benchmark_config()) as sess, \
          ops.device("/cpu:0"):
        matrix = variables.Variable(self._GenerateMatrix(shape))
        lu, p = linalg_ops.lu(matrix)
        self.evaluate(variables.global_variables_initializer())
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(lu, p),
            min_iters=25,
            name="lu_cpu_{shape}".format(shape=shape))
      if test.is_gpu_available(True):
        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device("/device:GPU:0"):
          matrix = variables.Variable(self._GenerateMatrix(shape))
          lu, p = linalg_ops.lu(matrix)
          self.evaluate(variables.global_variables_initializer())
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(lu, p),
              min_iters=25,
              name="lu_gpu_{shape}".format(shape=shape))
# Allow running this test module directly.
if __name__ == "__main__":
  test.main()
| |
from nose.tools import *
from unittest import TestCase
from datetime import datetime, timedelta
from stats.stat_collector_factory import StatCollectorFactory
from stats.stat_collector_factory import StatConfigurationInvalidException
from stats.results_stat_collector import ResultsStatCollector
from files.file_package import FilePackageFactory
class StatCollectorFactoryTests(TestCase):
    """Unit tests for StatCollectorFactory using recording mock doubles."""

    def setUp(self):
        # Swap every collaborator factory for a mock so each test can
        # observe exactly what the StatCollectorFactory requested.
        self.scf = StatCollectorFactory()
        self.mock_stat_factory = MockStatFactory()
        self.scf.set_stat_factory(self.mock_stat_factory)
        self.mock_repo_factory = MockRepositoryFactory()
        self.scf.set_repo_factory(self.mock_repo_factory)
        self.mock_results_stat_collector_factory = MockResultsStatCollectorFactory(None)
        self.scf.set_results_stat_collector_factory(self.mock_results_stat_collector_factory)
        self.mock_file_package_factory = MockFilePackageFactory()
        self.scf.set_file_package_factory(self.mock_file_package_factory)
        self.mock_file_iterator_factory = MockFileIteratorFactory()
        self.scf.set_file_iterator_factory(self.mock_file_iterator_factory)

    def test_stat_collector_factory_raises_exception_for_invalid_config_with_msg(self):
        # Manual try/except (instead of assertRaises) so the exception
        # message prefix can also be verified.
        didthrow = False
        try:
            self.scf.get_stat_collector({})
        except StatConfigurationInvalidException as e:
            didthrow = True
            ok_(str(e).startswith("Unable to find required configuration option"))
        ok_(didthrow)

    def test_stat_collector_factory_creates_proper_stat(self):
        statname = "my_favorite_stat"
        conf = { "statname": statname }
        stat = self.scf.create_stat_from_config(conf)
        eq_(statname, self.mock_stat_factory.get_last_stat_created())
        eq_(conf, self.mock_stat_factory.get_last_config_passed())

    def test_stat_collector_factory_creates_proper_repo(self):
        directory = "/home/mdrago/repository_lives_here"
        conf = { "repodir": directory }
        repo = self.scf.create_repo_from_config(conf)
        eq_(directory, self.mock_repo_factory.get_last_directory())

    def test_stat_collector_factory_creates_file_package(self):
        basedir = "/home/mdrago/repository_lives_here"
        subdir = "TestProject"
        conf = {}
        conf["repodir"] = basedir
        conf["dirs"] = [ subdir ]
        fp = self.scf.create_file_package_from_config(conf)
        eq_(subdir, self.mock_file_package_factory.get_file_package().dirs[0])

    @raises(Exception)
    def test_stat_collector_factory_raises_exception_with_dirs_as_string(self):
        "the dirs setting has to be a list and not a string"
        basedir = "/home/mdrago/repository_lives_here"
        conf = {}
        conf["repodir"] = basedir
        conf["dirs"] = "a_string"
        self.scf.create_file_package_from_config(conf)

    def test_stat_collector_factory_creates_file_package_with_subdirs(self):
        basedir = "/home/mdrago/repository_lives_here"
        conf = {}
        conf["repodir"] = basedir
        conf["dirs"] = ['*']
        fp = self.scf.create_file_package_from_config(conf)
        eq_("*", self.mock_file_package_factory.get_file_package().dirs[0])

    def test_stat_collector_factory_creates_file_package_with_subdirs_when_dirs_omitted(self):
        # With no "dirs" entry the factory must still pick some directory.
        basedir = "/home/mdrago/repository_lives_here"
        conf = {}
        conf["repodir"] = basedir
        fp = self.scf.create_file_package_from_config(conf)
        ok_(self.mock_file_package_factory.get_file_package().dirs[0])

    def test_stat_collector_factory_creates_file_package_with_excluded_dirs(self):
        basedir = "/home/mdrago/repository_lives_here"
        conf = {}
        conf["repodir"] = basedir
        conf["exclude_dirs"] = ["dir1", "dir2"]
        fp = self.scf.create_file_package_from_config(conf)
        expected = set(("dir1", "dir2"))
        actual = set(self.mock_file_package_factory.get_file_package().excluded_dirs)
        eq_(expected, actual)

    def test_stat_collector_factory_creates_file_iterator(self):
        basedir = "/home/mdrago/repository_lives_here"
        subdir = "TestProject"
        conf = {}
        conf["repodir"] = basedir
        conf["dirs"] = [ subdir ]
        fi = self.scf.create_file_iterator_from_config(conf)
        eq_(1, len(fi.get_filepackages()))

    def test_stat_collector_factory_creates_file_iterator_with_excluded_path_globs(self):
        basedir = "/home/mdrago/repository_lives_here"
        conf = {}
        conf["repodir"] = basedir
        conf["exclude_path_globs"] = ["*/test/*"]
        fp = self.scf.create_file_iterator_from_config(conf)
        actual = set(self.mock_file_iterator_factory.get_file_iterator().excluded_path_globs)
        eq_(set(["*/test/*"]), actual)

    def test_stat_collector_factory_creates_matcher_glob(self):
        # Uses the real FilePackageFactory so file_matchers is populated.
        self.scf.set_file_package_factory(FilePackageFactory)
        glob = "*.java"
        basedir = "/home/mdrago/repository_lives_here"
        subdir = "TestProject"
        conf = {}
        conf["glob"] = glob
        conf["repodir"] = basedir
        conf["dirs"] = [ subdir ]
        fp = self.scf.create_file_package_from_config(conf)
        eq_(1, len(fp.file_matchers))
        eq_(glob, fp.file_matchers[0].get_globs()[0])

    def test_stat_collector_factory_creates_start_time(self):
        # 7776000 seconds == 90 days before the injected "current" time.
        current = datetime(2011, 5, 26, 7, 15, 0)
        self.scf.set_current_time(current)
        conf = {"start_time_delta": 7776000}
        start_time = self.scf.get_start_time_from_config(conf)
        eq_(datetime(2011, 2, 25, 7, 15, 0), start_time)

    def test_stat_collector_factory_creates_sample_time_interval(self):
        seconds = 2592000
        conf = {'sample_time_interval': seconds}
        expected = timedelta(seconds = seconds)
        actual = self.scf.get_sample_time_interval_from_config(conf)
        eq_(expected, actual)

    def test_stat_collector_factory_creates_results_stat_collector(self):
        conf = {'stattype': 'results', 'statname': 'mystat',
                'results_files': ['file1']}
        sc = self.scf.get_stat_collector(conf)
        eq_(MockResultsStatCollector, type(sc))

    def test_stat_collector_injects_results_file_names_in_to_collector(self):
        conf = {'stattype': 'results', 'statname': 'mystat',
                'results_files': ['file1', 'file2']}
        sc = self.scf.get_stat_collector(conf)
        rsc = self.mock_results_stat_collector_factory.get_results_stat_collector(None)
        eq_(['file1', 'file2'], rsc.files)
class MockResultsStatCollectorFactory(object):
    """Factory double that always hands back the same mock collector."""
    def __init__(self, stat):
        # Build the single shared collector up front.
        self.rsc = MockResultsStatCollector(stat)
    def get_results_stat_collector(self, stat):
        # The *stat* argument is accepted but ignored by this double.
        return self.rsc
class MockResultsStatCollector(object):
    """Collector double that records the result files it was given."""
    def __init__(self, stat):
        # No files until set_results_files is called.
        self.files = None
    def set_results_files(self, files):
        self.files = files
    def get_stats(self):
        return None
class MockRepositoryFactory(object):
    """Repository factory double remembering the last directory requested."""
    def __init__(self):
        self.last_directory = None
    def get_repository(self, directory):
        # Record the request; no repository object is actually built.
        self.last_directory = directory
    def get_last_directory(self):
        return self.last_directory
class MockStatFactory(object):
    """Stat factory double recording the last stat request it served."""
    def __init__(self):
        self.last_stat_created = None
        self.last_config_passed = None
    def get_stat(self, statname, conf=None):
        """Record the request; no stat object is returned."""
        self.last_stat_created = statname
        self.last_config_passed = conf
        return None
    def get_last_stat_created(self):
        return self.last_stat_created
    def get_last_config_passed(self):
        return self.last_config_passed
    def return_non_existant_stat(self):
        # Flag kept for interface parity with the real factory.
        self.get_stat_returns_non_existant = True
class MockFilePackageFactory(object):
    """Factory double that hands out one shared MockFilePackage."""
    def __init__(self):
        self.mock = MockFilePackage()
    def get_file_package(self):
        return self.mock
class MockFilePackage(object):
    """File-package double tracking added and excluded directories."""
    def __init__(self):
        self.dirs = []
        self.excluded_dirs = []
    def add_directory(self, directory):
        self.dirs.append(directory)
    def add_directories(self, *directories):
        for entry in directories:
            self.add_directory(entry)
    def exclude_directories(self, *directories):
        self.excluded_dirs.extend(directories)
    def add_file_matcher(self, fm):
        # Accepted and ignored; only directory bookkeeping is mocked.
        pass
    def set_basedir(self, basedir):
        pass
    def file_matchers(self):
        pass
class MockFileIteratorFactory(object):
    """Factory double recording filepackages on its shared iterator."""
    def __init__(self):
        self.file_iterator = MockFileIterator()
    def get_file_iterator(self, filepackages=None):
        self.file_iterator.set_filepackages(filepackages)
        return self.file_iterator
class MockFileIterator(object):
    """File-iterator double tracking filepackages and excluded path globs."""
    def __init__(self, filepackages=None):
        # Bug fix: get_filepackages() reads ``self.filepackages`` but the
        # constructor previously initialized a differently spelled
        # ``self.file_packages`` (and ignored the *filepackages* argument),
        # so calling get_filepackages() before set_filepackages() raised
        # AttributeError. Initialize the attribute actually used, honoring
        # the constructor argument when supplied.
        self.filepackages = filepackages if filepackages is not None else []
        self.excluded_path_globs = []
    def exclude_path_globs(self, *globs):
        # Record every glob in the order given.
        self.excluded_path_globs.extend(globs)
    def get_filepackages(self):
        return self.filepackages
    def set_filepackages(self, filepackages):
        self.filepackages = filepackages
| |
"""
Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
See https://llvm.org/LICENSE.txt for license information.
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Provides an xUnit ResultsFormatter for integrating the LLDB
test suite with the Jenkins xUnit aggregator and other xUnit-compliant
test output processors.
"""
from __future__ import absolute_import
from __future__ import print_function
# System modules
import re
import sys
import xml.sax.saxutils
# Third-party modules
import six
# Local modules
from ..event_builder import EventBuilder
from ..build_exception import BuildError
from .results_formatter import ResultsFormatter
class XunitFormatter(ResultsFormatter):
"""Provides xUnit-style formatted output.
"""
# Result mapping arguments
RM_IGNORE = 'ignore'
RM_SUCCESS = 'success'
RM_FAILURE = 'failure'
RM_PASSTHRU = 'passthru'
@staticmethod
def _build_illegal_xml_regex():
"""Constructs a regex to match all illegal xml characters.
Expects to be used against a unicode string."""
# Construct the range pairs of invalid unicode characters.
illegal_chars_u = [
(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x1F), (0x7F, 0x84),
(0x86, 0x9F), (0xFDD0, 0xFDDF), (0xFFFE, 0xFFFF)]
# For wide builds, we have more.
if sys.maxunicode >= 0x10000:
illegal_chars_u.extend(
[(0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF), (0x3FFFE, 0x3FFFF),
(0x4FFFE, 0x4FFFF), (0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),
(0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF), (0x9FFFE, 0x9FFFF),
(0xAFFFE, 0xAFFFF), (0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),
(0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF), (0xFFFFE, 0xFFFFF),
(0x10FFFE, 0x10FFFF)])
# Build up an array of range expressions.
illegal_ranges = [
"%s-%s" % (six.unichr(low), six.unichr(high))
for (low, high) in illegal_chars_u]
# Compile the regex
return re.compile(six.u('[%s]') % six.u('').join(illegal_ranges))
@staticmethod
def _quote_attribute(text):
"""Returns the given text in a manner safe for usage in an XML attribute.
@param text the text that should appear within an XML attribute.
@return the attribute-escaped version of the input text.
"""
return xml.sax.saxutils.quoteattr(text)
def _replace_invalid_xml(self, str_or_unicode):
"""Replaces invalid XML characters with a '?'.
@param str_or_unicode a string to replace invalid XML
characters within. Can be unicode or not. If not unicode,
assumes it is a byte string in utf-8 encoding.
@returns a utf-8-encoded byte string with invalid
XML replaced with '?'.
"""
# Get the content into unicode
if isinstance(str_or_unicode, str):
# If we hit decoding errors due to data corruption, replace the
# invalid characters with U+FFFD REPLACEMENT CHARACTER.
unicode_content = str_or_unicode.decode('utf-8', 'replace')
else:
unicode_content = str_or_unicode
return self.invalid_xml_re.sub(
six.u('?'), unicode_content).encode('utf-8')
    @classmethod
    def arg_parser(cls):
        """@return arg parser used to parse formatter-specific options."""
        parser = super(XunitFormatter, cls).arg_parser()
        # These are valid choices for results mapping.
        results_mapping_choices = [
            XunitFormatter.RM_IGNORE,
            XunitFormatter.RM_SUCCESS,
            XunitFormatter.RM_FAILURE,
            XunitFormatter.RM_PASSTHRU]
        parser.add_argument(
            "--assert-on-unknown-events",
            action="store_true",
            help=('cause unknown test events to generate '
                  'a python assert. Default is to ignore.'))
        parser.add_argument(
            "--ignore-skip-name",
            "-n",
            metavar='PATTERN',
            action="append",
            dest='ignore_skip_name_patterns',
            help=('a python regex pattern, where '
                  'any skipped test with a test method name where regex '
                  'matches (via search) will be ignored for xUnit test '
                  'result purposes. Can be specified multiple times.'))
        parser.add_argument(
            "--ignore-skip-reason",
            "-r",
            metavar='PATTERN',
            action="append",
            dest='ignore_skip_reason_patterns',
            help=('a python regex pattern, where '
                  'any skipped test with a skip reason where the regex '
                  'matches (via search) will be ignored for xUnit test '
                  'result purposes. Can be specified multiple times.'))
        # Mapping defaults: an unexpected success is reported as a failure,
        # an expected failure is ignored.
        parser.add_argument(
            "--xpass", action="store", choices=results_mapping_choices,
            default=XunitFormatter.RM_FAILURE,
            help=('specify mapping from unexpected success to jUnit/xUnit '
                  'result type'))
        parser.add_argument(
            "--xfail", action="store", choices=results_mapping_choices,
            default=XunitFormatter.RM_IGNORE,
            help=('specify mapping from expected failure to jUnit/xUnit '
                  'result type'))
        return parser
@staticmethod
def _build_regex_list_from_patterns(patterns):
"""Builds a list of compiled regular expressions from option value.
@param patterns contains a list of regular expression
patterns.
@return list of compiled regular expressions, empty if no
patterns provided.
"""
regex_list = []
if patterns is not None:
for pattern in patterns:
regex_list.append(re.compile(pattern))
return regex_list
    def __init__(self, out_file, options):
        """Initializes the XunitFormatter instance.

        @param out_file file-like object where formatted output is written.

        @param options specifies a dictionary of options for the
        formatter.
        """
        # Initialize the parent
        super(XunitFormatter, self).__init__(out_file, options)
        # Encoding advertised in the XML declaration we emit.
        self.text_encoding = "UTF-8"
        # Matches characters that are not legal in XML output; compiled once.
        self.invalid_xml_re = XunitFormatter._build_illegal_xml_regex()
        self.total_test_count = 0
        # Compiled regexes for skipped tests the user wants excluded from
        # the report (by test name and by skip reason).
        self.ignore_skip_name_regexes = (
            XunitFormatter._build_regex_list_from_patterns(
                options.ignore_skip_name_patterns))
        self.ignore_skip_reason_regexes = (
            XunitFormatter._build_regex_list_from_patterns(
                options.ignore_skip_reason_patterns))

        # Buckets of already-formatted <testcase> XML snippets, keyed by
        # result category; "all" keeps every entry in insertion order.
        self.elements = {
            "successes": [],
            "errors": [],
            "failures": [],
            "skips": [],
            "unexpected_successes": [],
            "expected_failures": [],
            "all": []
        }

        # Maps an event status value to the bound method that records it.
        self.status_handlers = {
            EventBuilder.STATUS_SUCCESS: self._handle_success,
            EventBuilder.STATUS_FAILURE: self._handle_failure,
            EventBuilder.STATUS_ERROR: self._handle_error,
            EventBuilder.STATUS_SKIP: self._handle_skip,
            EventBuilder.STATUS_EXPECTED_FAILURE:
                self._handle_expected_failure,
            EventBuilder.STATUS_EXPECTED_TIMEOUT:
                self._handle_expected_timeout,
            EventBuilder.STATUS_UNEXPECTED_SUCCESS:
                self._handle_unexpected_success,
            EventBuilder.STATUS_EXCEPTIONAL_EXIT:
                self._handle_exceptional_exit,
            EventBuilder.STATUS_TIMEOUT:
                self._handle_timeout
        }
RESULT_TYPES = {
EventBuilder.TYPE_TEST_RESULT,
EventBuilder.TYPE_JOB_RESULT}
def handle_event(self, test_event):
super(XunitFormatter, self).handle_event(test_event)
event_type = test_event["event"]
if event_type is None:
return
if event_type == "terminate":
# Process all the final result events into their
# XML counterparts.
for result_event in self.result_events.values():
self._process_test_result(result_event)
self._finish_output()
else:
# This is an unknown event.
if self.options.assert_on_unknown_events:
raise Exception("unknown event type {} from {}\n".format(
event_type, test_event))
def _handle_success(self, test_event):
"""Handles a test success.
@param test_event the test event to handle.
"""
result = self._common_add_testcase_entry(test_event)
with self.lock:
self.elements["successes"].append(result)
def _handle_failure(self, test_event):
"""Handles a test failure.
@param test_event the test event to handle.
"""
message = self._replace_invalid_xml(test_event["issue_message"])
backtrace = self._replace_invalid_xml(
"".join(test_event.get("issue_backtrace", [])))
result = self._common_add_testcase_entry(
test_event,
inner_content=(
'<failure type={} message={}><![CDATA[{}]]></failure>'.format(
XunitFormatter._quote_attribute(test_event["issue_class"]),
XunitFormatter._quote_attribute(message),
backtrace)
))
with self.lock:
self.elements["failures"].append(result)
    def _handle_error_build(self, test_event):
        """Handles a test error that occurred during the build phase.

        @param test_event the test event to handle.
        """
        message = self._replace_invalid_xml(test_event["issue_message"])
        # Render the failed build command and its output as readable text.
        # NOTE(review): BuildError is defined elsewhere in this package —
        # presumably a helper that formats build failures; confirm.
        build_issue_description = self._replace_invalid_xml(
            BuildError.format_build_error(
                test_event.get("build_command", "<None>"),
                test_event.get("build_error", "<None>")))
        result = self._common_add_testcase_entry(
            test_event,
            inner_content=(
                '<error type={} message={}><![CDATA[{}]]></error>'.format(
                    XunitFormatter._quote_attribute(test_event["issue_class"]),
                    XunitFormatter._quote_attribute(message),
                    build_issue_description)
            ))
        with self.lock:
            self.elements["errors"].append(result)
def _handle_error_standard(self, test_event):
"""Handles a test error.
@param test_event the test event to handle.
"""
message = self._replace_invalid_xml(test_event["issue_message"])
backtrace = self._replace_invalid_xml(
"".join(test_event.get("issue_backtrace", [])))
result = self._common_add_testcase_entry(
test_event,
inner_content=(
'<error type={} message={}><![CDATA[{}]]></error>'.format(
XunitFormatter._quote_attribute(test_event["issue_class"]),
XunitFormatter._quote_attribute(message),
backtrace)
))
with self.lock:
self.elements["errors"].append(result)
def _handle_error(self, test_event):
if test_event.get("issue_phase", None) == "build":
self._handle_error_build(test_event)
else:
self._handle_error_standard(test_event)
def _handle_exceptional_exit(self, test_event):
"""Handles an exceptional exit.
@param test_event the test method or job result event to handle.
"""
if "test_name" in test_event:
name = test_event["test_name"]
else:
name = test_event.get("test_filename", "<unknown test/filename>")
message_text = "ERROR: {} ({}): {}".format(
test_event.get("exception_code", 0),
test_event.get("exception_description", ""),
name)
message = self._replace_invalid_xml(message_text)
result = self._common_add_testcase_entry(
test_event,
inner_content=(
'<error type={} message={}></error>'.format(
"exceptional_exit",
XunitFormatter._quote_attribute(message))
))
with self.lock:
self.elements["errors"].append(result)
def _handle_timeout(self, test_event):
"""Handles a test method or job timeout.
@param test_event the test method or job result event to handle.
"""
if "test_name" in test_event:
name = test_event["test_name"]
else:
name = test_event.get("test_filename", "<unknown test/filename>")
message_text = "TIMEOUT: {}".format(name)
message = self._replace_invalid_xml(message_text)
result = self._common_add_testcase_entry(
test_event,
inner_content=(
'<error type={} message={}></error>'.format(
XunitFormatter._quote_attribute("timeout"),
XunitFormatter._quote_attribute(message))
))
with self.lock:
self.elements["errors"].append(result)
@staticmethod
def _ignore_based_on_regex_list(test_event, test_key, regex_list):
"""Returns whether to ignore a test event based on patterns.
@param test_event the test event dictionary to check.
@param test_key the key within the dictionary to check.
@param regex_list a list of zero or more regexes. May contain
zero or more compiled regexes.
@return True if any o the regex list match based on the
re.search() method; false otherwise.
"""
for regex in regex_list:
match = regex.search(test_event.get(test_key, ''))
if match:
return True
return False
def _handle_skip(self, test_event):
"""Handles a skipped test.
@param test_event the test event to handle.
"""
# Are we ignoring this test based on test name?
if XunitFormatter._ignore_based_on_regex_list(
test_event, 'test_name', self.ignore_skip_name_regexes):
return
# Are we ignoring this test based on skip reason?
if XunitFormatter._ignore_based_on_regex_list(
test_event, 'skip_reason', self.ignore_skip_reason_regexes):
return
# We're not ignoring this test. Process the skip.
reason = self._replace_invalid_xml(test_event.get("skip_reason", ""))
result = self._common_add_testcase_entry(
test_event,
inner_content='<skipped message={} />'.format(
XunitFormatter._quote_attribute(reason)))
with self.lock:
self.elements["skips"].append(result)
    def _handle_expected_failure(self, test_event):
        """Handles a test that failed as expected.

        The --xfail option controls how the expected failure is mapped
        onto jUnit/xUnit result categories.

        @param test_event the test event to handle.

        @raises Exception when self.options.xfail holds an unknown value.
        """
        if self.options.xfail == XunitFormatter.RM_PASSTHRU:
            # This is not a natively-supported junit/xunit
            # testcase mode, so it might fail a validating
            # test results viewer.
            if "bugnumber" in test_event:
                bug_id_attribute = 'bug-id={} '.format(
                    XunitFormatter._quote_attribute(test_event["bugnumber"]))
            else:
                bug_id_attribute = ''
            result = self._common_add_testcase_entry(
                test_event,
                inner_content=(
                    '<expected-failure {}type={} message={} />'.format(
                        bug_id_attribute,
                        XunitFormatter._quote_attribute(
                            test_event["issue_class"]),
                        XunitFormatter._quote_attribute(
                            test_event["issue_message"]))
                ))
            with self.lock:
                self.elements["expected_failures"].append(result)
        elif self.options.xfail == XunitFormatter.RM_SUCCESS:
            # Count the expected failure as a plain success.
            result = self._common_add_testcase_entry(test_event)
            with self.lock:
                self.elements["successes"].append(result)
        elif self.options.xfail == XunitFormatter.RM_FAILURE:
            # Report the expected failure as an ordinary failure.
            result = self._common_add_testcase_entry(
                test_event,
                inner_content='<failure type={} message={} />'.format(
                    XunitFormatter._quote_attribute(test_event["issue_class"]),
                    XunitFormatter._quote_attribute(
                        test_event["issue_message"])))
            with self.lock:
                self.elements["failures"].append(result)
        elif self.options.xfail == XunitFormatter.RM_IGNORE:
            # Drop expected failures from the report entirely.
            pass
        else:
            raise Exception(
                "unknown xfail option: {}".format(self.options.xfail))
@staticmethod
def _handle_expected_timeout(test_event):
"""Handles expected_timeout.
@param test_event the test event to handle.
"""
# We don't do anything with expected timeouts, not even report.
pass
    def _handle_unexpected_success(self, test_event):
        """Handles a test that passed but was expected to fail.

        The --xpass option controls how the unexpected success is mapped
        onto jUnit/xUnit result categories.

        @param test_event the test event to handle.

        @raises Exception when self.options.xpass holds an unknown value.
        """
        if self.options.xpass == XunitFormatter.RM_PASSTHRU:
            # This is not a natively-supported junit/xunit
            # testcase mode, so it might fail a validating
            # test results viewer.
            result = self._common_add_testcase_entry(
                test_event,
                inner_content="<unexpected-success />")
            with self.lock:
                self.elements["unexpected_successes"].append(result)
        elif self.options.xpass == XunitFormatter.RM_SUCCESS:
            # Treat the xpass as a success.
            result = self._common_add_testcase_entry(test_event)
            with self.lock:
                self.elements["successes"].append(result)
        elif self.options.xpass == XunitFormatter.RM_FAILURE:
            # Treat the xpass as a failure.
            if "bugnumber" in test_event:
                message = "unexpected success (bug_id:{})".format(
                    test_event["bugnumber"])
            else:
                message = "unexpected success (bug_id:none)"
            result = self._common_add_testcase_entry(
                test_event,
                inner_content='<failure type={} message={} />'.format(
                    XunitFormatter._quote_attribute("unexpected_success"),
                    XunitFormatter._quote_attribute(message)))
            with self.lock:
                self.elements["failures"].append(result)
        elif self.options.xpass == XunitFormatter.RM_IGNORE:
            # Ignore the xpass result as far as xUnit reporting goes.
            pass
        else:
            raise Exception("unknown xpass option: {}".format(
                self.options.xpass))
def _process_test_result(self, test_event):
"""Processes the test_event known to be a test result.
This categorizes the event appropriately and stores the data needed
to generate the final xUnit report. This method skips events that
cannot be represented in xUnit output.
"""
if "status" not in test_event:
raise Exception("test event dictionary missing 'status' key")
status = test_event["status"]
if status not in self.status_handlers:
raise Exception("test event status '{}' unsupported".format(
status))
# Call the status handler for the test result.
self.status_handlers[status](test_event)
    def _common_add_testcase_entry(self, test_event, inner_content=None):
        """Registers a testcase result, and returns the text created.

        The caller is expected to manage failure/skip/success counts
        in some kind of appropriate way.  This call simply constructs
        the XML and appends the returned result to the self.all_results
        list.

        @param test_event the test event dictionary.

        @param inner_content if specified, gets included in the <testcase>
        inner section, at the point before stdout and stderr would be
        included.  This is where a <failure/>, <skipped/>, <error/>, etc.
        could go.

        @return the text of the xml testcase element.
        """
        # Get elapsed time.
        test_class = test_event.get("test_class", "<no_class>")
        test_name = test_event.get("test_name", "<no_test_method>")
        event_time = test_event["event_time"]
        time_taken = self.elapsed_time_for_test(
            test_class, test_name, event_time)

        # Plumb in stdout/stderr once we shift over to only test results.
        test_stdout = ''
        test_stderr = ''

        # Formulate the output xml.
        if not inner_content:
            inner_content = ""
        # NOTE(review): test_class/test_name are interpolated unquoted
        # into attribute values here (unlike inner_content, which callers
        # quote via _quote_attribute) — presumably they never contain
        # XML-special characters; confirm.
        result = (
            '<testcase classname="{}" name="{}" time="{:.3f}">'
            '{}{}{}</testcase>'.format(
                test_class,
                test_name,
                time_taken,
                inner_content,
                test_stdout,
                test_stderr))

        # Save the result, update total test count.
        with self.lock:
            self.total_test_count += 1
            self.elements["all"].append(result)
        return result
    def _finish_output_no_lock(self):
        """Flushes out the report of test executions to form valid xml output.

        xUnit output is in XML.  The reporting system cannot complete the
        formatting of the output without knowing when there is no more input.
        This call addresses notification of the completed test run and thus is
        when we can finish off the report output.

        Caller must already hold self.lock (see _finish_output()).
        """
        # Figure out the counts line for the testsuite.  If we have
        # been counting either unexpected successes or expected
        # failures, we'll output those in the counts, at the risk of
        # being invalidated by a validating test results viewer.
        # These aren't counted by default so they won't show up unless
        # the user specified a formatter option to include them.
        xfail_count = len(self.elements["expected_failures"])
        xpass_count = len(self.elements["unexpected_successes"])
        if xfail_count > 0 or xpass_count > 0:
            extra_testsuite_attributes = (
                ' expected-failures="{}"'
                ' unexpected-successes="{}"'.format(xfail_count, xpass_count))
        else:
            extra_testsuite_attributes = ""

        # Output the header.
        self.out_file.write(
            '<?xml version="1.0" encoding="{}"?>\n'
            '<testsuites>'
            '<testsuite name="{}" tests="{}" errors="{}" failures="{}" '
            'skip="{}"{}>\n'.format(
                self.text_encoding,
                "LLDB test suite",
                self.total_test_count,
                len(self.elements["errors"]),
                len(self.elements["failures"]),
                len(self.elements["skips"]),
                extra_testsuite_attributes))

        # Output each of the test result entries.
        for result in self.elements["all"]:
            self.out_file.write(result + '\n')

        # Close off the test suite.
        self.out_file.write('</testsuite></testsuites>\n')
    def _finish_output(self):
        """Finish writing output as all incoming events have arrived."""
        # Serialize report generation against concurrent result updates.
        with self.lock:
            self._finish_output_no_lock()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test run for a single file and a display of how many events are collected."""
import argparse
import collections
import cProfile
import logging
import os
import pstats
import sys
import time
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.proto import transmission_pb2
from dfvfs.resolver import resolver as path_spec_resolver
from dfvfs.serializer import protobuf_serializer
from google.protobuf import text_format
try:
# Support version 1.X of IPython.
# pylint: disable=no-name-in-module
from IPython.terminal.embed import InteractiveShellEmbed
except ImportError:
# Support version older than 1.X of IPython.
# pylint: disable=no-name-in-module
from IPython.frontend.terminal.embed import InteractiveShellEmbed
import pyevt
import pyevtx
import pylnk
import pymsiecf
import pyregf
import plaso
from plaso.engine import worker
from plaso.frontend import psort
from plaso.frontend import utils as frontend_utils
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import putils
from plaso.lib import queue
# TODO: Remove this after the dfVFS integration.
# TODO: Make sure we don't need to implement the method _ConsumeItem, or
# to have that not as an abstract method.
# pylint: disable=abstract-method
class PprofEventObjectQueueConsumer(queue.EventObjectQueueConsumer):
  """Class that implements an event object queue consumer for pprof."""

  def __init__(self, queue_object):
    """Initializes the queue consumer.

    Args:
      queue_object: the queue object (instance of Queue).
    """
    super(PprofEventObjectQueueConsumer, self).__init__(queue_object)
    # Number of events seen per parser/plugin name, plus a running 'Total'.
    self.counter = collections.Counter()
    # Distinct parser and plugin names encountered, in first-seen order.
    self.parsers = []
    self.plugins = []

  def _ConsumeEventObject(self, event_object):
    """Consumes an event object callback for ConsumeEventObject.

    Args:
      event_object: the event object to consume; its optional 'parser'
                    and 'plugin' attributes drive the tallies.
    """
    parser = getattr(event_object, 'parser', u'N/A')
    if parser not in self.parsers:
      self.parsers.append(parser)
    plugin = getattr(event_object, 'plugin', u'N/A')
    if plugin not in self.plugins:
      self.plugins.append(plugin)
    self.counter[parser] += 1
    # Only tally a plugin entry when the event actually came from a plugin.
    if plugin != u'N/A':
      self.counter[u'[Plugin] {}'.format(plugin)] += 1
    self.counter['Total'] += 1
def PrintHeader(options):
  """Print header information, including library versions.

  Args:
    options: the command line arguments (instance of argparse.Namespace).
  """
  print frontend_utils.FormatHeader('File Parsed')
  print u'{:>20s}'.format(options.file_to_parse)

  # Report the version of the engine and each parsing library in use.
  print frontend_utils.FormatHeader('Versions')
  print frontend_utils.FormatOutputString('plaso engine', plaso.GetVersion())
  print frontend_utils.FormatOutputString('pyevt', pyevt.get_version())
  print frontend_utils.FormatOutputString('pyevtx', pyevtx.get_version())
  print frontend_utils.FormatOutputString('pylnk', pylnk.get_version())
  print frontend_utils.FormatOutputString('pymsiecf', pymsiecf.get_version())
  print frontend_utils.FormatOutputString('pyregf', pyregf.get_version())

  # Echo back any filter and parser selection the user supplied.
  if options.filter:
    print frontend_utils.FormatHeader('Filter Used')
    print frontend_utils.FormatOutputString('Filter String', options.filter)

  if options.parsers:
    print frontend_utils.FormatHeader('Parser Filter Used')
    print frontend_utils.FormatOutputString('Parser String', options.parsers)
def ProcessStorage(options):
  """Process a storage file and produce profile results.

  Args:
    options: the command line arguments (instance of argparse.Namespace).

  Returns:
    The profiling statistics or None on error.
  """
  # NOTE(review): storage_parameters is assembled but never used below —
  # presumably a leftover from an earlier calling convention; verify.
  storage_parameters = options.storage.split()
  storage_parameters.append(options.file_to_parse)
  if options.filter:
    storage_parameters.append(options.filter)

  front_end = psort.PsortFrontend()
  try:
    front_end.ParseOptions(options)
  except errors.BadConfigOption as exception:
    logging.error(u'{0:s}'.format(exception))
    return

  # In verbose mode profile the run; otherwise just wall-clock it.
  if options.verbose:
    # TODO: why not move this functionality into psort?
    profiler = cProfile.Profile()
    profiler.enable()
  else:
    time_start = time.time()

  # Call psort and process output.
  _ = front_end.ParseStorage(options)

  if options.verbose:
    profiler.disable()
  else:
    time_end = time.time()

  if options.verbose:
    return GetStats(profiler)
  else:
    print frontend_utils.FormatHeader('Time Used')
    print u'{:>20f}s'.format(time_end - time_start)
def ProcessFile(options):
  """Process a file and produce profile results.

  Args:
    options: the command line arguments (instance of argparse.Namespace).

  Returns:
    The profiling statistics when --verbose was given, otherwise None.
  """
  # Resolve the input either from an ASCII PathSpec protobuf file or
  # directly from the file to parse on the OS file system.
  if options.proto_file and os.path.isfile(options.proto_file):
    with open(options.proto_file) as fh:
      proto_string = fh.read()
      proto = transmission_pb2.PathSpec()
      try:
        text_format.Merge(proto_string, proto)
      except text_format.ParseError as exception:
        logging.error(u'Unable to parse file, error: {}'.format(
            exception))
        sys.exit(1)
      serializer = protobuf_serializer.ProtobufPathSpecSerializer
      path_spec = serializer.ReadSerializedObject(proto)
  else:
    path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=options.file_to_parse)

  file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
  if file_entry is None:
    logging.error(u'Unable to open file: {0:s}'.format(options.file_to_parse))
    sys.exit(1)

  pre_obj = event.PreprocessObject()
  storage_queue = queue.SingleThreadedQueue()
  storage_queue_producer = queue.EventObjectQueueProducer(storage_queue)

  # Set few options the engine expects to be there.
  # TODO: Can we rather set this directly in argparse?
  options.single_process = True
  options.debug = False
  options.text_prepend = u''

  parsers = putils.FindAllParsers(pre_obj, options)
  my_worker = worker.EventExtractionWorker(
      '0', None, storage_queue_producer, pre_obj, parsers)

  # Either profile the extraction or simply time it.
  if options.verbose:
    profiler = cProfile.Profile()
    profiler.enable()
  else:
    time_start = time.time()
  my_worker.ParseFile(file_entry)
  if options.verbose:
    profiler.disable()
  else:
    time_end = time.time()

  storage_queue_producer.SignalEndOfInput()

  # Drain the queue and tally events per parser/plugin.
  event_object_consumer = PprofEventObjectQueueConsumer(storage_queue)
  event_object_consumer.ConsumeEventObjects()

  if not options.verbose:
    print frontend_utils.FormatHeader('Time Used')
    print u'{:>20f}s'.format(time_end - time_start)

  print frontend_utils.FormatHeader('Parsers Loaded')
  # Accessing protected member.
  # pylint: disable=protected-access
  plugins = []
  for parser in sorted(my_worker._parsers['all']):
    print frontend_utils.FormatOutputString('', parser.parser_name)
    parser_plugins = getattr(parser, '_plugins', [])
    plugins.extend(parser_plugins)

  print frontend_utils.FormatHeader('Plugins Loaded')
  for plugin in sorted(plugins):
    # Plugins may be registered either as plain names or as objects
    # carrying a NAME attribute.
    if isinstance(plugin, basestring):
      print frontend_utils.FormatOutputString('', plugin)
    else:
      plugin_string = getattr(plugin, 'NAME', u'N/A')
      print frontend_utils.FormatOutputString('', plugin_string)

  print frontend_utils.FormatHeader('Parsers Used')
  for parser in sorted(event_object_consumer.parsers):
    print frontend_utils.FormatOutputString('', parser)

  print frontend_utils.FormatHeader('Plugins Used')
  for plugin in sorted(event_object_consumer.plugins):
    print frontend_utils.FormatOutputString('', plugin)

  print frontend_utils.FormatHeader('Counter')
  for key, value in event_object_consumer.counter.most_common():
    print frontend_utils.FormatOutputString(key, value)

  if options.verbose:
    return GetStats(profiler)
def GetStats(profiler):
  """Print verbose information from profiler and return a stats object.

  Args:
    profiler: the profiler (instance of cProfile.Profile) to summarize.

  Returns:
    The statistics object (instance of pstats.Stats).
  """
  stats = pstats.Stats(profiler, stream=sys.stdout)
  print frontend_utils.FormatHeader('Profiler')

  print '\n{:-^20}'.format(' Top 10 Time Spent ')
  stats.sort_stats('cumulative')
  stats.print_stats(10)

  print '\n{:-^20}'.format(' Sorted By Function Calls ')
  stats.sort_stats('calls')
  stats.print_stats()

  return stats
def Main():
  """Start the tool.

  Returns:
    True when processing completed, False on invalid arguments.
  """
  usage = (
      u'Run this tool against a single file to see how many events are '
      u'extracted from it and which parsers recognize it.')
  arg_parser = argparse.ArgumentParser(description=usage)

  format_str = '[%(levelname)s] %(message)s'
  logging.basicConfig(level=logging.INFO, format=format_str)

  arg_parser.add_argument(
      '-v', '--verbose', dest='verbose', action='store_true', default=False,
      help=(
          'Be extra verbose in the information printed out (include full '
          'stats).'))

  arg_parser.add_argument(
      '-c', '--console', dest='console', action='store_true',
      default=False, help='After processing drop to an interactive shell.')

  arg_parser.add_argument(
      '-p', '--parsers', dest='parsers', action='store', default='', type=str,
      help='A list of parsers to include (see log2timeline documentation).')

  arg_parser.add_argument(
      '--proto', dest='proto_file', action='store', default='', type=unicode,
      metavar='PROTO_FILE', help=(
          'A file containing an ASCII PathSpec protobuf describing how to '
          'open up the file for parsing.'))

  arg_parser.add_argument(
      '-s', '--storage', dest='storage', action='store', type=unicode,
      metavar='PSORT_PARAMETER', default='', help=(
          'Run the profiler against a storage file, with the parameters '
          'provided with this option, eg: "-q -w /dev/null". The storage '
          'file has to be passed in as the FILE_TO_PARSE argument to the '
          'tool and filters are also optional. This is equivilant to calling '
          'psort.py STORAGE_PARAMETER FILE_TO_PARSE [FILTER]. Where the '
          'storage parameters are the ones defined with this parameter.'))

  # TODO: Add the option of dropping into a python shell that contains the
  # stats attribute and others, just print out basic information and do the
  # profiling, then drop into a ipython shell that allows you to work with
  # the stats object.

  arg_parser.add_argument(
      'file_to_parse', nargs='?', action='store', metavar='FILE_TO_PARSE',
      default=None, help='A path to the file that is to be parsed.')

  arg_parser.add_argument(
      'filter', action='store', metavar='FILTER', nargs='?', default=None,
      help=('A filter that can be used to filter the dataset before it '
            'is written into storage. More information about the filters'
            ' and it\'s usage can be found here: http://plaso.kiddaland.'
            'net/usage/filters'))

  options = arg_parser.parse_args()

  # A file (or a PathSpec proto pointing at one) is mandatory.
  if not (options.file_to_parse or options.proto_file):
    arg_parser.print_help()
    print ''
    arg_parser.print_usage()
    print ''
    logging.error('Not able to run without a file to process.')
    return False

  if options.file_to_parse and not os.path.isfile(options.file_to_parse):
    logging.error(u'File [{0:s}] needs to exist.'.format(options.file_to_parse))
    return False

  PrintHeader(options)
  # Stats attribute used for console sessions.
  # pylint: disable=unused-variable
  if options.storage:
    stats = ProcessStorage(options)
  else:
    stats = ProcessFile(options)

  # Optionally drop into an interactive IPython shell with `stats` bound.
  if options.console:
    ipshell = InteractiveShellEmbed()
    ipshell.confirm_exit = False
    ipshell()
  return True
if __name__ == '__main__':
  # Exit non-zero when the tool reports a failure.
  if not Main():
    sys.exit(1)
  else:
    sys.exit(0)
| |
import os
import sys
import unittest
from flask import Flask, request
from app import generator
try:
import kvm_player
except ImportError:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import kvm_player
@unittest.skipIf(os.getenv("KVM_ISO", None) is None, "TestKVMBasicISO need env KVM_ISO=whatever")
class TestKVMBasicISO(kvm_player.KernelVirtualMachinePlayer):
    """Boots libvirt VMs from the iPXE ISO and waits for their callbacks.

    Each test starts a transient Flask app; the booted machines POST to
    /ok once ready, which records the payload and shuts the server down.
    """

    # TCP port on which the per-test Flask app listens for the VMs'
    # "machine ready" POST callbacks.
    flask_ok_port = 5050

    @classmethod
    def setUpClass(cls):
        # Bring up the shared lab environment (network bridge, matchbox,
        # dnsmasq and the API) once for every test in this class.
        cls.running_requirements()
        cls.set_rack0()
        cls.set_matchbox()
        cls.set_dnsmasq()
        cls.set_api()
        cls.pause(cls.wait_setup_teardown)

    def test_00(self):
        # Single VM: boot one machine from the ISO and expect exactly one
        # callback whose form keys carry the test marker.
        marker = "euid-%s-%s" % (TestKVMBasicISO.__name__.lower(), self.test_00.__name__)
        gen = generator.Generator(
            api_uri=self.api_uri,
            profile_id="%s" % marker,
            name="%s" % marker,
            ignition_id="%s.yaml" % marker,
            matchbox_path=self.test_matchbox_path
        )
        gen.dumps()
        app = Flask(marker)
        resp = []

        @app.route('/ok', methods=['POST'])
        def machine_ready():
            # First callback ends the test: record it and stop the server.
            resp.append(request.form.keys())
            request.environ.get('werkzeug.server.shutdown')()
            return "roger\n"

        # Clean up any leftover domain from a previous run before starting.
        destroy, undefine = ["virsh", "destroy", "%s" % marker], ["virsh", "undefine", "%s" % marker]
        self.virsh(destroy, v=self.dev_null), self.virsh(undefine, v=self.dev_null)
        try:
            virt_install = [
                "virt-install",
                "--name",
                "%s" % marker,
                "--network=bridge:rack0,model=virtio",
                "--memory=1024",
                "--vcpus=%d" % self.get_optimized_cpu(1),
                "--cdrom",
                "%s/ipxe.iso" % self.tests_path,
                "--disk",
                "none",
                "--os-type=linux",
                "--os-variant=generic",
                "--noautoconsole",
                "--boot=cdrom"
            ]
            self.virsh(virt_install, assertion=True, v=self.dev_null)
            # Blocks until machine_ready() shuts the server down.
            app.run(
                host="172.20.0.1", port=self.flask_ok_port, debug=False, use_reloader=False)
        finally:
            self.virsh(destroy)
            self.virsh(undefine)
        self.assertItemsEqual(resp, [['euid-testkvmbasiciso-test_00']])

    # @unittest.skip("just skip")
    def test_01(self):
        # Three VMs sharing one profile: expect one callback per machine.
        nb_node = 3
        marker = "euid-%s-%s" % (TestKVMBasicISO.__name__.lower(), self.test_01.__name__)
        gen = generator.Generator(
            api_uri=self.api_uri,
            profile_id="%s" % marker,
            name="%s" % marker,
            ignition_id="%s.yaml" % marker,
            matchbox_path=self.test_matchbox_path
        )
        gen.dumps()
        app = Flask(marker)
        resp = []

        @app.route('/ok', methods=['POST'])
        def machine_ready():
            # Stop the server only after every node has phoned home.
            resp.append(request.form.keys())
            if len(resp) == nb_node:
                request.environ.get('werkzeug.server.shutdown')()
            return "roger\n"

        try:
            for i in range(nb_node):
                machine_marker = "%s-%d" % (marker, i)
                destroy, undefine = ["virsh", "destroy", "%s" % machine_marker], \
                    ["virsh", "undefine", "%s" % machine_marker]
                self.virsh(destroy, v=self.dev_null), self.virsh(undefine, v=self.dev_null)
                virt_install = [
                    "virt-install",
                    "--name",
                    "%s" % machine_marker,
                    "--network=bridge:rack0,model=virtio",
                    "--memory=1024",
                    "--vcpus=%d" % self.get_optimized_cpu(nb_node),
                    "--cdrom",
                    "%s/ipxe.iso" % self.tests_path,
                    "--disk",
                    "none",
                    "--os-type=linux",
                    "--os-variant=generic",
                    "--noautoconsole",
                    "--boot=cdrom"
                ]
                self.virsh(virt_install, assertion=True, v=self.dev_null)
            # Blocks until the nb_node-th callback shuts the server down.
            app.run(
                host="172.20.0.1", port=self.flask_ok_port, debug=False, use_reloader=False)
        finally:
            for i in range(nb_node):
                machine_marker = "%s-%d" % (marker, i)
                destroy, undefine = ["virsh", "destroy", "%s" % machine_marker], \
                    ["virsh", "undefine", "%s" % machine_marker]
                self.virsh(destroy)
                self.virsh(undefine)
        self.assertEqual(nb_node, len(resp))
        self.assertItemsEqual(resp, [
            ['euid-testkvmbasiciso-test_01'],
            ['euid-testkvmbasiciso-test_01'],
            ['euid-testkvmbasiciso-test_01']])

    # @unittest.skip("just skip")
    def test_02(self):
        # Three VMs, each matched to its own profile by MAC address, so
        # each callback carries a per-machine marker.
        nb_node = 3
        marker = "euid-%s-%s" % (TestKVMBasicISO.__name__.lower(), self.test_02.__name__)
        app = Flask(marker)
        resp = []

        @app.route('/ok', methods=['POST'])
        def machine_ready():
            resp.append(request.form.keys())
            if len(resp) == nb_node:
                request.environ.get('werkzeug.server.shutdown')()
            return "roger\n"

        # MAC prefix; the loop index supplies the final digit per machine.
        base_mac = "52:54:00:78:83:0"
        try:
            for i in range(nb_node):
                machine_marker = "%s-%d" % (marker, i)
                gen = generator.Generator(
                    api_uri=self.api_uri,
                    profile_id="%s" % machine_marker,
                    name="%s" % machine_marker,
                    ignition_id="%s.yaml" % machine_marker,
                    matchbox_path=self.test_matchbox_path,
                    selector={"mac": "%s%d" % (base_mac, i)}
                )
                gen.dumps()
                destroy, undefine = ["virsh", "destroy", "%s" % machine_marker], \
                    ["virsh", "undefine", "%s" % machine_marker]
                self.virsh(destroy, v=self.dev_null), self.virsh(undefine, v=self.dev_null)
                virt_install = [
                    "virt-install",
                    "--name",
                    "%s" % machine_marker,
                    "--network=bridge:rack0,model=virtio,mac=%s%d" % (base_mac, i),
                    "--memory=1024",
                    "--vcpus=%d" % self.get_optimized_cpu(nb_node),
                    "--cdrom",
                    "%s/ipxe.iso" % self.tests_path,
                    "--disk",
                    "none",
                    "--os-type=linux",
                    "--os-variant=generic",
                    "--noautoconsole",
                    "--boot=cdrom"
                ]
                self.virsh(virt_install, assertion=True, v=self.dev_null)
            # Blocks until the nb_node-th callback shuts the server down.
            app.run(
                host="172.20.0.1", port=self.flask_ok_port, debug=False, use_reloader=False)
        finally:
            for i in range(nb_node):
                machine_marker = "%s-%d" % (marker, i)
                destroy, undefine = ["virsh", "destroy", "%s" % machine_marker], \
                    ["virsh", "undefine", "%s" % machine_marker]
                self.virsh(destroy, v=self.dev_null)
                self.virsh(undefine, v=self.dev_null)
        self.assertEqual(nb_node, len(resp))
        self.assertItemsEqual(resp, [
            ['euid-testkvmbasiciso-test_02-0'],
            ['euid-testkvmbasiciso-test_02-2'],
            ['euid-testkvmbasiciso-test_02-1']])
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pyrax.client import BaseClient
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
import pyrax.utils as utils
class Network(BaseResource):
    """A cloud network in a RackConnect configuration.

    Adds no behavior beyond what BaseResource provides.
    """
    pass
class LoadBalancerPool(BaseResource):
    """A pool of nodes that are Load-Balanced."""

    def nodes(self):
        """Returns the PoolNode objects currently in this pool."""
        return self.manager.get_pool_nodes(self)

    def add_node(self, server):
        """Adds a cloud server to this pool.

        Returns the PoolNode created by the manager.  (Previously the
        manager's return value was silently discarded, forcing callers to
        re-fetch the node; returning it is backward compatible.)
        """
        return self.manager.add_pool_node(self, server)
class PoolNode(BaseResource):
    """A node in a LoadBalancerPool."""

    def get_pool(self):
        """Returns the LoadBalancerPool this node belongs to."""
        return self.manager.get(self.load_balancer_pool['id'])

    def get(self):
        """Gets the details for the object."""
        # set 'loaded' first ... so if we have to bail, we know we tried.
        self.loaded = True
        if not hasattr(self.manager, "get"):
            return
        # NOTE(review): get_details presumably comes from BaseResource and
        # gates whether detail fetching is enabled — confirm.
        if not self.get_details:
            return

        # Re-fetch this node through its pool and merge in fresh details.
        pool = self.get_pool()
        new = self.manager.get_pool_node(pool, self)
        if new:
            self._add_details(new._info)
class PublicIP(BaseResource):
    """Represents a public IP assigned to a RackConnected server.

    Adds no behavior beyond what BaseResource provides.
    """
    pass
class LoadBalancerPoolManager(BaseManager):
    """Manager for load balancer pools and the nodes they contain."""

    def _get_node_base_uri(self, pool, node=None):
        """Builds the URI for a pool's node collection, or for one node
        when *node* is given.
        """
        if node is not None:
            template = "/%s/%s/nodes/%s"
            params = (self.uri_base, utils.get_id(pool), utils.get_id(node))
        else:
            template = "/%s/%s/nodes"
            params = (self.uri_base, utils.get_id(pool))
        return template % params

    def _make_pool_node_body(self, pool, server):
        """Builds the request body linking *server* to *pool*."""
        return {
            'cloud_server': {
                'id': utils.get_id(server)
            },
            'load_balancer_pool': {
                'id': utils.get_id(pool),
            }
        }

    def get_pool_node(self, pool, node):
        """Fetches a single node of *pool* and returns it as a PoolNode."""
        uri = self._get_node_base_uri(pool, node=node)
        resp, resp_body = self.api.method_get(uri)
        return PoolNode(self, resp_body, loaded=True)

    def get_pool_nodes(self, pool):
        """Returns all nodes of *pool* as PoolNode objects."""
        uri = self._get_node_base_uri(pool)
        resp, resp_body = self.api.method_get(uri)
        return [PoolNode(self, node, loaded=True)
                for node in resp_body if node]

    def add_pool_node(self, pool, server):
        """Adds *server* to *pool*; returns the created PoolNode."""
        pool_id = utils.get_id(pool)
        uri = self._get_node_base_uri(pool_id)
        body = self._make_pool_node_body(pool, server)
        resp, resp_body = self.api.method_post(uri, body=body)
        return PoolNode(self, resp_body, loaded=True)

    def add_pool_nodes(self, pool_map):
        """Adds several (pool, server) pairs in one bulk request.

        *pool_map* maps pools to the server to add to each.
        """
        uri = "/%s/nodes" % self.uri_base
        body = [self._make_pool_node_body(pool, server)
                for pool, server in pool_map.items()]
        resp, resp_body = self.api.method_post(uri, body=body)
        return [PoolNode(self, res, loaded=True) for res in resp_body]

    def delete_pool_node(self, pool, node):
        """Removes *node* from *pool*.

        Returns the node's state while removal is in progress, or None
        once it is already gone.
        """
        uri = self._get_node_base_uri(pool, node=node)
        resp, resp_body = self.api.method_delete(uri)
        # NOTE(review): fetching right after the delete presumably returns
        # the node while it is being removed; NotFound means the deletion
        # has already completed — confirm against the API behavior.
        try:
            return self.get_pool_node(pool, node)
        except exc.NotFound:
            return
class PublicIPManager(BaseManager):
    """Manager for RackConnect public IP addresses."""

    def get_ip_for_server(self, server):
        """List the PublicIP records attached to *server*."""
        uri = "/%s?cloud_server_id=%s" % (self.uri_base,
                                          utils.get_id(server))
        resp, resp_body = self.api.method_get(uri)
        records = []
        for item in resp_body:
            records.append(PublicIP(self, item, loaded=True))
        return records

    def add_public_ip(self, server):
        """Request a new public IP for *server*."""
        payload = {
            'cloud_server': {
                'id': utils.get_id(server),
            },
        }
        resp, resp_body = self.api.method_post("/%s" % self.uri_base,
                                               body=payload)
        return PublicIP(self, resp_body, loaded=True)

    def delete_public_ip(self, public_ip):
        """Remove *public_ip*.

        Returns its current record while removal is pending, or None
        once the API reports it gone.
        """
        uri = "/%s/%s" % (self.uri_base, utils.get_id(public_ip))
        resp, resp_body = self.api.method_delete(uri)
        try:
            return self.get(public_ip)
        except exc.NotFound:
            return
class RackConnectClient(BaseClient):
    """A client to interact with RackConnected resources.

    All real work is delegated to three managers: cloud networks,
    load balancer pools, and public IPs.
    """
    name = "RackConnect"

    def _configure_manager(self):
        """Create a manager to handle RackConnect operations."""
        self._network_manager = BaseManager(
            self, resource_class=Network, uri_base="cloud_networks",
        )
        self._load_balancer_pool_manager = LoadBalancerPoolManager(
            self, resource_class=LoadBalancerPool,
            uri_base="load_balancer_pools"
        )
        self._public_ip_manager = PublicIPManager(
            self, resource_class=PublicIP, uri_base="public_ips",
        )

    def get_network(self, network):
        """Return the Network for the given object or ID."""
        return self._network_manager.get(network)

    def list_networks(self):
        """List all RackConnect cloud networks."""
        return self._network_manager.list()

    def list_load_balancer_pools(self):
        """List all load balancer pools."""
        return self._load_balancer_pool_manager.list()

    def get_load_balancer_pool(self, pool):
        """Return the LoadBalancerPool for the given object or ID."""
        return self._load_balancer_pool_manager.get(pool)

    def list_pool_nodes(self, pool):
        """List the nodes in *pool*."""
        return self._load_balancer_pool_manager.get_pool_nodes(pool)

    def create_pool_node(self, pool, server):
        """Add *server* to *pool* and return the new PoolNode."""
        return self._load_balancer_pool_manager.add_pool_node(pool, server)

    def get_pool_node(self, pool, node):
        """Return a single node of *pool*."""
        return self._load_balancer_pool_manager.get_pool_node(pool, node)

    def delete_pool_node(self, pool, node):
        """Remove *node* from *pool*."""
        return self._load_balancer_pool_manager.delete_pool_node(pool, node)

    def create_public_ip(self, public_ip):
        # NOTE(review): the parameter is passed as the *server* argument of
        # add_public_ip, so callers appear to supply a server here despite
        # the name -- confirm against callers.
        return self._public_ip_manager.add_public_ip(public_ip)

    def list_public_ips(self):
        """List all public IPs for the account."""
        return self._public_ip_manager.list()

    def get_public_ip(self, public_ip):
        """Return the PublicIP for the given object or ID."""
        return self._public_ip_manager.get(public_ip)

    def get_public_ips_for_server(self, server):
        """List the public IPs attached to *server*."""
        return self._public_ip_manager.get_ip_for_server(server)

    def delete_public_ip(self, public_ip):
        """Remove the given public IP."""
        return self._public_ip_manager.delete_public_ip(public_ip)

    #################################################################
    # The following methods are defined in the generic client class,
    # but don't have meaning in RackConnect, as there is not a single
    # resource that defines this module.
    #################################################################
    def list(self, limit=None, marker=None):
        """Not applicable in RackConnect."""
        raise NotImplementedError

    def get(self, item):
        """Not applicable in RackConnect."""
        raise NotImplementedError

    def create(self, *args, **kwargs):
        """Not applicable in RackConnect."""
        raise NotImplementedError

    def delete(self, item):
        """Not applicable in RackConnect."""
        raise NotImplementedError

    def find(self, **kwargs):
        """Not applicable in RackConnect."""
        raise NotImplementedError

    def findall(self, **kwargs):
        """Not applicable in RackConnect."""
        raise NotImplementedError
| |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from neutronclient.common import exceptions as q_ext
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
class DeleteRouter(policy.PolicyTargetMixin, tables.DeleteAction):
    """Table action that deletes one or more routers."""

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Router",
            u"Delete Routers",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Router",
            u"Deleted Routers",
            count
        )

    redirect_url = "horizon:project:routers:index"
    policy_rules = (("network", "delete_router"),)

    def delete(self, request, obj_id):
        """Delete the router, reporting failures with its display name.

        A NeutronClientException aborts the batch by redirecting to the
        index view; any other error is handled in place.
        """
        obj = self.table.get_object_by_id(obj_id)
        name = self.table.get_object_display(obj)
        try:
            api.neutron.router_delete(request, obj_id)
        except q_ext.NeutronClientException as e:
            # Bug fix: the message template expects the router's name, but
            # the exception object was being substituted instead, producing
            # text like 'Unable to delete router "409 Conflict ..."'. Use
            # the name (matching the generic branch) and log the detail.
            msg = _('Unable to delete router "%s"') % name
            LOG.info("%s: %s", msg, e)
            messages.error(request, msg)
            redirect = reverse(self.redirect_url)
            raise exceptions.Http302(redirect, message=msg)
        except Exception:
            msg = _('Unable to delete router "%s"') % name
            LOG.info(msg)
            exceptions.handle(request, msg)

    def allowed(self, request, router=None):
        # Always offered; access control is enforced via policy_rules.
        return True
class CreateRouter(tables.LinkAction):
    """Table action linking to the router-creation modal."""
    name = "create"
    verbose_name = _("Create Router")
    url = "horizon:project:routers:create"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("network", "create_router"),)
class EditRouter(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the router-update modal."""
    name = "update"
    verbose_name = _("Edit Router")
    url = "horizon:project:routers:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("network", "update_router"),)
class SetGateway(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the modal for attaching an external gateway."""
    name = "setgateway"
    verbose_name = _("Set Gateway")
    url = "horizon:project:routers:setgateway"
    classes = ("ajax-modal",)
    icon = "camera"
    policy_rules = (("network", "update_router"),)

    def allowed(self, request, datum=None):
        # Only offer the action while the router has no gateway yet.
        return not datum.external_gateway_info
class ClearGateway(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that detaches a router's external gateway."""

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Clear Gateway",
            u"Clear Gateways",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Cleared Gateway",
            u"Cleared Gateways",
            count
        )

    name = "cleargateway"
    classes = ('btn-danger', 'btn-cleargateway')
    redirect_url = "horizon:project:routers:index"
    policy_rules = (("network", "update_router"),)

    def action(self, request, obj_id):
        """Remove the gateway, redirecting to the index on failure."""
        router = self.table.get_object_by_id(obj_id)
        display_name = self.table.get_object_display(router)
        try:
            api.neutron.router_remove_gateway(request, obj_id)
        except Exception as e:
            msg = (_('Unable to clear gateway for router '
                     '"%(name)s": "%(msg)s"')
                   % {"name": display_name, "msg": e})
            LOG.info(msg)
            exceptions.handle(request, msg,
                              redirect=reverse(self.redirect_url))

    def get_success_url(self, request):
        return reverse(self.redirect_url)

    def allowed(self, request, datum=None):
        # Only meaningful when a gateway is currently attached.
        return bool(datum.external_gateway_info)
class UpdateRow(tables.Row):
    """AJAX row refresh: re-fetch a single router by its ID."""
    ajax = True

    def get_data(self, request, router_id):
        return api.neutron.router_get(request, router_id)
def get_external_network(router):
    """Return the router's external network, or "-" when it has no gateway."""
    gateway = router.external_gateway_info
    return gateway['network'] if gateway else "-"
class RoutersTable(tables.DataTable):
    """Table of Neutron routers for the current project."""
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:project:routers:detail")
    status = tables.Column("status",
                           filters=(filters.title,),
                           verbose_name=_("Status"),
                           status=True)
    distributed = tables.Column("distributed",
                                filters=(filters.yesno, filters.capfirst),
                                verbose_name=_("Distributed"))
    ha = tables.Column("ha",
                       filters=(filters.yesno, filters.capfirst),
                       # Translators: High Availability mode of Neutron router
                       verbose_name=_("HA mode"))
    ext_net = tables.Column(get_external_network,
                            verbose_name=_("External Network"))

    def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
        super(RoutersTable, self).__init__(
            request,
            data=data,
            needs_form_wrapper=needs_form_wrapper,
            **kwargs)
        # Hide the DVR / HA columns when the deployment does not expose
        # those Neutron extensions to this user.
        if not api.neutron.get_feature_permission(request, "dvr", "get"):
            del self.columns["distributed"]
        if not api.neutron.get_feature_permission(request, "l3-ha", "get"):
            del self.columns["ha"]

    def get_object_display(self, obj):
        # Display name used in action messages: the router's name.
        return obj.name

    class Meta:
        # NOTE(review): table slugs are conventionally lowercase; the
        # capitalized "Routers" looks unusual -- confirm it is intended.
        name = "Routers"
        verbose_name = _("Routers")
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (CreateRouter, DeleteRouter)
        row_actions = (SetGateway, ClearGateway, EditRouter, DeleteRouter)
| |
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcplife Trace the lifespan of TCP sessions and summarize.
# For Linux, uses BCC, BPF. Embedded C.
#
# USAGE: tcplife [-h] [-C] [-S] [-p PID] [interval [count]]
#
# This uses the sock:inet_sock_set_state tracepoint if it exists (added to
# Linux 4.16, and replacing the earlier tcp:tcp_set_state), else it uses
# kernel dynamic tracing of tcp_set_state().
#
# While throughput counters are emitted, they are fetched in a low-overhead
# manner: reading members of the tcp_info struct on TCP close. ie, we do not
# trace send/receive.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# IDEA: Julia Evans
#
# 18-Oct-2016 Brendan Gregg Created this.
# 29-Dec-2017 " " Added tracepoint support.
from __future__ import print_function
from bcc import BPF
import argparse
from socket import inet_ntop, ntohs, AF_INET, AF_INET6
from struct import pack
from time import strftime
# arguments
# Fixes in the epilog text: "colums" -> "columns"; the time-column example
# used "-t", but per the parser "-T/--time" prints HH:MM:SS while
# "-t/--timestamp" prints seconds -- both are now shown correctly.
examples = """examples:
./tcplife # trace all TCP connect()s
./tcplife -T # include time column (HH:MM:SS)
./tcplife -t # include timestamp in seconds
./tcplife -w # wider columns (fit IPv6)
./tcplife -stT # csv output, with times & timestamps
./tcplife -p 181 # only trace PID 181
./tcplife -L 80 # only trace local port 80
./tcplife -L 80,81 # only trace local ports 80 and 81
./tcplife -D 80 # only trace remote port 80
"""
parser = argparse.ArgumentParser(
    description="Trace the lifespan of TCP sessions and summarize",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-T", "--time", action="store_true",
    help="include time column on output (HH:MM:SS)")
parser.add_argument("-t", "--timestamp", action="store_true",
    help="include timestamp on output (seconds)")
parser.add_argument("-w", "--wide", action="store_true",
    help="wide column output (fits IPv6 addresses)")
parser.add_argument("-s", "--csv", action="store_true",
    help="comma separated values output")
parser.add_argument("-p", "--pid",
    help="trace this PID only")
parser.add_argument("-L", "--localport",
    help="comma-separated list of local ports to trace.")
parser.add_argument("-D", "--remoteport",
    help="comma-separated list of remote ports to trace.")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#define KBUILD_MODNAME "foo"
#include <linux/tcp.h>
#include <net/sock.h>
#include <bcc/proto.h>
BPF_HASH(birth, struct sock *, u64);
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
u64 ts_us;
u32 pid;
u32 saddr;
u32 daddr;
u64 ports;
u64 rx_b;
u64 tx_b;
u64 span_us;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u64 ts_us;
u32 pid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 ports;
u64 rx_b;
u64 tx_b;
u64 span_us;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events);
struct id_t {
u32 pid;
char task[TASK_COMM_LEN];
};
BPF_HASH(whoami, struct sock *, struct id_t);
"""
#
# XXX: The following is temporary code for older kernels, Linux 4.14 and
# older. It uses kprobes to instrument tcp_set_state(). On Linux 4.16 and
# later, the sock:inet_sock_set_state tracepoint should be used instead, as
# is done by the code that follows this. In the distant future (2021?), this
# kprobe code can be removed. This is why there is so much code
# duplication: to make removal easier.
#
bpf_text_kprobe = """
int kprobe__tcp_set_state(struct pt_regs *ctx, struct sock *sk, int state)
{
u32 pid = bpf_get_current_pid_tgid() >> 32;
// lport is either used in a filter here, or later
u16 lport = sk->__sk_common.skc_num;
FILTER_LPORT
// dport is either used in a filter here, or later
u16 dport = sk->__sk_common.skc_dport;
dport = ntohs(dport);
FILTER_DPORT
/*
* This tool includes PID and comm context. It's best effort, and may
* be wrong in some situations. It currently works like this:
* - record timestamp on any state < TCP_FIN_WAIT1
* - cache task context on:
* TCP_SYN_SENT: tracing from client
* TCP_LAST_ACK: client-closed from server
* - do output on TCP_CLOSE:
* fetch task context if cached, or use current task
*/
// capture birth time
if (state < TCP_FIN_WAIT1) {
/*
* Matching just ESTABLISHED may be sufficient, provided no code-path
* sets ESTABLISHED without a tcp_set_state() call. Until we know
* that for sure, match all early states to increase chances a
* timestamp is set.
* Note that this needs to be set before the PID filter later on,
* since the PID isn't reliable for these early stages, so we must
* save all timestamps and do the PID filter later when we can.
*/
u64 ts = bpf_ktime_get_ns();
birth.update(&sk, &ts);
}
// record PID & comm on SYN_SENT
if (state == TCP_SYN_SENT || state == TCP_LAST_ACK) {
// now we can PID filter, both here and a little later on for CLOSE
FILTER_PID
struct id_t me = {.pid = pid};
bpf_get_current_comm(&me.task, sizeof(me.task));
whoami.update(&sk, &me);
}
if (state != TCP_CLOSE)
return 0;
// calculate lifespan
u64 *tsp, delta_us;
tsp = birth.lookup(&sk);
if (tsp == 0) {
whoami.delete(&sk); // may not exist
return 0; // missed create
}
delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
birth.delete(&sk);
// fetch possible cached data, and filter
struct id_t *mep;
mep = whoami.lookup(&sk);
if (mep != 0)
pid = mep->pid;
FILTER_PID
// get throughput stats. see tcp_get_info().
u64 rx_b = 0, tx_b = 0, sport = 0;
struct tcp_sock *tp = (struct tcp_sock *)sk;
rx_b = tp->bytes_received;
tx_b = tp->bytes_acked;
u16 family = sk->__sk_common.skc_family;
if (family == AF_INET) {
struct ipv4_data_t data4 = {};
data4.span_us = delta_us;
data4.rx_b = rx_b;
data4.tx_b = tx_b;
data4.ts_us = bpf_ktime_get_ns() / 1000;
data4.saddr = sk->__sk_common.skc_rcv_saddr;
data4.daddr = sk->__sk_common.skc_daddr;
// a workaround until data4 compiles with separate lport/dport
data4.pid = pid;
data4.ports = dport + ((0ULL + lport) << 32);
if (mep == 0) {
bpf_get_current_comm(&data4.task, sizeof(data4.task));
} else {
bpf_probe_read(&data4.task, sizeof(data4.task), (void *)mep->task);
}
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));
} else /* 6 */ {
struct ipv6_data_t data6 = {};
data6.span_us = delta_us;
data6.rx_b = rx_b;
data6.tx_b = tx_b;
data6.ts_us = bpf_ktime_get_ns() / 1000;
bpf_probe_read(&data6.saddr, sizeof(data6.saddr),
sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read(&data6.daddr, sizeof(data6.daddr),
sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
// a workaround until data6 compiles with separate lport/dport
data6.ports = dport + ((0ULL + lport) << 32);
data6.pid = pid;
if (mep == 0) {
bpf_get_current_comm(&data6.task, sizeof(data6.task));
} else {
bpf_probe_read(&data6.task, sizeof(data6.task), (void *)mep->task);
}
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));
}
if (mep != 0)
whoami.delete(&sk);
return 0;
}
"""
bpf_text_tracepoint = """
TRACEPOINT_PROBE(sock, inet_sock_set_state)
{
if (args->protocol != IPPROTO_TCP)
return 0;
u32 pid = bpf_get_current_pid_tgid() >> 32;
// sk is mostly used as a UUID, and for two tcp stats:
struct sock *sk = (struct sock *)args->skaddr;
// lport is either used in a filter here, or later
u16 lport = args->sport;
FILTER_LPORT
// dport is either used in a filter here, or later
u16 dport = args->dport;
FILTER_DPORT
/*
* This tool includes PID and comm context. It's best effort, and may
* be wrong in some situations. It currently works like this:
* - record timestamp on any state < TCP_FIN_WAIT1
* - cache task context on:
* TCP_SYN_SENT: tracing from client
* TCP_LAST_ACK: client-closed from server
* - do output on TCP_CLOSE:
* fetch task context if cached, or use current task
*/
// capture birth time
if (args->newstate < TCP_FIN_WAIT1) {
/*
* Matching just ESTABLISHED may be sufficient, provided no code-path
* sets ESTABLISHED without a tcp_set_state() call. Until we know
* that for sure, match all early states to increase chances a
* timestamp is set.
* Note that this needs to be set before the PID filter later on,
* since the PID isn't reliable for these early stages, so we must
* save all timestamps and do the PID filter later when we can.
*/
u64 ts = bpf_ktime_get_ns();
birth.update(&sk, &ts);
}
// record PID & comm on SYN_SENT
if (args->newstate == TCP_SYN_SENT || args->newstate == TCP_LAST_ACK) {
// now we can PID filter, both here and a little later on for CLOSE
FILTER_PID
struct id_t me = {.pid = pid};
bpf_get_current_comm(&me.task, sizeof(me.task));
whoami.update(&sk, &me);
}
if (args->newstate != TCP_CLOSE)
return 0;
// calculate lifespan
u64 *tsp, delta_us;
tsp = birth.lookup(&sk);
if (tsp == 0) {
whoami.delete(&sk); // may not exist
return 0; // missed create
}
delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
birth.delete(&sk);
// fetch possible cached data, and filter
struct id_t *mep;
mep = whoami.lookup(&sk);
if (mep != 0)
pid = mep->pid;
FILTER_PID
// get throughput stats. see tcp_get_info().
u64 rx_b = 0, tx_b = 0, sport = 0;
struct tcp_sock *tp = (struct tcp_sock *)sk;
rx_b = tp->bytes_received;
tx_b = tp->bytes_acked;
if (args->family == AF_INET) {
struct ipv4_data_t data4 = {};
data4.span_us = delta_us;
data4.rx_b = rx_b;
data4.tx_b = tx_b;
data4.ts_us = bpf_ktime_get_ns() / 1000;
__builtin_memcpy(&data4.saddr, args->saddr, sizeof(data4.saddr));
__builtin_memcpy(&data4.daddr, args->daddr, sizeof(data4.daddr));
// a workaround until data4 compiles with separate lport/dport
data4.ports = dport + ((0ULL + lport) << 32);
data4.pid = pid;
if (mep == 0) {
bpf_get_current_comm(&data4.task, sizeof(data4.task));
} else {
bpf_probe_read(&data4.task, sizeof(data4.task), (void *)mep->task);
}
ipv4_events.perf_submit(args, &data4, sizeof(data4));
} else /* 6 */ {
struct ipv6_data_t data6 = {};
data6.span_us = delta_us;
data6.rx_b = rx_b;
data6.tx_b = tx_b;
data6.ts_us = bpf_ktime_get_ns() / 1000;
__builtin_memcpy(&data6.saddr, args->saddr_v6, sizeof(data6.saddr));
__builtin_memcpy(&data6.daddr, args->daddr_v6, sizeof(data6.daddr));
// a workaround until data6 compiles with separate lport/dport
data6.ports = dport + ((0ULL + lport) << 32);
data6.pid = pid;
if (mep == 0) {
bpf_get_current_comm(&data6.task, sizeof(data6.task));
} else {
bpf_probe_read(&data6.task, sizeof(data6.task), (void *)mep->task);
}
ipv6_events.perf_submit(args, &data6, sizeof(data6));
}
if (mep != 0)
whoami.delete(&sk);
return 0;
}
"""
# Attach via the tracepoint when the kernel provides it (4.16+); otherwise
# fall back to the tcp_set_state() kprobe.
if BPF.tracepoint_exists("sock", "inet_sock_set_state"):
    bpf_text += bpf_text_tracepoint
else:
    bpf_text += bpf_text_kprobe

# Splice the requested filters into the C source.
if args.pid:
    pid_code = 'if (pid != %s) { return 0; }' % args.pid
    bpf_text = bpf_text.replace('FILTER_PID', pid_code)
if args.remoteport:
    dport_conds = ' && '.join('dport != %d' % int(p)
                              for p in args.remoteport.split(','))
    bpf_text = bpf_text.replace(
        'FILTER_DPORT',
        'if (%s) { birth.delete(&sk); return 0; }' % dport_conds)
if args.localport:
    lport_conds = ' && '.join('lport != %d' % int(p)
                              for p in args.localport.split(','))
    bpf_text = bpf_text.replace(
        'FILTER_LPORT',
        'if (%s) { birth.delete(&sk); return 0; }' % lport_conds)
# Blank out any placeholders that were not filled in above.
for placeholder in ('FILTER_PID', 'FILTER_DPORT', 'FILTER_LPORT'):
    bpf_text = bpf_text.replace(placeholder, '')

if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()
#
# Setup output formats
#
# Don't change the default output (next 2 lines): this fits in 80 chars. I
# know it doesn't have NS or UIDs etc. I know. If you really, really, really
# need to add columns, columns that solve real actual problems, I'd start by
# adding an extended mode (-x) to included those columns.
#
header_string = "%-5s %-10.10s %s%-15s %-5s %-15s %-5s %5s %5s %s"
format_string = "%-5d %-10.10s %s%-15s %-5d %-15s %-5d %5d %5d %.2f"
if args.wide:
header_string = "%-5s %-16.16s %-2s %-26s %-5s %-26s %-5s %6s %6s %s"
format_string = "%-5d %-16.16s %-2s %-26s %-5s %-26s %-5d %6d %6d %.2f"
if args.csv:
header_string = "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s"
format_string = "%d,%s,%s,%s,%s,%s,%d,%d,%d,%.2f"
# process event
def print_ipv4_event(cpu, data, size):
    """Perf-buffer callback: format and print one closed IPv4 session."""
    event = b["ipv4_events"].event(data)
    global start_ts
    # Optional wall-clock column.
    if args.time:
        print(("%s," if args.csv else "%-8s ") % strftime("%H:%M:%S"), end="")
    # Optional seconds-since-first-event column.
    if args.timestamp:
        if start_ts == 0:
            start_ts = event.ts_us
        delta_s = (float(event.ts_us) - start_ts) / 1000000
        print(("%.6f," if args.csv else "%-9.6f ") % delta_s, end="")
    print(format_string % (event.pid, event.task.decode('utf-8', 'replace'),
        "4" if args.wide or args.csv else "",
        inet_ntop(AF_INET, pack("I", event.saddr)), event.ports >> 32,
        inet_ntop(AF_INET, pack("I", event.daddr)), event.ports & 0xffffffff,
        event.tx_b / 1024, event.rx_b / 1024, float(event.span_us) / 1000))
def print_ipv6_event(cpu, data, size):
    """Perf-buffer callback: format and print one closed IPv6 session."""
    event = b["ipv6_events"].event(data)
    global start_ts
    # Optional wall-clock column.
    if args.time:
        print(("%s," if args.csv else "%-8s ") % strftime("%H:%M:%S"), end="")
    # Optional seconds-since-first-event column.
    if args.timestamp:
        if start_ts == 0:
            start_ts = event.ts_us
        delta_s = (float(event.ts_us) - start_ts) / 1000000
        print(("%.6f," if args.csv else "%-9.6f ") % delta_s, end="")
    print(format_string % (event.pid, event.task.decode('utf-8', 'replace'),
        "6" if args.wide or args.csv else "",
        inet_ntop(AF_INET6, event.saddr), event.ports >> 32,
        inet_ntop(AF_INET6, event.daddr), event.ports & 0xffffffff,
        event.tx_b / 1024, event.rx_b / 1024, float(event.span_us) / 1000))
# initialize BPF (compiles and attaches the probe)
b = BPF(text=bpf_text)

# header: optional TIME / TIME(s) columns, then the fixed column set.
if args.time:
    if args.csv:
        print("%s," % ("TIME"), end="")
    else:
        print("%-8s " % ("TIME"), end="")
if args.timestamp:
    if args.csv:
        print("%s," % ("TIME(s)"), end="")
    else:
        print("%-9s " % ("TIME(s)"), end="")
print(header_string % ("PID", "COMM",
    "IP" if args.wide or args.csv else "", "LADDR",
    "LPORT", "RADDR", "RPORT", "TX_KB", "RX_KB", "MS"))

# Set on the first event when -t is used.
start_ts = 0

# read events until interrupted
b["ipv4_events"].open_perf_buffer(print_ipv4_event, page_cnt=64)
b["ipv6_events"].open_perf_buffer(print_ipv6_event, page_cnt=64)
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()
| |
#Graphics Manager
# The class that manages the drawing of all objects in the game.
import random
import pygame
import environment
# Basic RGB color constants, used for fallback shapes and rendered text.
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
# Wing sprite variants, one per color.
WING_YELLOW = pygame.image.load('img/colorwings/wing yellow.png')
WING_GREY = pygame.image.load('img/colorwings/wing grey.png')
WING_PURPLE = pygame.image.load('img/colorwings/wing purple.png')
WING_PINK = pygame.image.load('img/colorwings/wing pink.png')
WING_ORANGE = pygame.image.load('img/colorwings/wing orange.png')
WING_GREEN = pygame.image.load('img/colorwings/wing green.png')
WING_AQUA = pygame.image.load('img/colorwings/wing aqua.png')
WING_BLUE = pygame.image.load('img/colorwings/wing blue.png')
WING_RED = pygame.image.load('img/colorwings/wing red.png')
# Moustache sprite variants.
STACHE_LONG = pygame.image.load('img/staches/stache long.png')
STACHE_HITLER = pygame.image.load('img/staches/stache hitler.png')
STACHE_CURLY = pygame.image.load('img/staches/stache curly end.png')
STACHE_SHARP = pygame.image.load('img/staches/stache sharp.png')
STACHE_WIDE = pygame.image.load('img/staches/stache wide.png')
# Lookup table from moustache name to sprite (keys match enemy sign ids).
STACHES = {
    "LONG": STACHE_LONG,
    "HITLER": STACHE_HITLER,
    "CURLY": STACHE_CURLY,
    "SHARP": STACHE_SHARP,
    "WIDE": STACHE_WIDE
}
# Names of the wing colors available for random selection.
WING_COLORS = [
    "YELLOW", "PURPLE", "PINK", "ORANGE", "GREEN", "AQUA", "RED", "BLUE", "GREY"
]
# Lookup table from wing color name to sprite.
WINGS = {
    "YELLOW": WING_YELLOW,
    "GREY": WING_GREY,
    "PURPLE": WING_PURPLE,
    "PINK": WING_PINK,
    "ORANGE": WING_ORANGE,
    "GREEN": WING_GREEN,
    "AQUA": WING_AQUA,
    "BLUE": WING_BLUE,
    "RED": WING_RED
}
# Orb (enemy body) sprite variants, one per color.
ORB_YELLOW = pygame.image.load('img/orbs/orb yellow.png')
ORB_WHITE = pygame.image.load('img/orbs/orb white.png')
ORB_PURPLE = pygame.image.load('img/orbs/orb purple.png')
ORB_PINK = pygame.image.load('img/orbs/orb pink.png')
ORB_ORANGE = pygame.image.load('img/orbs/orb orange.png')
ORB_GREEN = pygame.image.load('img/orbs/orb green.png')
ORB_AQUA = pygame.image.load('img/orbs/orb aqua.png')
ORB_BLUE = pygame.image.load('img/orbs/orb blue.png')
ORB_RED = pygame.image.load('img/orbs/orb red.png')
# NOTE(review): "BLUE" and "RED" are loaded and present in ORBS but absent
# here -- presumably excluded from random selection on purpose (e.g. RED is
# reserved for the BOSS class below); confirm.
ORB_COLORS = [
    "YELLOW", "WHITE", "PURPLE", "PINK", "ORANGE", "GREEN", "AQUA"
]
# Lookup table from orb color name to sprite.
ORBS = {
    "YELLOW": ORB_YELLOW,
    "WHITE": ORB_WHITE,
    "PURPLE": ORB_PURPLE,
    "PINK": ORB_PINK,
    "ORANGE": ORB_ORANGE,
    "GREEN": ORB_GREEN,
    "AQUA": ORB_AQUA,
    "BLUE": ORB_BLUE,
    "RED": ORB_RED
}
# Maps an enemy class name to the orb color drawn for it.
CLASS_TO_TYPE = {
    "TANK": "YELLOW",
    "SWARM": "ORANGE",
    "NORMAL": "GREEN",
    "BOSS": "RED",
    "SCATTER": "PURPLE",
    "SPEED": "PINK",
    "GHOST": "WHITE",
    "MIMIC": "AQUA"
}
# Laser sprites. NOTE(review): convert_alpha() requires an initialized
# pygame display; presumably the display is created before this module is
# imported -- verify, or these loads will raise at import time.
LASER_ITEM = pygame.image.load('img/laser/laser item.png')
GLOW_ORANGE = pygame.image.load('img/laser/laser glow orange long.png').convert_alpha()
GLOW_BLUE = pygame.image.load('img/laser/laser glow blue long.png').convert_alpha()
GLOW_GREEN = pygame.image.load('img/laser/laser glow green long.png').convert_alpha()
GLOW_PINK = pygame.image.load('img/laser/laser glow pink long.png').convert_alpha()
GLOW_VIOLET = pygame.image.load('img/laser/laser glow violet long.png').convert_alpha()
ORBITAL_LONG = pygame.image.load('img/laser/laser orbital long.png').convert_alpha()
BUMPS_LONG = pygame.image.load('img/laser/laser bumps long.png').convert_alpha()
empty_laser = pygame.image.load('img/laser/empty_laser.png')
# Sprite groups used by draw_laser(): beam strips plus colored glow auras.
LASER = {
    "PICKUP": LASER_ITEM,
    "HUD": LASER_ITEM,
    "LASER": {
        "BEAMS": [
            ORBITAL_LONG, BUMPS_LONG
        ],
        "AURAS": [
            GLOW_ORANGE, GLOW_BLUE, GLOW_GREEN, GLOW_PINK, GLOW_VIOLET
        ]
    }
}
# Index orders for layering the laser sprites; draw_laser() permutes these
# in place with random.shuffle(). Wrapped in list() because under Python 3
# range() returns an immutable sequence that random.shuffle() rejects
# (identical behavior under Python 2, where range() already returns a list).
the_range = list(range(len(LASER["LASER"]["AURAS"])))
the_other_range = list(range(len(LASER["LASER"]["BEAMS"])))
# HUD and pickup sprites.
HP_BORDER = pygame.image.load('img/misc/hp bar border.png')
HP_ITEM = pygame.image.load('img/misc/heart icon purple.png')
HP_HEART = pygame.image.load('img/misc/heart icon red.png')
SHIELD_ITEM = pygame.image.load('img/misc/shield item.png')
MONEY_ITEM = pygame.image.load('img/item thingie.png')
# Maps a pickup kind (from pickup.sign.id[0]) to its sprite.
ITEMS = {
    "HP": HP_ITEM,
    "LASER": LASER_ITEM,
    "DEFENSE": SHIELD_ITEM,
    "MONEY": MONEY_ITEM
}
# Images
BACKGROUND = pygame.image.load('img/cloud background.png')
HERO_IMG = pygame.image.load('img/hero and goatee.png')
RUPEE_IMG = pygame.image.load('img/item thingie.png')
ENEMY_IMG = pygame.image.load('img/enemy navi.png')
LASER_IMG = pygame.image.load('img/laser.png')
PEW_IMG = pygame.image.load('img/pew.png')
# Mutable animation state shared by draw_laser().
laser_offset = 0
ticks = 0
# NOTE(review): stray module-level string below, not an actual docstring
# (it is not the first statement of the module); kept verbatim.
"""
Handles all the drawing of the Actor instance.
-> screen is the Surface object which the Actor will be drawn on
"""
def draw_bar(screen, bar, hero_bar=False):
    """Draw a status bar; the hero's bar also gets a heart icon and border."""
    if hero_bar:
        screen.blit(HP_HEART, (bar.left - 27, bar.top + 2))
        screen.blit(HP_BORDER, (bar.x - 4, bar.y - 4))
    if not bar.visible:
        return
    pygame.draw.rect(screen, bar.color, bar)
def draw_background(screen):
    """Blit the cloud backdrop, or clear to black when no image is loaded."""
    if BACKGROUND is not None:
        screen.blit(BACKGROUND, (0, 0))
    else:
        pygame.draw.rect(screen, BLACK, [0, 0, environment.WINDOW_WIDTH, environment.WINDOW_HEIGHT])
def draw_hero(screen, hero):
    """Draw the hero sprite, falling back to a red rectangle."""
    if HERO_IMG is not None:
        screen.blit(HERO_IMG, (hero.x, hero.y))
    else:
        pygame.draw.rect(screen, RED, hero)
def draw_enemy(screen, enemy):
    """Draw an enemy as a composite: wings, orb body, then moustache."""
    if ENEMY_IMG is None:
        # Fallback rectangle when the sprite set is unavailable.
        pygame.draw.rect(screen, RED, [enemy.x, enemy.y, enemy.get_width(), enemy.get_height()])
    else:
        # enemy.sign.id appears to encode (class, wing color, stache) --
        # TODO confirm against the enemy factory.
        screen.blit(WINGS[enemy.sign.id[1]], (enemy.x+enemy.width/2, enemy.y))
        enemy_class = enemy.sign.id[0]
        screen.blit(ORBS[CLASS_TO_TYPE[enemy_class]], (enemy.x, enemy.y))
        # NOTE(review): the halved x position for the moustache looks odd
        # ((x + centerx - 5)/2 lands left of the orb for x > 10) -- verify
        # the intended placement.
        screen.blit(STACHES[enemy.sign.id[2]], ((enemy.x + enemy.centerx - 5)/2, enemy.centery + 5))
def draw_rupee(screen, rupee):
    """Draw a rupee pickup, falling back to a red rectangle."""
    if RUPEE_IMG is None:
        pygame.draw.rect(screen, RED, [rupee.x, rupee.y, rupee.get_width(), rupee.get_height()])
    else:
        # NOTE(review): the guard checks RUPEE_IMG but LASER_ITEM is what
        # gets drawn -- looks like a copy/paste slip; confirm which sprite
        # rupees should use.
        screen.blit(LASER_ITEM, (rupee.x, rupee.y))
def draw_pickup(screen, pickup):
    """Draw a pickup using the sprite for its kind (pickup.sign.id[0])."""
    screen.blit(ITEMS[pickup.sign.id[0]], (pickup.x, pickup.y))
def draw_laser(screen, laser):
    """Draw the animated laser by compositing scrolling aura/beam strips."""
    if LASER_IMG is None:
        pygame.draw.rect(screen, RED, [laser.x, laser.y, laser.get_width(), laser.get_height()])
    else:
        global laser_offset, the_range, the_other_range, ticks, empty_laser
        # Scratch surface the size of the laser rect; layers are composited
        # here and blitted to the screen once.
        graphical_laser = pygame.Surface((laser.width, laser.height)).convert_alpha()
        # Periodically re-randomize the stacking order of the layers.
        # NOTE(review): ticks is never incremented in this function, so with
        # ticks == 0 both conditions are true on every call -- confirm
        # whether something else advances ticks.
        if not ticks % 200:
            random.shuffle(the_range)
        ticks %= 200
        if not ticks % 15:
            random.shuffle(the_other_range)
        for laser_index in the_range:
            # Auras scroll at 1/10th the beam speed for a parallax effect.
            graphical_laser.blit(LASER["LASER"]["AURAS"][laser_index], ((laser_offset/10 - GLOW_ORANGE.get_width() + environment.WINDOW_WIDTH), -15))
        #graphical_laser.blit(GLOW_BLUE, ((laser_offset - GLOW_ORANGE.get_width() + environment.WINDOW_WIDTH), -15))
        for laser_index in the_other_range:
            graphical_laser.blit(LASER["LASER"]["BEAMS"][laser_index], ((laser_offset - GLOW_ORANGE.get_width() + environment.WINDOW_WIDTH), -11))
        screen.blit(graphical_laser, (laser.x, laser.y))
        # Advance and wrap the scroll offset.
        laser_offset += 60
        laser_offset %= GLOW_ORANGE.get_width() - environment.WINDOW_WIDTH
def draw_pewpew(screen, pewpew):
    """Draw a projectile sprite, falling back to a red rectangle."""
    if PEW_IMG is not None:
        screen.blit(PEW_IMG, (pewpew.x, pewpew.y))
    else:
        pygame.draw.rect(screen, RED, [pewpew.x, pewpew.y, pewpew.width, pewpew.height])
def draw_score(screen, score):
    """Render the current score near the top-right corner."""
    score_font = pygame.font.Font(None, 30)
    rendered = score_font.render("Score: " + str(score), 1, (255, 255, 255))
    screen.blit(rendered, (environment.WINDOW_WIDTH - 200, 5))
| |
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
class Net(nn.Module):
    """Small MNIST CNN that also exposes its penultimate activations.

    forward() returns (log-probabilities over 10 classes, 128-d features
    taken just before the final classifier layer).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = torch.flatten(self.dropout1(x), 1)
        # 'expose' is returned alongside the prediction so a separate
        # confidence head can consume these features.
        expose = self.dropout2(F.relu(self.fc1(x)))
        output = F.log_softmax(self.fc2(expose), dim=1)
        return output, expose
class HigherNet(nn.Module):
    """Confidence head: fuses class scores and features into one logit."""

    def __init__(self):
        super(HigherNet, self).__init__()
        self.output_fc = nn.Linear(10, 16)
        self.fc_expose = nn.Linear(128, 16)
        self.fc_final = nn.Linear(16, 1)

    def forward(self, output, expose):
        # Project both inputs to a shared 16-d space, sum, then score.
        fused = self.output_fc(output) + self.fc_expose(expose)
        return self.fc_final(fused)
def train(args, model, conf, device, train_loader, optimizer, epoch):
model.train()
conf.eval()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output, expose = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
if args.dry_run:
break
class ConfidenceLoss(nn.Module):
    """BCE-with-logits loss for the confidence head.

    The binary target is 1 when the classifier's argmax prediction matches
    the true label and 0 otherwise, so the head learns to predict whether
    the classifier is right.
    """

    def __init__(self):
        super(ConfidenceLoss, self).__init__()
        self.bce_logits = torch.nn.BCEWithLogitsLoss()

    def forward(self, pred, logits_confidence, target):
        # Index of the max log-probability per example, shape (N, 1).
        predicted_class = pred.argmax(dim=1, keepdim=True)
        is_correct = predicted_class.eq(target.view_as(predicted_class)).float()
        return self.bce_logits(logits_confidence, is_correct)
def train_confidence(args, model, conf, device, train_loader, optimizer, epoch):
    """Run one epoch of confidence-head training.

    The classifier is frozen into eval mode; only ``conf``'s parameters
    should be attached to ``optimizer``, so the classifier is not updated
    even though gradients flow through it.
    """
    model.eval()
    conf.train()
    criterion = ConfidenceLoss()
    for step, (data, target) in enumerate(train_loader):
        data = data.to(device)
        target = target.to(device)
        optimizer.zero_grad()
        log_probs, expose = model(data)
        conf_logits = conf(log_probs, expose)
        loss = criterion(log_probs, conf_logits, target)
        loss.backward()
        optimizer.step()
        if step % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(data), len(train_loader.dataset),
                100. * step / len(train_loader), loss.item()))
            if args.dry_run:
                break
def tttest(model, conf_model, device, test_loader):
model.eval()
conf_model.eval()
test_loss = 0
correct = 0
correct_conf = 0
incorrect_conf = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output, expose = model(data)
conf_logits = conf_model(output, expose)
conf = torch.sigmoid(conf_logits)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct_pred = pred.eq(target.view_as(pred))
correct += correct_pred.sum().item()
correct_confidence = conf[correct_pred]
incorrect_confidence = conf[~correct_pred]
correct_conf += correct_confidence.sum()
incorrect_conf += incorrect_confidence.sum()
test_loss /= len(test_loader.dataset)
dslen = len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
print(f"correct_confidence: {correct_conf/correct:.4f}, incorrect_confidence: {incorrect_conf/(dslen-correct):.4f}")
def main():
    """Parse CLI args, train the classifier and confidence head, evaluate."""
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--tttest-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    # BUG FIX: the test loader previously reused the training batch size,
    # silently ignoring --tttest-batch-size, and shuffled the test set.
    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.tttest_batch_size}
    if use_cuda:
        train_kwargs.update({'num_workers': 1,
                             'pin_memory': True,
                             'shuffle': True})
        test_kwargs.update({'num_workers': 1,
                            'pin_memory': True,
                            'shuffle': False})
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    dataset1 = datasets.MNIST('../data', train=True, download=True,
                              transform=transform)
    dataset2 = datasets.MNIST('../data', train=False,
                              transform=transform)
    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
    model = Net().to(device)
    conf_model = HigherNet().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    conf_optim = optim.Adadelta(conf_model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        # Alternate: one epoch for the classifier, one for the confidence head.
        train(args, model, conf_model, device, train_loader, optimizer, epoch)
        train_confidence(args, model, conf_model, device, train_loader, conf_optim, epoch)
        tttest(model, conf_model, device, test_loader)
        scheduler.step()
    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
| |
# GUI layout constants, grouped by screen/panel.  The prefix of each name
# encodes the owning widget (e.g. RP, TCM, PP); scalar values are text/geom
# scales, tuples are positions.  NOTE(review): prefix meanings inferred from
# the names only -- confirm against the consuming modules.
PBPTonscreenText = 0.2
RPdirectFrame = (1.75, 1, 0.75)
RPtrackLabels = 0.05
RPmeritBarLabels = 0.165
RPskipScale = 0.2
RPskipPos = (0, -.28)
RPmeritLabelPosX = 0.55
RPmeritBarsPosX = 0.825
BBbattleInputTimeout = 20.0
FCPtextFrame = 0.08
DHQInamePath = 0.9
DHQInamePathPos = (-4, 0, 0)
DHQIscorePathPos = (-4.6, 0, 0)
DHQItrophyStarPos = (-6.6, 0, 0.3)
EexitButton = 0.8
CCIPexitButton = 0.06
CIPnameLabel = 1
CIPwordwrapOffset = 0
CIPtypeLabel = 0.075
CIPbuyButton = (0.06, 0.05)
CSgiftTogglePos = (0.855, -0.13)
CSgiftToggle = 0.08
CSbackCatalogButton = 0.065
NametagReverse = False
TTCISCspeedChat = 0.055
TTCISCtopLevelOverlap = 0.0
# Toon chat menu (TCM) buttons and warnings.
TCMnormalButton = 0.06
TCMscButtonPos = (0.204, 0, -0.072)
TCMscButton = 0.06
TCMwhisperFrame = 0.06
TCMwhisperButton = 0.05
TCMwhisperScButton = 0.05
TCMunpaidChatWarningWordwrap = 18
TCMunpaidChatWarning = 0.055
TCMunpaidChatWarningPos = (0.0, 0.3)
TCMpayButton = 0.06
TCMpayButtonPos = (0.0, 0.0, -0.13)
TCMopenChatWarning = 0.06
TCMactivateChatGui = 0.05
TCMsecretChatActivated = 0.06
TCMdirectButtonTextPos = (0.0, 0.0, -0.28)
LCHQLfdText = 0.1
SCHQLfdTypeText = 0.075
SCHQLdgText = 0.1
DMEEsignText = 2
BCHQLsignText = 1.12
DGGGquitButton = 0.045
DGGGhowToButton = 0.045
DGGGscoreLabel = 0.075
FPvaluePos = (0, 0, -0.35)
FPvalue = 0.05
FPinfo = 0.055
FSGUIdirectFrame = 0.06
FSGUIcancelButton = 0.06
FSGUIokButton = 0.06
GTenterPage2Wordwrap = 13.5
GTenterPage4Wordwrap = 16.5
BCGjpText = 0.04
BCGjpTextWordwrap = 10.5
BCGnextGame = 1.71
FSGUIokButton = 0.06  # NOTE(review): duplicate assignment (same value as above)
FSGUIcancelButton = 0.06  # NOTE(review): duplicate assignment (same value as above)
FPnewEntry = 0.08
FPnewRecord = 0.08
GPgenus = 0.045
FLPnewFriend = 0.045
FLPsecrets = 0.045
FLPsecretsPos = (0.152, 0.0, 0.14)
FLPtitle = 0.04
FIbStop = 0.05
FIdirectFrame = 0.06
FIbCancelPos = (0.0, 0.0, -0.1)
FIbStopTextPos = (0.075, -0.015)
FIbStopPos = (-0.2, 0.0, 0.05)
FIbYesPos = (-0.15, 0.0, -0.1)
FIdirectFrameWordwrap = 14
FIdirectFramePos = (0, 0.2)
DGHaimInstructions = 0.1
DGHteeInstructions = 0.1
GSBexitCourseBPos = (0.15, -.01)
GSBtitleLabel = 0.07
EHpopupInfo = 0.08
HtitleText = 0.16
# Avatar chooser (AC) screen.
ACplayThisToon = 0.12
ACmakeAToon = 0.12
ACsubscribersOnly = 0.115
ACdeleteWithPasswordFrame = 0.06
ACstatusText = 1.0
ACtitle = 0.15
ACquitButton = 0.1
AClogoutButton = 0.1
ACquitButtonPos = (0, -0.035)
MASPscoreText = 0.1
MASPnameText = 0.055
MRPgameTitleText = 0.11
MRgameTitleTextPos = (-0.046, 0.2, 0.092)
MRPplayButton = 0.055
MRPinstructionsText = 0.07
MRPinstructionsTextWordwrap = 26.5
MRPinstructionsTextPos = (-0.115, 0.05, 0)
CRPgameTitleText = 0.088
CRPgameTitleTextPos = (-0.046, 0.2, 0.13)
MPMpowerText = 0.07
MPMtooSlow = 0.07
MPMtooFast = 0.07
MPMgaugeA = 0.35
MPMgaugeTargetTop = 0.35
MPMgaugeTargetBot = 0.35
PstatusLabel = 0.08
PBstatusLabel = 0.08
DPGpointsFrame = 0.7
DPGflipsFrame = 0.7
DTGvoteButton = 0.07
DTGuseLabel = 0.1
DTGvotesPeriodLabel = 0.1
DTGvotesToGoLabel = 0.1
DTGupLabel = 0.125
DTGdownLabel = 0.125
DTGremainingVotesFrame = 0.7
CStoonFrame = 0.0575
# Name shop (NS) widgets.
NSmaxNameWidth = 8.0
NSdirectScrolleList = 0.1
NSmakeLabel = 0.1
NSmakeCheckBox = 0.8
NSnameEntry = 0.08
NStypeANameButton = 0.06
NStypeANameButtonPos = (0, -0.02)
NSnameResult = (0.09, 0.084, 0.084)
NSnameLabel = 0.1
NStypeNotification = 0.08
NScolorPrecede = True
MATenterGenderShop = 0.18
MATenterBodyShop = 0.18
MATenterColorShop = 0.18
MATenterClothesShop = 0.16
MATenterNameShop = 0.15
MATclothesGUIshirt_scale = 0.06
MATclothesGUIshirt_posL = 0.01
MATclothesGUIshirt_posR = -0.014
MATguiCancelButton = 0.08
MATguiNextButton = 0.08
SBshuffleBtn = 0.08
IVwhenTextLabel = 0.06
IVactivityTextLabel = 0.06
# Party planner (PP) screens.
PPelementDescription = 0.06
PPelementTitleLabel = 0.07
PPelementBuyButton = 0.055
PPtitleScale = 0.1
PPpbulicDescriptionLabel = 0.065  # NOTE(review): "pbulic" typo -- kept; external code may reference this name
PPprivateDescriptionLabel = 0.065
PPpublicButton = 0.05
PPprivateButton = 0.05
PPcostLabel = 0.065
PPpartyGroundsLabel = 1.0
PPinstructionLabel = 0.07
PPelementPriceNode = 0.065
DPtimerTextLabel = 1.1
DPtimerMinute = 1.1
DPtimerColon = 1.1
DPtimerSecond = 1.1
DPtimerMinutePos = (-1.2, 0.0, 0.0)
DPtimerColonPos = (0.0, 0.0, 0.0)
DPtimerSecondPos = (1.2, 0.0, 0.0)
PPGpartyStartButton = 0.065
PPGinstructionsLabel = 0.065
PPGtoonsLabel = 0.06
PPGactivitiesLabel = 0.06
PPGminLeftLabel = 0.06
JGcurrentlyPlayingLabel = 0.07
JGsongNameLabel = 0.13
JGaddSongButton = 0.1
JGnumItemsVisible = 9
JGlistItem = 1.0
PAPfeedButton = 0.5
PAPcallButton = 0.5
PAPownerButton = 0.35
PAPscratchButton = 0.5
PAPstateLabel = 0.4
PAPstateLabelPos = (0.7, 0, 3.5)
PAPstateLabelWordwrap = 7.5
PDPtrickText = 0.17
PDPlaff = 0.17
PDPlaffPos = (0.0, -0.05)
PGUItextScale = 1
PGUIchooserTitle = 0.1
PGUIwordwrap = 14
PGUIdescLabel = 0.9
PGUIreturnConfirm = 0.07
PGUIokButton = 0.6
PGUIsubmitButton = 0.8
PGUIokButtonPos = (-0.21, 1.05)
PGUIcancelButtonPos = (-3.3, 2.95)
PGUIcharLength = 1
PTtitle = 0.13
PTenterPage1Pos = (0.15, 0.13)
PTenterPage2Pos = (-0.27, 0.16)
PTenterPage3Pos = (0.15, 0.13)
QPauxText = 0.04
QPtextScale = 0.045
QPtextWordwrap = 15.6
QPinfoZ = -0.0625
DLBbuildTitleRow = 0.4
DRenterWaiting = 0.2
DRrollScale = 0.5
KSGtextSizeBig = 0.088
KSGtextSizeSmall = 0.055
KSGaccDescriptionWordwrap = 11
REPlargeLabel = 0.08
REPsmallLabel = 0.04
REPtextPosX = -0.6
RGUIphotoFinish = 0.25
RGUIplaceLabelNumPos = (0.15, 0, 0.05)
RGUIplaceLabelStrPos = (0.31, 0.0, 0.22)
DRAIwaitingForJoin = 90
PimgLabel = 1.0
GZSZLsignText = 1.5
# Event planner (EP) tabs and buttons.
EPtitleLabel = 0.12
EPhostTab = 0.07
EPinvitedTab = 0.07
EPcalendarTab = 0.07
EPnewsTab = 0.07
EPhostingCancelButton = 0.04
EPhostingDateLabel = 0.05
EPpartyGoButton = 0.045
EPpublicPrivateLabel = 0.05
EPpublicButton = 0.5
EPprivateButton = 0.5
EPinvitePartyGoButton = 0.045
EPdecorationItemLabel = 0.055
EPactivityItemLabel = 0.055
EPcreateListAndLabel = 0.055
FPtankTab = 0.07
FPcollectionTab = 0.07
FPtrophyTab = 0.07
DSDintroText = 0.06
DSDintroTextWordwrap = 25
DSDwindowedButtonPos = (0.0961, 0, -0.221)
DSDfullscreenButtonPos = (0.097, 0, -0.311)
DSDcancel = 0.06
DSDcancelPos = (0, -0.02)
DPtab = 0.1
DPdeptLabel = 0.17
DPcogName = 0.093
TPstartFrame = 0.12
TPendFrame = 0.12
SBpageTab = 0.75
OPoptionsTab = 0.07
OPCodesInstructionPanelTextPos = (0, -0.01)
OPCodesInstructionPanelTextWordWrap = 6
OPCodesResultPanelTextPos = (0, 0.35)
OPCodesResultPanelTextScale = 0.06
OPCodesResultPanelTextWordWrap = 9
OPCodesInputTextScale = 0.8
OPCodesSubmitTextScale = 0.07
OPCodesSubmitTextPos = (0, -0.02)
MPsafeZoneButton = 0.055
MPgoHomeButton = 0.055
MPhoodLabel = 0.06
MPhoodLabelWordwrap = 14
KPkartTab = 0.07
KPdeleteButton = 0.06
KProtateButton = 0.035
GPbasketTab = 0.07
GPcollectionTab = 0.07
GPtrophyTab = 0.07
GPspecialsTab = 0.07
GPrecordsTab = 0.07
GPrecordsTabPos = (0.92, 0, 0.1)
GPtrophyTab = 0.07  # NOTE(review): duplicate assignment (same value as above)
GPtrophyTabTextPos = (0.03, 0.0, 0.0)
GPtrophyTabPos = (0.92, 0, -0.3)
APBdialog = 0.06
APBdirectLabelPosY = 0
TAPwhisperButton = 0.06
TAPsecretsButton = 0.045
TAPgroupFrame = 0.05
TAPgroupButton = 0.055
TADPbCancel = 0.05
TADPbCancelPos = (-0.865, 0.0, -0.78)
TADPtrackLabel = 0.066
TADtrackLabelPosZ = 0.08
GPdestFrame = 0.05
GPdestScrollList = 0.05
GPgoButton = 0.06
INtrackNameLabels = 0.05
INclickToAttack = 1.0
INpassButton = 0.05
INrunButton = 0.05
INdetailNameLabel = 1.0
INfireButton = 0.05
NPCFimgLabel = 1.0
PIPsecretsButton = 0.045
PIPwisperButton = 0.06  # NOTE(review): "wisper" typo -- kept; external code may reference this name
PIPdetailButton = 0.05
TLStip = 0.18
TPdialogWordwrap = 22
TPdialog = 0.05
TPpanel = 0.08
TPpanelPos = (0.0, -0.7)
TPbrowserPosZ = -0.45
TPbuttonTextList = 0.05
TPhaveFun = 0.1
TPjoinUs = 0.1
TBSOSPSPenter = 0.1
TexitButton = 0.8
# Some languages need to change the word order
SellbotFactoryPosPart1 = (0, -0.25)
SellbotFactoryScalePart1 = 0.075
SellbotFactoryPosPart2 = (0, -0.34)
SellbotFactoryScalePart2 = 0.12
| |
"""Provides a command-line interface for running experiments."""
# Copyright (c) 2011-2013 Mick Thomure
# All rights reserved.
#
# Please see the file LICENSE.txt in this distribution for usage terms.
try:
import matplotlib
matplotlib.use("cairo") # workaround old scipy bug
except ImportError:
pass
import cPickle as pickle
from itertools import chain
import logging
import os
import pprint
import sys
from glimpse.experiment import *
from glimpse.experiment.prototype_algorithms import (GetAlgorithmNames,
ResolveAlgorithm)
from glimpse.models import MakeModel, MakeParams
from glimpse.pools import MakePool
from glimpse.util.option import *
from glimpse.util.learn import ScoreFunctions, ResolveLearner
from glimpse.util.progress import ProgressBar
def MakeCliOptions():
  """Build the option tree for the command-line interface.

  Returns an :class:`OptionRoot` with four groups -- corpus, extractor
  (including nested prototype options), evaluation -- plus top-level flags.
  """
  return OptionRoot(
      Option('verbose', False, flag = ('v', 'verbose'),
          doc = "Enable verbose logging"),
      Option('input_path', flag = ('i:', 'input='),
          doc = "Read initial experiment data from a file"),
      Option('result_path', flag = ('o:', 'output='),
          doc = "Store results to a file"),
      Option('pool_type', None, flag = ('t:', 'pool-type='),
          enum = ('s', 'singlecore', 'm', 'multicore', 'c', 'cluster'),
          doc = "Set the worker pool type. Can also use the 'GLIMPSE_POOL' "
              "environment variable. If using a cluster pool type, the "
              "cluster package and arguments are read from the "
              "GLIMPSE_CLUSTER and 'GLIMPSE_CLUSTER_ARGS' environment "
              "variables."),
      Option('train_size', None, flag = ('T:', 'train-size='),
          doc = "Set the size of the training set (number of instances or "
              "fraction of total)"),
      # NOTE(review): ('timing') and ('command=') are plain strings, not
      # 1-tuples -- confirm Option accepts a bare string flag.
      Option('timing', flag = ('timing'), doc = "Report timing"
          " information for worker pool (assumes cluster pool is used)"),
      Option('command', flag = ('command='), doc = "Execute a command "
          "after running the experiment (but before results are saved)"),
      OptionGroup('corpus',
          Option('root_dir', flag = ('c:','corpus='),
              doc = "Set corpus directory"),
          Option('subdirs', flag = ('C:', 'corpus-subdir='), multiple=True,
              doc = "Specify subdirectories (using -C repeatedly)"),
          Option('from_name', flag = 'corpus-name=',
              doc = "Specify corpus by name (one of 'easy', 'moderate', or "
                  "'hard')"),
          Option('balance', False, flag = ('b', 'balance'),
              doc = "Choose equal number of images per class")),
      OptionGroup('extractor',
          Option('param_file', flag = ('param-file='),
              doc = "Read model options from a file"),
          Option('params', flag = ('P:', 'param='), multiple=True,
              doc = "Set model options from command line (e.g.: glab -P "
                  "num_scales=3 -P scale_factor=1.25"),
          Option('no_activity', flag = ('N', 'no-activity'),
              doc = "Do not compute activity model activity for each image "
                  "(implies no classifier). This can be used to learn "
                  "prototypes without immediately evaluating them."),
          Option('save_all', flag = ('A', 'save-all'),
              doc = "Save activity for all layers, rather than just the "
                  "layers from which features are extracted."),
          # Nested group: how S2 prototypes are obtained (file vs. learned).
          OptionGroup('prototypes',
              Option('path', flag = ('prototype-file='),
                  doc = "Read S2 prototypes from a file (overrides -p)"),
              Option('length', default = 10, flag = ('n:', 'num-prototypes='),
                  doc = "Number of S2 prototypes to generate"),
              Option('algorithm', flag = ('p:', 'prototype-algorithm='),
                  enum = sorted(GetAlgorithmNames()),
                  doc = "Specify how S2 prototypes are generated"),
              Option('low', flag = 'low=', default = 0., doc = "Low end "
                  "of random prototype distribution"),
              Option('high', flag = 'high=', default = 1., doc = "High "
                  "end of random prototype distribution"),
              Option('num_samples', flag = 'samples=', default=0,
                  doc = "Number of training patches to sample for S2 "
                      "prototype learning"),
              Option('num_regr_samples', flag='regr-samples=',
                  default=0, doc="Number of patches to sample when training "
                      "regression model for meta_feature_wkmeans"),
              Option('mask_dir', flag='masks=', default='',
                  doc="Mask directory for object_mask_wkmeans"),
              Option('base_weight', flag='base-weight=', default=0.,
                  doc="Value added to weight for all training patches"),
              )),
      OptionGroup('evaluation',
          Option('layer', default = "C2", flag = ('l:', 'layer='),
              doc = "Choose the layer(s) from which features are extracted"),
          Option('evaluate', default = False, flag = ('E', 'evaluate'),
              doc = "Train and test a classifier"),
          Option('cross_validate', default = False, flag = ('x',
              'cross-validate'), doc = "Compute test accuracy via "
              "cross-validation instead of fixed training/testing split"),
          Option('cross_val_folds', default = 10, flag = ('f:',
              'num-folds='), doc = "Number of folds for cross-validation"),
          Option('score_func', "accuracy", flag = ('S:', 'score-function='),
              enum = ScoreFunctions(),
              doc = "Specify the scoring function for classifier "
                  "evaluation"),
          Option('hist_features', False, flag = ('H', 'hist-features'),
              doc = "Use histograms (accumulated over space and scale) for "
                  "each feature band (requires spatial features, such as "
                  "C1)"),
          Option('learner', 'svm', flag = ('L:', 'learner='),
              doc = "Learning algorithm to use for classification (can be "
                  "a Python expression, or one of 'svm' or 'logreg')"),
          Option('predictions', flag = 'predictions', doc = "Print the true "
              "and predicted labels for each image in the corpus (only for "
              "fixed-split evaluations)."),
          ),
      Option('help', flag = ('h', 'help'), doc = "Print this help and exit"))
def InitModel(opts, exp):
  """Ensure the experiment has a model, building one from user parameters.

  If the experiment already contains a model, any user-supplied parameters
  are ignored (with a warning).  Otherwise parameters are read from the
  given file, or defaults are used, with command-line KEY=VALUE overrides
  applied on top.
  """
  if exp.extractor.model is not None:
    if opts.extractor.param_file or opts.extractor.params:
      logging.warn("Ignoring user's model parameters (model exists)!")
    return
  if opts.extractor.param_file:
    logging.info("Reading model parameters from file: %s" %
        opts.extractor.param_file)
    with open(opts.extractor.param_file) as fh:
      params = pickle.load(fh)
  else:
    params = MakeParams()
  if opts.extractor.params:
    pairs = [p.split('=') for p in opts.extractor.params]
    if any(len(pair) != 2 for pair in pairs):
      raise OptionError("Must specify model parameters as KEY=VALUE")
    for key, value in pairs:
      # Coerce the string value to the type of the trait's default.
      setattr(params, key, type(params.trait(key).default)(value))
  exp.extractor.model = MakeModel(params)
def CliWithActivity(opts, exp, pool):
  """Initialize the model, obtain S2 prototypes, and compute activation.

  Prototypes come either from a pickle file (--prototype-file, which
  overrides -p) or from a learning algorithm (-p).  Activation is skipped
  when --no-activity is set.
  """
  progress = None
  if opts.verbose:
    progress = ProgressBar
  InitModel(opts, exp)
  # Initialize prototypes
  popts = opts.extractor.prototypes
  if popts.path:
    with open(popts.path) as fh:
      protos = pickle.load(fh)
    # XXX assuming single-size prototypes
    logging.info("Read %d prototypes from file: %s" % (len(protos[0]),
        popts.path))
    exp.extractor.model.s2_kernels = protos
  elif popts.algorithm:
    num_prototypes = int(popts.length)
    alg = ResolveAlgorithm(popts.algorithm)
    alg = alg()  # create algorithm object of given class
    # Forward only the tuning knobs this algorithm actually supports.
    for key in ('low', 'high', 'num_samples', 'num_regr_samples', 'mask_dir',
        'base_weight'):
      if hasattr(alg, key):
        setattr(alg, key, getattr(popts, key))
    MakePrototypes(exp, num_prototypes, alg, pool, train_size=opts.train_size,
        progress=progress)
  # Compute model activation
  if not opts.extractor.no_activity:
    # Timing is only collected for pools that support it (cluster pools).
    report_timing = opts.timing and hasattr(pool, 'save_timing')
    if report_timing:
      pool.save_timing = True
    ComputeActivation(exp, opts.evaluation.layer, pool,
        save_all = opts.extractor.save_all, progress = progress)
    if report_timing:
      exp.extractor['timing'] = pool.timing
      if opts.verbose:
        import importlib
        m = importlib.import_module(pool.__module__)
        print "START TIMING REPORT"
        m.PrintTiming(pool.timing)
        print "END TIMING REPORT"
# for large datasets, it's useful to stream results to disk as they arrive,
# rather than wait for the entire batch to complete. in this case we must
# 1) open file handle fh
# 2) for each batch of exp.corpus.paths:
#    a) compute activation
#    b) store to fh
# 3) close fh
# note that activation is stored in the same order as that of corpus.paths.
# afterwards, activation can be read in and stored in the experiment.
# to support this, we could pass a pool wrapper, which chunks the input
# arguments and delegates to the original pool.map() for each chunk. [how does
# this interact with the chunksize argument in ipythonpool.map?]
class TeePool(object):
  """A pool object that incrementally processes results as they arrive.

  The input is split into batches of ``chunksize`` items; each batch is
  mapped through the underlying pool and the batch results are fed to
  :meth:`process`, so partial results can be consumed (e.g. streamed to
  disk) before the whole job finishes.

  BUG FIX: the original ``map`` referenced the undefined names
  ``iterator.chain`` and ``batches`` and so raised NameError when called.
  """
  chunksize = None  # size of each batch; None/0 means one batch for everything
  def __init__(self, pool, chunksize=None):
    self.pool = pool
    if chunksize is not None:
      self.chunksize = chunksize
  def map(self, func, iterable, progress=None, chunksize=None):
    # XXX progress bar is currently unsupported
    size = chunksize if chunksize is not None else self.chunksize
    items = list(iterable)
    if not size or size <= 0:
      size = len(items) or 1
    results = []
    for start in range(0, len(items), size):
      batch = items[start:start + size]
      results.extend(self.process(self.pool.map(func, batch)))
    return results
  def process(self, results):
    """Perform processing on incremental results.
    :rtype: iterable
    :return: Incremental results as returned to caller. Return an empty list to
       remove all results.
    """
    return results
def CheckClassLabels(exp):
  """Normalize binary class labels to {0, 1}, mutating the corpus in place.

  No-op when labels are missing, when the task is not binary, or when the
  labels already are exactly (0, 1).
  """
  if exp.corpus.labels is None:
    return
  distinct = np.unique(exp.corpus.labels)
  distinct.sort()
  if len(distinct) != 2 or tuple(distinct) == (0, 1):
    return  # not a binary task, or already zero-one
  logging.warning("Found binary classification task without zero-one labels. "
      "Fixing.")
  # Build both masks before mutating, so the second mask is not corrupted
  # by the first assignment.
  mask_zero = exp.corpus.labels == distinct[0]
  mask_one = exp.corpus.labels == distinct[1]
  exp.corpus.labels[mask_zero] = 0
  exp.corpus.labels[mask_one] = 1
def CliEvaluate(opts, exp):
  """Train and test (or cross-validate) a classifier on extracted features.

  Optionally prints per-image true/predicted labels when --predictions is
  set (fixed-split evaluations only).

  :raises OptionError: if no layer is specified or the learner expression
     is invalid.
  """
  if not opts.evaluation.layer:
    raise OptionError("Must specify model layer to use for features")
  if opts.evaluation.hist_features:
    feature_builder = ExtractHistogramFeatures
  else:
    feature_builder = None
  try:
    learner = ResolveLearner(opts.evaluation.learner)
  except Exception, e:
    raise OptionError("Error in 'learner' expression -- %s" % e)
  if opts.evaluation.cross_validate:
    # Cross-validation ignores any custom score function.
    if opts.evaluation.score_func not in (None, 'accuracy'):
      logging.warn("Ignoring score_func of '%s'. Cross-validation always uses "
          "'accuracy'.", opts.evaluation.score_func)
    CrossValidateClassifier(exp, opts.evaluation.layer, learner=learner,
        feature_builder=feature_builder,
        num_folds=opts.evaluation.cross_val_folds)
  else:
    TrainAndTestClassifier(exp, opts.evaluation.layer,
        train_size=opts.train_size, feature_builder=feature_builder,
        score_func=opts.evaluation.score_func, learner=learner)
  if opts.evaluation.predictions:
    # evaluation=-1 selects the most recent evaluation record.
    print
    print "Classifier Predictions"
    print "======================"
    print
    print ("Each line gives the true and predicted labels (in that order) "
        "for an image in the corpus.")
    print
    print "Training Set Predictions"
    print "------------------------"
    training_predictions = GetPredictions(exp, training=True, evaluation=-1)
    if len(training_predictions) == 0:
      print "no training instances"
    else:
      for img,lbl,pred in training_predictions:
        print img, lbl, pred
    print
    print "Test Set Predictions"
    print "--------------------"
    predictions = GetPredictions(exp, training=False, evaluation=-1)
    if len(predictions) == 0:
      print "no test instances"
    else:
      for img,lbl,pred in predictions:
        print img, lbl, pred
    print
def CliProject(opts):
  """Run a full experiment from parsed options and return the experiment.

  Pipeline: load or create the experiment, set the corpus, build the model
  and compute activation, optionally evaluate, optionally run a user
  command, and finally write results to file or stdout.
  """
  # Read verbosity from environment var unless flag is given.
  log_level = Verbose(opts.verbose or None)
  if (log_level != logging.INFO and opts.result_path is None and
      opts.command is None):
    logging.warn("No results will be given. You probably want to specify a "
        "results file or command, or enable the verbose flag.")
  reader = DirReader(ignore_hidden = True)
  # Initialize experiment object
  if opts.input_path:
    # Read experiment from disk
    logging.info("Reading initial experiment data from file -- %s",
        opts.input_path)
    if opts.input_path == '-':
      exp = pickle.load(sys.stdin)
    else:
      with open(opts.input_path, 'rb') as fh:
        exp = pickle.load(fh)
    CheckClassLabels(exp)
  else:
    exp = ExperimentData()
  # Initialize corpus: Each sub-directory is given a distinct numeric label,
  # starting at one. If the root directory is given, labels are assigned to
  # sub-directories in alphabetical order.
  if opts.corpus.subdirs:
    SetCorpusSubdirs(exp, opts.corpus.subdirs, opts.corpus.balance, reader)
  else:
    path = None
    if opts.corpus.root_dir:
      path = opts.corpus.root_dir
    elif opts.corpus.from_name:
      path = GetCorpusByName(opts.corpus.from_name)
    if path:
      SetCorpus(exp, path, opts.corpus.balance, reader)
    elif exp.corpus.paths is None:
      raise OptionError("Must specify a corpus")
  # Initialize model and compute activation, if necessary.
  eopts = opts.extractor
  pool = None
  if (not eopts.no_activity or eopts.prototypes.path or
      eopts.prototypes.algorithm):
    pool = MakePool(opts.pool_type)
    logging.info("Using pool: %s" % type(pool).__name__)
    CliWithActivity(opts, exp, pool)
  elif eopts.param_file or eopts.params:
    # Ensure model is created if parameters are set.
    InitModel(opts, exp)
  # Evaluate features
  if opts.evaluation.evaluate:
    CliEvaluate(opts, exp)
  if opts.command:
    # SECURITY: runs arbitrary user-supplied code with `exp` in scope.
    # Intentional for this CLI, but never feed it untrusted input.
    exec opts.command
  # Store experiment to disk
  if opts.result_path == '-':
    logging.info("Writing experiment data to stdout")
    pickle.dump(exp, sys.stdout, protocol = 2)
  elif opts.result_path is not None:
    logging.info("Writing experiment data to file -- %s" % opts.result_path)
    with open(opts.result_path, 'wb') as fh:
      pickle.dump(exp, fh, protocol = 2)
  return exp
import textwrap
def PrintDict(data, max_key_len=25, width=None, stream=None, pad=3):
  """Print (key, value) pairs as two aligned columns with wrapped values.

  :param data: iterable of (key, value) string pairs
  :param max_key_len: column width reserved for keys
  :param width: total line width (defaults to terminal width, else 70)
  :param stream: output stream (defaults to sys.stderr)
  :param pad: spaces between key column and value column
  """
  if stream is None:
    stream = sys.stderr
  if width is None:
    try:
      # Try to read terminal width (only for *nix systems)
      _,width = os.popen('stty size', 'r').read().split()
      width = int(width)
    except Exception:
      # Was a bare `except:`, which also swallowed SystemExit and
      # KeyboardInterrupt; fall back to a conservative width.
      width = 70
  tmpl = "%%-%ds" % max_key_len
  indent = ' ' * (max_key_len+pad)
  for k,v in data:
    print >>stream, tmpl % k,
    v = textwrap.fill(v, width=width,
        subsequent_indent=indent,
        initial_indent=indent)
    if len(k) <= max_key_len:
      # Use initial indent to account for printed flags, but remove it afterward
      print >>stream, " ", v[max_key_len+pad:]
    else:
      print >>stream, "\n%s" % v
def PrintModelParamHelp():
  """Print name, description, required type and default of each model trait."""
  params = MakeParams()
  traits = params.traits()
  # Display traits in alphabetical order.
  keys = sorted(n for n in params.trait_names() if not n.startswith('trait_'))
  # Format set of traits as a string.
  data = list()
  for k in keys:
    trait = traits[k]
    desc = trait.desc or ''
    if desc:
      # Capitalize the first word only. BUG FIX: the original used
      # desc.index(' '), which raised ValueError for single-word
      # descriptions; find() returns -1 instead.
      idx = desc.find(' ')
      if idx < 0:
        desc = desc.capitalize()
      else:
        desc = desc[:idx].capitalize() + desc[idx:]
      desc = "%s. " % desc
    doc = (desc + "Must be %s. " % trait.full_info(params, '', '') +
        "Default is: %s" % pprint.pformat(trait.default))
    data.append(("%s:" % k, doc))
  PrintDict(data, max_key_len=25)
def Main(argv = None):
  """CLI entry point: parse arguments, run the experiment, report errors.

  :param argv: argument list (defaults to sys.argv)
  """
  options = MakeCliOptions()
  try:
    ParseCommandLine(options, argv = argv)
    if options.help.value:
      print >>sys.stderr, "Usage: [options]"
      PrintUsage(options)
      print >>sys.stderr
      print >>sys.stderr, "Model Parameters (and defaults):"
      PrintModelParamHelp()
      sys.exit(-1)
    # Recursively dump the parsed option tree (verbose mode only).
    def p(opt, indent=0):
      if isinstance(opt, OptionGroup):
        if opt._name:  # skip root
          print (" " * indent) + opt._name
        for c in opt._children:
          p(c, indent+1)
      else:
        print (" " * indent) + "%s: %s" % (opt._name, opt.value)
    if options.verbose.value:
      print "Raw Arguments: %s" % (argv or sys.argv)[1:]
      print "Parsed Arguments:"
      p(options)
    CliProject(OptValue(options))
  except ExpError, e:
    print >>sys.stderr, "Error: %s." % e
  except OptionError, e:
    print >>sys.stderr, "Usage Error (use -h for help): %s." % e
"""
== Example use cases that should be supported (at least for the gui) ==
Given classification results, view false positive or false negative images.
"""
| |
# Copyright 2008-2013 Yousef Ourabi
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import django
from django.conf import settings
from django.http import HttpResponseForbidden, HttpResponseRedirect, HttpResponse
from django.core.exceptions import MiddlewareNotUsed
from django.core.cache import cache
from django.template import loader
# Own model
from models import Banishment, Whitelist
class BanishMiddleware(object):
    def __init__(self):
        """
        Middleware init is called once per server on startup - do the heavy
        lifting here.
        """
        # If disabled or not enabled raise MiddleWareNotUsed so django
        # processes next middleware.
        self.ENABLED = getattr(settings, 'BANISH_ENABLED', False)
        self.DEBUG = getattr(settings, 'BANISH_DEBUG', False)
        self.USE_HTTP_X_FORWARDED_FOR = getattr(settings, 'BANISH_USE_HTTP_X_FORWARDED_FOR', False)
        # Treat an empty/missing User-Agent header as bannable by default.
        self.BANISH_EMPTY_UA = getattr(settings, 'BANISH_EMPTY_UA', True)
        self.BANISH_MESSAGE = getattr(settings, 'BANISH_MESSAGE', "You are banned.")
        self.BANISH_RESTRICT_FILTER = getattr(settings, 'BANISH_RESTRICT_FILTER', False)
        self.BANISH_URL_REDIRECT = getattr(settings, 'BANISH_URL_REDIRECT', None)
        # NOTE(review): "TEMPLATRE" is a typo, but it is the public settings
        # name existing deployments use -- renaming would break them.
        self.BANISH_TEMPLATRE = getattr(settings, 'BANISH_TEMPLATRE', None)
        self.BANISH_TOR_IPS = getattr(settings, 'BANISH_TOR_IPS', False)
        self.BANISH_ONLY_WHITELIST = getattr(settings, 'BANISH_ONLY_WHITELIST', False)
        # New version
        # NOTE(review): default is an int, but process_request iterates this
        # as a list of dicts when BANISH_RESTRICT_FILTER is on -- confirm
        # settings always override it with a list.
        self.BANISH_ABUSE_THRESHOLD_TO_URL = getattr(settings, 'BANISH_ABUSE_THRESHOLD_TO_URL', 10000)
        self.DEFAULT_BANISH_ABUSE_THRESHOLD = getattr(settings, 'DEFAULT_BANISH_ABUSE_THRESHOLD', False)
        if not self.ENABLED:
            raise MiddlewareNotUsed(
                "django-banish is not enabled via settings.py")
        if self.DEBUG:
            print >> sys.stderr, "[django-banish] status = enabled"
        # Prefix All keys in cache to avoid key collisions
        self.BANISH_PREFIX = 'DJANGO_BANISH:'
        self.ABUSE_PREFIX = 'DJANGO_BANISH_ABUSE:'
        self.WHITELIST_PREFIX = 'DJANGO_BANISH_WHITELIST:'
        self.BANNED_AGENTS = []
        if self.BANISH_EMPTY_UA:
            self.BANNED_AGENTS.append(None)
        # Populate various 'banish' buckets
        for ban in Banishment.objects.all():
            if self.DEBUG:
                print >> sys.stderr, "IP BANISHMENT: ", ban.type
            if ban.type == 'ip-address':
                cache_key = self.BANISH_PREFIX + ban.condition
                cache.set(cache_key, "1")
            if ban.type == 'user-agent':
                self.BANNED_AGENTS.append(ban.condition)
        for whitelist in Whitelist.objects.all():
            if whitelist.type == 'ip-address-whitelist':
                cache_key = self.WHITELIST_PREFIX + whitelist.condition
                cache.set(cache_key, "1")
    def _get_path(self, request):
        # Hook point: subclasses can override which path is rate-limited.
        return request.path
def _get_ip(self, request):
ip = request.META['REMOTE_ADDR']
if self.USE_HTTP_X_FORWARDED_FOR or not ip or ip == '127.0.0.1':
ip = request.META.get('HTTP_X_FORWARDED_FOR', ip).split(',')[0].strip()
return ip
def _is_tor_ip(self, ip):
""" Checks if ip address is a TOR exit node.
Relies on periodically updated IP list.
If IP list update has failed then gracefully assumes
there are no Tor exit nodes. This is so that
our service continues to function even if the external
party we are relying on goes down.
:param ip: IP address as a string
"""
TOR_CACHE_KEY = getattr(settings, 'TOR_CACHE_KEY')
ips = cache.get(TOR_CACHE_KEY)
if not ips:
# Tor IP list not available; IP check not active
return False
return ip in ips
    def process_request(self, request):
        """Gatekeep every request: allow whitelisted IPs through, block
        banned IPs / abusive IPs / banned user agents, and optionally block
        Tor exit nodes.

        Returns None to let the request continue, or an HTTP response
        (403, redirect, or rendered template) when the client is blocked.
        """
        # Start from the global threshold; may be overridden per-URL below.
        abuse_threshold = self.DEFAULT_BANISH_ABUSE_THRESHOLD
        url_name = "all"
        ip = self._get_ip(request)
        user_agent = request.META.get('HTTP_USER_AGENT', None)
        if self.BANISH_RESTRICT_FILTER:
            # Per-URL throttling: each entry maps a URL substring to its own
            # threshold and bucket name. Note the loop does not break, so the
            # LAST matching entry wins.
            for threshold_to_url in self.BANISH_ABUSE_THRESHOLD_TO_URL:
                if (self._get_path(request).find(threshold_to_url.get('url')) >= 0):
                    abuse_threshold = threshold_to_url.get(u'threshold')
                    url = threshold_to_url.get(u'url')
                    url_name = threshold_to_url.get(u'name')
                    if self.DEBUG:
                        print >> sys.stderr, "Request URL in BANISH_ABUSE_THRESHOLD_TO_URL: %s with %s" % (url, abuse_threshold)
        if self.DEBUG:
            print >> sys.stderr, "GOT IP FROM Request: %s and User Agent %s" % (ip, user_agent)
        # Check whitelist first, if not allowed, then check ban conditions
        if self.is_whitelisted(ip):
            return None
        # NOTE: monitor_abuse() is evaluated here even when BANISH_ONLY_WHITELIST
        # is set, so abuse counters are always updated for non-whitelisted IPs.
        elif self.is_banned(ip) or \
                self.monitor_abuse(ip, abuse_threshold, url_name) or \
                self.BANISH_ONLY_WHITELIST or \
                user_agent in self.BANNED_AGENTS:
            # Blocked: respond per configuration — redirect, template, or
            # a plain 403 message.
            if self.BANISH_URL_REDIRECT:
                return self.redirect_response_forbidden(self.BANISH_URL_REDIRECT)
            # NOTE(review): 'BANISH_TEMPLATRE' looks like a typo for
            # BANISH_TEMPLATE, but it must match the attribute set in
            # __init__ — confirm before renaming.
            elif self.BANISH_TEMPLATRE:
                return self.template_response_forbidden(request, self.BANISH_TEMPLATRE)
            else:
                return self.http_response_forbidden(self.BANISH_MESSAGE, content_type="text/html")
        else:
            # Not banned: optionally block Tor exit nodes as a last check.
            if self._is_tor_ip(ip) and self.BANISH_TOR_IPS:
                return self.http_response_forbidden("Banish TOR ip", content_type="text/html")
def http_response_forbidden(self, message, content_type):
if django.VERSION[:2] > (1, 3):
kwargs = {'content_type': content_type}
else:
kwargs = {'mimetype': content_type}
return HttpResponseForbidden(message, **kwargs)
def redirect_response_forbidden(self, url):
return HttpResponseRedirect(url)
def template_response_forbidden(self, request, template):
t = loader.get_template(template)
return HttpResponse(t.render({}, request))
def is_banned(self, ip):
# If a key BANISH MC key exists we know the user is banned.
is_banned = cache.get(self.BANISH_PREFIX + ip)
if self.DEBUG and is_banned:
print >> sys.stderr, "BANISH BANNED IP: ", self.BANISH_PREFIX + ip
return is_banned
def is_whitelisted(self, ip):
# If a whitelist key exists, return True to allow the request through
is_whitelisted = cache.get(self.WHITELIST_PREFIX + ip)
if self.DEBUG and is_whitelisted:
print >> sys.stderr, "BANISH WHITELISTED IP: ", self.WHITELIST_PREFIX + ip
return is_whitelisted
    def monitor_abuse(self, ip, abuse_threshold, url_name):
        """
        Track the number of hits for a given IP within a 60-second window.
        If the count reaches *abuse_threshold*, banish the user: write a
        permanent ban entry to the cache and persist a Banishment row.

        :param ip: client IP address as a string
        :param abuse_threshold: hit count that triggers a ban for this bucket
        :param url_name: URL bucket being throttled ("all" for the default)
        :returns: True when the threshold was exceeded, else False
        """
        # One counter per (IP, URL bucket) pair.
        cache_key = self.ABUSE_PREFIX + ip + ":" + url_name
        abuse_count = cache.get(cache_key)
        if self.DEBUG:
            print >> sys.stderr, "BANISH ABUSE COUNT: ", abuse_count
            print >> sys.stderr, "BANISH CACHE KEY: ", cache_key
        over_abuse_limit = False
        if not abuse_count:
            # First hit in the window: set counter value with expiration time 1 minute
            cache.set(cache_key, 1, 60)
        else:
            if abuse_count >= abuse_threshold:
                over_abuse_limit = True
                # Store IP abuse in the cache and in the database.
                # Check whether a ban row already exists for this bucket.
                oldbanishment = Banishment.objects.filter(condition=ip + ":" + url_name).exists()
                # Persist only if it does not exist yet.
                if not oldbanishment:
                    ban = Banishment(ban_reason="IP Abuse limit exceeded", type="ip-address", condition=ip + ":" + url_name)
                    ban.save()
                # Rewrite the cache entry with no expiration (permanent ban).
                # NOTE(review): this stores the string "1", so the next
                # abuse_count >= abuse_threshold comparison is str-vs-int —
                # always True on Python 2, a TypeError on Python 3; confirm
                # intent before porting.
                cache.set(cache_key, "1")
            else:
                # Below the threshold: only increment the counter.
                try:
                    cache.incr(cache_key)
                except ValueError:
                    # Counter expired between get() and incr(); drop the hit.
                    pass
        return over_abuse_limit
# NOTE(review): the following lines are non-code residue (dataset-viewer
# boilerplate accidentally appended to this file); commented out so the
# module remains parseable.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.