repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
jsalva/crux | refs/heads/master | crux/emberjs/ember_apps/crux/node_modules/ember-cli/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | 2354 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three: [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Keep any existing linked list if __init__ is called a second time.
            self.__root
        except AttributeError:
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in forward (insertion) order.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break the link cycles so the garbage collector has less work.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            # __map/__root may not exist yet if __init__ never ran fully.
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (most recently added).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node just after the sentinel (oldest).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    # Suppress 'OrderedDict.update: Method has no argument':
    # pylint: disable=E0211
    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v
        '''
        # `self` is taken from *args so that an `E` keyword argument works.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    __marker = object()  # sentinel distinguishing "no default given" in pop()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # The mutable default is intentional: it is a per-class recursion guard
        # keyed by (object id, thread id) for self-referencing dictionaries.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Drop the bookkeeping attributes; they are rebuilt on unpickling.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
|
thaJeztah/compose | refs/heads/master | tests/unit/container_test.py | 1 | import docker
from .. import mock
from .. import unittest
from ..helpers import BUSYBOX_IMAGE_WITH_TAG
from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_SLUG
from compose.container import Container
from compose.container import get_container_name
class ContainerTest(unittest.TestCase):
    """Unit tests for compose.container.Container using fake `docker ps`
    / inspect payloads (no real Docker daemon involved)."""

    def setUp(self):
        # Minimal inspect-style payload shared by most tests below.
        self.container_id = "abcabcabcbabc12345"
        self.container_dict = {
            "Id": self.container_id,
            "Image": BUSYBOX_IMAGE_WITH_TAG,
            "Command": "top",
            "Created": 1387384730,
            "Status": "Up 8 seconds",
            "Ports": None,
            "SizeRw": 0,
            "SizeRootFs": 0,
            "Names": ["/composetest_db_1", "/composetest_web_1/db"],
            "NetworkSettings": {
                "Ports": {},
            },
            "Config": {
                "Labels": {
                    "com.docker.compose.project": "composetest",
                    "com.docker.compose.service": "web",
                    "com.docker.compose.container-number": "7",
                },
            }
        }

    def test_from_ps(self):
        # from_ps() keeps only Id/Image and derives Name from the first entry
        # in "Names" (the non-link alias).
        container = Container.from_ps(None,
                                      self.container_dict,
                                      has_been_inspected=True)
        assert container.dictionary == {
            "Id": self.container_id,
            "Image": BUSYBOX_IMAGE_WITH_TAG,
            "Name": "/composetest_db_1",
        }

    def test_from_ps_prefixed(self):
        # Swarm prefixes names with the host; the prefix must be stripped.
        self.container_dict['Names'] = [
            '/swarm-host-1' + n for n in self.container_dict['Names']
        ]

        container = Container.from_ps(
            None,
            self.container_dict,
            has_been_inspected=True)
        assert container.dictionary == {
            "Id": self.container_id,
            "Image": BUSYBOX_IMAGE_WITH_TAG,
            "Name": "/composetest_db_1",
        }

    def test_environment(self):
        # "KEY=VALUE" strings from Config.Env are exposed as a dict.
        container = Container(None, {
            'Id': 'abc',
            'Config': {
                'Env': [
                    'FOO=BAR',
                    'BAZ=DOGE',
                ]
            }
        }, has_been_inspected=True)
        assert container.environment == {
            'FOO': 'BAR',
            'BAZ': 'DOGE',
        }

    def test_number(self):
        # Parsed from the com.docker.compose.container-number label.
        container = Container(None, self.container_dict, has_been_inspected=True)
        assert container.number == 7

    def test_name(self):
        container = Container.from_ps(None,
                                      self.container_dict,
                                      has_been_inspected=True)
        assert container.name == "composetest_db_1"

    def test_name_without_project(self):
        # Project prefix ("composetest_") is removed when present.
        self.container_dict['Name'] = "/composetest_web_7"
        container = Container(None, self.container_dict, has_been_inspected=True)
        assert container.name_without_project == "web_7"

    def test_name_without_project_custom_container_name(self):
        # A user-specified container_name is returned unchanged.
        self.container_dict['Name'] = "/custom_name_of_container"
        container = Container(None, self.container_dict, has_been_inspected=True)
        assert container.name_without_project == "custom_name_of_container"

    def test_name_without_project_one_off(self):
        # One-off containers are suffixed with a truncated slug label.
        self.container_dict['Name'] = "/composetest_web_092cd63296f"
        self.container_dict['Config']['Labels'][LABEL_SLUG] = (
            "092cd63296fdc446ad432d3905dd1fcbe12a2ba6b52"
        )
        self.container_dict['Config']['Labels'][LABEL_ONE_OFF] = 'True'
        container = Container(None, self.container_dict, has_been_inspected=True)
        assert container.name_without_project == 'web_092cd63296fd'

    def test_inspect_if_not_inspected(self):
        # inspect_container() must be called exactly once; the result is cached.
        mock_client = mock.create_autospec(docker.APIClient)
        container = Container(mock_client, dict(Id="the_id"))

        container.inspect_if_not_inspected()
        mock_client.inspect_container.assert_called_once_with("the_id")
        assert container.dictionary == mock_client.inspect_container.return_value
        assert container.has_been_inspected

        container.inspect_if_not_inspected()
        assert mock_client.inspect_container.call_count == 1

    def test_human_readable_ports_none(self):
        container = Container(None, self.container_dict, has_been_inspected=True)
        assert container.human_readable_ports == ''

    def test_human_readable_ports_public_and_private(self):
        # Unpublished ports are listed bare; published ones as host->container.
        self.container_dict['NetworkSettings']['Ports'].update({
            "45454/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49197"}],
            "45453/tcp": [],
        })
        container = Container(None, self.container_dict, has_been_inspected=True)

        expected = "45453/tcp, 0.0.0.0:49197->45454/tcp"
        assert container.human_readable_ports == expected

    def test_get_local_port(self):
        self.container_dict['NetworkSettings']['Ports'].update({
            "45454/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49197"}],
        })
        container = Container(None, self.container_dict, has_been_inspected=True)

        assert container.get_local_port(45454, protocol='tcp') == '0.0.0.0:49197'

    def test_human_readable_states_no_health(self):
        # Without a healthcheck, the state is just "Up".
        container = Container(None, {
            "State": {
                "Status": "running",
                "Running": True,
                "Paused": False,
                "Restarting": False,
                "OOMKilled": False,
                "Dead": False,
                "Pid": 7623,
                "ExitCode": 0,
                "Error": "",
                "StartedAt": "2018-01-29T00:34:25.2052414Z",
                "FinishedAt": "0001-01-01T00:00:00Z"
            },
        }, has_been_inspected=True)
        expected = "Up"
        assert container.human_readable_state == expected

    def test_human_readable_states_starting(self):
        container = Container(None, {
            "State": {
                "Status": "running",
                "Running": True,
                "Paused": False,
                "Restarting": False,
                "OOMKilled": False,
                "Dead": False,
                "Pid": 11744,
                "ExitCode": 0,
                "Error": "",
                "StartedAt": "2018-02-03T07:56:20.3591233Z",
                "FinishedAt": "2018-01-31T08:56:11.0505228Z",
                "Health": {
                    "Status": "starting",
                    "FailingStreak": 0,
                    "Log": []
                }
            }
        }, has_been_inspected=True)
        expected = "Up (health: starting)"
        assert container.human_readable_state == expected

    def test_human_readable_states_healthy(self):
        container = Container(None, {
            "State": {
                "Status": "running",
                "Running": True,
                "Paused": False,
                "Restarting": False,
                "OOMKilled": False,
                "Dead": False,
                "Pid": 5674,
                "ExitCode": 0,
                "Error": "",
                "StartedAt": "2018-02-03T08:32:05.3281831Z",
                "FinishedAt": "2018-02-03T08:11:35.7872706Z",
                "Health": {
                    "Status": "healthy",
                    "FailingStreak": 0,
                    "Log": []
                }
            }
        }, has_been_inspected=True)
        expected = "Up (healthy)"
        assert container.human_readable_state == expected

    def test_get(self):
        # get() supports dotted paths and returns None for missing keys.
        container = Container(None, {
            "Status": "Up 8 seconds",
            "HostConfig": {
                "VolumesFrom": ["volume_id"]
            },
        }, has_been_inspected=True)

        assert container.get('Status') == "Up 8 seconds"
        assert container.get('HostConfig.VolumesFrom') == ["volume_id"]
        assert container.get('Foo.Bar.DoesNotExist') is None

    def test_short_id(self):
        container = Container(None, self.container_dict, has_been_inspected=True)
        assert container.short_id == self.container_id[:12]

    def test_has_api_logs(self):
        # Only json-file and journald log drivers support `docker logs`.
        container_dict = {
            'HostConfig': {
                'LogConfig': {
                    'Type': 'json-file'
                }
            }
        }

        container = Container(None, container_dict, has_been_inspected=True)
        assert container.has_api_logs is True

        container_dict['HostConfig']['LogConfig']['Type'] = 'none'
        container = Container(None, container_dict, has_been_inspected=True)
        assert container.has_api_logs is False

        container_dict['HostConfig']['LogConfig']['Type'] = 'syslog'
        container = Container(None, container_dict, has_been_inspected=True)
        assert container.has_api_logs is False

        container_dict['HostConfig']['LogConfig']['Type'] = 'journald'
        container = Container(None, container_dict, has_been_inspected=True)
        assert container.has_api_logs is True

        container_dict['HostConfig']['LogConfig']['Type'] = 'foobar'
        container = Container(None, container_dict, has_been_inspected=True)
        assert container.has_api_logs is False
class GetContainerNameTestCase(unittest.TestCase):
    """Tests for the module-level get_container_name() helper."""

    def test_get_container_name(self):
        # No name information at all -> None.
        assert get_container_name({}) is None
        # Inspect payloads carry a single "Name".
        assert get_container_name({'Name': 'myproject_db_1'}) == 'myproject_db_1'
        # ps payloads carry "Names"; link aliases (with an extra "/") are skipped.
        assert get_container_name(
            {'Names': ['/myproject_db_1', '/myproject_web_1/db']}
        ) == 'myproject_db_1'
        # Swarm host prefixes are tolerated as well.
        assert get_container_name({
            'Names': [
                '/swarm-host-1/myproject_db_1',
                '/swarm-host-1/myproject_web_1/db'
            ]
        }) == 'myproject_db_1'
|
rs2/bokeh | refs/heads/master | examples/models/file/trail.py | 6 | # -*- coding: utf-8 -*-
from __future__ import print_function
from math import sin, cos, atan2, sqrt, radians
import numpy as np
import scipy.ndimage as im
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.util.browser import view
from bokeh.models.glyphs import Line, Patches
from bokeh.models.layouts import Column
from bokeh.models import (
Plot, GMapPlot, GMapOptions,
DataRange1d, ColumnDataSource,
LinearAxis, Grid, Label,
PanTool, WheelZoomTool, ResetTool)
from bokeh.sampledata.mtb import obiszow_mtb_xcm
def haversin(theta):
    """Return the haversine of *theta* (radians): sin**2(theta / 2)."""
    half_angle_sine = sin(theta / 2.0)
    return half_angle_sine * half_angle_sine
def distance(p1, p2):
    """Great-circle distance in km between (lat1, lon1) and (lat2, lon2).

    Uses the haversine formula with a mean Earth radius of 6371 km.
    """
    earth_radius_km = 6371
    lat1, lon1 = p1
    lat2, lon2 = p2
    phi1, phi2 = radians(lat1), radians(lat2)
    a = (haversin(radians(lat2 - lat1))
         + cos(phi1) * cos(phi2) * haversin(radians(lon2 - lon1)))
    return 2 * earth_radius_km * atan2(sqrt(a), sqrt(1 - a))
def prep_data(dataset):
    """Return a copy of *dataset* with cumulative distance and slope colors.

    Adds a "dist" column (cumulative km along the track, starting at 0) and a
    "colors" column classifying the grade of each segment between consecutive
    GPS fixes.  The input needs lat/lon/alt columns.
    """
    df = dataset.copy()

    # Segment lengths between consecutive fixes, in km.
    latlon = list(zip(df.lat, df.lon))
    dist = np.array([distance(latlon[i + 1], latlon[i]) for i in range(len((latlon[:-1])))])
    df["dist"] = np.concatenate(([0], np.cumsum(dist)))

    # Grade in percent; alt is in m and dist in km, hence the 1000 factor.
    # NOTE(review): a zero-length segment would divide by zero here — presumably
    # the sample data never repeats a fix; confirm before reusing on other tracks.
    slope = np.abs(100 * np.diff(df.alt) / (1000 * dist))

    # Bucket slopes into five severity classes (in-place, ascending order so
    # each assignment does not disturb later tests).
    slope[np.where( slope <  4) ] = 0 # "green"
    slope[np.where((slope >=  4) & (slope <  6))] = 1 # "yellow"
    slope[np.where((slope >=  6) & (slope < 10))] = 2 # "pink"
    slope[np.where((slope >= 10) & (slope < 15))] = 3 # "orange"
    slope[np.where( slope >= 15 )] = 4 # "red"

    # Median-filter the classes to smooth out single-sample spikes.
    slope = im.median_filter(slope, 6)

    # Map the class indices to color names for the plot.
    colors = np.empty_like(slope, dtype=object)
    colors[np.where(slope == 0)] = "green"
    colors[np.where(slope == 1)] = "yellow"
    colors[np.where(slope == 2)] = "pink"
    colors[np.where(slope == 3)] = "orange"
    colors[np.where(slope == 4)] = "red"
    df["colors"] = list(colors) + [None] # NOTE: add [None] just make pandas happy

    return df
# Trail name used in both plot titles below.
name = "Obiszów MTB XCM"

# Google Maps now requires an API key. You can find out how to get one here:
# https://developers.google.com/maps/documentation/javascript/get-api-key
API_KEY = "GOOGLE_API_KEY"  # placeholder; replace with a real key to render the map
def trail_map(data):
    """Return a GMapPlot of the trail route drawn over Google Maps.

    *data* is the DataFrame produced by prep_data (lon/lat/dist columns used).
    """
    # Center the map on the midpoint of the route's bounding box.
    lon = (min(data.lon) + max(data.lon)) / 2
    lat = (min(data.lat) + max(data.lat)) / 2

    map_options = GMapOptions(lng=lon, lat=lat, zoom=13)
    plot = GMapPlot(plot_width=800, plot_height=800, map_options=map_options, api_key=API_KEY)
    plot.title.text = "%s - Trail Map" % name
    plot.x_range = DataRange1d()
    plot.y_range = DataRange1d()
    plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())

    # The route itself, as a single blue polyline.
    line_source = ColumnDataSource(dict(x=data.lon, y=data.lat, dist=data.dist))
    line = Line(x="x", y="y", line_color="blue", line_width=2)
    plot.add_glyph(line_source, line)

    if plot.api_key == "GOOGLE_API_KEY":
        # Warn on-screen when the placeholder key has not been replaced.
        plot.add_layout(Label(x=240, y=700, x_units='screen', y_units='screen',
                              text='Replace GOOGLE_API_KEY with your own key',
                              text_color='red'))

    return plot
def altitude_profile(data):
    """Return a Plot of altitude vs. cumulative distance, colored by grade.

    *data* is the DataFrame produced by prep_data (dist/alt/colors columns used).
    """
    plot = Plot(plot_width=800, plot_height=400)
    plot.title.text = "%s - Altitude Profile" % name
    plot.x_range = DataRange1d()
    plot.y_range = DataRange1d(range_padding=0)

    xaxis = LinearAxis(axis_label="Distance (km)")
    plot.add_layout(xaxis, 'below')
    yaxis = LinearAxis(axis_label="Altitude (m)")
    plot.add_layout(yaxis, 'left')
    plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))  # x grid
    plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))  # y grid
    plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())

    X, Y = data.dist, data.alt
    y0 = min(Y)

    # One quadrilateral per segment, from the baseline (y0) up to the profile,
    # filled with the slope-severity color computed by prep_data.
    patches_source = ColumnDataSource(dict(
        xs=[[X[i], X[i+1], X[i+1], X[i]] for i in range(len(X[:-1])) ],
        ys=[[y0,   y0,     Y[i+1], Y[i]] for i in range(len(Y[:-1])) ],
        color=data.colors[:-1]
    ))
    patches = Patches(xs="xs", ys="ys", fill_color="color", line_color="color")
    plot.add_glyph(patches_source, patches)

    # Thin black outline of the profile on top of the patches.
    line_source = ColumnDataSource(dict(x=data.dist, y=data.alt))
    line = Line(x='x', y='y', line_color="black", line_width=1)
    plot.add_glyph(line_source, line)

    return plot
# Build the document at import time so embedding tools can reuse it.
data = prep_data(obiszow_mtb_xcm)

trail = trail_map(data)
altitude = altitude_profile(data)

layout = Column(children=[altitude, trail])

doc = Document()
doc.add_root(layout)

if __name__ == "__main__":
    # Only when run as a script: validate, write the HTML file and open it.
    doc.validate()
    filename = "trail.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Trail map and altitude profile"))
    print("Wrote %s" % filename)
    view(filename)
|
imrehg/labhardware | refs/heads/master | projects/tempmon/ourgui.py | 24 | import wx
def openFile(type="*"):
    ''' Simple application to present a file-selection dialog

    filepath = openFile(type="*")

    input parameter
    ----------------
    type: file extension to filter by default.  E.g.: type="py" to select
          Python source files.  (The name shadows the builtin but is kept
          for backward compatibility with existing callers.)

    output parameter
    -----------------
    filepath: selected file name with complete path,
              or empty string if no file selected
    '''
    application = wx.PySimpleApp()
    dialog = wx.FileDialog(parent=None, message="Select file to open",
                           wildcard="*.%s" % (type,), style=wx.OPEN)
    try:
        if dialog.ShowModal() == wx.ID_OK:
            return dialog.GetPath()
        # print() with a single argument is valid in both Python 2 and 3.
        print("Nothing was selected.")
        return ""
    finally:
        # Fix: the dialog was previously leaked; wx top-level windows must be
        # destroyed explicitly or they keep the event loop alive.
        dialog.Destroy()
|
fossevents/fossevents.in | refs/heads/master | fossevents/users/forms.py | 2 | # -*- coding: utf-8 -*-
from django import forms
from .models import User
class UserForm(forms.ModelForm):
    """ModelForm for editing a user's display name only."""

    class Meta:
        # Set this form to use the User model.
        model = User

        # Constrain the UserForm to just these fields.
        fields = ("first_name", "last_name")
|
zhanghenry/stocks | refs/heads/master | tests/logging_tests/tests.py | 13 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import logging
import warnings
from admin_scripts.tests import AdminScriptTestCase
from django.core import mail
from django.core.files.temp import NamedTemporaryFile
from django.test import RequestFactory, TestCase, override_settings
from django.test.utils import patch_logger
from django.utils.deprecation import RemovedInNextVersionWarning
from django.utils.encoding import force_text
from django.utils.log import (
AdminEmailHandler, CallbackFilter, RequireDebugFalse, RequireDebugTrue,
)
from django.utils.six import StringIO
from .logconfig import MyEmailBackend
# logging config prior to using filter with mail_admins
# (used by SetupConfigureLogging below to exercise django.setup()).
OLD_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
class LoggingFiltersTest(TestCase):
    """The RequireDebugFalse/RequireDebugTrue filters gate on settings.DEBUG."""

    def test_require_debug_false_filter(self):
        """
        Test the RequireDebugFalse filter class.
        """
        filter_ = RequireDebugFalse()

        # The filter ignores the record argument entirely; only DEBUG matters.
        with self.settings(DEBUG=True):
            self.assertEqual(filter_.filter("record is not used"), False)

        with self.settings(DEBUG=False):
            self.assertEqual(filter_.filter("record is not used"), True)

    def test_require_debug_true_filter(self):
        """
        Test the RequireDebugTrue filter class.
        """
        filter_ = RequireDebugTrue()

        with self.settings(DEBUG=True):
            self.assertEqual(filter_.filter("record is not used"), True)

        with self.settings(DEBUG=False):
            self.assertEqual(filter_.filter("record is not used"), False)
class DefaultLoggingTest(TestCase):
    """The default 'django' logger only emits when DEBUG=True."""

    def setUp(self):
        # Capture the first handler's stream so output can be inspected.
        self.logger = logging.getLogger('django')
        self.old_stream = self.logger.handlers[0].stream

    def tearDown(self):
        self.logger.handlers[0].stream = self.old_stream

    def test_django_logger(self):
        """
        The 'django' base logger only output anything when DEBUG=True.
        """
        output = StringIO()
        self.logger.handlers[0].stream = output
        self.logger.error("Hey, this is an error.")
        # Nothing with DEBUG=False (the default under test)...
        self.assertEqual(output.getvalue(), '')

        # ...but the message shows up once DEBUG is enabled.
        with self.settings(DEBUG=True):
            self.logger.error("Hey, this is an error.")
            self.assertEqual(output.getvalue(), 'Hey, this is an error.\n')
class WarningLoggerTests(TestCase):
    """
    Tests that warnings output for RemovedInDjangoXXWarning (XX being the next
    Django version) is enabled and captured to the logging system
    """
    def setUp(self):
        # If tests are invoke with "-Wall" (or any -W flag actually) then
        # warning logging gets disabled (see configure_logging in django/utils/log.py).
        # However, these tests expect warnings to be logged, so manually force warnings
        # to the logs. Use getattr() here because the logging capture state is
        # undocumented and (I assume) brittle.
        self._old_capture_state = bool(getattr(logging, '_warnings_showwarning', False))
        logging.captureWarnings(True)

        # this convoluted setup is to avoid printing this deprecation to
        # stderr during test running - as the test runner forces deprecations
        # to be displayed at the global py.warnings level
        self.logger = logging.getLogger('py.warnings')
        self.outputs = []
        self.old_streams = []
        for handler in self.logger.handlers:
            self.old_streams.append(handler.stream)
            self.outputs.append(StringIO())
            handler.stream = self.outputs[-1]

    def tearDown(self):
        # Restore the original handler streams saved in setUp.
        for i, handler in enumerate(self.logger.handlers):
            self.logger.handlers[i].stream = self.old_streams[i]

        # Reset warnings state.
        logging.captureWarnings(self._old_capture_state)

    @override_settings(DEBUG=True)
    def test_warnings_capture(self):
        with warnings.catch_warnings():
            warnings.filterwarnings('always')
            warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
            output = force_text(self.outputs[0].getvalue())
        self.assertIn('Foo Deprecated', output)

    def test_warnings_capture_debug_false(self):
        # With DEBUG=False the deprecation must NOT reach the log.
        with warnings.catch_warnings():
            warnings.filterwarnings('always')
            warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
            output = force_text(self.outputs[0].getvalue())
        self.assertNotIn('Foo Deprecated', output)

    @override_settings(DEBUG=True)
    def test_error_filter_still_raises(self):
        # An 'error' warnings filter must still turn the warning into an
        # exception even while warnings are being captured by logging.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                'error',
                category=RemovedInNextVersionWarning
            )
            with self.assertRaises(RemovedInNextVersionWarning):
                warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
class CallbackFilterTest(TestCase):
    """CallbackFilter delegates the filtering decision to a user callback."""

    def test_sense(self):
        # The filter's verdict is exactly the callback's return value.
        f_false = CallbackFilter(lambda r: False)
        f_true = CallbackFilter(lambda r: True)

        self.assertEqual(f_false.filter("record"), False)
        self.assertEqual(f_true.filter("record"), True)

    def test_passes_on_record(self):
        # The log record is handed to the callback unchanged.
        collector = []

        def _callback(record):
            collector.append(record)
            return True

        f = CallbackFilter(_callback)

        f.filter("a record")

        self.assertEqual(collector, ["a record"])
class AdminEmailHandlerTest(TestCase):
    """Behavior of AdminEmailHandler: subjects, backends and filtering."""

    logger = logging.getLogger('django.request')

    def get_admin_email_handler(self, logger):
        # Inspired from views/views.py: send_log()
        # ensuring the AdminEmailHandler does not get filtered out
        # even with DEBUG=True.
        admin_email_handler = [
            h for h in logger.handlers
            if h.__class__.__name__ == "AdminEmailHandler"
        ][0]
        return admin_email_handler

    def test_fail_silently(self):
        # Mail failures while logging must not raise.
        admin_email_handler = self.get_admin_email_handler(self.logger)
        self.assertTrue(admin_email_handler.connection().fail_silently)

    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-'
    )
    def test_accepts_args(self):
        """
        Ensure that user-supplied arguments and the EMAIL_SUBJECT_PREFIX
        setting are used to compose the email subject.
        Refs #16736.
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'

        admin_email_handler = self.get_admin_email_handler(self.logger)
        # Backup then override original filters
        orig_filters = admin_email_handler.filters
        try:
            admin_email_handler.filters = []

            self.logger.error(message, token1, token2)

            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR: Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters

    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-',
        INTERNAL_IPS=('127.0.0.1',),
    )
    def test_accepts_args_and_request(self):
        """
        Ensure that the subject is also handled if being
        passed a request object.
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'

        admin_email_handler = self.get_admin_email_handler(self.logger)
        # Backup then override original filters
        orig_filters = admin_email_handler.filters
        try:
            admin_email_handler.filters = []
            rf = RequestFactory()
            request = rf.get('/')
            self.logger.error(message, token1, token2,
                extra={
                    'status_code': 403,
                    'request': request,
                }
            )
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR (internal IP): Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters

    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='',
        DEBUG=False,
    )
    def test_subject_accepts_newlines(self):
        """
        Ensure that newlines in email reports' subjects are escaped to avoid
        AdminErrorHandler to fail.
        Refs #17281.
        """
        message = 'Message \r\n with newlines'
        expected_subject = 'ERROR: Message \\r\\n with newlines'

        self.assertEqual(len(mail.outbox), 0)

        self.logger.error(message)

        self.assertEqual(len(mail.outbox), 1)
        self.assertNotIn('\n', mail.outbox[0].subject)
        self.assertNotIn('\r', mail.outbox[0].subject)
        self.assertEqual(mail.outbox[0].subject, expected_subject)

    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='',
        DEBUG=False,
    )
    def test_truncate_subject(self):
        """
        RFC 2822's hard limit is 998 characters per line.
        So, minus "Subject: ", the actual subject must be no longer than 989
        characters.
        Refs #17281.
        """
        message = 'a' * 1000
        expected_subject = 'ERROR: aa' + 'a' * 980

        self.assertEqual(len(mail.outbox), 0)

        self.logger.error(message)

        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, expected_subject)

    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        DEBUG=False,
    )
    def test_uses_custom_email_backend(self):
        """
        Refs #19325
        """
        message = 'All work and no play makes Jack a dull boy'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        mail_admins_called = {'called': False}

        def my_mail_admins(*args, **kwargs):
            connection = kwargs['connection']
            self.assertIsInstance(connection, MyEmailBackend)
            mail_admins_called['called'] = True

        # Monkeypatches
        orig_mail_admins = mail.mail_admins
        orig_email_backend = admin_email_handler.email_backend
        mail.mail_admins = my_mail_admins
        admin_email_handler.email_backend = (
            'logging_tests.logconfig.MyEmailBackend')

        try:
            self.logger.error(message)
            self.assertTrue(mail_admins_called['called'])
        finally:
            # Revert Monkeypatches
            mail.mail_admins = orig_mail_admins
            admin_email_handler.email_backend = orig_email_backend

    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
    )
    def test_emit_non_ascii(self):
        """
        #23593 - AdminEmailHandler should allow Unicode characters in the
        request.
        """
        handler = self.get_admin_email_handler(self.logger)
        record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
        rf = RequestFactory()
        url_path = '/º'
        record.request = rf.get(url_path)
        handler.emit(record)
        self.assertEqual(len(mail.outbox), 1)
        msg = mail.outbox[0]
        self.assertEqual(msg.to, ['admin@example.com'])
        self.assertEqual(msg.subject, "[Django] ERROR (EXTERNAL IP): message")
        self.assertIn("path:%s" % url_path, msg.body)

    @override_settings(
        MANAGERS=(('manager', 'manager@example.com'),),
        DEBUG=False,
    )
    def test_customize_send_mail_method(self):
        # Subclasses may redirect mail by overriding send_mail().
        class ManagerEmailHandler(AdminEmailHandler):
            def send_mail(self, subject, message, *args, **kwargs):
                mail.mail_managers(subject, message, *args, connection=self.connection(), **kwargs)

        handler = ManagerEmailHandler()
        record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
        self.assertEqual(len(mail.outbox), 0)
        handler.emit(record)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['manager@example.com'])
class SettingsConfigTest(AdminScriptTestCase):
    """
    Test that accessing settings in a custom logging handler does not trigger
    a circular import error.
    """
    def setUp(self):
        # LOGGING dict (as settings source text) naming a handler class that
        # itself touches Django settings when instantiated.
        log_config = """{
    'version': 1,
    'handlers': {
        'custom_handler': {
            'level': 'INFO',
            'class': 'logging_tests.logconfig.MyHandler',
        }
    }
}"""
        self.write_settings('settings.py', sdict={'LOGGING': log_config})
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_circular_dependency(self):
        # validate is just an example command to trigger settings configuration
        out, err = self.run_manage(['validate'])
        self.assertNoOutput(err)
        self.assertOutput(out, "System check identified no issues (0 silenced).")
def dictConfig(config):
    """Stand-in LOGGING_CONFIG callable; merely records that it was invoked."""
    setattr(dictConfig, 'called', True)
setattr(dictConfig, 'called', False)
class SetupConfigureLogging(TestCase):
    """
    Test that calling django.setup() initializes the logging configuration.
    """
    @override_settings(LOGGING_CONFIG='logging_tests.tests.dictConfig',
                       LOGGING=OLD_LOGGING)
    def test_configure_initializes_logging(self):
        from django import setup
        setup()
        # setup() must have routed LOGGING through the dictConfig stub above.
        self.assertTrue(dictConfig.called)
@override_settings(DEBUG=True, ROOT_URLCONF='logging_tests.urls')
class SecurityLoggerTest(TestCase):
    def test_suspicious_operation_creates_log_message(self):
        # The /suspicious/ view raises SuspiciousOperation('dubious');
        # the security logger must record exactly one error for it.
        with patch_logger('django.security.SuspiciousOperation', 'error') as calls:
            self.client.get('/suspicious/')
            self.assertEqual(len(calls), 1)
            self.assertEqual(calls[0], 'dubious')
    def test_suspicious_operation_uses_sublogger(self):
        # Subclass exceptions route to the matching child security logger.
        with patch_logger('django.security.DisallowedHost', 'error') as calls:
            self.client.get('/suspicious_spec/')
            self.assertEqual(len(calls), 1)
            self.assertEqual(calls[0], 'dubious')
    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        DEBUG=False,
    )
    def test_suspicious_email_admins(self):
        # With DEBUG off, security events are also mailed to ADMINS.
        self.client.get('/suspicious/')
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('path:/suspicious/,', mail.outbox[0].body)
class SettingsCustomLoggingTest(AdminScriptTestCase):
    """
    Test that using a logging defaults are still applied when using a custom
    callable in LOGGING_CONFIG (i.e., logging.config.fileConfig).
    """
    def setUp(self):
        # fileConfig-style ini describing a single stdout stream handler.
        logging_conf = """
[loggers]
keys=root
[handlers]
keys=stream
[formatters]
keys=simple
[logger_root]
handlers=stream
[handler_stream]
class=StreamHandler
formatter=simple
args=(sys.stdout,)
[formatter_simple]
format=%(message)s
"""
        self.temp_file = NamedTemporaryFile()
        self.temp_file.write(logging_conf.encode('utf-8'))
        self.temp_file.flush()
        # Point LOGGING_CONFIG at fileConfig and LOGGING at the temp file path.
        sdict = {'LOGGING_CONFIG': '"logging.config.fileConfig"',
                 'LOGGING': 'r"%s"' % self.temp_file.name}
        self.write_settings('settings.py', sdict=sdict)
    def tearDown(self):
        self.temp_file.close()
        self.remove_settings('settings.py')
    def test_custom_logging(self):
        out, err = self.run_manage(['validate'])
        self.assertNoOutput(err)
        self.assertOutput(out, "System check identified no issues (0 silenced).")
|
anish/buildbot | refs/heads/master | master/buildbot/test/unit/test_www_auth.py | 6 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.credentials import UsernamePassword
from twisted.cred.error import UnauthorizedLogin
from twisted.internet import defer
from twisted.trial import unittest
from twisted.web.error import Error
from twisted.web.guard import BasicCredentialFactory
from twisted.web.guard import HTTPAuthSessionWrapper
from twisted.web.resource import IResource
from buildbot.test.util import www
from buildbot.test.util.misc import TestReactorMixin
from buildbot.www import auth
class AuthResourceMixin:
    """Mixin that wires a fake master and its configured auth into a test."""
    def setUpAuthResource(self):
        # make_master comes from www.WwwTestMixin; url is the app's base URL.
        self.master = self.make_master(url='h:/a/b/')
        self.auth = self.master.config.www['auth']
        # Cross-link master and auth the way the real www service does.
        self.master.www.auth = self.auth
        self.auth.master = self.master
class AuthRootResource(TestReactorMixin, www.WwwTestMixin, AuthResourceMixin,
                       unittest.TestCase):
    """Tests for the /auth root resource's child dispatch."""
    def setUp(self):
        self.setUpTestReactor()
        self.setUpAuthResource()
        self.rsrc = auth.AuthRootResource(self.master)
    def test_getChild_login(self):
        # /auth/login must be served by whatever getLoginResource returns.
        glr = mock.Mock(name='glr')
        self.master.www.auth.getLoginResource = glr
        child = self.rsrc.getChild(b'login', mock.Mock(name='req'))
        self.assertIdentical(child, glr())
    def test_getChild_logout(self):
        # Likewise /auth/logout delegates to getLogoutResource.
        glr = mock.Mock(name='glr')
        self.master.www.auth.getLogoutResource = glr
        child = self.rsrc.getChild(b'logout', mock.Mock(name='req'))
        self.assertIdentical(child, glr())
class AuthBase(TestReactorMixin, www.WwwTestMixin, unittest.TestCase):
    """Tests for the default behaviors of auth.AuthBase."""
    def setUp(self):
        self.setUpTestReactor()
        self.auth = auth.AuthBase()
        self.master = self.make_master(url='h:/a/b/')
        self.auth.master = self.master
        self.req = self.make_request(b'/')
    @defer.inlineCallbacks
    def test_maybeAutoLogin(self):
        # The base implementation is a no-op and yields None.
        self.assertEqual((yield self.auth.maybeAutoLogin(self.req)), None)
    def test_getLoginResource(self):
        # AuthBase cannot produce a login resource; it must raise.
        with self.assertRaises(Error):
            self.auth.getLoginResource()
    @defer.inlineCallbacks
    def test_updateUserInfo(self):
        self.auth.userInfoProvider = auth.UserInfoProviderBase()
        self.auth.userInfoProvider.getUserInfo = lambda un: {'info': un}
        self.req.session.user_info = {'username': 'elvira'}
        yield self.auth.updateUserInfo(self.req)
        # Provider-supplied info is merged into the existing session info.
        self.assertEqual(self.req.session.user_info,
                         {'info': 'elvira', 'username': 'elvira'})
    def test_getConfigDict(self):
        # BUG FIX: this method was named getConfigDict (no test_ prefix), so
        # the test runner never executed it, and its body called the
        # non-existent module-level auth.getConfigDict() instead of the
        # instance method under test.
        self.assertEqual(self.auth.getConfigDict(),
                         {'name': 'AuthBase'})
class UseAuthInfoProviderBase(unittest.TestCase):
    """Tests for the default UserInfoProviderBase implementation."""
    @defer.inlineCallbacks
    def test_getUserInfo(self):
        # The base provider only knows the username and echoes it as 'email'.
        uip = auth.UserInfoProviderBase()
        self.assertEqual((yield uip.getUserInfo('jess')),
                         {'email': 'jess'})
class NoAuth(unittest.TestCase):
    """Smoke test: the NoAuth class is exposed by buildbot.www.auth."""
    def test_exists(self):
        self.assertTrue(auth.NoAuth)
class RemoteUserAuth(TestReactorMixin, www.WwwTestMixin, unittest.TestCase):
    """Tests for header-based (reverse-proxy style) authentication."""
    def setUp(self):
        self.setUpTestReactor()
        # The auth plugin reads the configured request header to identify users.
        self.auth = auth.RemoteUserAuth(header=b'HDR')
        self.make_master()
        self.request = self.make_request(b'/')
    @defer.inlineCallbacks
    def test_maybeAutoLogin(self):
        self.request.input_headers[b'HDR'] = b'rachel@foo.com'
        yield self.auth.maybeAutoLogin(self.request)
        # A user@realm header value is split into username/realm/email fields.
        self.assertEqual(self.request.session.user_info, {
            'username': 'rachel',
            'realm': 'foo.com',
            'email': 'rachel'})
    @defer.inlineCallbacks
    def test_maybeAutoLogin_no_header(self):
        # A missing header must be rejected with HTTP 403.
        try:
            yield self.auth.maybeAutoLogin(self.request)
        except Error as e:
            self.assertEqual(int(e.status), 403)
        else:
            self.fail("403 expected")
    @defer.inlineCallbacks
    def test_maybeAutoLogin_mismatched_value(self):
        # Header present but not matching the expected user@realm pattern.
        self.request.input_headers[b'HDR'] = b'rachel'
        try:
            yield self.auth.maybeAutoLogin(self.request)
        except Error as e:
            self.assertEqual(int(e.status), 403)
        else:
            self.fail("403 expected")
class AuthRealm(TestReactorMixin, www.WwwTestMixin, unittest.TestCase):
    """Tests for auth.AuthRealm avatar dispatch."""
    def setUp(self):
        self.setUpTestReactor()
        # BUG FIX: a RemoteUserAuth instance was previously created here and
        # immediately overwritten on the next line; the dead assignment has
        # been removed.
        self.auth = auth.NoAuth()
        self.make_master()
    def test_requestAvatar(self):
        realm = auth.AuthRealm(self.master, self.auth)
        itfc, rsrc, logout = realm.requestAvatar("me", None, IResource)
        # The realm must hand back the IResource interface and a resource
        # that pre-authenticates the requested avatar id.
        self.assertIdentical(itfc, IResource)
        self.assertIsInstance(rsrc, auth.PreAuthenticatedLoginResource)
class TwistedICredAuthBase(TestReactorMixin, www.WwwTestMixin,
                           unittest.TestCase):
    def setUp(self):
        self.setUpTestReactor()
    # twisted.web makes it difficult to simulate the authentication process, so
    # this only tests the mechanics of the getLoginResource method.
    def test_getLoginResource(self):
        self.auth = auth.TwistedICredAuthBase(
            credentialFactories=[BasicCredentialFactory("buildbot")],
            checkers=[InMemoryUsernamePasswordDatabaseDontUse(good=b'guy')])
        self.auth.master = self.make_master(url='h:/a/b/')
        rsrc = self.auth.getLoginResource()
        # The resource must be guarded by twisted.web's HTTP-auth wrapper.
        self.assertIsInstance(rsrc, HTTPAuthSessionWrapper)
class UserPasswordAuth(www.WwwTestMixin, unittest.TestCase):
    """UserPasswordAuth must normalize credentials to bytes."""
    def test_passwordStringToBytes(self):
        # Accepts a dict of credentials with str or bytes values...
        login = {"user_string": "password",
                 "user_bytes": b"password"}
        correct_login = {b"user_string": b"password",
                         b"user_bytes": b"password"}
        self.auth = auth.UserPasswordAuth(login)
        self.assertEqual(self.auth.checkers[0].users, correct_login)
        # ...and a list of (user, password) tuples as well.
        login = [("user_string", "password"),
                 ("user_bytes", b"password")]
        correct_login = {b"user_string": b"password",
                         b"user_bytes": b"password"}
        self.auth = auth.UserPasswordAuth(login)
        self.assertEqual(self.auth.checkers[0].users, correct_login)
class CustomAuth(TestReactorMixin, www.WwwTestMixin, unittest.TestCase):
    class MockCustomAuth(auth.CustomAuth):
        # Minimal subclass: accepts exactly one username/password pair.
        def check_credentials(self, us, ps):
            return us == 'fellow' and ps == 'correct'
    def setUp(self):
        self.setUpTestReactor()
    @defer.inlineCallbacks
    def test_callable(self):
        self.auth = self.MockCustomAuth()
        # Correct credentials resolve to the avatar id (the username).
        cred_good = UsernamePassword('fellow', 'correct')
        result_good = yield self.auth.checkers[0].requestAvatarId(cred_good)
        self.assertEqual(result_good, 'fellow')
        # Wrong credentials must fail with UnauthorizedLogin.
        cred_bad = UsernamePassword('bandid', 'incorrect')
        defer_bad = self.auth.checkers[0].requestAvatarId(cred_bad)
        yield self.assertFailure(defer_bad, UnauthorizedLogin)
class LoginResource(TestReactorMixin, www.WwwTestMixin, AuthResourceMixin,
                    unittest.TestCase):
    def setUp(self):
        self.setUpTestReactor()
        self.setUpAuthResource()
    @defer.inlineCallbacks
    def test_render(self):
        self.rsrc = auth.LoginResource(self.master)
        # Stub renderLogin so we only verify that rendering delegates to it.
        self.rsrc.renderLogin = mock.Mock(
            spec=self.rsrc.renderLogin, return_value=defer.succeed(b'hi'))
        yield self.render_resource(self.rsrc, b'/auth/login')
        self.rsrc.renderLogin.assert_called_with(mock.ANY)
class PreAuthenticatedLoginResource(TestReactorMixin, www.WwwTestMixin,
                                    AuthResourceMixin, unittest.TestCase):
    def setUp(self):
        self.setUpTestReactor()
        self.setUpAuthResource()
        # 'him' is the already-authenticated username for this resource.
        self.rsrc = auth.PreAuthenticatedLoginResource(self.master, 'him')
    @defer.inlineCallbacks
    def test_render(self):
        self.auth.maybeAutoLogin = mock.Mock()
        def updateUserInfo(request):
            # Fake user-info update: derive the email from the username.
            session = request.getSession()
            session.user_info['email'] = session.user_info['username'] + "@org"
            session.updateSession(request)
        self.auth.updateUserInfo = mock.Mock(side_effect=updateUserInfo)
        res = yield self.render_resource(self.rsrc, b'/auth/login')
        self.assertEqual(res, {'redirected': b'h:/a/b/#/'})
        # Pre-authenticated: no auto-login attempt, but info is refreshed.
        self.assertFalse(self.auth.maybeAutoLogin.called)
        self.auth.updateUserInfo.assert_called_with(mock.ANY)
        self.assertEqual(self.master.session.user_info,
                         {'email': 'him@org', 'username': 'him'})
class LogoutResource(TestReactorMixin, www.WwwTestMixin, AuthResourceMixin,
                     unittest.TestCase):
    def setUp(self):
        self.setUpTestReactor()
        self.setUpAuthResource()
        self.rsrc = auth.LogoutResource(self.master)
    @defer.inlineCallbacks
    def test_render(self):
        self.master.session.expire = mock.Mock()
        res = yield self.render_resource(self.rsrc, b'/auth/logout')
        self.assertEqual(res, {'redirected': b'h:/a/b/#/'})
        # Logging out must expire the server-side session.
        self.master.session.expire.assert_called_with()
    @defer.inlineCallbacks
    def test_render_with_crlf(self):
        # CRLF injection in the redirect parameter must not reach the header.
        self.master.session.expire = mock.Mock()
        res = yield self.render_resource(self.rsrc, b'/auth/logout?redirect=%0d%0abla')
        # everything after a %0d shall be stripped
        self.assertEqual(res, {'redirected': b'h:/a/b/#'})
        self.master.session.expire.assert_called_with()
|
dycodedev/taiga-back | refs/heads/master | taiga/projects/votes/services.py | 1 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db.models import F
from django.db.transaction import atomic
from django.apps import apps
from django.contrib.auth import get_user_model
from .models import Votes, Vote
def add_vote(obj, user):
    """Register *user*'s vote on *obj*.

    Idempotent: if the user has already voted the object, nothing changes and
    ``None`` is returned; otherwise the new ``Vote`` is returned and the
    aggregate ``Votes`` counter is incremented.

    :param obj: Any Django model instance.
    :param user: User adding the vote. :class:`~taiga.users.models.User` instance.
    """
    content_type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(obj)
    with atomic():
        vote, created = Vote.objects.get_or_create(content_type=content_type,
                                                   object_id=obj.id,
                                                   user=user)
        if not created:
            # Duplicate vote: leave the counter untouched.
            return
        counter, _ = Votes.objects.get_or_create(content_type=content_type,
                                                 object_id=obj.id)
        counter.count = F('count') + 1
        counter.save()
    return vote
def remove_vote(obj, user):
    """Delete *user*'s vote on *obj*, if any.

    Idempotent: when no matching vote exists nothing happens.

    :param obj: Any Django model instance.
    :param user: User removing her vote. :class:`~taiga.users.models.User` instance.
    """
    content_type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(obj)
    with atomic():
        existing = Vote.objects.filter(content_type=content_type,
                                       object_id=obj.id,
                                       user=user)
        if not existing.exists():
            return
        existing.delete()
        counter, _ = Votes.objects.get_or_create(content_type=content_type,
                                                 object_id=obj.id)
        counter.count = F('count') - 1
        counter.save()
def get_voters(obj):
    """Return the users who voted *obj*.

    :param obj: Any Django model instance.
    :return: User queryset object representing the users that voted the object.
    """
    content_type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(obj)
    user_model = get_user_model()
    return user_model.objects.filter(votes__content_type=content_type,
                                     votes__object_id=obj.id)
def get_votes(obj):
    """Return the number of votes an object has.

    :param obj: Any Django model instance.
    :return: Number of votes or `0` if the object has no votes at all.
    """
    content_type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(obj)
    try:
        counter = Votes.objects.get(content_type=content_type, object_id=obj.id)
    except Votes.DoesNotExist:
        # No aggregate row yet means nobody has voted.
        return 0
    return counter.count
def get_voted(user_or_id, model):
    """Return the *model* objects voted by the given user.

    :param user_or_id: :class:`~taiga.users.models.User` instance or id.
    :param model: Show only objects of this kind. Can be any Django model class.
    :return: Queryset of objects representing the votes of the user.
    """
    content_type = apps.get_model("contenttypes", "ContentType").objects.get_for_model(model)
    # Raw join against votes_vote; the interpolated table name comes from
    # model metadata, not user input.
    where_clauses = ('votes_vote.content_type_id = %s',
                     '%s.id = votes_vote.object_id' % model._meta.db_table,
                     'votes_vote.user_id = %s')
    user_id = user_or_id.id if isinstance(user_or_id, get_user_model()) else user_or_id
    return model.objects.extra(where=where_clauses, tables=('votes_vote',),
                               params=(content_type.id, user_id))
|
a8ksh4/solar | refs/heads/master | Rev-2/Solar.py | 1 | #!/usr/bin/python
'''
Created on Oct 23, 2013
@author: drnorris
'''
from Quad import *
import sys, os
# Bookkeeping constants describing where this script lives and runs.
MYNAME=sys.argv[0]
MYPATH=os.path.realpath(__file__)
MYDIR=os.path.dirname(os.path.abspath(__file__))
MYCWD=os.getcwd()
#print "sys.argv[0] is: " + MYNAME
#print "os.path.realpath(__file__) is: " + MYPATH
#print "os.path.dirname(os.path.abspath(__fiel__)) is: " + MYDIR
#print "os.getcwd() is: " + MYCWD
if __name__ == '__main__':
    # Quad comes from the star-import above; signature assumed to be
    # (size, center, depth) -- TODO confirm against Quad.py.
    quad = Quad(360, (0,0), 0)
    # Populate with 100 random stars, then refresh internal state.
    quad.insertRandStars(100)
    quad.update()
    print "getObjectCount is: " + str(quad.getObjectCount())
    quad.printObjects()
    #for object in quad.getObjectList():
    #    print object
|
wraiden/spacewalk | refs/heads/master | backend/server/test/test_leak3.py | 10 | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import time
from DCOracle2 import connect
# Leak-reproduction script: repeatedly execute a trivial Oracle query and
# watch the process (pid printed below) for memory growth. Runs forever.
print(os.getpid())
dbh = connect('rhnuser/rhnuser@webdev')
h = dbh.prepare("select 1 from dual")
start = time.time()
i = 0
while 1:
    h.execute()
    if 0:
        # Flip to 1 to also fetch the row on each iteration.
        print(h.fetchone_dict())
    else:
        # Otherwise just log iteration count and elapsed seconds.
        print(i, "%.3f" % (time.time() - start))
    i = i + 1
    time.sleep(.01)
|
vinutah/projects | refs/heads/master | ml/pde/fd1d_heat_explicit.py | 1 | #!/usr/bin/env python
def save_training_data(dep_var, feature_1, feature_2, feature_3, mode):
    """Append one libSVM-format row to ./data/training/<mode>_train.svm.

    The row is "<dep_var> 1:<f1> 2:<f2> 3:<f3>"; the directory is assumed
    to exist already.
    """
    path = './data/training/'
    filename = path + str(mode) + '_train.svm'
    record = '{0} 1:{1} 2:{2} 3:{3}\n'.format(dep_var, feature_1,
                                              feature_2, feature_3)
    with open(filename, 'a') as f:
        f.write(record)
def readweights(weightsFile, w):
    """Append the last three (stripped) lines of *weightsFile* to *w*.

    Lines are appended in reverse file order (last line first), matching the
    weight ordering fd1d_heat_explicit expects in 'ml_model' mode.

    :param weightsFile: path of a text file with at least three lines.
    :param w: list extended in place.
    :return: the same list *w*, for convenience.
    """
    with open(weightsFile, 'r') as f:
        lines = f.readlines()
    # BUG FIX: the original also called f.close() even though the `with`
    # context manager already closes the file; the redundant call is removed.
    w.append(lines[-1].strip())
    w.append(lines[-2].strip())
    w.append(lines[-3].strip())
    return w
def fd1d_heat_explicit ( x_num, x, t, dt, cfl, rhs, bc, h, mode , weightsFile):
    """Advance the 1-D heat equation one explicit finite-difference step.

    :param x_num: number of spatial nodes.
    :param x: node coordinates.
    :param t: current time.
    :param dt: time step.
    :param cfl: diffusion number used in the stencil (stability is assumed
        to be enforced by the caller -- TODO confirm).
    :param rhs: callable(x_num, x, t) returning the source term per node.
    :param bc: callable applying boundary conditions to the updated solution.
    :param h: current solution values at the nodes.
    :param mode: modes containing 'original' use the analytic stencil and log
        training rows; mode == 'ml_model' uses learned weights instead.
    :param weightsFile: path of the weights file read in 'ml_model' mode.
    :return: solution array at time t + dt.
    """
    import numpy as np
    h_new = np.zeros ( x_num )
    f = rhs ( x_num, x, t )
    for c in range ( 1, x_num - 1 ):
        l = c - 1
        r = c + 1
        exeKey = 'original'
        # NOTE(review): substring test -- any mode containing 'original'
        # (e.g. 'original_v2') takes this branch.
        if exeKey in mode:
            # Classic explicit update plus source term; each row is also
            # logged as (target, left, center, right) training data.
            h_new[c] = h[c] + cfl * ( h[l] - 2.0 * h[c] + h[r] ) + dt * f[c]
            save_training_data(h_new[c] , h[l] , h[c] , h[r], mode )
        if mode == 'ml_model':
            # NOTE(review): the weights file is re-read for every grid point;
            # hoisting this out of the loop would avoid repeated I/O.
            w = list()
            w = readweights(weightsFile,w)
            #print 'w[0]=%f' % ( float(str(w[0])) )
            #print 'w[1]=%f' % ( float(str(w[1])) )
            #print 'w[2]=%f' % ( float(str(w[2])) )
            #
            #print 'h[l]=%f' % ( float(str(h[l])) )
            #print 'h[c]=%f' % ( float(str(h[c])) )
            #print 'h[r]=%f' % ( float(str(h[r])) )
            w1 = float(str(w[0]))
            w2 = float(str(w[1]))
            w3 = float(str(w[2]))
            f1 = float(str(h[l]))
            f2 = float(str(h[c]))
            f3 = float(str(h[r]))
            # Learned surrogate: weighted sum of the three-point stencil.
            h_new[c] = w1*f1 + w2*f2 + w3*f3
    h_new = bc ( x_num, x, t + dt, h_new, mode )
    return h_new
|
sminki/android_kernel_sony_u8500 | refs/heads/master | arch/ia64/scripts/unwcheck.py | 13143 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Require exactly one argument: the object file to check.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)
# READELF env var may point at a cross-toolchain readelf binary.
readelf = os.getenv("READELF", "readelf")
# Matches function-start lines and region-length lines of `readelf -u`.
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Verify the unwind regions cover the whole function: the instruction
    # slot count must equal the sum of the rlen values. Reads the module
    # globals start/end (set by the parsing loop) for anonymous functions.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
# Running totals across all functions in the object file.
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
# Parse `readelf -u` output: a start line opens a new function; rlen lines
# accumulate unwind-region lengths for the current one.
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # Finish the previous function before starting the next.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64: each 16-byte bundle holds 3 instruction slots.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Flush the final function.
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
|
tensorflow/probability | refs/heads/main | tensorflow_probability/python/layers/distribution_layer_test.py | 1 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# Dependency imports
import numpy as np
import six
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import layers as tfpl
from tensorflow_probability.python.internal import test_util
tfk = tf.keras
tfkl = tf.keras.layers
def _logit_avg_expit(t):
  """Computes `logit(mean(expit(t)))` in a numerically stable manner."""
  # mean(expit(t)) in log space: logsumexp(log_sigmoid(t)) - log(n),
  # where log_sigmoid(t) == -softplus(-t).
  n = tf.cast(tf.shape(t)[0], t.dtype)
  log_sigmoids = -tf.nn.softplus(-t)
  log_avg_prob = tf.reduce_logsumexp(log_sigmoids, axis=0) - tf.math.log(n)
  # logit(p) = log(p) - log1p(-p).
  return log_avg_prob - tf.math.log1p(-tf.exp(log_avg_prob))
def _vec_pad(x, value=0):
  """Prepends one `value` element along the last axis of `x`."""
  # Pad spec: [0, 0] for every leading axis, [1, 0] for the last axis.
  leading = tf.zeros([tf.rank(x) - 1, 2], dtype=tf.int32)
  pad_spec = tf.concat([leading, [[1, 0]]], axis=0)
  return tf.pad(x, paddings=pad_spec, constant_values=value)
def _unwrap_tensor_coercible(dist):
inner_dist = getattr(dist, 'tensor_distribution', dist)
if inner_dist is dist:
return inner_dist
return _unwrap_tensor_coercible(inner_dist)
# TODO(b/143642032): Figure out how to solve issues with save/load, so that we
# can decorate all of these tests with @test_util.test_all_tf_execution_regimes
@test_util.test_graph_and_eager_modes
class EndToEndTest(test_util.TestCase):
  """Test tfp.layers work in all three Keras APIs.
  For end-to-end tests we fit a Variational Autoencoder (VAE) because this
  requires chaining two Keras models, an encoder and decoder. Chaining two
  models is important because making a `Distribution` as output by a Keras model
  the input of another Keras model--and concurrently fitting both--is the
  primary value-add of using the `tfp.layers.DistributionLambda`. Otherwise,
  under many circumstances you can directly return a Distribution from a Keras
  layer, as long as the Distribution base class has a tensor conversion function
  registered via `tf.register_tensor_conversion_function`.
  Fundamentally, there are three ways to be Keras models:
  1. `tf.keras.Sequential`
  2. Functional API
  3. Subclass `tf.keras.Model`.
  Its important to have end-to-end tests for all three, because #1 and #2 call
  `__call__` and `call` differently. (#3's call pattern depends on user
  implementation details, but in general ends up being either #1 or #2.)
  """
  def setUp(self):
    # Tiny 2x2x1 binary "images"; sizes kept small so the fits stay fast.
    self.encoded_size = 2
    self.input_shape = [2, 2, 1]
    self.train_size = 10000
    self.test_size = 1000
    self.x = (np.random.rand(
        self.train_size, *self.input_shape) > 0.75).astype(np.float32)
    self.x_test = (np.random.rand(
        self.test_size, *self.input_shape) > 0.75).astype(np.float32)
    super(EndToEndTest, self).setUp()
  def test_keras_sequential_api(self):
    """Test `DistributionLambda`s are composable via Keras `Sequential` API."""
    prior_model = tfk.Sequential([
        tfpl.VariableLayer(shape=[self.encoded_size]),
        tfpl.DistributionLambda(
            lambda t: tfd.Independent(tfd.Normal(loc=t, scale=1), # pylint: disable=g-long-lambda
                                      reinterpreted_batch_ndims=1)),
    ])
    beta = tf.Variable(0.9, name='beta') # "beta" as in beta-VAE.
    encoder_model = tfk.Sequential([
        tfkl.InputLayer(input_shape=self.input_shape),
        tfkl.Flatten(),
        tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(self.encoded_size)),
        tfpl.MultivariateNormalTriL(
            self.encoded_size,
            activity_regularizer=tfpl.KLDivergenceRegularizer(
                prior_model, weight=beta)),
    ])
    decoder_model = tfk.Sequential([
        tfkl.InputLayer(input_shape=[self.encoded_size]),
        tfkl.Dense(tfpl.IndependentBernoulli.params_size(self.input_shape)),
        tfpl.IndependentBernoulli(self.input_shape, tfd.Bernoulli.logits),
    ])
    vae_model = tfk.Model(
        inputs=encoder_model.inputs,
        # TODO(b/139437503): remove training=False once cl/263432058 hits
        # nightly.
        outputs=decoder_model(encoder_model.outputs[0], training=False))
    self.assertLen(vae_model.trainable_weights, 4 + 1 + 1)
    def accuracy(x, rv_x):
      # Fraction of pixels whose mode matches the input, per example.
      rv_x = getattr(rv_x, '_tfp_distribution', rv_x)
      return tf.reduce_mean(
          tf.cast(tf.equal(x, rv_x.mode()), x.dtype),
          axis=tf.range(-rv_x.event_shape.ndims, 0))
    vae_model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.5),
        loss=lambda x, rv_x: -rv_x.log_prob(x),
        metrics=[accuracy])
    self.evaluate([v.initializer for v in vae_model.variables])
    vae_model.fit(self.x, self.x,
                  batch_size=25,
                  epochs=1,
                  verbose=True,
                  validation_data=(self.x_test, self.x_test),
                  shuffle=True)
    yhat = vae_model(tf.convert_to_tensor(self.x_test))
    self.assertIsInstance(yhat.tensor_distribution, tfd.Independent)
    self.assertIsInstance(yhat.tensor_distribution.distribution, tfd.Bernoulli)
  def test_keras_functional_api(self):
    """Test `DistributionLambda`s are composable via Keras functional API."""
    beta = tf.Variable( # 0 vars since not trainable.
        0.9, trainable=False, name='beta') # "beta" as in beta-VAE
    encoder_model = [
        tfkl.Flatten(),
        tfkl.Dense(10, activation='relu'), # 2 vars
        tfkl.Dense(tfpl.MultivariateNormalTriL.params_size( # 2 vars
            self.encoded_size)),
        tfpl.MultivariateNormalTriL(self.encoded_size),
        tfpl.KLDivergenceAddLoss(
            tfd.Independent(
                tfd.Normal(
                    loc=tf.Variable([0., 0.]), # 1 var
                    scale=tfp.util.TransformedVariable( # 1 var
                        1., bijector=tfb.Exp())),
                reinterpreted_batch_ndims=1),
            weight=beta),
    ]
    decoder_model = [
        tfkl.Dense(10, activation='relu'), # 2 vars
        tfkl.Dense(tfpl.IndependentBernoulli.params_size( # 2 vars
            self.input_shape)),
        tfpl.IndependentBernoulli(self.input_shape, tfd.Bernoulli.logits),
    ]
    images = tfkl.Input(shape=self.input_shape)
    encoded = functools.reduce(lambda x, f: f(x), encoder_model, images)
    decoded = functools.reduce(lambda x, f: f(x), decoder_model, encoded)
    vae_model = tfk.Model(inputs=images, outputs=decoded)
    vae_model.compile(
        optimizer=tf.optimizers.Adam(),
        loss=lambda x, rv_x: -rv_x.log_prob(x),
        metrics=[])
    self.assertLen(vae_model.trainable_weights,
                   (2 + 2) + (2 + 2) + (1 + 1) + 0)
    self.evaluate([v.initializer for v in vae_model.variables])
    vae_model.fit(self.x, self.x,
                  batch_size=25,
                  epochs=1,
                  verbose=True,
                  validation_data=(self.x_test, self.x_test),
                  shuffle=True)
    yhat = vae_model(tf.convert_to_tensor(self.x_test))
    self.assertIsInstance(yhat.tensor_distribution, tfd.Independent)
    self.assertIsInstance(yhat.tensor_distribution.distribution, tfd.Bernoulli)
  def test_keras_model_api(self):
    """Test `DistributionLambda`s are composable via Keras `Model` API."""
    class Encoder(tfk.Model):
      """Encoder."""
      def __init__(self, input_shape, encoded_size, train_size):
        super(Encoder, self).__init__()
        self._sub_layers = [
            tfkl.Flatten(),
            tfkl.Dense(10, activation='relu'),
            tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size)),
            tfpl.MultivariateNormalTriL(encoded_size),
            tfpl.KLDivergenceAddLoss(
                tfd.Independent(tfd.Normal(loc=tf.zeros(encoded_size), scale=1),
                                reinterpreted_batch_ndims=1),
                weight=0.9), # "beta" as in beta-VAE.
        ]
      def call(self, inputs):
        return functools.reduce(lambda x, f: f(x), self._sub_layers, inputs)
    class Decoder(tfk.Model):
      """Decoder."""
      def __init__(self, output_shape):
        super(Decoder, self).__init__()
        self._sub_layers = [
            tfkl.Dense(10, activation='relu'),
            tfkl.Dense(tfpl.IndependentBernoulli.params_size(output_shape)),
            tfpl.IndependentBernoulli(output_shape, tfd.Bernoulli.logits),
        ]
      def call(self, inputs):
        return functools.reduce(lambda x, f: f(x), self._sub_layers, inputs)
    encoder = Encoder(self.input_shape, self.encoded_size, self.train_size)
    decoder = Decoder(self.input_shape)
    images = tfkl.Input(shape=self.input_shape)
    encoded = encoder(images)
    decoded = decoder(encoded)
    vae_model = tfk.Model(inputs=images, outputs=decoded)
    vae_model.compile(
        optimizer=tf.optimizers.Adam(),
        loss=lambda x, rv_x: -rv_x.log_prob(x),
        metrics=[])
    vae_model.fit(self.x, self.x,
                  batch_size=25,
                  epochs=1,
                  validation_data=(self.x_test, self.x_test))
    yhat = vae_model(tf.convert_to_tensor(self.x_test))
    self.assertIsInstance(yhat.tensor_distribution, tfd.Independent)
    self.assertIsInstance(yhat.tensor_distribution.distribution, tfd.Bernoulli)
  def test_keras_sequential_api_multiple_draws(self):
    # Encoder emits multiple posterior samples; the decoder averages the
    # resulting Bernoullis via _logit_avg_expit.
    num_draws = 2
    encoder_model = tfk.Sequential([
        tfkl.InputLayer(input_shape=self.input_shape),
        tfkl.Flatten(),
        tfkl.Dense(10, activation='relu'),
        tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(self.encoded_size)),
        tfpl.MultivariateNormalTriL(self.encoded_size,
                                    lambda s: s.sample(num_draws, seed=42)),
        tfpl.KLDivergenceAddLoss(
            tfd.MultivariateNormalDiag(
                loc=tf.Variable(tf.zeros([self.encoded_size]))),
            weight=0.9), # "beta" as in beta-VAE.
    ])
    decoder_model = tfk.Sequential([
        tfkl.InputLayer(input_shape=[self.encoded_size]),
        tfkl.Dense(10, activation='relu'),
        tfkl.Dense(tfpl.IndependentBernoulli.params_size(
            self.input_shape)),
        tfkl.Lambda(_logit_avg_expit), # Same as averaging the Bernoullis.
        tfpl.IndependentBernoulli(self.input_shape, tfd.Bernoulli.logits),
    ])
    vae_model = tfk.Model(
        inputs=encoder_model.inputs,
        outputs=decoder_model(encoder_model.outputs[0]))
    vae_model.compile(
        optimizer=tf.optimizers.Adam(),
        loss=lambda x, rv_x: -rv_x.log_prob(x),
        metrics=[])
    self.assertLen(encoder_model.trainable_variables, (2 + 2) + 1)
    self.assertLen(decoder_model.trainable_variables, 2 + 2)
    self.assertLen(vae_model.trainable_variables, (2 + 2) + (2 + 2) + 1)
    vae_model.fit(self.x, self.x,
                  batch_size=25,
                  epochs=1,
                  steps_per_epoch=1, # Usually `n // batch_size`.
                  validation_data=(self.x_test, self.x_test))
    yhat = vae_model(tf.convert_to_tensor(self.x_test))
    self.assertIsInstance(yhat.tensor_distribution, tfd.Independent)
    self.assertIsInstance(yhat.tensor_distribution.distribution, tfd.Bernoulli)
  def test_side_variable_is_auto_tracked(self):
    # `s` is the "side variable".
    s = tfp.util.TransformedVariable(1., tfb.Softplus())
    prior = tfd.Normal(tf.Variable(0.), 1.)
    linear_regression = tf.keras.Sequential([
        tf.keras.layers.Dense(1),
        tfp.layers.DistributionLambda(
            lambda t: tfd.Normal(t, s),
            activity_regularizer=tfpl.KLDivergenceRegularizer(prior)),
    ])
    linear_regression.build(tf.TensorShape([1, 3]))
    # Dense kernel+bias, s's pretransformed input, and the prior's loc must
    # all be auto-tracked as trainable variables (4 in total).
    self.assertLen(linear_regression.trainable_variables, 4)
    self.assertIn(id(s.pretransformed_input),
                  [id(x) for x in linear_regression.trainable_variables])
    self.assertIn(id(prior.loc),
                  [id(x) for x in linear_regression.trainable_variables])
@test_util.test_graph_and_eager_modes
class DistributionLambdaSerializationTest(test_util.TestCase):
  def assertSerializable(self, model, batch_size=1):
    """Assert that a model can be saved/loaded via Keras Model.save/load_model.
    Args:
      model: A Keras model that outputs a `tfd.Distribution`.
      batch_size: The batch size to use when checking that the model produces
        the same results as a serialized/deserialized copy. Default value: 1.
    """
    batch_shape = [batch_size]
    input_shape = batch_shape + model.input.shape[1:].as_list()
    dtype = model.input.dtype.as_numpy_dtype
    model_file = self.create_tempfile()
    model.save(model_file.full_path, save_format='h5')
    model_copy = tfk.models.load_model(model_file.full_path)
    x = np.random.uniform(-3., 3., input_shape).astype(dtype)
    # Means must agree between the original and the round-tripped model...
    model_x_mean = self.evaluate(model(x).mean())
    self.assertAllEqual(model_x_mean, self.evaluate(model_copy(x).mean()))
    output_shape = model_x_mean.shape
    y = np.random.uniform(0., 1., output_shape).astype(dtype)
    # ...and so must log-probs of a random target.
    self.assertAllEqual(self.evaluate(model(x).log_prob(y)),
                        self.evaluate(model_copy(x).log_prob(y)))
def assertExportable(self, model, batch_size=1):
"""Assert a Keras model supports export_saved_model/load_from_saved_model.
Args:
model: A Keras model with Tensor output.
batch_size: The batch size to use when checking that the model produces
the same results as a serialized/deserialized copy. Default value: 1.
"""
batch_shape = [batch_size]
input_shape = batch_shape + model.input.shape[1:].as_list()
dtype = model.input.dtype.as_numpy_dtype
model_dir = self.create_tempdir()
tf1.keras.experimental.export_saved_model(model, model_dir.full_path)
model_copy = tf1.keras.experimental.load_from_saved_model(
model_dir.full_path)
x = np.random.uniform(-3., 3., input_shape).astype(dtype)
self.assertAllEqual(self.evaluate(model(x)), self.evaluate(model_copy(x)))
self.assertAllEqual(model.predict(x), model_copy.predict(x))
def test_serialization(self):
model = tfk.Sequential([
tfkl.Dense(2, input_shape=(5,)),
# pylint: disable=g-long-lambda
tfpl.DistributionLambda(lambda t: tfd.Normal(
loc=t[..., 0:1], scale=tf.exp(t[..., 1:2])))
])
self.assertSerializable(model)
model = tfk.Sequential([
tfkl.Dense(2, input_shape=(5,)),
# pylint: disable=g-long-lambda
tfpl.DistributionLambda(lambda t: tfd.Normal(
loc=t[..., 0:1], scale=tf.exp(t[..., 1:2]))),
tfkl.Lambda(lambda d: d.mean() + d.stddev())
])
self.assertExportable(model, batch_size=4)
@staticmethod
def _make_distribution(t):
return tfpl.MixtureSameFamily.new(t, 3, tfpl.IndependentNormal([2]))
def test_serialization_static_method(self):
model = tfk.Sequential([
tfkl.Dense(15, input_shape=(5,)),
tfpl.DistributionLambda(
# pylint: disable=unnecessary-lambda
lambda t: DistributionLambdaSerializationTest._make_distribution(t))
])
model.compile(optimizer='adam', loss='mse')
self.assertSerializable(model, batch_size=3)
model = tfk.Sequential([
tfkl.Dense(15, input_shape=(5,)),
tfpl.DistributionLambda(
DistributionLambdaSerializationTest._make_distribution,
convert_to_tensor_fn=tfd.Distribution.mean),
tfkl.Lambda(lambda x: x + 1.)
])
model.compile(optimizer='adam', loss='mse')
self.assertExportable(model)
def test_serialization_closure_over_lambdas_tensors_and_numpy_array(self):
if six.PY2 and not tf.executing_eagerly():
self.skipTest('Serialization of constant graph-mode Tensors is not '
'supported under Python 2.')
num_components = np.array(3)
one = tf.convert_to_tensor(1)
mk_ind_norm = lambda event_shape: tfpl.IndependentNormal(event_shape + one)
def make_distribution(t):
return tfpl.MixtureSameFamily.new(
t, num_components, mk_ind_norm(1))
model = tfk.Sequential([
tfkl.Dense(15, input_shape=(5,)),
tfpl.DistributionLambda(make_distribution)
])
self.assertSerializable(model, batch_size=4)
model = tfk.Sequential([
tfkl.Dense(15, input_shape=(5,)),
# pylint: disable=unnecessary-lambda
tfpl.DistributionLambda(lambda t: make_distribution(t)),
tfkl.Lambda(lambda d: d.mean() + d.stddev())
])
self.assertExportable(model, batch_size=2)
@test_util.test_graph_and_eager_modes
class DistributionLambdaVariableCreation(test_util.TestCase):
  """Variable tracking when the lambda closes over whole sub-networks."""

  def test_variable_creation(self):
    """Fit a model whose `DistributionLambda` closes over two sub-models."""
    conv1 = tfkl.Convolution2D(filters=1, kernel_size=[1, 3])
    conv2 = tfkl.Convolution2D(filters=1, kernel_size=[2, 1])
    pad1 = tfkl.ZeroPadding2D(padding=((0, 0), (1, 1)))
    pad2 = tfkl.ZeroPadding2D(padding=((1, 1), (0, 0)))
    loc = tfk.Sequential([conv1, pad1])
    scale = tfk.Sequential([conv2, pad2])
    x = tfkl.Input(shape=(3, 3, 1))
    normal = tfpl.DistributionLambda(
        lambda x: tfd.Normal(loc=loc(x), scale=tf.exp(scale(x))))
    # Attach the sub-networks as attributes of the layer so Keras tracks
    # their variables; the lambda's closure alone would not expose them.
    normal._loc_net = loc
    normal._scale_net = scale
    model = tfk.Model(x, normal(x))  # pylint: disable=unused-variable
    model.compile(
        optimizer=tf.optimizers.Adam(),
        loss=lambda x, rv_x: -rv_x.log_prob(x),
        metrics=[])
    x_train = np.random.rand(1000, 3, 3, 1).astype(np.float32)
    x_test = np.random.rand(100, 3, 3, 1).astype(np.float32)
    # The test passes if training runs end-to-end without error.
    model.fit(x_train, x_train,
              batch_size=25,
              epochs=5,
              steps_per_epoch=10,
              validation_data=(x_test, x_test))
@test_util.test_graph_and_eager_modes
class KLDivergenceAddLossTest(test_util.TestCase):
  """Tests for the `tfpl.KLDivergenceAddLoss` layer.

  The approximate-KL and exact-KL tests previously duplicated ~30 lines of
  setup/verification; the shared logic now lives in `_run_kl_loss_test` and
  each test only builds its own model and chooses a tolerance.
  """

  def _run_kl_loss_test(self, model, prior, event_size, kl_tolerance):
    """Checks that `model.losses[0]` matches the analytic KL to `prior`.

    Args:
      model: A `tfk.Sequential` of `MultivariateNormalTriL` followed by a
        `KLDivergenceAddLoss` against `prior`.
      prior: The prior distribution used by the KL layer.
      event_size: Event size of the posterior; used to slice the loss input.
      kl_tolerance: Allowed absolute error between the layer's loss and the
        analytic `tfd.kl_divergence` value.
    """
    loc = [-1., 1.]
    scale_tril = [[1.1, 0.],
                  [0.2, 1.3]]
    actual_kl = tfd.kl_divergence(
        tfd.MultivariateNormalTriL(loc, scale_tril), prior)
    # Insert a leading dimension to the input, such that the Keras
    # batch-shape in `model.fit` is the same for `x` and `y`.
    x = tf.concat(
        [loc, tfb.FillScaleTriL().inverse(scale_tril)], axis=0)[tf.newaxis]
    y = model(x)
    self.assertEqual(1, len(model.losses))
    y = model(x)
    # In eager mode, multiple calls won't append new losses.
    self.assertEqual(1 if tf.executing_eagerly() else 2, len(model.losses))
    [loc_, scale_tril_, actual_kl_, layer_kl_] = self.evaluate([
        y.loc, y.scale.to_dense(), actual_kl, model.losses[0]])
    self.assertAllClose([loc], loc_, atol=0., rtol=1e-5)
    self.assertAllClose([scale_tril], scale_tril_, atol=0., rtol=1e-5)
    self.assertNear(actual_kl_, layer_kl_, err=kl_tolerance)
    # Finally check that the model still trains with the added loss.
    model.compile(
        optimizer=tf.optimizers.Adam(),
        loss=lambda x, dist: -dist.log_prob(x[0, :, :event_size]),
        metrics=[])
    model.fit(x=x,
              # Append a dimension to `y` to account for sample-shape.
              y=x[tf.newaxis],
              batch_size=25,
              epochs=1,
              steps_per_epoch=1)  # Usually `n // batch_size`.

  def test_approx_kl(self):
    """Monte-Carlo KL (1e3 samples) is close to the analytic value."""
    event_size = 2
    prior = tfd.MultivariateNormalDiag(loc=tf.zeros(event_size))
    model = tfk.Sequential([
        tfpl.MultivariateNormalTriL(event_size,
                                    lambda s: s.sample(int(1e3), seed=42)),
        tfpl.KLDivergenceAddLoss(prior, test_points_reduce_axis=0),
    ])
    # Sampling noise dominates; allow a loose tolerance.
    self._run_kl_loss_test(model, prior, event_size, kl_tolerance=0.15)

  def test_use_exact_kl(self):
    """`use_exact_kl=True` yields the analytic KL regardless of samples."""
    event_size = 2
    prior = tfd.MultivariateNormalDiag(loc=tf.zeros(event_size))
    # Use a small number of samples because we want to verify that
    # we calculated the exact KL divergence and not the one from sampling.
    model = tfk.Sequential([
        tfpl.MultivariateNormalTriL(event_size,
                                    lambda s: s.sample(3, seed=42)),
        tfpl.KLDivergenceAddLoss(prior, use_exact_kl=True),
    ])
    self._run_kl_loss_test(model, prior, event_size, kl_tolerance=1e-5)
@test_util.test_graph_and_eager_modes
class MultivariateNormalTriLTest(test_util.TestCase):
  """Tests for the `tfpl.MultivariateNormalTriL` distribution layer."""

  def _check_distribution(self, t, x):
    """Check `x` is an MVN-TriL whose parameters round-trip back to `t`."""
    self.assertIsInstance(x, tfd.MultivariateNormalTriL)
    t_back = tf.concat([
        x.loc, tfb.FillScaleTriL().inverse(x.scale.to_dense())], axis=-1)
    self.assertAllClose(*self.evaluate([t, t_back]), atol=1e-6, rtol=1e-5)

  def test_new(self):
    """The static `new` builds the distribution from a params tensor."""
    d = 4
    p = tfpl.MultivariateNormalTriL.params_size(d)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = tfpl.MultivariateNormalTriL.new(t, d, validate_args=True)
    self._check_distribution(t, x)

  def test_layer(self):
    """Calling the layer produces the same distribution as `new`."""
    d = 4
    p = tfpl.MultivariateNormalTriL.params_size(d)
    layer = tfpl.MultivariateNormalTriL(d, tfd.Distribution.mean)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = layer(t)
    self._check_distribution(t, x.tensor_distribution)

  def test_doc_string(self):
    """End-to-end fit of the doc-string regression example."""
    # Load data.
    n = int(1e3)
    scale_tril = np.array([[1.6180, 0.],
                           [-2.7183, 3.1416]]).astype(np.float32)
    scale_noise = 0.01
    x = self.evaluate(tfd.Normal(loc=0, scale=1).sample([n, 2]))
    eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 2])
    y = self.evaluate(tf.matmul(x, scale_tril) + eps)
    d = y.shape[-1]
    # To save testing time, let's encode the answer (i.e., _cheat_). Note: in
    # writing this test we verified the correct answer is achieved with random
    # initialization.
    true_kernel = np.pad(scale_tril, [[0, 0], [0, 3]], 'constant')
    true_bias = np.array([0, 0, np.log(scale_noise), 0, np.log(scale_noise)])
    # Create model.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(
            tfpl.MultivariateNormalTriL.params_size(d),
            kernel_initializer=lambda s, **_: true_kernel,
            bias_initializer=lambda s, **_: true_bias),
        tfpl.MultivariateNormalTriL(d),
    ])
    # Fit.
    model.compile(
        optimizer=tf.optimizers.Adam(),
        loss=lambda y, model: -model.log_prob(y),
        metrics=[])
    batch_size = 100
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,  # One ping only.
              steps_per_epoch=n // batch_size)
    # Since we started at the answer, the fitted weights should stay there.
    self.assertAllClose(true_kernel, model.get_weights()[0],
                        atol=1e-2, rtol=1e-3)
    self.assertAllClose(true_bias, model.get_weights()[1],
                        atol=1e-2, rtol=1e-3)
@test_util.test_graph_and_eager_modes
class OneHotCategoricalTest(test_util.TestCase):
  """Tests for the `tfpl.OneHotCategorical` distribution layer."""

  def _check_distribution(self, t, x):
    """Check `x` is a OneHotCategorical whose logits equal `t`."""
    self.assertIsInstance(x, tfd.OneHotCategorical)
    [t_, x_logits_, x_probs_, mean_] = self.evaluate([
        t, x.logits_parameter(), x.probs_parameter(), x.mean()])
    self.assertAllClose(t_, x_logits_, atol=1e-6, rtol=1e-5)
    # For a OneHotCategorical, the mean equals the class probabilities.
    self.assertAllClose(x_probs_, mean_, atol=1e-6, rtol=1e-5)

  def test_new(self):
    """The static `new` builds the distribution from a params tensor."""
    d = 4
    p = tfpl.OneHotCategorical.params_size(d)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = tfpl.OneHotCategorical.new(t, d, validate_args=True)
    self._check_distribution(t, x)

  def test_layer(self):
    """Calling the layer produces the same distribution as `new`."""
    d = 4
    p = tfpl.OneHotCategorical.params_size(d)
    layer = tfpl.OneHotCategorical(d, validate_args=True)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = layer(t)
    self._check_distribution(t, x.tensor_distribution)

  def test_doc_string(self):
    """End-to-end fit of the doc-string classification example."""
    # Load data.
    n = int(1e4)
    scale_noise = 0.01
    x = self.evaluate(tfd.Normal(loc=0, scale=1).sample([n, 2]))
    eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 1])
    # NOTE(review): `_vec_pad` is a module-level helper defined elsewhere in
    # this file; presumably it pads a zero logit for the reference class.
    y = self.evaluate(tfd.OneHotCategorical(
        logits=_vec_pad(
            0.3142 + 1.6180 * x[..., :1] - 2.7183 * x[..., 1:] + eps),
        dtype=tf.float32).sample())
    d = y.shape[-1]
    # Create model.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(tfpl.OneHotCategorical.params_size(d) - 1),
        tf.keras.layers.Lambda(_vec_pad),
        tfpl.OneHotCategorical(d),
    ])
    # Fit.
    model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.5),
        loss=lambda y, model: -model.log_prob(y),
        metrics=[])
    batch_size = 100
    # Test passes if the fit runs end-to-end without error.
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=n // batch_size,
              shuffle=True)
@test_util.test_graph_and_eager_modes
class CategoricalMixtureOfOneHotCategoricalTest(test_util.TestCase):
  """Tests for the `tfpl.CategoricalMixtureOfOneHotCategorical` layer."""

  def _check_distribution(self, t, x):
    """Check the mixture's structure and that its params round-trip to `t`.

    NOTE(review): the reshape below hard-codes batch shape `[2, 3]`, matching
    the `[2, 3, p]` sample used by both callers in this class.
    """
    self.assertIsInstance(_unwrap_tensor_coercible(x), tfd.MixtureSameFamily)
    self.assertIsInstance(_unwrap_tensor_coercible(x.mixture_distribution),
                          tfd.Categorical)
    self.assertIsInstance(_unwrap_tensor_coercible(x.components_distribution),
                          tfd.OneHotCategorical)
    t_back = tf.concat([
        x.mixture_distribution.logits,
        tf.reshape(x.components_distribution.logits, shape=[2, 3, -1]),
    ], axis=-1)
    [
        t_,
        t_back_,
        x_mean_,
        x_log_mean_,
        sample_mean_,
    ] = self.evaluate([
        t,
        t_back,
        x.mean(),
        x.log_mean(),
        tf.reduce_mean(x.sample(int(10e3), seed=42), axis=0),
    ])
    self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)
    self.assertAllClose(x_mean_, np.exp(x_log_mean_), atol=1e-6, rtol=1e-5)
    # Monte-Carlo mean should approximate the analytic mean (loose tolerance).
    self.assertAllClose(sample_mean_, x_mean_, atol=1e-3, rtol=0.1)

  def test_new(self):
    """The static `new` builds the mixture from a params tensor."""
    k = 2  # num components
    d = 4  # event size
    p = tfpl.CategoricalMixtureOfOneHotCategorical.params_size(d, k)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = tfpl.CategoricalMixtureOfOneHotCategorical.new(
        t, d, k, validate_args=True)
    self._check_distribution(t, x)

  def test_layer(self):
    """Calling the layer produces the same mixture as `new`."""
    k = 2  # num components
    d = 4  # event size
    p = tfpl.CategoricalMixtureOfOneHotCategorical.params_size(d, k)
    layer = tfpl.CategoricalMixtureOfOneHotCategorical(
        d, k, validate_args=True)
    t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
    x = layer(t)
    self._check_distribution(t, x)

  def test_doc_string(self):
    """End-to-end fit; only checks execution and output types."""
    # Load data.
    n = int(1e3)
    scale_noise = 0.01
    x = self.evaluate(tfd.Normal(loc=0, scale=1).sample([n, 2]))
    eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 1])
    y = self.evaluate(tfd.OneHotCategorical(
        logits=_vec_pad(
            0.3142 + 1.6180 * x[..., :1] - 2.7183 * x[..., 1:] + eps),
        dtype=tf.float32).sample())
    d = y.shape[-1]
    # Create model.
    k = 2
    p = tfpl.CategoricalMixtureOfOneHotCategorical.params_size(d, k)
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(p),
        tfpl.CategoricalMixtureOfOneHotCategorical(d, k),
    ])
    # Fit.
    model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.5),
        loss=lambda y, model: -model.log_prob(y),
        metrics=[])
    batch_size = 100
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=1,  # Usually `n // batch_size`.
              shuffle=True)
    yhat = model(x)
    self.assertIsInstance(_unwrap_tensor_coercible(yhat), tfd.MixtureSameFamily)
    self.assertIsInstance(
        _unwrap_tensor_coercible(yhat.mixture_distribution), tfd.Categorical)
    self.assertIsInstance(
        _unwrap_tensor_coercible(yhat.components_distribution),
        tfd.OneHotCategorical)
    # TODO(b/120221303): For now we just check that the code executes and we get
    # back a distribution instance. Better would be to change the data
    # generation so the model becomes well-specified (and we can check correctly
    # fitted params). However, not doing this test is not critical since all
    # components are unit-tested. (Ie, what we really want here--but don't
    # strictly need--is another end-to-end test.)
@test_util.test_graph_and_eager_modes
class _IndependentLayerTest(object):
  """Base class for testing independent distribution layers.

  Instances of subclasses must set:
    self.layer_class: The independent distribution layer class.
    self.dist_class: The underlying `tfd.Distribution` class.
    self.dtype: The data type for the parameters passed to the layer.
    self.use_static_shape: Whether or not test tensor inputs should have
      statically-known shapes.
  """

  def _distribution_to_params(self, distribution, batch_shape):
    """Given a self.layer_class instance, return a tensor of its parameters."""
    raise NotImplementedError

  def _build_tensor(self, ndarray, dtype=None):
    """Wrap `ndarray` honoring `self.dtype` and `self.use_static_shape`."""
    # Enforce parameterized dtype and static/dynamic testing.
    ndarray = np.asarray(ndarray).astype(
        dtype if dtype is not None else self.dtype)
    return tf1.placeholder_with_default(
        ndarray, shape=ndarray.shape if self.use_static_shape else None)

  def _check_distribution(self, t, x, batch_shape):
    """Check `x` wraps the expected class and its params round-trip to `t`."""
    self.assertIsInstance(x, tfd.Independent)
    self.assertIsInstance(x.distribution, self.dist_class)
    self.assertEqual(self.dtype, x.dtype)
    t_back = self._distribution_to_params(x.distribution, batch_shape)
    [t_, t_back_] = self.evaluate([t, t_back])
    self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)
    self.assertEqual(self.dtype, t_back_.dtype)

  def test_new(self):
    """The static `new` builds the distribution from a params tensor."""
    batch_shape = self._build_tensor([2], dtype=np.int32)
    event_shape = self._build_tensor([2, 1, 2], dtype=np.int32)
    p = self.layer_class.params_size(event_shape)
    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)
    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
    x = self.layer_class.new(t, event_shape, validate_args=True)
    self._check_distribution(t, x, batch_shape)

  def test_layer(self):
    """Calling the layer (default scalar event shape) wraps the same dist."""
    batch_shape = self._build_tensor([5, 5], dtype=np.int32)
    p = self.layer_class.params_size()
    self.assertDTypeEqual(p, np.int32)
    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)
    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
    layer = self.layer_class(validate_args=True, dtype=self.dtype)
    x = layer(t)
    self._check_distribution(t, x.tensor_distribution, batch_shape)

  def test_serialization(self):
    """h5 save/load preserves the layer's mean and log_prob outputs."""
    event_shape = []
    params_size = self.layer_class.params_size(event_shape)
    batch_shape = [4, 1]
    low = self._build_tensor(-3., dtype=self.dtype)
    high = self._build_tensor(3., dtype=self.dtype)
    x = self.evaluate(tfd.Uniform(low, high).sample(
        batch_shape + [params_size], seed=42))
    model = tfk.Sequential([
        tfkl.Dense(params_size, input_shape=(params_size,), dtype=self.dtype),
        self.layer_class(event_shape, validate_args=True, dtype=self.dtype),
    ])
    model_file = self.create_tempfile()
    model.save(model_file.full_path, save_format='h5')
    model_copy = tfk.models.load_model(model_file.full_path)
    self.assertAllEqual(self.evaluate(model(x).mean()),
                        self.evaluate(model_copy(x).mean()))
    self.assertEqual(self.dtype, model(x).mean().dtype.as_numpy_dtype)
    ones = np.ones([7] + batch_shape + event_shape, dtype=self.dtype)
    self.assertAllEqual(self.evaluate(model(x).log_prob(ones)),
                        self.evaluate(model_copy(x).log_prob(ones)))

  def test_model_export(self):
    """SavedModel export/load preserves the model's Tensor output."""
    event_shape = [3, 2]
    params_size = self.layer_class.params_size(event_shape)
    batch_shape = [4]
    low = self._build_tensor(-3., dtype=self.dtype)
    high = self._build_tensor(3., dtype=self.dtype)
    x = self.evaluate(tfd.Uniform(low, high).sample(
        batch_shape + [params_size], seed=42))
    model = tfk.Sequential([
        tfkl.Dense(params_size, input_shape=(params_size,), dtype=self.dtype),
        self.layer_class(event_shape, validate_args=True,
                         convert_to_tensor_fn='mean', dtype=self.dtype),
        # NOTE: For TensorFlow to be able to serialize the graph (i.e., for
        # serving), the model must output a Tensor and not a Distribution.
        tfkl.Dense(1, dtype=self.dtype),
    ])
    model.compile(optimizer='adam', loss='mse')
    model_dir = self.create_tempdir()
    tf1.keras.experimental.export_saved_model(model, model_dir.full_path)
    model_copy = tf1.keras.experimental.load_from_saved_model(
        model_dir.full_path)
    self.assertAllEqual(self.evaluate(model(x)), self.evaluate(model_copy(x)))
    self.assertEqual(self.dtype, model(x).dtype.as_numpy_dtype)
@test_util.test_graph_and_eager_modes
class _IndependentBernoulliTest(_IndependentLayerTest):
  """Configures `_IndependentLayerTest` for `tfpl.IndependentBernoulli`."""
  layer_class = tfpl.IndependentBernoulli
  dist_class = tfd.Bernoulli

  def _distribution_to_params(self, distribution, batch_shape):
    """Flatten the Bernoulli logits back into a layer-parameter tensor."""
    flat_shape = tf.concat([batch_shape, [-1]], axis=-1)
    return tf.reshape(distribution.logits, flat_shape)
@test_util.test_graph_and_eager_modes
class IndependentBernoulliTestDynamicShape(test_util.TestCase,
                                           _IndependentBernoulliTest):
  # Run the shared IndependentBernoulli tests with float64 params and
  # dynamically-shaped (placeholder) inputs.
  dtype = np.float64
  use_static_shape = False
@test_util.test_graph_and_eager_modes
class IndependentBernoulliTestStaticShape(test_util.TestCase,
                                          _IndependentBernoulliTest):
  # Run the shared IndependentBernoulli tests with float32 params and
  # statically-known input shapes.
  dtype = np.float32
  use_static_shape = True

  def test_doc_string(self):
    """Recover known logistic-regression weights through the layer."""
    # Load data.
    n = int(1e4)
    scale_tril = np.array([[1.6180, 0.],
                           [-2.7183, 3.1416]]).astype(np.float32)
    scale_noise = 0.01
    x = self.evaluate(tfd.Normal(loc=0, scale=1).sample([n, 2]))
    eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 2])
    y = self.evaluate(tfd.Bernoulli(
        logits=tf.reshape(tf.matmul(x, scale_tril) + eps,
                          shape=[n, 1, 2, 1])).sample())
    event_shape = y.shape[1:]
    # Create model.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(
            tfpl.IndependentBernoulli.params_size(event_shape)),
        tfpl.IndependentBernoulli(event_shape),
    ])
    # Fit.
    model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.5),
        loss=lambda y, model: -model.log_prob(y))
    batch_size = 10000
    model.fit(x, y,
              batch_size=batch_size,
              epochs=100,
              shuffle=True)
    # The fitted Dense kernel/bias should approximate the generating weights.
    self.assertAllClose(scale_tril, model.get_weights()[0],
                        atol=0.15, rtol=0.15)
    self.assertAllClose([0., 0.], model.get_weights()[1],
                        atol=0.15, rtol=0.15)
@test_util.test_graph_and_eager_modes
class _IndependentLogisticTest(_IndependentLayerTest):
  """Configures `_IndependentLayerTest` for `tfpl.IndependentLogistic`."""
  layer_class = tfpl.IndependentLogistic
  dist_class = tfd.Logistic

  def _distribution_to_params(self, distribution, batch_shape):
    """Recover the (loc, unconstrained-scale) layer-parameter tensor."""
    flat_shape = tf.concat([batch_shape, [-1]], axis=-1)
    locs = tf.reshape(distribution.loc, flat_shape)
    unconstrained_scales = tfp.math.softplus_inverse(
        tf.reshape(distribution.scale, flat_shape))
    return tf.concat([locs, unconstrained_scales], -1)
@test_util.test_graph_and_eager_modes
class IndependentLogisticTestDynamicShape(test_util.TestCase,
                                          _IndependentLogisticTest):
  # Run the shared IndependentLogistic tests with float32 params and
  # dynamically-shaped (placeholder) inputs.
  dtype = np.float32
  use_static_shape = False
@test_util.test_graph_and_eager_modes
class IndependentLogisticTestStaticShape(test_util.TestCase,
                                         _IndependentLogisticTest):
  # Run the shared IndependentLogistic tests with float64 params and
  # statically-known input shapes.
  dtype = np.float64
  use_static_shape = True

  def test_doc_string(self):
    """Build the doc-string encoder and check its output shape/dtype."""
    input_shape = [28, 28, 1]
    encoded_shape = 2
    encoder = tfk.Sequential([
        tfkl.InputLayer(input_shape=input_shape, dtype=self.dtype),
        tfkl.Flatten(dtype=self.dtype),
        tfkl.Dense(10, activation='relu', dtype=self.dtype),
        tfkl.Dense(tfpl.IndependentLogistic.params_size(encoded_shape),
                   dtype=self.dtype),
        tfpl.IndependentLogistic(encoded_shape, dtype=self.dtype),
        tfkl.Lambda(lambda x: x + 0.,  # To force conversion to tensor.
                    dtype=self.dtype)
    ])
    # Test that we can run the model and get a sample.
    x = np.random.randn(*([1] + input_shape)).astype(self.dtype)
    self.assertEqual((1, 2), encoder.predict_on_batch(x).shape)
    out = encoder(tf.convert_to_tensor(x))
    self.assertEqual((1, 2), out.shape)
    self.assertEqual((1, 2), self.evaluate(out).shape)
    self.assertEqual(self.dtype, out.dtype)
@test_util.test_graph_and_eager_modes
class _IndependentNormalTest(_IndependentLayerTest):
  """Configures `_IndependentLayerTest` for `tfpl.IndependentNormal`."""
  layer_class = tfpl.IndependentNormal
  dist_class = tfd.Normal

  def _distribution_to_params(self, distribution, batch_shape):
    """Recover the (loc, unconstrained-scale) layer-parameter tensor."""
    return tf.concat([
        tf.reshape(distribution.loc, tf.concat([batch_shape, [-1]], axis=-1)),
        tfp.math.softplus_inverse(tf.reshape(
            distribution.scale, tf.concat([batch_shape, [-1]], axis=-1)))
    ], -1)

  def test_keras_sequential_with_unknown_input_size(self):
    """The layer works when its input shape is not statically known."""
    input_shape = [28, 28, 1]
    encoded_shape = self._build_tensor([2], dtype=np.int32)
    params_size = tfpl.IndependentNormal.params_size(encoded_shape)
    def reshape(x):
      # Split the trailing axis into (-1, params_size) at graph-run time.
      return tf.reshape(
          x, tf.concat([tf.shape(x)[:-1], [-1, params_size]], 0))
    # Test a Sequential model where the input to IndependentNormal does not have
    # a statically-known shape.
    encoder = tfk.Sequential([
        tfkl.InputLayer(input_shape=input_shape, dtype=self.dtype),
        tfkl.Flatten(dtype=self.dtype),
        tfkl.Dense(12, activation='relu', dtype=self.dtype),
        tfkl.Lambda(reshape, dtype=self.dtype),
        # When encoded_shape/params_size are placeholders, the input to the
        # IndependentNormal has shape (?, ?, ?) or (1, ?, ?), depending on
        # whether or not encoded_shape's shape is known.
        tfpl.IndependentNormal(encoded_shape, dtype=self.dtype),
        tfkl.Lambda(lambda x: x + 0.,  # To force conversion to tensor.
                    dtype=self.dtype)
    ])
    x = np.random.randn(*([1] + input_shape)).astype(self.dtype)
    self.assertEqual((1, 3, 2), encoder.predict_on_batch(x).shape)
    out = encoder(tf.convert_to_tensor(x))
    if tf.executing_eagerly():
      self.assertEqual((1, 3, 2), out.shape)
    elif self.use_static_shape:
      # In graph mode only the batch dimension is statically known.
      self.assertEqual([1, None, None], out.shape.as_list())
    self.assertEqual((1, 3, 2), self.evaluate(out).shape)
    self.assertEqual(self.dtype, out.dtype)
@test_util.test_graph_and_eager_modes
class IndependentNormalTestDynamicShape(test_util.TestCase,
                                        _IndependentNormalTest):
  # Run the shared IndependentNormal tests with float32 params and
  # dynamically-shaped (placeholder) inputs.
  dtype = np.float32
  use_static_shape = False
@test_util.test_graph_and_eager_modes
class IndependentNormalTestStaticShape(test_util.TestCase,
                                       _IndependentNormalTest):
  # Run the shared IndependentNormal tests with float64 params and
  # statically-known input shapes.
  dtype = np.float64
  use_static_shape = True

  def test_doc_string(self):
    """Build the doc-string encoder and check its output shape/dtype."""
    input_shape = [28, 28, 1]
    encoded_shape = 2
    encoder = tfk.Sequential([
        tfkl.InputLayer(input_shape=input_shape, dtype=self.dtype),
        tfkl.Flatten(dtype=self.dtype),
        tfkl.Dense(10, activation='relu', dtype=self.dtype),
        tfkl.Dense(tfpl.IndependentNormal.params_size(encoded_shape),
                   dtype=self.dtype),
        tfpl.IndependentNormal(encoded_shape, dtype=self.dtype),
        tfkl.Lambda(lambda x: x + 0.,  # To force conversion to tensor.
                    dtype=self.dtype)
    ])
    # Test that we can run the model and get a sample.
    x = np.random.randn(*([1] + input_shape)).astype(self.dtype)
    self.assertEqual((1, 2), encoder.predict_on_batch(x).shape)
    out = encoder(tf.convert_to_tensor(x))
    self.assertEqual((1, 2), out.shape)
    self.assertEqual((1, 2), self.evaluate(out).shape)
    self.assertEqual(self.dtype, out.dtype)
@test_util.test_graph_and_eager_modes
class _IndependentPoissonTest(_IndependentLayerTest):
  """Configures `_IndependentLayerTest` for `tfpl.IndependentPoisson`."""
  layer_class = tfpl.IndependentPoisson
  dist_class = tfd.Poisson

  def _distribution_to_params(self, distribution, batch_shape):
    """Flatten the Poisson log-rates back into a layer-parameter tensor."""
    flat_shape = tf.concat([batch_shape, [-1]], axis=-1)
    return tf.reshape(distribution.log_rate, flat_shape)
@test_util.test_graph_and_eager_modes
class IndependentPoissonTestDynamicShape(test_util.TestCase,
                                         _IndependentPoissonTest):
  # Run the shared IndependentPoisson tests with float32 params and
  # dynamically-shaped (placeholder) inputs.
  dtype = np.float32
  use_static_shape = False
@test_util.test_graph_and_eager_modes
class IndependentPoissonTestStaticShape(test_util.TestCase,
                                        _IndependentPoissonTest):
  # Run the shared IndependentPoisson tests with float64 params and
  # statically-known input shapes.
  dtype = np.float64
  use_static_shape = True

  def test_doc_string(self):
    """Poisson regression via the layer; passes if the fit runs cleanly."""
    # Create example data.
    n = 2000
    d = 4
    x = self.evaluate(tfd.Uniform(low=1., high=10.).sample([n, d], seed=42))
    w = [[0.314], [0.272], [-0.162], [0.058]]
    log_rate = tf.matmul(x, w) - 0.141
    y = self.evaluate(tfd.Poisson(log_rate=log_rate).sample())
    # Poisson regression.
    model = tfk.Sequential([
        tfkl.Dense(tfpl.IndependentPoisson.params_size(1), dtype=self.dtype),
        tfpl.IndependentPoisson(1, dtype=self.dtype)
    ])
    # Fit.
    model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.05),
        loss=lambda y, model: -model.log_prob(y),
        metrics=[])
    batch_size = 50
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=1,  # Usually `n // batch_size`.
              verbose=True,
              shuffle=True)
@test_util.test_graph_and_eager_modes
class _MixtureLayerTest(object):
  """Base class for testing mixture (same-family) distribution layers.

  Instances of subclasses must set:
    self.layer_class: The mixture distribution layer class.
    self.dist_class: The underlying component `tfd.Distribution` class.
    self.dtype: The data type for the parameters passed to the layer.
    self.use_static_shape: Whether or not test tensor inputs should have
      statically-known shapes.
  """

  def _distribution_to_params(self, distribution, batch_shape):
    """Given a self.layer_class instance, return a tensor of its parameters."""
    raise NotImplementedError

  def _build_tensor(self, ndarray, dtype=None):
    """Wrap `ndarray` honoring `self.dtype` and `self.use_static_shape`."""
    # Enforce parameterized dtype and static/dynamic testing.
    ndarray = np.asarray(ndarray).astype(
        dtype if dtype is not None else self.dtype)
    return tf1.placeholder_with_default(
        ndarray, shape=ndarray.shape if self.use_static_shape else None)

  def _check_distribution(self, t, x, batch_shape):
    """Check the mixture's structure and that its params round-trip to `t`."""
    self.assertIsInstance(_unwrap_tensor_coercible(x), tfd.MixtureSameFamily)
    self.assertIsInstance(
        _unwrap_tensor_coercible(x.mixture_distribution), tfd.Categorical)
    self.assertIsInstance(
        _unwrap_tensor_coercible(x.components_distribution), tfd.Independent)
    self.assertIsInstance(
        _unwrap_tensor_coercible(x.components_distribution.distribution),
        self.dist_class)
    self.assertEqual(self.dtype, x.dtype)
    t_back = self._distribution_to_params(x, batch_shape)
    [t_, t_back_] = self.evaluate([t, t_back])
    self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)
    self.assertEqual(self.dtype, t_back_.dtype)

  def test_new(self):
    """The static `new` builds the mixture from a params tensor."""
    n = self._build_tensor(4, dtype=np.int32)
    event_shape = self._build_tensor(3, dtype=np.int32)
    p = self.layer_class.params_size(n, event_shape)
    batch_shape = self._build_tensor([4, 2], dtype=np.int32)
    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)
    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
    x = self.layer_class.new(t, n, event_shape, validate_args=True)
    self._check_distribution(t, x, batch_shape)

  def test_layer(self):
    """Calling the layer produces the same mixture as `new`."""
    n = self._build_tensor(3, dtype=np.int32)
    event_shape = self._build_tensor([4, 2], dtype=np.int32)
    p = self.layer_class.params_size(n, event_shape)
    batch_shape = self._build_tensor([7, 3], dtype=np.int32)
    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)
    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
    layer = self.layer_class(n, event_shape, validate_args=True,
                             dtype=self.dtype)
    x = layer(t)
    self._check_distribution(t, x, batch_shape)

  def test_serialization(self):
    """h5 save/load preserves the layer's mean and log_prob outputs."""
    n = 3
    event_shape = []
    params_size = self.layer_class.params_size(n, event_shape)
    batch_shape = [4, 1]
    low = self._build_tensor(-3., dtype=self.dtype)
    high = self._build_tensor(3., dtype=self.dtype)
    x = self.evaluate(tfd.Uniform(low, high).sample(
        batch_shape + [params_size], seed=42))
    model = tfk.Sequential([
        tfkl.Dense(params_size, input_shape=(params_size,), dtype=self.dtype),
        self.layer_class(n, event_shape, validate_args=True, dtype=self.dtype),
    ])
    model_file = self.create_tempfile()
    model.save(model_file.full_path, save_format='h5')
    model_copy = tfk.models.load_model(model_file.full_path)
    self.assertAllEqual(self.evaluate(model(x).mean()),
                        self.evaluate(model_copy(x).mean()))
    self.assertEqual(self.dtype, model(x).mean().dtype.as_numpy_dtype)
    ones = np.ones([7] + batch_shape + event_shape, dtype=self.dtype)
    self.assertAllEqual(self.evaluate(model(x).log_prob(ones)),
                        self.evaluate(model_copy(x).log_prob(ones)))

  def test_model_export(self):
    """SavedModel export/load preserves the model's Tensor output."""
    n = 5
    event_shape = [3, 2]
    params_size = self.layer_class.params_size(n, event_shape)
    batch_shape = [4]
    low = self._build_tensor(-3., dtype=self.dtype)
    high = self._build_tensor(3., dtype=self.dtype)
    x = self.evaluate(tfd.Uniform(low, high).sample(
        batch_shape + [params_size], seed=42))
    model = tfk.Sequential([
        tfkl.Dense(params_size, input_shape=(params_size,), dtype=self.dtype),
        self.layer_class(n, event_shape, validate_args=True,
                         convert_to_tensor_fn='mean', dtype=self.dtype),
        # NOTE: For TensorFlow to be able to serialize the graph (i.e., for
        # serving), the model must output a Tensor and not a Distribution.
        tfkl.Dense(1, dtype=self.dtype),
    ])
    model.compile(optimizer='adam', loss='mse')
    model_dir = self.create_tempdir()
    tf1.keras.experimental.export_saved_model(model, model_dir.full_path)
    model_copy = tf1.keras.experimental.load_from_saved_model(
        model_dir.full_path)
    self.assertAllEqual(self.evaluate(model(x)), self.evaluate(model_copy(x)))
    self.assertEqual(self.dtype, model(x).dtype.as_numpy_dtype)
@test_util.test_graph_and_eager_modes
class _MixtureLogisticTest(_MixtureLayerTest):
  """Configures `_MixtureLayerTest` for `tfpl.MixtureLogistic`."""
  layer_class = tfpl.MixtureLogistic
  dist_class = tfd.Logistic

  def _distribution_to_params(self, distribution, batch_shape):
    """Given a self.layer_class instance, return a tensor of its parameters."""
    params_shape = tf.concat([batch_shape, [-1]], axis=0)
    batch_and_n_shape = tf.concat(
        [tf.shape(distribution.mixture_distribution.logits), [-1]],
        axis=0)
    cd = distribution.components_distribution.distribution
    # Parameter layout: mixture logits, then interleaved per-component
    # (loc, inverse-softplus scale) blocks, flattened to the params axis.
    return tf.concat([
        distribution.mixture_distribution.logits,
        tf.reshape(tf.concat([
            tf.reshape(cd.loc, batch_and_n_shape),
            tf.reshape(tfp.math.softplus_inverse(cd.scale), batch_and_n_shape)
        ], axis=-1), params_shape),
    ], axis=-1)

  def test_doc_string(self):
    """End-to-end mixture-density-network fit on a noisy cardioid."""
    # Load data (graph of a cardioid).
    n = 2000
    t = self.evaluate(tfd.Uniform(low=-np.pi, high=np.pi).sample([n, 1]))
    r = 2 * (1 - tf.cos(t))
    x = tf.convert_to_tensor(self.evaluate(
        r * tf.sin(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))
    y = tf.convert_to_tensor(self.evaluate(
        r * tf.cos(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))
    # Model the distribution of y given x with a Mixture Density Network.
    event_shape = self._build_tensor([1], dtype=np.int32)
    num_components = self._build_tensor(5, dtype=np.int32)
    # BUGFIX: previously queried `tfpl.MixtureNormal.params_size` (copy-paste
    # from the Normal variant of this test); use this layer's own class. The
    # numeric result (15) is the same for both, so behavior is unchanged.
    params_size = tfpl.MixtureLogistic.params_size(num_components, event_shape)
    model = tfk.Sequential([
        tfkl.Dense(12, activation='relu'),
        # NOTE: We must hard-code 15 below, instead of using `params_size`,
        # because the first argument to `tfkl.Dense` must be an integer (and
        # not, e.g., a placeholder tensor).
        tfkl.Dense(15, activation=None),
        tfpl.MixtureLogistic(num_components, event_shape),
    ])
    # Fit.
    batch_size = 100
    model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.02),
        loss=lambda y, model: -model.log_prob(y))
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=n // batch_size)
    self.assertEqual(15, self.evaluate(tf.convert_to_tensor(params_size)))
@test_util.test_graph_and_eager_modes
class MixtureLogisticTestDynamicShape(test_util.TestCase,
                                      _MixtureLogisticTest):
  # Run the shared MixtureLogistic tests with float64 params and
  # dynamically-shaped (placeholder) inputs.
  dtype = np.float64
  use_static_shape = False
@test_util.test_graph_and_eager_modes
class MixtureLogisticTestStaticShape(test_util.TestCase,
                                     _MixtureLogisticTest):
  # Run the shared MixtureLogistic tests with float32 params and
  # statically-known input shapes.
  dtype = np.float32
  use_static_shape = True
@test_util.test_graph_and_eager_modes
class _MixtureNormalTest(_MixtureLayerTest):
  """Configures `_MixtureLayerTest` for `tfpl.MixtureNormal`."""
  layer_class = tfpl.MixtureNormal
  dist_class = tfd.Normal

  def _distribution_to_params(self, distribution, batch_shape):
    """Given a self.layer_class instance, return a tensor of its parameters."""
    params_shape = tf.concat([batch_shape, [-1]], axis=0)
    batch_and_n_shape = tf.concat(
        [tf.shape(distribution.mixture_distribution.logits), [-1]],
        axis=0)
    cd = distribution.components_distribution.distribution
    # Parameter layout: mixture logits, then interleaved per-component
    # (loc, inverse-softplus scale) blocks, flattened to the params axis.
    return tf.concat([
        distribution.mixture_distribution.logits,
        tf.reshape(tf.concat([
            tf.reshape(cd.loc, batch_and_n_shape),
            tf.reshape(tfp.math.softplus_inverse(cd.scale), batch_and_n_shape)
        ], axis=-1), params_shape),
    ], axis=-1)

  def test_doc_string(self):
    """End-to-end mixture-density-network fit on a noisy cardioid."""
    # Load data (graph of a cardioid).
    n = 2000
    t = self.evaluate(tfd.Uniform(low=-np.pi, high=np.pi).sample([n, 1]))
    r = 2 * (1 - tf.cos(t))
    x = tf.convert_to_tensor(self.evaluate(
        r * tf.sin(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))
    y = tf.convert_to_tensor(self.evaluate(
        r * tf.cos(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))
    # Model the distribution of y given x with a Mixture Density Network.
    event_shape = self._build_tensor([1], dtype=np.int32)
    num_components = self._build_tensor(5, dtype=np.int32)
    params_size = tfpl.MixtureNormal.params_size(num_components, event_shape)
    model = tfk.Sequential([
        tfkl.Dense(12, activation='relu'),
        # NOTE: We must hard-code 15 below, instead of using `params_size`,
        # because the first argument to `tfkl.Dense` must be an integer (and
        # not, e.g., a placeholder tensor).
        tfkl.Dense(15, activation=None),
        tfpl.MixtureNormal(num_components, event_shape),
    ])
    # Fit.
    batch_size = 100
    model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.02),
        loss=lambda y, model: -model.log_prob(y))
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=n // batch_size)
    self.assertEqual(15, self.evaluate(tf.convert_to_tensor(params_size)))
@test_util.test_graph_and_eager_modes
class MixtureNormalTestDynamicShape(test_util.TestCase,
                                    _MixtureNormalTest):
  """Runs the MixtureNormal tests with float32 params and dynamic shapes."""
  dtype = np.float32
  use_static_shape = False
@test_util.test_graph_and_eager_modes
class MixtureNormalTestStaticShape(test_util.TestCase,
                                   _MixtureNormalTest):
  """Runs the MixtureNormal tests with float64 params and static shapes."""
  dtype = np.float64
  use_static_shape = True
@test_util.test_graph_and_eager_modes
class _MixtureSameFamilyTest(object):
  """Tests for the generic MixtureSameFamily layer."""

  def _build_tensor(self, ndarray, dtype=None):
    # Enforce parameterized dtype and static/dynamic testing.
    ndarray = np.asarray(ndarray).astype(
        dtype if dtype is not None else self.dtype)
    # Dropping the static shape (shape=None) exercises the dynamic-shape
    # code paths when the subclass sets use_static_shape = False.
    return tf1.placeholder_with_default(
        ndarray, shape=ndarray.shape if self.use_static_shape else None)

  def _check_distribution(self, t, x, batch_shape):
    """Assert that distribution `x` round-trips back to the raw params `t`."""
    self.assertIsInstance(_unwrap_tensor_coercible(x), tfd.MixtureSameFamily)
    self.assertIsInstance(
        _unwrap_tensor_coercible(x.mixture_distribution), tfd.Categorical)
    self.assertIsInstance(
        _unwrap_tensor_coercible(x.components_distribution),
        tfd.MultivariateNormalTriL)
    shape = tf.concat([batch_shape, [-1]], axis=0)
    batch_and_n_shape = tf.concat(
        [tf.shape(x.mixture_distribution.logits), [-1]], axis=0)
    cd = x.components_distribution
    # Bijector used to invert the components' dense scale matrices back to
    # their flat parameterization.
    scale_tril = tfb.FillScaleTriL(diag_shift=np.array(1e-5, self.dtype))
    # Reconstruct the flat parameter vector: mixture logits, then flattened
    # per-component locs and inverted scale entries.
    t_back = tf.concat([
        x.mixture_distribution.logits,
        tf.reshape(tf.concat([
            tf.reshape(cd.loc, batch_and_n_shape),
            tf.reshape(
                scale_tril.inverse(cd.scale.to_dense()),
                batch_and_n_shape),
        ], axis=-1), shape),
    ], axis=-1)
    [t_, t_back_] = self.evaluate([t, t_back])
    self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)

  def test_new(self):
    """The static `new` constructor produces a parameter-faithful mixture."""
    n = self._build_tensor(4, dtype=np.int32)
    batch_shape = self._build_tensor([4, 2], dtype=np.int32)
    event_size = self._build_tensor(3, dtype=np.int32)
    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)
    cps = tfpl.MultivariateNormalTriL.params_size(event_size)
    p = tfpl.MixtureSameFamily.params_size(n, cps)
    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
    normal = tfpl.MultivariateNormalTriL(event_size, validate_args=True,
                                         dtype=self.dtype)
    x = tfpl.MixtureSameFamily.new(t, n, normal, validate_args=True)
    self._check_distribution(t, x, batch_shape)

  def test_layer(self):
    """Calling the layer object behaves like the static `new` constructor."""
    n = self._build_tensor(3, dtype=np.int32)
    batch_shape = self._build_tensor([7, 3], dtype=np.int32)
    event_size = self._build_tensor(4, dtype=np.int32)
    low = self._build_tensor(-3.)
    high = self._build_tensor(3.)
    cps = tfpl.MultivariateNormalTriL.params_size(event_size)
    p = tfpl.MixtureSameFamily.params_size(n, cps)
    normal = tfpl.MultivariateNormalTriL(event_size, validate_args=True,
                                         dtype=self.dtype)
    layer = tfpl.MixtureSameFamily(n, normal, validate_args=True,
                                   dtype=self.dtype)
    t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
    x = layer(t)
    self._check_distribution(t, x, batch_shape)

  def test_doc_string(self):
    """End-to-end fit of a Mixture Density Network (docstring example)."""
    # Load data (graph of a cardioid).
    n = 2000
    t = self.evaluate(tfd.Uniform(low=-np.pi, high=np.pi).sample([n, 1]))
    r = 2 * (1 - tf.cos(t))
    # Perturb the curve with small Gaussian noise so the fit is non-trivial.
    x = tf.convert_to_tensor(self.evaluate(
        r * tf.sin(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))
    y = tf.convert_to_tensor(self.evaluate(
        r * tf.cos(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])))
    # Model the distribution of y given x with a Mixture Density Network.
    event_shape = self._build_tensor([1], dtype=np.int32)
    num_components = self._build_tensor(5, dtype=np.int32)
    params_size = tfpl.MixtureSameFamily.params_size(
        num_components, tfpl.IndependentNormal.params_size(event_shape))
    model = tfk.Sequential([
        tfkl.Dense(12, activation='relu'),
        # NOTE: We must hard-code 15 below, instead of using `params_size`,
        # because the first argument to `tfkl.Dense` must be an integer (and
        # not, e.g., a placeholder tensor).
        tfkl.Dense(15, activation=None),
        tfpl.MixtureSameFamily(num_components,
                               tfpl.IndependentNormal(event_shape)),
    ])
    # Fit.
    batch_size = 100
    model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.02),
        loss=lambda y, model: -model.log_prob(y))
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=1)  # Usually `n // batch_size`.
    self.assertEqual(15, self.evaluate(tf.convert_to_tensor(params_size)))
@test_util.test_graph_and_eager_modes
class MixtureSameFamilyTestDynamicShape(test_util.TestCase,
                                        _MixtureSameFamilyTest):
  """Runs the MixtureSameFamily tests with float32 params, dynamic shapes."""
  dtype = np.float32
  use_static_shape = False
@test_util.test_graph_and_eager_modes
class MixtureSameFamilyTestStaticShape(test_util.TestCase,
                                       _MixtureSameFamilyTest):
  """Runs the MixtureSameFamily tests with float64 params, static shapes."""
  dtype = np.float64
  use_static_shape = True
@test_util.test_graph_and_eager_modes
class VariationalGaussianProcessEndToEnd(test_util.TestCase):
  """End-to-end smoke test for tfp.layers.VariationalGaussianProcess."""

  def testEndToEnd(self):
    np.random.seed(43)
    dtype = np.float64

    # Synthetic 1-D regression data: a sinusoid-modulated line with
    # input-dependent noise scale s(x).
    n = 1000
    w0 = 0.125
    b0 = 5.
    x_range = [-20, 60]

    def s(x):
      # Noise std grows quadratically from 0.75 at the left edge of x_range
      # to 3.75 at the right edge.
      g = (x - x_range[0]) / (x_range[1] - x_range[0])
      return 3*(0.25 + g**2.)

    x = (x_range[1] - x_range[0]) * np.random.rand(n) + x_range[0]
    eps = np.random.randn(n) * s(x)
    y = (w0 * x * (1 + np.sin(x)) + b0) + eps
    x0 = np.linspace(*x_range, num=1000)

    class KernelFn(tf.keras.layers.Layer):
      """Keras layer that owns a trainable ExponentiatedQuadratic kernel."""

      def __init__(self, **kwargs):
        super(KernelFn, self).__init__(**kwargs)
        # Raw (pre-softplus) amplitude; softplus in `kernel` keeps the
        # effective amplitude positive.
        self._amplitude = self.add_weight(
            initializer=tf.initializers.constant(.54),
            dtype=dtype,
            name='amplitude')

      def call(self, x):
        # Identity: this layer only exists to carry the kernel's variables.
        return x

      @property
      def kernel(self):
        return tfp.math.psd_kernels.ExponentiatedQuadratic(
            amplitude=tf.nn.softplus(self._amplitude))

    num_inducing_points = 50

    # Add a leading dimension for the event_shape.
    eyes = np.expand_dims(np.eye(num_inducing_points), 0)
    variational_inducing_observations_scale_initializer = (
        tf.initializers.constant(1e-3 * eyes))

    model = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=[1], dtype=dtype),
        tf.keras.layers.Dense(1, kernel_initializer='Ones', use_bias=False,
                              activation=None, dtype=dtype),
        tfp.layers.VariationalGaussianProcess(
            num_inducing_points=num_inducing_points,
            kernel_provider=KernelFn(dtype=dtype),
            # Inducing points initialized evenly across the input range.
            inducing_index_points_initializer=(
                tf.initializers.constant(
                    np.linspace(*x_range,
                                num=num_inducing_points,
                                dtype=dtype)[..., np.newaxis])),
            variational_inducing_observations_scale_initializer=(
                variational_inducing_observations_scale_initializer)),
    ])

    # kl_weight scales the KL term by the minibatch fraction batch_size / n.
    batch_size = 64
    kl_weight = np.float64(batch_size) / n
    loss = lambda y, d: d.variational_loss(y, kl_weight=kl_weight)
    model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.02),
        loss=loss)

    if not tf.executing_eagerly():
      self.evaluate([v.initializer for v in model.variables])

    # This should have no issues
    model.fit(x, y, epochs=5, batch_size=batch_size, verbose=False)

    # Sampling from the fitted process: (num_samples, num_points, event_dim).
    vgp = model(x0[..., tf.newaxis])
    num_samples = 7
    samples_ = self.evaluate(vgp.sample(num_samples))
    self.assertAllEqual(samples_.shape, (7, 1000, 1))
    self.assertEqual(dtype, vgp.dtype)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
|
nju520/flask | refs/heads/master | flask/exthook.py | 142 | # -*- coding: utf-8 -*-
"""
flask.exthook
~~~~~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
This is used by `flask.ext`.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
from ._compat import reraise
class ExtensionImporter(object):
    """This importer redirects imports from this submodule to other locations.

    This makes it possible to transition from the old flaskext.name to the
    newer flask_name without people having a hard time.
    """

    def __init__(self, module_choices, wrapper_module):
        # Import-path templates (e.g. 'flask_%s', 'flaskext.%s') tried in
        # order by load_module().
        self.module_choices = module_choices
        # Dotted name of the wrapper package (e.g. 'flask.ext').
        self.wrapper_module = wrapper_module
        self.prefix = wrapper_module + '.'
        # How many leading dotted components to strip from a fullname to
        # recover the extension name.
        self.prefix_cutoff = wrapper_module.count('.') + 1

    def __eq__(self, other):
        # Compare by class module/name plus configuration (rather than
        # isinstance) so equivalent importers from reloaded modules match.
        return self.__class__.__module__ == other.__class__.__module__ and \
               self.__class__.__name__ == other.__class__.__name__ and \
               self.wrapper_module == other.wrapper_module and \
               self.module_choices == other.module_choices

    def __ne__(self, other):
        return not self.__eq__(other)

    def install(self):
        # Remove any previously-installed equivalent importer, then append
        # ourselves to the PEP 302 meta path.
        sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]

    def find_module(self, fullname, path=None):
        # PEP 302 finder: only handle modules below the wrapper package.
        # Returning None (implicitly) defers to the next meta-path entry.
        if fullname.startswith(self.prefix):
            return self

    def load_module(self, fullname):
        # PEP 302 loader: try each module choice in turn and alias the first
        # one that imports successfully under `fullname`.
        if fullname in sys.modules:
            return sys.modules[fullname]
        modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
        for path in self.module_choices:
            realname = path % modname
            try:
                __import__(realname)
            except ImportError:
                exc_type, exc_value, tb = sys.exc_info()
                # Since we only establish the entry in sys.modules at the
                # very end, this seems to be redundant, but if recursive
                # imports happen we will call into this loader a second
                # time.  On the second invocation we still don't have an
                # entry for fullname in sys.modules, but we will end up with
                # the same fake module name and that import will succeed
                # since this one already has a temporary entry in the
                # modules dict.  Since this one "succeeded" temporarily that
                # second invocation now will have created a fullname entry
                # in sys.modules which we have to kill.
                sys.modules.pop(fullname, None)

                # If it's an important traceback we reraise it, otherwise
                # we swallow it and try the next choice.  The skipped frame
                # is the one from __import__ above which we don't care about.
                if self.is_important_traceback(realname, tb):
                    reraise(exc_type, exc_value, tb.tb_next)
                continue
            module = sys.modules[fullname] = sys.modules[realname]
            if '.' not in modname:
                # Also expose the extension as an attribute of the wrapper
                # package so `flask.ext.foo` works after the import.
                setattr(sys.modules[self.wrapper_module], modname, module)
            return module
        raise ImportError('No module named %s' % fullname)

    def is_important_traceback(self, important_module, tb):
        """Walks a traceback's frames and checks if any of the frames
        originated in the given important module.  If that is the case then we
        were able to import the module itself but apparently something went
        wrong when the module was imported.  (Eg: import of an import failed).
        """
        while tb is not None:
            if self.is_important_frame(important_module, tb):
                return True
            tb = tb.tb_next
        return False

    def is_important_frame(self, important_module, tb):
        """Checks a single frame if it's important."""
        g = tb.tb_frame.f_globals
        if '__name__' not in g:
            return False

        module_name = g['__name__']

        # Python 2.7 Behavior.  Modules are cleaned up late so the
        # name shows up properly here.  Success!
        if module_name == important_module:
            return True

        # Some Python versions will clean up modules so early that the
        # module name at that point is no longer set.  Try guessing from
        # the filename then.
        filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
        test_string = os.path.sep + important_module.replace('.', os.path.sep)
        return test_string + '.py' in filename or \
               test_string + os.path.sep + '__init__.py' in filename
|
ocadotechnology/boto | refs/heads/develop | boto/cloudsearch2/document.py | 136 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto.exception
from boto.compat import json
import requests
import boto
from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
class SearchServiceException(Exception):
    """Error reported by the CloudSearch service."""
    pass
class CommitMismatchError(Exception):
    """Raised when the number of adds/deletes reported by CloudSearch does
    not match the number of operations that were sent in the batch.
    """
    # Let's do some extra work and let the user handle errors on his/her own.
    # The raiser attaches the service's per-document error messages here so
    # callers can inspect the cause.
    errors = None
class EncodingError(Exception):
    """
    Content sent for Cloud Search indexing was incorrectly encoded.

    This usually happens when a document is marked as unicode but non-unicode
    characters are present.  Raised while parsing the commit response.
    """
    pass
class ContentTooLongError(Exception):
    """
    Content sent for Cloud Search indexing was too long.

    This will usually happen when documents queued for indexing add up to more
    than the limit allowed per upload batch (5MB).
    """
    pass
class DocumentServiceConnection(object):
    """
    A CloudSearch document service.

    The DocumentServiceConnection is used to add, remove and update documents in
    CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document
    Format).

    To generate an appropriate SDF, use :func:`add` to add or update documents,
    as well as :func:`delete` to remove documents.

    Once the set of documents is ready to be indexed, use :func:`commit` to send
    the commands to CloudSearch.

    If there are a lot of documents to index, it may be preferable to split the
    generation of SDF data and the actual uploading into CloudSearch. Retrieve
    the current SDF with :func:`get_sdf`. If this file is then uploaded into S3,
    it can be retrieved back afterwards for upload into CloudSearch using
    :func:`add_sdf_from_s3`.

    The SDF is not cleared after a :func:`commit`. If you wish to continue
    using the DocumentServiceConnection for another batch upload of commands,
    you will need to :func:`clear_sdf` first to stop the previous batch of
    commands from being uploaded again.
    """

    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        if not self.endpoint:
            self.endpoint = domain.doc_service_endpoint
        # Pending add/delete commands; flushed to CloudSearch by commit().
        self.documents_batch = []
        # Explicitly-loaded SDF (see add_sdf_from_s3); when set it takes
        # precedence over documents_batch in get_sdf().
        self._sdf = None

        # Copy proxy settings from connection and check if request should be signed
        self.proxy = {}
        self.sign_request = False
        if self.domain and self.domain.layer1:
            if self.domain.layer1.use_proxy:
                self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()}

            self.sign_request = getattr(self.domain.layer1, 'sign_request', False)

            if self.sign_request:
                # Create a domain connection to send signed requests
                layer1 = self.domain.layer1
                self.domain_connection = CloudSearchDomainConnection(
                    host=self.endpoint,
                    aws_access_key_id=layer1.aws_access_key_id,
                    aws_secret_access_key=layer1.aws_secret_access_key,
                    region=layer1.region,
                    provider=layer1.provider
                )

    def add(self, _id, fields):
        """
        Add a document to be processed by the DocumentService.

        The document will not actually be added until :func:`commit` is called.

        :type _id: string
        :param _id: A unique ID used to refer to this document.

        :type fields: dict
        :param fields: A dictionary of key-value pairs to be uploaded.
        """
        d = {'type': 'add', 'id': _id, 'fields': fields}
        self.documents_batch.append(d)

    def delete(self, _id):
        """
        Schedule a document to be removed from the CloudSearch service.

        The document will not actually be scheduled for removal until
        :func:`commit` is called.

        :type _id: string
        :param _id: The unique ID of this document.
        """
        d = {'type': 'delete', 'id': _id}
        self.documents_batch.append(d)

    def get_sdf(self):
        """
        Generate the working set of documents in Search Data Format (SDF).

        :rtype: string
        :returns: JSON-formatted string of the documents in SDF
        """
        # An SDF loaded from S3 wins over the locally accumulated batch.
        return self._sdf if self._sdf else json.dumps(self.documents_batch)

    def clear_sdf(self):
        """
        Clear the working documents from this DocumentServiceConnection.

        This should be used after :func:`commit` if the connection will be
        reused for another set of documents.
        """
        self._sdf = None
        self.documents_batch = []

    def add_sdf_from_s3(self, key_obj):
        """
        Load an SDF from S3.

        Using this method will result in documents added through
        :func:`add` and :func:`delete` being ignored.

        :type key_obj: :class:`boto.s3.key.Key`
        :param key_obj: An S3 key which contains an SDF
        """
        # @todo (lucas): would be nice if this could just take an s3://uri...
        self._sdf = key_obj.get_contents_as_string()

    def _commit_with_auth(self, sdf, api_version):
        # Signed upload via the CloudSearchDomainConnection created in
        # __init__ (only exists when sign_request is True).
        return self.domain_connection.upload_documents(sdf, 'application/json')

    def _commit_without_auth(self, sdf, api_version):
        url = "http://%s/%s/documents/batch" % (self.endpoint, api_version)

        # Keep-alive is automatic in a post-1.0 requests world.
        session = requests.Session()
        session.proxies = self.proxy
        adapter = requests.adapters.HTTPAdapter(
            pool_connections=20,
            pool_maxsize=50,
            max_retries=5
        )
        session.mount('http://', adapter)
        session.mount('https://', adapter)

        resp = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
        return resp

    def commit(self):
        """
        Actually send an SDF to CloudSearch for processing.

        If an SDF file has been explicitly loaded it will be used. Otherwise,
        documents added through :func:`add` and :func:`delete` will be used.

        :rtype: :class:`CommitResponse`
        :returns: A summary of documents added and deleted
        """
        sdf = self.get_sdf()

        if ': null' in sdf:
            # Null field values are known to upset the service; log context
            # around the offending spot before sending anyway.
            boto.log.error('null value in sdf detected. This will probably '
                           'raise 500 error.')
            index = sdf.index(': null')
            boto.log.error(sdf[index - 100:index + 100])

        api_version = '2013-01-01'
        if self.domain and self.domain.layer1:
            api_version = self.domain.layer1.APIVersion

        if self.sign_request:
            r = self._commit_with_auth(sdf, api_version)
        else:
            r = self._commit_without_auth(sdf, api_version)

        return CommitResponse(r, self, sdf, signed_request=self.sign_request)
class CommitResponse(object):
    """Wrapper for response to Cloudsearch document batch commit.

    :type response: :class:`requests.models.Response`
    :param response: Response from Cloudsearch /documents/batch API (or the
        already-parsed dict when ``signed_request`` is True)

    :type doc_service: :class:`boto.cloudsearch2.document.DocumentServiceConnection`
    :param doc_service: Object containing the documents posted and methods to
        retry

    :raises: :class:`boto.exception.BotoServerError`
    :raises: :class:`boto.cloudsearch2.document.SearchServiceException`
    :raises: :class:`boto.cloudsearch2.document.EncodingError`
    :raises: :class:`boto.cloudsearch2.document.ContentTooLongError`
    """
    def __init__(self, response, doc_service, sdf, signed_request=False):
        self.response = response
        self.doc_service = doc_service
        self.sdf = sdf
        self.signed_request = signed_request

        if self.signed_request:
            # Signed requests come back already parsed (dict-like), not as a
            # raw requests.Response body.
            self.content = response
        else:
            _body = response.content.decode('utf-8')

            try:
                self.content = json.loads(_body)
            except ValueError:
                # BUGFIX: narrowed from a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.  json.loads signals a bad body
                # with ValueError (JSONDecodeError is a subclass of it).
                boto.log.error('Error indexing documents.\nResponse Content:\n{0}'
                               '\n\nSDF:\n{1}'.format(_body, self.sdf))
                raise boto.exception.BotoServerError(self.response.status_code, '',
                                                     body=_body)

        self.status = self.content['status']
        if self.status == 'error':
            # Collect per-document error messages and promote known fatal
            # conditions to dedicated exception types.
            self.errors = [e.get('message') for e in self.content.get('errors',
                                                                      [])]
            for e in self.errors:
                if "Illegal Unicode character" in e:
                    raise EncodingError("Illegal Unicode character in document")
                elif e == "The Content-Length is too long":
                    raise ContentTooLongError("Content was too long")
        else:
            self.errors = []

        self.adds = self.content['adds']
        self.deletes = self.content['deletes']
        self._check_num_ops('add', self.adds)
        self._check_num_ops('delete', self.deletes)

    def _check_num_ops(self, type_, response_num):
        """Raise exception if number of ops in response doesn't match commit.

        :type type_: str
        :param type_: Type of commit operation: 'add' or 'delete'

        :type response_num: int
        :param response_num: Number of adds or deletes in the response.

        :raises: :class:`boto.cloudsearch2.document.CommitMismatchError`
        """
        commit_num = len([d for d in self.doc_service.documents_batch
                          if d['type'] == type_])

        if response_num != commit_num:
            if self.signed_request:
                boto.log.debug(self.response)
            else:
                boto.log.debug(self.response.content)
            # There will always be a commit mismatch error if there are any
            # errors on cloudsearch.  self.errors gets lost when this
            # CommitMismatchError is raised.  Whoever is using boto has no
            # idea why their commit failed.  They can't even notify the user
            # of the cause by parsing the error messages from amazon.  So
            # let's attach the self.errors to the exception since we already
            # spent time and effort collecting them out of the response.
            exc = CommitMismatchError(
                'Incorrect number of {0}s returned. Commit: {1} Response: {2}'
                .format(type_, commit_num, response_num)
            )
            exc.errors = self.errors
            raise exc
|
gratipay/gratipay.com | refs/heads/master | gratipay/models/exchange_route.py | 1 | from __future__ import absolute_import, division, print_function, unicode_literals
import braintree
from postgres.orm import Model
class ExchangeRoute(Model):
    """A participant's route to a payment network, wrapping one row of the
    ``exchange_routes`` table (e.g. a Braintree credit card token or a
    PayPal address).
    """

    typname = "exchange_routes"

    def __eq__(self, other):
        # Routes are identified by their database id.
        if not isinstance(other, ExchangeRoute):
            return False
        return self.id == other.id

    def __ne__(self, other):
        if not isinstance(other, ExchangeRoute):
            return True
        return self.id != other.id

    def __repr__(self):
        return '<ExchangeRoute: %s on %s>' % (repr(self.address), repr(self.network))


    # Constructors
    # ============

    @classmethod
    def from_id(cls, id, cursor=None):
        """Return the route with the given primary key, or None."""
        route = (cursor or cls.db).one("""
            SELECT r.*::exchange_routes
              FROM exchange_routes r
             WHERE id = %(id)s
        """, locals())
        if route:
            from gratipay.models.participant import Participant  # XXX Red hot hack!
            route.set_attributes(participant=Participant.from_id(route.participant))
        return route

    @classmethod
    def from_network(cls, participant, network, cursor=None):
        """Return the participant's current route on `network`, or None."""
        participant_id = participant.id
        route = (cursor or cls.db).one("""
            SELECT r.*::exchange_routes
              FROM current_exchange_routes r
             WHERE participant = %(participant_id)s
               AND network = %(network)s
        """, locals())
        if route:
            route.set_attributes(participant=participant)
        return route

    @classmethod
    def from_address(cls, participant, network, address, cursor=None):
        """Return the participant's route for a specific address, or None."""
        participant_id = participant.id
        route = (cursor or cls.db).one("""
            SELECT r.*::exchange_routes
              FROM exchange_routes r
             WHERE participant = %(participant_id)s
               AND network = %(network)s
               AND address = %(address)s
        """, locals())
        if route:
            route.set_attributes(participant=participant)
        return route

    @classmethod
    def insert(cls, participant, network, address, fee_cap=None, cursor=None):
        """Create, persist and return a new route for the participant."""
        participant_id = participant.id
        error = ''
        route = (cursor or cls.db).one("""
            INSERT INTO exchange_routes
                        (participant, network, address, error, fee_cap)
                 VALUES (%(participant_id)s, %(network)s, %(address)s, %(error)s, %(fee_cap)s)
              RETURNING exchange_routes.*::exchange_routes
        """, locals())
        if network == 'braintree-cc':
            # Refresh cached giving amounts for card routes.
            participant.update_giving_and_teams()
        route.set_attributes(participant=participant)
        return route


    def invalidate(self):
        """Soft-delete the route, removing any remote payment method first."""
        if self.network == 'braintree-cc':
            braintree.PaymentMethod.delete(self.address)

        with self.db.get_cursor() as cursor:
            # BUGFIX: run the UPDATE on the transaction's cursor.  The
            # original used self.db.run here, which executed outside this
            # transaction -- not atomic with the event insert below and
            # inconsistent with revive().
            cursor.run("UPDATE exchange_routes SET is_deleted=true WHERE id=%s", (self.id,))
            payload = dict( id=self.participant.id
                          , exchange_route=self.id
                          , action='invalidate route'
                          , address=self.address
                           )
            self.app.add_event(cursor, 'participant', payload)
        self.set_attributes(is_deleted=True)

    def revive(self):
        """Un-delete a previously invalidated PayPal route."""
        assert self.network == 'paypal'  # sanity check
        with self.db.get_cursor() as cursor:
            cursor.run("UPDATE exchange_routes SET is_deleted=false WHERE id=%s", (self.id,))
            payload = dict( id=self.participant.id
                          , exchange_route=self.id
                          , action='revive route'
                          , address=self.address
                           )
            self.app.add_event(cursor, 'participant', payload)
        self.set_attributes(is_deleted=False)

    def update_error(self, new_error):
        """Record the latest network error for this route (no-op if deleted)."""
        if self.is_deleted:
            return
        id = self.id
        old_error = self.error
        self.db.run("""
            UPDATE exchange_routes
               SET error = %(new_error)s
             WHERE id = %(id)s
        """, locals())
        self.set_attributes(error=new_error)

        # Update cached amounts if requested and necessary
        if self.network != 'braintree-cc':
            return
        if self.participant.is_suspicious or bool(new_error) == bool(old_error):
            return

        # XXX *White* hot hack!
        # =====================
        # During payday, participant is a record from a select of
        # payday_participants (or whatever), *not* an actual Participant
        # object. We need the real deal so we can use a method on it ...
        from gratipay.models.participant import Participant
        participant = Participant.from_username(self.participant.username)
        participant.update_giving_and_teams()
|
kylerbrown/bark | refs/heads/master | bark/io/rhd/get_bytes_per_data_block.py | 2 | #! /bin/env python
#
# Michael Gibson 23 April 2015
def get_bytes_per_data_block(header):
    """Calculates the number of bytes in each 60-sample datablock."""
    # Each data block covers 60 amplifier samples.
    samples_per_block = 60

    # Timestamps: one 32-bit value per amplifier sample.
    total = samples_per_block * 4

    # Amplifier channels: one 16-bit sample per channel at the full rate.
    total += samples_per_block * 2 * header['num_amplifier_channels']

    # Auxiliary inputs are sampled 4x slower than amplifiers.
    total += 15 * 2 * header['num_aux_input_channels']

    # Supply voltage is sampled 60x slower than amplifiers.
    total += 1 * 2 * header['num_supply_voltage_channels']

    # Board analog inputs are sampled at the same rate as amplifiers.
    total += samples_per_block * 2 * header['num_board_adc_channels']

    # Board digital inputs are sampled at the same rate as amplifiers; all
    # channels occupy a single 16-bit word per sample (no per-channel factor).
    if header['num_board_dig_in_channels'] > 0:
        total += samples_per_block * 2

    # Board digital outputs likewise occupy one 16-bit word per sample.
    if header['num_board_dig_out_channels'] > 0:
        total += samples_per_block * 2

    # Temp sensor is sampled 60x slower than amplifiers.
    if header['num_temp_sensor_channels'] > 0:
        total += 1 * 2 * header['num_temp_sensor_channels']

    return total
|
fhaoquan/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/_markupbase.py | 891 | """Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the html.parser module. It has no
documented public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
"""Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers."""
    def __init__(self):
        # ParserBase is abstract: it must be subclassed by a concrete parser
        # that supplies rawdata and the handle_*/unknown_decl callbacks.
        if self.__class__ is ParserBase:
            raise RuntimeError(
                "_markupbase.ParserBase must be subclassed")
    def error(self, message):
        # Subclasses decide how syntax errors are reported.
        raise NotImplementedError(
            "subclasses of ParserBase must override error()")
    def reset(self):
        """Reset position bookkeeping to the start of the input."""
        self.lineno = 1
        self.offset = 0
    def getpos(self):
        """Return current line number and offset."""
        # Line numbers are 1-based, offsets 0-based (see reset()).
        return self.lineno, self.offset
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = rawdata.count("\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = rawdata.rindex("\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
_decl_otherchars = ''
    # Internal -- parse declaration (for use by subclasses).
    def parse_declaration(self, i):
        """Parse a '<!...>' construct at index i; return the index just past
        it, or -1 if the buffer ends before the construct is complete."""
        # This is some sort of declaration; in "HTML as
        # deployed," this should only be the document type
        # declaration ("<!DOCTYPE html...>").
        # ISO 8879:1986, however, has more complex
        # declaration syntax for elements in <!...>, including:
        # --comment--
        # [marked section]
        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
        #   ATTLIST, NOTATION, SHORTREF, USEMAP,
        #   LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
        rawdata = self.rawdata
        j = i + 2
        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
        if rawdata[j:j+1] == ">":
            # the empty comment <!>
            return j + 1
        if rawdata[j:j+1] in ("-", ""):
            # Start of comment followed by buffer boundary,
            # or just a buffer boundary.
            return -1
        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
        n = len(rawdata)
        if rawdata[j:j+2] == '--':  # comment
            # Locate --.*-- as the body of the comment
            return self.parse_comment(i)
        elif rawdata[j] == '[':  # marked section
            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
            # Note that this is extended by Microsoft Office "Save as Web" function
            # to include [if...] and [endif].
            return self.parse_marked_section(i)
        else:  # all other declaration elements
            decltype, j = self._scan_name(j, i)
        if j < 0:
            return j
        if decltype == "doctype":
            # DOCTYPE declarations tolerate no extra body characters.
            self._decl_otherchars = ''
        # Scan the remainder of the declaration token by token until '>'.
        while j < n:
            c = rawdata[j]
            if c == ">":
                # end of declaration syntax
                data = rawdata[i+2:j]
                if decltype == "doctype":
                    self.handle_decl(data)
                else:
                    # According to the HTML5 specs sections "8.2.4.44 Bogus
                    # comment state" and "8.2.4.45 Markup declaration open
                    # state", a comment token should be emitted.
                    # Calling unknown_decl provides more flexibility though.
                    self.unknown_decl(data)
                return j + 1
            if c in "\"'":
                # Quoted literal: skip it atomically.
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1  # incomplete
                j = m.end()
            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
                name, j = self._scan_name(j, i)
            elif c in self._decl_otherchars:
                j = j + 1
            elif c == "[":
                # this could be handled in a separate doctype parser
                if decltype == "doctype":
                    j = self._parse_doctype_subset(j + 1, i)
                elif decltype in {"attlist", "linktype", "link", "element"}:
                    # must tolerate []'d groups in a content model in an element declaration
                    # also in data attribute specifications of attlist declaration
                    # also link type declaration subsets in linktype declarations
                    # also link attribute specification lists in link declarations
                    self.error("unsupported '[' char in %s declaration" % decltype)
                else:
                    self.error("unexpected '[' char in declaration")
            else:
                self.error(
                    "unexpected %r char in declaration" % rawdata[j])
            if j < 0:
                return j
        return -1  # incomplete
    # Internal -- parse a marked section
    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
    def parse_marked_section(self, i, report=1):
        """Parse a marked section starting at i; return the end index, or -1
        if the section's terminator is not yet in the buffer."""
        rawdata = self.rawdata
        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
        sectName, j = self._scan_name(i+3, i)
        if j < 0:
            return j
        if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
            # look for standard ]]> ending
            match = _markedsectionclose.search(rawdata, i+3)
        elif sectName in {"if", "else", "endif"}:
            # look for MS Office ]> ending
            match = _msmarkedsectionclose.search(rawdata, i+3)
        else:
            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
        if not match:
            return -1
        if report:
            # Report everything between '<![' and the terminator.
            j = match.start(0)
            self.unknown_decl(rawdata[i+3: j])
        return match.end(0)
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
self.error('unexpected call to parse_comment()')
match = _commentclose.search(rawdata, i+4)
if not match:
return -1
if report:
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
return match.end(0)
    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
    # returning the index just past any whitespace following the trailing ']'.
    def _parse_doctype_subset(self, i, declstartpos):
        """Scan the ``[ ... ]`` internal subset of a <!DOCTYPE declaration.

        Returns the index of the '>' that ends the declaration, or -1 if
        the buffer ends before the subset is complete.  Malformed content
        is reported via self.error().
        """
        rawdata = self.rawdata
        n = len(rawdata)
        j = i
        while j < n:
            c = rawdata[j]
            if c == "<":
                s = rawdata[j:j+2]
                if s == "<":
                    # end of buffer; incomplete
                    return -1
                if s != "<!":
                    self.updatepos(declstartpos, j + 1)
                    self.error("unexpected char in internal subset (in %r)" % s)
                if (j + 2) == n:
                    # end of buffer; incomplete
                    return -1
                if (j + 4) > n:
                    # end of buffer; incomplete
                    return -1
                if rawdata[j:j+4] == "<!--":
                    # Comments inside the subset are skipped silently.
                    j = self.parse_comment(j, report=0)
                    if j < 0:
                        return j
                    continue
                name, j = self._scan_name(j + 2, declstartpos)
                if j == -1:
                    return -1
                if name not in {"attlist", "element", "entity", "notation"}:
                    self.updatepos(declstartpos, j + 2)
                    self.error(
                        "unknown declaration %r in internal subset" % name)
                # handle the individual names
                meth = getattr(self, "_parse_doctype_" + name)
                j = meth(j, declstartpos)
                if j < 0:
                    return j
            elif c == "%":
                # parameter entity reference
                if (j + 1) == n:
                    # end of buffer; incomplete
                    return -1
                s, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                if rawdata[j] == ";":
                    j = j + 1
            elif c == "]":
                # End of the subset: skip trailing whitespace, expect '>'.
                j = j + 1
                while j < n and rawdata[j].isspace():
                    j = j + 1
                if j < n:
                    if rawdata[j] == ">":
                        return j
                    self.updatepos(declstartpos, j)
                    self.error("unexpected char after internal subset")
                else:
                    return -1
            elif c.isspace():
                j = j + 1
            else:
                self.updatepos(declstartpos, j)
                self.error("unexpected char %r in internal subset" % c)
        # end of buffer reached
        return -1
# Internal -- scan past <!ELEMENT declarations
def _parse_doctype_element(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j == -1:
return -1
# style content model; just skip until '>'
rawdata = self.rawdata
if '>' in rawdata[j:]:
return rawdata.find(">", j) + 1
return -1
    # Internal -- scan past <!ATTLIST declarations
    def _parse_doctype_attlist(self, i, declstartpos):
        """Scan past a <!ATTLIST declaration starting after the keyword.

        Returns the index just past the closing '>', or -1 if the buffer
        ends before the declaration is complete.
        """
        rawdata = self.rawdata
        name, j = self._scan_name(i, declstartpos)
        c = rawdata[j:j+1]
        if c == "":
            return -1   # end of buffer; incomplete
        if c == ">":
            return j + 1
        while 1:
            # scan a series of attribute descriptions; simplified:
            # name type [value] [#constraint]
            name, j = self._scan_name(j, declstartpos)
            if j < 0:
                return j
            c = rawdata[j:j+1]
            if c == "":
                return -1
            if c == "(":
                # an enumerated type; look for ')'
                if ")" in rawdata[j:]:
                    j = rawdata.find(")", j) + 1
                else:
                    return -1
                while rawdata[j:j+1].isspace():
                    j = j + 1
                if not rawdata[j:]:
                    # end of buffer, incomplete
                    return -1
            else:
                name, j = self._scan_name(j, declstartpos)
            c = rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                # Default value given as a quoted literal.
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == "#":
                # A '#constraint' such as #REQUIRED / #IMPLIED / #FIXED.
                if rawdata[j:] == "#":
                    # end of buffer
                    return -1
                name, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == '>':
                # all done
                return j + 1
# Internal -- scan past <!NOTATION declarations
def _parse_doctype_notation(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j < 0:
return j
rawdata = self.rawdata
while 1:
c = rawdata[j:j+1]
if not c:
# end of buffer; incomplete
return -1
if c == '>':
return j + 1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if not m:
return -1
j = m.end()
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
    # Internal -- scan past <!ENTITY declarations
    def _parse_doctype_entity(self, i, declstartpos):
        """Scan past a <!ENTITY declaration (general or parameter entity).

        Returns the index just past the closing '>', or -1 if the buffer
        ends before the declaration is complete.
        """
        rawdata = self.rawdata
        if rawdata[i:i+1] == "%":
            # Parameter entity: skip the '%' and any following whitespace.
            j = i + 1
            while 1:
                c = rawdata[j:j+1]
                if not c:
                    return -1
                if c.isspace():
                    j = j + 1
                else:
                    break
        else:
            j = i
        name, j = self._scan_name(j, declstartpos)
        if j < 0:
            return j
        while 1:
            c = self.rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1 # incomplete
            elif c == ">":
                return j + 1
            else:
                # Keywords such as SYSTEM/PUBLIC/NDATA appear as names.
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j
    # Internal -- scan a name token; returns (name, end-position) on success,
    # or (None, -1) if we've reached the end of the buffer.
    def _scan_name(self, i, declstartpos):
        """Scan a name token at index *i*.

        Returns ``(lowercased-name, index-past-token)``, or ``(None, -1)``
        when the buffer ends at or inside the token.  On a malformed token
        self.error() is invoked; if that returns, this method falls through
        and implicitly returns None (callers rely on error() raising).
        """
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = _declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                # The token may continue past the buffer; incomplete.
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.updatepos(declstartpos, i)
            self.error("expected name token at %r"
                       % rawdata[declstartpos:declstartpos+20])
    # To be overridden -- handlers for unknown objects
    def unknown_decl(self, data):
        """Hook invoked with the body of unrecognized declarations and
        marked sections; the default implementation ignores them."""
        pass
|
pengli09/Paddle | refs/heads/develop | python/paddle/utils/image_multiproc.py | 4 | import os, sys
import numpy as np
from PIL import Image
from cStringIO import StringIO
import multiprocessing
import functools
import itertools
from paddle.utils.image_util import *
from paddle.trainer.config_parser import logger
try:
import cv2
except ImportError:
logger.warning("OpenCV2 is not installed, using PIL to process")
cv2 = None
__all__ = ["CvTransformer", "PILTransformer", "MultiProcessImageTransformer"]
class CvTransformer(ImageTransformer):
    """
    CvTransformer used python-opencv to process image.

    Python 2 module: the integer divisions in resize()/crop_and_flip()
    rely on Py2 floor-division semantics for int inputs.
    """
    def __init__(
            self,
            min_size=None,
            crop_size=None,
            transpose=(2, 0, 1),  # transpose to C * H * W
            channel_swap=None,
            mean=None,
            is_train=True,
            is_color=True):
        # min_size: shorter-edge target used by resize();
        # crop_size: side length of the square crop;
        # is_train: random crop/flip when True, center crop when False.
        ImageTransformer.__init__(self, transpose, channel_swap, mean, is_color)
        self.min_size = min_size
        self.crop_size = crop_size
        self.is_train = is_train

    def resize(self, im, min_size):
        # Scale so the shorter edge equals min_size, keeping aspect ratio.
        row, col = im.shape[:2]
        new_row, new_col = min_size, min_size
        if row > col:
            new_row = min_size * row / col
        else:
            new_col = min_size * col / row
        # NOTE(review): cv2.resize expects dsize=(width, height) i.e.
        # (cols, rows); here (new_row, new_col) is passed -- confirm intended.
        im = cv2.resize(im, (new_row, new_col), interpolation=cv2.INTER_CUBIC)
        return im

    def crop_and_flip(self, im):
        """
        Return cropped image.
        The size of the cropped image is inner_size * inner_size.
        im: (H x W x K) ndarrays
        """
        row, col = im.shape[:2]
        start_h, start_w = 0, 0
        if self.is_train:
            # Random crop position during training.
            start_h = np.random.randint(0, row - self.crop_size + 1)
            start_w = np.random.randint(0, col - self.crop_size + 1)
        else:
            # Center crop during evaluation (Py2 integer division).
            start_h = (row - self.crop_size) / 2
            start_w = (col - self.crop_size) / 2
        end_h, end_w = start_h + self.crop_size, start_w + self.crop_size
        if self.is_color:
            im = im[start_h:end_h, start_w:end_w, :]
        else:
            im = im[start_h:end_h, start_w:end_w]
        # Horizontal flip with probability 1/2, training only.
        if (self.is_train) and (np.random.randint(2) == 0):
            if self.is_color:
                im = im[:, ::-1, :]
            else:
                im = im[:, ::-1]
        return im

    def transform(self, im):
        # Resize, crop/flip, then normalize via the base class.
        im = self.resize(im, self.min_size)
        im = self.crop_and_flip(im)
        # transpose, swap channel, sub mean
        im = im.astype('float32')
        ImageTransformer.transformer(self, im)
        return im

    def load_image_from_string(self, data):
        # Decode an in-memory encoded image (e.g. JPEG bytes).
        # NOTE(review): cv2.CV_LOAD_IMAGE_* flags exist only in old OpenCV
        # Python bindings -- confirm against the targeted cv2 version.
        flag = cv2.CV_LOAD_IMAGE_COLOR if self.is_color else cv2.CV_LOAD_IMAGE_GRAYSCALE
        im = cv2.imdecode(np.fromstring(data, np.uint8), flag)
        return im

    def transform_from_string(self, data):
        im = self.load_image_from_string(data)
        return self.transform(im)

    def load_image_from_file(self, file):
        flag = cv2.CV_LOAD_IMAGE_COLOR if self.is_color else cv2.CV_LOAD_IMAGE_GRAYSCALE
        im = cv2.imread(file, flag)
        return im

    def transform_from_file(self, file):
        im = self.load_image_from_file(file)
        return self.transform(im)
class PILTransformer(ImageTransformer):
    """
    PILTransformer used PIL to process image.

    Fallback used when OpenCV is unavailable (see module-level cv2 import).
    """
    def __init__(
            self,
            min_size=None,
            crop_size=None,
            transpose=(2, 0, 1),  # transpose to C * H * W
            channel_swap=None,
            mean=None,
            is_train=True,
            is_color=True):
        # Same parameters as CvTransformer; see that class for details.
        ImageTransformer.__init__(self, transpose, channel_swap, mean, is_color)
        self.min_size = min_size
        self.crop_size = crop_size
        self.is_train = is_train

    def resize(self, im, min_size):
        # NOTE(review): PIL's Image.size is (width, height); the names
        # row/col here mirror CvTransformer but may be swapped -- confirm.
        row, col = im.size[:2]
        new_row, new_col = min_size, min_size
        if row > col:
            new_row = min_size * row / col
        else:
            new_col = min_size * col / row
        im = im.resize((new_row, new_col), Image.ANTIALIAS)
        return im

    def crop_and_flip(self, im):
        """
        Return cropped image.
        The size of the cropped image is inner_size * inner_size.
        """
        row, col = im.size[:2]
        start_h, start_w = 0, 0
        if self.is_train:
            # Random crop during training.
            start_h = np.random.randint(0, row - self.crop_size + 1)
            start_w = np.random.randint(0, col - self.crop_size + 1)
        else:
            # Center crop during evaluation (Py2 integer division).
            start_h = (row - self.crop_size) / 2
            start_w = (col - self.crop_size) / 2
        end_h, end_w = start_h + self.crop_size, start_w + self.crop_size
        im = im.crop((start_h, start_w, end_h, end_w))
        # Horizontal flip with probability 1/2, training only.
        if (self.is_train) and (np.random.randint(2) == 0):
            im = im.transpose(Image.FLIP_LEFT_RIGHT)
        return im

    def transform(self, im):
        im = self.resize(im, self.min_size)
        im = self.crop_and_flip(im)
        im = np.array(im, dtype=np.float32)  # convert to numpy.array
        # transpose, swap channel, sub mean
        ImageTransformer.transformer(self, im)
        return im

    def load_image_from_string(self, data):
        # Decode an in-memory encoded image from a byte string.
        im = Image.open(StringIO(data))
        return im

    def transform_from_string(self, data):
        im = self.load_image_from_string(data)
        return self.transform(im)

    def load_image_from_file(self, file):
        im = Image.open(file)
        return im

    def transform_from_file(self, file):
        im = self.load_image_from_file(file)
        return self.transform(im)
def job(is_img_string, transformer, (data, label)):
    # Worker function for Pool.imap_unordered: transforms one sample and
    # carries its label through.  Uses Python 2 tuple-parameter unpacking
    # (a SyntaxError in Python 3) because the pool delivers each
    # (data, label) pair as a single argument.
    if is_img_string:
        return transformer.transform_from_string(data), label
    else:
        return transformer.transform_from_file(data), label
class MultiProcessImageTransformer(object):
    def __init__(self,
                 procnum=10,
                 resize_size=None,
                 crop_size=None,
                 transpose=(2, 0, 1),
                 channel_swap=None,
                 mean=None,
                 is_train=True,
                 is_color=True,
                 is_img_string=True):
        """
        Processing image with multi-process. If it is used in PyDataProvider,
        the simple usage for CNN is as follows:

        .. code-block:: python

            def hook(settings, is_train, **kwargs):
                settings.is_train = is_train
                settings.mean_values = np.array([103.939,116.779,123.68], dtype=np.float32)
                settings.input_types = [
                    dense_vector(3 * 224 * 224),
                    integer_value(1)]
                settings.transformer = MultiProcessImageTransformer(
                    procnum=10,
                    resize_size=256,
                    crop_size=224,
                    transpose=(2, 0, 1),
                    mean=settings.mean_values,
                    is_train=settings.is_train)

            @provider(init_hook=hook, pool_size=20480)
            def process(settings, file_list):
                with open(file_list, 'r') as fdata:
                    for line in fdata:
                        data_dic = np.load(line.strip())  # load the data batch pickled by Pickle.
                        data = data_dic['data']
                        labels = data_dic['label']
                        labels = np.array(labels, dtype=np.float32)
                        for im, lab in settings.dp.run(data, labels):
                            yield [im.astype('float32'), int(lab)]

        :param procnum: processor number.
        :type procnum: int
        :param resize_size: the shorter edge size of image after resizing.
        :type resize_size: int
        :param crop_size: the cropping size.
        :type crop_size: int
        :param transpose: the transpose order, Paddle only allow C * H * W order.
        :type transpose: tuple or list
        :param channel_swap: the channel swap order, RGB or BGR.
        :type channel_swap: tuple or list
        :param mean: the mean values of image, per-channel mean or element-wise mean.
        :type mean: array, The dimension is 1 for per-channel mean.
                    The dimension is 3 for element-wise mean.
        :param is_train: training period or testing period.
        :type is_train: bool.
        :param is_color: the image is color or gray.
        :type is_color: bool.
        :param is_img_string: The input can be the file name of image or image string.
        :type is_img_string: bool.
        """
        self.procnum = procnum
        # Worker pool is created once and reused across run() calls.
        self.pool = multiprocessing.Pool(procnum)
        self.is_img_string = is_img_string
        # Prefer the OpenCV backend when cv2 imported successfully.
        if cv2 is not None:
            self.transformer = CvTransformer(resize_size, crop_size, transpose,
                                             channel_swap, mean, is_train,
                                             is_color)
        else:
            self.transformer = PILTransformer(resize_size, crop_size, transpose,
                                              channel_swap, mean, is_train,
                                              is_color)

    def run(self, data, label):
        # Fan (data, label) pairs out to the pool; results arrive in
        # arbitrary order (imap_unordered).  Py2-only itertools.izip.
        fun = functools.partial(job, self.is_img_string, self.transformer)
        return self.pool.imap_unordered(
            fun, itertools.izip(data, label), chunksize=100 * self.procnum)
|
asteven/dpkt | refs/heads/master | dpkt/ip6.py | 17 | # $Id$
"""Internet Protocol, version 6."""
import dpkt
class IP6(dpkt.Packet):
    """IPv6 header with extension-header chain and payload parsing.

    Python 2 module (note the ``L`` long-integer literals).
    """
    __hdr__ = (
        ('v_fc_flow', 'I', 0x60000000L),  # version (4b), traffic class (8b), flow label (20b)
        ('plen', 'H', 0), # payload length (not including header)
        ('nxt', 'B', 0), # next header protocol
        ('hlim', 'B', 0), # hop limit
        ('src', '16s', ''),
        ('dst', '16s', '')
        )

    # XXX - to be shared with IP. We cannot refer to the ip module
    # right now because ip.__load_protos() expects the IP6 class to be
    # defined.
    _protosw = None

    # version: top 4 bits of v_fc_flow.
    def _get_v(self):
        return self.v_fc_flow >> 28
    def _set_v(self, v):
        self.v_fc_flow = (self.v_fc_flow & ~0xf0000000L) | (v << 28)
    v = property(_get_v, _set_v)

    # traffic class: bits 20-27 of v_fc_flow.
    def _get_fc(self):
        return (self.v_fc_flow >> 20) & 0xff
    def _set_fc(self, v):
        self.v_fc_flow = (self.v_fc_flow & ~0xff00000L) | (v << 20)
    fc = property(_get_fc, _set_fc)

    # flow label: low 20 bits of v_fc_flow.
    def _get_flow(self):
        return self.v_fc_flow & 0xfffff
    def _set_flow(self, v):
        self.v_fc_flow = (self.v_fc_flow & ~0xfffff) | (v & 0xfffff)
    flow = property(_get_flow, _set_flow)

    def unpack(self, buf):
        """Parse the fixed header, walk the extension-header chain, then
        dispatch the payload to the protocol class for the final 'next'."""
        dpkt.Packet.unpack(self, buf)
        self.extension_hdrs = dict(((i, None) for i in ext_hdrs))

        if self.plen:
            buf = self.data[:self.plen]
        else: # due to jumbo payload or TSO
            buf = self.data

        next = self.nxt

        # Each parsed extension header exposes .length (bytes consumed)
        # and .nxt (protocol of what follows).
        while (next in ext_hdrs):
            ext = ext_hdrs_cls[next](buf)
            self.extension_hdrs[next] = ext
            buf = buf[ext.length:]
            next = ext.nxt

        # set the payload protocol id
        setattr(self, 'p', next)

        try:
            self.data = self._protosw[next](buf)
            setattr(self, self.data.__class__.__name__.lower(), self.data)
        except (KeyError, dpkt.UnpackError):
            # Unknown protocol or malformed payload: keep raw bytes.
            self.data = buf

    def headers_str(self):
        """
        Output extension headers in order defined in RFC1883 (except dest opts)
        """
        header_str = ""

        for hdr in ext_hdrs:
            if not self.extension_hdrs[hdr] is None:
                header_str += str(self.extension_hdrs[hdr])
        return header_str

    def __str__(self):
        # Fill in the pseudo-header checksum for TCP (6), UDP (17) and
        # ICMPv6 (58) payloads whose checksum field is still zero.
        if (self.nxt == 6 or self.nxt == 17 or self.nxt == 58) and \
           not self.data.sum:
            # XXX - set TCP, UDP, and ICMPv6 checksums
            p = str(self.data)
            s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
            s = dpkt.in_cksum_add(0, s)
            s = dpkt.in_cksum_add(s, p)
            try:
                self.data.sum = dpkt.in_cksum_done(s)
            except AttributeError:
                # Payload has no 'sum' attribute (raw bytes); skip.
                pass
        return self.pack_hdr() + self.headers_str() + str(self.data)

    def set_proto(cls, p, pktclass):
        # Register a payload parser class for protocol number p.
        cls._protosw[p] = pktclass
    set_proto = classmethod(set_proto)

    def get_proto(cls, p):
        # Look up the payload parser class for protocol number p.
        return cls._protosw[p]
    get_proto = classmethod(get_proto)
import ip  # Python 2 implicit-relative import of dpkt's ip module

# We are most likely still in the middle of ip.__load_protos() which
# implicitly loads this module through __import__(), so the content of
# ip.IP._protosw is still incomplete at the moment. By sharing the
# same dictionary by reference as opposed to making a copy, when
# ip.__load_protos() finishes, we will also automatically get the most
# up-to-date dictionary.
IP6._protosw = ip.IP._protosw
class IP6ExtensionHeader(dpkt.Packet):
    """
    An extension header is very similar to a 'sub-packet'.
    We just want to re-use all the hdr unpacking etc.

    Subclasses set a ``length`` attribute in unpack() (total bytes
    consumed) and expose ``nxt`` so IP6.unpack() can walk the chain.
    """
    pass
class IP6OptsHeader(IP6ExtensionHeader):
    """TLV-encoded options header shared by Hop-by-Hop and Destination
    options; Pad1/PadN padding options are skipped, all others collected."""
    __hdr__ = (
        ('nxt', 'B', 0),  # next extension header protocol
        ('len', 'B', 0)   # option data length in 8 octet units (ignoring first 8 octets) so, len 0 == 64bit header
        )

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # Total header size in bytes: (len + 1) eight-octet units.
        self.length = (self.len + 1) * 8
        parsed = []
        pos = 0
        limit = self.length - 2  # option area excludes the nxt/len bytes
        while pos < limit:
            kind = ord(self.data[pos])
            if kind == 0:
                # Pad1: a lone padding byte with no length field.
                pos += 1
                continue
            size = ord(self.data[pos + 1])
            if kind != 1:
                # Record every option except PadN padding.
                parsed.append({'type': kind, 'opt_length': size,
                               'data': self.data[pos + 2:pos + 2 + size]})
            # Advance past type byte, length byte and the option payload.
            pos += size + 2
        self.options = parsed
# Hop-by-hop and destination options share the same TLV wire format,
# so both reuse IP6OptsHeader's parsing unchanged.
class IP6HopOptsHeader(IP6OptsHeader): pass

class IP6DstOptsHeader(IP6OptsHeader): pass
class IP6RoutingHeader(IP6ExtensionHeader):
    """IPv6 Routing extension header (type 0 routing, RFC 2460)."""
    __hdr__ = (
        ('nxt', 'B', 0),  # next extension header protocol
        ('len', 'B', 0),  # extension data length in 8 octect units (ignoring first 8 octets) (<= 46 for type 0)
        ('type', 'B', 0),  # routing type (currently, only 0 is used)
        ('segs_left', 'B', 0),  # remaining segments in route, until destination (<= 23)
        ('rsvd_sl_bits', 'I', 0),  # reserved (1 byte), strict/loose bitmap for addresses
        )

    def _get_sl_bits(self):
        # Strict/loose bitmap occupies the low 24 bits of rsvd_sl_bits.
        return self.rsvd_sl_bits & 0xffffff

    def _set_sl_bits(self, v):
        # BUG FIX: the setter previously masked with 0xfffff (20 bits),
        # inconsistent with the 24-bit field read by _get_sl_bits.
        self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xffffff) | (v & 0xffffff)
    sl_bits = property(_get_sl_bits, _set_sl_bits)

    def unpack(self, buf):
        """Parse the fixed fields and the trailing list of 16-byte addresses.

        Sets ``addresses`` (list of 16-byte strings) and ``length`` (total
        header size in bytes) for IP6.unpack()'s chain walk.
        """
        hdr_size = 8
        addr_size = 16

        dpkt.Packet.unpack(self, buf)

        # Each address is 16 bytes == two 8-octet units of 'len'.
        # (// keeps integer semantics on both Python 2 and 3.)
        addresses = []
        num_addresses = self.len // 2
        buf = buf[hdr_size:hdr_size + num_addresses * addr_size]

        for i in range(num_addresses):
            addresses.append(buf[i * addr_size: i * addr_size + addr_size])

        self.data = buf
        setattr(self, 'addresses', addresses)
        setattr(self, 'length', self.len * 8 + 8)
class IP6FragmentHeader(IP6ExtensionHeader):
    """IPv6 Fragment extension header (RFC 2460 section 4.5)."""
    __hdr__ = (
        ('nxt', 'B', 0),  # next extension header protocol
        ('resv', 'B', 0),  # reserved, set to 0
        ('frag_off_resv_m', 'H', 0),  # frag offset (13 bits), reserved zero (2 bits), More frags flag
        ('id', 'I', 0)  # fragments id
        )

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # Fixed-size header: no variable part to consume.
        setattr(self, 'length', self.__hdr_len__)

    def _get_frag_off(self):
        # Fragment offset lives in the top 13 bits.
        return self.frag_off_resv_m >> 3

    def _set_frag_off(self, v):
        # Clear bits 3-15, keep the low 3 (reserved + M flag).
        self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)
    frag_off = property(_get_frag_off, _set_frag_off)

    def _get_m_flag(self):
        # More-fragments flag is bit 0.
        return self.frag_off_resv_m & 1

    def _set_m_flag(self, v):
        # BUG FIX: the original used `& ~0xfffe`, which keeps ONLY bit 0 and
        # therefore wiped the 13-bit fragment offset whenever the flag was
        # set.  Clear just bit 0 and OR in the (masked) flag instead.
        self.frag_off_resv_m = (self.frag_off_resv_m & ~1) | (v & 1)
    m_flag = property(_get_m_flag, _set_m_flag)
class IP6AHHeader(IP6ExtensionHeader):
    """Authentication Header extension (AH)."""
    __hdr__ = (
        ('nxt', 'B', 0),  # next extension header protocol
        ('len', 'B', 0),  # length of header in 4 octet units (ignoring first 2 units)
        ('resv', 'H', 0),  # reserved, 2 bytes of 0
        ('spi', 'I', 0),  # SPI security parameter index
        ('seq', 'I', 0)  # sequence no.
        )

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # Total header size in bytes: (len + 2) four-octet units.
        setattr(self, 'length', (self.len + 2) * 4)
        # Authentication data (ICV) follows the fixed fields.
        setattr(self, 'auth_data', self.data[:(self.len - 1) * 4])
class IP6ESPHeader(IP6ExtensionHeader):
    # ESP payloads are encrypted, so there is nothing meaningful to parse
    # without the security association; fail loudly instead of guessing.
    def unpack(self, buf):
        """Always raises: ESP parsing is intentionally unsupported."""
        raise NotImplementedError("ESP extension headers are not supported.")
# Protocol numbers treated as extension headers, in the output order used
# by IP6.headers_str().
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP, ip.IP_PROTO_DSTOPTS]
# Map each extension protocol number to its parser class.
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
                ip.IP_PROTO_ROUTING: IP6RoutingHeader,
                ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
                ip.IP_PROTO_ESP: IP6ESPHeader,
                ip.IP_PROTO_AH: IP6AHHeader,
                ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
if __name__ == '__main__':
    import unittest

    # Self-test: round-trip real captured packets through the parser.
    class IP6TestCase(unittest.TestCase):

        def test_IP6(self):
            # TCP-over-IPv6 packet: parse, zero the checksum, re-serialize.
            s = '`\x00\x00\x00\x00(\x06@\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11$\xff\xfe\x8c\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80r\xcd\xca\x00\x16\x04\x84F\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\t\x00\x00\x02\x04\x05\xa0\x01\x03\x03\x00\x01\x01\x08\n}\x185?\x00\x00\x00\x00'
            ip = IP6(s)
            #print `ip`
            ip.data.sum = 0
            s2 = str(ip)
            ip2 = IP6(s)
            #print `ip2`
            # __str__ recomputes the checksum, so output matches the input.
            assert(s == s2)

        def test_IP6RoutingHeader(self):
            s = '`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
            ip = IP6(s)
            s2 = str(ip)
            # 43 is Routing header id
            assert(len(ip.extension_hdrs[43].addresses) == 2)
            assert(ip.tcp)
            assert(s == s2)

        def test_IP6FragmentHeader(self):
            s = '\x06\xee\xff\xfb\x00\x00\xff\xff'
            fh = IP6FragmentHeader(s)
            s2 = str(fh)
            assert(fh.nxt == 6)
            assert(fh.id == 65535)
            assert(fh.frag_off == 8191)
            assert(fh.m_flag == 1)

        def test_IP6OptionsHeader(self):
            # Options blob mixing PadN padding with three real options.
            s = ';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
            options = IP6OptsHeader(s).options
            assert(len(options) == 3)

        def test_IP6AHHeader(self):
            s = ';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
            ah = IP6AHHeader(s)
            assert(ah.length == 24)
            assert(ah.auth_data == 'xxxxxxxx')
            assert(ah.spi == 0x2020202)
            assert(ah.seq == 0x1010101)

        def test_IP6ExtensionHeaders(self):
            # Attach one of each supported extension header to a parsed
            # packet and confirm all five are present.
            p = '`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
            ip = IP6(p)

            o = ';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
            options = IP6HopOptsHeader(o)
            ip.extension_hdrs[0] = options
            fh = '\x06\xee\xff\xfb\x00\x00\xff\xff'
            ip.extension_hdrs[44] = IP6FragmentHeader(fh)
            ah = ';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
            ip.extension_hdrs[51] = IP6AHHeader(ah)
            do = ';\x02\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            ip.extension_hdrs[60] = IP6DstOptsHeader(do)

            assert(len([k for k in ip.extension_hdrs if (not ip.extension_hdrs[k] is None)]) == 5)

    unittest.main()
|
bgris/ODL_bgris | refs/heads/master | lib/python3.5/site-packages/networkx/algorithms/flow/tests/test_maxflow.py | 43 | # -*- coding: utf-8 -*-
"""Maximum flow algorithms test suite.
"""
from nose.tools import *
import networkx as nx
from networkx.algorithms.flow import build_flow_dict, build_residual_network
from networkx.algorithms.flow import edmonds_karp, preflow_push, shortest_augmenting_path
flow_funcs = [edmonds_karp, preflow_push, shortest_augmenting_path]
max_min_funcs = [nx.maximum_flow, nx.minimum_cut]
flow_value_funcs = [nx.maximum_flow_value, nx.minimum_cut_value]
interface_funcs = sum([max_min_funcs, flow_value_funcs], [])
all_funcs = sum([flow_funcs, interface_funcs], [])
msg = "Assertion failed in function: {0}"
msgi = "Assertion failed in function: {0} in interface {1}"
def compute_cutset(G, partition):
    """Return the set of edges (u, v) of G that cross the given cut.

    *partition* is a pair ``(reachable, non_reachable)`` of node sets;
    an edge belongs to the cutset when its tail is reachable and its
    head is not.
    """
    src_side, sink_side = partition
    crossing = set()
    for node in src_side:
        for nbr in G[node]:
            if nbr in sink_side:
                crossing.add((node, nbr))
    return crossing
def validate_flows(G, s, t, flowDict, solnValue, capacity, flow_func):
    """Assert *flowDict* is a feasible s-t flow on *G* of value *solnValue*.

    Checks that the flow dict mirrors G's node/edge structure, respects
    capacities and non-negativity, and conserves flow everywhere except
    at s (net out-flow solnValue) and t (net in-flow solnValue).
    """
    assert_equal(set(G), set(flowDict), msg=msg.format(flow_func.__name__))
    for u in G:
        assert_equal(set(G[u]), set(flowDict[u]),
                     msg=msg.format(flow_func.__name__))
    excess = dict((u, 0) for u in flowDict)
    for u in flowDict:
        for v, flow in flowDict[u].items():
            if capacity in G[u][v]:
                # FIX: this assertion previously lacked the diagnostic
                # message that every sibling assertion carries.
                ok_(flow <= G[u][v][capacity],
                    msg=msg.format(flow_func.__name__))
            ok_(flow >= 0, msg=msg.format(flow_func.__name__))
            excess[u] -= flow
            excess[v] += flow
    for u, exc in excess.items():
        if u == s:
            assert_equal(exc, -solnValue, msg=msg.format(flow_func.__name__))
        elif u == t:
            assert_equal(exc, solnValue, msg=msg.format(flow_func.__name__))
        else:
            assert_equal(exc, 0, msg=msg.format(flow_func.__name__))
def validate_cuts(G, s, t, solnValue, partition, capacity, flow_func):
    """Assert *partition* is a valid minimum s-t cut of weight *solnValue*.

    Verifies the partition covers only nodes of G, the crossing edges
    exist, their capacities sum to solnValue, and removing them
    disconnects t from s.
    """
    assert_true(all(n in G for n in partition[0]),
                msg=msg.format(flow_func.__name__))
    assert_true(all(n in G for n in partition[1]),
                msg=msg.format(flow_func.__name__))
    cutset = compute_cutset(G, partition)
    assert_true(all(G.has_edge(u, v) for (u, v) in cutset),
                msg=msg.format(flow_func.__name__))
    assert_equal(solnValue, sum(G[u][v][capacity] for (u, v) in cutset),
                 msg=msg.format(flow_func.__name__))
    H = G.copy()
    H.remove_edges_from(cutset)
    # Removing the cut edges must break all s-t connectivity.
    if not G.is_directed():
        assert_false(nx.is_connected(H), msg=msg.format(flow_func.__name__))
    else:
        assert_false(nx.is_strongly_connected(H),
                     msg=msg.format(flow_func.__name__))
def compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity='capacity'):
    """Run every algorithm in ``flow_funcs`` on (G, s, t) and validate it.

    For each algorithm, checks the computed max-flow value equals
    *solnValue*, validates the resulting flow dict, and validates the
    corresponding minimum cut.  Note: *solnFlows* is accepted for call-site
    symmetry but is not referenced in this body -- the computed flow dict
    is validated for feasibility/value instead.
    """
    for flow_func in flow_funcs:
        R = flow_func(G, s, t, capacity)
        # Test both legacy and new implementations.
        flow_value = R.graph['flow_value']
        flow_dict = build_flow_dict(G, R)
        assert_equal(flow_value, solnValue, msg=msg.format(flow_func.__name__))
        validate_flows(G, s, t, flow_dict, solnValue, capacity, flow_func)
        # Minimum cut
        cut_value, partition = nx.minimum_cut(G, s, t, capacity=capacity,
                                              flow_func=flow_func)
        validate_cuts(G, s, t, solnValue, partition, capacity, flow_func)
class TestMaxflowMinCutCommon:
    """Max-flow / min-cut cases run against every algorithm in flow_funcs.

    FIX: the second of two methods both named ``test_digraph4`` has been
    renamed ``test_digraph5`` -- the duplicate definition shadowed the
    first, so one of the two tests silently never ran.
    """

    def test_graph1(self):
        # Trivial undirected graph
        G = nx.Graph()
        G.add_edge(1,2, capacity = 1.0)

        solnFlows = {1: {2: 1.0},
                     2: {1: 1.0}}

        compare_flows_and_cuts(G, 1, 2, solnFlows, 1.0)

    def test_graph2(self):
        # A more complex undirected graph
        # adapted from www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow
        G = nx.Graph()
        G.add_edge('x','a', capacity = 3.0)
        G.add_edge('x','b', capacity = 1.0)
        G.add_edge('a','c', capacity = 3.0)
        G.add_edge('b','c', capacity = 5.0)
        G.add_edge('b','d', capacity = 4.0)
        G.add_edge('d','e', capacity = 2.0)
        G.add_edge('c','y', capacity = 2.0)
        G.add_edge('e','y', capacity = 3.0)

        H = {'x': {'a': 3, 'b': 1},
             'a': {'c': 3, 'x': 3},
             'b': {'c': 1, 'd': 2, 'x': 1},
             'c': {'a': 3, 'b': 1, 'y': 2},
             'd': {'b': 2, 'e': 2},
             'e': {'d': 2, 'y': 2},
             'y': {'c': 2, 'e': 2}}

        compare_flows_and_cuts(G, 'x', 'y', H, 4.0)

    def test_digraph1(self):
        # The classic directed graph example
        G = nx.DiGraph()
        G.add_edge('a','b', capacity = 1000.0)
        G.add_edge('a','c', capacity = 1000.0)
        G.add_edge('b','c', capacity = 1.0)
        G.add_edge('b','d', capacity = 1000.0)
        G.add_edge('c','d', capacity = 1000.0)

        H = {'a': {'b': 1000.0, 'c': 1000.0},
             'b': {'c': 0, 'd': 1000.0},
             'c': {'d': 1000.0},
             'd': {}}

        compare_flows_and_cuts(G, 'a', 'd', H, 2000.0)

    def test_digraph2(self):
        # An example in which some edges end up with zero flow.
        G = nx.DiGraph()
        G.add_edge('s', 'b', capacity = 2)
        G.add_edge('s', 'c', capacity = 1)
        G.add_edge('c', 'd', capacity = 1)
        G.add_edge('d', 'a', capacity = 1)
        G.add_edge('b', 'a', capacity = 2)
        G.add_edge('a', 't', capacity = 2)

        H = {'s': {'b': 2, 'c': 0},
             'c': {'d': 0},
             'd': {'a': 0},
             'b': {'a': 2},
             'a': {'t': 2},
             't': {}}

        compare_flows_and_cuts(G, 's', 't', H, 2)

    def test_digraph3(self):
        # A directed graph example from Cormen et al.
        G = nx.DiGraph()
        G.add_edge('s','v1', capacity = 16.0)
        G.add_edge('s','v2', capacity = 13.0)
        G.add_edge('v1','v2', capacity = 10.0)
        G.add_edge('v2','v1', capacity = 4.0)
        G.add_edge('v1','v3', capacity = 12.0)
        G.add_edge('v3','v2', capacity = 9.0)
        G.add_edge('v2','v4', capacity = 14.0)
        G.add_edge('v4','v3', capacity = 7.0)
        G.add_edge('v3','t', capacity = 20.0)
        G.add_edge('v4','t', capacity = 4.0)

        H = {'s': {'v1': 12.0, 'v2': 11.0},
             'v2': {'v1': 0, 'v4': 11.0},
             'v1': {'v2': 0, 'v3': 12.0},
             'v3': {'v2': 0, 't': 19.0},
             'v4': {'v3': 7.0, 't': 4.0},
             't': {}}

        compare_flows_and_cuts(G, 's', 't', H, 23.0)

    def test_digraph4(self):
        # A more complex directed graph
        # from www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow
        G = nx.DiGraph()
        G.add_edge('x','a', capacity = 3.0)
        G.add_edge('x','b', capacity = 1.0)
        G.add_edge('a','c', capacity = 3.0)
        G.add_edge('b','c', capacity = 5.0)
        G.add_edge('b','d', capacity = 4.0)
        G.add_edge('d','e', capacity = 2.0)
        G.add_edge('c','y', capacity = 2.0)
        G.add_edge('e','y', capacity = 3.0)

        H = {'x': {'a': 2.0, 'b': 1.0},
             'a': {'c': 2.0},
             'b': {'c': 0, 'd': 1.0},
             'c': {'y': 2.0},
             'd': {'e': 1.0},
             'e': {'y': 1.0},
             'y': {}}

        compare_flows_and_cuts(G, 'x', 'y', H, 3.0)

    def test_optional_capacity(self):
        # Test optional capacity parameter.
        G = nx.DiGraph()
        G.add_edge('x','a', spam = 3.0)
        G.add_edge('x','b', spam = 1.0)
        G.add_edge('a','c', spam = 3.0)
        G.add_edge('b','c', spam = 5.0)
        G.add_edge('b','d', spam = 4.0)
        G.add_edge('d','e', spam = 2.0)
        G.add_edge('c','y', spam = 2.0)
        G.add_edge('e','y', spam = 3.0)

        solnFlows = {'x': {'a': 2.0, 'b': 1.0},
                     'a': {'c': 2.0},
                     'b': {'c': 0, 'd': 1.0},
                     'c': {'y': 2.0},
                     'd': {'e': 1.0},
                     'e': {'y': 1.0},
                     'y': {}}
        solnValue = 3.0
        s = 'x'
        t = 'y'

        compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity = 'spam')

    def test_digraph_infcap_edges(self):
        # DiGraph with infinite capacity edges
        G = nx.DiGraph()
        G.add_edge('s', 'a')
        G.add_edge('s', 'b', capacity = 30)
        G.add_edge('a', 'c', capacity = 25)
        G.add_edge('b', 'c', capacity = 12)
        G.add_edge('a', 't', capacity = 60)
        G.add_edge('c', 't')

        H = {'s': {'a': 85, 'b': 12},
             'a': {'c': 25, 't': 60},
             'b': {'c': 12},
             'c': {'t': 37},
             't': {}}

        compare_flows_and_cuts(G, 's', 't', H, 97)

        # DiGraph with infinite capacity digon
        G = nx.DiGraph()
        G.add_edge('s', 'a', capacity = 85)
        G.add_edge('s', 'b', capacity = 30)
        G.add_edge('a', 'c')
        G.add_edge('c', 'a')
        G.add_edge('b', 'c', capacity = 12)
        G.add_edge('a', 't', capacity = 60)
        G.add_edge('c', 't', capacity = 37)

        H = {'s': {'a': 85, 'b': 12},
             'a': {'c': 25, 't': 60},
             'c': {'a': 0, 't': 37},
             'b': {'c': 12},
             't': {}}

        compare_flows_and_cuts(G, 's', 't', H, 97)

    def test_digraph_infcap_path(self):
        # Graph with infinite capacity (s, t)-path
        G = nx.DiGraph()
        G.add_edge('s', 'a')
        G.add_edge('s', 'b', capacity = 30)
        G.add_edge('a', 'c')
        G.add_edge('b', 'c', capacity = 12)
        G.add_edge('a', 't', capacity = 60)
        G.add_edge('c', 't')

        for flow_func in all_funcs:
            assert_raises(nx.NetworkXUnbounded,
                          flow_func, G, 's', 't')

    def test_graph_infcap_edges(self):
        # Undirected graph with infinite capacity edges
        G = nx.Graph()
        G.add_edge('s', 'a')
        G.add_edge('s', 'b', capacity = 30)
        G.add_edge('a', 'c', capacity = 25)
        G.add_edge('b', 'c', capacity = 12)
        G.add_edge('a', 't', capacity = 60)
        G.add_edge('c', 't')

        H = {'s': {'a': 85, 'b': 12},
             'a': {'c': 25, 's': 85, 't': 60},
             'b': {'c': 12, 's': 12},
             'c': {'a': 25, 'b': 12, 't': 37},
             't': {'a': 60, 'c': 37}}

        compare_flows_and_cuts(G, 's', 't', H, 97)

    def test_digraph5(self):
        # From ticket #429 by mfrasca.
        # (Renamed from a duplicate `test_digraph4`, which shadowed the
        # earlier method of the same name and disabled it.)
        G = nx.DiGraph()
        G.add_edge('s', 'a', capacity = 2)
        G.add_edge('s', 'b', capacity = 2)
        G.add_edge('a', 'b', capacity = 5)
        G.add_edge('a', 't', capacity = 1)
        G.add_edge('b', 'a', capacity = 1)
        G.add_edge('b', 't', capacity = 3)
        flowSoln = {'a': {'b': 1, 't': 1},
                    'b': {'a': 0, 't': 3},
                    's': {'a': 2, 'b': 2},
                    't': {}}
        compare_flows_and_cuts(G, 's', 't', flowSoln, 4)

    def test_disconnected(self):
        """Max flow between components of a disconnected graph is zero."""
        G = nx.Graph()
        G.add_weighted_edges_from([(0,1,1),(1,2,1),(2,3,1)],weight='capacity')
        G.remove_node(1)
        assert_equal(nx.maximum_flow_value(G,0,3), 0)
        flowSoln = {0: {}, 2: {3: 0}, 3: {2: 0}}
        compare_flows_and_cuts(G, 0, 3, flowSoln, 0)

    def test_source_target_not_in_graph(self):
        """Missing source or target must raise NetworkXError."""
        G = nx.Graph()
        G.add_weighted_edges_from([(0,1,1),(1,2,1),(2,3,1)],weight='capacity')
        G.remove_node(0)
        for flow_func in all_funcs:
            assert_raises(nx.NetworkXError, flow_func, G, 0, 3)
        G.add_weighted_edges_from([(0,1,1),(1,2,1),(2,3,1)],weight='capacity')
        G.remove_node(3)
        for flow_func in all_funcs:
            assert_raises(nx.NetworkXError, flow_func, G, 0, 3)

    def test_source_target_coincide(self):
        """Identical source and target must raise NetworkXError."""
        G = nx.Graph()
        G.add_node(0)
        for flow_func in all_funcs:
            assert_raises(nx.NetworkXError, flow_func, G, 0, 0)

    def test_multigraphs_raise(self):
        """Multigraph input must raise NetworkXError."""
        G = nx.MultiGraph()
        M = nx.MultiDiGraph()
        G.add_edges_from([(0, 1), (1, 0)], capacity=True)
        for flow_func in all_funcs:
            assert_raises(nx.NetworkXError, flow_func, G, 0, 0)
class TestMaxFlowMinCutInterface:
    """Tests for the high-level maximum_flow/minimum_cut interface functions.

    ``interface_funcs``, ``flow_funcs`` and ``max_min_funcs`` are module-level
    collections defined elsewhere in this file.
    """

    def setup(self):
        # Two small capacitated digraphs shared by the tests below:
        # G has max-flow value 3.0 from 'x' to 'y'; H has 1.0 from 0 to 2.
        G = nx.DiGraph()
        G.add_edge('x', 'a', capacity=3.0)
        G.add_edge('x', 'b', capacity=1.0)
        G.add_edge('a', 'c', capacity=3.0)
        G.add_edge('b', 'c', capacity=5.0)
        G.add_edge('b', 'd', capacity=4.0)
        G.add_edge('d', 'e', capacity=2.0)
        G.add_edge('c', 'y', capacity=2.0)
        G.add_edge('e', 'y', capacity=3.0)
        self.G = G
        H = nx.DiGraph()
        H.add_edge(0, 1, capacity=1.0)
        H.add_edge(1, 2, capacity=1.0)
        self.H = H

    def test_flow_func_not_callable(self):
        # Passing a non-callable as flow_func must raise NetworkXError.
        elements = ['this_should_be_callable', 10, set([1, 2, 3])]
        G = nx.Graph()
        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)],
                                  weight='capacity')
        # BUGFIX: the loop variable was named flow_func even though it
        # iterates the *interface* functions, and the assertion was
        # duplicated verbatim; renamed and deduplicated.
        for interface_func in interface_funcs:
            for element in elements:
                assert_raises(nx.NetworkXError,
                              interface_func, G, 0, 1, flow_func=element)

    def test_flow_func_parameters(self):
        # Every interface function must honour an explicit flow_func and
        # return the correct flow value (3.0 for self.G).
        G = self.G
        fv = 3.0
        for interface_func in interface_funcs:
            for flow_func in flow_funcs:
                result = interface_func(G, 'x', 'y', flow_func=flow_func)
                if interface_func in max_min_funcs:
                    result = result[0]
                assert_equal(fv, result, msg=msgi.format(flow_func.__name__,
                                                         interface_func.__name__))

    def test_minimum_cut_no_cutoff(self):
        # minimum_cut and minimum_cut_value reject the cutoff parameter.
        G = self.G
        for flow_func in flow_funcs:
            assert_raises(nx.NetworkXError, nx.minimum_cut, G, 'x', 'y',
                          flow_func=flow_func, cutoff=1.0)
            assert_raises(nx.NetworkXError, nx.minimum_cut_value, G, 'x', 'y',
                          flow_func=flow_func, cutoff=1.0)

    def test_kwargs(self):
        # Algorithm-specific keyword arguments are forwarded to flow_func.
        G = self.H
        fv = 1.0
        to_test = (
            (shortest_augmenting_path, dict(two_phase=True)),
            (preflow_push, dict(global_relabel_freq=5)),
        )
        for interface_func in interface_funcs:
            for flow_func, kwargs in to_test:
                result = interface_func(G, 0, 2, flow_func=flow_func, **kwargs)
                if interface_func in max_min_funcs:
                    result = result[0]
                assert_equal(fv, result, msg=msgi.format(flow_func.__name__,
                                                         interface_func.__name__))

    def test_kwargs_default_flow_func(self):
        # Algorithm-specific kwargs are rejected when no flow_func is given.
        G = self.H
        for interface_func in interface_funcs:
            assert_raises(nx.NetworkXError, interface_func,
                          G, 0, 1, global_relabel_freq=2)

    def test_reusing_residual(self):
        # A residual network built once may be reused across repeated calls
        # without changing the computed flow value.
        G = self.G
        fv = 3.0
        s, t = 'x', 'y'
        R = build_residual_network(G, 'capacity')
        for interface_func in interface_funcs:
            for flow_func in flow_funcs:
                for i in range(3):
                    result = interface_func(G, 'x', 'y', flow_func=flow_func,
                                            residual=R)
                    if interface_func in max_min_funcs:
                        result = result[0]
                    assert_equal(fv, result,
                                 msg=msgi.format(flow_func.__name__,
                                                 interface_func.__name__))
# Tests specific to one algorithm
# Tests specific to one algorithm
def test_preflow_push_global_relabel_freq():
    # global_relabel_freq=None disables global relabeling and must still
    # compute the correct flow; a negative frequency is invalid input.
    G = nx.DiGraph()
    G.add_edge(1, 2, capacity=1)
    R = preflow_push(G, 1, 2, global_relabel_freq=None)
    assert_equal(R.graph['flow_value'], 1)
    assert_raises(nx.NetworkXError, preflow_push, G, 1, 2,
                  global_relabel_freq=-1)
def test_preflow_push_makes_enough_space():
    #From ticket #1542
    # Regression test: preflow_push with value_only=False must allocate
    # enough space for the flow dict on this two-path graph.
    G = nx.DiGraph()
    G.add_path([0, 1, 3], capacity=1)
    G.add_path([1, 2, 3], capacity=1)
    R = preflow_push(G, 0, 3, value_only=False)
    assert_equal(R.graph['flow_value'], 1)
def test_shortest_augmenting_path_two_phase():
    # k disjoint unit-capacity paths of length p from 's' to 't': the flow
    # value must be exactly k with both the one-phase and two-phase variants.
    k = 5
    p = 1000
    G = nx.DiGraph()
    for i in range(k):
        G.add_edge('s', (i, 0), capacity=1)
        G.add_path(((i, j) for j in range(p)), capacity=1)
        G.add_edge((i, p - 1), 't', capacity=1)
    R = shortest_augmenting_path(G, 's', 't', two_phase=True)
    assert_equal(R.graph['flow_value'], k)
    R = shortest_augmenting_path(G, 's', 't', two_phase=False)
    assert_equal(R.graph['flow_value'], k)
class TestCutoff:
    """Tests for the ``cutoff`` parameter of the augmenting-path algorithms."""

    def test_cutoff(self):
        # With k capacity-2 disjoint paths the true max flow is 2*k; a
        # cutoff of k lets the algorithm stop early, so the reported value
        # only has to land in [k, 2*k].
        k = 5
        p = 1000
        G = nx.DiGraph()
        for i in range(k):
            G.add_edge('s', (i, 0), capacity=2)
            G.add_path(((i, j) for j in range(p)), capacity=2)
            G.add_edge((i, p - 1), 't', capacity=2)
        R = shortest_augmenting_path(G, 's', 't', two_phase=True, cutoff=k)
        ok_(k <= R.graph['flow_value'] <= 2 * k)
        R = shortest_augmenting_path(G, 's', 't', two_phase=False, cutoff=k)
        ok_(k <= R.graph['flow_value'] <= 2 * k)
        R = edmonds_karp(G, 's', 't', cutoff=k)
        ok_(k <= R.graph['flow_value'] <= 2 * k)

    def test_complete_graph_cutoff(self):
        # On K5 with unit capacities the flow grows one unit per augmenting
        # path, so the value returned with a cutoff must equal the cutoff.
        G = nx.complete_graph(5)
        nx.set_edge_attributes(G, 'capacity',
                               dict(((u, v), 1) for u, v in G.edges()))
        for flow_func in [shortest_augmenting_path, edmonds_karp]:
            for cutoff in [3, 2, 1]:
                result = nx.maximum_flow_value(G, 0, 4, flow_func=flow_func,
                                               cutoff=cutoff)
                assert_equal(cutoff, result,
                             msg="cutoff error in {0}".format(flow_func.__name__))
|
MenZil/kuma | refs/heads/master | vendor/packages/translate/storage/mozilla_lang.py | 23 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008, 2011 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
# Original Author: Dan Schafer <dschafer@mozilla.com>
# Date: 10 Jun 2008
"""A class to manage Mozilla .lang files."""
from translate.storage import base, txt
class LangUnit(base.TranslationUnit):
    """This is just a normal unit with a weird string output"""

    def __init__(self, source=None):
        # Per-unit location strings ("filename:lineno"), see addlocation().
        self.locations = []
        base.TranslationUnit.__init__(self, source)

    def __str__(self):
        # Serialized form of a unit:
        #   # optional developer comment lines
        #   ;source text
        #   target text[ {ok}]
        # " {ok}" marks a translation that is deliberately identical to the
        # source; an untranslated unit is emitted with the source as target.
        if self.source == self.target:
            unchanged = " {ok}"
        else:
            unchanged = ""
        if not self.istranslated():
            target = self.source
        else:
            target = self.target
        # NOTE(review): the presence test uses getnotes() (all origins) but the
        # rendered text uses getnotes('developer') only -- confirm intentional.
        if self.getnotes():
            notes = ('\n').join(["# %s" % note for note in self.getnotes('developer').split("\n")])
            return u"%s\n;%s\n%s%s" % (notes, self.source, target, unchanged)
        return u";%s\n%s%s" % (self.source, target, unchanged)

    def getlocations(self):
        # Return the locations recorded for this unit.
        return self.locations

    def addlocation(self, location):
        # Record a location string, e.g. "file.lang:42".
        self.locations.append(location)
class LangStore(txt.TxtFile):
    """We extend TxtFile, since that has a lot of useful stuff for encoding"""

    UnitClass = LangUnit
    Name = "Mozilla .lang"
    Extensions = ['lang']

    def __init__(self, inputfile=None, flavour=None, encoding="utf-8", mark_active=False):
        # is_active: True when the file carries the "## active ##" marker;
        # mark_active: force the marker to be written on output.
        self.is_active = False
        self.mark_active = mark_active
        super(LangStore, self).__init__(inputfile, flavour, encoding)

    def parse(self, lines):
        """Parse .lang text: each ';source' line is followed by a translation
        line; '#' lines preceding a source become developer notes."""
        # Have we just seen a ';' line, and so are ready for a translation
        readyTrans = False
        comment = ""
        if not isinstance(lines, list):
            lines = lines.split("\n")
        for lineoffset, line in enumerate(lines):
            line = line.decode(self.encoding).rstrip("\n").rstrip("\r")
            if lineoffset == 0 and line == "## active ##":
                self.is_active = True
                continue
            if len(line) == 0 and not readyTrans:  # Skip blank lines
                continue
            if readyTrans:  # If we are expecting a translation, set the target
                # 'u' is the unit created for the preceding ';' line below.
                if line != u.source:
                    # Strip the " {ok}" marker that flags deliberately
                    # identical translations.
                    u.target = line.replace(" {ok}", "")
                else:
                    # Text identical to the source means "untranslated".
                    u.target = ""
                readyTrans = False  # We already have our translation
                continue
            if line.startswith('#') and not line.startswith('##'):
                # Read comments, but not meta tags (e.g. '## TAG')
                comment += line[1:].strip() + "\n"
            if line.startswith(';'):
                u = self.addsourceunit(line[1:])
                readyTrans = True  # Now expecting a translation on the next line
                u.addlocation("%s:%d" % (self.filename, lineoffset + 1))
                # BUGFIX: was `if comment is not None:`, which is always true
                # (comment starts as "" and is never set to None), so every
                # comment-less unit received an empty developer note.
                if comment:
                    u.addnote(comment[:-1], 'developer')
                    comment = ""

    def __str__(self):
        # Serialize: optional active marker, then units separated by blank lines.
        ret_string = ""
        if self.is_active or self.mark_active:
            ret_string += "## active ##\n"
        ret_string += u"\n\n\n".join([unicode(unit) for unit in self.units]).encode('utf-8')
        ret_string += "\n"
        return ret_string
|
intelie/pycollector | refs/heads/master | src/third/SimpleCV/Features/BlobMaker.py | 2 | from SimpleCV.base import *
class BlobMaker:
"""
Blob maker encapsulates all of the contour extraction process and data, so
it can be used inside the image class, or extended and used outside the image
class. The general idea is that the blob maker provides the utilites that one
would use for blob extraction. Later implementations may include tracking and
other features.
"""
mMemStorage = None
def __init__(self):
self.mMemStorage = cv.CreateMemStorage()
return None
def extractUsingModel(self, img, colormodel,minsize=10, maxsize=0):
"""
Extract blobs using a color model
img - The input image
colormodel - The color model to use.
minsize - The minimum size of the returned features.
maxsize - The maximum size of the returned features 0=uses the default value.
Parameters:
img - Image
colormodel - ColorModel object
minsize - Int
maxsize - Int
"""
if (maxsize <= 0):
maxsize = img.width * img.height
gray = colormodel.threshold(img)
blobs = self.extractFromBinary(gray,img,minArea=minsize,maxArea=maxsize)
retVal = sorted(blobs,key=lambda x: x.mArea, reverse=True)
return FeatureSet(retVal)
def extract(self, img, threshval = 127, minsize=10, maxsize=0, threshblocksize=3, threshconstant=5):
"""
This method performs a threshold operation on the input image and then
extracts and returns the blobs.
img - The input image (color or b&w)
threshval - The threshold value for the binarize operation. If threshval = -1 adaptive thresholding is used
minsize - The minimum blob size in pixels.
maxsize - The maximum blob size in pixels. 0=uses the default value.
threshblocksize - The adaptive threhold block size.
threshconstant - The minimum to subtract off the adaptive threshold
"""
if (maxsize <= 0):
maxsize = img.width * img.height
#create a single channel image, thresholded to parameters
blobs = self.extractFromBinary(img.binarize(threshval, 255, threshblocksize, threshconstant).invert(),img,minsize,maxsize)
retVal = sorted(blobs,key=lambda x: x.mArea, reverse=True)
return FeatureSet(retVal)
def extractFromBinary(self,binaryImg,colorImg, minsize = 5, maxsize = -1):
"""
This method performs blob extraction given a binary source image that is used
to get the blob images, and a color source image.
binaryImg- The binary image with the blobs.
colorImg - The color image.
minSize - The minimum size of the blobs in pixels.
maxSize - The maximum blob size in pixels.
"""
#If you hit this recursion limit may god have mercy on your soul.
#If you really are having problems set the value higher, but this means
# you have over 10,000,000 blobs in your image.
sys.setrecursionlimit(5000)
#h_next moves to the next external contour
#v_next() moves to the next internal contour
if (maxsize <= 0):
maxsize = colorImg.width * colorImg.height
retVal = []
test = binaryImg.meanColor()
if( test[0]==0.00 and test[1]==0.00 and test[2]==0.00):
return FeatureSet(retVal)
# There are a couple of weird corner cases with the opencv
# connect components libraries - when you try to find contours
# in an all black image, or an image with a single white pixel
# that sits on the edge of an image the whole thing explodes
# this check catches those bugs. -KAS
# Also I am submitting a bug report to Willow Garage - please bare with us.
ptest = 510.0/(binaryImg.width*binaryImg.height) # val if two pixels are white
if( test[0]<ptest and test[1]<ptest and test[2]<ptest):
return retVal
seq = cv.FindContours( binaryImg._getGrayscaleBitmap(), self.mMemStorage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
try:
# note to self
# http://code.activestate.com/recipes/474088-tail-call-optimization-decorator/
retVal = self._extractFromBinary(seq,False,colorImg,minsize,maxsize)
except RuntimeError,e:
warnings.warn("You exceeded the recursion limit. This means you probably have too many blobs in your image. We suggest you do some morphological operations (erode/dilate) to reduce the number of blobs in your image. This function was designed to max out at about 5000 blobs per image.")
except:
warnings.warn("SimpleCV Find Blobs Failed - This could be an OpenCV python binding issue")
del seq
return FeatureSet(retVal)
def _extractFromBinary(self, seq, isaHole, colorImg,minsize,maxsize):
"""
The recursive entry point for the blob extraction. The blobs and holes are presented
as a tree and we traverse up and across the tree.
"""
retVal = []
if( seq is None ):
return retVal
if( not isaHole ): #if we aren't a hole then we are an object, so get and return our featuress
temp = self._extractData(seq,colorImg,minsize,maxsize)
if( temp is not None ):
retVal.append(temp)
#get the current feature
nextBlob = seq.h_next() # move to the next feature on our level
if( nextBlob is not None ):
#the next object is whatever this object is, add its list to ours
retVal += self._extractFromBinary(nextBlob, isaHole, colorImg, minsize,maxsize)
nextLayer = seq.v_next() # move down a layer
if(nextLayer is not None): #the next object, since it is down a layer is different
retVal += self._extractFromBinary(nextLayer, not isaHole, colorImg, minsize,maxsize)
return retVal
def _extractData(self,seq,color,minsize,maxsize):
"""
Extract the bulk of the data from a give blob. If the blob's are is too large
or too small the method returns none.
"""
if( seq is None or not len(seq)):
return None
area = cv.ContourArea(seq)
if( area < minsize or area > maxsize):
return None
retVal = Blob()
retVal.image = color
retVal.mArea = area
retVal.mMinRectangle = cv.MinAreaRect2(seq)
retVal.mBoundingBox = cv.BoundingRect(seq)
retVal.x = retVal.mBoundingBox[0]+(retVal.mBoundingBox[2]/2)
retVal.y = retVal.mBoundingBox[1]+(retVal.mBoundingBox[3]/2)
retVal.mPerimeter = cv.ArcLength(seq)
if( seq is not None): #KAS
retVal.mContour = list(seq)
chull = cv.ConvexHull2(seq,cv.CreateMemStorage(),return_points=1)
retVal.mConvexHull = list(chull)
retVal.mHullMask = self._getHullMask(chull,retVal.mBoundingBox)
del chull
moments = cv.Moments(seq)
retVal.m00 = area
retVal.m10 = moments.m10
retVal.m01 = moments.m01
retVal.m11 = moments.m11
retVal.m20 = moments.m20
retVal.m02 = moments.m02
retVal.m21 = moments.m21
retVal.m12 = moments.m12
retVal.mHu = cv.GetHuMoments(moments)
retVal.mMask = self._getMask(seq,retVal.mBoundingBox)
mask = retVal.mMask
retVal.mAvgColor = self._getAvg(color.getBitmap(),retVal.mBoundingBox,mask)
retVal.mAvgColor = retVal.mAvgColor[0:3]
retVal.mAvgColor = self._getAvg(color.getBitmap(),retVal.mBoundingBox,mask)
retVal.mAvgColor = retVal.mAvgColor[0:3]
retVal.mImg = self._getBlobAsImage(seq,retVal.mBoundingBox,color.getBitmap(),mask)
retVal.mHoleContour = self._getHoles(seq)
retVal.mAspectRatio = retVal.mMinRectangle[1][0]/retVal.mMinRectangle[1][1]
bb = retVal.mBoundingBox
retVal.points.append((bb[0], bb[1]))
retVal.points.append((bb[0] + bb[2], bb[1]))
retVal.points.append((bb[0] + bb[2], bb[1] + bb[3]))
retVal.points.append((bb[0], bb[1] + bb[3]))
return retVal
def _getHoles(self,seq):
"""
This method returns the holes associated with a blob as a list of tuples.
"""
retVal = None
holes = seq.v_next()
if( holes is not None ):
retVal = [list(holes)]
while( holes.h_next() is not None ):
holes = holes.h_next();
temp = list(holes)
if( len(temp) >= 3 ): #exclude single pixel holes
retVal.append(temp)
return retVal
def _getMask(self,seq,bb):
"""
Return a binary image of a particular contour sequence.
"""
#bb = cv.BoundingRect(seq)
mask = cv.CreateImage((bb[2],bb[3]),cv.IPL_DEPTH_8U,1)
cv.Zero(mask)
cv.DrawContours(mask,seq,(255),(0),0,thickness=-1, offset=(-1*bb[0],-1*bb[1]))
holes = seq.v_next()
if( holes is not None ):
cv.DrawContours(mask,holes,(0),(255),0,thickness=-1, offset=(-1*bb[0],-1*bb[1]))
while( holes.h_next() is not None ):
holes = holes.h_next();
if(holes is not None):
cv.DrawContours(mask,holes,(0),(255),0,thickness=-1, offset=(-1*bb[0],-1*bb[1]))
return mask
def _getHullMask(self,hull,bb):
"""
Return a mask of the convex hull of a blob.
"""
bb = cv.BoundingRect(hull)
mask = cv.CreateImage((bb[2],bb[3]),cv.IPL_DEPTH_8U,1)
cv.Zero(mask)
cv.DrawContours(mask,hull,(255),(0),0,thickness=-1, offset=(-1*bb[0],-1*bb[1]))
return mask
def _getAvg(self,colorbitmap,bb,mask):
"""
Calculate the average color of a blob given the mask.
"""
cv.SetImageROI(colorbitmap,bb)
#may need the offset parameter
avg = cv.Avg(colorbitmap,mask)
cv.ResetImageROI(colorbitmap)
return avg
def _getBlobAsImage(self,seq,bb,colorbitmap,mask):
"""
Return an image that contains just pixels defined by the blob sequence.
"""
cv.SetImageROI(colorbitmap,bb)
outputImg = cv.CreateImage((bb[2],bb[3]),cv.IPL_DEPTH_8U,3)
cv.Zero(outputImg)
cv.Copy(colorbitmap,outputImg,mask)
cv.ResetImageROI(colorbitmap)
return(Image(outputImg))
from SimpleCV.ImageClass import Image
from SimpleCV.Features.Features import FeatureSet
from SimpleCV.Features.Blob import Blob
|
okwow123/djangol2 | refs/heads/master | example/env/lib/python2.7/site-packages/requests/packages/urllib3/fields.py | 288 | from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
    """
    Guess the "Content-Type" of a file.

    :param filename:
        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
    :param default:
        If no "Content-Type" can be guessed, default to `default`.
    """
    if not filename:
        return default
    guessed, _ = mimetypes.guess_type(filename)
    return guessed or default
def format_header_param(name, value):
    """
    Format and quote a single header parameter.

    Useful for header parameters which might contain non-ASCII values, like
    file names. Follows RFC 2231, as suggested by RFC 2388 Section 4.4.

    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as a unicode string.
    """
    needs_encoding = any(ch in value for ch in '"\\\r\n')
    if not needs_encoding:
        candidate = '%s="%s"' % (name, value)
        try:
            candidate.encode('ascii')
        except (UnicodeEncodeError, UnicodeDecodeError):
            pass  # non-ASCII value: fall through to RFC 2231 encoding
        else:
            return candidate
    if not six.PY3 and isinstance(value, six.text_type):  # Python 2:
        value = value.encode('utf-8')
    encoded = email.utils.encode_rfc2231(value, 'utf-8')
    return '%s*=%s' % (name, encoded)
class RequestField(object):
    """
    A data container for request body parameters.

    :param name:
        The name of this request field.
    :param data:
        The data/value body.
    :param filename:
        An optional filename of the request field.
    :param headers:
        An optional dict-like object of headers to initially use for the field.
    """

    def __init__(self, name, data, filename=None, headers=None):
        self._name = name
        self._filename = filename
        self.data = data
        # Copy the caller's headers so later mutation stays local.
        self.headers = dict(headers) if headers else {}

    @classmethod
    def from_tuples(cls, fieldname, value):
        """
        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.

        Supports constructing :class:`~urllib3.fields.RequestField` from
        parameter of key/value strings AND key/filetuple. A filetuple is a
        (filename, data, MIME type) tuple where the MIME type is optional.
        For example::

            'foo': 'bar',
            'fakefile': ('foofile.txt', 'contents of foofile'),
            'realfile': ('barfile.txt', open('realfile').read()),
            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
            'nonamefile': 'contents of nonamefile field',

        Field names and filenames must be unicode.
        """
        filename = None
        content_type = None
        data = value
        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                filename, data = value
                content_type = guess_content_type(filename)
        request_param = cls(fieldname, data, filename=filename)
        request_param.make_multipart(content_type=content_type)
        return request_param

    def _render_part(self, name, value):
        """
        Overridable helper function to format a single header parameter.

        :param name:
            The name of the parameter, a string expected to be ASCII only.
        :param value:
            The value of the parameter, provided as a unicode string.
        """
        return format_header_param(name, value)

    def _render_parts(self, header_parts):
        """
        Format and quote a single header, built from multiple items.

        Useful for single headers that are composed of multiple items, e.g.
        'Content-Disposition' fields.

        :param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
            as `k1="v1"; k2="v2"; ...`.
        """
        if isinstance(header_parts, dict):
            header_parts = header_parts.items()
        rendered = [self._render_part(key, val)
                    for key, val in header_parts
                    if val is not None]
        return '; '.join(rendered)

    def render_headers(self):
        """
        Renders the headers for this request field.
        """
        # Well-known headers first, in a fixed order, then everything else
        # in dict order; a blank line terminates the header block.
        priority = ('Content-Disposition', 'Content-Type', 'Content-Location')
        lines = []
        for key in priority:
            value = self.headers.get(key, False)
            if value:
                lines.append('%s: %s' % (key, value))
        for key, value in self.headers.items():
            if key not in priority and value:
                lines.append('%s: %s' % (key, value))
        lines.append('\r\n')
        return '\r\n'.join(lines)

    def make_multipart(self, content_disposition=None, content_type=None,
                       content_location=None):
        """
        Makes this request field into a multipart request field.

        This method overrides "Content-Disposition", "Content-Type" and
        "Content-Location" headers to the request parameter.

        :param content_type:
            The 'Content-Type' of the request body.
        :param content_location:
            The 'Content-Location' of the request body.
        """
        disposition = content_disposition or 'form-data'
        params = self._render_parts(
            (('name', self._name), ('filename', self._filename))
        )
        self.headers['Content-Disposition'] = disposition + '; ' + params
        self.headers['Content-Type'] = content_type
        self.headers['Content-Location'] = content_location
|
A-deLuna/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/origin_check_wsh.py | 499 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def web_socket_do_extra_handshake(request):
    """Accept the handshake only when the Origin is http://example.com."""
    origin = request.ws_origin
    if origin != 'http://example.com':
        raise ValueError('Unacceptable origin: %r' % origin)
def web_socket_transfer_data(request):
    """Write a fixed diagnostic message identifying this handler and request."""
    message = ('origin_check_wsh.py is called for %s, %s'
               % (request.ws_resource, request.ws_protocol))
    request.connection.write(message)
# vi:sts=4 sw=4 et
|
zanderle/django | refs/heads/master | django/contrib/gis/db/models/sql/__init__.py | 476 | from django.contrib.gis.db.models.sql.conversion import (
AreaField, DistanceField, GeomField, GMLField,
)
__all__ = [
'AreaField', 'DistanceField', 'GeomField', 'GMLField'
]
|
indrajitr/ansible | refs/heads/devel | test/support/windows-integration/plugins/modules/win_whoami.py | 68 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_whoami
version_added: "2.5"
short_description: Get information about the current user and process
description:
- Designed to return the same information as the C(whoami /all) command.
- Also includes information missing from C(whoami) such as logon metadata like
logon rights, id, type.
notes:
- If running this module with a non admin user, the logon rights will be an
empty list as Administrator rights are required to query LSA for the
information.
seealso:
- module: win_credential
- module: win_group_membership
- module: win_user_right
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Get whoami information
win_whoami:
'''
RETURN = r'''
authentication_package:
description: The name of the authentication package used to authenticate the
user in the session.
returned: success
type: str
sample: Negotiate
user_flags:
description: The user flags for the logon session, see UserFlags in
U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380128).
returned: success
type: str
sample: Winlogon
upn:
description: The user principal name of the current user.
returned: success
type: str
sample: Administrator@DOMAIN.COM
logon_type:
description: The logon type that identifies the logon method, see
U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380129.aspx).
returned: success
type: str
sample: Network
privileges:
description: A dictionary of privileges and their state on the logon token.
returned: success
type: dict
sample: {
"SeChangeNotifyPrivileges": "enabled-by-default",
"SeRemoteShutdownPrivilege": "disabled",
"SeDebugPrivilege": "enabled"
}
label:
description: The mandatory label set to the logon session.
returned: success
type: complex
contains:
domain_name:
description: The domain name of the label SID.
returned: success
type: str
sample: Mandatory Label
sid:
description: The SID in string form.
returned: success
type: str
sample: S-1-16-12288
account_name:
description: The account name of the label SID.
returned: success
type: str
sample: High Mandatory Level
type:
description: The type of SID.
returned: success
type: str
sample: Label
impersonation_level:
description: The impersonation level of the token, only valid if
C(token_type) is C(TokenImpersonation), see
U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa379572.aspx).
returned: success
type: str
sample: SecurityAnonymous
login_time:
description: The logon time in ISO 8601 format
returned: success
type: str
sample: '2017-11-27T06:24:14.3321665+10:00'
groups:
description: A list of groups and attributes that the user is a member of.
returned: success
type: list
sample: [
{
"account_name": "Domain Users",
"domain_name": "DOMAIN",
"attributes": [
"Mandatory",
"Enabled by default",
"Enabled"
],
"sid": "S-1-5-21-1654078763-769949647-2968445802-513",
"type": "Group"
},
{
"account_name": "Administrators",
"domain_name": "BUILTIN",
"attributes": [
"Mandatory",
"Enabled by default",
"Enabled",
"Owner"
],
"sid": "S-1-5-32-544",
"type": "Alias"
}
]
account:
description: The running account SID details.
returned: success
type: complex
contains:
domain_name:
description: The domain name of the account SID.
returned: success
type: str
sample: DOMAIN
sid:
description: The SID in string form.
returned: success
type: str
sample: S-1-5-21-1654078763-769949647-2968445802-500
account_name:
description: The account name of the account SID.
returned: success
type: str
sample: Administrator
type:
description: The type of SID.
returned: success
type: str
sample: User
login_domain:
description: The name of the domain used to authenticate the owner of the
session.
returned: success
type: str
sample: DOMAIN
rights:
description: A list of logon rights assigned to the logon.
returned: success and running user is a member of the local Administrators group
type: list
sample: [
"SeNetworkLogonRight",
"SeInteractiveLogonRight",
"SeBatchLogonRight",
"SeRemoteInteractiveLogonRight"
]
logon_server:
description: The name of the server used to authenticate the owner of the
logon session.
returned: success
type: str
sample: DC01
logon_id:
description: The unique identifier of the logon session.
returned: success
type: int
sample: 20470143
dns_domain_name:
description: The DNS name of the logon session, this is an empty string if
this is not set.
returned: success
type: str
sample: DOMAIN.COM
token_type:
description: The token type to indicate whether it is a primary or
impersonation token.
returned: success
type: str
sample: TokenPrimary
'''
|
wojciechtanski/robotframework | refs/heads/master | utest/utils/test_robotpath.py | 21 | import unittest
import os
import os.path
from robot.utils import abspath, normpath, get_link_path, WINDOWS
from robot.utils.robotpath import CASE_INSENSITIVE_FILESYSTEM
from robot.utils.asserts import assert_equal, assert_true
class TestAbspathNormpath(unittest.TestCase):
    """Tests for robot.utils abspath/normpath, covering POSIX and Windows
    input forms and optional case normalization."""

    def test_abspath(self):
        # Every input must absolutize to os.path.abspath of the expected
        # value, return unicode, and lower-case on case-insensitive
        # filesystems when case_normalize=True.
        for inp, exp in self._get_inputs():
            exp = os.path.abspath(exp)
            path = abspath(inp)
            assert_equal(path, exp, inp)
            assert_true(isinstance(path, unicode), inp)
            exp = exp.lower() if CASE_INSENSITIVE_FILESYSTEM else exp
            path = abspath(inp, case_normalize=True)
            assert_equal(path, exp, inp)
            assert_true(isinstance(path, unicode), inp)

    def test_abspath_when_cwd_is_non_ascii(self):
        # abspath('.') must survive a non-ASCII current directory.
        orig = abspath('.')
        nonasc = u'\xe4'
        os.mkdir(nonasc)
        os.chdir(nonasc)
        try:
            assert_equal(abspath('.'), orig + os.sep + nonasc)
        finally:
            os.chdir('..')
            os.rmdir(nonasc)

    if WINDOWS:
        # UNC paths must normalize without losing the \\server prefix.
        unc_path = r'\\server\D$\dir\.\f1\..\\f2'
        unc_exp = r'\\server\D$\dir\f2'

        def test_unc_path(self):
            assert_equal(abspath(self.unc_path), self.unc_exp)

        def test_unc_path_when_chdir_is_root(self):
            orig = abspath('.')
            os.chdir('\\')
            try:
                assert_equal(abspath(self.unc_path), self.unc_exp)
            finally:
                os.chdir(orig)

    def test_normpath(self):
        # Same expectations as test_abspath but without absolutization.
        for inp, exp in self._get_inputs():
            path = normpath(inp)
            assert_equal(path, exp, inp)
            assert_true(isinstance(path, unicode), inp)
            exp = exp.lower() if CASE_INSENSITIVE_FILESYSTEM else exp
            path = normpath(inp, case_normalize=True)
            assert_equal(path, exp, inp)
            assert_true(isinstance(path, unicode), inp)

    def _get_inputs(self):
        # Yield (input, expected) pairs, augmenting each base pair with
        # trailing-separator and './'-style variants that must normalize
        # to the same expected value.
        inputs = self._windows_inputs if WINDOWS else self._posix_inputs
        for inp, exp in inputs():
            yield inp, exp
            if inp not in ['', os.sep]:
                for ext in [os.sep, os.sep+'.', os.sep+'.'+os.sep]:
                    yield inp + ext, exp
                if inp.endswith(os.sep):
                    for ext in ['.', '.'+os.sep, '.'+os.sep+'.']:
                        yield inp + ext, exp
                yield inp + 'foo' + os.sep + '..', exp

    def _posix_inputs(self):
        return [('/tmp/', '/tmp'),
                ('/var/../opt/../tmp/.', '/tmp'),
                ('/non/Existing/..', '/non'),
                ('/', '/')] + self._generic_inputs()

    def _windows_inputs(self):
        # Cover drive letters in both cases, bare drive specs ('c:'), and
        # both slash styles for every generic input.
        inputs = [('c:\\temp', 'c:\\temp'),
                  ('C:\\TEMP\\', 'C:\\TEMP'),
                  ('C:\\xxx\\..\\yyy\\..\\temp\\.', 'C:\\temp'),
                  ('c:\\Non\\Existing\\..', 'c:\\Non')]
        for x in 'ABCDEFGHIJKLMNOPQRSTUVXYZ':
            base = '%s:\\' % x
            inputs.append((base, base))
            inputs.append((base.lower(), base.lower()))
            inputs.append((base[:2], base))
            inputs.append((base[:2].lower(), base.lower()))
            inputs.append((base+'\\foo\\..\\.\\BAR\\\\', base+'BAR'))
        inputs += [(inp.replace('/', '\\'), exp) for inp, exp in inputs]
        for inp, exp in self._generic_inputs():
            exp = exp.replace('/', '\\')
            inputs.extend([(inp, exp), (inp.replace('/', '\\'), exp)])
        return inputs

    def _generic_inputs(self):
        # Platform-independent relative-path cases, incl. non-ASCII names.
        return [('', '.'),
                ('.', '.'),
                ('./', '.'),
                ('..', '..'),
                ('../', '..'),
                ('../..', '../..'),
                ('foo', 'foo'),
                ('foo/bar', 'foo/bar'),
                (u'\xe4', u'\xe4'),
                (u'\xe4/\xf6', u'\xe4/\xf6'),
                ('./foo', 'foo'),
                ('foo/.', 'foo'),
                ('foo/..', '.'),
                ('foo/../bar', 'bar'),
                ('foo/bar/zap/..', 'foo/bar')]
class TestGetLinkPath(unittest.TestCase):
def test_get_link_path(self):
    # Relative links between basedir and target must match the expected
    # value on the current platform; 'R:' is lower-cased so the Windows
    # drive-letter expectation compares consistently.
    inputs = self._posix_inputs if os.sep == '/' else self._windows_inputs
    for basedir, target, expected in inputs():
        assert_equal(get_link_path(target, basedir).replace('R:', 'r:'),
                     expected, '%s -> %s' % (target, basedir))
def test_get_link_path_to_non_existing_path(self):
    # Paths need not exist on disk for a relative link to be computed.
    assert_equal(get_link_path('/non-ex/foo.txt', '/non-ex/nothing_here.txt'),
                 '../foo.txt')
def test_get_non_ascii_link_path(self):
assert_equal(get_link_path(u'\xe4\xf6.txt', ''), '%C3%A4%C3%B6.txt')
def _posix_inputs(self):
return [('/tmp/', '/tmp/bar.txt', 'bar.txt'),
('/tmp', '/tmp/x/bar.txt', 'x/bar.txt'),
('/tmp/', '/tmp/x/y/bar.txt', 'x/y/bar.txt'),
('/tmp/', '/tmp/x/y/z/bar.txt', 'x/y/z/bar.txt'),
('/tmp', '/x/y/z/bar.txt', '../x/y/z/bar.txt'),
('/tmp/', '/x/y/z/bar.txt', '../x/y/z/bar.txt'),
('/tmp', '/x/bar.txt', '../x/bar.txt'),
('/tmp', '/x/y/z/bar.txt', '../x/y/z/bar.txt'),
('/', '/x/bar.txt', 'x/bar.txt'),
('/path/to', '/path/to/result_in_same_dir.html',
'result_in_same_dir.html'),
('/path/to/dir', '/path/to/result_in_parent_dir.html',
'../result_in_parent_dir.html'),
('/path/to', '/path/to/dir/result_in_sub_dir.html',
'dir/result_in_sub_dir.html'),
('/commonprefix/sucks/baR', '/commonprefix/sucks/baZ.txt',
'../baZ.txt'),
('/a/very/long/path', '/no/depth/limitation',
'../../../../no/depth/limitation'),
('/etc/hosts', '/path/to/existing/file',
'../path/to/existing/file'),
('/path/to/identity', '/path/to/identity', 'identity')]
def _windows_inputs(self):
return [('c:\\temp\\', 'c:\\temp\\bar.txt', 'bar.txt'),
('c:\\temp', 'c:\\temp\\x\\bar.txt', 'x/bar.txt'),
('c:\\temp\\', 'c:\\temp\\x\\y\\bar.txt', 'x/y/bar.txt'),
('c:\\temp', 'c:\\temp\\x\\y\\z\\bar.txt', 'x/y/z/bar.txt'),
('c:\\temp\\', 'c:\\x\\y\\bar.txt', '../x/y/bar.txt'),
('c:\\temp', 'c:\\x\\y\\bar.txt', '../x/y/bar.txt'),
('c:\\temp', 'c:\\x\\bar.txt', '../x/bar.txt'),
('c:\\temp', 'c:\\x\\y\\z\\bar.txt', '../x/y/z/bar.txt'),
('c:\\temp\\', 'r:\\x\\y\\bar.txt', 'file:///r:/x/y/bar.txt'),
('c:\\', 'c:\\x\\bar.txt', 'x/bar.txt'),
('c:\\path\\to', 'c:\\path\\to\\result_in_same_dir.html',
'result_in_same_dir.html'),
('c:\\path\\to\\dir', 'c:\\path\\to\\result_in_parent.dir',
'../result_in_parent.dir'),
('c:\\path\\to', 'c:\\path\\to\\dir\\result_in_sub_dir.html',
'dir/result_in_sub_dir.html'),
('c:\\commonprefix\\sucks\\baR',
'c:\\commonprefix\\sucks\\baZ.txt', '../baZ.txt'),
('c:\\a\\very\\long\\path', 'c:\\no\\depth\\limitation',
'../../../../no/depth/limitation'),
('c:\\windows\\explorer.exe',
'c:\\windows\\path\\to\\existing\\file',
'path/to/existing/file'),
('c:\\path\\2\\identity', 'c:\\path\\2\\identity', 'identity')]
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
eywalker/attorch | refs/heads/master | attorch/optimizers.py | 2 | import math
import torch
from torch import optim
class ActiveSGD(optim.SGD):
    """SGD variant whose ``step`` can restrict the update to a chosen subset
    of parameters (``active_params``).

    Only a single parameter group is supported; all hyper-parameters are read
    from ``param_groups[0]`` on every step.
    """

    def __init__(self, params, lr,
                 momentum=0, dampening=0, weight_decay=0, nesterov=False):
        params = list(params)
        assert not isinstance(
            params[0], dict), 'Only a single param group is supported'
        super().__init__(params, lr, momentum, dampening, weight_decay, nesterov)

    def step(self, active_params=None, closure=None):
        """Performs a single optimization step.

        Arguments:
            active_params (iterable | None):
                An iterable containing parameters to be updated by
                this optimization step. ``None`` means all parameters.
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None``.
        """
        loss = None
        if closure is not None:
            loss = closure()

        group = self.param_groups[0]
        lr = group['lr']
        weight_decay = group['weight_decay']
        momentum = group['momentum']
        dampening = group['dampening']
        nesterov = group['nesterov']

        params = group['params'] if active_params is None else active_params
        for p in params:
            if p.grad is None:
                continue
            d_p = p.grad.data
            if weight_decay != 0:
                # Fixed: use the `alpha=` keyword form; the positional
                # `add_(scalar, tensor)` overload was removed from PyTorch.
                d_p.add_(p.data, alpha=weight_decay)
            if momentum != 0:
                param_state = self.state[p]
                if 'momentum_buffer' not in param_state:
                    # First step: buffer starts as a copy of the gradient
                    # (equivalent to the original zeros.mul_(m).add_(d_p)).
                    buf = param_state['momentum_buffer'] = d_p.clone().detach()
                else:
                    buf = param_state['momentum_buffer']
                    buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                if nesterov:
                    d_p = d_p.add(buf, alpha=momentum)
                else:
                    d_p = buf
            p.data.add_(d_p, alpha=-lr)
        return loss
def cosine_schedule(max_value, min_value, period_init=10, period_mult=2, n=1000):
    """Generator producing a cosine learning-rate schedule with warm restarts,
    as defined in Loshchilov & Hutter, 2017, https://arxiv.org/abs/1608.03983

    Arguments:
        max_value (float): maximum value
        min_value (float): minimum value
        period_init (int): initial learning rate restart period
        period_mult (int): period multiplier that is applied at each restart
        n (int): number of iterations

    Yields:
        float: the learning rate for each successive iteration
    """
    period = period_init
    epoch = 0
    amplitude = max_value - min_value
    for _ in range(n):
        cosine = (1 + math.cos(math.pi * epoch / period)) / 2
        yield min_value + amplitude * cosine
        epoch += 1
        if epoch % period == 0:
            # Warm restart: reset the cosine phase and stretch the period.
            period *= period_mult
            epoch = 0
|
DanCech/graphite-web | refs/heads/master | contrib/memcache_whisper.py | 3 | #!/usr/bin/env python
# Copyright 2008 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This module is an implementation of the Whisper database API
# Here is the basic layout of a whisper data file
#
# File = Header,Data
# Header = Metadata,ArchiveInfo+
# Metadata = lastUpdate,maxRetention,xFilesFactor,archiveCount
# ArchiveInfo = Offset,SecondsPerPoint,Points
# Data = Archive+
# Archive = Point+
# Point = timestamp,value
"""
NOTE: This is a modified version of whisper.py
For details on the modification, read https://bugs.launchpad.net/graphite/+bug/245835
"""
import os, struct, time
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
# Module-wide feature flags.
LOCK = False            # if True (and fcntl imported), files are flock()ed
CACHE_HEADERS = False   # if True, parsed headers are memoized per file name
__headerCache = {}      # header cache keyed by file name (see __readHeader)

# struct format strings and their packed byte sizes for the on-disk layout.
longFormat = "!L"
longSize = struct.calcsize(longFormat)
floatFormat = "!f"
floatSize = struct.calcsize(floatFormat)
timestampFormat = "!L"
timestampSize = struct.calcsize(timestampFormat)
valueFormat = "!d"
valueSize = struct.calcsize(valueFormat)
pointFormat = "!Ld"                # one data point: (timestamp, value)
pointSize = struct.calcsize(pointFormat)
metadataFormat = "!2LfL"           # (lastUpdate, maxRetention, xFilesFactor, archiveCount)
metadataSize = struct.calcsize(metadataFormat)
archiveInfoFormat = "!3L"          # (offset, secondsPerPoint, points)
archiveInfoSize = struct.calcsize(archiveInfoFormat)
# Debug/timing hooks; no-ops unless enableDebug() replaces them.
debug = startBlock = endBlock = lambda *a,**k: None
def exists(path):
    # Whether the whisper file exists (rebound by enableMemcache()).
    return os.path.exists(path)

def drop(path):
    # Delete the whisper file (rebound by enableMemcache()).
    os.remove(path)
def enableMemcache(servers = ['127.0.0.1:11211'], min_compress_len = 0):
    """Switch storage from local files to memcache.

    Because of the `global` declaration, the class/def statements below rebind
    the module-level open/exists/drop so whole whisper "files" are stored as
    single memcache values keyed by path. Python 2 only (StringIO module,
    old memcache client API).
    """
    from StringIO import StringIO
    import memcache
    global open, exists, drop
    MC = memcache.Client(servers)

    class open(StringIO):
        # File-like object whose contents live in memcache under self.name.
        def __init__(self,*args,**kwargs):
            self.name = args[0]
            self.mode = args[1]
            if self.mode == "r+b" or self.mode == "rb":
                StringIO.__init__(self, MC.get(self.name))
            else:
                StringIO.__init__(self)
        def close(self):
            # Only writable modes flush the buffer back to memcache.
            if self.mode == "r+b" or self.mode == "wb":
                MC.set(self.name, self.getvalue(), min_compress_len = min_compress_len)
            StringIO.close(self)

    def exists(path):
        return MC.get(path) != None

    def drop(path):
        MC.delete(path)
def enableDebug():
    """Instrument file I/O and timing.

    Rebinds the module-level `open` to a read/write-counting wrapper around the
    Python 2 `file` built-in and installs real debug/startBlock/endBlock
    implementations in place of the module-level no-ops.
    """
    global open, debug, startBlock, endBlock

    class open(file):
        def __init__(self,*args,**kwargs):
            file.__init__(self,*args,**kwargs)
            self.writeCount = 0
            self.readCount = 0
        def write(self,data):
            self.writeCount += 1
            debug('WRITE %d bytes #%d' % (len(data),self.writeCount))
            return file.write(self,data)
        def read(self,bytes):
            self.readCount += 1
            debug('READ %d bytes #%d' % (bytes,self.readCount))
            return file.read(self,bytes)

    def debug(message):
        print('DEBUG :: %s' % message)

    # Shared state for startBlock/endBlock timing pairs, keyed by block name.
    __timingBlocks = {}

    def startBlock(name):
        __timingBlocks[name] = time.time()

    def endBlock(name):
        debug("%s took %.5f seconds" % (name,time.time() - __timingBlocks.pop(name)))
def __readHeader(fh):
    """Parse the header (metadata plus archive table) of an open whisper file.

    Returns a dict with lastUpdate, maxRetention, xFilesFactor and a list of
    archive-info dicts; the file position is restored afterwards. Results are
    memoized per file name when CACHE_HEADERS is on.
    """
    info = __headerCache.get(fh.name)
    if info: return info
    #startBlock('__readHeader')
    originalOffset = fh.tell()
    fh.seek(0)
    packedMetadata = fh.read(metadataSize)
    (lastUpdate,maxRetention,xff,archiveCount) = struct.unpack(metadataFormat,packedMetadata)
    archives = []
    for i in xrange(archiveCount):
        packedArchiveInfo = fh.read(archiveInfoSize)
        (offset,secondsPerPoint,points) = struct.unpack(archiveInfoFormat,packedArchiveInfo)
        archiveInfo = {
            'offset' : offset,
            'secondsPerPoint' : secondsPerPoint,
            'points' : points,
            'retention' : secondsPerPoint * points,  # seconds of history covered
            'size' : points * pointSize,             # bytes occupied on disk
        }
        archives.append(archiveInfo)
    fh.seek(originalOffset)
    info = {
        'lastUpdate' : lastUpdate,
        'maxRetention' : maxRetention,
        'xFilesFactor' : xff,
        'archives' : archives,
    }
    if CACHE_HEADERS:
        __headerCache[fh.name] = info
    #endBlock('__readHeader')
    return info
def __changeLastUpdate(fh):
    """Would rewrite the lastUpdate header field; intentionally disabled."""
    return #XXX Make this a NOP, use os.stat(filename).st_mtime instead
    # Dead code below: kept for reference, never executed.
    startBlock('__changeLastUpdate()')
    originalOffset = fh.tell()
    fh.seek(0) #Based on assumption that first field is lastUpdate
    now = int( time.time() )
    packedTime = struct.pack(timestampFormat,now)
    fh.write(packedTime)
    fh.seek(originalOffset)
    endBlock('__changeLastUpdate()')
def create(path,archiveList,xFilesFactor=0.5):
    """create(path,archiveList,xFilesFactor=0.5)

    path is a string
    archiveList is a list of archives, each of which is of the form (secondsPerPoint,numberOfPoints)
    xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur
    """
    #Validate archive configurations...
    assert archiveList, "You must specify at least one archive configuration!"
    archiveList.sort(key=lambda a: a[0]) #sort by precision (secondsPerPoint)
    for i,archive in enumerate(archiveList):
        if i == len(archiveList) - 1: break
        # NOTE: `next` shadows the builtin here; harmless in this scope.
        next = archiveList[i+1]
        # Coarser archives must be strictly lower precision, evenly divisible
        # by the finer precision, and cover a longer time span.
        assert archive[0] < next[0],\
            "You cannot configure two archives with the same precision %s,%s" % (archive,next)
        assert (next[0] % archive[0]) == 0,\
            "Higher precision archives' precision must evenly divide all lower precision archives' precision %s,%s" % (archive[0],next[0])
        retention = archive[0] * archive[1]
        nextRetention = next[0] * next[1]
        assert nextRetention > retention,\
            "Lower precision archives must cover larger time intervals than higher precision archives %s,%s" % (archive,next)
    #Looks good, now we create the file and write the header
    assert not exists(path), "File %s already exists!" % path
    fh = open(path,'wb')
    if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
    lastUpdate = struct.pack( timestampFormat, int(time.time()) )
    oldest = sorted([secondsPerPoint * points for secondsPerPoint,points in archiveList])[-1]
    maxRetention = struct.pack( longFormat, oldest )
    xFilesFactor = struct.pack( floatFormat, float(xFilesFactor) )
    archiveCount = struct.pack(longFormat, len(archiveList))
    packedMetadata = lastUpdate + maxRetention + xFilesFactor + archiveCount
    fh.write(packedMetadata)
    headerSize = metadataSize + (archiveInfoSize * len(archiveList))
    archiveOffsetPointer = headerSize
    # Write one archive-info record per archive, then zero-fill the data area.
    for secondsPerPoint,points in archiveList:
        archiveInfo = struct.pack(archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points)
        fh.write(archiveInfo)
        archiveOffsetPointer += (points * pointSize)
    zeroes = '\x00' * (archiveOffsetPointer - headerSize)
    fh.write(zeroes)
    fh.close()
def __propagate(fh,timestamp,xff,higher,lower):
    """Aggregate points from the `higher` (finer) archive into the `lower`
    (coarser) archive for the interval containing `timestamp`.

    Returns True if at least an `xff` fraction of the finer points were known
    and an average was written, False otherwise. Python 2: `/` below is
    integer floor division, which the offset arithmetic relies on.
    """
    lowerIntervalStart = timestamp - (timestamp % lower['secondsPerPoint'])
    lowerIntervalEnd = lowerIntervalStart + lower['secondsPerPoint']
    fh.seek(higher['offset'])
    packedPoint = fh.read(pointSize)
    (higherBaseInterval,higherBaseValue) = struct.unpack(pointFormat,packedPoint)
    if higherBaseInterval == 0:
        # Archive is empty; start reading at its beginning.
        higherFirstOffset = higher['offset']
    else:
        timeDistance = lowerIntervalStart - higherBaseInterval
        pointDistance = timeDistance / higher['secondsPerPoint']
        byteDistance = pointDistance * pointSize
        # Modulo wraps the offset around the circular archive buffer.
        higherFirstOffset = higher['offset'] + (byteDistance % higher['size'])
    higherPoints = lower['secondsPerPoint'] / higher['secondsPerPoint']
    higherSize = higherPoints * pointSize
    higherLastOffset = higherFirstOffset + (higherSize % higher['size'])
    fh.seek(higherFirstOffset)
    if higherFirstOffset < higherLastOffset: #we don't wrap the archive
        seriesString = fh.read(higherLastOffset - higherFirstOffset)
    else: #We do wrap the archive
        higherEnd = higher['offset'] + higher['size']
        seriesString = fh.read(higherEnd - higherFirstOffset)
        fh.seek(higher['offset'])
        seriesString += fh.read(higherLastOffset - higher['offset'])
    #Now we unpack the series data we just read
    byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
    points = len(seriesString) / pointSize
    seriesFormat = byteOrder + (pointTypes * points)
    unpackedSeries = struct.unpack(seriesFormat, seriesString)
    #And finally we construct a list of values
    neighborValues = [None] * points
    currentInterval = lowerIntervalStart
    step = higher['secondsPerPoint']
    for i in xrange(0,len(unpackedSeries),2):
        pointTime = unpackedSeries[i]
        # A slot is valid only if its stored timestamp matches the expected
        # interval; a stale timestamp means old wrapped-around data.
        if pointTime == currentInterval:
            neighborValues[i/2] = unpackedSeries[i+1]
        currentInterval += step
    #Propagate aggregateValue to propagate from neighborValues if we have enough known points
    knownValues = [v for v in neighborValues if v is not None]
    knownPercent = float(len(knownValues)) / float(len(neighborValues))
    if knownPercent >= xff: #we have enough data to propagate a value!
        aggregateValue = float(sum(knownValues)) / float(len(knownValues)) #TODO another CF besides average?
        myPackedPoint = struct.pack(pointFormat,lowerIntervalStart,aggregateValue)
        fh.seek(lower['offset'])
        packedPoint = fh.read(pointSize)
        (lowerBaseInterval,lowerBaseValue) = struct.unpack(pointFormat,packedPoint)
        if lowerBaseInterval == 0: #First propagated update to this lower archive
            fh.seek(lower['offset'])
            fh.write(myPackedPoint)
        else: #Not our first propagated update to this lower archive
            timeDistance = lowerIntervalStart - lowerBaseInterval
            pointDistance = timeDistance / lower['secondsPerPoint']
            byteDistance = pointDistance * pointSize
            lowerOffset = lower['offset'] + (byteDistance % lower['size'])
            fh.seek(lowerOffset)
            fh.write(myPackedPoint)
        return True
    else:
        return False
def update(path,value,timestamp=None):
    """update(path,value,timestamp=None)

    path is a string
    value is a float
    timestamp is either an int or float

    Writes one point into the highest-precision archive that covers it, then
    propagates the change down to coarser archives.
    """
    #startBlock('complete update')
    value = float(value)
    fh = open(path,'r+b')
    if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
    header = __readHeader(fh)
    now = int( time.time() )
    if timestamp is None: timestamp = now
    timestamp = int(timestamp)
    diff = now - timestamp
    # The assert guarantees the loop below always breaks with an archive bound.
    assert diff < header['maxRetention'] and diff >= 0, "Timestamp not covered by any archives in this database"
    for i,archive in enumerate(header['archives']): #Find the highest-precision archive that covers timestamp
        if archive['retention'] < diff: continue
        lowerArchives = header['archives'][i+1:] #We'll pass on the update to these lower precision archives later
        break
    #First we update the highest-precision archive
    myInterval = timestamp - (timestamp % archive['secondsPerPoint'])
    myPackedPoint = struct.pack(pointFormat,myInterval,value)
    fh.seek(archive['offset'])
    packedPoint = fh.read(pointSize)
    (baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
    if baseInterval == 0: #This file's first update
        fh.seek(archive['offset'])
        fh.write(myPackedPoint)
        baseInterval,baseValue = myInterval,value
    else: #Not our first update
        # Python 2 floor division locates the slot; modulo wraps the
        # circular archive buffer.
        timeDistance = myInterval - baseInterval
        pointDistance = timeDistance / archive['secondsPerPoint']
        byteDistance = pointDistance * pointSize
        myOffset = archive['offset'] + (byteDistance % archive['size'])
        fh.seek(myOffset)
        fh.write(myPackedPoint)
    #Now we propagate the update to lower-precision archives
    #startBlock('update propagation')
    higher = archive
    for lower in lowerArchives:
        if not __propagate(fh,myInterval,header['xFilesFactor'],higher,lower): break
        higher = lower
    #endBlock('update propagation')
    __changeLastUpdate(fh)
    fh.close()
    #endBlock('complete update')
def update_many(path,points):
    """update_many(path,points)

    path is a string
    points is a list of (timestamp,value) points

    Points are sorted newest-first and batched per archive: each batch is
    committed (in chronological order) to the finest archive that can still
    hold it; points older than every archive's retention are dropped.
    """
    #startBlock('complete update_many path=%s points=%d' % (path,len(points)))
    if not points: return
    points = [ (int(t),float(v)) for (t,v) in points]
    points.sort(key=lambda p: p[0],reverse=True) #order points by timestamp, newest first
    fh = open(path,'r+b')
    if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
    header = __readHeader(fh)
    now = int( time.time() )
    archives = iter( header['archives'] )
    currentArchive = next(archives)
    #debug(' update_many currentArchive=%s' % str(currentArchive))
    currentPoints = []
    for point in points:
        age = now - point[0]
        #debug(' update_many iterating points, point=%s age=%d' % (str(point),age))
        while currentArchive['retention'] < age: #we can't fit any more points in this archive
            #debug(' update_many this point is too old to fit here, currentPoints=%d' % len(currentPoints))
            if currentPoints: #commit all the points we've found that it can fit
                currentPoints.reverse() #put points in chronological order
                __archive_update_many(fh,header,currentArchive,currentPoints)
                currentPoints = []
            try:
                currentArchive = next(archives)
                #debug(' update_many using next archive %s' % str(currentArchive))
            except StopIteration:
                #debug(' update_many no more archives!')
                currentArchive = None
                break
        if not currentArchive: break #drop remaining points that don't fit in the database
        #debug(' update_many adding point=%s' % str(point))
        currentPoints.append(point)
    #debug(' update_many done iterating points')
    if currentArchive and currentPoints: #don't forget to commit after we've checked all the archives
        currentPoints.reverse()
        __archive_update_many(fh,header,currentArchive,currentPoints)
    __changeLastUpdate(fh)
    fh.close()
    #endBlock('complete update_many path=%s points=%d' % (path,len(points)))
def __archive_update_many(fh,header,archive,points):
    """Write a chronologically-ordered batch of points into one archive.

    Contiguous runs of aligned points are packed into single strings so each
    run needs at most two write() calls (two when it wraps the circular
    buffer), then the affected intervals are propagated to coarser archives.
    Python 2: `/` below is integer floor division.
    """
    step = archive['secondsPerPoint']
    #startBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points)))
    alignedPoints = [ (timestamp - (timestamp % step), value)
                      for (timestamp,value) in points ]
    #Create a packed string for each contiguous sequence of points
    #startBlock('__archive_update_many string packing')
    packedStrings = []
    previousInterval = None
    currentString = ""
    for (interval,value) in alignedPoints:
        #debug('__archive_update_many iterating alignedPoint at %s' % interval)
        if (not previousInterval) or (interval == previousInterval + step):
            #debug('__archive_update_many was expected, packing onto currentString')
            currentString += struct.pack(pointFormat,interval,value)
            previousInterval = interval
        else:
            # Gap in the sequence: flush the current run and start a new one.
            numberOfPoints = len(currentString) / pointSize
            startInterval = previousInterval - (step * (numberOfPoints-1))
            #debug('__archive_update_many was NOT expected, appending to packedStrings startInterval=%s currentString=%d bytes' % (startInterval,len(currentString)))
            packedStrings.append( (startInterval,currentString) )
            currentString = struct.pack(pointFormat,interval,value)
            previousInterval = interval
    if currentString:
        #startInterval = previousInterval - (step * len(currentString) / pointSize) + step
        numberOfPoints = len(currentString) / pointSize
        startInterval = previousInterval - (step * (numberOfPoints-1))
        #debug('__archive_update_many done iterating alignedPoints, remainder currentString of %d bytes, startInterval=%s' % (len(currentString),startInterval))
        packedStrings.append( (startInterval,currentString) )
    #endBlock('__archive_update_many string packing')
    #Read base point and determine where our writes will start
    fh.seek(archive['offset'])
    packedBasePoint = fh.read(pointSize)
    (baseInterval,baseValue) = struct.unpack(pointFormat,packedBasePoint)
    if baseInterval == 0: #This file's first update
        #debug('__archive_update_many first update')
        baseInterval = packedStrings[0][0] #use our first string as the base, so we start at the start
    #debug('__archive_update_many baseInterval is %s' % baseInterval)
    #Write all of our packed strings in locations determined by the baseInterval
    #startBlock('__archive_update_many write() operations')
    for (interval,packedString) in packedStrings:
        timeDistance = interval - baseInterval
        pointDistance = timeDistance / step
        byteDistance = pointDistance * pointSize
        myOffset = archive['offset'] + (byteDistance % archive['size'])
        fh.seek(myOffset)
        archiveEnd = archive['offset'] + archive['size']
        bytesBeyond = (myOffset + len(packedString)) - archiveEnd
        #debug(' __archive_update_many myOffset=%d packedString=%d archiveEnd=%d bytesBeyond=%d' % (myOffset,len(packedString),archiveEnd,bytesBeyond))
        if bytesBeyond > 0:
            # The run crosses the end of the circular buffer: split the write.
            fh.write( packedString[:-bytesBeyond] )
            #debug('We wrapped an archive!')
            assert fh.tell() == archiveEnd, "archiveEnd=%d fh.tell=%d bytesBeyond=%d len(packedString)=%d" % (archiveEnd,fh.tell(),bytesBeyond,len(packedString))
            fh.seek( archive['offset'] )
            fh.write( packedString[-bytesBeyond:] ) #safe because it can't exceed the archive (retention checking logic above)
        else:
            fh.write(packedString)
    #endBlock('__archive_update_many write() operations')
    #Now we propagate the updates to lower-precision archives
    #startBlock('__archive_update_many propagation')
    higher = archive
    lowerArchives = [arc for arc in header['archives'] if arc['secondsPerPoint'] > archive['secondsPerPoint']]
    #debug('__archive_update_many I have %d lower archives' % len(lowerArchives))
    for lower in lowerArchives:
        fit = lambda i: i - (i % lower['secondsPerPoint'])
        lowerIntervals = [fit(p[0]) for p in alignedPoints]
        uniqueLowerIntervals = set(lowerIntervals)
        #debug(' __archive_update_many points=%d unique=%d' % (len(alignedPoints),len(uniqueLowerIntervals)))
        propagateFurther = False
        for interval in uniqueLowerIntervals:
            #debug(' __archive_update_many propagating from %d to %d, interval=%d' % (higher['secondsPerPoint'],lower['secondsPerPoint'],interval))
            if __propagate(fh,interval,header['xFilesFactor'],higher,lower):
                propagateFurther = True
                #debug(' __archive_update_many Successful propagation!')
        #debug(' __archive_update_many propagateFurther=%s' % propagateFurther)
        if not propagateFurther: break
        higher = lower
    #endBlock('__archive_update_many propagation')
    #endBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points)))
def info(path):
    """info(path)

    path is a string

    Returns the parsed header dict (lastUpdate, maxRetention, xFilesFactor,
    archives) for the whisper file at *path*.
    """
    fh = open(path,'rb')
    info = __readHeader(fh)
    fh.close()
    return info
def fetch(path,fromTime,untilTime=None):
    """fetch(path,fromTime,untilTime=None)

    path is a string
    fromTime is an epoch time
    untilTime is also an epoch time, but defaults to now

    Returns ((fromInterval, untilInterval, step), values) where values contains
    None for intervals with no known data. Python 2: `/` is floor division.
    """
    fh = open(path,'rb')
    header = __readHeader(fh)
    now = int( time.time() )
    if untilTime is None or untilTime > now:
        untilTime = now
    # Clamp the window to what the database can possibly hold.
    if fromTime < (now - header['maxRetention']):
        fromTime = now - header['maxRetention']
    assert fromTime < untilTime, "Invalid time interval"
    diff = now - fromTime
    # Pick the finest archive whose retention covers the whole request.
    for archive in header['archives']:
        if archive['retention'] >= diff: break
    fromInterval = int( fromTime - (fromTime % archive['secondsPerPoint']) )
    untilInterval = int( untilTime - (untilTime % archive['secondsPerPoint']) )
    fh.seek(archive['offset'])
    packedPoint = fh.read(pointSize)
    (baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
    if baseInterval == 0:
        # Empty archive: return all-None values without reading further.
        step = archive['secondsPerPoint']
        points = (untilInterval - fromInterval) / step
        timeInfo = (fromInterval,untilInterval,step)
        valueList = [None] * points
        return (timeInfo,valueList)
    #Determine fromOffset
    timeDistance = fromInterval - baseInterval
    pointDistance = timeDistance / archive['secondsPerPoint']
    byteDistance = pointDistance * pointSize
    fromOffset = archive['offset'] + (byteDistance % archive['size'])
    #Determine untilOffset
    timeDistance = untilInterval - baseInterval
    pointDistance = timeDistance / archive['secondsPerPoint']
    byteDistance = pointDistance * pointSize
    untilOffset = archive['offset'] + (byteDistance % archive['size'])
    #Read all the points in the interval
    fh.seek(fromOffset)
    if fromOffset < untilOffset: #If we don't wrap around the archive
        seriesString = fh.read(untilOffset - fromOffset)
    else: #We do wrap around the archive, so we need two reads
        archiveEnd = archive['offset'] + archive['size']
        seriesString = fh.read(archiveEnd - fromOffset)
        fh.seek(archive['offset'])
        seriesString += fh.read(untilOffset - archive['offset'])
    #Now we unpack the series data we just read (anything faster than unpack?)
    byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
    points = len(seriesString) / pointSize
    seriesFormat = byteOrder + (pointTypes * points)
    unpackedSeries = struct.unpack(seriesFormat, seriesString)
    #And finally we construct a list of values (optimize this!)
    valueList = [None] * points #pre-allocate entire list for speed
    currentInterval = fromInterval
    step = archive['secondsPerPoint']
    for i in xrange(0,len(unpackedSeries),2):
        pointTime = unpackedSeries[i]
        # A slot counts only if its stored timestamp matches the expected
        # interval; otherwise it holds stale wrapped-around data.
        if pointTime == currentInterval:
            pointValue = unpackedSeries[i+1]
            valueList[i/2] = pointValue #in-place reassignment is faster than append()
        currentInterval += step
    fh.close()
    timeInfo = (fromInterval,untilInterval,step)
    return (timeInfo,valueList)
|
sztanko/hadoop-common | refs/heads/HADOOP-3628 | src/contrib/hod/hodlib/RingMaster/idleJobTracker.py | 182 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import os, re, time
from hodlib.Common.threads import loop, func
from hodlib.Common.threads import simpleCommand
from hodlib.Common.util import get_exception_string, hadoopVersion
class HadoopJobStatus:
    """Immutable snapshot of a single Hadoop job: its id and status code."""

    def __init__(self, jobId, status):
        self.__jobId, self.__status = jobId, status

    def getJobId(self):
        """Return the job identifier."""
        return self.__jobId

    def getStatus(self):
        """Return the numeric job status."""
        return self.__status
class HadoopClientException(Exception):
    """Raised when running the Hadoop job client fails.

    The job client's exit code is kept on the ``errorCode`` attribute.
    """

    def __init__(self, errorCode):
        self.errorCode = errorCode
class JobTrackerMonitor:
"""This class monitors the JobTracker of an allocated cluster
periodically to detect whether it is idle. If it is found
to be idle for more than a configured limit, it calls back
registered handlers who can act upon the idle cluster."""
def __init__(self, log, idleJTHandler, interval, limit,
hadoopDir, javaHome, servInfoProvider):
self.__log = log
self.__idlenessLimit = limit
self.__idleJobTrackerHandler = idleJTHandler
self.__hadoopDir = hadoopDir
hadoopPath = os.path.join(self.__hadoopDir, "bin", "hadoop")
#hadoop directory can be from pkgs or a temp location like tarball. Verify once.
if not os.path.exists(hadoopPath):
raise Exception('Invalid Hadoop path specified: %s' % hadoopPath)
self.__javaHome = javaHome
# Note that when this object is created, we don't yet know the JT URL.
# The service info provider will be polled until we get the URL.
self.__serviceInfoProvider = servInfoProvider
self.__jobCountRegExp = re.compile("([0-9]+) jobs currently running.*")
self.__jobStatusRegExp = re.compile("(\S+)\s+(\d)\s+\d+\s+\S+$")
self.__firstIdleTime = 0
self.__hadoop15Version = { 'major' : '0', 'minor' : '15' }
#Assumption: we are not going to support versions older than 0.15 for Idle Job tracker.
if not self.__isCompatibleHadoopVersion(self.__hadoop15Version):
raise Exception('Incompatible Hadoop Version: Cannot check status')
self.__stopFlag = False
self.__jtURLFinderThread = func(name='JTURLFinderThread', functionRef=self.getJobTrackerURL)
self.__jtMonitorThread = loop(name='JTMonitorThread', functionRef=self.monitorJobTracker,
sleep=interval)
self.__jobTrackerURL = None
def start(self):
"""This method starts a thread that will determine the JobTracker URL"""
self.__jtURLFinderThread.start()
def stop(self):
self.__log.debug('Joining the monitoring thread.')
self.__stopFlag = True
if self.__jtMonitorThread.isAlive():
self.__jtMonitorThread.join()
self.__log.debug('Joined the monitoring thread.')
def getJobTrackerURL(self):
"""This method periodically checks the service info provider for the JT URL"""
self.__jobTrackerURL = self.__serviceInfoProvider.getServiceAddr('mapred')
while not self.__stopFlag and not self.__isValidJobTrackerURL():
time.sleep(10)
if not self.__stopFlag:
self.__jobTrackerURL = self.__serviceInfoProvider.getServiceAddr('mapred')
else:
break
if self.__isValidJobTrackerURL():
self.__log.debug('Got URL %s. Starting monitoring' % self.__jobTrackerURL)
self.__jtMonitorThread.start()
def monitorJobTracker(self):
"""This method is periodically called to monitor the JobTracker of the cluster."""
try:
if self.__isIdle():
if self.__idleJobTrackerHandler:
self.__log.info('Detected cluster as idle. Calling registered callback handler.')
self.__idleJobTrackerHandler.handleIdleJobTracker()
except:
self.__log.debug('Exception while monitoring job tracker. %s' % get_exception_string())
def getJobsStatus(self):
"""This method should return the status of all jobs that are run on the HOD allocated
hadoop cluster"""
jobStatusList = []
try:
hadoop16Version = { 'major' : '0', 'minor' : '16' }
if self.__isCompatibleHadoopVersion(hadoop16Version):
jtStatusCommand = self.__initStatusCommand(option='-list all')
jtStatusCommand.start()
jtStatusCommand.wait()
jtStatusCommand.join()
if jtStatusCommand.exit_code() == 0:
for line in jtStatusCommand.output():
jobStatus = self.__extractJobStatus(line)
if jobStatus is not None:
jobStatusList.append(jobStatus)
except:
self.__log.debug('Exception while getting job statuses. %s' % get_exception_string())
return jobStatusList
def __isValidJobTrackerURL(self):
"""This method checks that the passed in URL is not one of the special case strings
returned by the getServiceAddr API"""
return ((self.__jobTrackerURL != None) and (self.__jobTrackerURL != 'not found') \
and (not self.__jobTrackerURL.startswith('Error')))
def __extractJobStatus(self, line):
"""This method parses an output line from the job status command and creates
the JobStatus object if there is a match"""
jobStatus = None
line = line.strip()
jsMatch = self.__jobStatusRegExp.match(line)
if jsMatch:
jobStatus = HadoopJobStatus(jsMatch.group(1), int(jsMatch.group(2)))
return jobStatus
def __isIdle(self):
"""This method checks if the JobTracker is idle beyond a certain limit."""
jobCount = 0
err = False
try:
jobCount = self.__getJobCount()
except HadoopClientException, hce:
self.__log.debug('HadoopClientException handled in getting job count. \
Error code: %s' % hce.errorCode)
err = True
if (jobCount==0) or err:
if self.__firstIdleTime == 0:
#detecting idleness for the first time
self.__firstIdleTime = time.time()
else:
if ((time.time()-self.__firstIdleTime) >= self.__idlenessLimit):
self.__log.info('Idleness limit crossed for cluster')
return True
else:
# reset idleness time
self.__firstIdleTime = 0
return False
def __getJobCount(self):
"""This method executes the hadoop job -list command and parses the output to detect
the number of running jobs."""
# We assume here that the poll interval is small enough to detect running jobs.
# If jobs start and stop within the poll interval, the cluster would be incorrectly
# treated as idle. Hadoop 2266 will provide a better mechanism than this.
jobs = -1
jtStatusCommand = self.__initStatusCommand()
jtStatusCommand.start()
jtStatusCommand.wait()
jtStatusCommand.join()
if jtStatusCommand.exit_code() == 0:
for line in jtStatusCommand.output():
match = self.__jobCountRegExp.match(line)
if match:
jobs = int(match.group(1))
elif jtStatusCommand.exit_code() == 1:
# for now, exit code 1 comes for any exception raised by JobClient. If hadoop gets
# to differentiate and give more granular exit codes, we can check for those errors
# corresponding to network errors etc.
raise HadoopClientException(jtStatusCommand.exit_code())
return jobs
def __isCompatibleHadoopVersion(self, expectedVersion):
    """Return True iff the hadoop version in use is >= expectedVersion.

    expectedVersion: dict with 'major' and 'minor' entries convertible
    to int. Can be used for checking whether a particular feature is
    available or not.

    Bug fix: the original compared majors and minors independently
    (major >= expected_major AND minor >= expected_minor), so e.g.
    detected 2.0 vs expected 1.5 was wrongly reported incompatible.
    Versions are ordered lexicographically on (major, minor) instead.
    """
    ver = hadoopVersion(self.__hadoopDir, self.__javaHome, self.__log)
    ret = False
    if (ver['major'] != None) and (ver['minor'] != None):
        detected = (int(ver['major']), int(ver['minor']))
        expected = (int(expectedVersion['major']), int(expectedVersion['minor']))
        # Tuple comparison is lexicographic: a higher major version is
        # compatible regardless of its minor number.
        ret = detected >= expected
    return ret
def __initStatusCommand(self, option="-list"):
    """Build (but do not start) the command that queries the JobTracker,
    e.g. `<hadoopDir>/bin/hadoop job -jt <url> -list`.

    option: extra argument appended after `hadoop job -jt <url>`.
    Returns a simpleCommand wrapping the command line, with JAVA_HOME
    pointing at self.__javaHome in the child environment.
    """
    hadoopPath = os.path.join(self.__hadoopDir, 'bin', 'hadoop')
    cmdStr = "%s job -jt %s" % (hadoopPath, self.__jobTrackerURL)
    cmdStr = "%s %s" % (cmdStr, option)
    self.__log.debug('cmd str %s' % cmdStr)
    # Bug fix: copy the environment instead of aliasing os.environ.
    # Assigning through os.environ would export JAVA_HOME for the whole
    # monitor process, not just for this child command.
    env = os.environ.copy()
    env['JAVA_HOME'] = self.__javaHome
    return simpleCommand('HadoopStatus', cmdStr, env)
|
spktklr/kansalaisrajoite | refs/heads/master | python/user.py | 1 | # coding=utf-8
import hmac
import json
from bottle import Bottle, HTTPError, request, template
from sqlalchemy.orm.exc import NoResultFound
import bcrypt
from utils import jsonplugin, gen_pw_reset_payload, send_email
import auth
import model
import config
import utils
# Bottle sub-application holding the user-account endpoints.
# model.plugin injects the SQLAlchemy `db` session argument into
# handlers; jsonplugin serializes dict return values to JSON.
app = Bottle()
app.install(model.plugin)
app.install(jsonplugin)
@app.post('/register')
def register(db):
    """Create a new, unverified account and email a verification link.

    Responds 409 when the email is already registered and 400 when a
    model setter raises AssertionError (validation failure). The new
    row is added to the session; commit is handled by the installed
    SQLAlchemy plugin — presumably at request end, TODO confirm.
    """
    try:
        email = request.forms.email.strip()
        if db.query(model.User).filter_by(email=email).first():
            return HTTPError(409, 'Conflict')
        user = model.User()
        user.email = email
        user.password = request.forms.password.strip()
        user.name = request.forms.name.strip()
        user.city = request.forms.city.strip()
        db.add(user)
        # create hmac verification token over the email address; the
        # same signature is recomputed and compared in verify() below
        token = utils.sign_message(user.email)
        subject = config.verification_email_subject
        body = template(
            'mail_verification',
            email=user.email,
            site_name=config.site_name,
            site_url=config.site_url,
            token=token
        )
        send_email(email, subject, body)
    except AssertionError:
        return HTTPError(400, 'Bad request')
@app.post('/')
@auth.require_login
def modify(db, user):
    """Update the logged-in user's own profile (password, name, city).

    The password is changed only when a non-empty value was posted.
    Returns the updated user as a dict; 400 when a model setter raises
    AssertionError (validation failure).
    """
    try:
        password = request.forms.get('password')
        if password:
            user.password = password
        user.name = request.forms.name.strip()
        user.city = request.forms.city.strip()
        return user.toDict(True)
    except AssertionError:
        return HTTPError(400, 'Bad request')
@app.post('/verify')
def verify(db):
    """Confirm an account from the emailed HMAC token and log the user in.

    The token must equal the signature of the email address as produced
    at registration; the comparison uses constant-time
    hmac.compare_digest. Responds 400 on missing fields, 401 on a bad
    token, 404 when the email is unknown.
    """
    email = request.forms.email.strip()
    token = request.forms.token.strip()
    if not email or not token:
        return HTTPError(400, 'Bad request')
    expected_token = utils.sign_message(email)
    if not hmac.compare_digest(token, expected_token):
        return HTTPError(401, 'Unauthorized')
    user = db.query(model.User).filter_by(email=email).first()
    if user:
        user.verified = True
        # successful verification doubles as login
        session = request.environ['beaker.session']
        session['user_id'] = user.id
        return user.toDict(True)
    else:
        return HTTPError(404, 'Not found')
@app.post('/reset-password-1')
def send_reset_email(db):
    """Step 1 of password reset: email a signed reset token.

    Responds 400 when no email was posted. Whether or not the address
    exists, the response is otherwise identical, so this endpoint
    cannot be used to probe for registered emails.
    """
    email = request.forms.email.strip()
    if not email:
        return HTTPError(400, 'Bad request')
    user = db.query(model.User).filter_by(email=email).first()
    if user:
        # The signed payload is derived from the user row via
        # gen_pw_reset_payload; step 2 recomputes it the same way, so
        # the token presumably stops validating once the row changes —
        # TODO confirm against utils.gen_pw_reset_payload.
        json_payload = json.dumps(gen_pw_reset_payload(user))
        token = utils.sign_message(json_payload)
        subject = config.pw_reset_email_subject
        body = template(
            'mail_pw_reset',
            email=user.email,
            site_name=config.site_name,
            site_url=config.site_url,
            token=token
        )
        send_email(email, subject, body)
    # to keep emails private we don't want to tell the user if the email exists
@app.post('/reset-password-2')
def reset_password(db):
    """Step 2 of password reset: validate the token, set a new password.

    The expected token is recomputed from the current user row exactly
    as in step 1 and compared with constant-time hmac.compare_digest.
    Responds 400 on missing/invalid input, 401 on a bad token or an
    unknown email (unknown email is deliberately indistinguishable
    from a bad token here).
    """
    try:
        email = request.forms.email.strip()
        token = request.forms.token.strip()
        if not email or not token:
            return HTTPError(400, 'Bad request')
        user = db.query(model.User).filter_by(email=email).first()
        if user:
            # validate hmac token
            json_payload = json.dumps(gen_pw_reset_payload(user))
            expected_token = utils.sign_message(json_payload)
            if not hmac.compare_digest(token, expected_token):
                return HTTPError(401, 'Unauthorized')
            # change password
            user.password = request.forms.password.strip()
            # pw reset can also be used in activating the account
            user.verified = True
        else:
            return HTTPError(401, 'Unauthorized')
    except AssertionError:
        return HTTPError(400, 'Bad request')
@app.post('/login')
def login(db):
    """Authenticate by email/password and start a beaker session.

    Responds 400 on missing fields, 412 for an unverified account,
    401 on unknown email or wrong password. On success stores the user
    id in the session and returns the user as a dict.
    """
    email = request.forms.email.strip()
    password = request.forms.password.strip()

    if not email or not password:
        return HTTPError(400, 'Bad request')

    account = db.query(model.User).filter_by(email=email).first()

    if account is None:
        return HTTPError(401, 'Unauthorized')
    if not account.verified:
        return HTTPError(412, 'Precondition failed')

    password_ok = bcrypt.checkpw(
        password.encode('utf-8'), account.password.encode('utf-8'))
    if not password_ok:
        return HTTPError(401, 'Unauthorized')

    session = request.environ['beaker.session']
    session['user_id'] = account.id
    return account.toDict(True)
@app.post('/logout')
def logout(db):
    """Destroy the current beaker session; 401 when there is none."""
    session = request.environ.get('beaker.session')
    if not session:
        return HTTPError(401, 'Unauthorized')
    session.delete()
@app.get('/')
@auth.optional_login
def my_data(db, user):
    """Return the logged-in user's own data, or {} when anonymous."""
    return user.toDict(True) if user else {}
@app.get('/<id:int>')
@auth.require_admin
def read_user(db, user, id):
    """Admin-only endpoint: fetch any user by primary key; 404 if absent."""
    try:
        return db.query(model.User).filter_by(id=id).one().toDict(True)
    except NoResultFound:
        return HTTPError(404, 'Not found')
|
mrquim/repository.mrquim | refs/heads/master | plugin.video.castaway/resources/lib/resolvers/onedrive.py | 4 | from resources.lib.modules import client
from resources.lib.modules.log_utils import log
import re,urllib,urlparse,base64
def resolve(url):
    """Resolve a OneDrive share URL to a direct video URL.

    Encodes the share link into the OneDrive API 'u!' share-id form
    (unpadded URL-safe base64), fetches the item metadata and extracts
    downloadUrl. Returns '' on any failure (best-effort resolver).
    """
    try:
        # urlsafe_b64encode uses the '-_' alphabet, i.e. exactly the
        # '+/' -> '-_' substitution the API expects; padding is dropped.
        share_id = 'u!' + base64.urlsafe_b64encode(b'%s' % url).rstrip('=')
        api_url = 'http://api.onedrive.com/v1.0/shares/' + share_id + '/root'
        metadata = client.request(api_url)
        matches = re.findall('downloadUrl[\"\']\s*:[\"\']([^\"\']+)[\"\']', metadata)
        video = matches[0].replace('https', 'http')
        log(video)
        headers = urllib.urlencode({'User-Agent': client.agent()})
        return video + '|%s' % headers
    except:
        return ''
|
DaYeSquad/worktilerwdemo | refs/heads/master | model-builder/skr_cpp_builder/__init__.py | 6 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - Frank Lin |
socrocket/trap-gen | refs/heads/master | cxx_writer/tests/testSimpleDecls.py | 1 | ################################################################################
#
# _/_/_/_/_/ _/_/_/ _/ _/_/_/
# _/ _/ _/ _/_/ _/ _/
# _/ _/ _/ _/ _/ _/ _/
# _/ _/_/_/ _/_/_/_/ _/_/_/
# _/ _/ _/ _/ _/ _/
# _/ _/ _/ _/ _/ _/
#
# @file testSimpleDecls.py
# @brief This file is part of the TRAP CXX code generator testsuite.
# @details
# @author Luca Fossati
# @author Lillian Tadros (Technische Universitaet Dortmund)
# @date 2008-2013 Luca Fossati
# 2015-2016 Technische Universitaet Dortmund
# @copyright
#
# This file is part of TRAP.
#
# TRAP is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# or see <http://www.gnu.org/licenses/>.
#
# (c) Luca Fossati, fossati@elet.polimi.it, fossati.l@gmail.com
#
################################################################################
try:
import cxx_writer
except ImportError:
import sys, os
sys.path.append(os.path.abspath(os.path.join('..')))
try:
import cxx_writer
except ImportError:
sys.path.append(os.path.abspath(os.path.join('..', '..')))
try:
import cxx_writer
except ImportError:
print ('Please specify location of core TRAP files in testSimpleDecls.py.')
import unittest
import os
class TestSimpleDecls(unittest.TestCase):
    """Exercises cxx_writer's simple declarations (template types,
    variables, enums, unions, typedefs and free functions) by emitting
    each construct into a scratch file and asserting on the exact
    lines written.
    """

    def setUp(self):
        # Start each test from a clean scratch file.
        try:
            os.remove('prova.cpp')
        except:
            pass
        self.writer = cxx_writer.CodeWriter('prova.cpp', indentSize = 4, lineWidth = 80)

    def tearDown(self):
        del self.writer
        os.remove('prova.cpp')

    def testSimpleTemplateType(self):
        """A single-argument template type renders inline."""
        innerType = cxx_writer.stringType
        templ = cxx_writer.TemplateType('std::vector', [innerType])
        templ.writeDeclaration(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 1)
        self.assertEqual(lines[0], 'std::vector<std::string>')

    def testDoubleTemplateType(self):
        """Two template arguments are comma-separated."""
        innerType1 = cxx_writer.stringType
        innerType2 = cxx_writer.intType
        templ = cxx_writer.TemplateType('std::map', [innerType1, innerType2])
        templ.writeDeclaration(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 1)
        self.assertEqual(lines[0], 'std::map<std::string, int>')

    def testNestedTemplateType(self):
        """Nested templates get the pre-C++11 '> >' closing space."""
        innerType1 = cxx_writer.stringType
        innerType2 = cxx_writer.intType
        innerType3 = cxx_writer.doubleType
        templ1 = cxx_writer.TemplateType('std::map', [innerType2, innerType3])
        templ2 = cxx_writer.TemplateType('std::map', [innerType1, templ1])
        templ2.writeDeclaration(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 1)
        self.assertEqual(lines[0], 'std::map<std::string, std::map<int, double> >')

    def testSimpleVariable(self):
        # Variable declarations emit nothing via writeDeclaration.
        type = cxx_writer.stringType
        var = cxx_writer.Variable('pippo', type)
        var.writeDeclaration(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 0)

    def testVariableInit(self):
        # An initializer does not change that: still no output.
        type = cxx_writer.stringType
        var = cxx_writer.Variable('pippo', type, False, '\"pippa\"')
        var.writeDeclaration(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 0)

    def testTemplatedVariable(self):
        # Nor does a templated variable type.
        innerType1 = cxx_writer.stringType
        innerType2 = cxx_writer.intType
        innerType3 = cxx_writer.doubleType
        templ1 = cxx_writer.TemplateType('std::map', [innerType2, innerType3])
        type = cxx_writer.TemplateType('std::map', [innerType1, templ1])
        var = cxx_writer.Variable('pippo', type)
        var.writeDeclaration(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 0)

    def testEnum(self):
        # NOTE: the emitted value order reflects dict iteration in the
        # generator, not insertion order.
        enumInst = cxx_writer.Enum('myEnum', {'ONE':1, 'TWO':2, 'THREE':3})
        enumInst.writeDeclaration(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 5)
        self.assertEqual(lines[0], 'enum myEnum {\n')
        self.assertEqual(lines[1], '    THREE = 3\n')
        self.assertEqual(lines[2], '    ,TWO = 2\n')
        self.assertEqual(lines[3], '    ,ONE = 1\n')
        self.assertEqual(lines[4], '};\n')

    def testUnion(self):
        enumInst = None  # (no enum involved; scratch writer reused)
        unionInst = cxx_writer.Union('myUnion')
        type = cxx_writer.stringType
        var = cxx_writer.Variable('pippo', type)
        unionInst.addMember(var)
        type = cxx_writer.intType
        var = cxx_writer.Variable('duck', type)
        unionInst.addMember(var)
        unionInst.writeDeclaration(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 4)
        self.assertEqual(lines[0], 'union myUnion {\n')
        self.assertEqual(lines[1], '    std::string pippo;\n')
        self.assertEqual(lines[2], '    int duck;\n')
        self.assertEqual(lines[3], '};\n')

    def testTypedef(self):
        # NOTE(review): the expected output 'typedef duck int;' has the
        # name/type order reversed relative to valid C++; this pins the
        # generator's actual (buggy-looking) behavior — verify upstream.
        type = cxx_writer.intType
        typedef = cxx_writer.Typedef('duck', type)
        typedef.writeDeclaration(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 1)
        self.assertEqual(lines[0], 'typedef duck int;\n')

    def testSimpleFunction(self):
        """A void function with a one-line body."""
        code = cxx_writer.Code('printf(\"Wow\");')
        function = cxx_writer.Function('dummy_func', code)
        function.writeImplementation(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 3)
        self.assertEqual(lines[0], 'void dummy_func() {\n')
        self.assertEqual(lines[1], '    printf(\"Wow\");\n')
        self.assertEqual(lines[2], '} // dummy_func()\n')

    def testReturnFunction(self):
        """A function with a return type re-indents its body."""
        code = cxx_writer.Code('if (works) {\nprintf(\"hummm\\n\");\nreturn 1;\n} else {\nreturn 0;\n}')
        retType = cxx_writer.intType
        function = cxx_writer.Function('dummy_func', code, retType)
        function.writeImplementation(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 8)
        self.assertEqual(lines[0], 'int dummy_func() {\n')
        self.assertEqual(lines[1], '    if (works) {\n')
        self.assertEqual(lines[2], '        printf(\"hummm\\n\");\n')
        self.assertEqual(lines[3], '        return 1;\n')
        self.assertEqual(lines[4], '    } else {\n')
        self.assertEqual(lines[5], '        return 0;\n')
        self.assertEqual(lines[6], '    }\n')
        self.assertEqual(lines[7], '} // dummy_func()\n')

    def testParameterFunction(self):
        """Parameters appear in the signature."""
        code = cxx_writer.Code('if (works) {\nprintf(\"hummm\\n\");\nreturn 1;\n} else {\nreturn 0;\n}')
        intType = cxx_writer.intType
        parameters = [cxx_writer.Parameter('param1', intType)]
        function = cxx_writer.Function('dummy_func', code, intType, parameters)
        function.writeImplementation(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 8)
        self.assertEqual(lines[0], 'int dummy_func(int param1) {\n')
        self.assertEqual(lines[1], '    if (works) {\n')
        self.assertEqual(lines[2], '        printf(\"hummm\\n\");\n')
        self.assertEqual(lines[3], '        return 1;\n')
        self.assertEqual(lines[4], '    } else {\n')
        self.assertEqual(lines[5], '        return 0;\n')
        self.assertEqual(lines[6], '    }\n')
        self.assertEqual(lines[7], '} // dummy_func()\n')

    def testTemplateFunction(self):
        """Template parameters render as a `template <typename A>` prefix
        in the declaration."""
        code = cxx_writer.Code('if (works) {\nprintf(\"hummm\\n\");\nreturn 1;\n} else {\nreturn 0;\n}')
        intType = cxx_writer.intType
        parameters = [cxx_writer.Parameter('param1', intType)]
        function = cxx_writer.Function('dummy_func', code, intType, parameters, template = ['A'])
        function.writeDeclaration(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 8)
        self.assertEqual(lines[0], 'template <typename A> int dummy_func(int param1) {\n')
        self.assertEqual(lines[1], '    if (works) {\n')
        self.assertEqual(lines[2], '        printf(\"hummm\\n\");\n')
        self.assertEqual(lines[3], '        return 1;\n')
        self.assertEqual(lines[4], '    } else {\n')
        self.assertEqual(lines[5], '        return 0;\n')
        self.assertEqual(lines[6], '    }\n')
        self.assertEqual(lines[7], '} // dummy_func()\n')

    def testInlineFunction(self):
        """Inline functions get an `inline` prefix in the declaration."""
        code = cxx_writer.Code('if (works) {\nprintf(\"hummm\\n\");\nreturn 1;\n} else {\nreturn 0;\n}')
        intType = cxx_writer.intType
        parameters = [cxx_writer.Parameter('param1', intType)]
        function = cxx_writer.Function('dummy_func', code, intType, parameters, inline = True)
        function.writeDeclaration(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 8)
        self.assertEqual(lines[0], 'inline int dummy_func(int param1) {\n')
        self.assertEqual(lines[1], '    if (works) {\n')
        self.assertEqual(lines[2], '        printf(\"hummm\\n\");\n')
        self.assertEqual(lines[3], '        return 1;\n')
        self.assertEqual(lines[4], '    } else {\n')
        self.assertEqual(lines[5], '        return 0;\n')
        self.assertEqual(lines[6], '    }\n')
        self.assertEqual(lines[7], '} // dummy_func()\n')

    def testFunctionDoc(self):
        """Doc strings become '///' Doxygen comments above the function."""
        intType = cxx_writer.intType
        code = cxx_writer.Code('')
        parameters = [cxx_writer.Parameter('param1', intType)]
        function = cxx_writer.Function('dummy_func', code, intType, parameters)
        function.addDocString('Documentation test\nanother line\n')
        function.writeImplementation(self.writer)
        self.writer.flush()
        testFile = open('prova.cpp', 'r')
        lines = testFile.readlines()
        testFile.close()
        self.assertEqual(len(lines), 5)
        self.assertEqual(lines[0], '/// Documentation test\n')
        self.assertEqual(lines[1], '/// another line\n')
        self.assertEqual(lines[2], 'int dummy_func(int param1) {\n')
        self.assertEqual(lines[3], '\n')
        self.assertEqual(lines[4], '} // dummy_func()\n')
################################################################################
|
havard024/prego | refs/heads/master | crm/lib/python2.7/site-packages/django/utils/hashcompat.py | 124 | """
The md5 and sha modules are deprecated since Python 2.5, replaced by the
hashlib module containing both hash algorithms. Here, we provide a common
interface to the md5 and sha constructors, depending on system version.
"""
import warnings
warnings.warn("django.utils.hashcompat is deprecated; use hashlib instead",
DeprecationWarning)
import hashlib
md5_constructor = hashlib.md5
md5_hmac = md5_constructor
sha_constructor = hashlib.sha1
sha_hmac = sha_constructor
|
PhilHarnish/forge | refs/heads/master | spec/puzzle/examples/mim/p1_2_spec.py | 1 | import astor
from data import warehouse
from puzzle.examples.mim import p1_2
from puzzle.problems import logic_problem
from puzzle.puzzlepedia import prod_config
from spec.mamba import *
# Mamba BDD spec for puzzle p1_2; `with _description`/`_it` blocks are
# disabled (underscore prefix), plain `description`/`it` blocks run.
with _description('p1_2'):
    with before.all:
        # Snapshot warehouse state and boot the prod config once for the
        # whole spec; both are restored in after.all.
        warehouse.save()
        prod_config.init()
        self.puzzle = p1_2.get()

    with after.all:
        prod_config.reset()
        warehouse.restore()

    with description('solution'):
        with it('scores the source as a LogicProblem'):
            expect(logic_problem.LogicProblem.score(
                p1_2.SOURCE.split('\n'))).to(equal(1))

        with it('identifies puzzle type'):
            problems = self.puzzle.problems()
            expect(problems).to(have_len(1))
            problem = problems[0]
            expect(problem).to(be_a(logic_problem.LogicProblem))

        # Disabled: debugging aid that prints the parsed AST.
        with _it('parses puzzle'):
            node = logic_problem._parse(p1_2.SOURCE.split('\n'))
            print(astor.to_source(node))

        # Disabled: debugging aid that prints the constraint model.
        with _it('models puzzle'):
            model = logic_problem._model(p1_2.SOURCE.split('\n'))
            model.add(model.dimension_constraints())
            print(str(model))

        with it('exports a solution'):
            problem = self.puzzle.problems()[0]
            expect(problem.solution).to(look_like(p1_2.SOLUTION))
DreamerKing/LightweightHtmlWidgets | refs/heads/master | publish-rc/v1.0/files/Ipy.Lib/encodings/mbcs.py | 860 | """ Python 'mbcs' Codec for Windows
Cloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py,
which was written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
# Import them explicitly to cause an ImportError
# on non-Windows systems
from codecs import mbcs_encode, mbcs_decode
# for IncrementalDecoder, IncrementalEncoder, ...
import codecs
### Codec APIs
encode = mbcs_encode
def decode(input, errors='strict'):
    """Decode `input` from the Windows MBCS (ANSI) code page.

    The trailing True tells mbcs_decode the input is final, so it is
    consumed entirely rather than buffered.
    """
    return mbcs_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # mbcs_encode returns (encoded_bytes, length_consumed); only the
        # encoded bytes are returned here.
        return mbcs_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    # BufferedIncrementalDecoder handles the buffering; it only needs
    # the raw decode primitive.
    _buffer_decode = mbcs_decode
class StreamWriter(codecs.StreamWriter):
    # Stream writer simply reuses the stateless encode primitive.
    encode = mbcs_encode
class StreamReader(codecs.StreamReader):
    # Stream reader simply reuses the stateless decode primitive.
    decode = mbcs_decode
### encodings module API
def getregentry():
    """Return the CodecInfo used by the codec registry to look up 'mbcs'."""
    return codecs.CodecInfo(
        name='mbcs',
        encode=encode,
        decode=decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
iksteen/aiohttp | refs/heads/master | tests/test_web.py | 6 | import asyncio
import unittest
from unittest import mock
from aiohttp import web, log
from aiohttp.multidict import CIMultiDict
from aiohttp.protocol import HttpVersion11, RawRequestMessage
class TestWeb(unittest.TestCase):
    """Tests for aiohttp.web.Application construction, finish callbacks
    and basic request handling, run on a private event loop."""

    def setUp(self):
        # Private loop per test; the global loop is cleared so code that
        # implicitly grabs the default loop fails loudly.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)

    def tearDown(self):
        self.loop.close()

    def test_handler_returns_not_response(self):
        # A handler returning a plain string (not a Response) trips an
        # AssertionError inside handle_request.
        app = web.Application(loop=self.loop)

        def handler(request):
            return 'abc'

        app.router.add_route('GET', '/', handler)
        h = app.make_handler()()
        message = RawRequestMessage('GET', '/', HttpVersion11,
                                    CIMultiDict(), False, False)
        payload = mock.Mock()
        with self.assertRaises(AssertionError):
            self.loop.run_until_complete(h.handle_request(message, payload))

    def test_app_ctor(self):
        app = web.Application(loop=self.loop)
        self.assertIs(self.loop, app.loop)
        self.assertIs(app.logger, log.web_logger)

    def test_app_call(self):
        # Application is callable and returns itself (protocol factory).
        app = web.Application(loop=self.loop)
        self.assertIs(app, app())

    def test_app_default_loop(self):
        # Without an explicit loop, the current default loop is used.
        asyncio.set_event_loop(self.loop)
        app = web.Application()
        self.assertIs(self.loop, app.loop)

    def test_app_register_on_finish(self):
        # Finish callbacks run with their registered args on app.finish().
        app = web.Application(loop=self.loop)

        cb1 = mock.Mock()
        cb2 = mock.Mock()
        app.register_on_finish(cb1, 1, b=2)
        app.register_on_finish(cb2, 2, c=3)

        self.loop.run_until_complete(app.finish())

        cb1.assert_called_once_with(app, 1, b=2)
        cb2.assert_called_once_with(app, 2, c=3)

    def test_app_register_coro(self):
        # Coroutine finish callbacks are awaited, not just called.
        app = web.Application(loop=self.loop)

        fut = asyncio.Future(loop=self.loop)

        @asyncio.coroutine
        def cb(app):
            yield from asyncio.sleep(0.001, loop=self.loop)
            fut.set_result(123)

        app.register_on_finish(cb)
        self.loop.run_until_complete(app.finish())
        self.assertTrue(fut.done())
        self.assertEqual(123, fut.result())

    def test_app_error_in_finish_callbacks(self):
        # Exceptions from finish callbacks are routed to the loop's
        # exception handler instead of propagating.
        app = web.Application(loop=self.loop)

        err = RuntimeError('bad call')
        app.register_on_finish(mock.Mock(side_effect=err))

        handler = mock.Mock()
        self.loop.set_exception_handler(handler)
        self.loop.run_until_complete(app.finish())

        exc_info = {'exception': err,
                    'application': app,
                    'message': 'Error in finish callback'}
        handler.assert_called_once_with(self.loop, exc_info)

    def test_non_default_router(self):
        router = web.UrlDispatcher()
        app = web.Application(loop=self.loop, router=router)
        self.assertIs(router, app.router)

    def test_logging(self):
        # app.logger is a plain writable attribute.
        logger = mock.Mock()
        app = web.Application(loop=self.loop)
        app.logger = logger
        self.assertIs(app.logger, logger)
class TestRequestHandlerFactory(unittest.TestCase):
    """Tests for the handler factory returned by app.make_handler():
    repr, connection tracking and graceful shutdown."""

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)

    def tearDown(self):
        self.loop.close()

    def test_repr(self):
        # repr reflects connection state and, once set, method/path.
        app = web.Application(loop=self.loop)
        manager = app.make_handler()
        handler = manager()
        self.assertEqual(
            '<RequestHandler none:none disconnected>', repr(handler))

        handler.transport = object()
        handler._meth = 'GET'
        handler._path = '/index.html'
        self.assertEqual(
            '<RequestHandler GET:/index.html connected>', repr(handler))

    def test_connections(self):
        # The manager tracks live handlers between connection_made and
        # connection_lost.
        app = web.Application(loop=self.loop)
        manager = app.make_handler()
        self.assertEqual(manager.connections, [])

        handler = object()
        transport = object()
        manager.connection_made(handler, transport)
        self.assertEqual(manager.connections, [handler])

        manager.connection_lost(handler, None)
        self.assertEqual(manager.connections, [])

    def test_finish_connection_no_timeout(self):
        # Without a timeout, closing is requested with timeout=None and
        # the transport is closed.
        app = web.Application(loop=self.loop)
        manager = app.make_handler()

        handler = mock.Mock()
        transport = mock.Mock()
        manager.connection_made(handler, transport)

        self.loop.run_until_complete(manager.finish_connections())

        manager.connection_lost(handler, None)
        self.assertEqual(manager.connections, [])
        handler.closing.assert_called_with(timeout=None)
        transport.close.assert_called_with()

    def test_finish_connection_timeout(self):
        # With timeout=0.1, a slightly smaller per-handler budget (0.09)
        # is passed down — presumably reserving slack for the manager's
        # own wait; confirm against aiohttp's finish_connections.
        app = web.Application(loop=self.loop)
        manager = app.make_handler()

        handler = mock.Mock()
        transport = mock.Mock()
        manager.connection_made(handler, transport)

        self.loop.run_until_complete(manager.finish_connections(timeout=0.1))

        manager.connection_lost(handler, None)
        self.assertEqual(manager.connections, [])
        handler.closing.assert_called_with(timeout=0.09)
        transport.close.assert_called_with()
|
turtleloveshoes/kitsune | refs/heads/master | kitsune/forums/tests/test_notifications.py | 13 | from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.test.client import RequestFactory
import mock
from nose.tools import eq_
from kitsune.forums.events import NewPostEvent, NewThreadEvent
from kitsune.forums.models import Thread, Post
from kitsune.forums.tests import (
ForumTestCase, thread, forum, post as forum_post)
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.tests import post, attrs_eq, starts_with
from kitsune.users.models import Setting
from kitsune.users.tests import user
# Some of these contain a locale prefix on included links, while
# others don't. This depends on whether the tests use them inside or
# outside the scope of a request. See the long explanation in
# questions.tests.test_notifications.
# Expected notification-email bodies; tests compare with starts_with(),
# so these must match the rendered templates byte-for-byte from the top.
REPLY_EMAIL = u"""Reply to thread: {thread}
{username} has replied to a thread you're watching. Here is their reply:
========
a post
========
To view this post on the site, click the following link, or paste it into \
your browser's location bar:
https://testserver/en-US/forums/{forum_slug}/{thread_id}?utm_campaign=\
forums-post&utm_medium=email&utm_source=notification#post-{post_id}
--
Unsubscribe from these emails:
https://testserver/en-US/unsubscribe/"""

# Same idea for the new-thread notification (no #post anchor).
NEW_THREAD_EMAIL = u"""New thread: {thread}
{username} has posted a new thread in a forum you're watching. Here is \
the thread:
========
a post
========
To view this post on the site, click the following link, or paste it into \
your browser's location bar:
https://testserver/en-US/forums/{forum_slug}/{thread_id}?utm_campaign=\
forums-thread&utm_medium=email&utm_source=notification
--
Unsubscribe from these emails:
https://testserver/en-US/unsubscribe/"""
class NotificationsTests(ForumTestCase):
"""Test that notifications get sent."""
@mock.patch.object(NewPostEvent, 'fire')
def test_fire_on_reply(self, fire):
"""The event fires when there is a reply."""
u = user(save=True)
t = thread(save=True)
self.client.login(username=u.username, password='testpass')
post(self.client, 'forums.reply', {'content': 'a post'},
args=[t.forum.slug, t.id])
# NewPostEvent.fire() is called.
assert fire.called
@mock.patch.object(NewThreadEvent, 'fire')
def test_fire_on_new_thread(self, fire):
"""The event fires when there is a new thread."""
u = user(save=True)
f = forum(save=True)
self.client.login(username=u.username, password='testpass')
post(self.client, 'forums.new_thread',
{'title': 'a title', 'content': 'a post'},
args=[f.slug])
# NewThreadEvent.fire() is called.
assert fire.called
def _toggle_watch_thread_as(self, thread, user, turn_on=True):
"""Watch a thread and return it."""
self.client.login(username=user.username, password='testpass')
watch = 'yes' if turn_on else 'no'
post(self.client, 'forums.watch_thread', {'watch': watch},
args=[thread.forum.slug, thread.id])
# Watch exists or not, depending on watch.
if turn_on:
assert NewPostEvent.is_notifying(user, thread), (
'NewPostEvent should be notifying.')
else:
assert not NewPostEvent.is_notifying(user, thread), (
'NewPostEvent should not be notifying.')
def _toggle_watch_forum_as(self, forum, user, turn_on=True):
"""Watch a forum and return it."""
self.client.login(username=user.username, password='testpass')
watch = 'yes' if turn_on else 'no'
post(self.client, 'forums.watch_forum', {'watch': watch},
args=[forum.slug])
# Watch exists or not, depending on watch.
if turn_on:
assert NewThreadEvent.is_notifying(user, forum), (
'NewThreadEvent should be notifying.')
else:
assert not NewPostEvent.is_notifying(user, forum), (
'NewThreadEvent should not be notifying.')
@mock.patch.object(Site.objects, 'get_current')
def test_watch_thread_then_reply(self, get_current):
"""The event fires and sends emails when watching a thread."""
get_current.return_value.domain = 'testserver'
t = thread(save=True)
f = t.forum
poster = user(save=True)
watcher = user(save=True)
self._toggle_watch_thread_as(t, watcher, turn_on=True)
self.client.login(username=poster.username, password='testpass')
post(self.client, 'forums.reply', {'content': 'a post'},
args=[t.forum.slug, t.id])
p = Post.objects.all().order_by('-id')[0]
attrs_eq(mail.outbox[0], to=[watcher.email],
subject='Re: {f} - {t}'.format(f=f, t=t))
body = REPLY_EMAIL.format(
username=poster.username,
forum_slug=f.slug,
thread=t.title,
thread_id=t.id,
post_id=p.id)
starts_with(mail.outbox[0].body, body)
def test_watch_other_thread_then_reply(self):
# Watching a different thread than the one we're replying to
# shouldn't notify.
t1 = thread(save=True)
t2 = thread(save=True)
poster = user(save=True)
watcher = user(save=True)
self._toggle_watch_thread_as(t1, watcher, turn_on=True)
self.client.login(username=poster.username, password='testpass')
post(self.client, 'forums.reply', {'content': 'a post'},
args=[t2.forum.slug, t2.id])
assert not mail.outbox
@mock.patch.object(Site.objects, 'get_current')
def test_watch_forum_then_new_thread(self, get_current):
"""Watching a forum and creating a new thread should send email."""
get_current.return_value.domain = 'testserver'
f = forum(save=True)
poster = user(save=True)
watcher = user(save=True)
self._toggle_watch_forum_as(f, watcher, turn_on=True)
self.client.login(username=poster.username, password='testpass')
post(self.client, 'forums.new_thread',
{'title': 'a title', 'content': 'a post'}, args=[f.slug])
t = Thread.objects.all().order_by('-id')[0]
attrs_eq(mail.outbox[0], to=[watcher.email],
subject='{f} - {t}'.format(f=f, t=t))
body = NEW_THREAD_EMAIL.format(
username=poster.username,
forum_slug=f.slug,
thread=t.title,
thread_id=t.id)
starts_with(mail.outbox[0].body, body)
@mock.patch.object(Site.objects, 'get_current')
def test_watch_forum_then_new_thread_as_self(self, get_current):
# Watching a forum and creating a new thread as myself should
# not send email.
get_current.return_value.domain = 'testserver'
f = forum(save=True)
watcher = user(save=True)
self._toggle_watch_forum_as(f, watcher, turn_on=True)
self.client.login(username=watcher.username, password='testpass')
post(self.client, 'forums.new_thread',
{'title': 'a title', 'content': 'a post'}, args=[f.slug])
# Assert no email is sent.
assert not mail.outbox
@mock.patch.object(Site.objects, 'get_current')
def test_watch_forum_then_new_post(self, get_current):
"""Watching a forum and replying to a thread should send email."""
get_current.return_value.domain = 'testserver'
t = thread(save=True)
f = t.forum
forum_post(thread=t, save=True)
poster = user(save=True)
watcher = user(save=True)
self._toggle_watch_forum_as(f, watcher, turn_on=True)
self.client.login(username=poster.username, password='testpass')
post(self.client, 'forums.reply', {'content': 'a post'},
args=[f.slug, t.id])
p = Post.objects.all().order_by('-id')[0]
attrs_eq(mail.outbox[0], to=[watcher.email],
subject='Re: {f} - {t}'.format(f=f, t=t))
body = REPLY_EMAIL.format(
username=poster.username,
forum_slug=f.slug,
thread=t.title,
thread_id=t.id,
post_id=p.id)
starts_with(mail.outbox[0].body, body)
@mock.patch.object(Site.objects, 'get_current')
def test_watch_forum_then_new_post_as_self(self, get_current):
"""Watching a forum and replying as myself should not send email."""
get_current.return_value.domain = 'testserver'
t = thread(save=True)
f = t.forum
forum_post(thread=t, save=True)
watcher = user(save=True)
self._toggle_watch_forum_as(f, watcher, turn_on=True)
self.client.login(username=watcher.username, password='testpass')
post(self.client, 'forums.reply', {'content': 'a post'},
args=[f.slug, t.id])
# Assert no email is sent.
assert not mail.outbox
@mock.patch.object(Site.objects, 'get_current')
def test_watch_both_then_new_post(self, get_current):
"""Watching both forum and thread.
Replying to a thread should send ONE email."""
get_current.return_value.domain = 'testserver'
t = thread(save=True)
f = t.forum
forum_post(thread=t, save=True)
poster = user(save=True)
watcher = user(save=True)
self._toggle_watch_forum_as(f, watcher, turn_on=True)
self._toggle_watch_thread_as(t, watcher, turn_on=True)
self.client.login(username=poster.username, password='testpass')
post(self.client, 'forums.reply', {'content': 'a post'},
args=[f.slug, t.id])
eq_(1, len(mail.outbox))
p = Post.objects.all().order_by('-id')[0]
attrs_eq(mail.outbox[0], to=[watcher.email],
subject='Re: {f} - {t}'.format(f=f, t=t))
body = REPLY_EMAIL.format(
username=poster.username,
forum_slug=f.slug,
thread=t.title,
thread_id=t.id,
post_id=p.id)
starts_with(mail.outbox[0].body, body)
@mock.patch.object(Site.objects, 'get_current')
def test_autowatch_new_thread(self, get_current):
"""Creating a new thread should email responses"""
get_current.return_value.domain = 'testserver'
f = forum(save=True)
u = user(save=True)
self.client.login(username=u.username, password='testpass')
s = Setting.objects.create(user=u, name='forums_watch_new_thread',
value='False')
data = {'title': 'a title', 'content': 'a post'}
post(self.client, 'forums.new_thread', data, args=[f.slug])
t1 = Thread.objects.all().order_by('-id')[0]
assert not NewPostEvent.is_notifying(u, t1), (
'NewPostEvent should not be notifying.')
s.value = 'True'
s.save()
post(self.client, 'forums.new_thread', data, args=[f.slug])
t2 = Thread.objects.all().order_by('-id')[0]
assert NewPostEvent.is_notifying(u, t2), (
'NewPostEvent should be notifying.')
@mock.patch.object(Site.objects, 'get_current')
def test_autowatch_reply(self, get_current):
"""Replying to a thread creates a watch."""
get_current.return_value.domain = 'testserver'
u = user(save=True)
t1 = thread(save=True)
t2 = thread(save=True)
assert not NewPostEvent.is_notifying(u, t1)
assert not NewPostEvent.is_notifying(u, t2)
self.client.login(username=u.username, password='testpass')
# If the poster has the forums_watch_after_reply setting set to True,
# they will start watching threads they reply to.
s = Setting.objects.create(user=u, name='forums_watch_after_reply',
value='True')
data = {'content': 'some content'}
post(self.client, 'forums.reply', data, args=[t1.forum.slug, t1.pk])
assert NewPostEvent.is_notifying(u, t1)
# Setting forums_watch_after_reply back to False, now they shouldn't
# start watching threads they reply to.
s.value = 'False'
s.save()
post(self.client, 'forums.reply', data, args=[t2.forum.slug, t2.pk])
assert not NewPostEvent.is_notifying(u, t2)
    @mock.patch.object(Site.objects, 'get_current')
    def test_admin_delete_user_with_watched_thread(self, get_current):
        """Test the admin delete view for a user with a watched thread."""
        get_current.return_value.domain = 'testserver'
        # A thread whose creator is about to be deleted, plus a second user
        # watching that thread (the watch is what used to break deletion).
        t = thread(save=True)
        u = t.creator
        watcher = user(save=True)
        admin_user = user(is_staff=True, is_superuser=True, save=True)
        self.client.login(username=admin_user.username, password='testpass')
        self._toggle_watch_thread_as(t, watcher, turn_on=True)
        # Build an admin delete request authenticated as the superuser.
        url = reverse('admin:auth_user_delete', args=[u.id])
        request = RequestFactory().get(url)
        request.user = admin_user
        request.session = self.client.session
        # The following blows up without our monkeypatch.
        ModelAdmin(User, admin.site).delete_view(request, str(u.id))
|
ntuecon/server | refs/heads/master | pyenv/Lib/site-packages/django/contrib/auth/handlers/modwsgi.py | 537 | from django import db
from django.contrib import auth
from django.utils.encoding import force_bytes
def check_password(environ, username, password):
    """Authenticate *username*/*password* against Django's auth database.

    Per the mod_wsgi auth-provider contract the return value is ``None``
    (unknown or inactive user), ``True`` (password correct), or ``False``
    (password wrong).
    """
    user_model = auth.get_user_model()

    # db connection state is managed similarly to the wsgi handler
    # as mod_wsgi may call these functions outside of a request/response cycle
    db.reset_queries()
    try:
        try:
            account = user_model._default_manager.get_by_natural_key(username)
        except user_model.DoesNotExist:
            return None
        if account.is_active:
            return account.check_password(password)
        return None
    finally:
        db.close_old_connections()
def groups_for_user(environ, username):
    """Return the names of *username*'s groups as a list of bytestrings.

    An unknown or inactive user authorizes to no groups (empty list).
    """
    user_model = auth.get_user_model()

    # Same connection-state management rationale as check_password().
    db.reset_queries()
    try:
        try:
            account = user_model._default_manager.get_by_natural_key(username)
        except user_model.DoesNotExist:
            return []
        if not account.is_active:
            return []
        return [force_bytes(g.name) for g in account.groups.all()]
    finally:
        db.close_old_connections()
|
J861449197/edx-platform | refs/heads/master | common/djangoapps/django_locale/middleware.py | 81 | # TODO: This file is imported from the stable Django 1.8 branch. Remove this file
# and re-import this middleware from Django once the codebase is upgraded. [PLAT-671]
# pylint: disable=invalid-name, missing-docstring
"This is the locale selecting middleware that will look at accept headers"
from django.conf import settings
from django.core.urlresolvers import (
LocaleRegexURLResolver, get_resolver, get_script_prefix, is_valid_path,
)
from django.http import HttpResponseRedirect
from django.utils import translation
from django.utils.cache import patch_vary_headers
# Override the Django 1.4 implementation with the 1.8 implementation
from django_locale.trans_real import get_language_from_request
class LocaleMiddleware(object):
    """
    This is a very simple middleware that parses a request
    and decides what translation object to install in the current
    thread context. This allows pages to be dynamically
    translated to the language the user desires (if the language
    is available, of course).
    """
    # Redirect class used when a 404'd path resolves once language-prefixed.
    response_redirect_class = HttpResponseRedirect

    def __init__(self):
        # Detect once, at startup, whether the root urlconf uses
        # i18n_patterns (i.e. contains a LocaleRegexURLResolver).
        self._is_language_prefix_patterns_used = False
        for url_pattern in get_resolver(None).url_patterns:
            if isinstance(url_pattern, LocaleRegexURLResolver):
                self._is_language_prefix_patterns_used = True
                break

    def process_request(self, request):
        # Only let the URL path influence language detection when
        # language-prefixed URL patterns are actually in use.
        check_path = self.is_language_prefix_patterns_used()
        # This call is broken in Django 1.4:
        # https://github.com/django/django/blob/stable/1.4.x/django/utils/translation/trans_real.py#L399
        # (we override parse_accept_lang_header to a fixed version in dark_lang.middleware)
        language = get_language_from_request(
            request, check_path=check_path)
        translation.activate(language)
        request.LANGUAGE_CODE = translation.get_language()

    def process_response(self, request, response):
        language = translation.get_language()
        language_from_path = translation.get_language_from_path(request.path_info)
        # On a 404 for an un-prefixed path, retry the path with the active
        # language prefixed and redirect if that version would resolve.
        if (response.status_code == 404 and not language_from_path
                and self.is_language_prefix_patterns_used()):
            urlconf = getattr(request, 'urlconf', None)
            language_path = '/%s%s' % (language, request.path_info)
            path_valid = is_valid_path(language_path, urlconf)
            # Honor APPEND_SLASH: a trailing-slash variant may resolve too.
            if (not path_valid and settings.APPEND_SLASH
                    and not language_path.endswith('/')):
                path_valid = is_valid_path("%s/" % language_path, urlconf)

            if path_valid:
                script_prefix = get_script_prefix()
                language_url = "%s://%s%s" % (
                    request.scheme,
                    request.get_host(),
                    # insert language after the script prefix and before the
                    # rest of the URL
                    request.get_full_path().replace(
                        script_prefix,
                        '%s%s/' % (script_prefix, language),
                        1
                    )
                )
                return self.response_redirect_class(language_url)

        # Responses that vary by language must say so for caches, unless the
        # language is fixed by the URL prefix itself.
        if not (self.is_language_prefix_patterns_used()
                and language_from_path):
            patch_vary_headers(response, ('Accept-Language',))
        if 'Content-Language' not in response:
            response['Content-Language'] = language
        return response

    def is_language_prefix_patterns_used(self):
        """
        Returns `True` if the `LocaleRegexURLResolver` is used
        at root level of the urlpatterns, else it returns `False`.
        """
        return self._is_language_prefix_patterns_used
|
dutchiexl/constructiv | refs/heads/master | node_modules/node-gyp/gyp/gyptest.py | 1752 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import subprocess
import sys
class CommandRunner(object):
  """
  Executor class for commands, including "commands" implemented by
  Python functions.

  Commands may be strings (run via subprocess), lists of argv strings,
  or (function, args...) tuples invoked in-process.
  """
  verbose = True
  active = True

  def __init__(self, dictionary=None):
    # BUG FIX: the default used to be a shared mutable `{}`; use a fresh
    # dict per instance instead.
    self.subst_dictionary(dictionary if dictionary is not None else {})

  def subst_dictionary(self, dictionary):
    """Sets the dictionary used by subst() for %-substitution."""
    self._subst_dictionary = dictionary

  def subst(self, string, dictionary=None):
    """
    Substitutes (via the format operator) the values in the specified
    dictionary into the specified command.

    The command can be an (action, string) tuple.  In all cases, we
    perform substitution on strings and don't worry if something isn't
    a string.  (It's probably a Python function to be executed.)
    """
    if dictionary is None:
      dictionary = self._subst_dictionary
    if dictionary:
      try:
        string = string % dictionary
      except TypeError:
        # Not a string (e.g. a function or tuple); return it unchanged.
        pass
    return string

  def display(self, command, stdout=None, stderr=None):
    """Echoes the command about to be run, honoring self.verbose."""
    if not self.verbose:
      return
    if isinstance(command, tuple):
      # (function, args...) "commands": render as a call expression.
      # BUG FIX: this branch used to fall through into the final `else`
      # below (if/if/else instead of if/elif/else), clobbering `s` with the
      # tuple itself and then crashing on s.endswith().
      func = command[0]
      args = command[1:]
      s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
    elif isinstance(command, list):
      # TODO: quote arguments containing spaces
      # TODO: handle meta characters?
      s = ' '.join(command)
    else:
      s = self.subst(command)
    if not s.endswith('\n'):
      s += '\n'
    sys.stdout.write(s)
    sys.stdout.flush()

  def execute(self, command, stdout=None, stderr=None):
    """
    Executes a single command.
    """
    if not self.active:
      return 0
    if isinstance(command, str):
      # BUG FIX: shlex was used here without ever being imported, so any
      # string command raised NameError; import it where it is needed.
      import shlex
      command = self.subst(command)
      cmdargs = shlex.split(command)
      if cmdargs[0] == 'cd':
        # A 'cd' "command" becomes an in-process os.chdir.
        command = (os.chdir,) + tuple(cmdargs[1:])
    if isinstance(command, tuple):
      func = command[0]
      args = command[1:]
      return func(*args)
    else:
      if stdout is sys.stdout:
        # Same as passing sys.stdout, except python2.4 doesn't fail on it.
        subout = None
      else:
        # Open pipe for anything else so Popen works on python2.4.
        subout = subprocess.PIPE
      if stderr is sys.stderr:
        # Same as passing sys.stderr, except python2.4 doesn't fail on it.
        suberr = None
      elif stderr is None:
        # Merge with stdout if stderr isn't specified.
        suberr = subprocess.STDOUT
      else:
        # Open pipe for anything else so Popen works on python2.4.
        suberr = subprocess.PIPE
      p = subprocess.Popen(command,
                           shell=(sys.platform == 'win32'),
                           stdout=subout,
                           stderr=suberr)
      p.wait()
      if stdout is None:
        self.stdout = p.stdout.read()
      elif stdout is not sys.stdout:
        stdout.write(p.stdout.read())
      if stderr not in (None, sys.stderr):
        stderr.write(p.stderr.read())
      return p.returncode

  def run(self, command, display=None, stdout=None, stderr=None):
    """
    Runs a single command, displaying it first.
    """
    if display is None:
      display = command
    self.display(display)
    return self.execute(command, stdout, stderr)
class Unbuffered(object):
  """File-object proxy that flushes after every write.

  Every attribute other than write() is delegated to the wrapped file.
  """

  def __init__(self, fp):
    self.fp = fp

  def write(self, arg):
    # Flush immediately so output interleaves correctly with subprocesses.
    stream = self.fp
    stream.write(arg)
    stream.flush()

  def __getattr__(self, attr):
    return getattr(self.fp, attr)
# Wrap the standard streams so test-runner output is flushed write-by-write
# and interleaves correctly with child process output.
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
  """Return True if *f* is named like a gyp test script (gyptest*.py)."""
  return f.endswith('.py') and f.startswith('gyptest')


def find_all_gyptest_files(directory):
  """Return a sorted list of every gyptest*.py file under *directory*.

  .svn administrative directories are pruned from the walk.
  """
  found = []
  for root, dirs, files in os.walk(directory):
    if '.svn' in dirs:
      # Removing the entry in-place stops os.walk from descending into it.
      dirs.remove('.svn')
    found.extend(os.path.join(root, name)
                 for name in files if is_test_name(name))
  found.sort()
  return found
def main(argv=None):
  """Command-line entry point: select, run and summarize gyptest scripts.

  Returns 0 when every selected test passes, 1 on any failure or usage
  error.  (Python 2 module; kept as-is.)
  """
  if argv is None:
    argv = sys.argv
  usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
  parser = optparse.OptionParser(usage=usage)
  parser.add_option("-a", "--all", action="store_true",
            help="run all tests")
  parser.add_option("-C", "--chdir", action="store", default=None,
            help="chdir to the specified directory")
  parser.add_option("-f", "--format", action="store", default='',
            help="run tests with the specified formats")
  parser.add_option("-G", '--gyp_option', action="append", default=[],
            help="Add -G options to the gyp command line")
  parser.add_option("-l", "--list", action="store_true",
            help="list available tests and exit")
  parser.add_option("-n", "--no-exec", action="store_true",
            help="no execute, just print the command line")
  parser.add_option("--passed", action="store_true",
            help="report passed tests")
  parser.add_option("--path", action="append", default=[],
            help="additional $PATH directory")
  parser.add_option("-q", "--quiet", action="store_true",
            help="quiet, don't print test command lines")
  opts, args = parser.parse_args(argv[1:])
  if opts.chdir:
    os.chdir(opts.chdir)
  if opts.path:
    # Prepend user-supplied directories to $PATH for the child test runs.
    extra_path = [os.path.abspath(p) for p in opts.path]
    extra_path = os.pathsep.join(extra_path)
    os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
  if not args:
    if not opts.all:
      sys.stderr.write('Specify -a to get all tests.\n')
      return 1
    # Default to everything under the 'test' directory.
    args = ['test']
  # Expand directory arguments into the gyptest*.py files beneath them.
  tests = []
  for arg in args:
    if os.path.isdir(arg):
      tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
    else:
      if not is_test_name(os.path.basename(arg)):
        print >>sys.stderr, arg, 'is not a valid gyp test name.'
        sys.exit(1)
      tests.append(arg)
  if opts.list:
    for test in tests:
      print test
    sys.exit(0)
  # Configure the shared runner before creating it; these are class attrs.
  CommandRunner.verbose = not opts.quiet
  CommandRunner.active = not opts.no_exec
  cr = CommandRunner()
  # Let child test scripts import the TestGyp support library.
  os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
  if not opts.quiet:
    sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
  passed = []
  failed = []
  no_result = []
  if opts.format:
    format_list = opts.format.split(',')
  else:
    # TODO: not duplicate this mapping from pylib/gyp/__init__.py
    format_list = {
      'aix5':     ['make'],
      'freebsd7': ['make'],
      'freebsd8': ['make'],
      'openbsd5': ['make'],
      'cygwin':   ['msvs'],
      'win32':    ['msvs', 'ninja'],
      'linux2':   ['make', 'ninja'],
      'linux3':   ['make', 'ninja'],
      'darwin':   ['make', 'ninja', 'xcode', 'xcode-ninja'],
    }[sys.platform]
  # Run the whole test list once per requested generator format.
  for format in format_list:
    os.environ['TESTGYP_FORMAT'] = format
    if not opts.quiet:
      sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
    gyp_options = []
    for option in opts.gyp_option:
      gyp_options += ['-G', option]
    if gyp_options and not opts.quiet:
      sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
    for test in tests:
      status = cr.run([sys.executable, test] + gyp_options,
                      stdout=sys.stdout,
                      stderr=sys.stderr)
      # Exit status 2 is the TestGyp convention for "no result" (skipped).
      if status == 2:
        no_result.append(test)
      elif status:
        failed.append(test)
      else:
        passed.append(test)
  if not opts.quiet:
    def report(description, tests):
      # Print a one-line header followed by the tab-indented test names.
      if tests:
        if len(tests) == 1:
          sys.stdout.write("\n%s the following test:\n" % description)
        else:
          fmt = "\n%s the following %d tests:\n"
          sys.stdout.write(fmt % (description, len(tests)))
        sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
    if opts.passed:
      report("Passed", passed)
    report("Failed", failed)
    report("No result from", no_result)
  if failed:
    return 1
  else:
    return 0
|
BBN-Q/pyqgl2 | refs/heads/master | test/code/qft.py | 1 | from qgl2.qgl2 import qgl2decl, qgl2main, qreg, QRegister
from qgl2.qgl1 import Id, X90, Y90, X, Y, Ztheta, MEAS, CNOT
from math import pi
@qgl2decl
def hadamard(q:qreg):
    # Hadamard on q built from the native pulse set: Y90 then X
    # (intended as H up to a global phase -- standard decomposition).
    # NOTE: body is compiled by the QGL2 AST compiler; keep it pulse-only.
    Y90(q)
    X(q)
@qgl2decl
def CZ_k(c:qreg, t:qreg, k):
    # Controlled phase rotation of theta = 2*pi/2**k between control c and
    # target t, via the CNOT-conjugated Rz decomposition: Rz(theta/2) on the
    # target, then CNOT, Rz(-theta/2), CNOT.
    # NOTE(review): no Rz(theta/2) is applied to the control, so this equals
    # a full CPHASE(theta) only up to a Z rotation on c -- confirm intended.
    theta = 2 * pi / 2**k
    Ztheta(t, angle=theta/2)
    CNOT(c, t)
    Ztheta(t, angle=-theta/2)
    CNOT(c, t)
@qgl2decl
def qft(qs:qreg):
    # Quantum Fourier transform over register qs: Hadamard each qubit, then
    # apply CZ_k rotations (angle 2*pi/2**(j-i)) from each later qubit j,
    # and finally measure the whole register.
    # NOTE(review): the conventional final qubit-order reversal (SWAPs) is
    # not performed here -- confirm callers account for the bit order.
    for i in range(len(qs)):
        hadamard(qs[i])
        for j in range(i+1, len(qs)):
            CZ_k(qs[i], qs[j], j-i)
    MEAS(qs)
|
ar7z1/ansible | refs/heads/devel | lib/ansible/modules/identity/ipa/ipa_dnszone.py | 67 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com)
# Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_dnszone
author: Fran Fitzpatrick (@fxfitz)
short_description: Manage FreeIPA DNS Zones
description:
- Add and delete an IPA DNS Zones using IPA API
options:
zone_name:
description:
- The DNS zone name to which needs to be managed.
required: true
state:
description: State to ensure
required: false
default: present
choices: ["present", "absent"]
extends_documentation_fragment: ipa.documentation
version_added: "2.5"
'''
EXAMPLES = '''
# Ensure dns zone is present
- ipa_dnszone:
ipa_host: spider.example.com
ipa_pass: Passw0rd!
state: present
zone_name: example.com
# Ensure that dns zone is removed
- ipa_dnszone:
zone_name: example.com
ipa_host: localhost
ipa_user: admin
ipa_pass: topsecret
state: absent
'''
RETURN = '''
zone:
description: DNS zone as returned by IPA API.
returned: always
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils._text import to_native
class DNSZoneIPAClient(IPAClient):
    """Thin wrapper over IPAClient exposing the dnszone_* JSON-RPC calls."""

    def __init__(self, module, host, port, protocol):
        super(DNSZoneIPAClient, self).__init__(module, host, port, protocol)

    def dnszone_find(self, zone_name):
        """Look up a DNS zone by name; returns the raw API result."""
        item = {'idnsname': zone_name}
        return self._post_json(method='dnszone_find', name=zone_name,
                               item=item)

    def dnszone_add(self, zone_name=None, details=None):
        """Create the DNS zone *zone_name* (details is currently unused)."""
        return self._post_json(method='dnszone_add', name=zone_name, item={})

    def dnszone_del(self, zone_name=None, record_name=None, details=None):
        """Delete the DNS zone *zone_name*."""
        return self._post_json(method='dnszone_del', name=zone_name, item={})
def ensure(module, client):
    """Bring the DNS zone into the requested state.

    Returns a (changed, zone) tuple where *zone* is a fresh dnszone_find
    result after any modification.  Honors Ansible check mode (reports
    changed without calling the mutating API).
    """
    zone_name = module.params['zone_name']
    state = module.params['state']
    existing = client.dnszone_find(zone_name)

    if state == 'present':
        changed = not existing
        if changed and not module.check_mode:
            client.dnszone_add(zone_name=zone_name)
    else:
        changed = bool(existing)
        if changed and not module.check_mode:
            client.dnszone_del(zone_name=zone_name)

    # Re-query so the caller sees the post-change view of the zone.
    return changed, client.dnszone_find(zone_name)
def main():
    """Ansible module entry point: parse args, log in to IPA, apply state."""
    argument_spec = ipa_argument_spec()
    argument_spec.update(
        zone_name=dict(type='str', required=True),
        state=dict(type='str', default='present',
                   choices=['present', 'absent']),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    client = DNSZoneIPAClient(module=module,
                              host=module.params['ipa_host'],
                              port=module.params['ipa_port'],
                              protocol=module.params['ipa_prot'])
    try:
        client.login(username=module.params['ipa_user'],
                     password=module.params['ipa_pass'])
        changed, zone = ensure(module, client)
        module.exit_json(changed=changed, zone=zone)
    except Exception as e:
        # Surface any IPA/API failure as a module failure, not a traceback.
        module.fail_json(msg=to_native(e))


if __name__ == '__main__':
    main()
|
openstack/python-muranoclient | refs/heads/master | muranoclient/tests/unit/osc/v1/test_environment.py | 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
import tempfile
from unittest import mock
from muranoclient.osc.v1 import environment as osc_env
from muranoclient.tests.unit.osc.v1 import fakes
from muranoclient.v1 import environments as api_env
ENV_INFO = {'id': '1234',
'name': 'Fake Environment',
'created': '2015-12-16T17:31:54',
'updated': '2015-12-16T17:31:54',
'networking': {},
'services': ['fake services'],
'status': 'fake deployed',
'tenant_id': 'xyz123',
'version': '1'}
ENV_MODEL = {
"defaultNetworks": {
"environment": {
"name": "env-network",
"?": {
"type": "io.murano.resources.NeutronNetwork",
"id": "5678"
}
},
"flat": None
},
"region": "RegionOne",
"name": "env",
"?": {
"updated": "2016-10-03 09:33:41.039789",
"type": "io.murano.Environment",
"id": "1234"
}
}
class TestEnvironment(fakes.TestApplicationCatalog):
    """Base class for environment command tests.

    Caches the mocked application-catalog managers (environments, sessions,
    services) that the OSC commands under test call into.
    """
    def setUp(self):
        super(TestEnvironment, self).setUp()
        # Shortcuts to the mocked managers on the fake client manager.
        self.environment_mock = self.app.client_manager.application_catalog.\
            environments
        self.session_mock = self.app.client_manager.application_catalog.\
            sessions
        self.services_mock = self.app.client_manager.application_catalog.\
            services
        # Drop any calls recorded while the fixtures were being built.
        self.environment_mock.reset_mock()
class TestListEnvironment(TestEnvironment):
    """Tests for `environment list`, covering the tenant-scoping options."""

    def setUp(self):
        super(TestListEnvironment, self).setUp()
        self.environment_mock.list.return_value = [api_env.Environment(None,
                                                   ENV_INFO)]
        # Command to test
        self.cmd = osc_env.ListEnvironments(self.app, None)

    @mock.patch('osc_lib.utils.get_item_properties')
    def test_environment_list_with_no_options(self, mock_util):
        arglist = []
        verifylist = []
        # Row the (mocked) property extractor yields per environment.
        mock_util.return_value = ('1234', 'Environment of all tenants',
                                  'fake deployed', '2015-12-16T17:31:54',
                                  '2015-12-16T17:31:54'
                                  )
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that columns are correct
        expected_columns = ['Id', 'Name', 'Status', 'Created', 'Updated']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [('1234', 'Environment of all tenants',
                          'fake deployed', '2015-12-16T17:31:54',
                          '2015-12-16T17:31:54')]
        self.assertEqual(expected_data, data)

    @mock.patch('osc_lib.utils.get_item_properties')
    def test_environment_list_with_all_tenants(self, mock_util):
        arglist = ['--all-tenants']
        verifylist = [('all_tenants', True), ('tenant', None)]
        mock_util.return_value = ('1234', 'Environment of all tenants',
                                  'fake deployed', '2015-12-16T17:31:54',
                                  '2015-12-16T17:31:54'
                                  )
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that columns are correct
        expected_columns = ['Id', 'Name', 'Status', 'Created', 'Updated']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [('1234', 'Environment of all tenants',
                          'fake deployed', '2015-12-16T17:31:54',
                          '2015-12-16T17:31:54')]
        self.assertEqual(expected_data, data)
        # --all-tenants is forwarded as the first positional argument.
        self.environment_mock.list.assert_called_once_with(True, None)

    @mock.patch('osc_lib.utils.get_item_properties')
    def test_environment_list_with_tenant(self, mock_util):
        arglist = ['--tenant=ABC']
        verifylist = [('all_tenants', False), ('tenant', 'ABC')]
        mock_util.return_value = ('1234', 'Environment of tenant ABC',
                                  'fake deployed', '2015-12-16T17:31:54',
                                  '2015-12-16T17:31:54'
                                  )
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that columns are correct
        expected_columns = ['Id', 'Name', 'Status', 'Created', 'Updated']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [('1234', 'Environment of tenant ABC',
                          'fake deployed', '2015-12-16T17:31:54',
                          '2015-12-16T17:31:54')]
        self.assertEqual(expected_data, data)
        # An explicit tenant is forwarded as the second positional argument.
        self.environment_mock.list.assert_called_once_with(False, 'ABC')
class TestShowEnvironment(TestEnvironment):
    """Tests for `environment show`, including --only-apps and --session-id."""

    def setUp(self):
        super(TestShowEnvironment, self).setUp()
        # The command converts the fetched environment with to_dict().
        mock_to_dict = self.environment_mock.get.return_value.to_dict
        mock_to_dict.return_value = ENV_INFO
        self.cmd = osc_env.ShowEnvironment(self.app, None)

    @mock.patch('oslo_serialization.jsonutils.dumps')
    def test_environment_show_with_no_options(self, mock_json):
        arglist = ['fake']
        verifylist = [('id', 'fake')]
        mock_json.return_value = ['fake services']
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that columns are correct
        expected_columns = ('created', 'id', 'name', 'networking', 'services',
                            'status', 'tenant_id', 'updated', 'version')
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = ('2015-12-16T17:31:54', '1234', 'Fake Environment',
                         {}, ['fake services'], 'fake deployed', 'xyz123',
                         '2015-12-16T17:31:54', '1')
        self.assertEqual(expected_data, data)

    @mock.patch('oslo_serialization.jsonutils.dumps')
    def test_environment_show_with_only_app_option(self, mock_json):
        # --only-apps narrows the output to the services column alone.
        arglist = ['fake', '--only-apps']
        verifylist = [('id', 'fake'), ('only_apps', True)]
        mock_json.return_value = ['fake services']
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that columns are correct
        expected_columns = ['services']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [['fake services']]
        self.assertEqual(expected_data, data)

    @mock.patch('oslo_serialization.jsonutils.dumps')
    def test_environment_show_with_session_id_option(self, mock_json):
        arglist = ['fake', '--session-id', 'abc123']
        verifylist = [('id', 'fake'), ('session_id', 'abc123')]
        mock_json.return_value = ['fake services']
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that columns are correct
        expected_columns = ('created', 'id', 'name', 'networking', 'services',
                            'status', 'tenant_id', 'updated', 'version')
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = ('2015-12-16T17:31:54', '1234', 'Fake Environment',
                         {}, ['fake services'], 'fake deployed', 'xyz123',
                         '2015-12-16T17:31:54', '1')
        self.assertEqual(expected_data, data)
class TestRenameEnvironment(TestEnvironment):
    """Exercises `environment rename` through the OSC command plumbing."""

    def setUp(self):
        super(TestRenameEnvironment, self).setUp()
        self.environment_mock.update.return_value = [api_env.Environment(None,
                                                     ENV_INFO)]
        # Command under test.
        self.cmd = osc_env.RenameEnvironment(self.app, None)

    @mock.patch('osc_lib.utils.get_item_properties')
    def test_environment_rename(self, mock_util):
        row = ('1234', 'fake-1', 'fake deployed',
               '2015-12-16T17:31:54', '2015-12-16T17:31:54')
        mock_util.return_value = row
        arglist = ['1234', 'fake-1']
        verifylist = [('id', '1234'), ('name', 'fake-1')]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        # The command reports the standard five columns...
        self.assertEqual(['Id', 'Name', 'Status', 'Created', 'Updated'],
                         columns)
        # ...and a single row describing the renamed environment.
        self.assertEqual([row], data)
class TestEnvironmentSessionCreate(TestEnvironment):
    """Exercises `environment session create`."""

    def setUp(self):
        super(TestEnvironmentSessionCreate, self).setUp()
        # Command under test.
        self.cmd = osc_env.EnvironmentSessionCreate(self.app, None)

    @mock.patch('muranoclient.common.utils.text_wrap_formatter')
    def test_environment_session_create(self, mock_util):
        mock_util.return_value = '1abc2xyz'
        arglist = ['1234']
        verifylist = [('id', '1234')]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        # A session create reports just the new session id.
        self.assertEqual(['id'], columns)
        self.assertEqual(['1abc2xyz'], data)
class TestEnvironmentCreate(TestEnvironment):
    """Tests for `environment create` and its region/network join options."""

    def setUp(self):
        super(TestEnvironmentCreate, self).setUp()
        self.environment_mock.create.return_value = [api_env.Environment(None,
                                                     ENV_INFO)]
        # Command to test
        self.cmd = osc_env.EnvironmentCreate(self.app, None)

    @mock.patch('osc_lib.utils.get_item_properties')
    def test_environment_create_with_no_option(self, mock_util):
        arglist = ['fake']
        verifylist = [('name', 'fake')]
        mock_util.return_value = ('1234', 'fake', 'ready',
                                  '2015-12-16T17:31:54', '2015-12-16T17:31:54')
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that columns are correct
        expected_columns = ['Id', 'Name', 'Status', 'Created', 'Updated']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [('1234', 'fake', 'ready',
                          '2015-12-16T17:31:54', '2015-12-16T17:31:54')]
        self.assertEqual(expected_data, data)

    @mock.patch('osc_lib.utils.get_item_properties')
    def test_environment_create_with_region_option(self, mock_util):
        arglist = ['fake', '--region', 'region_one']
        verifylist = [('name', 'fake'), ('region', 'region_one')]
        mock_util.return_value = ('1234', 'fake', 'ready',
                                  '2015-12-16T17:31:54', '2015-12-16T17:31:54')
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that correct arguments are passed
        self.environment_mock.create.assert_has_calls([mock.call(
            {'name': 'fake', 'region': 'region_one'})])
        # Check that columns are correct
        expected_columns = ['Id', 'Name', 'Status', 'Created', 'Updated']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [('1234', 'fake', 'ready',
                          '2015-12-16T17:31:54', '2015-12-16T17:31:54')]
        self.assertEqual(expected_data, data)

    @mock.patch('osc_lib.utils.get_item_properties')
    def test_environment_create_with_net_option(self, mock_util):
        # --join-net-id builds an ExistingNeutronNetwork default-network
        # entry keyed by internalNetworkName.
        arglist = ['fake', '--join-net-id', 'x1y2z3']
        verifylist = [('name', 'fake'), ('join_net_id', 'x1y2z3')]
        mock_util.return_value = ('1234', 'fake', 'ready',
                                  '2015-12-16T17:31:54', '2015-12-16T17:31:54')
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        expected_call = {
            'defaultNetworks': {
                'environment': {
                    'internalNetworkName': 'x1y2z3',
                    '?': {
                        'type': 'io.murano.resources.ExistingNeutronNetwork',
                        # The object id is freshly generated; match anything.
                        'id': mock.ANY
                    }
                },
                'flat': None
            },
            'name': 'fake',
            'region': None
        }
        # Check that correct arguments are passed
        self.environment_mock.create.assert_called_with(expected_call)
        # Check that columns are correct
        expected_columns = ['Id', 'Name', 'Status', 'Created', 'Updated']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [('1234', 'fake', 'ready',
                          '2015-12-16T17:31:54', '2015-12-16T17:31:54')]
        self.assertEqual(expected_data, data)

    @mock.patch('osc_lib.utils.get_item_properties')
    def test_environment_create_with_subnet_option(self, mock_util):
        # --join-subnet-id is the same, but keyed by internalSubnetworkName.
        arglist = ['fake', '--join-subnet-id', 'x1y2z3']
        verifylist = [('name', 'fake'), ('join_subnet_id', 'x1y2z3')]
        mock_util.return_value = ('1234', 'fake', 'ready',
                                  '2015-12-16T17:31:54', '2015-12-16T17:31:54')
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        expected_call = {
            'defaultNetworks': {
                'environment': {
                    'internalSubnetworkName': 'x1y2z3',
                    '?': {
                        'type': 'io.murano.resources.ExistingNeutronNetwork',
                        'id': mock.ANY
                    }
                },
                'flat': None
            },
            'name': 'fake',
            'region': None
        }
        # Check that correct arguments are passed
        self.environment_mock.create.assert_called_with(expected_call)
        # Check that columns are correct
        expected_columns = ['Id', 'Name', 'Status', 'Created', 'Updated']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [('1234', 'fake', 'ready',
                          '2015-12-16T17:31:54', '2015-12-16T17:31:54')]
        self.assertEqual(expected_data, data)
class TestEnvironmentDelete(TestEnvironment):
    """Tests for `environment delete` (accepts multiple ids)."""

    def setUp(self):
        super(TestEnvironmentDelete, self).setUp()
        self.environment_mock.delete.return_value = None
        # After deleting, the command lists the remaining environments.
        self.environment_mock.list.return_value = [api_env.Environment(None,
                                                   ENV_INFO)]
        # Command to test
        self.cmd = osc_env.EnvironmentDelete(self.app, None)

    @mock.patch('osc_lib.utils.get_item_properties')
    def test_environment_delete(self, mock_util):
        arglist = ['fake1', 'fake2']
        verifylist = [('id', ['fake1', 'fake2'])]
        mock_util.return_value = ('1234', 'Environment of all tenants',
                                  'fake deployed', '2015-12-16T17:31:54',
                                  '2015-12-16T17:31:54'
                                  )
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that columns are correct
        expected_columns = ['Id', 'Name', 'Status', 'Created', 'Updated']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [('1234', 'Environment of all tenants',
                          'fake deployed', '2015-12-16T17:31:54',
                          '2015-12-16T17:31:54')]
        self.assertEqual(expected_data, data)
class TestEnvironmentDeploy(TestEnvironment):
    """Exercises `environment deploy` with an explicit session id."""

    def setUp(self):
        super(TestEnvironmentDeploy, self).setUp()
        # The command renders the deployed environment via to_dict().
        self.environment_mock.get.return_value.to_dict.return_value = ENV_INFO
        # Command under test.
        self.cmd = osc_env.EnvironmentDeploy(self.app, None)

    @mock.patch('oslo_serialization.jsonutils.dumps')
    def test_environment_deploy(self, mock_json):
        mock_json.return_value = ['fake services']
        arglist = ['fake', '--session-id', 'abc123']
        verifylist = [('id', 'fake'), ('session_id', 'abc123')]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        # The environment fields come back alphabetically ordered.
        self.assertEqual(('created', 'id', 'name', 'networking', 'services',
                          'status', 'tenant_id', 'updated', 'version'),
                         columns)
        self.assertEqual(('2015-12-16T17:31:54', '1234', 'Fake Environment',
                          {}, ['fake services'], 'fake deployed', 'xyz123',
                          '2015-12-16T17:31:54', '1'),
                         data)
class TestEnvironmentAppsEdit(TestEnvironment):
    """Tests for `environment apps edit`, which applies a JSON-patch file
    to an environment's services via the services API."""

    def setUp(self):
        super(TestEnvironmentAppsEdit, self).setUp()
        # Command to test
        self.cmd = osc_env.EnvironmentAppsEdit(self.app, None)

    def test_environment_apps_edit(self):
        # The environment's current object model: one app named "foo".
        fake = collections.namedtuple('fakeEnv', 'services')
        self.environment_mock.get.side_effect = [
            fake(services=[
                {'?': {'name': "foo"}}
            ]),
        ]
        # Write the JSON-patch into a temp file.  BUG FIX: use a context
        # manager so the handle is closed (and the file deleted) even when
        # the command raises; the original leaked the open handle.
        with tempfile.NamedTemporaryFile(prefix="murano-test",
                                         mode='w') as temp_file:
            json.dump([
                {'op': 'replace', 'path': '/0/?/name',
                 'value': "dummy"
                 }
            ], temp_file)
            # NamedTemporaryFile proxies flush(); no need to reach into the
            # underlying .file attribute.
            temp_file.flush()
            arglist = ['fake', '--session-id', 'abc123', temp_file.name]
            parsed_args = self.check_parser(self.cmd, arglist, [])
            self.cmd.take_action(parsed_args)

        # The patched model is PUT back in full at the root path.
        self.services_mock.put.assert_called_once_with(
            'fake',
            session_id='abc123',
            path='/',
            data=[{'?': {'name': 'dummy'}}]
        )
class TestEnvironmentModelShow(TestEnvironment):
    """Exercise the `environment model show` OSC command."""

    def setUp(self):
        super(TestEnvironmentModelShow, self).setUp()
        self.env_mock = \
            self.app.client_manager.application_catalog.environments
        self.env_mock.get_model.return_value = ENV_MODEL
        # Command under test.
        self.cmd = osc_env.EnvironmentModelShow(self.app, None)

    def _run_and_verify(self, arglist, verifylist):
        # Shared driver: parse the CLI arguments, run the command, and
        # verify both the column names and the row data against ENV_MODEL.
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.assertEqual(('?', 'defaultNetworks', 'name', 'region'), columns)
        self.assertCountEqual(ENV_MODEL.values(), data)

    def test_environment_model_show_basic(self):
        self._run_and_verify(['env-id'], [('id', 'env-id')])

    def test_environment_model_show_full(self):
        self._run_and_verify(
            ['env-id', '--path', '/path', '--session-id', 'sess-id'],
            [('id', 'env-id'), ('path', '/path'),
             ('session_id', 'sess-id')])
class TestEnvironmentModelEdit(TestEnvironment):
    """Exercise the `environment model edit` OSC command."""

    def setUp(self):
        super(TestEnvironmentModelEdit, self).setUp()
        self.env_mock = \
            self.app.client_manager.application_catalog.environments
        self.env_mock.update_model.return_value = ENV_MODEL
        # Command under test.
        self.cmd = osc_env.EnvironmentModelEdit(self.app, None)

    def test_environment_model_edit(self):
        # Stage a JSON-patch file that renames the environment.
        patch = [{'op': 'replace', 'path': '/name', 'value': 'dummy'}]
        temp_file = tempfile.NamedTemporaryFile(prefix="murano-test", mode='w')
        json.dump(patch, temp_file)
        temp_file.file.flush()

        arglist = ['env-id', temp_file.name, '--session-id', 'sess-id']
        verifylist = [('id', 'env-id'), ('filename', temp_file.name),
                      ('session_id', 'sess-id')]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)

        # Output mirrors the (mocked) updated model.
        self.assertEqual(('?', 'defaultNetworks', 'name', 'region'), columns)
        self.assertCountEqual(ENV_MODEL.values(), data)
|
kenshay/ImageScripter | refs/heads/master | ProgramData/SystemFiles/Python/Lib/site-packages/traitlets/config/manager.py | 13 | """Manager to read and modify config data in JSON files.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import errno
import io
import json
import os
from six import PY3
from traitlets.config import LoggingConfigurable
from traitlets.traitlets import Unicode
def recursive_update(target, new):
    """Recursively update one dictionary using another.

    None values will delete their keys.
    """
    for key, value in new.items():
        if isinstance(value, dict):
            # Merge nested dicts, creating the subtree on demand.
            subtree = target.setdefault(key, {})
            recursive_update(subtree, value)
            if not subtree:
                # Prune empty subdicts.
                del target[key]
        elif value is None:
            # None acts as a deletion marker.
            target.pop(key, None)
        else:
            target[key] = value
class BaseJSONConfigManager(LoggingConfigurable):
    """General JSON config manager

    Deals with persisting/storing config in a json file
    """
    # Directory holding one ``<section_name>.json`` file per section.
    config_dir = Unicode('.')
    def ensure_config_dir_exists(self):
        # Create config_dir (mode 0755) if missing; a concurrently created
        # directory is fine, any other OSError propagates.
        try:
            os.makedirs(self.config_dir, 0o755)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    def file_name(self, section_name):
        # Path of the JSON file backing *section_name*.
        return os.path.join(self.config_dir, section_name+'.json')
    def get(self, section_name):
        """Retrieve the config data for the specified section.

        Returns the data as a dictionary, or an empty dictionary if the file
        doesn't exist.
        """
        filename = self.file_name(section_name)
        if os.path.isfile(filename):
            with io.open(filename, encoding='utf-8') as f:
                return json.load(f)
        else:
            return {}
    def set(self, section_name, data):
        """Store the given config data, overwriting the section's file."""
        filename = self.file_name(section_name)
        self.ensure_config_dir_exists()
        # json.dump writes str on Python 3 (text mode, explicit encoding)
        # but bytes on Python 2 (binary mode).
        if PY3:
            f = io.open(filename, 'w', encoding='utf-8')
        else:
            f = open(filename, 'wb')
        with f:
            json.dump(data, f, indent=2)
    def update(self, section_name, new_data):
        """Modify the config section by recursively updating it with new_data.

        Returns the modified config data as a dictionary.
        """
        data = self.get(section_name)
        recursive_update(data, new_data)
        self.set(section_name, data)
        return data
|
nicoddemus/backtrader | refs/heads/master | tests/test_ind_pgo.py | 4 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import testcommon
import backtrader as bt
import backtrader.indicators as btind
# Number of data feeds to load for the test harness.
chkdatas = 1
# Expected indicator values (as strings) at the standard check points.
chkvals = [
    ['0.543029', '-2.347884', '0.416325']
]
# Minimum number of bars before the indicator produces values.
chkmin = 15
# Indicator under test (PGO).
chkind = btind.PGO
def test_run(main=False):
    """Run the shared indicator test harness against the PGO indicator."""
    feeds = [testcommon.getdata(i) for i in range(chkdatas)]
    testcommon.runtest(feeds,
                       testcommon.TestStrategy,
                       main=main,
                       plot=main,
                       chkind=chkind,
                       chkmin=chkmin,
                       chkvals=chkvals)
if __name__ == '__main__':
test_run(main=True)
|
ghchinoy/tensorflow | refs/heads/master | tensorflow/contrib/framework/python/ops/checkpoint_ops.py | 76 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating and loading vocab remappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops
from tensorflow.python.training import checkpoint_ops
# pylint: disable=protected-access,line-too-long
load_and_remap_matrix_initializer = checkpoint_ops._load_and_remap_matrix_initializer
# pylint: enable=line-too-long
load_embedding_initializer = checkpoint_ops._load_embedding_initializer
# pylint: enable=protected-access
def load_linear_multiclass_bias_initializer(ckpt_path,
                                            bias_tensor_name,
                                            new_class_vocab_size,
                                            old_class_vocab_file,
                                            new_class_vocab_file,
                                            num_class_oov_buckets=0,
                                            initializer=None,
                                            max_rows_in_memory=-1):
  """Loads pre-trained multi-class biases for linear models from checkpoint.

  Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
  multi-class bias and remapping according to the provided vocab files. See docs
  for `load_and_remap_matrix_initializer()` for more details. In this case, the
  provided row_vocab is the class vocabulary, and the expected shape is
  `[new_class_vocab_size, 1]`.

  Args:
    ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
      from which the old matrix `Tensor` will be loaded.
    bias_tensor_name: Tensor name to load from in the checkpoints.
    new_class_vocab_size: Number of entries in the new class vocab.
    old_class_vocab_file: A scalar `Tensor` of type `string` containing the
      path to the old class vocabulary file.
    new_class_vocab_file: A scalar `Tensor` of type `string` containing the
      path to the new class vocabulary file.
    num_class_oov_buckets: `int` specifying the number of out-of-vocabulary
      buckets to use for the classes. Must be >= 0.
    initializer: Initializer function that accepts a 1-D tensor as the arg to
      specify the shape of the returned tensor. If `None`, defaults to using
      `zeros_initializer()`.
    max_rows_in_memory: `int` specifying the maximum number of rows to load from
      the checkpoint at once. If less than or equal to 0, the entire matrix will
      be loaded into memory. Setting this arg trades increased disk reads for
      lower memory usage.

  Returns:
    A variable initializer function.
  """
  # Linear multi-class biases should be zero-initialized.
  if initializer is None:
    initializer = init_ops.zeros_initializer()
  # Biases form a single column, so only the row (class) vocabulary is
  # remapped; all column-vocab arguments stay None / trivial.
  return load_and_remap_matrix_initializer(
      ckpt_path=ckpt_path,
      old_tensor_name=bias_tensor_name,
      new_row_vocab_size=new_class_vocab_size,
      new_col_vocab_size=1,
      old_row_vocab_file=old_class_vocab_file,
      new_row_vocab_file=new_class_vocab_file,
      old_col_vocab_file=None,
      new_col_vocab_file=None,
      num_row_oov_buckets=num_class_oov_buckets,
      num_col_oov_buckets=0,
      initializer=initializer,
      max_rows_in_memory=max_rows_in_memory)
def load_variable_slot_initializer(ckpt_path,
                                   old_tensor_name,
                                   primary_partition_info,
                                   new_row_vocab_size,
                                   new_col_vocab_size,
                                   old_row_vocab_file=None,
                                   new_row_vocab_file=None,
                                   old_col_vocab_file=None,
                                   new_col_vocab_file=None,
                                   num_row_oov_buckets=0,
                                   num_col_oov_buckets=0,
                                   initializer=None,
                                   max_rows_in_memory=-1):
  """Loads pre-trained multi-class slots for linear models from checkpoint.

  Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
  multi-class slots (such as optimizer accumulators) and remapping them
  according to the provided vocab files. See docs for
  `load_and_remap_matrix_initializer()` for more details. Takes in a
  `variable_scope._PartitionInfo` representing the slot's primary `Variable`'s
  partitioning. This is necessary since accumulator `Variable` creation ignores
  primary scoping and partitioning information.

  Args:
    ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
      from which the old matrix `Tensor` will be loaded.
    old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
    primary_partition_info: A `variable_scope._PartitionInfo` containing this
      slot's primary `Variable`'s partitioning information. This is used to
      calculate the offset and override the partition_info passed to the call to
      _initialize.
    new_row_vocab_size: `int` specifying the number of entries in
      `new_row_vocab_file`. If no row remapping is needed (no row vocab
      provided), this should be equal to the number of rows to load from the old
      matrix (which can theoretically be smaller than the number of rows in the
      old matrix).
    new_col_vocab_size: `int` specifying the number of entries in
      `new_col_vocab_file`. If no column remapping is needed (no column vocab
      provided), this should be equal to the number of columns in the old
      matrix.
    old_row_vocab_file: A scalar `Tensor` of type `string` containing the
      path to the old row vocabulary file. Can be None, which represents no
      remapping on the row axis.
    new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
      to the new row vocabulary file. Can be None, which represents no remapping
      on the row axis.
    old_col_vocab_file: A scalar `Tensor` of type `string` containing the
      path to the old column vocabulary file. Can be None, which represents no
      remapping on the column axis.
    new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
      to the new column vocabulary file. Can be None, which represents no
      remapping on the column axis.
    num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
      to append. Must be >= 0.
    num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
      columns to append. Must be >= 0.
    initializer: Initializer function to initialize missing values. Accepts a
      1-D tensor as the arg to specify the shape of the returned tensor. If
      `None`, defaults to using `zeros_initializer()`.
    max_rows_in_memory: `int` specifying the maximum number of rows to load from
      the checkpoint at once. If less than or equal to 0, the entire matrix will
      be loaded into memory. Setting this arg trades increased disk reads for
      lower memory usage.

  Returns:
    A variable initializer function that should be used to initialize a
    (potentially partitioned) `Variable` whose complete shape is
    `[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size +
    num_col_oov_buckets]`.

  Raises:
    TypeError: If `initializer` is specified but not callable.
  """
  # Build the generic remapping initializer once; the wrapper below only
  # overrides the partition_info it receives.
  initializer_fn = load_and_remap_matrix_initializer(
      ckpt_path=ckpt_path,
      old_tensor_name=old_tensor_name,
      new_row_vocab_size=new_row_vocab_size,
      new_col_vocab_size=new_col_vocab_size,
      old_row_vocab_file=old_row_vocab_file,
      new_row_vocab_file=new_row_vocab_file,
      old_col_vocab_file=old_col_vocab_file,
      new_col_vocab_file=new_col_vocab_file,
      num_row_oov_buckets=num_row_oov_buckets,
      num_col_oov_buckets=num_col_oov_buckets,
      initializer=initializer,
      max_rows_in_memory=max_rows_in_memory)

  def _initializer(shape, dtype=dtypes.float32, partition_info=None):
    # Ignore the caller-supplied partition_info and substitute the primary
    # Variable's partitioning (slot creation loses that information).
    del partition_info  # Unused by this override.
    return initializer_fn(shape, dtype, partition_info=primary_partition_info)

  return _initializer
|
ppizarror/Hero-of-Antair | refs/heads/master | data/images/pil/Image.py | 1 | #
# The Python Imaging Library.
# $Id$
#
# the Image class wrapper
#
# partial release history:
# 1995-09-09 fl Created
# 1996-03-11 fl PIL release 0.0 (proof of concept)
# 1996-04-30 fl PIL release 0.1b1
# 1999-07-28 fl PIL release 1.0 final
# 2000-06-07 fl PIL release 1.1
# 2000-10-20 fl PIL release 1.1.1
# 2001-05-07 fl PIL release 1.1.2
# 2002-03-15 fl PIL release 1.1.3
# 2003-05-10 fl PIL release 1.1.4
# 2005-03-28 fl PIL release 1.1.5
# 2006-12-02 fl PIL release 1.1.6
# 2009-11-15 fl PIL release 1.1.7
#
# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-2009 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
VERSION = "1.1.7"
try:
import warnings
except ImportError:
warnings = None
class _imaging_not_installed:
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imaging C module is not installed")
try:
# give Tk a chance to set up the environment, in case we're
# using an _imaging module linked against libtcl/libtk (use
# __import__ to hide this from naive packagers; we don't really
# depend on Tk unless ImageTk is used, and that module already
# imports Tkinter)
__import__("FixTk")
except ImportError:
pass
try:
# If the _imaging C module is not present, you can still use
# the "open" function to identify files, but you cannot load
# them. Note that other modules should not refer to _imaging
# directly; import Image and use the Image.core variable instead.
import _imaging
core = _imaging
del _imaging
except ImportError, v:
core = _imaging_not_installed()
if str(v)[:20] == "Module use of python" and warnings:
# The _imaging C module is present, but not compiled for
# the right version (windows only). Print a warning, if
# possible.
warnings.warn(
"The _imaging extension was built for another version "
"of Python; most PIL functions will be disabled",
RuntimeWarning
)
from operator import isNumberType, isSequenceType
import os, string, sys
from types import IntType, StringType, TupleType
import ImageMode
import ImagePalette
# type stuff
try:
UnicodeStringType = type(unicode(""))
##
# (Internal) Checks if an object is a string. If the current
# Python version supports Unicode, this checks for both 8-bit
# and Unicode strings.
def isStringType(t):
return isinstance(t, StringType) or isinstance(t, UnicodeStringType)
except NameError:
def isStringType(t):
return isinstance(t, StringType)
##
# (Internal) Checks if an object is a tuple.
def isTupleType(t):
    """(Internal) Check whether *t* is a tuple."""
    # types.TupleType is merely an alias for the builtin tuple type, so
    # test against the builtin directly and avoid the indirection.
    return isinstance(t, tuple)
##
# (Internal) Checks if an object is an image object.
def isImageType(t):
    # Duck-typed check: anything exposing an "im" attribute is treated as
    # a PIL image object.
    return hasattr(t, "im")
##
# (Internal) Checks if an object is a string, and that it points to a
# directory.
def isDirectory(f):
    # True only for string-like values that name an existing directory.
    return isStringType(f) and os.path.isdir(f)
#
# Debug level
DEBUG = 0
#
# Constants (also defined in _imagingmodule.c!)
NONE = 0
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
# transforms
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2
QUAD = 3
MESH = 4
# resampling filters
NONE = 0
NEAREST = 0
ANTIALIAS = 1 # 3-lobed lanczos
LINEAR = BILINEAR = 2
CUBIC = BICUBIC = 3
# dithers
NONE = 0
NEAREST = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
EXTENSION = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# NOTE: this table will be removed in future versions. use
# getmode* functions or ImageMode descriptors instead.
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
# Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and
# BGR;24. Use these modes only if you know exactly what you're
# doing...
}
try:
byteorder = sys.byteorder
except AttributeError:
import struct
if struct.unpack("h", "\0\1")[0] == 1:
byteorder = "big"
else:
byteorder = "little"
if byteorder == 'little':
_ENDIAN = '<'
else:
_ENDIAN = '>'
_MODE_CONV = {
# official modes
"1": ('|b1', None), # broken
"L": ('|u1', None),
"I": (_ENDIAN + 'i4', None),
"F": (_ENDIAN + 'f4', None),
"P": ('|u1', None),
"RGB": ('|u1', 3),
"RGBX": ('|u1', 4),
"RGBA": ('|u1', 4),
"CMYK": ('|u1', 4),
"YCbCr": ('|u1', 4),
}
def _conv_type_shape(im):
    """Return the (shape, typestr) pair describing *im* for the numpy
    array interface."""
    typ, extra = _MODE_CONV[im.mode]
    height, width = im.size[1], im.size[0]
    if extra is None:
        return (height, width), typ
    return (height, width, extra), typ
MODES = _MODEINFO.keys()
MODES.sort()
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B")
##
# Gets the "base" mode for given mode. This function returns "L" for
# images that contain grayscale data, and "RGB" for images that
# contain color data.
#
# @param mode Input mode.
# @return "L" or "RGB".
# @exception KeyError If the input mode was not a standard mode.
def getmodebase(mode):
    # Delegates to ImageMode; raises KeyError for non-standard modes.
    return ImageMode.getmode(mode).basemode
##
# Gets the storage type mode. Given a mode, this function returns a
# single-layer mode suitable for storing individual bands.
#
# @param mode Input mode.
# @return "L", "I", or "F".
# @exception KeyError If the input mode was not a standard mode.
def getmodetype(mode):
    # Delegates to ImageMode; raises KeyError for non-standard modes.
    return ImageMode.getmode(mode).basetype
##
# Gets a list of individual band names. Given a mode, this function
# returns a tuple containing the names of individual bands (use
# {@link #getmodetype} to get the mode used to store each individual
# band.
#
# @param mode Input mode.
# @return A tuple containing band names. The length of the tuple
# gives the number of bands in an image of the given mode.
# @exception KeyError If the input mode was not a standard mode.
def getmodebandnames(mode):
    # Delegates to ImageMode; raises KeyError for non-standard modes.
    return ImageMode.getmode(mode).bands
##
# Gets the number of individual bands for this mode.
#
# @param mode Input mode.
# @return The number of bands in this mode.
# @exception KeyError If the input mode was not a standard mode.
def getmodebands(mode):
    # Number of bands = length of the mode's band-name tuple.
    return len(ImageMode.getmode(mode).bands)
# --------------------------------------------------------------------
# Helpers
_initialized = 0
##
# Explicitly loads standard file format drivers.
def preinit():
    "Load standard file format drivers."
    global _initialized
    if _initialized >= 1:
        return
    # Importing a plugin registers its format in the OPEN/SAVE/MIME
    # tables as a side effect; missing plugins are silently skipped.
    try:
        import BmpImagePlugin
    except ImportError:
        pass
    try:
        import GifImagePlugin
    except ImportError:
        pass
    try:
        import JpegImagePlugin
    except ImportError:
        pass
    try:
        import PpmImagePlugin
    except ImportError:
        pass
    try:
        import PngImagePlugin
    except ImportError:
        pass
#   try:
#       import TiffImagePlugin
#   except ImportError:
#       pass
    # Level 1 = standard drivers loaded; init() raises this to 2 once all
    # plugins on sys.path have been scanned.
    _initialized = 1
##
# Explicitly initializes the Python Imaging Library. This function
# loads all available file format drivers.
def init():
    "Load all file format drivers."
    global _initialized
    if _initialized >= 2:
        return 0
    visited = {}
    directories = sys.path
    try:
        # Also scan the package's own directory (unless frozen without
        # __file__).
        directories = directories + [os.path.dirname(__file__)]
    except NameError:
        pass
    # only check directories (including current, if present in the path)
    for directory in filter(isDirectory, directories):
        fullpath = os.path.abspath(directory)
        if visited.has_key(fullpath):
            continue
        for file in os.listdir(directory):
            if file[-14:] == "ImagePlugin.py":
                f, e = os.path.splitext(file)
                try:
                    # Temporarily prepend the directory so the plugin's own
                    # imports resolve, then restore sys.path.
                    sys.path.insert(0, directory)
                    try:
                        __import__(f, globals(), locals(), [])
                    finally:
                        del sys.path[0]
                except ImportError:
                    if DEBUG:
                        print "Image: failed to import",
                        print f, ":", sys.exc_value
        visited[fullpath] = None
    # Only report success once at least one loader or saver is registered.
    if OPEN or SAVE:
        _initialized = 2
        return 1
# --------------------------------------------------------------------
# Codec factories (used by tostring/fromstring and ImageFile.load)
def _getdecoder(mode, decoder_name, args, extra=()):
    """Look up ``<decoder_name>_decoder`` in the _imaging core and
    instantiate it with (mode,) + args + extra.

    Raises IOError when the core provides no such decoder.
    """
    # tweak arguments: None becomes (), a single value becomes a 1-tuple
    if args is None:
        args = ()
    elif not isTupleType(args):
        args = (args,)
    try:
        # get decoder
        decoder = getattr(core, decoder_name + "_decoder")
        # Extended call syntax replaces the deprecated apply() builtin.
        return decoder(*((mode,) + args + extra))
    except AttributeError:
        raise IOError("decoder %s not available" % decoder_name)
def _getencoder(mode, encoder_name, args, extra=()):
    """Look up ``<encoder_name>_encoder`` in the _imaging core and
    instantiate it with (mode,) + args + extra.

    Raises IOError when the core provides no such encoder.
    """
    # tweak arguments: None becomes (), a single value becomes a 1-tuple
    if args is None:
        args = ()
    elif not isTupleType(args):
        args = (args,)
    try:
        # get encoder
        encoder = getattr(core, encoder_name + "_encoder")
        # Extended call syntax replaces the deprecated apply() builtin.
        return encoder(*((mode,) + args + extra))
    except AttributeError:
        raise IOError("encoder %s not available" % encoder_name)
# --------------------------------------------------------------------
# Simple expression analyzer
class _E:
def __init__(self, data): self.data = data
def __coerce__(self, other): return self, _E(other)
def __add__(self, other): return _E((self.data, "__add__", other.data))
def __mul__(self, other): return _E((self.data, "__mul__", other.data))
def _getscaleoffset(expr):
    # Evaluate *expr* (a callable such as ``lambda x: x * 2 + 3``) on a
    # symbolic _E node and pattern-match the recorded expression tree to
    # recover a (scale, offset) pair.  Only ``x*a``, ``x+b`` and
    # ``x*a+b`` shapes are accepted.
    stub = ["stub"]
    data = expr(_E(stub)).data
    try:
        (a, b, c) = data # simplified syntax
        if (a is stub and b == "__mul__" and isNumberType(c)):
            return c, 0.0
        if (a is stub and b == "__add__" and isNumberType(c)):
            return 1.0, c
    except TypeError: pass
    try:
        ((a, b, c), d, e) = data # full syntax
        if (a is stub and b == "__mul__" and isNumberType(c) and
            d == "__add__" and isNumberType(e)):
            return c, e
    except TypeError: pass
    raise ValueError("illegal expression")
# --------------------------------------------------------------------
# Implementation wrapper
##
# This class represents an image object. To create Image objects, use
# the appropriate factory functions. There's hardly ever any reason
# to call the Image constructor directly.
#
# @see #open
# @see #new
# @see #fromstring
class Image:
format = None
format_description = None
    def __init__(self):
        # FIXME: take "new" parameters / other image?
        # FIXME: turn mode and size into delegating properties?
        self.im = None          # core (_imaging) image object, None until set
        self.mode = ""          # band mode string, e.g. "1", "L", "RGB"
        self.size = (0, 0)      # (width, height) in pixels
        self.palette = None     # ImagePalette for "P" images
        self.info = {}          # free-form metadata dictionary
        self.category = NORMAL  # NORMAL, SEQUENCE or CONTAINER
        self.readonly = 0       # passed to im.pixel_access() in load()
    def _new(self, im):
        # Wrap the core image *im* in a fresh Image, carrying over palette
        # and info from self; used by every operation returning a derived
        # image.
        new = Image()
        new.im = im
        new.mode = im.mode
        new.size = im.size
        new.palette = self.palette
        if im.mode == "P":
            # Palette images always get a fresh palette object.
            new.palette = ImagePalette.ImagePalette()
        try:
            new.info = self.info.copy()
        except AttributeError:
            # fallback (pre-1.5.2): info may be a sequence of (k, v) pairs
            new.info = {}
            for k, v in self.info:
                new.info[k] = v
        return new
_makeself = _new # compatibility
    def _copy(self):
        # Replace the core image with a private copy so in-place operations
        # cannot touch a shared (read-only) buffer.
        self.load()
        self.im = self.im.copy()
        self.readonly = 0
    def _dump(self, file=None, format=None):
        # Debug helper: write the image to *file* and return the path used.
        import tempfile
        if not file:
            # NOTE(review): mktemp only generates a name and is race-prone.
            file = tempfile.mktemp()
        self.load()
        if not format or format == "PPM":
            # Default: raw PPM dump straight from the core.
            self.im.save_ppm(file)
        else:
            # Append the format as an extension so save() picks a driver.
            file = file + "." + format
            self.save(file, format)
        return file
    def __repr__(self):
        # e.g. <Image.Image image mode=RGB size=640x480 at 0x...>
        return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % (
            self.__class__.__module__, self.__class__.__name__,
            self.mode, self.size[0], self.size[1],
            id(self)
            )
    def __getattr__(self, name):
        # Lazily expose the numpy array interface; every other missing
        # attribute raises as usual.
        if name == "__array_interface__":
            # numpy array interface support
            new = {}
            shape, typestr = _conv_type_shape(self)
            new['shape'] = shape
            new['typestr'] = typestr
            new['data'] = self.tostring()
            return new
        raise AttributeError(name)
##
# Returns a string containing pixel data.
#
# @param encoder_name What encoder to use. The default is to
# use the standard "raw" encoder.
# @param *args Extra arguments to the encoder.
# @return An 8-bit string.
    def tostring(self, encoder_name="raw", *args):
        "Return image as a binary string"
        # may pass tuple instead of argument list
        if len(args) == 1 and isTupleType(args[0]):
            args = args[0]
        # raw with no explicit rawmode defaults to the image's own mode
        if encoder_name == "raw" and args == ():
            args = self.mode
        self.load()
        # unpack data
        e = _getencoder(self.mode, encoder_name, args)
        e.setimage(self.im)
        bufsize = max(65536, self.size[0] * 4) # see RawEncode.c
        data = []
        while 1:
            # encode() status: 0 = more to come, >0 = finished, <0 = error
            l, s, d = e.encode(bufsize)
            data.append(d)
            if s:
                break
        if s < 0:
            raise RuntimeError("encoder error %d in tostring" % s)
        return string.join(data, "")
##
# Returns the image converted to an X11 bitmap. This method
# only works for mode "1" images.
#
# @param name The name prefix to use for the bitmap variables.
# @return A string containing an X11 bitmap.
# @exception ValueError If the mode is not "1"
    def tobitmap(self, name="image"):
        "Return image as an XBM bitmap"
        self.load()
        if self.mode != "1":
            raise ValueError("not a bitmap")
        data = self.tostring("xbm")
        # Wrap the raw XBM payload in C declarations named after *name*.
        return string.join(["#define %s_width %d\n" % (name, self.size[0]),
                            "#define %s_height %d\n"% (name, self.size[1]),
                            "static char %s_bits[] = {\n" % name, data, "};"], "")
##
# Loads this image with pixel data from a string.
# <p>
# This method is similar to the {@link #fromstring} function, but
# loads data into this image instead of creating a new image
# object.
    def fromstring(self, data, decoder_name="raw", *args):
        "Load data to image from binary string"
        # may pass tuple instead of argument list
        if len(args) == 1 and isTupleType(args[0]):
            args = args[0]
        # default format
        if decoder_name == "raw" and args == ():
            args = self.mode
        # unpack data
        d = _getdecoder(self.mode, decoder_name, args)
        d.setimage(self.im)
        s = d.decode(data)
        # s is (consumed, status): a non-negative consumed count here means
        # the decoder ran out of input before finishing.
        if s[0] >= 0:
            raise ValueError("not enough image data")
        if s[1] != 0:
            raise ValueError("cannot decode image data")
##
# Allocates storage for the image and loads the pixel data. In
# normal cases, you don't need to call this method, since the
# Image class automatically loads an opened image when it is
# accessed for the first time.
#
# @return An image access object.
    def load(self):
        "Explicitly load pixel data."
        if self.im and self.palette and self.palette.dirty:
            # realize palette: push pending palette data into the core image
            apply(self.im.putpalette, self.palette.getdata())
            self.palette.dirty = 0
            self.palette.mode = "RGB"
            self.palette.rawmode = None
            if self.info.has_key("transparency"):
                # transparency index becomes an alpha entry in the palette
                self.im.putpalettealpha(self.info["transparency"], 0)
                self.palette.mode = "RGBA"
        if self.im:
            # Returns a pixel access object honoring the readonly flag.
            return self.im.pixel_access(self.readonly)
##
# Verifies the contents of a file. For data read from a file, this
# method attempts to determine if the file is broken, without
# actually decoding the image data. If this method finds any
# problems, it raises suitable exceptions. If you need to load
# the image after using this method, you must reopen the image
# file.
    def verify(self):
        "Verify file contents."
        # Base implementation is a no-op; file-format plugins override it
        # with real consistency checks.
        pass
##
# Returns a converted copy of this image. For the "P" mode, this
# method translates pixels through the palette. If mode is
# omitted, a mode is chosen so that all information in the image
# and the palette can be represented without a palette.
# <p>
# The current version supports all possible conversions between
# "L", "RGB" and "CMYK."
# <p>
# When translating a colour image to black and white (mode "L"),
# the library uses the ITU-R 601-2 luma transform:
# <p>
# <b>L = R * 299/1000 + G * 587/1000 + B * 114/1000</b>
# <p>
# When translating a greyscale image into a bilevel image (mode
# "1"), all non-zero values are set to 255 (white). To use other
# thresholds, use the {@link #Image.point} method.
#
# @def convert(mode, matrix=None, **options)
# @param mode The requested mode.
# @param matrix An optional conversion matrix. If given, this
# should be 4- or 16-tuple containing floating point values.
# @param options Additional options, given as keyword arguments.
# @keyparam dither Dithering method, used when converting from
# mode "RGB" to "P".
# Available methods are NONE or FLOYDSTEINBERG (default).
# @keyparam palette Palette to use when converting from mode "RGB"
# to "P". Available palettes are WEB or ADAPTIVE.
# @keyparam colors Number of colors to use for the ADAPTIVE palette.
# Defaults to 256.
# @return An Image object.
    def convert(self, mode=None, data=None, dither=None,
                palette=WEB, colors=256):
        "Convert to other pixel format"
        # NOTE: the doc block above calls the second parameter "matrix";
        # here it is named *data* (a 4- or 16-tuple conversion matrix).
        if not mode:
            # determine default mode: "P" expands to its palette mode,
            # everything else is returned as an unchanged copy.
            if self.mode == "P":
                self.load()
                if self.palette:
                    mode = self.palette.mode
                else:
                    mode = "RGB"
            else:
                return self.copy()
        self.load()
        if data:
            # matrix conversion (only into "L" or "RGB")
            if mode not in ("L", "RGB"):
                raise ValueError("illegal conversion")
            im = self.im.convert_matrix(mode, data)
            return self._new(im)
        if mode == "P" and palette == ADAPTIVE:
            # adaptive palette is handled by the quantizer
            im = self.im.quantize(colors)
            return self._new(im)
        # colourspace conversion
        if dither is None:
            dither = FLOYDSTEINBERG
        try:
            im = self.im.convert(mode, dither)
        except ValueError:
            try:
                # normalize source image and try again
                im = self.im.convert(getmodebase(self.mode))
                im = im.convert(mode, dither)
            except KeyError:
                raise ValueError("illegal conversion")
        return self._new(im)
    def quantize(self, colors=256, method=0, kmeans=0, palette=None):
        """Convert the image to mode "P" with the given number of colors."""
        # methods:
        #    0 = median cut
        #    1 = maximum coverage
        # NOTE: this functionality will be moved to the extended
        # quantizer interface in a later version of PIL.
        self.load()
        if palette:
            # use palette from reference image
            palette.load()
            if palette.mode != "P":
                raise ValueError("bad mode for palette image")
            if self.mode != "RGB" and self.mode != "L":
                raise ValueError(
                    "only RGB or L mode images can be quantized to a palette"
                    )
            im = self.im.convert("P", 1, palette.im)
            return self._makeself(im)
        im = self.im.quantize(colors, method, kmeans)
        return self._new(im)
##
# Copies this image. Use this method if you wish to paste things
# into an image, but still retain the original.
#
# @return An Image object.
def copy(self):
    """Return an independent copy of this image's raster data."""
    self.load()
    duplicate = self.im.copy()
    return self._new(duplicate)
##
# Returns a rectangular region from this image. The box is a
# 4-tuple defining the left, upper, right, and lower pixel
# coordinate.
# <p>
# This is a lazy operation. Changes to the source image may or
# may not be reflected in the cropped image. To break the
# connection, call the {@link #Image.load} method on the cropped
# copy.
#
# @param box The crop rectangle, as a (left, upper, right, lower)-tuple.
# @return An Image object.
def crop(self, box=None):
    """Return the region delimited by *box*, a (left, upper, right,
    lower) 4-tuple; with no box, a plain copy of the whole image.

    This is a lazy operation: the crop is not performed until the
    result is loaded.
    """
    self.load()
    if box is None:
        return self.copy()
    # defer the actual cropping until the result's load() is called
    return _ImageCrop(self, box)
##
# Configures the image file loader so it returns a version of the
# image that as closely as possible matches the given mode and
# size. For example, you can use this method to convert a colour
# JPEG to greyscale while loading it, or to extract a 128x192
# version from a PCD file.
# <p>
# Note that this method modifies the Image object in place. If
# the image has already been loaded, this method has no effect.
#
# @param mode The requested mode.
# @param size The requested size.
def draft(self, mode, size):
    """Configure the image decoder to approximate the given mode and
    size while loading.  No-op here; file readers may override it."""
    return None
def _expand(self, xmargin, ymargin=None):
    """Internal: return a copy expanded by the given margins, with the
    new border filled with zero."""
    if ymargin is None:
        # a single margin applies to both axes
        ymargin = xmargin
    self.load()
    expanded = self.im.expand(xmargin, ymargin, 0)
    return self._new(expanded)
##
# Filters this image using the given filter. For a list of
# available filters, see the <b>ImageFilter</b> module.
#
# @param filter Filter kernel.
# @return An Image object.
# @see ImageFilter
def filter(self, filter):
    """Apply the given filter (an ImageFilter instance or class) and
    return the result as a new Image."""
    self.load()
    if callable(filter):
        # a filter class was passed in; instantiate it
        filter = filter()
    if not hasattr(filter, "filter"):
        raise TypeError("filter argument should be ImageFilter.Filter instance or class")
    if self.im.bands == 1:
        return self._new(filter.filter(self.im))
    # fix to handle multiband images since _imaging doesn't
    ims = []
    for c in range(self.im.bands):
        # filter each band separately, then recombine
        ims.append(self._new(filter.filter(self.im.getband(c))))
    return merge(self.mode, ims)
##
# Returns a tuple containing the name of each band in this image.
# For example, <b>getbands</b> on an RGB image returns ("R", "G", "B").
#
# @return A tuple containing band names.
def getbands(self):
    """Return a tuple with the name of each band, e.g. ("R", "G", "B")
    for an RGB image."""
    return ImageMode.getmode(self.mode).bands
##
# Calculates the bounding box of the non-zero regions in the
# image.
#
# @return The bounding box is returned as a 4-tuple defining the
# left, upper, right, and lower pixel coordinate. If the image
# is completely empty, this method returns None.
def getbbox(self):
    """Return the bounding box of non-zero data as a (left, upper,
    right, lower) 4-tuple, or None if the image is completely empty."""
    self.load()
    return self.im.getbbox()
##
# Returns a list of colors used in this image.
#
# @param maxcolors Maximum number of colors. If this number is
# exceeded, this method returns None. The default limit is
# 256 colors.
# @return An unsorted list of (count, pixel) values.
def getcolors(self, maxcolors=256):
    """Return an unsorted list of (count, pixel) pairs, or None when
    the image uses more than *maxcolors* colours."""
    self.load()
    if self.mode not in ("1", "L", "P"):
        # multiband and high-depth images are handled by the core
        return self.im.getcolors(maxcolors)
    # single-band 8-bit images: derive the answer from the histogram
    counts = self.im.histogram()
    found = [(counts[value], value) for value in range(256) if counts[value]]
    if len(found) > maxcolors:
        return None
    return found
##
# Returns the contents of this image as a sequence object
# containing pixel values. The sequence object is flattened, so
# that values for line one follow directly after the values of
# line zero, and so on.
# <p>
# Note that the sequence object returned by this method is an
# internal PIL data type, which only supports certain sequence
# operations. To convert it to an ordinary sequence (e.g. for
# printing), use <b>list(im.getdata())</b>.
#
# @param band What band to return. The default is to return
# all bands. To return a single band, pass in the index
# value (e.g. 0 to get the "R" band from an "RGB" image).
# @return A sequence-like object.
def getdata(self, band = None):
    """Return the flattened pixel data as an internal sequence object;
    pass a band index to get a single band."""
    self.load()
    if band is None:
        # note: exposes the internal image object directly
        return self.im # could be abused
    return self.im.getband(band)
##
# Gets the minimum and maximum pixel values for each band in
# the image.
#
# @return For a single-band image, a 2-tuple containing the
# minimum and maximum pixel value. For a multi-band image,
# a tuple containing one 2-tuple for each band.
def getextrema(self):
    """Return the (min, max) pixel values: a single 2-tuple for a
    single-band image, a tuple of per-band 2-tuples otherwise."""
    self.load()
    nbands = self.im.bands
    if nbands <= 1:
        return self.im.getextrema()
    return tuple(self.im.getband(b).getextrema() for b in range(nbands))
##
# Returns a PyCObject that points to the internal image memory.
#
# @return A PyCObject object.
def getim(self):
    """Return a PyCObject pointing to the internal image memory."""
    self.load()
    return self.im.ptr
##
# Returns the image palette as a list.
#
# @return A list of color values [r, g, b, ...], or None if the
# image has no palette.
def getpalette(self):
    """Return the palette as a list [r, g, b, ...], or None if the
    image has no palette."""
    self.load()
    try:
        raw = self.im.getpalette()
    except ValueError:
        # the core raises ValueError when there is no palette
        return None
    return map(ord, raw)
##
# Returns the pixel value at a given position.
#
# @param xy The coordinate, given as (x, y).
# @return The pixel value. If the image is a multi-layer image,
# this method returns a tuple.
def getpixel(self, xy):
    """Return the pixel value at position *xy* (an (x, y) tuple); a
    tuple is returned for multi-band images."""
    self.load()
    return self.im.getpixel(xy)
##
# Returns the horizontal and vertical projection.
#
# @return Two sequences, indicating where there are non-zero
# pixels along the X-axis and the Y-axis, respectively.
def getprojection(self):
    """Return two sequences marking where non-zero pixels occur along
    the X axis and the Y axis, respectively."""
    self.load()
    xprofile, yprofile = self.im.getprojection()
    return map(ord, xprofile), map(ord, yprofile)
##
# Returns a histogram for the image. The histogram is returned as
# a list of pixel counts, one for each pixel value in the source
# image. If the image has more than one band, the histograms for
# all bands are concatenated (for example, the histogram for an
# "RGB" image contains 768 values).
# <p>
# A bilevel image (mode "1") is treated as a greyscale ("L") image
# by this method.
# <p>
# If a mask is provided, the method returns a histogram for those
# parts of the image where the mask image is non-zero. The mask
# image must have the same size as the image, and be either a
# bi-level image (mode "1") or a greyscale image ("L").
#
# @def histogram(mask=None)
# @param mask An optional mask.
# @return A list containing pixel counts.
def histogram(self, mask=None, extrema=None):
    """Return a histogram as a list of pixel counts (bands are
    concatenated for multi-band images).

    mask    -- optional "1"/"L" image; only pixels where the mask is
               non-zero are counted
    extrema -- optional (min, max) used to bin "I" and "F" images;
               computed from the image when omitted
    """
    self.load()
    if mask:
        # restrict the histogram to the region selected by the mask
        mask.load()
        return self.im.histogram((0, 0), mask.im)
    if self.mode in ("I", "F"):
        # 32-bit and float images are binned between the extrema
        if extrema is None:
            extrema = self.getextrema()
        return self.im.histogram(extrema)
    return self.im.histogram()
##
# (Deprecated) Returns a copy of the image where the data has been
# offset by the given distances. Data wraps around the edges. If
# yoffset is omitted, it is assumed to be equal to xoffset.
# <p>
# This method is deprecated. New code should use the <b>offset</b>
# function in the <b>ImageChops</b> module.
#
# @param xoffset The horizontal distance.
# @param yoffset The vertical distance. If omitted, both
# distances are set to the same value.
# @return An Image object.
def offset(self, xoffset, yoffset=None):
    """(Deprecated) Return a copy offset by the given distances, with
    data wrapping around the edges; use ImageChops.offset instead."""
    # "warnings" may be a None placeholder if the module import failed
    if warnings:
        warnings.warn(
            "'offset' is deprecated; use 'ImageChops.offset' instead",
            DeprecationWarning, stacklevel=2
            )
    import ImageChops
    return ImageChops.offset(self, xoffset, yoffset)
##
# Pastes another image into this image. The box argument is either
# a 2-tuple giving the upper left corner, a 4-tuple defining the
# left, upper, right, and lower pixel coordinate, or None (same as
# (0, 0)). If a 4-tuple is given, the size of the pasted image
# must match the size of the region.
# <p>
# If the modes don't match, the pasted image is converted to the
# mode of this image (see the {@link #Image.convert} method for
# details).
# <p>
# Instead of an image, the source can be an integer or tuple
# containing pixel values. The method then fills the region
# with the given colour. When creating RGB images, you can
# also use colour strings as supported by the ImageColor module.
# <p>
# If a mask is given, this method updates only the regions
# indicated by the mask. You can use either "1", "L" or "RGBA"
# images (in the latter case, the alpha band is used as mask).
# Where the mask is 255, the given image is copied as is. Where
# the mask is 0, the current value is preserved. Intermediate
# values can be used for transparency effects.
# <p>
# Note that if you paste an "RGBA" image, the alpha band is
# ignored. You can work around this by using the same image as
# both source image and mask.
#
# @param im Source image or pixel value (integer or tuple).
# @param box An optional 4-tuple giving the region to paste into.
# If a 2-tuple is used instead, it's treated as the upper left
# corner. If omitted or None, the source is pasted into the
# upper left corner.
# <p>
# If an image is given as the second argument and there is no
# third, the box defaults to (0, 0), and the second argument
# is interpreted as a mask image.
# @param mask An optional mask image.
# @return An Image object.
def paste(self, im, box=None, mask=None):
    """Paste another image (or a colour) into a region of this image.

    im   -- source image, or a pixel value (integer, tuple, or a
            colour string for RGB images)
    box  -- 4-tuple region, 2-tuple upper-left corner, or None (0, 0)
    mask -- optional "1", "L" or "RGBA" mask ("RGBA" uses its alpha
            band); only regions where the mask is non-zero are updated
    """
    if isImageType(box) and mask is None:
        # abbreviated paste(im, mask) syntax
        mask = box; box = None
    if box is None:
        # cover all of self
        box = (0, 0) + self.size
    if len(box) == 2:
        # upper left corner given; get size from image or mask
        if isImageType(im):
            size = im.size
        elif isImageType(mask):
            size = mask.size
        else:
            # FIXME: use self.size here?
            raise ValueError(
                "cannot determine region size; use 4-item box"
                )
        box = box + (box[0]+size[0], box[1]+size[1])
    if isStringType(im):
        # colour specifier string; resolve it for this image's mode
        import ImageColor
        im = ImageColor.getcolor(im, self.mode)
    elif isImageType(im):
        im.load()
        if self.mode != im.mode:
            if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"):
                # should use an adapter for this!
                im = im.convert(self.mode)
        im = im.im
    self.load()
    if self.readonly:
        # pasting mutates the image; make sure we own the pixel data
        self._copy()
    if mask:
        mask.load()
        self.im.paste(im, box, mask.im)
    else:
        self.im.paste(im, box)
##
# Maps this image through a lookup table or function.
#
# @param lut A lookup table, containing 256 values per band in the
# image. A function can be used instead, it should take a single
# argument. The function is called once for each possible pixel
# value, and the resulting table is applied to all bands of the
# image.
# @param mode Output mode (default is same as input). In the
# current version, this can only be used if the source image
# has mode "L" or "P", and the output has mode "1".
# @return An Image object.
def point(self, lut, mode=None):
    """Map the image through a lookup table or function.

    lut  -- a sequence with 256 values per band, a function applied to
            each possible pixel value, or an ImagePointHandler
    mode -- optional output mode (only "L"/"P" -> "1" is supported)
    """
    self.load()
    if isinstance(lut, ImagePointHandler):
        # hand the whole operation over to the handler object
        return lut.point(self)
    if not isSequenceType(lut):
        # if it isn't a list, it should be a function
        if self.mode in ("I", "I;16", "F"):
            # check if the function can be used with point_transform
            scale, offset = _getscaleoffset(lut)
            return self._new(self.im.point_transform(scale, offset))
        # for other modes, convert the function to a table
        lut = map(lut, range(256)) * self.im.bands
    if self.mode == "F":
        # FIXME: _imaging returns a confusing error message for this case
        raise ValueError("point operation not supported for this mode")
    return self._new(self.im.point(lut, mode))
##
# Adds or replaces the alpha layer in this image. If the image
# does not have an alpha layer, it's converted to "LA" or "RGBA".
# The new layer must be either "L" or "1".
#
# @param im The new alpha layer. This can either be an "L" or "1"
# image having the same size as this image, or an integer or
# other color value.
def putalpha(self, alpha):
    """Add or replace the alpha layer, promoting the image to "LA" or
    "RGBA" if needed; *alpha* is a "1"/"L" image or a constant value."""
    self.load()
    if self.readonly:
        self._copy()
    if self.mode not in ("LA", "RGBA"):
        # attempt to promote self to a matching alpha mode
        try:
            mode = getmodebase(self.mode) + "A"
            try:
                # fast path: in-place mode change, if the core supports it
                self.im.setmode(mode)
            except (AttributeError, ValueError):
                # do things the hard way
                im = self.im.convert(mode)
                if im.mode not in ("LA", "RGBA"):
                    raise ValueError # sanity check
                self.im = im
                self.mode = self.im.mode
        except (KeyError, ValueError):
            raise ValueError("illegal image mode")
    # index of the alpha band in the (possibly promoted) image
    if self.mode == "LA":
        band = 1
    else:
        band = 3
    if isImageType(alpha):
        # alpha layer
        if alpha.mode not in ("1", "L"):
            raise ValueError("illegal image mode")
        alpha.load()
        if alpha.mode == "1":
            alpha = alpha.convert("L")
    else:
        # constant alpha
        try:
            self.im.fillband(band, alpha)
        except (AttributeError, ValueError):
            # do things the hard way
            alpha = new("L", self.size, alpha)
        else:
            return
    self.im.putband(alpha.im, band)
##
# Copies pixel data to this image. This method copies data from a
# sequence object into the image, starting at the upper left
# corner (0, 0), and continuing until either the image or the
# sequence ends. The scale and offset values are used to adjust
# the sequence values: <b>pixel = value*scale + offset</b>.
#
# @param data A sequence object.
# @param scale An optional scale value. The default is 1.0.
# @param offset An optional offset value. The default is 0.0.
def putdata(self, data, scale=1.0, offset=0.0):
    """Copy values from the sequence *data* into the image starting at
    (0, 0); each pixel is set to value*scale + offset."""
    self.load()
    if self.readonly:
        # writing requires a private copy of the pixel data
        self._copy()
    self.im.putdata(data, scale, offset)
##
# Attaches a palette to this image. The image must be a "P" or
# "L" image, and the palette sequence must contain 768 integer
# values, where each group of three values represent the red,
# green, and blue values for the corresponding pixel
# index. Instead of an integer sequence, you can use an 8-bit
# string.
#
# @def putpalette(data)
# @param data A palette sequence (either a list or a string).
def putpalette(self, data, rawmode="RGB"):
    """Attach a palette to an "L" or "P" image; *data* is a sequence
    (or 8-bit string) of 768 values.  The image becomes "P" mode."""
    if self.mode not in ("L", "P"):
        raise ValueError("illegal image mode")
    self.load()
    if isinstance(data, ImagePalette.ImagePalette):
        # reuse the raw data of an existing palette object
        palette = ImagePalette.raw(data.rawmode, data.palette)
    else:
        if not isStringType(data):
            # pack an integer sequence into an 8-bit string
            data = string.join(map(chr, data), "")
        palette = ImagePalette.raw(rawmode, data)
    self.mode = "P"
    self.palette = palette
    self.palette.mode = "RGB"
    self.load() # install new palette
##
# Modifies the pixel at the given position. The colour is given as
# a single numerical value for single-band images, and a tuple for
# multi-band images.
# <p>
# Note that this method is relatively slow. For more extensive
# changes, use {@link #Image.paste} or the <b>ImageDraw</b> module
# instead.
#
# @param xy The pixel coordinate, given as (x, y).
# @param value The pixel value.
# @see #Image.paste
# @see #Image.putdata
# @see ImageDraw
def putpixel(self, xy, value):
    """Set the pixel at *xy* (an (x, y) tuple) to *value*; use a tuple
    for multi-band images.  Slow -- prefer paste() or ImageDraw for
    larger changes."""
    self.load()
    if self.readonly:
        # make sure we do not mutate shared pixel data
        self._copy()
    return self.im.putpixel(xy, value)
##
# Returns a resized copy of this image.
#
# @def resize(size, filter=NEAREST)
# @param size The requested size in pixels, as a 2-tuple:
# (width, height).
# @param filter An optional resampling filter. This can be
# one of <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b>
# (linear interpolation in a 2x2 environment), <b>BICUBIC</b>
# (cubic spline interpolation in a 4x4 environment), or
# <b>ANTIALIAS</b> (a high-quality downsampling filter).
# If omitted, or if the image has mode "1" or "P", it is
# set to <b>NEAREST</b>.
# @return An Image object.
def resize(self, size, resample=NEAREST):
    """Return a copy resized to *size* (a (width, height) tuple);
    resample is NEAREST, BILINEAR, BICUBIC or ANTIALIAS."""
    if resample not in (NEAREST, BILINEAR, BICUBIC, ANTIALIAS):
        raise ValueError("unknown resampling filter")
    self.load()
    if self.mode in ("1", "P"):
        # bilevel and palette images can only use nearest neighbour
        resample = NEAREST
    if resample != ANTIALIAS:
        return self._new(self.im.resize(size, resample))
    # requires stretch support (imToolkit & PIL 1.1.3)
    try:
        return self._new(self.im.stretch(size, resample))
    except AttributeError:
        raise ValueError("unsupported resampling filter")
##
# Returns a rotated copy of this image. This method returns a
# copy of this image, rotated the given number of degrees counter
# clockwise around its centre.
#
# @def rotate(angle, filter=NEAREST)
# @param angle In degrees counter clockwise.
# @param filter An optional resampling filter. This can be
# one of <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b>
# (linear interpolation in a 2x2 environment), or <b>BICUBIC</b>
# (cubic spline interpolation in a 4x4 environment).
# If omitted, or if the image has mode "1" or "P", it is
# set to <b>NEAREST</b>.
# @param expand Optional expansion flag. If true, expands the output
# image to make it large enough to hold the entire rotated image.
# If false or omitted, make the output image the same size as the
# input image.
# @return An Image object.
def rotate(self, angle, resample=NEAREST, expand=0):
    """Return a copy rotated *angle* degrees counter-clockwise.

    resample -- NEAREST, BILINEAR or BICUBIC (forced to NEAREST for
                "1" and "P" images)
    expand   -- if true, enlarge the output image to hold the whole
                rotated image instead of clipping to the input size
    """
    if expand:
        import math
        # radians; negated because the y axis points downwards
        angle = -angle * math.pi / 180
        matrix = [
            math.cos(angle), math.sin(angle), 0.0,
            -math.sin(angle), math.cos(angle), 0.0
            ]
        def transform(x, y, (a, b, c, d, e, f)=matrix):
            return a*x + b*y + c, d*x + e*y + f
        # calculate output size
        w, h = self.size
        xx = []
        yy = []
        for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
            x, y = transform(x, y)
            xx.append(x)
            yy.append(y)
        w = int(math.ceil(max(xx)) - math.floor(min(xx)))
        h = int(math.ceil(max(yy)) - math.floor(min(yy)))
        # adjust center
        x, y = transform(w / 2.0, h / 2.0)
        matrix[2] = self.size[0] / 2.0 - x
        matrix[5] = self.size[1] / 2.0 - y
        return self.transform((w, h), AFFINE, matrix, resample)
    if resample not in (NEAREST, BILINEAR, BICUBIC):
        raise ValueError("unknown resampling filter")
    self.load()
    if self.mode in ("1", "P"):
        resample = NEAREST
    return self._new(self.im.rotate(angle, resample))
##
# Saves this image under the given filename. If no format is
# specified, the format to use is determined from the filename
# extension, if possible.
# <p>
# Keyword options can be used to provide additional instructions
# to the writer. If a writer doesn't recognise an option, it is
# silently ignored. The available options are described later in
# this handbook.
# <p>
# You can use a file object instead of a filename. In this case,
# you must always specify the format. The file object must
# implement the <b>seek</b>, <b>tell</b>, and <b>write</b>
# methods, and be opened in binary mode.
#
# @def save(file, format=None, **options)
# @param file File name or file object.
# @param format Optional format override. If omitted, the
# format to use is determined from the filename extension.
# If a file object was used instead of a filename, this
# parameter should always be used.
# @param **options Extra parameters to the image writer.
# @return None
# @exception KeyError If the output format could not be determined
# from the file name. Use the format option to solve this.
# @exception IOError If the file could not be written. The file
# may have been created, and may contain partial data.
def save(self, fp, format=None, **params):
    """Save the image to a file name or a writable file object.

    format -- optional format override; required when *fp* is a file
              object without a usable name
    params -- extra keyword options passed through to the file writer

    Raises KeyError if the format cannot be determined, IOError if the
    file could not be written (it may contain partial data).
    """
    if isStringType(fp):
        filename = fp
    else:
        if hasattr(fp, "name") and isStringType(fp.name):
            filename = fp.name
        else:
            filename = ""
    # may mutate self!
    self.load()
    self.encoderinfo = params
    self.encoderconfig = ()
    preinit()
    ext = string.lower(os.path.splitext(filename)[1])
    if not format:
        # deduce the format from the filename extension
        try:
            format = EXTENSION[ext]
        except KeyError:
            # a full plugin init may register additional extensions
            init()
            try:
                format = EXTENSION[ext]
            except KeyError:
                raise KeyError(ext) # unknown extension
    try:
        save_handler = SAVE[string.upper(format)]
    except KeyError:
        init()
        save_handler = SAVE[string.upper(format)] # unknown format
    if isStringType(fp):
        import __builtin__
        fp = __builtin__.open(fp, "wb")
        close = 1
    else:
        close = 0
    try:
        save_handler(self, fp, filename)
    finally:
        # do what we can to clean up
        if close:
            fp.close()
##
# Seeks to the given frame in this sequence file. If you seek
# beyond the end of the sequence, the method raises an
# <b>EOFError</b> exception. When a sequence file is opened, the
# library automatically seeks to frame 0.
# <p>
# Note that in the current version of the library, most sequence
# formats only allows you to seek to the next frame.
#
# @param frame Frame number, starting at 0.
# @exception EOFError If the call attempts to seek beyond the end
# of the sequence.
# @see #Image.tell
def seek(self, frame):
    """Seek to the given frame; a plain image holds only frame 0.
    File handlers for multi-frame formats override this."""
    if frame != 0:
        # beyond the end of this (single-frame) sequence
        raise EOFError
##
# Displays this image. This method is mainly intended for
# debugging purposes.
# <p>
# On Unix platforms, this method saves the image to a temporary
# PPM file, and calls the <b>xv</b> utility.
# <p>
# On Windows, it saves the image to a temporary BMP file, and uses
# the standard BMP display utility to show it (usually Paint).
#
# @def show(title=None)
# @param title Optional title to use for the image window,
# where possible.
def show(self, title=None, command=None):
    """Display the image with an external viewer (debugging aid);
    *title* and *command* are passed through to the helper."""
    _show(self, title=title, command=command)
##
# Split this image into individual bands. This method returns a
# tuple of individual image bands from an image. For example,
# splitting an "RGB" image creates three new images each
# containing a copy of one of the original bands (red, green,
# blue).
#
# @return A tuple containing bands.
def split(self):
    """Return a tuple of single-band images, one per band (e.g. R, G
    and B images for an "RGB" source)."""
    if self.im.bands == 1:
        # single band: the "split" is just a copy
        return (self.copy(),)
    self.load()
    return tuple(self._new(self.im.getband(b))
                 for b in range(self.im.bands))
##
# Returns the current frame number.
#
# @return Frame number, starting with 0.
# @see #Image.seek
def tell(self):
    """Return the current frame number (always 0 for plain images)."""
    return 0
##
# Make this image into a thumbnail. This method modifies the
# image to contain a thumbnail version of itself, no larger than
# the given size. This method calculates an appropriate thumbnail
# size to preserve the aspect of the image, calls the {@link
# #Image.draft} method to configure the file reader (where
# applicable), and finally resizes the image.
# <p>
# Note that the bilinear and bicubic filters in the current
# version of PIL are not well-suited for thumbnail generation.
# You should use <b>ANTIALIAS</b> unless speed is much more
# important than quality.
# <p>
# Also note that this function modifies the Image object in place.
# If you need to use the full resolution image as well, apply this
# method to a {@link #Image.copy} of the original image.
#
# @param size Requested size.
# @param resample Optional resampling filter. This can be one
# of <b>NEAREST</b>, <b>BILINEAR</b>, <b>BICUBIC</b>, or
# <b>ANTIALIAS</b> (best quality). If omitted, it defaults
# to <b>NEAREST</b> (this will be changed to ANTIALIAS in a
# future version).
# @return None
def thumbnail(self, size, resample=NEAREST):
    """Shrink the image in place so it fits within *size*, preserving
    the aspect ratio; *resample* is NEAREST, BILINEAR, BICUBIC or
    ANTIALIAS (best quality)."""
    # FIXME: the default resampling filter will be changed
    # to ANTIALIAS in future versions
    # preserve aspect ratio
    x, y = self.size
    if x > size[0]: y = max(y * size[0] / x, 1); x = size[0]
    if y > size[1]: x = max(x * size[1] / y, 1); y = size[1]
    size = x, y
    if size == self.size:
        # already small enough; nothing to do
        return
    # let the file reader deliver a smaller version, where supported
    self.draft(None, size)
    self.load()
    try:
        im = self.resize(size, resample)
    except ValueError:
        if resample != ANTIALIAS:
            raise
        im = self.resize(size, NEAREST) # fallback
    # adopt the resized raster in place
    self.im = im.im
    self.mode = im.mode
    self.size = size
    self.readonly = 0
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
##
# Transforms this image. This method creates a new image with the
# given size, and the same mode as the original, and copies data
# to the new image using the given transform.
# <p>
# @def transform(size, method, data, resample=NEAREST)
# @param size The output size.
# @param method The transformation method. This is one of
# <b>EXTENT</b> (cut out a rectangular subregion), <b>AFFINE</b>
# (affine transform), <b>PERSPECTIVE</b> (perspective
# transform), <b>QUAD</b> (map a quadrilateral to a
# rectangle), or <b>MESH</b> (map a number of source quadrilaterals
# in one operation).
# @param data Extra data to the transformation method.
# @param resample Optional resampling filter. It can be one of
# <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b>
# (linear interpolation in a 2x2 environment), or
# <b>BICUBIC</b> (cubic spline interpolation in a 4x4
# environment). If omitted, or if the image has mode
# "1" or "P", it is set to <b>NEAREST</b>.
# @return An Image object.
def transform(self, size, method, data=None, resample=NEAREST, fill=1):
    """Return a new image of the given size, filled by mapping data
    from this image with the given transform.

    method -- EXTENT, AFFINE, PERSPECTIVE, QUAD or MESH, or an
              ImageTransformHandler / old-style transform object
    data   -- extra data required by the transformation method
    """
    if isinstance(method, ImageTransformHandler):
        return method.transform(size, self, resample=resample, fill=fill)
    if hasattr(method, "getdata"):
        # compatibility w. old-style transform objects
        method, data = method.getdata()
    if data is None:
        raise ValueError("missing method data")
    im = new(self.mode, size, None)
    if method == MESH:
        # list of quads
        for box, quad in data:
            im.__transformer(box, self, QUAD, quad, resample, fill)
    else:
        im.__transformer((0, 0)+size, self, method, data, resample, fill)
    return im
def __transformer(self, box, image, method, data,
                  resample=NEAREST, fill=1):
    # Internal helper: fill *box* in self by transforming data from
    # *image*.  Each method is reduced to the affine/perspective/quad
    # form understood by the core transform2 primitive.
    # FIXME: this should be turned into a lazy operation (?)
    w = box[2]-box[0]
    h = box[3]-box[1]
    if method == AFFINE:
        # change argument order to match implementation
        data = (data[2], data[0], data[1],
                data[5], data[3], data[4])
    elif method == EXTENT:
        # convert extent to an affine transform
        x0, y0, x1, y1 = data
        xs = float(x1 - x0) / w
        ys = float(y1 - y0) / h
        method = AFFINE
        data = (x0 + xs/2, xs, 0, y0 + ys/2, 0, ys)
    elif method == PERSPECTIVE:
        # change argument order to match implementation
        data = (data[2], data[0], data[1],
                data[5], data[3], data[4],
                data[6], data[7])
    elif method == QUAD:
        # quadrilateral warp. data specifies the four corners
        # given as NW, SW, SE, and NE.
        nw = data[0:2]; sw = data[2:4]; se = data[4:6]; ne = data[6:8]
        x0, y0 = nw; As = 1.0 / w; At = 1.0 / h
        data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At,
                (se[0]-sw[0]-ne[0]+x0)*As*At,
                y0, (ne[1]-y0)*As, (sw[1]-y0)*At,
                (se[1]-sw[1]-ne[1]+y0)*As*At)
    else:
        raise ValueError("unknown transformation method")
    if resample not in (NEAREST, BILINEAR, BICUBIC):
        raise ValueError("unknown resampling filter")
    image.load()
    self.load()
    if image.mode in ("1", "P"):
        # bilevel/palette sources can only use nearest neighbour
        resample = NEAREST
    self.im.transform2(box, image.im, method, data, resample, fill)
##
# Returns a flipped or rotated copy of this image.
#
# @param method One of <b>FLIP_LEFT_RIGHT</b>, <b>FLIP_TOP_BOTTOM</b>,
# <b>ROTATE_90</b>, <b>ROTATE_180</b>, or <b>ROTATE_270</b>.
def transpose(self, method):
    """Return a flipped or rotated copy; *method* is FLIP_LEFT_RIGHT,
    FLIP_TOP_BOTTOM, ROTATE_90, ROTATE_180, or ROTATE_270."""
    self.load()
    im = self.im.transpose(method)
    return self._new(im)
# --------------------------------------------------------------------
# Lazy operations
class _ImageCrop(Image):
    # Lazy crop proxy returned by Image.crop; the actual cropping is
    # deferred until the pixel data is first needed.
    def __init__(self, im, box):
        Image.__init__(self)
        x0, y0, x1, y1 = box
        # clamp inverted boxes to an empty region
        if x1 < x0:
            x1 = x0
        if y1 < y0:
            y1 = y0
        self.mode = im.mode
        self.size = x1-x0, y1-y0
        self.__crop = x0, y0, x1, y1
        # shares the source image's raster until load() is called
        self.im = im.im
    def load(self):
        # lazy evaluation!
        if self.__crop:
            # first access: perform the crop and drop the pending box
            self.im = self.im.crop(self.__crop)
            self.__crop = None
        if self.im:
            return self.im.pixel_access(self.readonly)
        # FIXME: future versions should optimize crop/paste
        # sequences!
# --------------------------------------------------------------------
# Abstract handlers.
class ImagePointHandler:
    """Abstract mixin for point-transform objects (for use with
    Image.point, which delegates the whole operation to them)."""
    # used as a mixin by point transforms (for use with im.point)
    pass
class ImageTransformHandler:
    """Abstract mixin for geometry-transform objects (for use with
    Image.transform, which delegates the whole operation to them)."""
    # used as a mixin by geometry transforms (for use with im.transform)
    pass
# --------------------------------------------------------------------
# Factories
#
# Debugging
def _wedge():
    """Create a greyscale wedge image (for debugging only)."""
    # 256-level gradient produced by the core library
    return Image()._new(core.wedge("L"))
##
# Creates a new image with the given mode and size.
#
# @param mode The mode to use for the new image.
# @param size A 2-tuple, containing (width, height) in pixels.
# @param color What colour to use for the image. Default is black.
# If given, this should be a single integer or floating point value
# for single-band modes, and a tuple for multi-band modes (one value
# per band). When creating RGB images, you can also use colour
# strings as supported by the ImageColor module. If the colour is
# None, the image is not initialised.
# @return An Image object.
def new(mode, size, color=0):
    """Create a new image with the given mode and (width, height) size.

    color -- initial pixel value (single value or per-band tuple), a
             colour string for RGB images, or None to skip
             initialisation entirely.
    """
    if color is None:
        # don't initialize
        return Image()._new(core.new(mode, size))
    if isStringType(color):
        # css3-style specifier
        import ImageColor
        color = ImageColor.getcolor(color, mode)
    return Image()._new(core.fill(mode, size, color))
##
# Creates an image memory from pixel data in a string.
# <p>
# In its simplest form, this function takes three arguments
# (mode, size, and unpacked pixel data).
# <p>
# You can also use any pixel decoder supported by PIL. For more
# information on available decoders, see the section <a
# href="pil-decoder.htm"><i>Writing Your Own File Decoder</i></a>.
# <p>
# Note that this function decodes pixel data only, not entire images.
# If you have an entire image in a string, wrap it in a
# <b>StringIO</b> object, and use {@link #open} to load it.
#
# @param mode The image mode.
# @param size The image size.
# @param data An 8-bit string containing raw data for the given mode.
# @param decoder_name What decoder to use.
# @param *args Additional parameters for the given decoder.
# @return An Image object.
def fromstring(mode, size, data, decoder_name="raw", *args):
    """Create an image memory from raw pixel data in a string, using
    the named decoder (default "raw")."""
    extra = args
    # a single tuple may be passed instead of an argument list
    if len(extra) == 1 and isTupleType(extra[0]):
        extra = extra[0]
    if decoder_name == "raw" and extra == ():
        # the raw decoder defaults to the image's own mode
        extra = mode
    im = new(mode, size)
    im.fromstring(data, decoder_name, extra)
    return im
##
# (New in 1.1.4) Creates an image memory from pixel data in a string
# or byte buffer.
# <p>
# This function is similar to {@link #fromstring}, but uses data in
# the byte buffer, where possible. This means that changes to the
# original buffer object are reflected in this image). Not all modes
# can share memory; supported modes include "L", "RGBX", "RGBA", and
# "CMYK".
# <p>
# Note that this function decodes pixel data only, not entire images.
# If you have an entire image file in a string, wrap it in a
# <b>StringIO</b> object, and use {@link #open} to load it.
# <p>
# In the current version, the default parameters used for the "raw"
# decoder differs from that used for {@link fromstring}. This is a
# bug, and will probably be fixed in a future release. The current
# release issues a warning if you do this; to disable the warning,
# you should provide the full set of parameters. See below for
# details.
#
# @param mode The image mode.
# @param size The image size.
# @param data An 8-bit string or other buffer object containing raw
# data for the given mode.
# @param decoder_name What decoder to use.
# @param *args Additional parameters for the given decoder. For the
# default encoder ("raw"), it's recommended that you provide the
# full set of parameters:
# <b>frombuffer(mode, size, data, "raw", mode, 0, 1)</b>.
# @return An Image object.
# @since 1.1.4
def frombuffer(mode, size, data, decoder_name="raw", *args):
    """Create an image from a string or buffer object, sharing memory
    with the buffer where the mode allows it ("L", "RGBX", "RGBA",
    "CMYK"); otherwise falls back to fromstring()."""
    # may pass tuple instead of argument list
    if len(args) == 1 and isTupleType(args[0]):
        args = args[0]
    if decoder_name == "raw":
        if args == ():
            # historical default parameters; scheduled to change
            if warnings:
                warnings.warn(
                    "the frombuffer defaults may change in a future release; "
                    "for portability, change the call to read:\n"
                    "  frombuffer(mode, size, data, 'raw', mode, 0, 1)",
                    RuntimeWarning, stacklevel=2
                )
            args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6
        if args[0] in _MAPMODES:
            # zero-copy path: map the buffer directly into an image
            im = new(mode, (1,1))
            im = im._new(
                core.map_buffer(data, size, decoder_name, None, 0, args)
                )
            im.readonly = 1
            return im
    return fromstring(mode, size, data, decoder_name, args)
##
# (New in 1.1.6) Creates an image memory from an object exporting
# the array interface (using the buffer protocol).
#
# If obj is not contiguous, then the tostring method is called
# and {@link frombuffer} is used.
#
# @param obj Object with array interface
# @param mode Mode to use (will be determined from type if None)
# @return An image memory.
def fromarray(obj, mode=None):
    """Create an image memory from an object exporting the array interface.

    If *obj* is not contiguous (it reports explicit strides), its
    ``tostring`` method is called and the copied bytes are loaded via
    frombuffer.

    :param obj: Object exposing ``__array_interface__``.
    :param mode: Mode to use; determined from the array typestr if None.
    :return: An image memory.
    :raises TypeError: if the array's element type cannot be handled.
    :raises ValueError: if the array has too many dimensions for the mode.
    """
    arr = obj.__array_interface__
    shape = arr['shape']
    ndim = len(shape)
    try:
        strides = arr['strides']
    except KeyError:
        # No 'strides' key means the data is C-contiguous.
        strides = None
    if mode is None:
        try:
            # Lookup key: collapse the first two axes to (1, 1), keep the
            # channel axis (if any), plus the numpy typestr.
            typekey = (1, 1) + shape[2:], arr['typestr']
            mode, rawmode = _fromarray_typemap[typekey]
        except KeyError:
            # print typekey
            raise TypeError("Cannot handle this data type")
    else:
        rawmode = mode
    # Maximum rank allowed for the chosen mode.
    if mode in ["1", "L", "I", "P", "F"]:
        ndmax = 2
    elif mode == "RGB":
        ndmax = 3
    else:
        ndmax = 4
    if ndim > ndmax:
        raise ValueError("Too many dimensions.")
    # Array shape is (rows, cols); image size is (width, height).
    size = shape[1], shape[0]
    if strides is not None:
        # Non-contiguous: fall back to a copy of the raw bytes.
        obj = obj.tostring()
    return frombuffer(mode, size, obj, "raw", rawmode, 0, 1)
# Maps ((1, 1) + channel-shape, numpy typestr) -> (mode, rawmode) for
# fromarray().  The first two shape members are normalized to (1, 1)
# before lookup (see fromarray's typekey construction).
_fromarray_typemap = {
    # (shape, typestr) => mode, rawmode
    # first two members of shape are set to one
    # ((1, 1), "|b1"): ("1", "1"), # broken
    ((1, 1), "|u1"): ("L", "L"),
    ((1, 1), "|i1"): ("I", "I;8"),
    ((1, 1), "<i2"): ("I", "I;16"),
    ((1, 1), ">i2"): ("I", "I;16B"),
    ((1, 1), "<i4"): ("I", "I;32"),
    ((1, 1), ">i4"): ("I", "I;32B"),
    ((1, 1), "<f4"): ("F", "F;32F"),
    ((1, 1), ">f4"): ("F", "F;32BF"),
    ((1, 1), "<f8"): ("F", "F;64F"),
    ((1, 1), ">f8"): ("F", "F;64BF"),
    ((1, 1, 3), "|u1"): ("RGB", "RGB"),
    ((1, 1, 4), "|u1"): ("RGBA", "RGBA"),
}
# shortcuts: native-endian 32-bit integer/float map straight to "I"/"F".
_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I")
_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F")
##
# Opens and identifies the given image file.
# <p>
# This is a lazy operation; this function identifies the file, but the
# actual image data is not read from the file until you try to process
# the data (or call the {@link #Image.load} method).
#
# @def open(file, mode="r")
# @param file A filename (string) or a file object. The file object
# must implement <b>read</b>, <b>seek</b>, and <b>tell</b> methods,
# and be opened in binary mode.
# @param mode The mode. If given, this argument must be "r".
# @return An Image object.
# @exception IOError If the file cannot be found, or the image cannot be
# opened and identified.
# @see #new
def open(fp, mode="r"):
    """Open and identify the given image file without loading raster data.

    :param fp: A filename (string) or a binary-mode file object that
        implements read, seek, and tell.
    :param mode: Must be "r".
    :return: An Image object (lazy; data is read on first access/load).
    :raises ValueError: if mode is not "r".
    :raises IOError: if the file cannot be identified as an image.
    """
    if mode != "r":
        raise ValueError("bad mode")
    if isStringType(fp):
        import __builtin__
        filename = fp
        fp = __builtin__.open(fp, "rb")
    else:
        filename = ""
    # The first 16 bytes are enough for every plugin's accept() check.
    prefix = fp.read(16)
    # Try the cheap, preinitialized plugin set first; only fall back to
    # the full plugin scan (init()) if nothing matched.
    preinit()
    im = _try_registered_plugins(fp, filename, prefix)
    if im is not None:
        return im
    if init():
        im = _try_registered_plugins(fp, filename, prefix)
        if im is not None:
            return im
    raise IOError("cannot identify image file")


def _try_registered_plugins(fp, filename, prefix):
    """Scan registered plugins in ID order; return an image or None.

    A plugin is tried when it has no accept hook or its hook accepts the
    16-byte prefix; factories that reject the file raise SyntaxError,
    IndexError or TypeError, which are treated as "not this format".
    """
    for i in ID:
        try:
            factory, accept = OPEN[i]
            if not accept or accept(prefix):
                fp.seek(0)
                return factory(fp, filename)
        except (SyntaxError, IndexError, TypeError):
            pass
    return None
#
# Image processing.
##
# Creates a new image by interpolating between two input images, using
# a constant alpha.
#
# <pre>
# out = image1 * (1.0 - alpha) + image2 * alpha
# </pre>
#
# @param im1 The first image.
# @param im2 The second image. Must have the same mode and size as
# the first image.
# @param alpha The interpolation alpha factor. If alpha is 0.0, a
# copy of the first image is returned. If alpha is 1.0, a copy of
# the second image is returned. There are no restrictions on the
# alpha value. If necessary, the result is clipped to fit into
# the allowed output range.
# @return An Image object.
def blend(im1, im2, alpha):
    """Interpolate between two images using a constant alpha:

        out = im1 * (1.0 - alpha) + im2 * alpha

    Alpha is unrestricted; out-of-range results are clipped.
    """
    for source in (im1, im2):
        source.load()
    return im1._new(core.blend(im1.im, im2.im, alpha))
##
# Creates a new image by interpolating between two input images,
# using the mask as alpha.
#
# @param image1 The first image.
# @param image2 The second image. Must have the same mode and
# size as the first image.
# @param mask A mask image. This image can have mode
# "1", "L", or "RGBA", and must have the same size as the
# other two images.
def composite(image1, image2, mask):
    """Blend image1 over image2 using *mask* as the transparency layer.

    The mask can have mode "1", "L", or "RGBA" and must match the size
    of the other two images.  Returns a new image; inputs are untouched.
    """
    result = image2.copy()
    result.paste(image1, None, mask)
    return result
##
# Applies the function (which should take one argument) to each pixel
# in the given image. If the image has more than one band, the same
# function is applied to each band. Note that the function is
# evaluated once for each possible pixel value, so you cannot use
# random components or other generators.
#
# @def eval(image, function)
# @param image The input image.
# @param function A function object, taking one integer argument.
# @return An Image object.
def eval(image, *args):
    """Apply args[0] (a one-argument function) to each pixel of *image*.

    The function is evaluated once per possible pixel value, so it must
    be deterministic (no random components or other generators).
    """
    function = args[0]
    return image.point(function)
##
# Creates a new image from a number of single-band images.
#
# @param mode The mode to use for the output image.
# @param bands A sequence containing one single-band image for
# each band in the output image. All bands must have the
# same size.
# @return An Image object.
def merge(mode, bands):
    """Merge a set of single band images into a new multiband image.

    :param mode: The mode to use for the output image.
    :param bands: One single-band image per band of *mode*; all must
        have the same size.
    :return: An Image object.
    :raises ValueError: on band-count, mode, or size mismatch.
    """
    if getmodebands(mode) != len(bands) or "*" in mode:
        raise ValueError("wrong number of bands")
    # NOTE(review): only bands[1:] are validated here -- the first band's
    # mode is implicitly trusted and sizes are compared against it.
    # Confirm this asymmetry is intentional.
    for im in bands[1:]:
        if im.mode != getmodetype(mode):
            raise ValueError("mode mismatch")
        if im.size != bands[0].size:
            raise ValueError("size mismatch")
    im = core.new(mode, bands[0].size)
    for i in range(getmodebands(mode)):
        bands[i].load()
        im.putband(bands[i].im, i)
    return bands[0]._new(im)
# --------------------------------------------------------------------
# Plugin registry
##
# Register an image file plugin. This function should not be used
# in application code.
#
# @param id An image format identifier.
# @param factory An image file factory method.
# @param accept An optional function that can be used to quickly
# reject images having another format.
def register_open(id, factory, accept=None):
    """Register an image file plugin (internal use only).

    :param id: An image format identifier (stored upper-cased).
    :param factory: An image file factory method.
    :param accept: Optional function used to quickly reject files of
        another format, given the first bytes of the file.
    """
    id = string.upper(id)
    ID.append(id)
    OPEN[id] = factory, accept
##
# Registers an image MIME type. This function should not be used
# in application code.
#
# @param id An image format identifier.
# @param mimetype The image MIME type for this format.
def register_mime(id, mimetype):
    """Register an image MIME type (internal use only).

    :param id: An image format identifier.
    :param mimetype: The image MIME type for this format.
    """
    MIME[string.upper(id)] = mimetype
##
# Registers an image save function. This function should not be
# used in application code.
#
# @param id An image format identifier.
# @param driver A function to save images in this format.
def register_save(id, driver):
    """Register an image save function (internal use only).

    :param id: An image format identifier.
    :param driver: A function to save images in this format.
    """
    SAVE[string.upper(id)] = driver
##
# Registers an image extension. This function should not be
# used in application code.
#
# @param id An image format identifier.
# @param extension An extension used for this format.
def register_extension(id, extension):
    """Register a filename extension for a format (internal use only).

    :param id: An image format identifier.
    :param extension: An extension used for this format (e.g. ".png").
    """
    EXTENSION[string.lower(extension)] = string.upper(id)
# --------------------------------------------------------------------
# Simple display support. User code may override this.
def _show(image, **options):
    """Default display hook; user code may override this function."""
    # Delegate to _showxv, which does the actual viewer dispatch.
    _showxv(image, **options)
def _showxv(image, title=None, **options):
    """Display *image* via the ImageShow helper module."""
    # Imported lazily so the viewer machinery is only loaded when needed.
    import ImageShow
    ImageShow.show(image, title, **options)
|
hzruandd/AutobahnPython | refs/heads/master | examples/twisted/websocket/multiproto/client.py | 13 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
from twisted.internet import reactor
from twisted.python import log
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
class EchoClientProtocol(WebSocketClientProtocol):
    """WebSocket client that sends "Hello, world!" once per second and
    prints any text message the server echoes back."""

    def sendHello(self):
        # sendMessage expects bytes; encode the greeting as UTF-8.
        self.sendMessage("Hello, world!".encode('utf8'))

    def onOpen(self):
        # Start the send loop as soon as the handshake completes.
        self.sendHello()

    def onClose(self, wasClean, code, reason):
        print(reason)

    def onMessage(self, payload, isBinary):
        if not isBinary:
            print("Text message received: {}".format(payload.decode('utf8')))
        # Schedule the next hello one second from now (binary or not).
        reactor.callLater(1, self.sendHello)
class EchoClientFactory(WebSocketClientFactory):
    """Client factory that stops the reactor when the connection is lost
    or fails, so the script exits instead of hanging."""

    protocol = EchoClientProtocol

    def clientConnectionLost(self, connector, reason):
        print(reason)
        reactor.stop()

    def clientConnectionFailed(self, connector, reason):
        print(reason)
        reactor.stop()
if __name__ == '__main__':
    # The WebSocket server URL is required as the sole CLI argument.
    if len(sys.argv) < 2:
        print("Need the WebSocket server address, i.e. ws://127.0.0.1:9000/echo1")
        sys.exit(1)
    factory = EchoClientFactory(sys.argv[1])
    connectWS(factory)
    reactor.run()
|
cryptobanana/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_iso.py | 49 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_iso
short_description: Manages ISO images on Apache CloudStack based clouds.
description:
- Register and remove ISO images.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the ISO.
required: true
display_text:
description:
- Display text of the ISO.
- If not specified, C(name) will be used.
required: false
default: null
version_added: "2.4"
url:
description:
- URL where the ISO can be downloaded from. Required if C(state) is present.
required: false
default: null
os_type:
description:
- Name of the OS that best represents the OS of this ISO. If the iso is bootable this parameter needs to be passed. Required if C(state) is present.
required: false
default: null
is_ready:
description:
- This flag is used for searching existing ISOs. If set to C(true), it will only list ISO ready for deployment e.g.
successfully downloaded and installed. Recommended to set it to C(false).
required: false
default: false
is_public:
description:
- Register the ISO to be publicly available to all users. Only used if C(state) is present.
required: false
default: null
is_featured:
description:
- Register the ISO to be featured. Only used if C(state) is present.
required: false
default: null
is_dynamically_scalable:
description:
    - Register the ISO having XS/VMWare tools installed in order to support dynamic scaling of VM cpu/memory. Only used if C(state) is present.
required: false
default: null
checksum:
description:
- The MD5 checksum value of this ISO. If set, we search by checksum instead of name.
required: false
default: null
bootable:
description:
- Register the ISO to be bootable. Only used if C(state) is present.
required: false
default: null
domain:
description:
- Domain the ISO is related to.
required: false
default: null
account:
description:
- Account the ISO is related to.
required: false
default: null
project:
description:
- Name of the project the ISO to be registered in.
required: false
default: null
zone:
description:
- Name of the zone you wish the ISO to be registered or deleted from.
- If not specified, first zone found will be used.
required: false
default: null
cross_zones:
description:
- Whether the ISO should be synced or removed across zones.
- Mutually exclusive with C(zone).
required: false
default: false
version_added: "2.4"
iso_filter:
description:
- Name of the filter used to search for the ISO.
required: false
default: 'self'
choices: [ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]
state:
description:
- State of the ISO.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
version_added: "2.3"
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "To delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
aliases: [ 'tag' ]
version_added: "2.4"
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Register an ISO if ISO name does not already exist.
- local_action:
module: cs_iso
name: Debian 7 64-bit
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
os_type: Debian GNU/Linux 7(64-bit)
# Register an ISO with given name if ISO md5 checksum does not already exist.
- local_action:
module: cs_iso
name: Debian 7 64-bit
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
os_type: Debian GNU/Linux 7(64-bit)
checksum: 0b31bccccb048d20b551f70830bb7ad0
# Remove an ISO by name
- local_action:
module: cs_iso
name: Debian 7 64-bit
state: absent
# Remove an ISO by checksum
- local_action:
module: cs_iso
name: Debian 7 64-bit
checksum: 0b31bccccb048d20b551f70830bb7ad0
state: absent
'''
RETURN = '''
---
id:
description: UUID of the ISO.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the ISO.
returned: success
type: string
sample: Debian 7 64-bit
display_text:
description: Text to be displayed of the ISO.
returned: success
type: string
sample: Debian 7.7 64-bit minimal 2015-03-19
zone:
description: Name of zone the ISO is registered in.
returned: success
type: string
sample: zuerich
status:
description: Status of the ISO.
returned: success
type: string
sample: Successfully Installed
is_ready:
description: True if the ISO is ready to be deployed from.
returned: success
type: boolean
sample: true
is_public:
description: True if the ISO is public.
returned: success
type: boolean
sample: true
version_added: "2.4"
bootable:
description: True if the ISO is bootable.
returned: success
type: boolean
sample: true
version_added: "2.4"
is_featured:
description: True if the ISO is featured.
returned: success
type: boolean
sample: true
version_added: "2.4"
format:
description: Format of the ISO.
returned: success
type: string
sample: ISO
version_added: "2.4"
os_type:
  description: Type of the OS.
returned: success
type: string
sample: CentOS 6.5 (64-bit)
version_added: "2.4"
checksum:
description: MD5 checksum of the ISO.
returned: success
type: string
sample: 0b31bccccb048d20b551f70830bb7ad0
created:
description: Date of registering.
returned: success
type: string
sample: 2015-03-29T14:57:06+0200
cross_zones:
description: true if the ISO is managed across all zones, false otherwise.
returned: success
type: boolean
sample: false
version_added: "2.4"
domain:
description: Domain the ISO is related to.
returned: success
type: string
sample: example domain
account:
description: Account the ISO is related to.
returned: success
type: string
sample: example account
project:
description: Project the ISO is related to.
returned: success
type: string
sample: example project
tags:
description: List of resource tags associated with the ISO.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
version_added: "2.4"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackIso(AnsibleCloudStack):
    """Manage ISO images on CloudStack: register, update and remove.

    Fixes: corrected misspelled error messages ("requried" -> "required").
    """

    def __init__(self, module):
        super(AnsibleCloudStackIso, self).__init__(module)
        # Map CloudStack API result keys to this module's return keys.
        self.returns = {
            'checksum': 'checksum',
            'status': 'status',
            'isready': 'is_ready',
            'crossZones': 'cross_zones',
            'format': 'format',
            'ostypename': 'os_type',
            'isfeatured': 'is_featured',
            'bootable': 'bootable',
            'ispublic': 'is_public',
        }
        # Cached ISO dict; populated lazily by get_iso()/API calls.
        self.iso = None

    def _get_common_args(self):
        # Arguments shared by the registerIso and updateIso API calls.
        return {
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
            'isdynamicallyscalable': self.module.params.get('is_dynamically_scalable'),
            'ostypeid': self.get_os_type('id'),
            'bootable': self.module.params.get('bootable'),
        }

    def register_iso(self):
        """Register a new ISO; returns the resulting ISO dict (or None in check mode)."""
        args = self._get_common_args()
        args.update({
            'domainid': self.get_domain('id'),
            'account': self.get_account('name'),
            'projectid': self.get_project('id'),
            'checksum': self.module.params.get('checksum'),
            'isfeatured': self.module.params.get('is_featured'),
            'ispublic': self.module.params.get('is_public'),
        })
        if not self.module.params.get('cross_zones'):
            args['zoneid'] = self.get_zone(key='id')
        else:
            # zoneid -1 registers the ISO across all zones.
            args['zoneid'] = -1
        if args['bootable'] and not args['ostypeid']:
            self.module.fail_json(msg="OS type 'os_type' is required if 'bootable=true'.")
        args['url'] = self.module.params.get('url')
        if not args['url']:
            self.module.fail_json(msg="URL is required.")
        self.result['changed'] = True
        if not self.module.check_mode:
            res = self.query_api('registerIso', **args)
            self.iso = res['iso'][0]
        return self.iso

    def present_iso(self):
        """Ensure the ISO exists (register or update) and carries the given tags."""
        iso = self.get_iso()
        if not iso:
            iso = self.register_iso()
        else:
            iso = self.update_iso(iso)
        if iso:
            iso = self.ensure_tags(resource=iso, resource_type='ISO')
            self.iso = iso
        return iso

    def update_iso(self, iso):
        """Update an existing ISO if any of the common arguments changed."""
        args = self._get_common_args()
        args.update({
            'id': iso['id'],
        })
        if self.has_changed(args, iso):
            self.result['changed'] = True
            if not self.module.params.get('cross_zones'):
                args['zoneid'] = self.get_zone(key='id')
            else:
                # Workaround API does not return cross_zones=true
                self.result['cross_zones'] = True
                args['zoneid'] = -1
            if not self.module.check_mode:
                res = self.query_api('updateIso', **args)
                self.iso = res['iso']
        return self.iso

    def get_iso(self):
        """Find the ISO by checksum (if given) or by name; cache the result."""
        if not self.iso:
            args = {
                'isready': self.module.params.get('is_ready'),
                'isofilter': self.module.params.get('iso_filter'),
                'domainid': self.get_domain('id'),
                'account': self.get_account('name'),
                'projectid': self.get_project('id'),
            }
            if not self.module.params.get('cross_zones'):
                args['zoneid'] = self.get_zone(key='id')
            # if checksum is set, we only look on that.
            checksum = self.module.params.get('checksum')
            if not checksum:
                args['name'] = self.module.params.get('name')
            isos = self.query_api('listIsos', **args)
            if isos:
                if not checksum:
                    self.iso = isos['iso'][0]
                else:
                    for i in isos['iso']:
                        if i['checksum'] == checksum:
                            self.iso = i
                            break
        return self.iso

    def absent_iso(self):
        """Delete the ISO if present; returns the (former) ISO dict or None."""
        iso = self.get_iso()
        if iso:
            self.result['changed'] = True
            args = {
                'id': iso['id'],
                'projectid': self.get_project('id'),
            }
            if not self.module.params.get('cross_zones'):
                args['zoneid'] = self.get_zone(key='id')
            if not self.module.check_mode:
                res = self.query_api('deleteIso', **args)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'iso')
        return iso

    def get_result(self, iso):
        """Build the module result; drops zone info for cross-zone ISOs."""
        super(AnsibleCloudStackIso, self).get_result(iso)
        # Workaround API does not return cross_zones=true
        if self.module.params.get('cross_zones'):
            self.result['cross_zones'] = True
            if 'zone' in self.result:
                del self.result['zone']
        return self.result
def main():
    """Module entry point: parse arguments, apply the requested state,
    and exit with the resulting ISO facts."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        display_text=dict(),
        url=dict(),
        os_type=dict(),
        zone=dict(),
        cross_zones=dict(type='bool', default=False),
        iso_filter=dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
        domain=dict(),
        account=dict(),
        project=dict(),
        checksum=dict(),
        is_ready=dict(type='bool', default=False),
        bootable=dict(type='bool'),
        is_featured=dict(type='bool'),
        is_dynamically_scalable=dict(type='bool'),
        state=dict(choices=['present', 'absent'], default='present'),
        poll_async=dict(type='bool', default=True),
        tags=dict(type='list', aliases=['tag']),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        # zone and cross_zones cannot both be given (see docs above).
        mutually_exclusive=(
            ['zone', 'cross_zones'],
        ),
        supports_check_mode=True
    )
    acs_iso = AnsibleCloudStackIso(module)
    state = module.params.get('state')
    if state in ['absent']:
        iso = acs_iso.absent_iso()
    else:
        iso = acs_iso.present_iso()
    result = acs_iso.get_result(iso)
    module.exit_json(**result)
# Standard module entry point: run main() only when executed directly.
if __name__ == '__main__':
    main()
|
xiandiancloud/ji | refs/heads/master | lms/djangoapps/django_comment_client/permissions.py | 19 | """
Module for checking permissions with the comment_client backend
"""
import logging
from types import NoneType
from django.core import cache
from opaque_keys.edx.keys import CourseKey
# Default Django cache used to memoize permission lookups.
CACHE = cache.get_cache('default')
# Seconds before a cached permission value expires; role/permission
# changes take up to this long to become effective.
CACHE_LIFESPAN = 60
def cached_has_permission(user, permission, course_id=None):
    """
    Call has_permission if it's not cached. A change in a user's role or
    a role's permissions will only become effective after CACHE_LIFESPAN seconds.
    """
    assert isinstance(course_id, (NoneType, CourseKey))
    key = u"permission_{user_id:d}_{course_id}_{permission}".format(
        user_id=user.id, course_id=course_id, permission=permission)
    val = CACHE.get(key, None)
    # Only genuine booleans count as cache hits; anything else (including
    # the None returned on a miss) forces a recomputation and re-cache.
    if val not in [True, False]:
        val = has_permission(user, permission, course_id=course_id)
        CACHE.set(key, val, CACHE_LIFESPAN)
    return val
def has_permission(user, permission, course_id=None):
    """Return True if any of the user's roles for this course grants *permission*."""
    assert isinstance(course_id, (NoneType, CourseKey))
    roles = user.roles.filter(course_id=course_id)
    return any(role.has_permission(permission) for role in roles)
CONDITIONS = ['is_open', 'is_author']
def _check_condition(user, condition, course_id, data):
def check_open(user, condition, course_id, data):
try:
return data and not data['content']['closed']
except KeyError:
return False
def check_author(user, condition, course_id, data):
try:
return data and data['content']['user_id'] == str(user.id)
except KeyError:
return False
handlers = {
'is_open': check_open,
'is_author': check_author,
}
return handlers[condition](user, condition, course_id, data)
def _check_conditions_permissions(user, permissions, course_id, **kwargs):
    """
    Accepts a list of permissions and proceed if any of the permission is valid.
    Note that ["can_view", "can_edit"] will proceed if the user has either
    "can_view" or "can_edit" permission. To use AND operator in between, wrap them in
    a list.
    """
    def test(user, per, operator="or"):
        # A bare string is either a dynamic condition name (see CONDITIONS)
        # or a role permission looked up through the cache.
        if isinstance(per, basestring):
            if per in CONDITIONS:
                return _check_condition(user, per, course_id, kwargs)
            return cached_has_permission(user, per, course_id=course_id)
        elif isinstance(per, list) and operator in ["and", "or"]:
            # Nesting flips the operator: the top level is OR-ed, one level
            # of nesting is AND-ed (elements of a nested list are evaluated
            # with operator="and").
            results = [test(user, x, operator="and") for x in per]
            if operator == "or":
                return True in results
            elif operator == "and":
                return not False in results
    # NOTE(review): an input that is neither a string nor a well-formed
    # list falls through and returns None (falsy) -- effectively a deny.
    return test(user, permissions, operator="or")
# Maps each discussion view name to the permission expression required to
# invoke it.  Top-level entries are OR-ed; a nested list is AND-ed, and
# names in CONDITIONS ('is_open', 'is_author') are evaluated dynamically
# against the content (see _check_conditions_permissions).
VIEW_PERMISSIONS = {
    'update_thread': ['edit_content', ['update_thread', 'is_open', 'is_author']],
    'create_comment': [["create_comment", "is_open"]],
    'delete_thread': ['delete_thread', ['update_thread', 'is_author']],
    'update_comment': ['edit_content', ['update_comment', 'is_open', 'is_author']],
    'endorse_comment': ['endorse_comment'],
    'openclose_thread': ['openclose_thread'],
    'create_sub_comment': [['create_sub_comment', 'is_open']],
    'delete_comment': ['delete_comment', ['update_comment', 'is_open', 'is_author']],
    'vote_for_comment': [['vote', 'is_open']],
    'undo_vote_for_comment': [['unvote', 'is_open']],
    'vote_for_thread': [['vote', 'is_open']],
    'flag_abuse_for_thread': [['vote', 'is_open']],
    'un_flag_abuse_for_thread': [['vote', 'is_open']],
    'flag_abuse_for_comment': [['vote', 'is_open']],
    'un_flag_abuse_for_comment': [['vote', 'is_open']],
    'undo_vote_for_thread': [['unvote', 'is_open']],
    'pin_thread': ['openclose_thread'],
    'un_pin_thread': ['openclose_thread'],
    'follow_thread': ['follow_thread'],
    'follow_commentable': ['follow_commentable'],
    'follow_user': ['follow_user'],
    'unfollow_thread': ['unfollow_thread'],
    'unfollow_commentable': ['unfollow_commentable'],
    'unfollow_user': ['unfollow_user'],
    'create_thread': ['create_thread'],
}
def check_permissions_by_view(user, course_id, content, name):
    """Check whether *user* may perform the view named *name* on *content*.

    Returns False (deny) when the view name has no registered permission
    entry.  Previously the KeyError branch fell through with ``p`` unbound,
    raising UnboundLocalError instead of denying cleanly.
    """
    assert isinstance(course_id, CourseKey)
    try:
        p = VIEW_PERMISSIONS[name]
    except KeyError:
        logging.warning("Permission for view named %s does not exist in permissions.py" % name)
        return False
    return _check_conditions_permissions(user, p, course_id, content=content)
|
tatsuhirosatou/p2p-app-backend | refs/heads/master | p2p_tests.py | 1 | #!/usr/bin/python
import unittest
import p2p
import requests
import base64
import json
import code
import random
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: random.choice([1,-1])
class P2PTests(unittest.TestCase):
    """Integration tests for the p2p backend.

    NOTE(review): these tests issue real HTTP requests to self.url, so a
    server must already be running there; the Flask test client created
    in setUp is never actually used by the test methods.
    """

    def setUp(self):
        p2p.app.config['TESTING'] = True
        self.app = p2p.app.test_client()
        # Base URL of the live server under test.
        self.url = "http://localhost:5000"

    def tearDown(self):
        pass

    def test_register(self):
        # Registering a new user should return 201 Created.
        data = {'username':'testUser', 'password':'testPass', \
            'email':'test@test.com'}
        headers = {'content-type': 'application/json'}
        rv = requests.post(self.url + '/register', data=json.dumps(data),\
            headers=headers)
        self.assertEqual(rv.status_code, 201)

    def test_add_question(self):
        # Posting a question as the registered user should echo it back.
        headers = {'content-type': 'application/json'}
        data = { 'title': 'Test Question', 'detailed': '''This is a sample
            question''', 'tags':''}
        rv = requests.post(self.url + '/questions', data = json.dumps(data), \
            auth=('testUser', 'testPass'), \
            headers=headers)
        assert 'question' in rv.json()
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
andymckay/addons-server | refs/heads/master | src/olympia/editors/tests/test_helpers.py | 2 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.core import mail
from django.core.files.storage import default_storage as storage
import pytest
from mock import Mock, patch
from pyquery import PyQuery as pq
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.addons.models import Addon
from olympia.amo.urlresolvers import reverse
from olympia.devhub.models import ActivityLog
from olympia.editors import helpers
from olympia.editors.models import ReviewerScore
from olympia.files.models import File
from olympia.translations.models import Translation
from olympia.users.models import UserProfile
from olympia.versions.models import Version
from . test_models import create_addon_file
# Every test in this module needs database access.
pytestmark = pytest.mark.django_db

# Add-on statuses that place an add-on in a review queue.
REVIEW_ADDON_STATUSES = (amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED,
                         amo.STATUS_UNREVIEWED)
# File statuses relevant when reviewing individual files.
REVIEW_FILES_STATUSES = (amo.STATUS_PUBLIC,
                         amo.STATUS_DISABLED, amo.STATUS_LITE)
class TestViewPendingQueueTable(TestCase):
    """Column-rendering tests for the pending-queue table, driven by Mock
    rows rather than real queryset data."""

    def setUp(self):
        super(TestViewPendingQueueTable, self).setUp()
        qs = Mock()
        self.table = helpers.ViewPendingQueueTable(qs)

    def test_addon_name(self):
        row = Mock()
        page = Mock()
        page.start_index = Mock()
        page.start_index.return_value = 1
        # Non-ASCII name exercises unicode handling (Python 2 str.decode).
        row.addon_name = 'フォクすけといっしょ'.decode('utf8')
        row.addon_slug = 'test'
        row.latest_version = u'0.12'
        self.table.set_page(page)
        a = pq(self.table.render_addon_name(row))
        assert a.attr('href') == (
            reverse('editors.review', args=[str(row.addon_slug)]))
        assert a.text() == "フォクすけといっしょ 0.12".decode('utf8')

    def test_addon_type_id(self):
        row = Mock()
        row.addon_type_id = amo.ADDON_THEME
        assert unicode(self.table.render_addon_type_id(row)) == (
            u'Complete Theme')

    def test_applications(self):
        # Each supported application renders as its sprite icon.
        row = Mock()
        row.application_ids = [amo.FIREFOX.id, amo.THUNDERBIRD.id]
        doc = pq(self.table.render_applications(row))
        assert sorted(a.attrib['class'] for a in doc('div div')) == (
            ['app-icon ed-sprite-firefox', 'app-icon ed-sprite-thunderbird'])

    def test_waiting_time_in_days(self):
        row = Mock()
        row.waiting_time_days = 10
        row.waiting_time_hours = 10 * 24
        assert self.table.render_waiting_time_min(row) == u'10 days'

    def test_waiting_time_one_day(self):
        row = Mock()
        row.waiting_time_days = 1
        row.waiting_time_hours = 24
        row.waiting_time_min = 60 * 24
        assert self.table.render_waiting_time_min(row) == u'1 day'

    def test_waiting_time_in_hours(self):
        row = Mock()
        row.waiting_time_days = 0
        row.waiting_time_hours = 22
        row.waiting_time_min = 60 * 22
        assert self.table.render_waiting_time_min(row) == u'22 hours'

    def test_waiting_time_in_min(self):
        row = Mock()
        row.waiting_time_days = 0
        row.waiting_time_hours = 0
        row.waiting_time_min = 11
        assert self.table.render_waiting_time_min(row) == u'11 minutes'

    def test_waiting_time_in_secs(self):
        # Below one minute the column falls back to "moments ago".
        row = Mock()
        row.waiting_time_days = 0
        row.waiting_time_hours = 0
        row.waiting_time_min = 0
        assert self.table.render_waiting_time_min(row) == u'moments ago'

    def test_flags(self):
        row = Mock()
        row.flags = [('admin-review', 'Admin Review')]
        doc = pq(self.table.render_flags(row))
        assert doc('div.ed-sprite-admin-review').length
class TestUnlistedViewAllListTable(TestCase):
    """Column-rendering tests for the unlisted all-add-ons list table."""

    def setUp(self):
        super(TestUnlistedViewAllListTable, self).setUp()
        qs = Mock()
        self.table = helpers.ViewUnlistedAllListTable(qs)

    def test_addon_name(self):
        row = Mock()
        page = Mock()
        page.start_index = Mock()
        page.start_index.return_value = 1
        row.addon_name = 'フォクすけといっしょ'.decode('utf8')
        row.addon_slug = 'test'
        row.latest_version = u'0.12'
        self.table.set_page(page)
        a = pq(self.table.render_addon_name(row))
        assert (a.attr('href') == reverse('editors.review',
                                          args=[str(row.addon_slug)]))
        assert a.text() == 'フォクすけといっしょ 0.12'.decode('utf8')

    def test_last_review(self):
        row = Mock()
        row.review_version_num = u'0.34.3b'
        row.review_date = u'2016-01-01'
        doc = pq(self.table.render_review_date(row))
        assert doc.text() == u'0.34.3b on 2016-01-01'

    def test_no_review(self):
        row = Mock()
        row.review_version_num = None
        row.review_date = None
        doc = pq(self.table.render_review_date(row))
        assert doc.text() == u'No Reviews'

    def test_authors_few(self):
        # Up to three authors are all rendered with profile links.
        row = Mock()
        row.authors = [(123, 'bob'), (456, 'steve')]
        doc = pq(self.table.render_authors(row))
        assert doc('span').text() == 'bob steve'
        assert doc('span a:eq(0)').attr('href') == UserProfile.create_user_url(
            123, username='bob')
        assert doc('span a:eq(1)').attr('href') == UserProfile.create_user_url(
            456, username='steve')
        assert doc('span').attr('title') == 'bob steve'

    def test_authors_four(self):
        # With four or more, only the first three get links plus an
        # ellipsis; the title attribute still lists everyone.
        row = Mock()
        row.authors = [(123, 'bob'), (456, 'steve'), (789, 'cvan'),
                       (999, 'basta')]
        doc = pq(self.table.render_authors(row))
        assert doc.text() == 'bob steve cvan ...'
        assert doc('span a:eq(0)').attr('href') == UserProfile.create_user_url(
            123, username='bob')
        assert doc('span a:eq(1)').attr('href') == UserProfile.create_user_url(
            456, username='steve')
        assert doc('span a:eq(2)').attr('href') == UserProfile.create_user_url(
            789, username='cvan')
        assert doc('span').attr('title') == 'bob steve cvan basta', doc.html()
class TestAdditionalInfoInQueue(TestCase):
    """Tests for the 'additional info' and platform columns of the
    pending-queue table."""

    def setUp(self):
        super(TestAdditionalInfoInQueue, self).setUp()
        qs = Mock()
        self.table = helpers.ViewPendingQueueTable(qs)
        self.row = Mock()
        # Baseline row: nothing special to report; tests flip one flag each.
        self.row.is_site_specific = False
        self.row.file_platform_ids = [amo.PLATFORM_ALL.id]
        self.row.external_software = False
        self.row.binary = False
        self.row.binary_components = False

    def test_no_info(self):
        assert self.table.render_additional_info(self.row) == ''

    def test_site_specific(self):
        self.row.is_site_specific = True
        assert self.table.render_additional_info(self.row) == u'Site Specific'

    def test_platform(self):
        self.row.file_platform_ids = [amo.PLATFORM_LINUX.id]
        assert "plat-sprite-linux" in self.table.render_platforms(self.row)

    def test_combo(self):
        # Multiple flags are comma-joined in a single string.
        self.row.is_site_specific = True
        self.row.external_software = True
        assert self.table.render_additional_info(self.row) == (
            u'Site Specific, Requires External Software')

    def test_all_platforms(self):
        self.row.file_platform_ids = [amo.PLATFORM_ALL.id]
        assert "plat-sprite-all" in self.table.render_platforms(self.row)

    def test_mixed_platforms(self):
        self.row.file_platform_ids = [amo.PLATFORM_ALL.id,
                                      amo.PLATFORM_LINUX.id]
        assert "plat-sprite-linux" in self.table.render_platforms(self.row)
        assert "plat-sprite-all" in self.table.render_platforms(self.row)

    def test_external_software(self):
        self.row.external_software = True
        assert self.table.render_additional_info(self.row) == (
            u'Requires External Software')

    def test_binary(self):
        self.row.binary = True
        assert self.table.render_additional_info(self.row) == (
            u'Binary Components')
yesterday = datetime.today() - timedelta(days=1)
class TestReviewHelper(TestCase):
fixtures = ['base/addon_3615', 'base/users']
preamble = 'Mozilla Add-ons: Delicious Bookmarks 2.1.072'
def setUp(self):
super(TestReviewHelper, self).setUp()
class FakeRequest:
user = UserProfile.objects.get(pk=10482)
self.request = FakeRequest()
self.addon = Addon.objects.get(pk=3615)
self.version = self.addon.versions.all()[0]
self.helper = self.get_helper()
self.file = self.version.files.all()[0]
self.create_paths()
def _check_score(self, reviewed_type):
scores = ReviewerScore.objects.all()
assert len(scores) > 0
assert scores[0].score == amo.REVIEWED_SCORES[reviewed_type]
assert scores[0].note_key == reviewed_type
def create_paths(self):
if not storage.exists(self.file.file_path):
with storage.open(self.file.file_path, 'w') as f:
f.write('test data\n')
def get_data(self):
return {'comments': 'foo', 'addon_files': self.version.files.all(),
'action': 'prelim', 'operating_systems': 'osx',
'applications': 'Firefox'}
def get_helper(self):
return helpers.ReviewHelper(request=self.request, addon=self.addon,
version=self.version)
def setup_type(self, status):
self.addon.update(status=status)
return self.get_helper().review_type
def check_log_count(self, id):
return (ActivityLog.objects.for_addons(self.helper.addon)
.filter(action=id).count())
def test_no_request(self):
self.request = None
helper = self.get_helper()
assert helper.actions == {}
def test_type_nominated(self):
assert self.setup_type(amo.STATUS_NOMINATED) == 'nominated'
assert self.setup_type(amo.STATUS_LITE_AND_NOMINATED) == 'nominated'
def test_type_preliminary(self):
assert self.setup_type(amo.STATUS_UNREVIEWED) == 'preliminary'
assert self.setup_type(amo.STATUS_LITE) == 'preliminary'
def test_type_pending(self):
assert self.setup_type(amo.STATUS_PENDING) == 'pending'
assert self.setup_type(amo.STATUS_NULL) == 'pending'
assert self.setup_type(amo.STATUS_PUBLIC) == 'pending'
assert self.setup_type(amo.STATUS_DISABLED) == 'pending'
assert self.setup_type(amo.STATUS_BETA) == 'pending'
assert self.setup_type(amo.STATUS_PURGATORY) == 'pending'
def test_no_version(self):
helper = helpers.ReviewHelper(request=self.request, addon=self.addon,
version=None)
assert helper.review_type == 'pending'
def test_review_files(self):
for status in REVIEW_FILES_STATUSES:
self.setup_data(status=status)
assert self.helper.handler.__class__ == helpers.ReviewFiles
def test_review_addon(self):
for status in REVIEW_ADDON_STATUSES:
self.setup_data(status=status)
assert self.helper.handler.__class__ == helpers.ReviewAddon
def test_process_action_none(self):
self.helper.set_data({'action': 'foo'})
self.assertRaises(self.helper.process)
def test_process_action_good(self):
self.helper.set_data({'action': 'info', 'comments': 'foo'})
self.helper.process()
assert len(mail.outbox) == 1
def test_clear_has_info_request(self):
self.version.update(has_info_request=True)
assert self.version.has_info_request
self.helper.set_data({'action': 'comment', 'comments': 'foo',
'clear_info_request': True})
self.helper.process()
assert not self.version.has_info_request
def test_do_not_clear_has_info_request(self):
self.version.update(has_info_request=True)
assert self.version.has_info_request
self.helper.set_data({'action': 'comment', 'comments': 'foo'})
self.helper.process()
assert self.version.has_info_request
def test_action_details(self):
for status in Addon.STATUS_CHOICES:
self.addon.update(status=status)
helper = self.get_helper()
actions = helper.actions
for k, v in actions.items():
assert unicode(v['details']), "Missing details for: %s" % k
def get_action(self, status, action):
self.addon.update(status=status)
return unicode(self.get_helper().actions[action]['details'])
def test_action_changes(self):
assert (self.get_action(amo.STATUS_LITE, 'reject')[:26] ==
'This will reject the files')
assert (self.get_action(amo.STATUS_UNREVIEWED, 'reject')[:27] ==
'This will reject the add-on')
assert (self.get_action(amo.STATUS_UNREVIEWED, 'prelim')[:25] ==
'This will mark the add-on')
assert (self.get_action(amo.STATUS_NOMINATED, 'prelim')[:25] ==
'This will mark the add-on')
assert (self.get_action(amo.STATUS_LITE, 'prelim')[:24] ==
'This will mark the files')
assert (
self.get_action(amo.STATUS_LITE_AND_NOMINATED, 'prelim')[:27] ==
'This will retain the add-on')
assert (self.get_action(amo.STATUS_NULL, 'info')[:41] ==
'Use this form to request more information')
assert (self.get_action(amo.STATUS_NOMINATED, 'public')[-31:] ==
'they are reviewed by an editor.')
assert (self.get_action(amo.STATUS_PUBLIC, 'public')[-29:] ==
'to appear on the public side.')
def test_set_files(self):
self.file.update(datestatuschanged=yesterday)
self.helper.set_data({'addon_files': self.version.files.all()})
self.helper.handler.set_files(amo.STATUS_PUBLIC,
self.helper.handler.data['addon_files'])
self.file = self.version.files.all()[0]
assert self.file.status == amo.STATUS_PUBLIC
assert self.file.datestatuschanged.date() > yesterday.date()
def test_set_files_copy(self):
self.helper.set_data({'addon_files': self.version.files.all()})
self.helper.handler.set_files(amo.STATUS_PUBLIC,
self.helper.handler.data['addon_files'],
copy_to_mirror=True)
assert storage.exists(self.file.mirror_file_path)
def test_set_files_remove(self):
with storage.open(self.file.mirror_file_path, 'wb') as f:
f.write('test data\n')
self.helper.set_data({'addon_files': self.version.files.all()})
self.helper.handler.set_files(amo.STATUS_PUBLIC,
self.helper.handler.data['addon_files'],
hide_disabled_file=True)
assert not storage.exists(self.file.mirror_file_path)
def test_logs(self):
self.helper.set_data({'comments': 'something'})
self.helper.handler.log_action(amo.LOG.APPROVE_VERSION)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
def test_notify_email(self):
self.helper.set_data(self.get_data())
for template in ['nominated_to_nominated', 'nominated_to_preliminary',
'nominated_to_public', 'nominated_to_sandbox',
'pending_to_preliminary', 'pending_to_public',
'pending_to_sandbox', 'preliminary_to_preliminary',
'author_super_review', 'unlisted_to_reviewed',
'unlisted_to_reviewed_auto',
'unlisted_to_sandbox']:
mail.outbox = []
self.helper.handler.notify_email(template, 'Sample subject %s, %s')
assert len(mail.outbox) == 1
assert mail.outbox[0].body, 'Expected a message'
def test_email_links(self):
expected = {
'nominated_to_nominated': 'addon_url',
'nominated_to_preliminary': 'addon_url',
'nominated_to_public': 'addon_url',
'nominated_to_sandbox': 'dev_versions_url',
'pending_to_preliminary': 'addon_url',
'pending_to_public': 'addon_url',
'pending_to_sandbox': 'dev_versions_url',
'preliminary_to_preliminary': 'addon_url',
'unlisted_to_reviewed': 'dev_versions_url',
'unlisted_to_reviewed_auto': 'dev_versions_url',
'unlisted_to_sandbox': 'dev_versions_url'
}
self.helper.set_data(self.get_data())
context_data = self.helper.handler.get_context_data()
for template, context_key in expected.iteritems():
mail.outbox = []
self.helper.handler.notify_email(template, 'Sample subject %s, %s')
assert len(mail.outbox) == 1
assert context_key in context_data
assert context_data.get(context_key) in mail.outbox[0].body
def setup_data(self, status, delete=[], is_listed=True):
mail.outbox = []
ActivityLog.objects.for_addons(self.helper.addon).delete()
self.addon.update(status=status, is_listed=is_listed)
self.file.update(status=status)
self.helper = self.get_helper()
data = self.get_data().copy()
for key in delete:
del data[key]
self.helper.set_data(data)
def test_request_more_information(self):
self.setup_data(amo.STATUS_PUBLIC, ['addon_files'])
self.helper.handler.request_information()
assert self.version.has_info_request
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == self.preamble
assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 1
def test_request_more_information_no_versions(self):
assert len(mail.outbox) == 0
assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 0
self.version.delete()
self.helper = helpers.ReviewHelper(request=self.request,
addon=self.addon)
data = {'comments': 'foo', 'action': 'info',
'operating_systems': 'osx', 'applications': 'Firefox'}
self.helper.set_data(data)
self.helper.handler.request_information()
assert len(mail.outbox) == 1
subject = 'Mozilla Add-ons: Delicious Bookmarks '
assert mail.outbox[0].subject == subject
assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 1
def test_request_more_information_deleted_addon(self):
self.addon.delete()
self.test_request_more_information()
def test_email_no_locale(self):
self.setup_data(amo.STATUS_NOMINATED, ['addon_files'])
self.helper.handler.process_public()
assert len(mail.outbox) == 1
assert '/en-US/firefox/addon/a3615' not in mail.outbox[0].body
assert '/addon/a3615' in mail.outbox[0].body
def test_nomination_to_public_no_files(self):
for status in helpers.NOMINATED_STATUSES:
self.setup_data(status, ['addon_files'])
self.helper.handler.process_public()
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_PUBLIC)
def test_nomination_to_public_and_current_version(self):
for status in helpers.NOMINATED_STATUSES:
self.setup_data(status, ['addon_files'])
self.addon = Addon.objects.get(pk=3615)
self.addon.update(_current_version=None)
assert not self.addon.current_version
self.helper.handler.process_public()
self.addon = Addon.objects.get(pk=3615)
assert self.addon.current_version
def test_nomination_to_public_new_addon(self):
""" Make sure new add-ons can be made public (bug 637959) """
status = amo.STATUS_NOMINATED
self.setup_data(status)
# Make sure we have no public files
for i in self.addon.versions.all():
i.files.update(status=amo.STATUS_UNREVIEWED)
self.helper.handler.process_public()
# Re-fetch the add-on
addon = Addon.objects.get(pk=3615)
assert addon.status == amo.STATUS_PUBLIC
assert addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_PUBLIC)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == '%s Fully Reviewed' % self.preamble
assert storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_FULL)
@patch('olympia.editors.helpers.sign_file')
def test_nomination_to_public(self, sign_mock):
for status in helpers.NOMINATED_STATUSES:
sign_mock.reset()
self.setup_data(status)
with self.settings(SIGNING_SERVER='full'):
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_PUBLIC
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_PUBLIC)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Fully Reviewed' % self.preamble)
assert 'has been fully reviewed' in mail.outbox[0].body
sign_mock.assert_called_with(self.file, 'full')
assert storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_FULL)
@patch('olympia.editors.helpers.sign_file')
def test_nomination_to_public_unlisted(self, sign_mock):
for status in helpers.NOMINATED_STATUSES:
sign_mock.reset()
self.setup_data(status, is_listed=False)
with self.settings(SIGNING_SERVER='full'):
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_PUBLIC
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_PUBLIC)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s signed and ready to download' % self.preamble)
assert 'has been reviewed and is now signed' in mail.outbox[0].body
sign_mock.assert_called_with(self.file, 'full')
assert storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_FULL)
@patch('olympia.editors.helpers.sign_file')
def test_nomination_to_public_failed_signing(self, sign_mock):
sign_mock.side_effect = Exception
for status in helpers.NOMINATED_STATUSES:
sign_mock.reset()
self.setup_data(status)
with self.settings(SIGNING_SERVER='full'):
with self.assertRaises(Exception):
self.helper.handler.process_public()
# Status unchanged.
assert self.addon.status == status
assert self.addon.versions.all()[0].files.all()[0].status == status
assert len(mail.outbox) == 0
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 0
@patch('olympia.editors.helpers.sign_file')
def test_nomination_to_preliminary(self, sign_mock):
for status in helpers.NOMINATED_STATUSES:
sign_mock.reset()
self.setup_data(status)
with self.settings(PRELIMINARY_SIGNING_SERVER='prelim'):
self.helper.handler.process_preliminary()
assert self.addon.status == amo.STATUS_LITE
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_LITE)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Preliminary Reviewed' % self.preamble)
assert 'has been granted preliminary review' in mail.outbox[0].body
sign_mock.assert_called_with(self.file, 'prelim')
assert storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.PRELIMINARY_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_FULL)
@patch('olympia.editors.helpers.sign_file')
def test_nomination_to_preliminary_unlisted(self, sign_mock):
for status in helpers.NOMINATED_STATUSES:
sign_mock.reset()
self.setup_data(status, is_listed=False)
with self.settings(PRELIMINARY_SIGNING_SERVER='prelim'):
self.helper.handler.process_preliminary()
assert self.addon.status == amo.STATUS_LITE
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_LITE)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s signed and ready to download' % self.preamble)
assert 'has been reviewed and is now signed' in mail.outbox[0].body
sign_mock.assert_called_with(self.file, 'prelim')
assert storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.PRELIMINARY_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_FULL)
@patch('olympia.editors.helpers.sign_file')
def test_nomination_to_preliminary_unlisted_auto(self, sign_mock):
for status in helpers.NOMINATED_STATUSES:
sign_mock.reset()
self.setup_data(status, is_listed=False)
with self.settings(PRELIMINARY_SIGNING_SERVER='prelim'):
self.helper.handler.process_preliminary(auto_validation=True)
assert self.addon.status == amo.STATUS_LITE
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_LITE)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s signed and ready to download' % self.preamble)
assert 'has passed our automatic tests' in mail.outbox[0].body
sign_mock.assert_called_with(self.file, 'prelim')
assert storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.PRELIMINARY_VERSION.id) == 1
assert not ReviewerScore.objects.all()
@patch('olympia.editors.helpers.sign_file')
def test_nomination_to_preliminary_failed_signing(self, sign_mock):
sign_mock.side_effect = Exception
for status in helpers.NOMINATED_STATUSES:
sign_mock.reset()
self.setup_data(status)
with self.assertRaises(Exception):
self.helper.handler.process_preliminary()
# Status unchanged.
assert self.addon.status == status
assert self.addon.versions.all()[0].files.all()[0].status == status
assert len(mail.outbox) == 0
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 0
@patch('olympia.editors.helpers.sign_file')
def test_nomination_to_sandbox(self, sign_mock):
for status in helpers.NOMINATED_STATUSES:
self.setup_data(status)
self.helper.handler.process_sandbox()
assert self.addon.status == amo.STATUS_NULL
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_DISABLED)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s didn\'t pass review' % self.preamble)
assert 'did not meet the criteria' in mail.outbox[0].body
assert not sign_mock.called
assert not storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
@patch('olympia.editors.helpers.sign_file')
def test_nomination_to_sandbox_unlisted(self, sign_mock):
for status in helpers.NOMINATED_STATUSES:
self.setup_data(status, is_listed=False)
self.helper.handler.process_sandbox()
assert self.addon.status == amo.STATUS_NULL
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_DISABLED)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s didn\'t pass review' % self.preamble)
assert 'didn\'t pass review' in mail.outbox[0].body
assert not sign_mock.called
assert not storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
def test_email_unicode_monster(self):
self.addon.name = u'TaobaoShopping淘宝网导航按钮'
self.addon.save()
self.setup_data(helpers.NOMINATED_STATUSES[0])
self.helper.handler.process_sandbox()
assert u'TaobaoShopping淘宝网导航按钮' in mail.outbox[0].subject
def test_super_review_email(self):
self.setup_data(amo.STATUS_NULL)
self.helper.handler.process_super_review()
url = reverse('editors.review', args=[self.addon.pk], add_prefix=False)
assert url in mail.outbox[1].body
def test_nomination_to_super_review(self):
for status in helpers.NOMINATED_STATUSES:
self.setup_data(status)
self.helper.handler.process_super_review()
assert self.addon.admin_review
assert len(mail.outbox) == 2
assert mail.outbox[1].subject == (
'Super review requested: Delicious Bookmarks')
assert mail.outbox[0].subject == (
('Mozilla Add-ons: Delicious Bookmarks 2.1.072 flagged for '
'Admin Review'))
assert self.check_log_count(amo.LOG.REQUEST_SUPER_REVIEW.id) == 1
def test_unreviewed_to_public(self):
self.setup_data(amo.STATUS_UNREVIEWED)
self.assertRaises(AssertionError,
self.helper.handler.process_public)
def test_lite_to_public(self):
self.setup_data(amo.STATUS_LITE)
self.assertRaises(AssertionError,
self.helper.handler.process_public)
@patch('olympia.editors.helpers.sign_file')
def test_preliminary_to_preliminary(self, sign_mock):
for status in helpers.PRELIMINARY_STATUSES:
self.setup_data(status)
self.helper.handler.process_preliminary()
for file in self.helper.handler.data['addon_files']:
assert file.status == amo.STATUS_LITE
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Preliminary Reviewed' % self.preamble)
assert 'has been preliminarily reviewed' in mail.outbox[0].body
assert sign_mock.called
assert storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.PRELIMINARY_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_PRELIM)
@patch('olympia.editors.helpers.sign_file')
def test_preliminary_to_preliminary_unlisted(self, sign_mock):
for status in helpers.PRELIMINARY_STATUSES:
self.setup_data(status, is_listed=False)
self.helper.handler.process_preliminary()
for file in self.helper.handler.data['addon_files']:
assert file.status == amo.STATUS_LITE
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s signed and ready to download' % self.preamble)
assert 'has been reviewed and is now signed' in mail.outbox[0].body
assert sign_mock.called
assert storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.PRELIMINARY_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_PRELIM)
@patch('olympia.editors.helpers.sign_file')
def test_preliminary_to_preliminary_unlisted_auto(self, sign_mock):
for status in helpers.PRELIMINARY_STATUSES:
self.setup_data(status, is_listed=False)
self.helper.handler.process_preliminary(auto_validation=True)
for file in self.helper.handler.data['addon_files']:
assert file.status == amo.STATUS_LITE
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s signed and ready to download' % self.preamble)
assert 'has passed our automatic tests' in mail.outbox[0].body
assert sign_mock.called
assert storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.PRELIMINARY_VERSION.id) == 1
assert not ReviewerScore.objects.all()
@patch('olympia.editors.helpers.sign_file')
def test_preliminary_to_sandbox(self, sign_mock):
for status in [amo.STATUS_UNREVIEWED, amo.STATUS_LITE_AND_NOMINATED]:
self.setup_data(status)
self.helper.handler.process_sandbox()
for file in self.helper.handler.data['addon_files']:
assert file.status == amo.STATUS_DISABLED
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s didn\'t pass review' % self.preamble)
assert 'did not meet the criteria' in mail.outbox[0].body
assert not sign_mock.called
assert not storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
@patch('olympia.editors.helpers.sign_file')
def test_preliminary_to_sandbox_unlisted(self, sign_mock):
for status in [amo.STATUS_UNREVIEWED, amo.STATUS_LITE_AND_NOMINATED]:
self.setup_data(status, is_listed=False)
self.helper.handler.process_sandbox()
for file in self.helper.handler.data['addon_files']:
assert file.status == amo.STATUS_DISABLED
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s didn\'t pass review' % self.preamble)
assert 'didn\'t pass review' in mail.outbox[0].body
assert not sign_mock.called
assert not storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
def test_preliminary_upgrade_to_sandbox(self):
self.setup_data(amo.STATUS_LITE)
assert self.addon.status == amo.STATUS_LITE
assert self.file.status == amo.STATUS_LITE
a = create_addon_file(self.addon.name, '2.2', amo.STATUS_LITE,
amo.STATUS_UNREVIEWED)
self.version = a['version']
self.addon.update(status=amo.STATUS_LITE_AND_NOMINATED)
self.helper = self.get_helper()
self.helper.set_data(self.get_data())
self.helper.handler.process_sandbox()
assert self.addon.status == amo.STATUS_LITE
assert self.file.status == amo.STATUS_LITE
f = File.objects.get(pk=a['file'].id)
assert f.status == amo.STATUS_DISABLED
def test_preliminary_to_super_review(self):
for status in helpers.PRELIMINARY_STATUSES:
self.setup_data(status)
self.helper.handler.process_super_review()
assert self.addon.admin_review
assert len(mail.outbox) == 2
assert mail.outbox[1].subject == (
'Super review requested: Delicious Bookmarks')
assert mail.outbox[0].subject == (
('Mozilla Add-ons: Delicious Bookmarks 2.1.072 flagged for '
'Admin Review'))
assert self.check_log_count(amo.LOG.REQUEST_SUPER_REVIEW.id) == 1
def test_nomination_to_super_review_and_escalate(self):
# Note we are changing the file status here.
for file_status in (amo.STATUS_PENDING, amo.STATUS_UNREVIEWED):
self.setup_data(amo.STATUS_LITE)
self.file.update(status=file_status)
self.helper.handler.process_super_review()
assert self.addon.admin_review
assert len(mail.outbox) == 2
assert mail.outbox[1].subject == (
'Super review requested: Delicious Bookmarks')
assert mail.outbox[0].subject == (
('Mozilla Add-ons: Delicious Bookmarks 2.1.072 flagged for '
'Admin Review'))
assert self.check_log_count(amo.LOG.REQUEST_SUPER_REVIEW.id) == 1
@patch('olympia.editors.helpers.sign_file')
def test_pending_to_public(self, sign_mock):
for status in [amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED]:
self.setup_data(status)
self.create_paths()
self.helper.handler.process_public()
for file in self.helper.handler.data['addon_files']:
assert file.status == amo.STATUS_PUBLIC
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Fully Reviewed' % self.preamble)
assert 'has been fully reviewed' in mail.outbox[0].body
assert sign_mock.called
assert storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
if status == amo.STATUS_PUBLIC:
self._check_score(amo.REVIEWED_ADDON_UPDATE)
@patch('olympia.editors.helpers.sign_file')
def test_pending_to_public_unlisted(self, sign_mock):
for status in [amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED]:
self.setup_data(status, is_listed=False)
self.create_paths()
self.helper.handler.process_public()
for file in self.helper.handler.data['addon_files']:
assert file.status == amo.STATUS_PUBLIC
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s signed and ready to download' % self.preamble)
assert 'has been reviewed and is now signed' in mail.outbox[0].body
assert sign_mock.called
assert storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
if status == amo.STATUS_PUBLIC:
self._check_score(amo.REVIEWED_ADDON_UPDATE)
@patch('olympia.editors.helpers.sign_file')
def test_pending_to_sandbox(self, sign_mock):
for status in amo.UNDER_REVIEW_STATUSES:
self.setup_data(status)
self.helper.handler.process_sandbox()
for file in self.helper.handler.data['addon_files']:
assert file.status == amo.STATUS_DISABLED
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s didn\'t pass review' % self.preamble)
assert 'did not meet the criteria' in mail.outbox[0].body
assert not sign_mock.called
assert not storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
@patch('olympia.editors.helpers.sign_file')
def test_pending_to_sandbox_unlisted(self, sign_mock):
for status in amo.UNDER_REVIEW_STATUSES:
self.setup_data(status, is_listed=False)
self.helper.handler.process_sandbox()
for file in self.helper.handler.data['addon_files']:
assert file.status == amo.STATUS_DISABLED
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s didn\'t pass review' % self.preamble)
assert 'didn\'t pass review' in mail.outbox[0].body
assert not sign_mock.called
assert not storage.exists(self.file.mirror_file_path)
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
def test_operating_system_present(self):
self.setup_data(amo.STATUS_BETA)
self.helper.handler.process_sandbox()
assert 'Tested on osx with Firefox' in mail.outbox[0].body
def test_operating_system_not_present(self):
self.setup_data(amo.STATUS_BETA)
data = self.get_data().copy()
data['operating_systems'] = ''
self.helper.set_data(data)
self.helper.handler.process_sandbox()
assert 'Tested with Firefox' in mail.outbox[0].body
def test_application_not_present(self):
self.setup_data(amo.STATUS_BETA)
data = self.get_data().copy()
data['applications'] = ''
self.helper.set_data(data)
self.helper.handler.process_sandbox()
assert 'Tested on osx' in mail.outbox[0].body
def test_both_not_present(self):
self.setup_data(amo.STATUS_BETA)
data = self.get_data().copy()
data['applications'] = ''
data['operating_systems'] = ''
self.helper.set_data(data)
self.helper.handler.process_sandbox()
assert 'Tested' not in mail.outbox[0].body
def test_pending_to_super_review(self):
for status in helpers.PENDING_STATUSES:
self.setup_data(status)
self.helper.handler.process_super_review()
assert self.addon.admin_review
assert len(mail.outbox) == 2
assert mail.outbox[1].subject == (
'Super review requested: Delicious Bookmarks')
assert mail.outbox[0].subject == (
('Mozilla Add-ons: Delicious Bookmarks 2.1.072 flagged for '
'Admin Review'))
def test_nominated_review_time_set(self):
for status in REVIEW_ADDON_STATUSES:
for process in ['process_sandbox', 'process_preliminary',
'process_public']:
if (status == amo.STATUS_UNREVIEWED and
process == 'process_public'):
continue
self.version.update(reviewed=None)
self.setup_data(status)
getattr(self.helper.handler, process)()
assert self.version.reviewed, ('Reviewed for status %r, %s()'
% (status, process))
def test_preliminary_review_time_set(self):
for status in amo.UNDER_REVIEW_STATUSES:
for process in ['process_sandbox', 'process_preliminary']:
self.file.update(reviewed=None)
self.setup_data(status)
getattr(self.helper.handler, process)()
assert File.objects.get(pk=self.file.pk).reviewed, (
'Reviewed for status %r, %s()' % (status, process))
def test_page_title_unicode():
t = Translation(localized_string=u'\u30de\u30eb\u30c1\u30d712\u30eb')
request = Mock()
request.APP = amo.FIREFOX
helpers.editor_page_title({'request': request}, title=t)
def test_send_email_autoescape():
# Make sure HTML is not auto-escaped.
s = 'woo&&<>\'""'
ctx = dict(name=s, review_url=s, reviewer=s, comments=s, SITE_URL=s)
helpers.send_mail('editors/emails/super_review.ltxt',
'aww yeah', ['xx'], ctx)
assert len(mail.outbox) == 1
assert mail.outbox[0].body.count(s) == len(ctx)
class TestCompareLink(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestCompareLink, self).setUp()
self.addon = Addon.objects.get(pk=3615)
self.current = File.objects.get(pk=67442)
self.version = Version.objects.create(addon=self.addon)
def test_same_platform(self):
file = File.objects.create(version=self.version,
platform=self.current.platform)
assert file.pk == helpers.file_compare(self.current, self.version).pk
def test_different_platform(self):
file = File.objects.create(version=self.version,
platform=self.current.platform)
File.objects.create(version=self.version,
platform=amo.PLATFORM_LINUX.id)
assert file.pk == helpers.file_compare(self.current, self.version).pk
def test_specific_platform(self):
self.current.platform_id = amo.PLATFORM_LINUX.id
self.current.save()
linux = File.objects.create(version=self.version,
platform=amo.PLATFORM_LINUX.id)
assert linux.pk == helpers.file_compare(self.current, self.version).pk
def test_no_platform(self):
self.current.platform_id = amo.PLATFORM_LINUX.id
self.current.save()
file = File.objects.create(version=self.version,
platform=amo.PLATFORM_WIN.id)
assert file.pk == helpers.file_compare(self.current, self.version).pk
def test_version_status():
addon = Addon()
version = Version()
version.all_files = [File(status=amo.STATUS_PUBLIC),
File(status=amo.STATUS_UNREVIEWED)]
assert u'Fully Reviewed,Awaiting Review' == (
helpers.version_status(addon, version))
version.all_files = [File(status=amo.STATUS_UNREVIEWED)]
assert u'Awaiting Review' == helpers.version_status(addon, version)
|
eezee-it/account-invoicing | refs/heads/8.0 | account_invoice_triple_discount/models/__init__.py | 12 | # -*- coding: utf-8 -*-
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import account_invoice
|
alviproject/alvi | refs/heads/master | alvi/config/django_settings.py | 1 | import os
import alvi
PROJECT_BASE_DIR = os.path.dirname(alvi.__file__)
BASE_DIR = os.path.dirname(PROJECT_BASE_DIR)
#TODO add warning about secret key and debug
SECRET_KEY = 'hn+-d5ox)bdr)a6+yn6+m3wazw0n2=6mi#cc839sb@=rs9=2%y'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['localhost']
INSTALLED_APPS = (
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.debug",
"django.core.context_processors.static",
"alvi.context_processor",
)
ROOT_URLCONF = 'alvi.urls'
WSGI_APPLICATION = 'alvi.wsgi.application'
STATIC_URL = '/static/'
#STATICFILES_DIRS = (os.path.join(PROJECT_BASE_DIR, 'static'),)
STATIC_ROOT = os.path.join(PROJECT_BASE_DIR, '../static')
TEMPLATE_DIRS = (os.path.join(PROJECT_BASE_DIR, 'templates'),) |
ABaldwinHunter/django-clone | refs/heads/master | tests/admin_ordering/tests.py | 52 | from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from django.contrib.auth.models import User
from django.test import RequestFactory, TestCase
from .models import (
Band, DynOrderingBandAdmin, Song, SongInlineDefaultOrdering,
SongInlineNewOrdering,
)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
def has_module_perms(self, module):
return True
request = MockRequest()
request.user = MockSuperUser()
site = admin.AdminSite()
class TestAdminOrdering(TestCase):
    """
    Let's make sure that ModelAdmin.get_queryset uses the ordering we define
    in ModelAdmin rather that ordering defined in the model's inner Meta
    class.
    """
    def setUp(self):
        self.request_factory = RequestFactory()
        # Ranks deliberately disagree with alphabetical order so that the
        # two possible orderings produce distinguishable results.
        Band.objects.bulk_create([
            Band(name='Aerosmith', bio='', rank=3),
            Band(name='Radiohead', bio='', rank=1),
            Band(name='Van Halen', bio='', rank=2),
        ])
    def test_default_ordering(self):
        """
        The default ordering should be by name, as specified in the inner Meta
        class.
        """
        ma = ModelAdmin(Band, site)
        names = [b.name for b in ma.get_queryset(request)]
        self.assertListEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names)
    def test_specified_ordering(self):
        """
        Let's use a custom ModelAdmin that changes the ordering, and make sure
        it actually changes.
        """
        class BandAdmin(ModelAdmin):
            ordering = ('rank',) # default ordering is ('name',)
        ma = BandAdmin(Band, site)
        names = [b.name for b in ma.get_queryset(request)]
        self.assertListEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names)
    def test_dynamic_ordering(self):
        """
        Let's use a custom ModelAdmin that changes the ordering dynamically.
        """
        super_user = User.objects.create(username='admin', is_superuser=True)
        other_user = User.objects.create(username='other')
        request = self.request_factory.get('/')
        request.user = super_user
        ma = DynOrderingBandAdmin(Band, site)
        # Superusers see the rank-based ordering...
        names = [b.name for b in ma.get_queryset(request)]
        self.assertListEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names)
        # ...everyone else falls back to the model's name ordering.
        request.user = other_user
        names = [b.name for b in ma.get_queryset(request)]
        self.assertListEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names)
class TestInlineModelAdminOrdering(TestCase):
    """
    Let's make sure that InlineModelAdmin.get_queryset uses the ordering we
    define in InlineModelAdmin.
    """
    def setUp(self):
        self.band = Band.objects.create(name='Aerosmith', bio='', rank=3)
        # Songs are created out of alphabetical order on purpose.
        Song.objects.bulk_create([
            Song(band=self.band, name='Pink', duration=235),
            Song(band=self.band, name='Dude (Looks Like a Lady)', duration=264),
            Song(band=self.band, name='Jaded', duration=214),
        ])
    def test_default_ordering(self):
        """
        The default ordering should be by name, as specified in the inner Meta
        class.
        """
        inline = SongInlineDefaultOrdering(self.band, site)
        names = [s.name for s in inline.get_queryset(request)]
        self.assertListEqual(['Dude (Looks Like a Lady)', 'Jaded', 'Pink'], names)
    def test_specified_ordering(self):
        """
        Let's check with ordering set to something different than the default.
        """
        inline = SongInlineNewOrdering(self.band, site)
        names = [s.name for s in inline.get_queryset(request)]
        self.assertListEqual(['Jaded', 'Pink', 'Dude (Looks Like a Lady)'], names)
class TestRelatedFieldsAdminOrdering(TestCase):
    """
    The choices of related (FK and M2M) form fields should honor the related
    model's registered ModelAdmin ordering, falling back to the model's
    Meta.ordering when no admin ordering is defined.
    """
    def setUp(self):
        self.b1 = Band.objects.create(name='Pink Floyd', bio='', rank=1)
        self.b2 = Band.objects.create(name='Foo Fighters', bio='', rank=5)
        # we need to register a custom ModelAdmin (instead of just using
        # ModelAdmin) because the field creator tries to find the ModelAdmin
        # for the related model
        class SongAdmin(admin.ModelAdmin):
            pass
        site.register(Song, SongAdmin)
    def tearDown(self):
        site.unregister(Song)
        if Band in site._registry:
            site.unregister(Band)
    def check_ordering_of_field_choices(self, correct_ordering):
        """Assert both the FK and M2M form fields list bands in the given order."""
        fk_field = site._registry[Song].formfield_for_foreignkey(Song.band.field, request=None)
        m2m_field = site._registry[Song].formfield_for_manytomany(Song.other_interpreters.field, request=None)
        self.assertListEqual(list(fk_field.queryset), correct_ordering)
        self.assertListEqual(list(m2m_field.queryset), correct_ordering)
    def test_no_admin_fallback_to_model_ordering(self):
        # should be ordered by name (as defined by the model)
        self.check_ordering_of_field_choices([self.b2, self.b1])
    def test_admin_with_no_ordering_fallback_to_model_ordering(self):
        class NoOrderingBandAdmin(admin.ModelAdmin):
            pass
        site.register(Band, NoOrderingBandAdmin)
        # should be ordered by name (as defined by the model)
        self.check_ordering_of_field_choices([self.b2, self.b1])
    def test_admin_ordering_beats_model_ordering(self):
        class StaticOrderingBandAdmin(admin.ModelAdmin):
            ordering = ('rank',)
        site.register(Band, StaticOrderingBandAdmin)
        # should be ordered by rank (defined by the ModelAdmin)
        self.check_ordering_of_field_choices([self.b1, self.b2])
    def test_custom_queryset_still_wins(self):
        """Test that custom queryset has still precedence (#21405)"""
        class SongAdmin(admin.ModelAdmin):
            # Exclude one of the two Bands from the querysets
            def formfield_for_foreignkey(self, db_field, request, **kwargs):
                if db_field.name == 'band':
                    kwargs["queryset"] = Band.objects.filter(rank__gt=2)
                return super(SongAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
            def formfield_for_manytomany(self, db_field, request, **kwargs):
                if db_field.name == 'other_interpreters':
                    kwargs["queryset"] = Band.objects.filter(rank__gt=2)
                # BUG FIX: this previously delegated to formfield_for_foreignkey
                # (copy-paste error); the M2M override must call the M2M hook.
                return super(SongAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
        class StaticOrderingBandAdmin(admin.ModelAdmin):
            ordering = ('rank',)
        site.unregister(Song)
        site.register(Song, SongAdmin)
        site.register(Band, StaticOrderingBandAdmin)
        self.check_ordering_of_field_choices([self.b2])
|
ndebuhr/thermo-state-solver | refs/heads/master | thermo-env/lib/python3.5/site-packages/pip/__main__.py | 834 | from __future__ import absolute_import
import os
import sys
# If we are running from a wheel, add the wheel to sys.path
# This allows the usage python pip-*.whl/pip install pip-*.whl
if __package__ == '':
    # __file__ is pip-*.whl/pip/__main__.py
    # first dirname call strips off '/__main__.py', second strips off '/pip'
    # Resulting path is the name of the wheel itself
    # Add that to sys.path so we can import pip
    path = os.path.dirname(os.path.dirname(__file__))
    sys.path.insert(0, path)
import pip # noqa
if __name__ == '__main__':
    # Delegate to pip's CLI entry point and propagate its exit status.
    sys.exit(pip.main())
|
mwgit00/pox | refs/heads/master | test_util.py | 1 |
import unittest
import time
import poxutil as fu
class TestUtil(unittest.TestCase):
    """Tests for poxutil's PolledTimer and PhraseManager helpers.

    NOTE(review): these tests use real time.sleep() calls, so the suite
    takes several wall-clock seconds to run.
    """
    def test_timer1_basic(self):
        # create a 1-second timer
        # update and see if it expires
        timer = fu.PolledTimer()
        timer.start(1)
        time.sleep(0.5)
        # update() appears to return (expired_flag, seconds_remaining)
        # -- TODO confirm against poxutil
        f, t = timer.update()
        self.assertEqual(f, False)
        self.assertEqual(t, 1)
        time.sleep(1)
        f, t = timer.update()
        self.assertEqual(f, True)
        self.assertEqual(t, 0)
    def test_timer2_restart(self):
        # create a 2-second timer
        # update twice then restart
        timer = fu.PolledTimer()
        timer.start(2)
        time.sleep(0.5)
        f, t = timer.update()
        self.assertEqual(f, False)
        self.assertEqual(t, 2)
        time.sleep(1)
        f, t = timer.update()
        self.assertEqual(f, False)
        self.assertEqual(t, 1)
        time.sleep(1)
        # restarting resets the remaining time even after the timer lapsed
        timer.start(2)
        self.assertEqual(timer.sec(), 2)
    def test_timer3_stop(self):
        # create a 30-second timer
        # do one update
        # stop the timer
        timer = fu.PolledTimer()
        timer.start(30)
        time.sleep(1.5)
        f, t, = timer.update()
        self.assertEqual(f, False)
        self.assertEqual(t, 29)
        # stop() zeroes the remaining seconds
        timer.stop()
        self.assertEqual(timer.sec(), 0)
    def test_timer4_fast(self):
        # update 10 times per second
        # see if 1-sec timer expires after 10 updates
        timer = fu.PolledTimer()
        timer.start(1)
        ct = 0
        done = False
        while not done:
            # pad this a little to prevent any math weirdness
            time.sleep(0.105)
            ct += 1
            done, t = timer.update()
        self.assertEqual(ct, 10)
    def test_pm1(self):
        # see if we can handle non-existent file and get dummy phrase
        pm = fu.PhraseManager()
        self.assertFalse(pm.load("bogusname"))
        self.assertEqual(pm.next_phrase(), "this is a test")
# Allow running this test module directly with `python test_util.py`.
if __name__ == '__main__':
    unittest.main()
|
kylon/pacman-fakeroot | refs/heads/upstream | test/pacman/tests/upgrade046.py | 11 | self.description = "File relocation between two packages (reverse order, --force)"
# Local (installed) packages: "usr/share/file" currently belongs to foobar.
lp1 = pmpkg("dummy")
lp1.files = ["bin/dummy"]
lp2 = pmpkg("foobar")
lp2.files = ["bin/foobar",
             "usr/share/file"]
for p in lp1, lp2:
    self.addpkg2db("local", p)
# Upgrade packages: "usr/share/file" moves from foobar to dummy.
p1 = pmpkg("dummy")
p1.files = ["bin/dummy",
            "usr/share/file"]
p2 = pmpkg("foobar")
p2.files = ["bin/foobar"]
for p in p1, p2:
    self.addpkg(p)
# --force is needed because the relocated file momentarily belongs to both
# packages during the transaction.
self.args = "-U --force %s" % " ".join([p.filename() for p in (p1, p2)])
self.addrule("PACMAN_RETCODE=0")
for p in p1, p2:
    self.addrule("PKG_EXIST=%s" % p.name)
self.addrule("FILE_MODIFIED=bin/dummy")
self.addrule("FILE_MODIFIED=bin/foobar")
self.addrule("FILE_EXIST=usr/share/file")
self.addrule("FILE_MODIFIED=usr/share/file")
|
pdebuyl/numpy | refs/heads/master | numpy/doc/ufuncs.py | 17 | """
===================
Universal Functions
===================
Ufuncs are, generally speaking, mathematical functions or operations that are
applied element-by-element to the contents of an array. That is, the result
in each output array element only depends on the value in the corresponding
input array (or arrays) and on no other array elements. NumPy comes with a
large suite of ufuncs, and scipy extends that suite substantially. The simplest
example is the addition operator: ::
>>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
array([1, 3, 2, 6])
The ufunc module lists all the available ufuncs in numpy. Documentation on
the specific ufuncs may be found in those modules. This documentation is
intended to address the more general aspects of ufuncs common to most of
them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
have equivalent functions defined (e.g. add() for +)
Type coercion
=============
What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of
two different types? What is the type of the result? Typically, the result is
the higher of the two types. For example: ::
float32 + float64 -> float64
int8 + int32 -> int32
int16 + float32 -> float32
float32 + complex64 -> complex64
There are some less obvious cases generally involving mixes of types
(e.g. uints, ints and floats) where equal bit sizes for each are not
capable of saving all the information in a different type of equivalent
bit size. Some examples are int32 vs float32 or uint32 vs int32.
Generally, the result is the higher type of larger size than both
(if available). So: ::
int32 + float32 -> float64
uint32 + int32 -> int64
Finally, the type coercion behavior when expressions involve Python
scalars is different than that seen for arrays. Since Python has a
limited number of types, combining a Python int with a dtype=np.int8
array does not coerce to the higher type but instead, the type of the
array prevails. So the rules for Python scalars combined with arrays is
that the result will be that of the array equivalent the Python scalar
if the Python scalar is of a higher 'kind' than the array (e.g., float
vs. int), otherwise the resultant type will be that of the array.
For example: ::
Python int + int8 -> int8
Python float + int8 -> float64
ufunc methods
=============
Binary ufuncs support 4 methods.
**.reduce(arr)** applies the binary operator to elements of the array in
sequence. For example: ::
>>> np.add.reduce(np.arange(10)) # adds all elements of array
45
For multidimensional arrays, the first dimension is reduced by default: ::
>>> np.add.reduce(np.arange(10).reshape(2,5))
array([ 5, 7, 9, 11, 13])
The axis keyword can be used to specify different axes to reduce: ::
>>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
array([10, 35])
**.accumulate(arr)** applies the binary operator and generates an
equivalently shaped array that includes the accumulated amount for each
element of the array. A couple examples: ::
>>> np.add.accumulate(np.arange(10))
array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
>>> np.multiply.accumulate(np.arange(1,9))
array([ 1, 2, 6, 24, 120, 720, 5040, 40320])
The behavior for multidimensional arrays is the same as for .reduce(),
as is the use of the axis keyword.
**.reduceat(arr,indices)** allows one to apply reduce to selected parts
of an array. It is a difficult method to understand. See the reduceat
documentation in the ufunc reference for details.
**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
arr2. It will work on multidimensional arrays (the shape of the result is
the concatenation of the two input shapes.: ::
>>> np.multiply.outer(np.arange(3),np.arange(4))
array([[0, 0, 0, 0],
[0, 1, 2, 3],
[0, 2, 4, 6]])
Output arguments
================
All ufuncs accept an optional output array. The array must be of the expected
output shape. Beware that if the type of the output array is of a different
(and lower) type than the output result, the results may be silently truncated
or otherwise corrupted in the downcast to the lower type. This usage is useful
when one wants to avoid creating large temporary arrays and instead allows one
to reuse the same array memory repeatedly (at the expense of not being able to
use more convenient operator notation in expressions). Note that when the
output argument is used, the ufunc still returns a reference to the result.
>>> x = np.arange(2)
>>> np.add(np.arange(2),np.arange(2.),x)
array([0, 2])
>>> x
array([0, 2])
and & or as ufuncs
==================
Invariably people try to use the python 'and' and 'or' as logical operators
(and quite understandably). But these operators do not behave as normal
operators since Python treats these quite differently. They cannot be
overloaded with array equivalents. Thus using 'and' or 'or' with an array
results in an error. There are two alternatives:
1) use the ufunc functions logical_and() and logical_or().
2) use the bitwise operators & and \\|. The drawback of these is that if
the arguments to these operators are not boolean arrays, the result is
likely incorrect. On the other hand, most usages of logical_and and
logical_or are with boolean arrays. As long as one is careful, this is
a convenient way to apply these operators.
"""
|
cmtm/networkx | refs/heads/master | networkx/linalg/tests/test_laplacian.py | 21 | from nose import SkipTest
import networkx as nx
from networkx.generators.degree_seq import havel_hakimi_graph
class TestLaplacian(object):
    """Tests for the graph, normalized and directed Laplacian matrices.

    Skipped entirely (via nose's SkipTest) when numpy/scipy are missing.
    """
    numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
    @classmethod
    def setupClass(cls):
        # nose-style class setup: import numerics lazily so the module can
        # still be collected without numpy/scipy installed.
        global numpy
        global scipy
        global assert_equal
        global assert_almost_equal
        try:
            import numpy
            import scipy
            from numpy.testing import assert_equal,assert_almost_equal
        except ImportError:
            raise SkipTest('SciPy not available.')
    def setUp(self):
        # Base graph from a fixed degree sequence, plus weighted, multi-,
        # and self-loop variants of it.
        deg=[3,2,2,1,0]
        self.G=havel_hakimi_graph(deg)
        self.WG=nx.Graph( (u,v,{'weight':0.5,'other':0.3})
                for (u,v) in self.G.edges() )
        self.WG.add_node(4)
        self.MG=nx.MultiGraph(self.G)
        # Graph with selfloops
        self.Gsl = self.G.copy()
        for node in self.Gsl.nodes():
            self.Gsl.add_edge(node, node)
    def test_laplacian(self):
        "Graph Laplacian"
        NL=numpy.array([[ 3, -1, -1, -1, 0],
                        [-1, 2, -1, 0, 0],
                        [-1, -1, 2, 0, 0],
                        [-1, 0, 0, 1, 0],
                        [ 0, 0, 0, 0, 0]])
        WL=0.5*NL
        OL=0.3*NL
        assert_equal(nx.laplacian_matrix(self.G).todense(),NL)
        assert_equal(nx.laplacian_matrix(self.MG).todense(),NL)
        assert_equal(nx.laplacian_matrix(self.G,nodelist=[0,1]).todense(),
                     numpy.array([[ 1, -1],[-1, 1]]))
        assert_equal(nx.laplacian_matrix(self.WG).todense(),WL)
        assert_equal(nx.laplacian_matrix(self.WG,weight=None).todense(),NL)
        assert_equal(nx.laplacian_matrix(self.WG,weight='other').todense(),OL)
    def test_normalized_laplacian(self):
        "Generalized Graph Laplacian"
        GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00],
                        [-0.408, 1.00, -0.50, 0.00 , 0.00],
                        [-0.408, -0.50, 1.00, 0.00, 0.00],
                        [-0.577, 0.00, 0.00, 1.00, 0.00],
                        [ 0.00, 0.00, 0.00, 0.00, 0.00]])
        Lsl = numpy.array([[ 0.75 , -0.2887, -0.2887, -0.3536, 0.],
                           [-0.2887, 0.6667, -0.3333, 0. , 0.],
                           [-0.2887, -0.3333, 0.6667, 0. , 0.],
                           [-0.3536, 0. , 0. , 0.5 , 0.],
                           [ 0. , 0. , 0. , 0. , 0.]])
        assert_almost_equal(nx.normalized_laplacian_matrix(self.G).todense(),
                            GL,decimal=3)
        assert_almost_equal(nx.normalized_laplacian_matrix(self.MG).todense(),
                            GL,decimal=3)
        assert_almost_equal(nx.normalized_laplacian_matrix(self.WG).todense(),
                            GL,decimal=3)
        assert_almost_equal(nx.normalized_laplacian_matrix(self.WG,weight='other').todense(),
                            GL,decimal=3)
        assert_almost_equal(nx.normalized_laplacian_matrix(self.Gsl).todense(),
                            Lsl, decimal=3)
    def test_directed_laplacian(self):
        "Directed Laplacian"
        # Graph used as an example in Sec. 4.1 of Langville and Meyer,
        # "Google's PageRank and Beyond". The graph contains dangling nodes, so
        # the pagerank random walk is selected by directed_laplacian
        G = nx.DiGraph()
        G.add_edges_from(((1,2), (1,3), (3,1), (3,2), (3,5), (4,5), (4,6),
                          (5,4), (5,6), (6,4)))
        GL = numpy.array([[ 0.9833, -0.2941, -0.3882, -0.0291, -0.0231, -0.0261],
                          [-0.2941, 0.8333, -0.2339, -0.0536, -0.0589, -0.0554],
                          [-0.3882, -0.2339, 0.9833, -0.0278, -0.0896, -0.0251],
                          [-0.0291, -0.0536, -0.0278, 0.9833, -0.4878, -0.6675],
                          [-0.0231, -0.0589, -0.0896, -0.4878, 0.9833, -0.2078],
                          [-0.0261, -0.0554, -0.0251, -0.6675, -0.2078, 0.9833]])
        assert_almost_equal(nx.directed_laplacian_matrix(G, alpha=0.9), GL, decimal=3)
        # Make the graph strongly connected, so we can use a random and lazy walk
        G.add_edges_from((((2,5), (6,1))))
        GL = numpy.array([[ 1. , -0.3062, -0.4714, 0. , 0. , -0.3227],
                          [-0.3062, 1. , -0.1443, 0. , -0.3162, 0. ],
                          [-0.4714, -0.1443, 1. , 0. , -0.0913, 0. ],
                          [ 0. , 0. , 0. , 1. , -0.5 , -0.5 ],
                          [ 0. , -0.3162, -0.0913, -0.5 , 1. , -0.25 ],
                          [-0.3227, 0. , 0. , -0.5 , -0.25 , 1. ]])
        assert_almost_equal(nx.directed_laplacian_matrix(G, walk_type='random'), GL, decimal=3)
        GL = numpy.array([[ 0.5 , -0.1531, -0.2357, 0. , 0. , -0.1614],
                          [-0.1531, 0.5 , -0.0722, 0. , -0.1581, 0. ],
                          [-0.2357, -0.0722, 0.5 , 0. , -0.0456, 0. ],
                          [ 0. , 0. , 0. , 0.5 , -0.25 , -0.25 ],
                          [ 0. , -0.1581, -0.0456, -0.25 , 0.5 , -0.125 ],
                          [-0.1614, 0. , 0. , -0.25 , -0.125 , 0.5 ]])
        assert_almost_equal(nx.directed_laplacian_matrix(G, walk_type='lazy'), GL, decimal=3)
|
isabernardes/Heriga | refs/heads/master | Herigaenv/lib/python2.7/site-packages/django/contrib/sites/migrations/0002_alter_domain_unique.py | 379 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.sites.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a unique constraint to Site.domain (keeping the custom validator)."""
    dependencies = [
        ('sites', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='site',
            name='domain',
            field=models.CharField(
                max_length=100, unique=True, validators=[django.contrib.sites.models._simple_domain_name_validator],
                verbose_name='domain name'
            ),
        ),
    ]
|
cloudera/recordservice | refs/heads/master | testdata/bin/generate-schema-statements.py | 1 | #!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# This script generates the "CREATE TABLE", "INSERT", and "LOAD" statements for loading
# test data and writes them to create-*-generated.sql and
# load-*-generated.sql. These files are then executed by hive or impala, depending
# on their contents. Additionally, for hbase, the file is of the form
# create-*hbase*-generated.create.
#
# The statements that are generated are based on an input test vector
# (read from a file) that describes the coverage desired. For example, currently
# we want to run benchmarks with different data sets, across different file types, and
# with different compression algorithms set. To improve data loading performance this
# script will generate an INSERT INTO statement to generate the data if the file does
# not already exist in HDFS. If the file does already exist in HDFS then we simply issue a
# LOAD statement which is much faster.
#
# The input test vectors are generated via the generate_test_vectors.py so
# ensure that script has been run (or the test vector files already exist) before
# running this script.
#
# Note: This statement generation is assuming the following data loading workflow:
# 1) Load all the data in the specified source table
# 2) Create tables for the new file formats and compression types
# 3) Run INSERT OVERWRITE TABLE SELECT * from the source table into the new tables
# or LOAD directly if the file already exists in HDFS.
import collections
import csv
import glob
import math
import json
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
from itertools import product
from optparse import OptionParser
from tests.util.test_file_parser import *
from tests.common.test_dimensions import *
# Command-line options controlling which tables/formats get generated.
parser = OptionParser()
parser.add_option("-e", "--exploration_strategy", dest="exploration_strategy",
                  default="core", help="The exploration strategy for schema gen: 'core',"\
                  " 'pairwise', or 'exhaustive'")
parser.add_option("--hive_warehouse_dir", dest="hive_warehouse_dir",
                  default="/test-warehouse",
                  help="The HDFS path to the base Hive test warehouse directory")
parser.add_option("-w", "--workload", dest="workload",
                  help="The workload to generate schema for: tpch, hive-benchmark, ...")
parser.add_option("-s", "--scale_factor", dest="scale_factor", default="",
                  help="An optional scale factor to generate the schema for")
parser.add_option("-f", "--force_reload", dest="force_reload", action="store_true",
                  default= False, help='Skips HDFS exists check and reloads all tables')
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                  default = False, help="If set, outputs additional logging.")
parser.add_option("-b", "--backend", dest="backend", default="localhost:21000",
                  help="Backend connection to use, default: localhost:21000")
parser.add_option("--table_names", dest="table_names", default=None,
                  help="Only load the specified tables - specified as a comma-seperated "\
                  "list of base table names")
parser.add_option("--table_formats", dest="table_formats", default=None,
                  help="Override the test vectors and load using the specified table "\
                  "formats. Ex. --table_formats=seq/snap/block,text/none")
parser.add_option("--hdfs_namenode", dest="hdfs_namenode", default="localhost:20500",
                  help="HDFS name node for Avro schema URLs, default localhost:20500")
(options, args) = parser.parse_args()
if options.workload is None:
  print "A workload name must be specified."
  parser.print_help()
  sys.exit(1)
# Output directory for the generated SQL/load scripts.
DATA_LOAD_DIR = '/tmp/data-load-files'
WORKLOAD_DIR = os.path.join(os.environ['IMPALA_HOME'], 'testdata', 'workloads')
DATASET_DIR = os.path.join(os.environ['IMPALA_HOME'], 'testdata', 'datasets')
AVRO_SCHEMA_DIR = "avro_schemas"
DEFAULT_FS=os.environ['DEFAULT_FS']
# Formats Impala can INSERT into directly; everything else loads via Hive.
IMPALA_SUPPORTED_INSERT_FORMATS = ['parquet', 'hbase', 'text', 'kudu']
# Templates for the Hive session settings prepended to generated statements.
COMPRESSION_TYPE = "SET mapred.output.compression.type=%s;"
COMPRESSION_ENABLED = "SET hive.exec.compress.output=%s;"
COMPRESSION_CODEC = "SET mapred.output.compression.codec=%s;"
AVRO_COMPRESSION_CODEC = "SET avro.output.codec=%s;"
SET_DYNAMIC_PARTITION_STATEMENT = "SET hive.exec.dynamic.partition=true;"
SET_PARTITION_MODE_NONSTRICT_STATEMENT = "SET hive.exec.dynamic.partition.mode=nonstrict;"
SET_HIVE_INPUT_FORMAT = "SET mapred.max.split.size=256000000;\n"\
                        "SET hive.input.format=org.apache.hadoop.hive.ql.io.%s;\n"
SET_HIVE_HBASE_BULK_LOAD = "SET hive.hbase.bulk = true"
# Column indexes into a test-vector row.
FILE_FORMAT_IDX = 0
DATASET_IDX = 1
CODEC_IDX = 2
COMPRESSION_TYPE_IDX = 3
# Short codec name -> Hadoop compression codec class.
COMPRESSION_MAP = {'def': 'org.apache.hadoop.io.compress.DefaultCodec',
                   'gzip': 'org.apache.hadoop.io.compress.GzipCodec',
                   'bzip': 'org.apache.hadoop.io.compress.BZip2Codec',
                   'snap': 'org.apache.hadoop.io.compress.SnappyCodec',
                   'lzo': 'com.hadoop.compression.lzo.LzopCodec',
                   'none': ''
                  }
# Avro uses its own codec names rather than codec classes.
AVRO_COMPRESSION_MAP = {
  'def': 'deflate',
  'snap': 'snappy',
  'none': '',
  }
# Short format name -> Hive STORED AS / STORED BY clause.
FILE_FORMAT_MAP = {
  'text': 'TEXTFILE',
  'seq': 'SEQUENCEFILE',
  'rc': 'RCFILE',
  'parquet': 'PARQUET',
  'text_lzo':
    "\nINPUTFORMAT 'com.hadoop.mapred.DeprecatedLzoTextInputFormat'" +
    "\nOUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'",
  'avro': 'AVRO',
  'hbase': "'org.apache.hadoop.hive.hbase.HBaseStorageHandler'"
  }
HIVE_TO_AVRO_TYPE_MAP = {
  'STRING': 'string',
  'INT': 'int',
  'TINYINT': 'int',
  'SMALLINT': 'int',
  'BIGINT': 'long',
  'BOOLEAN': 'boolean',
  'FLOAT': 'float',
  'DOUBLE': 'double',
  # Avro has no timestamp type, so convert to string
  # TODO: this allows us to create our Avro test tables, but any tests that use
  # a timestamp column will fail. We probably want to convert back to timestamps
  # in our tests.
  'TIMESTAMP': 'string',
  }
PARQUET_ALTER_STATEMENT = "ALTER TABLE %(table_name)s SET\n\
  SERDEPROPERTIES ('blocksize' = '1073741824', 'compression' = '%(compression)s');"
HBASE_CREATE_STATEMENT = """
CREATE EXTERNAL TABLE IF NOT EXISTS {{db_name}}{{db_suffix}}.{{table_name}} (
{columns})
STORED BY {{file_format}}
WITH SERDEPROPERTIES (
  "hbase.columns.mapping" =
  "{hbase_column_mapping}")
{tbl_properties}{{hdfs_location}}"""
KNOWN_EXPLORATION_STRATEGIES = ['core', 'pairwise', 'exhaustive', 'lzo']
def build_create_statement(table_template, table_name, db_name, db_suffix,
                           file_format, compression, hdfs_location):
  """Fills in a CREATE TABLE template, prefixed with CREATE DATABASE (and an
  optional DROP TABLE when --force_reload is set)."""
  statements = ['CREATE DATABASE IF NOT EXISTS %s%s;\n' % (db_name, db_suffix)]
  if options.force_reload:
    statements.append('DROP TABLE IF EXISTS %s%s.%s;\n' % (db_name, db_suffix, table_name))
  effective_format = file_format
  if compression == 'lzo':
    # LZO-compressed text has a dedicated entry in FILE_FORMAT_MAP.
    effective_format = '%s_%s' % (file_format, compression)
  location = hdfs_location
  if effective_format == 'hbase':
    # HBase tables are external and are not backed by HDFS files.
    location = str()
  statements.append(table_template.format(db_name=db_name,
                                          db_suffix=db_suffix,
                                          table_name=table_name,
                                          file_format=FILE_FORMAT_MAP[effective_format],
                                          hdfs_location=location))
  return ''.join(statements)
def build_table_template(file_format, columns, partition_columns, row_format,
                         avro_schema_dir, table_name):
  """Returns a CREATE TABLE statement template for the given table format.

  The returned template still contains the {db_name}, {db_suffix},
  {table_name}, {file_format} and {hdfs_location} placeholders which are
  filled in later by build_create_statement().
  """
  if file_format == 'hbase':
    return build_hbase_create_stmt_in_hive(columns, partition_columns, table_name)
  partitioned_by = str()
  if partition_columns:
    partitioned_by = 'PARTITIONED BY (%s)' % ', '.join(partition_columns.split('\n'))
  row_format_stmt = str()
  if row_format:
    row_format_stmt = 'ROW FORMAT ' + row_format
  # Collect table properties in a dict and render them into a TBLPROPERTIES
  # clause below. BUG FIX: this was previously initialized with str(), so the
  # item assignments in the Avro branch raised a TypeError.
  tblproperties = {}
  if file_format == 'avro':
    # TODO Is this flag ever used?
    if options.hdfs_namenode is None:
      tblproperties["avro.schema.url"] = "%s/%s/%s/{table_name}.json" \
          % (DEFAULT_FS, options.hive_warehouse_dir, avro_schema_dir)
    else:
      tblproperties["avro.schema.url"] = "hdfs://%s/%s/%s/{table_name}.json" \
          % (options.hdfs_namenode, options.hive_warehouse_dir, avro_schema_dir)
  elif file_format == 'parquet':
    row_format_stmt = str()
  tblproperties_clause = str()
  if tblproperties:
    tblproperties_clause = "TBLPROPERTIES (\n%s\n)" % ',\n'.join(
        "  '%s' = '%s'" % (k, v) for k, v in sorted(tblproperties.items()))
  # Note: columns are ignored but allowed if a custom serde is specified
  # (e.g. Avro)
  stmt = """
CREATE EXTERNAL TABLE IF NOT EXISTS {{db_name}}{{db_suffix}}.{{table_name}} (
{columns})
{partitioned_by}
{row_format}
STORED AS {{file_format}}
LOCATION '{{hdfs_location}}'
{tblproperties}
""".format(
    row_format=row_format_stmt,
    columns=',\n'.join(columns.split('\n')),
    partitioned_by=partitioned_by,
    tblproperties=tblproperties_clause
  ).strip()
  # Remove empty lines from the stmt string. There is an empty line for
  # each of the sections that didn't have anything (e.g. partitioned_by)
  stmt = os.linesep.join([s for s in stmt.splitlines() if s])
  stmt += ';'
  return stmt
def build_hbase_create_stmt_in_hive(columns, partition_columns, table_name):
  """Returns the Hive CREATE statement template for an HBase-backed table."""
  # The hbase create statement differs sufficiently from the generic create to justify a
  # separate method. Specifically, STORED AS becomes STORED BY. There is a section called
  # serdeproperties, and the partition columns have to be appended to columns in the schema.
  columns = columns.split('\n')
  # partition columns have to be appended to the columns in the schema.
  # PARTITIONED BY is not supported and does not make sense for HBase.
  if partition_columns:
    columns.extend(partition_columns.split('\n'))
  # stringid is a special case. It still points to functional_hbase.alltypesagg
  if 'stringid' not in table_name:
    tbl_properties = ('TBLPROPERTIES("hbase.table.name" = '
                      '"{db_name}{db_suffix}.{table_name}")')
  else:
    tbl_properties = ('TBLPROPERTIES("hbase.table.name" = '
                      '"{db_name}{db_suffix}.alltypesagg")')
  # build hbase column mapping, the first column is implicitly the primary key
  # which has a different representation [:key]
  hbase_column_mapping = ["d:%s" % c.split(' ')[0] for c in columns[1:]]
  hbase_column_mapping = ":key," + ','.join(hbase_column_mapping)
  stmt = HBASE_CREATE_STATEMENT.format(
    columns=',\n'.join(columns),
    hbase_column_mapping=hbase_column_mapping,
    tbl_properties=tbl_properties,
  ).strip()
  return stmt + ';'
def avro_schema(columns):
  """Builds the JSON string of an Avro record schema for the given Hive
  column specs (one "name type [COMMENT ...]" entry per line)."""
  fields = []
  for column_spec in columns.strip().split('\n'):
    tokens = column_spec.split()
    name = tokens[0]
    if "DECIMAL" in column_spec.upper():
      if tokens[1].upper() == "DECIMAL":
        # Bare DECIMAL: fall back to Hive's default precision/scale.
        precision = 9
        scale = 0
      else:
        # Pull precision and scale out of "DECIMAL(p,s)".
        m = re.search("DECIMAL\((?P<precision>.*),(?P<scale>.*)\)", column_spec.upper())
        assert m, "Could not parse decimal column spec: " + column_spec
        precision = int(m.group('precision'))
        scale = int(m.group('scale'))
      type = {"type": "bytes", "logicalType": "decimal", "precision": precision,
              "scale": scale}
    else:
      type = HIVE_TO_AVRO_TYPE_MAP[tokens[1].upper()]
    # Every column is declared as a union with "null", i.e. nullable.
    fields.append({'name': name, 'type': [type, "null"]})
  # The record name is arbitrary; consumers only care about the field list.
  return json.dumps({"name": "a", "type": "record", "fields": fields})
def build_compression_codec_statement(codec, compression_type, file_format):
  """Returns the Hive SET statement(s) selecting the output compression codec,
  or an empty string when no compression is requested."""
  if file_format == 'avro':
    # Avro compression is configured through a single Avro-specific setting.
    codec_name = AVRO_COMPRESSION_MAP[codec]
    return (AVRO_COMPRESSION_CODEC % codec_name) if codec_name else str()
  codec_name = COMPRESSION_MAP[codec]
  if not codec_name:
    return str()
  return COMPRESSION_TYPE % compression_type.upper() + '\n' + COMPRESSION_CODEC % codec_name
def build_codec_enabled_statement(codec):
  """Returns a SET statement toggling Hive output compression on, unless the
  requested codec is 'none'."""
  if codec == 'none':
    return COMPRESSION_ENABLED % 'false'
  return COMPRESSION_ENABLED % 'true'
def build_insert_into_statement(insert, db_name, db_suffix, table_name, file_format,
    hdfs_path, for_impala=False):
  """Fills in the INSERT template, prefixing Hive session settings unless the
  statement is destined for Impala."""
  insert_statement = insert.format(db_name=db_name,
                                   db_suffix=db_suffix,
                                   table_name=table_name,
                                   hdfs_location=hdfs_path)
  if for_impala:
    # Impala needs no session-configuration preamble.
    return insert_statement
  settings = [SET_PARTITION_MODE_NONSTRICT_STATEMENT,
              SET_DYNAMIC_PARTITION_STATEMENT,
              "set hive.auto.convert.join=true;"]
  # For some reason (hive bug?) we need to have the CombineHiveInputFormat set
  # for cases where we are compressing in bzip or lzo on certain tables that
  # have multiple files.
  if 'multi' in table_name and ('bzip' in db_suffix or 'lzo' in db_suffix):
    input_format = "CombineHiveInputFormat"
  else:
    input_format = "HiveInputFormat"
  return '\n'.join(settings) + '\n' + SET_HIVE_INPUT_FORMAT % input_format + insert_statement
def build_hbase_insert(db_name, db_suffix, table_name):
  """Returns the Hive statements that bulk-load an HBase table from its
  uncompressed base table."""
  names = dict(db_name=db_name, db_suffix=db_suffix, table_name=table_name)
  statement = SET_HIVE_HBASE_BULK_LOAD + ';\n'
  statement += ("INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name}"
                " SELECT * FROM {db_name}.{table_name};\n").format(**names)
  return statement
def build_insert(insert, db_name, db_suffix, file_format,
                 codec, compression_type, table_name, hdfs_path, create_hive=False):
  """Builds the full INSERT script (compression settings + statement)."""
  # HBase inserts need neither the Hive compression options nor an HDFS
  # file location, so they are handled separately.
  if file_format == 'hbase' and not create_hive:
    return build_hbase_insert(db_name, db_suffix, table_name)
  parts = [build_codec_enabled_statement(codec),
           build_compression_codec_statement(codec, compression_type, file_format),
           build_insert_into_statement(insert, db_name, db_suffix,
                                       table_name, file_format, hdfs_path)]
  return '\n'.join(parts) + '\n'
def build_load_statement(load_template, db_name, db_suffix, table_name):
  """Fills in a LOAD template for the given table."""
  params = dict(table_name=table_name,
                db_name=db_name,
                db_suffix=db_suffix)
  # hbase does not need the hdfs path.
  if not table_name.startswith('hbase'):
    params['impala_home'] = os.environ['IMPALA_HOME']
  return load_template.format(**params)
def build_hbase_create_stmt(db_name, table_name, column_families):
  """Returns the hbase shell commands (as a list) that drop and re-create an
  HBase table with the given newline-separated column families."""
  hbase_table_name = "{db_name}_hbase.{table_name}".format(db_name=db_name,
                                                           table_name=table_name)
  families = ','.join("'{0}'".format(cf) for cf in column_families.splitlines())
  # The table must be disabled before it can be dropped.
  return ["disable '%s'" % hbase_table_name,
          "drop '%s'" % hbase_table_name,
          "create '%s', %s" % (hbase_table_name, families)]
def build_db_suffix(file_format, codec, compression_type):
  """Derive the database-name suffix that encodes format and compression.

  Uncompressed text gets no suffix; other uncompressed formats carry only
  the format name; record-compressed tables also carry 'record'.
  """
  if codec == 'none':
    return '' if file_format == 'text' else '_' + file_format
  if compression_type == 'record':
    return '_{0}_record_{1}'.format(file_format, codec)
  return '_{0}_{1}'.format(file_format, codec)
# Does a hdfs directory listing and returns array with all the subdir names.
def get_hdfs_subdirs_with_data(path):
  """Return names of subdirectories of `path` that contain data.

  Runs `hadoop fs -du` and keeps only entries whose size column is non-zero,
  then strips everything up to the last '/' to get the bare subdir name.
  """
  tmp_file = tempfile.TemporaryFile("w+")
  cmd = "hadoop fs -du %s | grep -v '^0' | awk '{print $3}'" % path
  # Discard the child's stderr. /dev/null must be opened for *writing*; the
  # previous read-only open() made the child's stderr writes fail, and the
  # handle was never closed.
  with open(os.devnull, 'w') as devnull:
    subprocess.call([cmd], shell=True, stderr=devnull, stdout=tmp_file)
  tmp_file.seek(0)
  # Results look like:
  # <acls> - <user> <group> <date> /directory/subdirectory
  # So to get subdirectory names just return everything after the last '/'
  return [line[line.rfind('/') + 1:].strip() for line in tmp_file.readlines()]
class Statements(object):
  """Container for SQL statements destined for a single output file.

  Keeps create / load_base / load statements apart so they can be emitted
  in the correct order.
  """
  def __init__(self):
    self.create = list()
    self.load = list()
    self.load_base = list()

  def write_to_file(self, filename):
    # Nothing accumulated -> don't create an empty file.
    if self.__is_empty(): return
    with open(filename, 'w') as f:
      f.write('\n\n'.join(self.create + self.load_base + self.load))

  def __is_empty(self):
    return not (self.create or self.load or self.load_base)
def eval_section(section_str):
"""section_str should be the contents of a section (i.e. a string). If section_str
starts with `, evaluates section_str as a shell command and returns the
output. Otherwise returns section_str."""
if not section_str.startswith('`'): return section_str
cmd = section_str[1:]
# Use bash explicitly instead of setting shell=True so we get more advanced shell
# features (e.g. "for i in {1..n}")
p = subprocess.Popen(['/bin/bash', '-c', cmd], stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if stderr: print stderr
assert p.returncode == 0
return stdout.strip()
def generate_statements(output_name, test_vectors, sections,
                        schema_include_constraints, schema_exclude_constraints):
  """Generate and write out create/load SQL files for every test vector.

  For each (file format, codec, compression) vector and each schema section,
  emits CREATE/LOAD statements into per-format Impala output files plus
  shared Hive and HBase output files. Relies on the module-level `options`
  for table filters, scale factor and force-reload behavior.
  """
  # TODO: This method has become very unwieldy. It has to be re-factored sooner than
  # later.
  # Parquet statements to be executed separately by Impala
  hive_output = Statements()
  hbase_output = Statements()
  hbase_post_load = Statements()
  table_names = None
  if options.table_names:
    table_names = [name.lower() for name in options.table_names.split(',')]
  existing_tables = get_hdfs_subdirs_with_data(options.hive_warehouse_dir)
  for row in test_vectors:
    # Impala outputs are per table-format; Hive/HBase outputs are shared.
    impala_output = Statements()
    impala_load = Statements()
    file_format, data_set, codec, compression_type =\
        [row.file_format, row.dataset, row.compression_codec, row.compression_type]
    table_format = '%s/%s/%s' % (file_format, codec, compression_type)
    for section in sections:
      table_name = section['BASE_TABLE_NAME']
      db_suffix = build_db_suffix(file_format, codec, compression_type)
      db_name = '{0}{1}'.format(data_set, options.scale_factor)
      db = '{0}{1}'.format(db_name, db_suffix)
      if table_names and (table_name.lower() not in table_names):
        print 'Skipping table: %s.%s' % (db, table_name)
        continue
      if schema_include_constraints[table_name.lower()] and \
         table_format not in schema_include_constraints[table_name.lower()]:
        print 'Skipping \'%s.%s\' due to include constraint match' % (db, table_name)
        continue
      if schema_exclude_constraints[table_name.lower()] and\
         table_format in schema_exclude_constraints[table_name.lower()]:
        print 'Skipping \'%s.%s\' due to exclude constraint match' % (db, table_name)
        continue
      alter = section.get('ALTER')
      create = section['CREATE']
      create_hive = section['CREATE_HIVE']
      insert = eval_section(section['DEPENDENT_LOAD'])
      load = eval_section(section['LOAD'])
      # For some datasets we may want to use a different load strategy when running local
      # tests versus tests against large scale factors. The most common reason is to
      # reduce the number of partitions for the local test environment
      if not options.scale_factor and section['LOAD_LOCAL']:
        load = section['LOAD_LOCAL']
      columns = eval_section(section['COLUMNS'])
      partition_columns = section['PARTITION_COLUMNS']
      row_format = section['ROW_FORMAT']
      # Force reloading of the table if the user specified the --force option or
      # if the table is partitioned and there was no ALTER section specified. This is to
      # ensure the partition metadata is always properly created. The ALTER section is
      # used to create partitions, so if that section exists there is no need to force
      # reload.
      # TODO: Rename the ALTER section to ALTER_TABLE_ADD_PARTITION
      force_reload = options.force_reload or (partition_columns and not alter)
      hdfs_location = '{0}.{1}{2}'.format(db_name, table_name, db_suffix)
      # hdfs file names for hive-benchmark and functional datasets are stored
      # directly under /test-warehouse
      # TODO: We should not need to specify the hdfs file path in the schema file.
      # This needs to be done programmatically.
      if data_set in ['hive-benchmark', 'functional']:
        hdfs_location = hdfs_location.split('.')[-1]
      # hive does not allow hyphenated table names.
      if data_set == 'hive-benchmark':
        db_name = '{0}{1}'.format('hivebenchmark', options.scale_factor)
      data_path = os.path.join(options.hive_warehouse_dir, hdfs_location)
      # Empty tables (tables with no "LOAD" sections) are assumed to be used for insert
      # testing. Since Impala currently only supports inserting into TEXT, PARQUET and
      # HBASE we need to create these tables with a supported insert format.
      create_file_format = file_format
      create_codec = codec
      if not (section['LOAD'] or section['LOAD_LOCAL'] or section['DEPENDENT_LOAD']):
        create_codec = 'none'
        create_file_format = file_format
        if file_format not in IMPALA_SUPPORTED_INSERT_FORMATS:
          create_file_format = 'text'
      output = impala_output
      if create_hive or file_format == 'hbase':
        output = hive_output
      elif codec == 'lzo':
        # Impala CREATE TABLE doesn't allow INPUTFORMAT.
        output = hive_output
      # If a CREATE section is provided, use that. Otherwise a COLUMNS section
      # must be provided (and optionally PARTITION_COLUMNS and ROW_FORMAT
      # sections), which is used to generate the create table statement.
      if create_hive:
        table_template = create_hive
        # Loading dependent Avro tables involves generating an Avro schema literal from
        # the COLUMNS section, but the COLUMNS section is not provided for CREATE_HIVE.
        # The custom CREATE TABLE leaves the columns opaque to us, so we cannot generate
        # an Avro schema literal.
        # However, if the schema constraints are set up such that we are only going to
        # to load this single Avro table, then we can safely proceed assuming that the
        # provided CREATE TABLE has all necessary information to create an Avro table.
        # TODO: Remove this restriction once Impala has the ability to infer the Avro
        # schema from column definitions. Then we do not need to generate an Avro
        # schema literal for creating dependent Avro tables anymore.
        load_single_table = len(schema_include_constraints[table_name.lower()]) == 1
        if file_format == 'avro' and not load_single_table:
          print 'CREATE_HIVE section not supported'
          continue
      elif create:
        table_template = create
        if file_format in ['avro', 'hbase']:
          # We don't know how to generalize CREATE sections to Avro and hbase.
          print ("CREATE section not supported with %s, "
                 "skipping: '%s'" % (file_format, table_name))
          continue
      elif columns:
        avro_schema_dir = "%s/%s" % (AVRO_SCHEMA_DIR, data_set)
        table_template = build_table_template(
            create_file_format, columns, partition_columns,
            row_format, avro_schema_dir, table_name)
        # Write Avro schema to local file
        if file_format == 'avro':
          if not os.path.exists(avro_schema_dir):
            os.makedirs(avro_schema_dir)
          with open("%s/%s.json" % (avro_schema_dir, table_name),"w") as f:
            f.write(avro_schema(columns))
      else:
        table_template = None
      if table_template:
        output.create.append(build_create_statement(table_template, table_name, db_name,
            db_suffix, create_file_format, create_codec, data_path))
        # HBASE create table
        if file_format == 'hbase':
          # If the HBASE_COLUMN_FAMILIES section does not exist, default to 'd'
          column_families = section.get('HBASE_COLUMN_FAMILIES', 'd')
          hbase_output.create.extend(build_hbase_create_stmt(db_name, table_name,
              column_families))
          hbase_post_load.load.append("flush '%s_hbase.%s'\n" % (db_name, table_name))
      # The ALTER statement in hive does not accept fully qualified table names so
      # insert a use statement. The ALTER statement is skipped for HBASE as it's
      # used for adding partitions.
      # TODO: Consider splitting the ALTER subsection into specific components. At the
      # moment, it assumes we're only using ALTER for partitioning the table.
      if alter and file_format != "hbase":
        use_db = 'USE {db_name};\n'.format(db_name=db)
        if output == hive_output and codec == 'lzo':
          # Hive ALTER TABLE ADD PARTITION doesn't handle null partitions, so
          # we can't run the ALTER section in this case.
          if options.force_reload:
            # IMPALA-2278: Hive INSERT OVERWRITE won't clear out partition directories
            # that weren't already added to the table. So, for force reload, manually
            # delete the partition directories.
            output.create.append(("DFS -rm -R {data_path};").format(
                data_path=data_path));
          else:
            # If this is not a force reload use msck repair to add the partitions
            # into the table.
            output.create.append(use_db + 'msck repair table %s;' % (table_name))
        else:
          output.create.append(use_db + alter.format(table_name=table_name))
      # If the directory already exists in HDFS, assume that data files already exist
      # and skip loading the data. Otherwise, the data is generated using either an
      # INSERT INTO statement or a LOAD statement.
      if not force_reload and hdfs_location in existing_tables:
        print 'HDFS path:', data_path, 'contains data. Data loading can be skipped.'
      else:
        print 'HDFS path:', data_path, 'does not exists or is empty. Data will be loaded.'
        if not db_suffix:
          if load:
            hive_output.load_base.append(build_load_statement(load, db_name,
                db_suffix, table_name))
          else:
            print 'Empty base table load for %s. Skipping load generation' % table_name
        elif file_format == 'parquet':
          # Parquet is loaded by Impala itself rather than through Hive.
          if insert:
            impala_load.load.append(build_insert_into_statement(insert, db_name,
                db_suffix, table_name, 'parquet', data_path, for_impala=True))
          else:
            print \
                'Empty parquet load for table %s. Skipping insert generation' % table_name
        else:
          if insert:
            hive_output.load.append(build_insert(insert, db_name, db_suffix, file_format,
                codec, compression_type, table_name, data_path,
                create_hive=create_hive))
          else:
            print 'Empty insert for table %s. Skipping insert generation' % table_name
    impala_output.write_to_file("load-%s-impala-generated-%s-%s-%s.sql" %
        (output_name, file_format, codec, compression_type))
    impala_load.write_to_file("load-%s-impala-load-generated-%s-%s-%s.sql" %
        (output_name, file_format, codec, compression_type))
  hive_output.write_to_file('load-' + output_name + '-hive-generated.sql')
  # The HBase shell scripts must end with an explicit exit.
  hbase_output.create.append("exit")
  hbase_output.write_to_file('load-' + output_name + '-hbase-generated.create')
  hbase_post_load.load.append("exit")
  hbase_post_load.write_to_file('post-load-' + output_name + '-hbase-generated.sql')
def parse_schema_template_file(file_name):
  """Parse a schema template file, rejecting any unknown section names."""
  valid_section_names = ['DATASET', 'BASE_TABLE_NAME', 'COLUMNS', 'PARTITION_COLUMNS',
                         'ROW_FORMAT', 'CREATE', 'CREATE_HIVE', 'DEPENDENT_LOAD', 'LOAD',
                         'LOAD_LOCAL', 'ALTER', 'HBASE_COLUMN_FAMILIES']
  return parse_test_file(file_name, valid_section_names, skip_unknown_sections=False)
if __name__ == "__main__":
  # Build the set of table-format vectors to generate statements for, either
  # from the exploration strategy or from an explicit --table_formats list.
  if options.table_formats is None:
    if options.exploration_strategy not in KNOWN_EXPLORATION_STRATEGIES:
      print 'Invalid exploration strategy:', options.exploration_strategy
      print 'Valid values:', ', '.join(KNOWN_EXPLORATION_STRATEGIES)
      sys.exit(1)
    test_vectors = [vector.value for vector in\
        load_table_info_dimension(options.workload, options.exploration_strategy)]
  else:
    table_formats = options.table_formats.split(',')
    dataset = get_dataset_from_workload(options.workload)
    test_vectors =\
        [TableFormatInfo.create_from_string(dataset, tf) for tf in table_formats]
  target_dataset = test_vectors[0].dataset
  print 'Target Dataset: ' + target_dataset
  dataset_load_dir = os.path.join(DATA_LOAD_DIR, target_dataset)
  # If the directory containing the sql files does not exist, create it. Else nuke all the
  # files corresponding to the current workload.
  try:
    os.makedirs(dataset_load_dir)
  except OSError:
    # Directory already exists, remove it.
    shutil.rmtree(dataset_load_dir)
    # Recreate the workload dir
    os.makedirs(dataset_load_dir)
  finally:
    # Make sure that the directory was created and is empty.
    assert os.path.isdir(dataset_load_dir)
    assert len(os.listdir(dataset_load_dir)) == 0
    # Make the dataset dir the current working directory
    os.chdir(dataset_load_dir)
  schema_template_file = os.path.join(DATASET_DIR, target_dataset,
      '%s_schema_template.sql' % target_dataset)
  if not os.path.isfile(schema_template_file):
    print 'Schema file not found: ' + schema_template_file
    sys.exit(1)
  constraints_file = os.path.join(DATASET_DIR, target_dataset, 'schema_constraints.csv')
  include_constraints, exclude_constraints = parse_table_constraints(constraints_file)
  sections = parse_schema_template_file(schema_template_file)
  generate_statements('%s-%s' % (options.workload, options.exploration_strategy),
      test_vectors, sections, include_constraints, exclude_constraints)
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | refs/heads/master | python-packages/mne-python-0.10/tutorials/plot_raw_objects.py | 15 | """
.. _tut_raw_objects
The :class:`Raw <mne.io.RawFIF>` data structure: continuous data
================================================================
"""
from __future__ import print_function

import mne
import os.path as op
from matplotlib import pyplot as plt

###############################################################################
# Continuous data is stored in objects of type :class:`Raw <mne.io.RawFIF>`.
# The core data structure is simply a 2D numpy array (channels × samples,
# `._data`) combined with an :class:`Info <mne.io.meas_info.Info>` object
# (`.info`) (:ref:`tut_info_objects`).
#
# The most common way to load continuous data is from a .fif file. For more
# information on :ref:`loading data from other formats <ch_raw>`, or creating
# it :ref:`from scratch <tut_creating_data_structures>`.

###############################################################################
# Loading continuous data
# -----------------------

# Load an example dataset; the preload flag loads the data into memory now.
# NOTE(review): data_path() downloads the sample dataset on first use.
data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
                    'sample', 'sample_audvis_raw.fif')
raw = mne.io.RawFIF(data_path, preload=True, verbose=False)

# Give the sample rate
print('sample rate:', raw.info['sfreq'], 'Hz')
# Give the size of the data matrix
print('channels x samples:', raw._data.shape)

###############################################################################
# Information about the channels contained in the :class:`Raw <mne.io.RawFIF>`
# object is contained in the :class:`Info <mne.io.meas_info.Info>` attribute.
# This is essentially a dictionary with a number of relevant fields (see
# :ref:`tut_info_objects`).

###############################################################################
# Indexing data
# -------------
#
# There are two ways to access the data stored within :class:`Raw
# <mne.io.RawFIF>` objects. One is by accessing the underlying data array, and
# the other is to index the :class:`Raw <mne.io.RawFIF>` object directly.
#
# To access the data array of :class:`Raw <mne.io.Raw>` objects, use the
# `_data` attribute. Note that this is only present if `preload==True`.

print('Shape of data array:', raw._data.shape)
array_data = raw._data[0, :1000]
_ = plt.plot(array_data)

###############################################################################
# You can also pass an index directly to the :class:`Raw <mne.io.RawFIF>`
# object. This will return an array of times, as well as the data representing
# those timepoints. This may be used even if the data is not preloaded:

# Extract data from the first 5 channels, from 1 s to 3 s.
sfreq = raw.info['sfreq']
data, times = raw[:5, int(sfreq * 1):int(sfreq * 3)]
_ = plt.plot(times, data.T)
_ = plt.title('Sample channels')

###############################################################################
# -----------------------------------------
# Selecting subsets of channels and samples
# -----------------------------------------
#
# It is possible to use more intelligent indexing to extract data, using
# channel names, types or time ranges.

# Pull all MEG gradiometer channels:
# Make sure to use copy==True or it will overwrite the data
meg_only = raw.pick_types(meg=True, copy=True)
eeg_only = raw.pick_types(meg=False, eeg=True, copy=True)

# The MEG flag in particular lets you specify a string for more specificity
grad_only = raw.pick_types(meg='grad', copy=True)

# Or you can use custom channel names
pick_chans = ['MEG 0112', 'MEG 0111', 'MEG 0122', 'MEG 0123']
specific_chans = raw.pick_channels(pick_chans, copy=True)
print(meg_only, eeg_only, grad_only, specific_chans, sep='\n')

###############################################################################
# Notice the different scalings of these types

f, (a1, a2) = plt.subplots(2, 1)
eeg, times = eeg_only[0, :int(sfreq * 2)]
meg, times = meg_only[0, :int(sfreq * 2)]
a1.plot(times, meg[0])
a2.plot(times, eeg[0])

###############################################################################
# You can restrict the data to a specific time range

restricted = raw.crop(5, 7)  # in seconds
print('New time range from', restricted.times.min(), 's to',
      restricted.times.max(), 's')

###############################################################################
# And drop channels by name

restricted = restricted.drop_channels(['MEG 0241', 'EEG 001'])
print('Number of channels reduced from', raw.info['nchan'], 'to',
      restricted.info['nchan'])

###############################################################################
# --------------------------------------------------
# Concatenating :class:`Raw <mne.io.RawFIF>` objects
# --------------------------------------------------
#
# :class:`Raw <mne.io.RawFIF>` objects can be concatenated in time by using the
# :func:`append <mne.io.RawFIF.append>` function. For this to work, they must
# have the same number of channels and their :class:`Info
# <mne.io.meas_info.Info>` structures should be compatible.

# Create multiple :class:`Raw <mne.io.RawFIF>` objects
raw1 = raw.copy().crop(0, 10)
raw2 = raw.copy().crop(10, 20)
raw3 = raw.copy().crop(20, 100)

# Concatenate in time (also works without preloading)
raw1.append([raw2, raw3])
print('Time extends from', raw1.times.min(), 's to', raw1.times.max(), 's')
|
ghchinoy/tensorflow | refs/heads/master | tensorflow/python/debug/lib/common.py | 78 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common values and methods for TensorFlow Debugger."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
GRPC_URL_PREFIX = "grpc://"
# A key for a Session.run() call.
RunKey = collections.namedtuple("RunKey", ["feed_names", "fetch_names"])
def get_graph_element_name(elem):
  """Obtain the name or string representation of a graph element.

  If the graph element has the attribute "name", return name. Otherwise, return
  a __str__ representation of the graph element. Certain graph elements, such as
  `SparseTensor`s, do not have the attribute "name".

  Args:
    elem: The graph element in question.

  Returns:
    If the attribute 'name' is available, return the name. Otherwise, return
    str(fetch).
  """
  if hasattr(elem, "name"):
    return elem.name
  return str(elem)
def get_flattened_names(feeds_or_fetches):
  """Get a flattened list of the names in run() call feeds or fetches.

  Args:
    feeds_or_fetches: Feeds or fetches of the `Session.run()` call. It maybe
      a Tensor, an Operation or a Variable. It may also be nested lists, tuples
      or dicts. See doc of `Session.run()` for more details.

  Returns:
    (list of str) A flattened list of fetch names from `feeds_or_fetches`.
  """
  if isinstance(feeds_or_fetches, (list, tuple)):
    nested = [get_flattened_names(item) for item in feeds_or_fetches]
  elif isinstance(feeds_or_fetches, dict):
    nested = [get_flattened_names(feeds_or_fetches[key])
              for key in feeds_or_fetches]
  else:
    # Bottom-out condition of the recursion: a Tensor, an Operation or a
    # Variable, for which the name attribute should be available.
    nested = [[get_graph_element_name(feeds_or_fetches)]]
  return [name for sublist in nested for name in sublist]
def get_run_key(feed_dict, fetches):
  """Summarize the names of feeds and fetches as a RunKey JSON string.

  Args:
    feed_dict: The feed_dict given to the `Session.run()` call.
    fetches: The fetches from the `Session.run()` call.

  Returns:
    A JSON Array consisting of two items. They first items is a flattened
    Array of the names of the feeds. The second item is a flattened Array of
    the names of the fetches.
  """
  run_key = RunKey(get_flattened_names(feed_dict),
                   get_flattened_names(fetches))
  return json.dumps(run_key)
|
15Dkatz/pants | refs/heads/master | tests/python/pants_test/java/test_nailgun_protocol.py | 15 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import socket
import unittest
import mock
from pants.java.nailgun_protocol import ChunkType, NailgunProtocol
class TestChunkType(unittest.TestCase):
  """Sanity-check that every expected chunk type constant is defined."""

  def test_chunktype_constants(self):
    expected_constants = ('ARGUMENT', 'ENVIRONMENT', 'WORKING_DIR', 'COMMAND',
                          'STDIN', 'STDOUT', 'STDERR', 'START_READING_INPUT',
                          'STDIN_EOF', 'EXIT')
    for constant_name in expected_constants:
      self.assertIsNotNone(getattr(ChunkType, constant_name))
class TestNailgunProtocol(unittest.TestCase):
  """Exercises NailgunProtocol chunk I/O over an in-process socketpair.

  Each test writes on one end of the pair and reads/parses on the other, so
  no real nailgun server is needed.
  """

  EMPTY_PAYLOAD = ''
  TEST_COMMAND = 'test'
  TEST_OUTPUT = 't e s t'
  TEST_UNICODE_PAYLOAD = u'([\d0-9]{1,4}\s?[年月日])'.encode('utf-8')
  TEST_WORKING_DIR = '/path/to/a/repo'
  TEST_ARGUMENTS = ['t', 'e', 's', 't']
  TEST_ENVIRON = dict(TEST_VAR='success')

  def setUp(self):
    self.client_sock, self.server_sock = socket.socketpair()

  def tearDown(self):
    self.client_sock.close()
    self.server_sock.close()

  def test_send_and_parse_request(self):
    # Send a test request over the client socket.
    NailgunProtocol.send_request(
      self.client_sock,
      self.TEST_WORKING_DIR,
      self.TEST_COMMAND,
      *self.TEST_ARGUMENTS,
      **self.TEST_ENVIRON
    )

    # Receive the request from the server-side context.
    working_dir, command, arguments, environment = NailgunProtocol.parse_request(self.server_sock)

    self.assertEqual(working_dir, self.TEST_WORKING_DIR)
    self.assertEqual(command, self.TEST_COMMAND)
    self.assertEqual(arguments, self.TEST_ARGUMENTS)
    self.assertEqual(environment, self.TEST_ENVIRON)

  def test_send_and_parse_request_bad_chunktype(self):
    INVALID_CHUNK_TYPE = b';'
    NailgunProtocol.write_chunk(self.client_sock, INVALID_CHUNK_TYPE, '1729')

    with self.assertRaises(NailgunProtocol.ProtocolError):
      NailgunProtocol.parse_request(self.server_sock)

  def test_read_until(self):
    # _read_until must keep recv()ing until exactly N bytes are accumulated.
    recv_chunks = ['1', '234', '56', '789', '0']
    mock_socket = mock.Mock()
    mock_socket.recv.side_effect = recv_chunks
    self.assertEqual(NailgunProtocol._read_until(mock_socket, 10), '1234567890')
    self.assertEqual(mock_socket.recv.call_count, len(recv_chunks))

  def test_read_until_truncated_recv(self):
    # Closing the peer after a single byte forces a short read.
    self.server_sock.sendall(b'x')
    self.server_sock.close()
    with self.assertRaises(NailgunProtocol.TruncatedRead):
      NailgunProtocol._read_until(self.client_sock, 3)

  def test_iter_chunks(self):
    expected_chunks = [
      (ChunkType.COMMAND, self.TEST_COMMAND),
      (ChunkType.STDOUT, self.TEST_OUTPUT),
      (ChunkType.STDERR, self.TEST_OUTPUT),
      (ChunkType.EXIT, self.EMPTY_PAYLOAD)
      # N.B. without an EXIT chunk here (or socket failure), this test will deadlock in iter_chunks.
    ]
    for chunk_type, payload in expected_chunks:
      NailgunProtocol.write_chunk(self.server_sock, chunk_type, payload)

    for i, chunk in enumerate(NailgunProtocol.iter_chunks(self.client_sock)):
      self.assertEqual(chunk, expected_chunks[i])

  def test_read_and_write_chunk(self):
    # Write a command chunk to the server socket.
    NailgunProtocol.write_chunk(self.server_sock, ChunkType.COMMAND, self.TEST_COMMAND)

    # Read the chunk from the client socket.
    chunk_type, payload = NailgunProtocol.read_chunk(self.client_sock)

    self.assertEqual(
      (chunk_type, payload),
      (ChunkType.COMMAND, self.TEST_COMMAND)
    )

  def test_read_chunk_truncated_during_header(self):
    """Construct a chunk and truncate to the first 3 bytes ([:3]), an incomplete header."""
    truncated_chunk = NailgunProtocol.construct_chunk(ChunkType.STDOUT, self.TEST_OUTPUT)[:3]
    self.server_sock.sendall(truncated_chunk)
    self.server_sock.close()

    with self.assertRaises(NailgunProtocol.TruncatedHeaderError):
      NailgunProtocol.read_chunk(self.client_sock)

  def test_read_chunk_truncated_before_payload(self):
    """Construct a chunk and send exactly the header (first 5 bytes) and truncate the remainder."""
    truncated_chunk = NailgunProtocol.construct_chunk(ChunkType.STDOUT, self.TEST_OUTPUT)[:5]
    self.server_sock.sendall(truncated_chunk)
    self.server_sock.close()

    with self.assertRaises(NailgunProtocol.TruncatedPayloadError):
      NailgunProtocol.read_chunk(self.client_sock)

  def test_read_chunk_truncated_during_payload(self):
    """Construct a chunk and truncate the last 3 bytes of the payload ([:-3])."""
    truncated_chunk = NailgunProtocol.construct_chunk(ChunkType.STDOUT, self.TEST_OUTPUT)[:-3]
    self.server_sock.sendall(truncated_chunk)
    self.server_sock.close()

    with self.assertRaises(NailgunProtocol.TruncatedPayloadError):
      NailgunProtocol.read_chunk(self.client_sock)

  def test_send_start_reading_input(self):
    NailgunProtocol.send_start_reading_input(self.server_sock)
    chunk_type, payload = NailgunProtocol.read_chunk(self.client_sock)
    self.assertEqual(
      (chunk_type, payload),
      (ChunkType.START_READING_INPUT, self.EMPTY_PAYLOAD)
    )

  def test_send_stdout(self):
    NailgunProtocol.send_stdout(self.server_sock, self.TEST_OUTPUT)
    chunk_type, payload = NailgunProtocol.read_chunk(self.client_sock)
    self.assertEqual(
      (chunk_type, payload),
      (ChunkType.STDOUT, self.TEST_OUTPUT)
    )

  def test_send_stderr(self):
    NailgunProtocol.send_stderr(self.server_sock, self.TEST_OUTPUT)
    chunk_type, payload = NailgunProtocol.read_chunk(self.client_sock)
    self.assertEqual(
      (chunk_type, payload),
      (ChunkType.STDERR, self.TEST_OUTPUT)
    )

  def test_send_exit_default(self):
    # With no payload argument, EXIT carries an empty payload.
    NailgunProtocol.send_exit(self.server_sock)
    chunk_type, payload = NailgunProtocol.read_chunk(self.client_sock)
    self.assertEqual(
      (chunk_type, payload),
      (ChunkType.EXIT, self.EMPTY_PAYLOAD)
    )

  def test_send_exit(self):
    NailgunProtocol.send_exit(self.server_sock, self.TEST_OUTPUT)
    chunk_type, payload = NailgunProtocol.read_chunk(self.client_sock)
    self.assertEqual(
      (chunk_type, payload),
      (ChunkType.EXIT, self.TEST_OUTPUT)
    )

  def test_send_unicode_chunk(self):
    # Non-ASCII payloads must round-trip byte-for-byte.
    NailgunProtocol.send_stdout(self.server_sock, self.TEST_UNICODE_PAYLOAD)
    chunk_type, payload = NailgunProtocol.read_chunk(self.client_sock, return_bytes=True)
    self.assertEqual(
      (chunk_type, payload),
      (ChunkType.STDOUT, self.TEST_UNICODE_PAYLOAD)
    )

  def test_isatty_from_empty_env(self):
    self.assertEquals(NailgunProtocol.isatty_from_env({}), (False, False, False))

  def test_isatty_from_env(self):
    self.assertEquals(
      NailgunProtocol.isatty_from_env({
        'NAILGUN_TTY_0': '1',
        'NAILGUN_TTY_1': '0',
        'NAILGUN_TTY_2': '1'
      }),
      (True, False, True)
    )

  def test_isatty_from_env_mixed(self):
    # Missing NAILGUN_TTY_2 should default to not-a-tty.
    self.assertEquals(
      NailgunProtocol.isatty_from_env({
        'NAILGUN_TTY_0': '0',
        'NAILGUN_TTY_1': '1'
      }),
      (False, True, False)
    )

  def test_construct_chunk(self):
    # Payloads must be text or bytes, never ints.
    with self.assertRaises(TypeError):
      NailgunProtocol.construct_chunk(ChunkType.STDOUT, 1111)

  def test_construct_chunk_unicode(self):
    NailgunProtocol.construct_chunk(ChunkType.STDOUT, u'Ø')

  def test_construct_chunk_bytes(self):
    NailgunProtocol.construct_chunk(ChunkType.STDOUT, b'yes')
|
orchidinfosys/odoo | refs/heads/master | addons/auth_signup/models/res_users.py | 2 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from ast import literal_eval
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools.misc import ustr
from odoo.addons.base.ir.ir_mail_server import MailDeliveryException
from odoo.addons.auth_signup.models.res_partner import SignupError, now
class ResUsers(models.Model):
_inherit = 'res.users'
state = fields.Selection(compute='_compute_state', string='Status',
selection=[('new', 'Never Connected'), ('active', 'Activated')])
@api.multi
def _compute_state(self):
for user in self:
user.state = 'active' if user.login_date else 'new'
@api.model
def signup(self, values, token=None):
""" signup a user, to either:
- create a new user (no token), or
- create a user for a partner (with token, but no user for partner), or
- change the password of a user (with token, and existing user).
:param values: a dictionary with field values that are written on user
:param token: signup token (optional)
:return: (dbname, login, password) for the signed up user
"""
if token:
# signup with a token: find the corresponding partner id
partner = self.env['res.partner']._signup_retrieve_partner(token, check_validity=True, raise_exception=True)
# invalidate signup token
partner.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})
partner_user = partner.user_ids and partner.user_ids[0] or False
# avoid overwriting existing (presumably correct) values with geolocation data
if partner.country_id or partner.zip or partner.city:
values.pop('city', None)
values.pop('country_id', None)
if partner.lang:
values.pop('lang', None)
if partner_user:
# user exists, modify it according to values
values.pop('login', None)
values.pop('name', None)
partner_user.write(values)
return (self.env.cr.dbname, partner_user.login, values.get('password'))
else:
# user does not exist: sign up invited user
values.update({
'name': partner.name,
'partner_id': partner.id,
'email': values.get('email') or values.get('login'),
})
if partner.company_id:
values['company_id'] = partner.company_id.id
values['company_ids'] = [(6, 0, [partner.company_id.id])]
self._signup_create_user(values)
else:
# no token, sign up an external user
values['email'] = values.get('email') or values.get('login')
self._signup_create_user(values)
return (self.env.cr.dbname, values.get('login'), values.get('password'))
@api.model
def _signup_create_user(self, values):
""" create a new user from the template user """
IrConfigParam = self.env['ir.config_parameter']
template_user_id = literal_eval(IrConfigParam.get_param('auth_signup.template_user_id', 'False'))
template_user = self.browse(template_user_id)
assert template_user.exists(), 'Signup: invalid template user'
# check that uninvited users may sign up
if 'partner_id' not in values:
if not literal_eval(IrConfigParam.get_param('auth_signup.allow_uninvited', 'False')):
raise SignupError('Signup is not allowed for uninvited users')
assert values.get('login'), "Signup: no login given for new user"
assert values.get('partner_id') or values.get('name'), "Signup: no name or partner given for new user"
# create a copy of the template user (attached to a specific partner_id if given)
values['active'] = True
try:
with self.env.cr.savepoint():
return template_user.with_context(no_reset_password=True).copy(values)
except Exception, e:
# copy may failed if asked login is not available.
raise SignupError(ustr(e))
def reset_password(self, login):
""" retrieve the user corresponding to login (login or email),
and reset their password
"""
users = self.search([('login', '=', login)])
if not users:
users = self.search([('email', '=', login)])
if len(users) != 1:
raise Exception(_('Reset password: invalid username or email'))
return users.action_reset_password()
    @api.multi
    def action_reset_password(self):
        """ create signup token for each user, and send their signup url by email """
        # prepare reset password signup
        # `create_user` in context distinguishes a first invitation from a reset
        create_mode = bool(self.env.context.get('create_user'))
        # no time limit for initial invitation, only for reset password
        expiration = False if create_mode else now(days=+1)
        self.mapped('partner_id').signup_prepare(signup_type="reset", expiration=expiration)
        # send email to users with their signup url
        template = False
        if create_mode:
            try:
                # the "set password" template may be missing; fall back below
                template = self.env.ref('auth_signup.set_password_email', raise_if_not_found=False)
            except ValueError:
                pass
        if not template:
            template = self.env.ref('auth_signup.reset_password_email')
        assert template._name == 'mail.template'
        for user in self:
            if not user.email:
                raise UserError(_("Cannot send email: user %s has no email address.") % user.name)
            # force_send/raise_exception: fail loudly and immediately if the
            # mail cannot be delivered, instead of queueing it
            template.with_context(lang=user.lang).send_mail(user.id, force_send=True, raise_exception=True)
@api.model
def create(self, values):
# overridden to automatically invite user to sign up
user = super(ResUsers, self).create(values)
if user.email and not self.env.context.get('no_reset_password'):
try:
user.with_context(create_user=True).action_reset_password()
except MailDeliveryException:
user.partner_id.with_context(create_user=True).signup_cancel()
return user
@api.multi
def copy(self, default=None):
self.ensure_one()
sup = super(ResUsers, self)
if not default or not default.get('email'):
# avoid sending email to the user we are duplicating
sup = super(ResUsers, self.with_context(reset_password=False))
return sup.copy(default=default)
|
wilvk/ansible | refs/heads/devel | lib/ansible/modules/network/aci/aci_l3out_route_tag_policy.py | 5 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_l3out_route_tag_policy
short_description: Manage route tag policies on Cisco ACI fabrics (l3ext:RouteTagPol)
description:
- Manage route tag policies on Cisco ACI fabrics.
- More information from the internal APIC class I(l3ext:RouteTagPol) at
U(https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
options:
rtp:
description:
- The name of the route tag policy.
required: yes
aliases: [ name, rtp_name ]
description:
description:
- The description for the route tag policy.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
tag:
description:
- The value of the route tag (range 0-4294967295).
default: '4294967295'
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_l3out_route_tag_policy:
host: apic
username: admin
password: SomeSecretPassword
rtp: '{{ rtp_name }}'
tenant: production
tag: '{{ tag }}'
description: '{{ description }}'
'''
RETURN = r'''
#
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
    """Module entry point: build the argument spec, construct the ACI
    request URL for l3extRouteTagPol and apply the requested state."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        rtp=dict(type='str', required=False, aliases=['name', 'rtp_name']),  # Not required for querying all objects
        tenant=dict(type='str', required=False, aliases=['tenant_name']),  # Not required for querying all objects
        description=dict(type='str', aliases=['descr']),
        tag=dict(type='int'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'),  # Deprecated starting from v2.6
        protocol=dict(type='str', removed_in_version='2.6'),  # Deprecated in v2.6
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # rtp and tenant are mandatory except when querying all objects
        required_if=[
            ['state', 'absent', ['rtp', 'tenant']],
            ['state', 'present', ['rtp', 'tenant']],
        ],
    )
    rtp = module.params['rtp']
    description = module.params['description']
    tag = module.params['tag']
    state = module.params['state']
    tenant = module.params['tenant']
    aci = ACIModule(module)
    # URL hierarchy: tenant (fvTenant) -> route tag policy (l3extRouteTagPol)
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
            module_object=tenant,
        ),
        subclass_1=dict(
            aci_class='l3extRouteTagPol',
            aci_rn='rttag-{0}'.format(rtp),
            filter_target='eq(l3extRouteTagPol.name, "{0}")'.format(rtp),
            module_object=rtp,
        ),
    )
    aci.get_existing()
    if state == 'present':
        # Filter out module parameters with null values
        aci.payload(
            aci_class='l3extRouteTagPol',
            class_config=dict(
                name=rtp,
                descr=description, tag=tag,
            ),
        )
        # Generate config diff which will be used as POST request body
        aci.get_diff(aci_class='l3extRouteTagPol')
        # Submit changes if module not in check_mode and the proposed is different than existing
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()
    module.exit_json(**aci.result)
if __name__ == "__main__":
main()
|
d33tah/npyscreen | refs/heads/master | npyscreen/util_viewhelp.py | 15 | import textwrap
def view_help(message, title="Message", form_color="STANDOUT", scroll_exit=False, autowrap=False):
    """Display *message* in a full-screen scrollable pager form.

    :param message: text to show; each line is re-wrapped to the pager width
    :param title: form title
    :param form_color: npyscreen color theme name for the form
    :param scroll_exit: NOTE(review): currently ignored -- the Pager below is
        always created with ``scroll_exit=True``; confirm intended behaviour
    :param autowrap: passed through to the Pager widget
    """
    # imported lazily; fmForm/wgmultiline live in the same package
    from . import fmForm
    from . import wgmultiline
    F = fmForm.Form(name=title, color=form_color)
    mlw = F.add(wgmultiline.Pager, scroll_exit=True, autowrap=autowrap)
    mlw_width = mlw.width-1
    # wrap every input line to the widget width, keeping empty lines intact
    message_lines = []
    for line in message.splitlines():
        line = textwrap.wrap(line, mlw_width)
        if line == []:
            message_lines.append('')
        else:
            message_lines.extend(line)
    mlw.values = message_lines
    F.edit()
    del mlw
    del F
|
tensorflow/tensorflow | refs/heads/master | tensorflow/python/keras/saving/model_config.py | 5 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Functions that save the model's config into different formats."""
from tensorflow.python.keras.saving.saved_model import json_utils
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=g-import-not-at-top
try:
import yaml
except ImportError:
yaml = None
# pylint: enable=g-import-not-at-top
@keras_export('keras.models.model_from_config')
def model_from_config(config, custom_objects=None):
  """Instantiate a Keras model from a configuration dictionary.

  Args:
    config: Configuration dictionary, as produced by `model.get_config()`.
    custom_objects: Optional dictionary mapping names (strings) to custom
      classes or functions to be considered during deserialization.

  Returns:
    A Keras model instance (uncompiled).

  Raises:
    TypeError: if `config` is a list rather than a dictionary.
  """
  got_list = isinstance(config, list)
  if got_list:
    raise TypeError(
        '`model_from_config` expects a dictionary, not a list. Maybe you '
        'meant to use '
        '`Sequential.from_config(config)`?')
  from tensorflow.python.keras.layers import deserialize  # pylint: disable=g-import-not-at-top
  return deserialize(config, custom_objects=custom_objects)
@keras_export('keras.models.model_from_yaml')
def model_from_yaml(yaml_string, custom_objects=None):
  """Instantiate a Keras model from a YAML configuration.

  Args:
    yaml_string: YAML string or open file encoding a model configuration.
    custom_objects: Optional dictionary mapping names (strings) to custom
      classes or functions to be considered during deserialization.

  Returns:
    A Keras model instance (uncompiled).

  Raises:
    ImportError: if the yaml module is not installed.
  """
  if yaml is None:
    raise ImportError('Requires yaml module installed (`pip install pyyaml`).')
  # `unsafe_load` only exists in PyYAML 5.x+; on older releases the attribute
  # access raises AttributeError and we fall back to plain `load`.
  try:
    config = yaml.unsafe_load(yaml_string)  # PyYAML 5.x+
  except AttributeError:
    config = yaml.load(yaml_string)
  from tensorflow.python.keras.layers import deserialize  # pylint: disable=g-import-not-at-top
  return deserialize(config, custom_objects=custom_objects)
@keras_export('keras.models.model_from_json')
def model_from_json(json_string, custom_objects=None):
  """Instantiate a Keras model from a JSON configuration string.

  Usage:

  >>> model = tf.keras.Sequential([
  ...     tf.keras.layers.Dense(5, input_shape=(3,)),
  ...     tf.keras.layers.Softmax()])
  >>> config = model.to_json()
  >>> loaded_model = tf.keras.models.model_from_json(config)

  Args:
    json_string: JSON string encoding a model configuration.
    custom_objects: Optional dictionary mapping names (strings) to custom
      classes or functions to be considered during deserialization.

  Returns:
    A Keras model instance (uncompiled).
  """
  model_config = json_utils.decode(json_string)
  from tensorflow.python.keras.layers import deserialize  # pylint: disable=g-import-not-at-top
  return deserialize(model_config, custom_objects=custom_objects)
|
campbellr/click-plugins | refs/heads/master | example/PrintItBold/printit_bold/__init__.py | 4 | """
A CLI plugin for `PrintIt` that adds bold text.
"""
|
orgito/ansible | refs/heads/devel | test/integration/targets/vault/test-vault-client.py | 139 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
import argparse
import sys
# TODO: could read these from the files I suppose...
# Fixture mapping vault-id names to the passwords this fake client "stores".
secrets = {'vault-password': 'test-vault-password',
           'vault-password-wrong': 'hunter42',
           'vault-password-ansible': 'ansible',
           'password': 'password',
           'vault-client-password-1': 'password-1',
           'vault-client-password-2': 'password-2'}
def build_arg_parser():
    """Build the CLI parser mimicking ansible's vault password client contract."""
    cli = argparse.ArgumentParser(
        description='Get a vault password from user keyring')
    cli.add_argument('--vault-id', dest='vault_id', action='store',
                     default=None,
                     help='name of the vault secret to get from keyring')
    cli.add_argument('--username', action='store', default=None,
                     help='the username whose keyring is queried')
    cli.add_argument('--set', dest='set_password', action='store_true',
                     default=False,
                     help='set the password instead of getting it')
    return cli
def get_secret(keyname):
    """Return the password stored under *keyname*, or None when unknown."""
    return secrets.get(keyname)
def main():
    """Entry point: print the secret for the requested vault-id.

    Exit codes: 0 on success, 1 for the unsupported --set flag,
    2 when the vault-id is unknown.
    """
    opts = build_arg_parser().parse_args()
    if opts.set_password:
        print('--set is not supported yet')
        sys.exit(1)
    key = opts.vault_id or 'ansible'
    secret = get_secret(key)
    if secret is None:
        sys.stderr.write('test-vault-client could not find key for vault-id="%s"\n' % key)
        # key not found rc=2
        return 2
    sys.stdout.write('%s\n' % secret)
    return 0
if __name__ == '__main__':
sys.exit(main())
|
UManPychron/pychron | refs/heads/develop | pychron/image/rpi_camera.py | 2 | # ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import os
from threading import Thread
import picamera
import picamera.array
# ============= standard library imports ========================
# ============= local library imports ==========================
import time
from pychron.core.helpers.filetools import unique_path2
from pychron.headless_config_loadable import HeadlessConfigLoadable
from pychron.paths import paths
from six.moves import map
class RPiCamera(HeadlessConfigLoadable):
    """Raspberry Pi camera device configured from a pychron config file.

    The class attributes below mirror picamera.PiCamera settings; they are
    copied onto a fresh PiCamera instance before each operation
    (see ``_setup_camera``).
    """
    sharpness = 0
    contrast = 0
    brightness = 50
    saturation = 0
    ISO = 0
    video_stabilization = False
    exposure_compensation = 0
    # exposure modes
    # off, auto, night, nightpreview, backlight, spotlight, sports, snow, beach,
    # verylong, fixedfps, antishake, fireworks,
    exposure_mode = 'auto'
    meter_mode = 'average'  # stop, average, backlit, matrix
    # awb_modes
    # off, auto, sunlight, cloudy, shade, tungsten, fluorescent, incandescent, flash, horizon
    awb_mode = 'auto'
    # image effects
    # none, negative, solarize, sketch, denoise, emboss, oilpaint, hatch,
    # gpen, pastel, watercolor,film, blur, saturation, colorswap, washedout,
    # posterise, colorpoint, colorbalance, cartoon, deinterlace1, deinterlace2
    image_effect = 'none'
    color_effects = None  # (u,v)
    rotation = 0  # 0,90,180,270
    hflip = False
    vflip = False
    crop = (0.0, 0.0, 1.0, 1.0)
    frame_rate = 10  # frames per second used by the video service loop

    def load_additional_args(self, *args, **kw):
        """Read camera settings from the [Settings] section of the device
        configuration. Always returns True (load succeeded)."""
        config = self.get_configuration()
        self.set_attribute(config, 'sharpness', 'Settings', 'sharpness', cast='int')
        self.set_attribute(config, 'contrast', 'Settings', 'contrast', cast='int')
        self.set_attribute(config, 'brightness', 'Settings', 'brightness', cast='int')
        self.set_attribute(config, 'saturation', 'Settings', 'saturation', cast='int')
        self.set_attribute(config, 'ISO', 'Settings', 'ISO', cast='int')
        self.set_attribute(config, 'video_stabilization', 'Settings', 'video_stabilization', cast='boolean')
        self.set_attribute(config, 'exposure_compensation', 'Settings', 'exposure_compensation', cast='int')
        self.set_attribute(config, 'exposure_mode', 'Settings', 'exposure_mode')
        self.set_attribute(config, 'meter_mode', 'Settings', 'meter_mode')
        self.set_attribute(config, 'awb_mode', 'Settings', 'awb_mode')
        self.set_attribute(config, 'image_effect', 'Settings', 'image_effect')
        self.set_attribute(config, 'color_effects', 'Settings', 'color_effects')
        self.set_attribute(config, 'rotation', 'Settings', 'rotation', cast='int')
        self.set_attribute(config, 'hflip', 'Settings', 'hflip', cast='boolean')
        self.set_attribute(config, 'vflip', 'Settings', 'vflip', cast='boolean')
        crop = self.config_get(config, 'Settings', 'crop')
        if crop:
            # stored as a comma-separated string of four fractions
            self.crop = tuple(map(float, crop.split(',')))
        return True

    def start_video_service(self):
        """Continuously capture frames to /var/www/firm_cam/image.jpg on a
        daemon thread, at roughly ``frame_rate`` frames per second."""
        def func():
            root = '/var/www/firm_cam'
            if not os.path.isdir(root):
                os.mkdir(root)
            path = os.path.join(root, 'image.jpg')
            with picamera.PiCamera() as camera:
                self._setup_camera(camera)
                camera.capture(path)
                while 1:
                    camera.capture(path)
                    time.sleep(1 / float(self.frame_rate))
        t = Thread(target=func)
        # assign `daemon` directly; Thread.setDaemon() is deprecated
        t.daemon = True
        t.start()

    def get_image_array(self):
        """Capture one frame and return it as an RGB numpy array."""
        with picamera.PiCamera() as camera:
            self._setup_camera(camera)
            with picamera.array.PiRGBArray(camera) as output:
                camera.capture(output, 'rgb')
                return output.array

    def capture(self, path=None, name=None, **options):
        """Capture a still image.

        :param path: explicit destination path; when None a unique path is
            generated in ``paths.snapshot_dir``
        :param name: base filename for the generated path; defaults to 'rpi'
        :param options: extra keyword arguments forwarded to PiCamera.capture
        """
        with picamera.PiCamera() as camera:
            self._setup_camera(camera)
            if path is None:
                # BUGFIX: the original branches were swapped -- it passed
                # name=None to unique_path2 and ignored an explicit name.
                if name is None:
                    path, _ = unique_path2(paths.snapshot_dir, 'rpi', extension='.jpg')
                else:
                    path, _ = unique_path2(paths.snapshot_dir, name, extension='.jpg')
            camera.capture(path, **options)

    # private
    def _setup_camera(self, camera):
        """Copy every configured setting onto the PiCamera instance."""
        attrs = ('sharpness', 'contrast', 'brightness', 'saturation', 'ISO',
                 'video_stabilization', 'exposure_compensation', 'exposure_mode',
                 'meter_mode', 'awb_mode', 'image_effect', 'color_effects',
                 'rotation', 'hflip', 'vflip', 'crop')
        for attr in attrs:
            setattr(camera, attr, getattr(self, attr))
# ============= EOF =============================================
|
hankcs/HanLP | refs/heads/master | hanlp/utils/lang/ja/__init__.py | 1 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-05-13 13:24
|
kinap/scapy | refs/heads/master | scapy/base_classes.py | 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Generators and packet meta classes.
"""
###############
## Generators ##
################
import re,random,socket
import types
class Gen(object):
    """Base class for value generators; iterating a bare Gen yields nothing."""
    __slots__ = []
    def __iter__(self):
        # default generator is empty; subclasses override to yield values
        return iter([])
class SetGen(Gen):
    """Generator over a set of values.

    Integer tuples ``(start, stop[, step])`` are expanded to inclusive
    integer ranges; lists and packet lists are iterated element-wise.
    """
    def __init__(self, values, _iterpacket=1):
        # _iterpacket: when false, BasePacket instances are yielded whole
        # instead of being iterated themselves
        self._iterpacket=_iterpacket
        if isinstance(values, (list, BasePacketList)):
            self.values = list(values)
        elif (type(values) is tuple) and (2 <= len(values) <= 3) and \
             all(type(i) is int for i in values):
            # We use values[1] + 1 as stop value for xrange to maintain
            # the behavior of using tuples as field `values`
            self.values = [xrange(*((values[0], values[1] + 1) + values[2:]))]
        else:
            self.values = [values]
    def transf(self, element):
        # identity hook; subclasses may transform the yielded elements
        return element
    def __iter__(self):
        # flatten nested generators one level deep
        for i in self.values:
            if (isinstance(i, Gen) and
                (self._iterpacket or not isinstance(i,BasePacket))) or (
                    isinstance(i, (xrange, types.GeneratorType))):
                for j in i:
                    yield j
            else:
                yield i
    def __repr__(self):
        return "<SetGen %r>" % self.values
class Net(Gen):
    """Generate a list of IPs from a network address or a name"""
    name = "ip"
    # dotted quad where each octet may be '*', a number, or a 'lo-hi'
    # range, with an optional /prefix at the end
    ipaddress = re.compile(r"^(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)(/[0-3]?[0-9])?$")
    @staticmethod
    def _parse_digit(a,netmask):
        # return the half-open (start, stop) interval covered by one octet,
        # given how many of this octet's low bits the netmask leaves free
        netmask = min(8,max(netmask,0))
        if a == "*":
            a = (0,256)
        elif a.find("-") >= 0:
            x,y = map(int,a.split("-"))
            if x > y:
                y = x
            a = (x & (0xffL<<netmask) , max(y, (x | (0xffL>>(8-netmask))))+1)
        else:
            a = (int(a) & (0xffL<<netmask),(int(a) | (0xffL>>(8-netmask)))+1)
        return a
    @classmethod
    def _parse_net(cls, net):
        # split "addr/mask"; names that don't look like IPs are resolved
        tmp=net.split('/')+["32"]
        if not cls.ipaddress.match(net):
            tmp[0]=socket.gethostbyname(tmp[0])
        netmask = int(tmp[1])
        return map(lambda x,y: cls._parse_digit(x,y), tmp[0].split("."), map(lambda x,nm=netmask: x-nm, (8,16,24,32))),netmask
    def __init__(self, net):
        self.repr=net
        self.parsed,self.netmask = self._parse_net(net)
    def __iter__(self):
        # enumerate all addresses, first octet varying fastest
        for d in xrange(*self.parsed[3]):
            for c in xrange(*self.parsed[2]):
                for b in xrange(*self.parsed[1]):
                    for a in xrange(*self.parsed[0]):
                        yield "%i.%i.%i.%i" % (a,b,c,d)
    def choice(self):
        # pick one random address inside the network
        ip = []
        for v in self.parsed:
            ip.append(str(random.randint(v[0],v[1]-1)))
        return ".".join(ip)
    def __repr__(self):
        return "Net(%r)" % self.repr
    def __eq__(self, other):
        if hasattr(other, "parsed"):
            p2 = other.parsed
        else:
            p2,nm2 = self._parse_net(other)
        return self.parsed == p2
    def __contains__(self, other):
        if hasattr(other, "parsed"):
            p2 = other.parsed
        else:
            p2,nm2 = self._parse_net(other)
        # contained iff every octet interval of `other` fits inside ours
        for (a1,b1),(a2,b2) in zip(self.parsed,p2):
            if a1 > a2 or b1 < b2:
                return False
        return True
    def __rcontains__(self, other):
        # NOTE(review): __rcontains__ is not a Python protocol method;
        # presumably called explicitly elsewhere -- verify callers
        return self in self.__class__(other)
class OID(Gen):
    """Generate SNMP OIDs from a pattern where dotted components may be
    'lo-hi' ranges, e.g. OID("1.3.6.1-3.2") yields 1.3.6.1.2 ... 1.3.6.3.2.
    """
    name = "OID"
    def __init__(self, oid):
        self.oid = oid
        self.cmpt = []  # (lo, hi) inclusive bounds of each ranged component
        fmt = []
        for i in oid.split("."):
            if "-" in i:
                fmt.append("%i")
                self.cmpt.append(tuple(map(int, i.split("-"))))
            else:
                fmt.append(i)
        self.fmt = ".".join(fmt)
    def __repr__(self):
        return "OID(%r)" % self.oid
    def __iter__(self):
        # odometer-style enumeration over all ranged components
        ii = [k[0] for k in self.cmpt]
        while 1:
            yield self.fmt % tuple(ii)
            i = 0
            while 1:
                if i >= len(ii):
                    # every component wrapped around: enumeration complete.
                    # Bare `return` instead of `raise StopIteration`, which
                    # PEP 479 turns into a RuntimeError inside generators.
                    return
                if ii[i] < self.cmpt[i][1]:
                    ii[i]+=1
                    break
                else:
                    ii[i] = self.cmpt[i][0]
                    i += 1
######################################
## Packet abstract and base classes ##
######################################
class Packet_metaclass(type):
    """Metaclass of all packet classes.

    Resolves ``fields_desc`` (inlining references to other packet classes
    and inheriting it from bases), enforces ``__slots__``, and registers
    every new class with the global layer registry.
    """
    def __new__(cls, name, bases, dct):
        if "fields_desc" in dct: # perform resolution of references to other packets
            current_fld = dct["fields_desc"]
            resolved_fld = []
            for f in current_fld:
                if isinstance(f, Packet_metaclass): # reference to another fields_desc
                    for f2 in f.fields_desc:
                        resolved_fld.append(f2)
                else:
                    resolved_fld.append(f)
        else: # look for a field_desc in parent classes
            resolved_fld = None
            for b in bases:
                if hasattr(b,"fields_desc"):
                    resolved_fld = b.fields_desc
                    break
        if resolved_fld: # perform default value replacements
            final_fld = []
            for f in resolved_fld:
                # a class attribute named after a field overrides its default
                if f.name in dct:
                    f = f.copy()
                    f.default = dct[f.name]
                    del(dct[f.name])
                final_fld.append(f)
            dct["fields_desc"] = final_fld
        if "__slots__" not in dct:
            dct["__slots__"] = []
        # stash `name`/`overload_fields` under private names so they don't
        # clash with slot attributes
        for attr in ["name", "overload_fields"]:
            try:
                dct["_%s" % attr] = dct.pop(attr)
            except KeyError:
                pass
        newcls = super(Packet_metaclass, cls).__new__(cls, name, bases, dct)
        # union of every __slots__ along the MRO (the genexp's `cls` loop
        # variable deliberately shadows the method argument)
        newcls.__all_slots__ = set(
            attr
            for cls in newcls.__mro__ if hasattr(cls, "__slots__")
            for attr in cls.__slots__
        )
        if hasattr(newcls, "aliastypes"):
            newcls.aliastypes = [newcls] + newcls.aliastypes
        else:
            newcls.aliastypes = [newcls]
        if hasattr(newcls,"register_variant"):
            newcls.register_variant()
        for f in newcls.fields_desc:
            if hasattr(f, "register_owner"):
                f.register_owner(newcls)
        # imported here to avoid a circular import at module load time
        from scapy import config
        config.conf.layers.register(newcls)
        return newcls
    def __getattr__(self, attr):
        # allow PacketClass.fieldname to return the field descriptor
        for k in self.fields_desc:
            if k.name == attr:
                return k
        raise AttributeError(attr)
    def __call__(cls, *args, **kargs):
        # give the class a chance to substitute a more specific variant
        if "dispatch_hook" in cls.__dict__:
            try:
                cls = cls.dispatch_hook(*args, **kargs)
            except:
                from scapy import config
                if config.conf.debug_dissector:
                    raise
                # fall back to the raw layer when dissection fails
                cls = config.conf.raw_layer
        i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)
        i.__init__(*args, **kargs)
        return i
class Field_metaclass(type):
    """Metaclass for field classes: guarantees every field class defines
    ``__slots__`` (defaulting to an empty list)."""
    def __new__(cls, name, bases, dct):
        dct.setdefault("__slots__", [])
        return super(Field_metaclass, cls).__new__(cls, name, bases, dct)
class NewDefaultValues(Packet_metaclass):
    """NewDefaultValues is deprecated (not needed anymore)

    remove this:
        __metaclass__ = NewDefaultValues
    and it should still work.
    """
    def __new__(cls, name, bases, dct):
        from scapy.error import log_loading
        import traceback
        try:
            # walk the stack to find the `class` statement that used this
            # metaclass, so the warning can point at the offending file/line
            for tb in traceback.extract_stack()+[("??",-1,None,"")]:
                f,l,_,line = tb
                if line.startswith("class"):
                    break
        except:
            # NOTE(review): the fallback assignment is dead code because
            # `raise` re-raises immediately -- confirm intended behaviour
            f,l="??",-1
            raise
        log_loading.warning("Deprecated (no more needed) use of NewDefaultValues  (%s l. %i)." % (f,l))
        return super(NewDefaultValues, cls).__new__(cls, name, bases, dct)
class BasePacket(Gen):
    """Abstract root of packet classes; used by SetGen's isinstance checks."""
    __slots__ = []
#############################
## Packet list base classe ##
#############################
class BasePacketList(object):
    """Abstract root of packet-list classes; used by SetGen's isinstance checks."""
    __slots__ = []
|
fitzgen/servo | refs/heads/master | tests/wpt/css-tests/tools/__init__.py | 138 | from . import localpaths as _localpaths
|
4eek/edx-platform | refs/heads/master | cms/djangoapps/contentstore/views/tests/test_transcripts.py | 121 | """Tests for items views."""
import copy
import json
import os
import tempfile
import textwrap
from uuid import uuid4
from mock import patch
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.conf import settings
from contentstore.tests.utils import CourseTestCase, mock_requests_get
from cache_toolbox.core import del_cached_content
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from opaque_keys.edx.keys import UsageKey
from xmodule.video_module import transcripts_utils
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class BaseTranscripts(CourseTestCase):
    """Base test class for transcripts tests."""
    def clear_subs_content(self):
        """Remove, if transcripts content exists."""
        for youtube_id in self.get_youtube_ids().values():
            filename = 'subs_{0}.srt.sjson'.format(youtube_id)
            content_location = StaticContent.compute_location(self.course.id, filename)
            try:
                content = contentstore().find(content_location)
                contentstore().delete(content.get_id())
            except NotFoundError:
                # nothing stored for this id -- already clean
                pass
    def setUp(self):
        """Create initial data: a video module with four youtube speeds and
        no stored transcripts."""
        super(BaseTranscripts, self).setUp()
        # Add video module
        data = {
            'parent_locator': unicode(self.course.location),
            'category': 'video',
            'type': 'video'
        }
        resp = self.client.ajax_post('/xblock/', data)
        self.assertEqual(resp.status_code, 200)
        self.video_usage_key = self._get_usage_key(resp)
        self.item = modulestore().get_item(self.video_usage_key)
        # hI10vDNYz4M - valid Youtube ID with transcripts.
        # JMD_ifUUfsU, AKqURZnYqpk, DYpADpL7jAY - valid Youtube IDs without transcripts.
        self.item.data = '<video youtube="0.75:JMD_ifUUfsU,1.0:hI10vDNYz4M,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY" />'
        modulestore().update_item(self.item, self.user.id)
        self.item = modulestore().get_item(self.video_usage_key)
        # Remove all transcripts for current module.
        self.clear_subs_content()
    def _get_usage_key(self, resp):
        """ Returns the usage key from the response returned by a create operation. """
        usage_key_string = json.loads(resp.content).get('locator')
        return UsageKey.from_string(usage_key_string)
    def get_youtube_ids(self):
        """Return youtube speeds and ids."""
        item = modulestore().get_item(self.video_usage_key)
        # speed multiplier -> youtube video id, as stored on the video module
        return {
            0.75: item.youtube_id_0_75,
            1: item.youtube_id_1_0,
            1.25: item.youtube_id_1_25,
            1.5: item.youtube_id_1_5
        }
class TestUploadTranscripts(BaseTranscripts):
"""Tests for '/transcripts/upload' url."""
    def setUp(self):
        """Create initial data: three srt fixtures (valid content, invalid
        content, bad extension) plus a buffer for the BOM test."""
        super(TestUploadTranscripts, self).setUp()
        self.good_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
        self.good_srt_file.write(textwrap.dedent("""
            1
            00:00:10,500 --> 00:00:13,000
            Elephant's Dream

            2
            00:00:15,000 --> 00:00:18,000
            At the left we can see...
        """))
        self.good_srt_file.seek(0)
        # syntactically invalid SubRip payload
        self.bad_data_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
        self.bad_data_srt_file.write('Some BAD data')
        self.bad_data_srt_file.seek(0)
        # valid payload but unsupported file extension
        self.bad_name_srt_file = tempfile.NamedTemporaryFile(suffix='.BAD')
        self.bad_name_srt_file.write(textwrap.dedent("""
            1
            00:00:10,500 --> 00:00:13,000
            Elephant's Dream

            2
            00:00:15,000 --> 00:00:18,000
            At the left we can see...
        """))
        self.bad_name_srt_file.seek(0)
        self.ufeff_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
    def test_success_video_module_source_subs_uploading(self):
        """Uploading a valid srt for an html5 video returns Success, stores
        sjson content, and sets the module's `sub` to the uploaded filename."""
        self.item.data = textwrap.dedent("""
            <video youtube="">
                <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
                <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
                <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
            </video>
        """)
        modulestore().update_item(self.item, self.user.id)
        link = reverse('upload_transcripts')
        filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
        resp = self.client.post(link, {
            'locator': self.video_usage_key,
            'transcript-file': self.good_srt_file,
            'video_list': json.dumps([{
                'type': 'html5',
                'video': filename,
                'mode': 'mp4',
            }])
        })
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(json.loads(resp.content).get('status'), 'Success')
        item = modulestore().get_item(self.video_usage_key)
        self.assertEqual(item.sub, filename)
        # the converted sjson transcript must now exist in the contentstore
        content_location = StaticContent.compute_location(
            self.course.id, 'subs_{0}.srt.sjson'.format(filename))
        self.assertTrue(contentstore().find(content_location))
    def test_fail_data_without_id(self):
        """Upload without a locator must return 400 with an explanatory status."""
        link = reverse('upload_transcripts')
        resp = self.client.post(link, {'transcript-file': self.good_srt_file})
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(json.loads(resp.content).get('status'), 'POST data without "locator" form data.')
    def test_fail_data_without_file(self):
        """Upload without a file must return 400 with an explanatory status."""
        link = reverse('upload_transcripts')
        resp = self.client.post(link, {'locator': self.video_usage_key})
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(json.loads(resp.content).get('status'), 'POST data without "file" form data.')
    def test_fail_data_with_bad_locator(self):
        """Malformed and non-existent locators must both be rejected with 400."""
        # Test for raising `InvalidLocationError` exception.
        link = reverse('upload_transcripts')
        filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
        resp = self.client.post(link, {
            'locator': 'BAD_LOCATOR',
            'transcript-file': self.good_srt_file,
            'video_list': json.dumps([{
                'type': 'html5',
                'video': filename,
                'mode': 'mp4',
            }])
        })
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
        # Test for raising `ItemNotFoundError` exception.
        link = reverse('upload_transcripts')
        filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
        resp = self.client.post(link, {
            'locator': '{0}_{1}'.format(self.video_usage_key, 'BAD_LOCATOR'),
            'transcript-file': self.good_srt_file,
            'video_list': json.dumps([{
                'type': 'html5',
                'video': filename,
                'mode': 'mp4',
            }])
        })
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
    def test_fail_for_non_video_module(self):
        """Transcript upload must be rejected for non-'video' modules."""
        # non_video module: setup
        data = {
            'parent_locator': unicode(self.course.location),
            'category': 'non_video',
            'type': 'non_video'
        }
        resp = self.client.ajax_post('/xblock/', data)
        usage_key = self._get_usage_key(resp)
        item = modulestore().get_item(usage_key)
        item.data = '<non_video youtube="0.75:JMD_ifUUfsU,1.0:hI10vDNYz4M" />'
        modulestore().update_item(item, self.user.id)
        # non_video module: testing
        link = reverse('upload_transcripts')
        filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
        resp = self.client.post(link, {
            'locator': unicode(usage_key),
            'transcript-file': self.good_srt_file,
            'video_list': json.dumps([{
                'type': 'html5',
                'video': filename,
                'mode': 'mp4',
            }])
        })
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.')
    def test_fail_bad_xml(self):
        """Broken module XML makes category detection fail, producing the
        'video modules only' error."""
        self.item.data = '<<<video youtube="0.75:JMD_ifUUfsU,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY" />'
        modulestore().update_item(self.item, self.user.id)
        link = reverse('upload_transcripts')
        filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
        resp = self.client.post(link, {
            'locator': unicode(self.video_usage_key),
            'transcript-file': self.good_srt_file,
            'video_list': json.dumps([{
                'type': 'html5',
                'video': filename,
                'mode': 'mp4',
            }])
        })
        self.assertEqual(resp.status_code, 400)
        # incorrect xml produces incorrect item category error
        self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.')
    def test_fail_bad_data_srt_file(self):
        """A file with invalid SubRip content must be rejected during parsing."""
        link = reverse('upload_transcripts')
        filename = os.path.splitext(os.path.basename(self.bad_data_srt_file.name))[0]
        resp = self.client.post(link, {
            'locator': unicode(self.video_usage_key),
            'transcript-file': self.bad_data_srt_file,
            'video_list': json.dumps([{
                'type': 'html5',
                'video': filename,
                'mode': 'mp4',
            }])
        })
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(json.loads(resp.content).get('status'), 'Something wrong with SubRip transcripts file during parsing.')
def test_fail_bad_name_srt_file(self):
    # A transcript file whose name lacks the ``.srt`` extension is rejected.
    link = reverse('upload_transcripts')
    filename = os.path.splitext(os.path.basename(self.bad_name_srt_file.name))[0]
    resp = self.client.post(link, {
        'locator': unicode(self.video_usage_key),
        'transcript-file': self.bad_name_srt_file,
        'video_list': json.dumps([{
            'type': 'html5',
            'video': filename,
            'mode': 'mp4',
        }])
    })
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(json.loads(resp.content).get('status'), 'We support only SubRip (*.srt) transcripts format.')
def test_undefined_file_extension(self):
    """A transcript file whose name has no extension at all is rejected."""
    srt_file = tempfile.NamedTemporaryFile(suffix='')
    srt_file.write(textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
"""))
    srt_file.seek(0)
    link = reverse('upload_transcripts')
    filename = os.path.splitext(os.path.basename(srt_file.name))[0]
    resp = self.client.post(link, {
        # Pass the locator through unicode() for consistency with the other
        # upload tests in this class.
        'locator': unicode(self.video_usage_key),
        'transcript-file': srt_file,
        'video_list': json.dumps([{
            'type': 'html5',
            'video': filename,
            'mode': 'mp4',
        }])
    })
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(json.loads(resp.content).get('status'), 'Undefined file extension.')
def test_subs_uploading_with_byte_order_mark(self):
    """
    Test uploading subs containing BOM(Byte Order Mark), e.g. U+FEFF
    """
    filedata = textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Test ufeff characters
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
""").encode('utf-8-sig')
    # Verify that ufeff character is in filedata.
    # NOTE(review): this asserts on the literal ASCII text "ufeff" embedded
    # in the cue above, not on the actual U+FEFF BOM bytes that
    # encode('utf-8-sig') prepends — confirm that is intended.
    self.assertIn("ufeff", filedata)
    self.ufeff_srt_file.write(filedata)
    self.ufeff_srt_file.seek(0)
    link = reverse('upload_transcripts')
    filename = os.path.splitext(os.path.basename(self.ufeff_srt_file.name))[0]
    resp = self.client.post(link, {
        'locator': self.video_usage_key,
        'transcript-file': self.ufeff_srt_file,
        'video_list': json.dumps([{
            'type': 'html5',
            'video': filename,
            'mode': 'mp4',
        }])
    })
    self.assertEqual(resp.status_code, 200)
    # On success the converted sjson transcript must exist in the
    # contentstore and contain the uploaded cue text.
    content_location = StaticContent.compute_location(
        self.course.id, 'subs_{0}.srt.sjson'.format(filename))
    self.assertTrue(contentstore().find(content_location))
    subs_text = json.loads(contentstore().find(content_location).data).get('text')
    self.assertIn("Test ufeff characters", subs_text)
def tearDown(self):
    """Close the temporary transcript fixture files opened in setUp."""
    super(TestUploadTranscripts, self).tearDown()
    self.good_srt_file.close()
    self.bad_data_srt_file.close()
    self.bad_name_srt_file.close()
    self.ufeff_srt_file.close()
class TestDownloadTranscripts(BaseTranscripts):
    """Tests for '/transcripts/download' url."""
    def save_subs_to_store(self, subs, subs_id):
        """Save transcripts into `StaticContent`."""
        filedata = json.dumps(subs, indent=2)
        mime_type = 'application/json'
        filename = 'subs_{0}.srt.sjson'.format(subs_id)
        content_location = StaticContent.compute_location(self.course.id, filename)
        content = StaticContent(content_location, filename, mime_type, filedata)
        contentstore().save(content)
        del_cached_content(content_location)
        return content_location
    def test_success_download_youtube(self):
        # Subs stored under the youtube id are downloadable in SRT format.
        self.item.data = '<video youtube="1:JMD_ifUUfsU" />'
        modulestore().update_item(self.item, self.user.id)
        subs = {
            'start': [100, 200, 240],
            'end': [200, 240, 380],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3'
            ]
        }
        self.save_subs_to_store(subs, 'JMD_ifUUfsU')
        link = reverse('download_transcripts')
        resp = self.client.get(link, {'locator': self.video_usage_key, 'subs_id': "JMD_ifUUfsU"})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, """0\n00:00:00,100 --> 00:00:00,200\nsubs #1\n\n1\n00:00:00,200 --> 00:00:00,240\nsubs #2\n\n2\n00:00:00,240 --> 00:00:00,380\nsubs #3\n\n""")
    def test_success_download_nonyoutube(self):
        # Subs referenced via the html5 ``sub`` attribute are also downloadable.
        subs_id = str(uuid4())
        self.item.data = textwrap.dedent("""
<video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""".format(subs_id))
        modulestore().update_item(self.item, self.user.id)
        subs = {
            'start': [100, 200, 240],
            'end': [200, 240, 380],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3'
            ]
        }
        self.save_subs_to_store(subs, subs_id)
        link = reverse('download_transcripts')
        resp = self.client.get(link, {'locator': self.video_usage_key, 'subs_id': subs_id})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            resp.content,
            '0\n00:00:00,100 --> 00:00:00,200\nsubs #1\n\n1\n00:00:00,200 --> '
            '00:00:00,240\nsubs #2\n\n2\n00:00:00,240 --> 00:00:00,380\nsubs #3\n\n'
        )
        # Clean up: other tests in this class reuse the same course/item.
        transcripts_utils.remove_subs_from_store(subs_id, self.item)
    def test_fail_data_without_file(self):
        # Missing or empty locator yields 404.
        link = reverse('download_transcripts')
        resp = self.client.get(link, {'locator': ''})
        self.assertEqual(resp.status_code, 404)
        resp = self.client.get(link, {})
        self.assertEqual(resp.status_code, 404)
    def test_fail_data_with_bad_locator(self):
        # Test for raising `InvalidLocationError` exception.
        link = reverse('download_transcripts')
        resp = self.client.get(link, {'locator': 'BAD_LOCATOR'})
        self.assertEqual(resp.status_code, 404)
        # Test for raising `ItemNotFoundError` exception.
        link = reverse('download_transcripts')
        resp = self.client.get(link, {'locator': '{0}_{1}'.format(self.video_usage_key, 'BAD_LOCATOR')})
        self.assertEqual(resp.status_code, 404)
    def test_fail_for_non_video_module(self):
        # Download is only supported for 'video' category items; a
        # 'videoalpha' block with otherwise valid subs must 404.
        data = {
            'parent_locator': unicode(self.course.location),
            'category': 'videoalpha',
            'type': 'videoalpha'
        }
        resp = self.client.ajax_post('/xblock/', data)
        usage_key = self._get_usage_key(resp)
        subs_id = str(uuid4())
        item = modulestore().get_item(usage_key)
        item.data = textwrap.dedent("""
<videoalpha youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</videoalpha>
""".format(subs_id))
        modulestore().update_item(item, self.user.id)
        subs = {
            'start': [100, 200, 240],
            'end': [200, 240, 380],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3'
            ]
        }
        self.save_subs_to_store(subs, subs_id)
        link = reverse('download_transcripts')
        resp = self.client.get(link, {'locator': unicode(usage_key)})
        self.assertEqual(resp.status_code, 404)
    def test_fail_nonyoutube_subs_dont_exist(self):
        # ``sub`` points at an id that was never saved to the contentstore.
        self.item.data = textwrap.dedent("""
<video youtube="" sub="UNDEFINED">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""")
        modulestore().update_item(self.item, self.user.id)
        link = reverse('download_transcripts')
        resp = self.client.get(link, {'locator': self.video_usage_key})
        self.assertEqual(resp.status_code, 404)
    def test_empty_youtube_attr_and_sub_attr(self):
        # With neither a youtube id nor a ``sub`` attribute there is nothing
        # to download.
        self.item.data = textwrap.dedent("""
<video youtube="">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""")
        modulestore().update_item(self.item, self.user.id)
        link = reverse('download_transcripts')
        resp = self.client.get(link, {'locator': self.video_usage_key})
        self.assertEqual(resp.status_code, 404)
    def test_fail_bad_sjson_subs(self):
        # NOTE(review): the subs are deliberately stored under the unrelated
        # id 'JMD_ifUUfsU' (and with mismatched start/end/text lengths), so
        # no sjson exists for ``subs_id`` and the download 404s.
        subs_id = str(uuid4())
        self.item.data = textwrap.dedent("""
<video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""".format(subs_id))
        modulestore().update_item(self.item, self.user.id)
        subs = {
            'start': [100, 200, 240],
            'end': [200, 240, 380],
            'text': [
                'subs #1'
            ]
        }
        self.save_subs_to_store(subs, 'JMD_ifUUfsU')
        link = reverse('download_transcripts')
        resp = self.client.get(link, {'locator': self.video_usage_key})
        self.assertEqual(resp.status_code, 404)
class TestCheckTranscripts(BaseTranscripts):
    """Tests for '/transcripts/check' url."""
    def save_subs_to_store(self, subs, subs_id):
        """Save transcripts into `StaticContent`."""
        filedata = json.dumps(subs, indent=2)
        mime_type = 'application/json'
        filename = 'subs_{0}.srt.sjson'.format(subs_id)
        content_location = StaticContent.compute_location(self.course.id, filename)
        content = StaticContent(content_location, filename, mime_type, filedata)
        contentstore().save(content)
        del_cached_content(content_location)
        return content_location
    def test_success_download_nonyoutube(self):
        # An html5 video whose sjson subs exist locally is reported as
        # 'found', with html5_local listing the stored subs id.
        subs_id = str(uuid4())
        self.item.data = textwrap.dedent("""
<video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""".format(subs_id))
        modulestore().update_item(self.item, self.user.id)
        subs = {
            'start': [100, 200, 240],
            'end': [200, 240, 380],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3'
            ]
        }
        self.save_subs_to_store(subs, subs_id)
        data = {
            'locator': unicode(self.video_usage_key),
            'videos': [{
                'type': 'html5',
                'video': subs_id,
                'mode': 'mp4',
            }]
        }
        link = reverse('check_transcripts')
        resp = self.client.get(link, {'data': json.dumps(data)})
        self.assertEqual(resp.status_code, 200)
        self.assertDictEqual(
            json.loads(resp.content),
            {
                u'status': u'Success',
                u'subs': unicode(subs_id),
                u'youtube_local': False,
                u'is_youtube_mode': False,
                u'youtube_server': False,
                u'command': u'found',
                u'current_item_subs': unicode(subs_id),
                u'youtube_diff': True,
                u'html5_local': [unicode(subs_id)],
                u'html5_equal': False,
            }
        )
        # Clean up stored subs so later tests see a pristine contentstore.
        transcripts_utils.remove_subs_from_store(subs_id, self.item)
    def test_check_youtube(self):
        # Youtube-mode check: subs exist locally for the youtube id, while
        # the (unmocked) youtube server is reported as unavailable.
        self.item.data = '<video youtube="1:JMD_ifUUfsU" />'
        modulestore().update_item(self.item, self.user.id)
        subs = {
            'start': [100, 200, 240],
            'end': [200, 240, 380],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3'
            ]
        }
        self.save_subs_to_store(subs, 'JMD_ifUUfsU')
        link = reverse('check_transcripts')
        data = {
            'locator': unicode(self.video_usage_key),
            'videos': [{
                'type': 'youtube',
                'video': 'JMD_ifUUfsU',
                'mode': 'youtube',
            }]
        }
        resp = self.client.get(link, {'data': json.dumps(data)})
        self.assertEqual(resp.status_code, 200)
        self.assertDictEqual(
            json.loads(resp.content),
            {
                u'status': u'Success',
                u'subs': u'JMD_ifUUfsU',
                u'youtube_local': True,
                u'is_youtube_mode': True,
                u'youtube_server': False,
                u'command': u'found',
                u'current_item_subs': None,
                u'youtube_diff': True,
                u'html5_local': [],
                u'html5_equal': False,
            }
        )
    @patch('xmodule.video_module.transcripts_utils.requests.get', side_effect=mock_requests_get)
    def test_check_youtube_with_transcript_name(self, mock_get):
        """
        Test that the transcripts are fetched correctly when the transcript name is set
        """
        self.item.data = '<video youtube="good_id_2" />'
        modulestore().update_item(self.item, self.user.id)
        subs = {
            'start': [100, 200, 240],
            'end': [200, 240, 380],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3'
            ]
        }
        self.save_subs_to_store(subs, 'good_id_2')
        link = reverse('check_transcripts')
        data = {
            'locator': unicode(self.video_usage_key),
            'videos': [{
                'type': 'youtube',
                'video': 'good_id_2',
                'mode': 'youtube',
            }]
        }
        resp = self.client.get(link, {'data': json.dumps(data)})
        # The view must query the youtube timedtext API with the custom
        # transcript name (the mocked requests.get records the call).
        mock_get.assert_any_call(
            'http://video.google.com/timedtext',
            params={'lang': 'en', 'v': 'good_id_2', 'name': 'Custom'}
        )
        self.assertEqual(resp.status_code, 200)
        self.assertDictEqual(
            json.loads(resp.content),
            {
                u'status': u'Success',
                u'subs': u'good_id_2',
                u'youtube_local': True,
                u'is_youtube_mode': True,
                u'youtube_server': True,
                u'command': u'replace',
                u'current_item_subs': None,
                u'youtube_diff': True,
                u'html5_local': [],
                u'html5_equal': False,
            }
        )
    def test_fail_data_without_id(self):
        # Empty locator cannot be resolved to an item.
        link = reverse('check_transcripts')
        data = {
            'locator': '',
            'videos': [{
                'type': '',
                'video': '',
                'mode': '',
            }]
        }
        resp = self.client.get(link, {'data': json.dumps(data)})
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
    def test_fail_data_with_bad_locator(self):
        # Test for raising `InvalidLocationError` exception.
        link = reverse('check_transcripts')
        data = {
            'locator': '',
            'videos': [{
                'type': '',
                'video': '',
                'mode': '',
            }]
        }
        resp = self.client.get(link, {'data': json.dumps(data)})
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
        # Test for raising `ItemNotFoundError` exception.
        data = {
            'locator': '{0}_{1}'.format(self.video_usage_key, 'BAD_LOCATOR'),
            'videos': [{
                'type': '',
                'video': '',
                'mode': '',
            }]
        }
        resp = self.client.get(link, {'data': json.dumps(data)})
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
    def test_fail_for_non_video_module(self):
        # Not video module: setup
        data = {
            'parent_locator': unicode(self.course.location),
            'category': 'not_video',
            'type': 'not_video'
        }
        resp = self.client.ajax_post('/xblock/', data)
        usage_key = self._get_usage_key(resp)
        subs_id = str(uuid4())
        item = modulestore().get_item(usage_key)
        # NOTE(review): the opening tag is <not_video> but the closing tag is
        # </videoalpha>; the mismatch appears harmless for this test (the
        # category check fails first) but confirm it is intentional.
        item.data = textwrap.dedent("""
<not_video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</videoalpha>
""".format(subs_id))
        modulestore().update_item(item, self.user.id)
        subs = {
            'start': [100, 200, 240],
            'end': [200, 240, 380],
            'text': [
                'subs #1',
                'subs #2',
                'subs #3'
            ]
        }
        self.save_subs_to_store(subs, subs_id)
        data = {
            'locator': unicode(usage_key),
            'videos': [{
                'type': '',
                'video': '',
                'mode': '',
            }]
        }
        link = reverse('check_transcripts')
        resp = self.client.get(link, {'data': json.dumps(data)})
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.')
|
gsd-ufal/Juliabox | refs/heads/master | engine/src/juliabox/api/api_queue.py | 8 | __author__ = 'tan'
import time
import zmq
from datetime import timedelta
from zmq.devices.basedevice import ThreadDevice
from juliabox.db import JBoxAPISpec
from juliabox.jbox_util import LoggerMixin
from juliabox.cloud import Compute
class APIQueue(LoggerMixin):
    """Per-API ZeroMQ request queue.

    Each API name gets a threaded zmq QUEUE device that bridges an XREP
    (router) front-end for callers and an XREQ (dealer) back-end for
    workers, bound to two random TCP ports on the docker bridge.  Released
    queues are kept in QUEUE_CACHE and re-purposed for other APIs via
    reset(), so their device/ports are reused.
    """
    BUFFER_SZ = 20     # socket high-water mark: max messages buffered per side
    QUEUES = dict()    # api_name -> active APIQueue
    QLEN_WT = 0.25     # history weight in the moving average of queue length
    QUEUE_CACHE = []   # released APIQueue objects available for reuse
    def __init__(self, api_name):
        self.cmd = self.image_name = self.api_name = None
        self.num_outstanding = self.mean_outstanding = self.timeout = 0
        self.reset(api_name)
        # Forwarder device: XREP accepts client requests, XREQ feeds workers.
        self.qdev = qdev = ThreadDevice(zmq.QUEUE, zmq.XREP, zmq.XREQ)
        endpt_in, endpt_out = APIQueue.allocate_random_endpoints()
        self.endpoints = endpt_in, endpt_out
        qdev.bind_in(endpt_in)
        qdev.bind_out(endpt_out)
        # zmq >= 3 replaced the single HWM socket option with SNDHWM/RCVHWM,
        # so both directions are capped explicitly on each side.
        if APIQueue._zmq_major_ver() > 2:
            qdev.setsockopt_in(zmq.SNDHWM, APIQueue.BUFFER_SZ)
            qdev.setsockopt_out(zmq.RCVHWM, APIQueue.BUFFER_SZ)
            qdev.setsockopt_in(zmq.RCVHWM, APIQueue.BUFFER_SZ)
            qdev.setsockopt_out(zmq.SNDHWM, APIQueue.BUFFER_SZ)
        else:
            qdev.setsockopt_in(zmq.HWM, APIQueue.BUFFER_SZ)
            qdev.setsockopt_out(zmq.HWM, APIQueue.BUFFER_SZ)
        qdev.start()
        APIQueue.QUEUES[api_name] = self
        self.log_debug("Created " + self.debug_str())
    def reset(self, api_name):
        """(Re)configure this queue for `api_name` from its JBoxAPISpec record."""
        self.api_name = api_name
        self.num_outstanding = 0
        self.mean_outstanding = 0
        spec = JBoxAPISpec(api_name)
        timeout_secs = spec.get_timeout_secs()
        # A missing timeout in the spec means "no timeout" (None), not 0.
        self.timeout = timedelta(seconds=timeout_secs) if timeout_secs is not None else None
        self.cmd = spec.get_cmd()
        self.image_name = spec.get_image_name()
    @staticmethod
    def _zmq_major_ver():
        # Major version of the linked libzmq, e.g. 4 for "4.1.2".
        return int(zmq.zmq_version()[0])
    def debug_str(self):
        """One-line human-readable summary of this queue for log messages."""
        return "APIQueue %s (%s, %s). outstanding: %g, %g" % (self.api_name, self.get_endpoint_in(),
                                                              self.get_endpoint_out(), self.num_outstanding,
                                                              self.mean_outstanding)
    def get_endpoint_in(self):
        # Endpoint the XREP (caller-facing) side is bound to.
        return self.endpoints[0]
    def get_endpoint_out(self):
        # Endpoint the XREQ (worker-facing) side is bound to.
        return self.endpoints[1]
    def get_timeout(self):
        # timedelta, or None when the API spec defines no timeout.
        return self.timeout
    def get_command(self):
        return self.cmd
    def get_image_name(self):
        return self.image_name
    @staticmethod
    def release_queue(api_name):
        """Detach the queue for `api_name` and park it in QUEUE_CACHE for reuse."""
        queue = APIQueue.get_queue(api_name, alloc=False)
        if queue is None:
            return
        del APIQueue.QUEUES[api_name]
        APIQueue.QUEUE_CACHE.append(queue)
        APIQueue.log_debug("Released (cached) queue: %s", queue.debug_str())
    @staticmethod
    def get_queue(api_name, alloc=True):
        """Return the queue for `api_name`, creating/reusing one when `alloc`.

        Returns None when no queue exists and `alloc` is False.
        """
        if api_name in APIQueue.QUEUES:
            return APIQueue.QUEUES[api_name]
        elif alloc:
            if len(APIQueue.QUEUE_CACHE) > 0:
                queue = APIQueue.QUEUE_CACHE.pop()
                queue.reset(api_name)
                APIQueue.QUEUES[api_name] = queue
                APIQueue.log_debug("Created (reused) queue: %s", queue.debug_str())
            else:
                queue = APIQueue(api_name)
                APIQueue.log_debug("Created queue: %s", queue.debug_str())
            return queue
        return None
    @staticmethod
    def allocate_random_endpoints():
        """Pick two free TCP ports on the docker bridge and return their endpoints.

        NOTE(review): the ports are discovered by binding a throwaway socket
        and then releasing them (with a short sleep) before the queue device
        re-binds — another process could grab a port in that window; confirm
        this race is acceptable in deployment.
        """
        ctx = zmq.Context.instance()
        binder = ctx.socket(zmq.REQ)
        bind_pfx = "tcp://" + Compute.get_docker_bridge_ip()
        port_in = binder.bind_to_random_port(bind_pfx)
        port_out = binder.bind_to_random_port(bind_pfx)
        binder.close()
        time.sleep(0.25)
        endpoint_in = bind_pfx + str(':') + str(port_in)
        endpoint_out = bind_pfx + str(':') + str(port_out)
        return endpoint_in, endpoint_out
    def incr_outstanding(self, num):
        """Adjust the outstanding-request count (num may be negative).

        mean_outstanding is an exponentially weighted moving average of the
        queue length, with QLEN_WT controlling how much history is retained.
        """
        self.num_outstanding += num
        self.mean_outstanding = (APIQueue.QLEN_WT * self.mean_outstanding + self.num_outstanding) / (1+APIQueue.QLEN_WT)
|
Mhynlo/SickRage | refs/heads/master | lib/pyasn1/codec/cer/decoder.py | 261 | # CER decoder
from pyasn1.type import univ
from pyasn1.codec.ber import decoder
from pyasn1.compat.octets import oct2int
from pyasn1 import error
class BooleanDecoder(decoder.AbstractSimpleDecoder):
    """Boolean decoder enforcing the canonical CER/DER encoding."""
    protoComponent = univ.Boolean(0)
    def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
                     state, decodeFun, substrateFun):
        payload = substrate[:length]
        remainder = substrate[length:]
        if not payload:
            raise error.PyAsn1Error('Empty substrate')
        first_octet = oct2int(payload[0])
        # CER/DER admit exactly two encodings -- 0xFF for TRUE and 0x00 for
        # FALSE (X.690 sections 8.2.2 and 11.1); BER's "any non-zero is TRUE"
        # relaxation is a violation here.
        if first_octet == 0xff:
            truth = 1
        elif first_octet == 0x00:
            truth = 0
        else:
            raise error.PyAsn1Error('Boolean CER violation: %s' % first_octet)
        return self._createComponent(asn1Spec, tagSet, truth), remainder
# Module-level decoding maps: start from the BER maps and override the
# Boolean handler with the strict CER variant defined above.
tagMap = decoder.tagMap.copy()
tagMap.update({
    univ.Boolean.tagSet: BooleanDecoder()
})
typeMap = decoder.typeMap
# CER decoder is the BER decoder driven by the CER-specific maps above.
class Decoder(decoder.Decoder): pass
# Single module-level entry point mirroring the BER module's API.
# NOTE(review): passes decoder.typeMap directly; this is the same object as
# the local `typeMap` alias above, so behavior is unchanged either way.
decode = Decoder(tagMap, decoder.typeMap)
|
miracle2k/stgit | refs/heads/master | stgit/commands/sink.py | 1 |
__copyright__ = """
Copyright (C) 2007, Yann Dirson <ydirson@altern.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from stgit.argparse import opt
from stgit.commands import common
from stgit.lib import transaction
from stgit import argparse
# Command metadata consumed by stgit's argparse-based command framework:
# short help line, command category, usage pattern, long description,
# positional-argument completion hints and option definitions.
help = 'Send patches deeper down the stack'
kind = 'stack'
usage = ['[-t <target patch>] [-n] [<patches>]']
description = """
This is the opposite operation of linkstg:float[]: move the specified
patches down the stack. It is for example useful to group stable
patches near the bottom of the stack, where they are less likely to be
impacted by the push of another patch, and from where they can be more
easily committed or pushed.
If no patch is specified on command-line, the current patch gets sunk.
By default patches are sunk to the bottom of the stack, but the '--to'
option allows to place them under any applied patch.
Sinking internally involves popping all patches (or all patches
including <target patch>), then pushing the patches to sink, and then
(unless '--nopush' is also given) pushing back into place the
formerly-applied patches."""
args = [argparse.patch_range(argparse.applied_patches,
                             argparse.unapplied_patches)]
options = [
    opt('-n', '--nopush', action = 'store_true',
        short = 'Do not push the patches back after sinking', long = """
Do not push back on the stack the formerly-applied patches.
Only the patches to sink are pushed."""),
    opt('-t', '--to', metavar = 'TARGET', args = [argparse.applied_patches],
        short = 'Sink patches below the TARGET patch', long = """
Specify a target patch to place the patches below, instead of
sinking them to the bottom of the stack.""")
] + argparse.keep_option()
directory = common.DirectoryHasRepositoryLib()
def func(parser, options, args):
    """Sink patches down the stack.

    Reorders the stack so the selected patches (default: the current patch)
    sit at the bottom, or just below --to's target.  Unless --nopush is
    given, the formerly-applied patches are pushed back on top.  Raises
    CmdException on an unusable target or an empty patch selection.
    """
    stack = directory.repository.current_stack
    if options.to and options.to not in stack.patchorder.applied:
        raise common.CmdException('Cannot sink below %s since it is not applied'
                                  % options.to)
    if args:
        patches = common.parse_patches(args, stack.patchorder.all)
    else:
        # No patches named on the command line: sink the current (topmost
        # applied) patch, if any.
        patches = list(stack.patchorder.applied[-1:])
    if not patches:
        raise common.CmdException('No patches to sink')
    if options.to and options.to in patches:
        raise common.CmdException('Cannot have a sinked patch as target')
    # Build the desired applied order: keep the non-sunk patches, then splice
    # the sunk ones in at the bottom (index 0) or just below the target.
    applied = [p for p in stack.patchorder.applied if p not in patches]
    if options.to:
        insert_idx = applied.index(options.to)
    else:
        insert_idx = 0
    applied = applied[:insert_idx] + patches + applied[insert_idx:]
    unapplied = [p for p in stack.patchorder.unapplied if p not in patches]
    iw = stack.repository.default_iw
    # Only demand a clean index/worktree when --keep was not given.
    clean_iw = (not options.keep and iw) or None
    trans = transaction.StackTransaction(stack, 'sink',
                                         check_clean_iw = clean_iw)
    try:
        trans.reorder_patches(applied, unapplied, iw = iw)
    except transaction.TransactionHalted:
        pass
    return trans.run(iw)
|
pedrobaeza/OpenUpgrade | refs/heads/8.0 | openerp/addons/base/tests/test_mail_examples.py | 302 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
MISC_HTML_SOURCE = """
<font size="2" style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">test1</font>
<div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; font-style: normal; ">
<b>test2</b></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<i>test3</i></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<u>test4</u></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<strike>test5</strike></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">
<font size="5">test6</font></div><div><ul><li><font color="#1f1f1f" face="monospace" size="2">test7</font></li><li>
<font color="#1f1f1f" face="monospace" size="2">test8</font></li></ul><div><ol><li><font color="#1f1f1f" face="monospace" size="2">test9</font>
</li><li><font color="#1f1f1f" face="monospace" size="2">test10</font></li></ol></div></div>
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><div><div><font color="#1f1f1f" face="monospace" size="2">
test11</font></div></div></div></blockquote><blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;">
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><font color="#1f1f1f" face="monospace" size="2">
test12</font></div><div><font color="#1f1f1f" face="monospace" size="2"><br></font></div></blockquote></blockquote>
<font color="#1f1f1f" face="monospace" size="2"><a href="http://google.com">google</a></font>
<a href="javascript:alert('malicious code')">test link</a>
"""
EDI_LIKE_HTML_SOURCE = """<div style="font-family: 'Lucica Grande', Ubuntu, Arial, Verdana, sans-serif; font-size: 12px; color: rgb(34, 34, 34); background-color: #FFF; ">
<p>Hello ${object.partner_id.name},</p>
<p>A new invoice is available for you: </p>
<p style="border-left: 1px solid #8e0000; margin-left: 30px;">
<strong>REFERENCES</strong><br />
Invoice number: <strong>${object.number}</strong><br />
Invoice total: <strong>${object.amount_total} ${object.currency_id.name}</strong><br />
Invoice date: ${object.date_invoice}<br />
Order reference: ${object.origin}<br />
Your contact: <a href="mailto:${object.user_id.email or ''}?subject=Invoice%20${object.number}">${object.user_id.name}</a>
</p>
<br/>
<p>It is also possible to directly pay with Paypal:</p>
<a style="margin-left: 120px;" href="${object.paypal_url}">
<img class="oe_edi_paypal_button" src="https://www.paypal.com/en_US/i/btn/btn_paynowCC_LG.gif"/>
</a>
<br/>
<p>If you have any question, do not hesitate to contact us.</p>
<p>Thank you for choosing ${object.company_id.name or 'us'}!</p>
<br/>
<br/>
<div style="width: 375px; margin: 0px; padding: 0px; background-color: #8E0000; border-top-left-radius: 5px 5px; border-top-right-radius: 5px 5px; background-repeat: repeat no-repeat;">
<h3 style="margin: 0px; padding: 2px 14px; font-size: 12px; color: #DDD;">
<strong style="text-transform:uppercase;">${object.company_id.name}</strong></h3>
</div>
<div style="width: 347px; margin: 0px; padding: 5px 14px; line-height: 16px; background-color: #F2F2F2;">
<span style="color: #222; margin-bottom: 5px; display: block; ">
${object.company_id.street}<br/>
${object.company_id.street2}<br/>
${object.company_id.zip} ${object.company_id.city}<br/>
${object.company_id.state_id and ('%s, ' % object.company_id.state_id.name) or ''} ${object.company_id.country_id.name or ''}<br/>
</span>
<div style="margin-top: 0px; margin-right: 0px; margin-bottom: 0px; margin-left: 0px; padding-top: 0px; padding-right: 0px; padding-bottom: 0px; padding-left: 0px; ">
Phone: ${object.company_id.phone}
</div>
<div>
Web : <a href="${object.company_id.website}">${object.company_id.website}</a>
</div>
</div>
</div></body></html>"""
OERP_WEBSITE_HTML_1 = """
<div>
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb16" data-snippet-id="colmd">
<h2>OpenERP HR Features</h2>
<h3 class="text-muted">Manage your company most important asset: People</h3>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg">
<h4 class="mt16">Streamline Recruitments</h4>
<p>Post job offers and keep track of each application received. Follow applicants in your recruitment process with the smart kanban view.</p>
<p>Save time by automating some communications with email templates. Resumes are indexed automatically, allowing you to easily find for specific profiles.</p>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/desert_thumb.jpg">
<h4 class="mt16">Enterprise Social Network</h4>
<p>Break down information silos. Share knowledge and best practices amongst all employees. Follow specific people or documents and join groups of interests to share expertise and documents.</p>
<p>Interact with your collegues in real time with live chat.</p>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg">
<h4 class="mt16">Leaves Management</h4>
<p>Keep track of the vacation days accrued by each employee. Employees enter their requests (paid holidays, sick leave, etc), for managers to approve and validate. It's all done in just a few clicks. The agenda of each employee is updated accordingly.</p>
</div>
</div>
</div>
</div>"""
OERP_WEBSITE_HTML_1_IN = [
'Manage your company most important asset: People',
'img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg"',
]
OERP_WEBSITE_HTML_1_OUT = [
'Break down information silos.',
'Keep track of the vacation days accrued by each employee',
'img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg',
]
# Website/blog HTML snippet (CKEditor-style markup with data-oe-* / data-cke-*
# attributes) used as a mail-body fixture.  The _IN / _OUT lists below give
# substrings that must survive / be stripped by the processing under test —
# presumably a "read more" / quote-removal pass; confirm against the consumer.
OERP_WEBSITE_HTML_2 = """
<div class="mt16 cke_widget_editable cke_widget_element oe_editable oe_dirty" data-oe-model="blog.post" data-oe-id="6" data-oe-field="content" data-oe-type="html" data-oe-translate="0" data-oe-expression="blog_post.content" data-cke-widget-data="{}" data-cke-widget-keep-attr="0" data-widget="oeref" contenteditable="true" data-cke-widget-editable="text">
<section class="mt16 mb16" data-snippet-id="text-block">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd">
<h2>
OpenERP Project Management
</h2>
<h3 class="text-muted">Infinitely flexible. Incredibly easy to use.</h3>
</div>
<div class="col-md-12 mb16 mt16" data-snippet-id="colmd">
<p>
OpenERP's <b>collaborative and realtime</b> project
management helps your team get work done. Keep
track of everything, from the big picture to the
minute details, from the customer contract to the
billing.
</p><p>
Organize projects around <b>your own processes</b>. Work
on tasks and issues using the kanban view, schedule
tasks using the gantt chart and control deadlines
in the calendar view. Every project may have it's
own stages allowing teams to optimize their job.
</p>
</div>
</div>
</div>
</section>
<section class="" data-snippet-id="image-text">
<div class="container">
<div class="row">
<div class="col-md-6 mt16 mb16" data-snippet-id="colmd">
<img class="img-responsive shadow" src="/website/static/src/img/image_text.jpg">
</div>
<div class="col-md-6 mt32" data-snippet-id="colmd">
<h3>Manage Your Shops</h3>
<p>
OpenERP's Point of Sale introduces a super clean
interface with no installation required that runs
online and offline on modern hardwares.
</p><p>
It's full integration with the company inventory
and accounting, gives you real time statistics and
consolidations amongst all shops without the hassle
of integrating several applications.
</p>
</div>
</div>
</div>
</section>
<section class="" data-snippet-id="text-image">
<div class="container">
<div class="row">
<div class="col-md-6 mt32" data-snippet-id="colmd">
<h3>Enterprise Social Network</h3>
<p>
Make every employee feel more connected and engaged
with twitter-like features for your own company. Follow
people, share best practices, 'like' top ideas, etc.
</p><p>
Connect with experts, follow what interests you, share
documents and promote best practices with OpenERP
Social application. Get work done with effective
collaboration across departments, geographies
and business applications.
</p>
</div>
<div class="col-md-6 mt16 mb16" data-snippet-id="colmd">
<img class="img-responsive shadow" src="/website/static/src/img/text_image.png">
</div>
</div>
</div>
</section><section class="" data-snippet-id="portfolio">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd">
<h2>Our Porfolio</h2>
<h4 class="text-muted">More than 500 successful projects</h4>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/landscape.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
</div>
</div>
</div>
</section>
</div>
"""
OERP_WEBSITE_HTML_2_IN = [
'management helps your team get work done',
]
OERP_WEBSITE_HTML_2_OUT = [
'Make every employee feel more connected',
'img class="img-responsive shadow" src="/website/static/src/img/text_image.png',
]
# Plain-text mail with a conventional "--" signature delimiter.
# _IN: body that must be kept; _OUT: signature that must be stripped.
TEXT_1 = """I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature"""
TEXT_1_IN = ["""I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
TEXT_1_OUT = ["""--
MySignature"""]
# Plain-text reply with usenet-style ">" quoting (nested ">>" levels).
# _IN: the new content; _OUT: the quoted history, including the quoted
# "--" signature of the previous sender.
TEXT_2 = """Salut Raoul!
Le 28 oct. 2012 à 00:02, Raoul Grosbedon a écrit :
> I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)
Of course. This seems viable.
> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> bert.tartopoils@miam.miam
>>
>
>
> --
> RaoulSignature
Bert TARTOPOILS
bert.tartopoils@miam.miam
"""
TEXT_2_IN = ["Salut Raoul!", "Of course. This seems viable."]
TEXT_2_OUT = ["I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)",
"""> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> bert.tartopoils@miam.miam
>>
>
>
> --
> RaoulSignature"""]
# Minimal HTML mail: one <p> containing body plus a "--" signature.
# _IN: body kept; _OUT: signature stripped.
HTML_1 = """<p>I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature</p>"""
HTML_1_IN = ["""I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
HTML_1_OUT = ["""--
MySignature"""]
# HTML mail with list markup and no quoted content: everything is new text,
# so the whole message must be preserved (_OUT is empty on purpose).
HTML_2 = """<div>
<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>
</div>
<div>
<ul>
<li><span>9 AM: brainstorming about our new amazing business app</span></li>
<li><span>9.45 AM: summary</span></li>
<li><span>10 AM: meeting with Fabien to present our app</span></li>
</ul>
</div>
<div>
<font><span>Is everything ok for you ?</span></font>
</div>"""
HTML_2_IN = ["<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>",
"<li><span>9 AM: brainstorming about our new amazing business app</span></li>",
"<li><span>9.45 AM: summary</span></li>",
"<li><span>10 AM: meeting with Fabien to present our app</span></li>",
"<font><span>Is everything ok for you ?</span></font>"]
HTML_2_OUT = []
# <pre>-based reply: first <pre> is the answer (kept, up to the
# "----- Mail original -----" marker), second <pre> is the quoted original
# message (stripped).
HTML_3 = """<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>
<pre>Hi,
My CRM-related question.
Regards,
XXXX</pre></div>"""
HTML_3_IN = ["""<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>"""]
HTML_3_OUT = ["Hi,", "My CRM-related question.",
"Regards,"]
# Outlook-for-Mac style reply: new content on top, quoted original wrapped in
# <span id="OLK_SRC_BODY_SECTION"> with a From:/Reply-To:/Date:/To:/Subject:
# header block.  No _IN/_OUT expectation lists are defined for this fixture.
HTML_4 = """
<div>
<div>Hi Nicholas,</div>
<br>
<div>I'm free now. 00447710085916.</div>
<br>
<div>Regards,</div>
<div>Nicholas</div>
<br>
<span id="OLK_SRC_BODY_SECTION">
<div style="font-family:Calibri; font-size:11pt; text-align:left; color:black; BORDER-BOTTOM: medium none; BORDER-LEFT: medium none; PADDING-BOTTOM: 0in; PADDING-LEFT: 0in; PADDING-RIGHT: 0in; BORDER-TOP: #b5c4df 1pt solid; BORDER-RIGHT: medium none; PADDING-TOP: 3pt">
<span style="font-weight:bold">From: </span>OpenERP Enterprise <<a href="mailto:sales@openerp.com">sales@openerp.com</a>><br><span style="font-weight:bold">Reply-To: </span><<a href="mailto:sales@openerp.com">sales@openerp.com</a>><br><span style="font-weight:bold">Date: </span>Wed, 17 Apr 2013 13:30:47 +0000<br><span style="font-weight:bold">To: </span>Microsoft Office User <<a href="mailto:n.saxlund@babydino.com">n.saxlund@babydino.com</a>><br><span style="font-weight:bold">Subject: </span>Re: your OpenERP.com registration<br>
</div>
<br>
<div>
<p>Hello Nicholas Saxlund, </p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ?
</p>
<p>Best regards, </p>
<pre><a href="http://openerp.com">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</span>
</div>"""
# Yahoo-webmail style reply inside a single <pre>: quoted original follows
# the "________________________________" separator and the From:/To:/Sent:/
# Subject: lines.  No _IN/_OUT expectation lists are defined for this fixture.
HTML_5 = """<div><pre>Hi,
I have downloaded OpenERP installer 7.0 and successfully installed the postgresql server and the OpenERP.
I created a database and started to install module by log in as administrator.
However, I was not able to install any module due to "OpenERP Server Error" as shown in the attachement.
Could you please let me know how could I fix this problem?
Regards,
Goh Sin Yih
________________________________
From: OpenERP Enterprise <sales@openerp.com>
To: sinyih_goh@yahoo.com
Sent: Friday, February 8, 2013 12:46 AM
Subject: Feedback From Your OpenERP Trial
Hello Goh Sin Yih,
Thank you for having tested OpenERP Online.
I noticed you started a trial of OpenERP Online (gsy) but you did not decide to keep using it.
So, I just wanted to get in touch with you to get your feedback. Can you tell me what kind of application you were you looking for and why you didn't decide to continue with OpenERP?
Thanks in advance for providing your feedback,
Do not hesitate to contact me if you have any questions,
Thanks,
</pre>"""
# Gmail reply: new content on top, quoted original inside <blockquote> after
# the "On ... wrote:" attribution line.  _IN: new reply kept; _OUT: quoted
# signature and portal link stripped.
GMAIL_1 = """Hello,<div><br></div><div>Ok for me. I am replying directly in gmail, without signature.</div><div><br></div><div>Kind regards,</div><div><br></div><div>Demo.<br><br><div>On Thu, Nov 8, 2012 at 5:29 PM, <span><<a href="mailto:dummy@example.com">dummy@example.com</a>></span> wrote:<br><blockquote><div>I contact you about our meeting for tomorrow. Here is the schedule I propose:</div><div><ul><li>9 AM: brainstorming about our new amazing business app</span></li></li>
<li>9.45 AM: summary</li><li>10 AM: meeting with Fabien to present our app</li></ul></div><div>Is everything ok for you ?</div>
<div><p>--<br>Administrator</p></div>
<div><p>Log in our portal at: <a href="http://localhost:8069#action=login&db=mail_1&login=demo">http://localhost:8069#action=login&db=mail_1&login=demo</a></p></div>
</blockquote></div><br></div>"""
GMAIL_1_IN = ['Ok for me. I am replying directly in gmail, without signature.']
GMAIL_1_OUT = ['Administrator', 'Log in our portal at:']
# Thunderbird bottom-posted reply: quoted original in <blockquote> first,
# then the new answer and a <pre> signature.  _IN: new reply kept; _OUT:
# quoted text and signature stripped ('Raoul Grosbedon' matches as a
# substring of 'Raoul Grosbedonnée').
THUNDERBIRD_1 = """<div>On 11/08/2012 05:29 PM,
<a href="mailto:dummy@example.com">dummy@example.com</a> wrote:<br></div>
<blockquote>
<div>I contact you about our meeting for tomorrow. Here is the
schedule I propose:</div>
<div>
<ul><li>9 AM: brainstorming about our new amazing business
app</span></li></li>
<li>9.45 AM: summary</li>
<li>10 AM: meeting with Fabien to present our app</li>
</ul></div>
<div>Is everything ok for you ?</div>
<div>
<p>--<br>
Administrator</p>
</div>
<div>
<p>Log in our portal at:
<a href="http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH">http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH</a></p>
</div>
</blockquote>
Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.<br><br>
Did you receive my email about my new laptop, by the way ?<br><br>
Raoul.<br><pre>--
Raoul Grosbedonnée
</pre>"""
THUNDERBIRD_1_IN = ['Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.']
THUNDERBIRD_1_OUT = ['I contact you about our meeting for tomorrow.', 'Raoul Grosbedon']
# Hotmail/Outlook.com reply: new content on top, quoted original after
# <hr id="stopSpelling"> and the Subject:/From:/To:/Date: lines.
# _IN: new reply kept; _OUT: quoted header and body stripped.
HOTMAIL_1 = """<div>
<div dir="ltr"><br>
I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly.
<br> <br>Kindest regards,<br>xxx<br>
<div>
<div id="SkyDrivePlaceholder">
</div>
<hr id="stopSpelling">
Subject: Re: your OpenERP.com registration<br>From: xxx@xxx.xxx<br>To: xxx@xxx.xxx<br>Date: Wed, 27 Mar 2013 17:12:12 +0000
<br><br>
Hello xxx,
<br>
I noticed you recently created an OpenERP.com account to access OpenERP Apps.
<br>
You indicated that you wish to use OpenERP in your own company.
We would like to know more about your your business needs and requirements, and see how
we can help you. When would you be available to discuss your project ?<br>
Best regards,<br>
<pre>
<a href="http://openerp.com" target="_blank">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</div>
</div>"""
HOTMAIL_1_IN = ["I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly."]
HOTMAIL_1_OUT = ["Subject: Re: your OpenERP.com registration", " I noticed you recently created an OpenERP.com account to access OpenERP Apps.",
"We would like to know more about your your business needs and requirements", "Belgium: +32.81.81.37.00"]
# MS Office / Outlook desktop reply (WordSection1 markup): new content on top,
# quoted original after the bordered From:/Sent:/To:/Subject: block.
# _IN: new reply kept; _OUT: quoted body and signature stripped.
# NOTE(review): 'Uou mentioned' below reproduces a typo from the real quoted
# mail and is matched verbatim by MSOFFICE_1_OUT — do not "fix" it.
MSOFFICE_1 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.
We are a company of 25 engineers providing product design services to clients.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I’ll install on a windows server and run a very limited trial to see how it works.
If we adopt OpenERP we will probably move to Linux or look for a hosted SaaS option.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
<br>
I am also evaluating Adempiere and maybe others.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I expect the trial will take 2-3 months as this is not a high priority for us.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Alan
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
From:
</span></b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
OpenERP Enterprise [mailto:sales@openerp.com]
<br><b>Sent:</b> Monday, 11 March, 2013 14:47<br><b>To:</b> Alan Widmer<br><b>Subject:</b> Re: your OpenERP.com registration
</span>
</p>
<p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Alan Widmer, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>
Uou mentioned you wish to use OpenERP in your own company. Please let me more about your
business needs and requirements? When will you be available to discuss about your project?
</p>
<p></p>
<p>Thanks for your interest in OpenERP, </p>
<p></p>
<p>Feel free to contact me if you have any questions, </p>
<p></p>
<p>Looking forward to hear from you soon. </p>
<p></p>
<pre><p> </p></pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre><a href="http://openerp.com">http://openerp.com</a><p></p></pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_1_IN = ['Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.']
MSOFFICE_1_OUT = ['I noticed you recently downloaded OpenERP.', 'Uou mentioned you wish to use OpenERP in your own company.', 'Belgium: +32.81.81.37.00']
# Second MS Office / Outlook reply variant: new content plus a long contact
# signature block, quoted original after the From:/Sent:/To:/Subject: header.
# _IN: new reply kept; _OUT: quoted body stripped.
MSOFFICE_2 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Nicolas,</span></p><p></p>
<p></p>
<p class="MsoNormal" style="text-indent:.5in">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">We are currently investigating the possibility of moving away from our current ERP </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Thank You</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Matt</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Raoul Petitpoil</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Poil Industries</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Information Technology</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">920 Super Street</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Sanchez, Pa 17046 USA</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Tel: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Fax: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Email: </span>
<a href="mailto:raoul@petitpoil.com">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:blue">raoul@petitpoil.com</span>
</a>
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.poilindustries.com</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.superproducts.com</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">From:</span>
</b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:sales@openerp.com] <br><b>Sent:</b> Wednesday, April 17, 2013 1:31 PM<br><b>To:</b> Matt Witters<br><b>Subject:</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul Petitpoil, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_2_IN = ['We are currently investigating the possibility']
MSOFFICE_2_OUT = ['I noticed you recently downloaded OpenERP.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00']
# Third MS Office / Outlook reply variant: multilingual signature block
# (Wingdings/Webdings glyph spans) and a French "De :/Envoyé :/À :/Objet :"
# quoted-mail header.  Note the quoted body here says "registered to our
# OpenERP Online solution", unlike MSOFFICE_1/2 ("downloaded OpenERP").
MSOFFICE_3 = """<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Hi Nicolas !</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Yes I’d be glad to hear about your offers as we struggle every year with the planning/approving of LOA. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">I saw your boss yesterday on tv and immediately wanted to test the interface. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Bien à vous, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Met vriendelijke groeten, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Best regards,</span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">
</span></b></p><p><b> </b></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">R. Petitpoil <br></span>
</b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Human Resource Manager<b><br><br>Field Resource s.a n.v. <i> <br></i></b>Hermesstraat 6A <br>1930 Zaventem</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:gray"><br></span>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:Wingdings;color:#1F497D">(</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:Wingdings;color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">xxx.xxx </span>
</b>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:gray"><br></span>
</b>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Wingdings 2";color:#1F497D">7</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:"Wingdings 2";color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">+32 2 727.05.91<br></span>
</b>
<span lang="EN-GB" style="font-size:24.0pt;font-family:Webdings;color:green">P</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:green"> <b> </b></span>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:green">Please consider the environment before printing this email.</span>
</b>
<span lang="EN-GB" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:navy"> </span>
<span lang="EN-GB" style="font-family:"Calibri","sans-serif";color:navy">
</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal">
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span>
</b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:sales@openerp.com] <br><b>Envoyé :</b> jeudi 18 avril 2013 11:31<br><b>À :</b> Paul Richard<br><b>Objet :</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul PETITPOIL, </p>
<p></p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
# Expected substrings for the MSOFFICE_3 fixture: _IN must survive processing,
# _OUT must be removed from the quoted part.
MSOFFICE_3_IN = ['I saw your boss yesterday']
# Fix: the first _OUT entry was 'I noticed you recently downloaded OpenERP.'
# (copy-pasted from MSOFFICE_1/2), but the MSOFFICE_3 body actually reads
# "I noticed you recently registered to our OpenERP Online solution." — a
# substring that never occurs in the input makes the removal check vacuous.
MSOFFICE_3_OUT = ['I noticed you recently registered to our OpenERP Online solution.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00']
# ------------------------------------------------------------
# Test cases coming from bugs
# ------------------------------------------------------------
# bug: read more not apparent, strange message in read more span
# Regression fixture (see the "bug" comment above): long <pre> plain-text mail
# whose body must stay visible while the "--" signature block is removed.
# NOTE(review): name is BUG1 but the expectation lists are BUG_1_IN/BUG_1_OUT
# (underscored) — inconsistent but kept, as external tests reference them.
BUG1 = """<pre>Hi Migration Team,
Paragraph 1, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 2, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 3, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Thanks.
Regards,
--
Olivier Laurent
Migration Manager
OpenERP SA
Chaussée de Namur, 40
B-1367 Gérompont
Tel: +32.81.81.37.00
Web: http://www.openerp.com</pre>"""
BUG_1_IN = [
'Hi Migration Team',
'Paragraph 1'
]
BUG_1_OUT = [
'Olivier Laurent',
'Chaussée de Namur',
'81.81.37.00',
'openerp.com',
]
BUG2 = """
<div>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Original Message --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Subject:
</th>
<td>Fwd: TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date: </th>
<td>Wed, 16 Oct 2013 14:11:13 +0200</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">From: </th>
<td>Christine Herrmann <a class="moz-txt-link-rfc2396E" href="mailto:che@openerp.com"><che@openerp.com></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">To: </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:online@openerp.com">online@openerp.com</a></td>
</tr>
</tbody>
</table>
<br>
<br>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Message original --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Sujet:
</th>
<td>TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date :
</th>
<td>Wed, 16 Oct 2013 10:34:45 -0000</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">De : </th>
<td>Ida Siwatala <a class="moz-txt-link-rfc2396E" href="mailto:infos@inzoservices.com"><infos@inzoservices.com></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Répondre
à : </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:catchall@mail.odoo.com">catchall@mail.odoo.com</a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Pour :
</th>
<td>Christine Herrmann (che) <a class="moz-txt-link-rfc2396E" href="mailto:che@openerp.com"><che@openerp.com></a></td>
</tr>
</tbody>
</table>
<br>
<br>
<div>
<div class="WordSection1">
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonjour,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<div>
<div style="border:none;border-top:solid #B5C4DF
1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
Ida Siwatala [<a class="moz-txt-link-freetext" href="mailto:infos@inzoservices.com">mailto:infos@inzoservices.com</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 20:03<br>
<b>À :</b> 'Followers of
INZO-services-8-all-e-Maxime-Lisbonne-77176-Savigny-le-temple-France'<br>
<b>Objet :</b> RE: OpenERP S.A. Payment Reminder</span></p>
</div>
</div>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonsoir,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Je
me permets de revenir vers vous par écrit , car j’ai
fait 2 appels vers votre service en exposant mon
problème, mais je n’ai pas eu de retour.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cela
fait un mois que j’ai fait la souscription de votre
produit, mais je me rends compte qu’il est pas adapté à
ma situation ( fonctionnalité manquante et surtout je
n’ai pas beaucoup de temps à passer à résoudre des
bugs). </span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">C’est
pourquoi , j’ai demandé qu’un accord soit trouvé avec
vous pour annuler le contrat (tout en vous payant le
mois d’utilisation de septembre).</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Ida
Siwatala</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
<a href="mailto:che@openerp.com">che@openerp.com</a>
[<a href="mailto:che@openerp.com">mailto:che@openerp.com</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 17:41<br>
<b>À :</b> <a href="mailto:infos@inzoservices.com">infos@inzoservices.com</a><br>
<b>Objet :</b> OpenERP S.A. Payment Reminder</span></p>
<p> </p>
<div>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Dear
INZO services,</span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Exception
made if there was a mistake of ours, it seems that the
following amount stays unpaid. Please, take
appropriate measures in order to carry out this
payment in the next 8 days. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"></span></p>
<p> </p>
<table class="MsoNormalTable" style="width:100.0%;border:outset 1.5pt" width="100%" border="1" cellpadding="0">
<tbody>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Date de facturation</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Description</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Reference</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Due Date</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Amount (€)</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Lit.</p>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013/1121</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>Enterprise - Inzo Services
- Juillet 2013</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>420.0</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
</tr>
</tbody>
</table>
<p class="MsoNormal" style="text-align:center;background:white" align="center"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Amount
due : 420.00 € </span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Would
your payment have been carried out after this mail was
sent, please ignore this message. Do not hesitate to
contact our accounting department. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"><br>
Best Regards, <br>
Aurore Lesage <br>
OpenERP<br>
Chaussée de Namur, 40 <br>
B-1367 Grand Rosières <br>
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01 <br>
E-mail : <a href="mailto:ale@openerp.com">ale@openerp.com</a> <br>
Web: <a href="http://www.openerp.com">http://www.openerp.com</a></span></p>
</div>
</div>
</div>
--<br>
INZO services <small>Sent by <a style="color:inherit" href="http://www.openerp.com">OpenERP
S.A.</a> using <a style="color:inherit" href="https://www.openerp.com/">OpenERP</a>.</small>
<small>Access your messages and documents <a style="color:inherit" href="https://accounts.openerp.com?db=openerp#action=mail.action_mail_redirect&login=che&message_id=5750830">in
OpenERP</a></small> <br>
<pre class="moz-signature" cols="72">--
Christine Herrmann
OpenERP
Chaussée de Namur, 40
B-1367 Grand Rosières
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01
Web: <a class="moz-txt-link-freetext" href="http://www.openerp.com">http://www.openerp.com</a> </pre>
<br>
</div>
<br>
<br>
</div>
<br>
</div>"""
# Expectation lists consumed alongside the BUG2 body (presumably by the
# mail "read more" tests; confirm exact matching semantics against the caller).
BUG_2_IN = [
    'read more',
    '...',
]
BUG_2_OUT = [
    # BUG FIX: a missing comma between the next two literals made Python
    # concatenate them into a single string ('Fwd: TR: OpenERP S.Afait un
    # mois') that could never match, silently weakening the expectation.
    'Fwd: TR: OpenERP S.A',
    'fait un mois',
]
# BUG 20/08/2014: READ MORE NOT APPEARING
BUG3 = """<div class="oe_msg_body_long" style="/* display: none; */"><p>OpenERP has been upgraded to version 8.0.</p>
<h2>What's new in this upgrade?</h2>
<div class="document">
<ul>
<li><p class="first">New Warehouse Management System:</p>
<blockquote>
<p>Schedule your picking, packing, receptions and internal moves automatically with Odoo using
your own routing rules. Define push and pull rules to organize a warehouse or to manage
product moves between several warehouses. Track in detail all stock moves, not only in your
warehouse but wherever else it's taken as well (customers, suppliers or manufacturing
locations).</p>
</blockquote>
</li>
<li><p class="first">New Product Configurator</p>
</li>
<li><p class="first">Documentation generation from website forum:</p>
<blockquote>
<p>New module to generate a documentation from questions and responses from your forum.
The documentation manager can define a table of content and any user, depending their karma,
can link a question to an entry of this TOC.</p>
</blockquote>
</li>
<li><p class="first">New kanban view of documents (resumes and letters in recruitement, project documents...)</p>
</li>
<li><p class="first">E-Commerce:</p>
<blockquote>
<ul class="simple">
<li>Manage TIN in contact form for B2B.</li>
<li>Dedicated salesteam to easily manage leads and orders.</li>
</ul>
</blockquote>
</li>
<li><p class="first">Better Instant Messaging.</p>
</li>
<li><p class="first">Faster and Improved Search view: Search drawer now appears on top of the results, and is open
by default in reporting views</p>
</li>
<li><p class="first">Improved User Interface:</p>
<blockquote>
<ul class="simple">
<li>Popups has changed to be more responsive on tablets and smartphones.</li>
<li>New Stat Buttons: Forms views have now dynamic buttons showing some statistics abouts linked models.</li>
<li>Color code to check in one look availability of components in an MRP order.</li>
<li>Unified menu bar allows you to switch easily between the frontend (website) and backend</li>
<li>Results panel is now scrollable independently of the menu bars, keeping the navigation,
search bar and view switcher always within reach.</li>
</ul>
</blockquote>
</li>
<li><p class="first">User signature is now in HTML.</p>
</li>
<li><p class="first">New development API.</p>
</li>
<li><p class="first">Remove support for Outlook and Thunderbird plugins</p>
</li>
</ul>
</div>
<p>Enjoy the new OpenERP Online!</p><span class="oe_mail_reduce"><a href="#">read less</a></span></div>"""
# Expectation lists consumed together with the BUG3 body above.
# NOTE(review): the *_IN/*_OUT naming mirrors BUG_2_IN/BUG_2_OUT; confirm
# the exact matching semantics against the mail tests that use them.
BUG_3_IN = [
    'read more',
    '...',
]
BUG_3_OUT = [
    'New kanban view of documents'
]
|
ibmsoe/ImpalaPPC | refs/heads/Impala2.6-main | shell/ext-py/sqlparse-0.1.14/tests/test_functions.py | 45 | '''
Created on 13/02/2012
@author: piranna
'''
from unittest import main, TestCase
from sqlparse.filters import IncludeStatement, Tokens2Unicode
from sqlparse.lexer import tokenize
import sys
sys.path.insert(0, '..')
from sqlparse.filters import compact
from sqlparse.functions import getcolumns, getlimit, IsType
class Test_IncludeStatement(TestCase):
    """INCLUDE directives must be expanded with the referenced file's SQL."""

    sql = """-- type: script
-- return: integer
INCLUDE "_Make_DirEntry.sql";
INSERT INTO directories(inode)
VALUES(:inode)
LIMIT 1"""

    def test_includeStatement(self):
        """Expanding the INCLUDE then compacting yields the merged script."""
        expander = IncludeStatement('tests/files',
                                    raiseexceptions=True)
        expanded = expander.process(None, tokenize(self.sql))
        rendered = Tokens2Unicode(compact(expanded))
        self.assertEqual(
            rendered, (
                'INSERT INTO dir_entries(type)VALUES(:type);INSERT INTO '
                'directories(inode)VALUES(:inode)LIMIT 1'))
class Test_SQL(TestCase):
    """Shared SQL fixtures; the concrete assertions live in subclasses."""

    # Script-style INSERT with metadata comments, used for the "no columns,
    # LIMIT 1" cases.
    sql = """-- type: script
-- return: integer
INSERT INTO directories(inode)
VALUES(:inode)
LIMIT 1"""

    # Simple SELECT with an alias ("asdf AS inode") and bound parameters.
    sql2 = """SELECT child_entry,asdf AS inode, creation
FROM links
WHERE parent_dir == :parent_dir AND name == :name
LIMIT 1"""

    # Larger SELECT: many aliases, SQL comments, JOINs and a GROUP BY —
    # exercises comment stripping and column extraction.
    sql3 = """SELECT
0 AS st_dev,
0 AS st_uid,
0 AS st_gid,
dir_entries.type AS st_mode,
dir_entries.inode AS st_ino,
COUNT(links.child_entry) AS st_nlink,
:creation AS st_ctime,
dir_entries.access AS st_atime,
dir_entries.modification AS st_mtime,
-- :creation AS st_ctime,
-- CAST(STRFTIME('%s',dir_entries.access) AS INTEGER) AS st_atime,
-- CAST(STRFTIME('%s',dir_entries.modification) AS INTEGER) AS st_mtime,
COALESCE(files.size,0) AS st_size, -- Python-FUSE
COALESCE(files.size,0) AS size -- PyFilesystem
FROM dir_entries
LEFT JOIN files
ON dir_entries.inode == files.inode
LEFT JOIN links
ON dir_entries.inode == links.child_entry
WHERE dir_entries.inode == :inode
GROUP BY dir_entries.inode
LIMIT 1"""
class Test_Compact(Test_SQL):
    """compact() should drop comments and collapse whitespace."""

    def test_compact1(self):
        rendered = Tokens2Unicode(compact(tokenize(self.sql)))
        self.assertEqual(rendered,
                         'INSERT INTO directories(inode)VALUES(:inode)LIMIT 1')

    def test_compact2(self):
        rendered = Tokens2Unicode(compact(tokenize(self.sql2)))
        self.assertEqual(
            rendered,
            'SELECT child_entry,asdf AS inode,creation FROM links WHERE '
            'parent_dir==:parent_dir AND name==:name LIMIT 1')

    def test_compact3(self):
        rendered = Tokens2Unicode(compact(tokenize(self.sql3)))
        self.assertEqual(
            rendered,
            'SELECT 0 AS st_dev,0 AS st_uid,0 AS st_gid,dir_entries.type AS '
            'st_mode,dir_entries.inode AS st_ino,COUNT(links.child_entry)AS '
            'st_nlink,:creation AS st_ctime,dir_entries.access AS st_atime,'
            'dir_entries.modification AS st_mtime,COALESCE(files.size,0)AS '
            'st_size,COALESCE(files.size,0)AS size FROM dir_entries LEFT JOIN'
            ' files ON dir_entries.inode==files.inode LEFT JOIN links ON '
            'dir_entries.inode==links.child_entry WHERE dir_entries.inode=='
            ':inode GROUP BY dir_entries.inode LIMIT 1')
class Test_GetColumns(Test_SQL):
    """getcolumns() should list the selected column names/aliases in order."""

    def test_getcolumns1(self):
        self.assertEqual(getcolumns(tokenize(self.sql)), [])

    def test_getcolumns2(self):
        self.assertEqual(getcolumns(tokenize(self.sql2)),
                         ['child_entry', 'inode', 'creation'])

    def test_getcolumns3(self):
        expected = ['st_dev', 'st_uid', 'st_gid', 'st_mode', 'st_ino',
                    'st_nlink', 'st_ctime', 'st_atime', 'st_mtime',
                    'st_size', 'size']
        self.assertEqual(getcolumns(tokenize(self.sql3)), expected)
class Test_GetLimit(Test_SQL):
    """getlimit() should extract the LIMIT value of each fixture query."""

    def test_getlimit1(self):
        self.assertEqual(getlimit(tokenize(self.sql)), 1)

    def test_getlimit2(self):
        self.assertEqual(getlimit(tokenize(self.sql2)), 1)

    def test_getlimit3(self):
        self.assertEqual(getlimit(tokenize(self.sql3)), 1)
class Test_IsType(Test_SQL):
    """IsType(name) should report whether a stream is that statement type."""

    def test_istype2(self):
        # Token streams are consumed, so tokenize afresh for each check.
        self.assertTrue(IsType('SELECT')(tokenize(self.sql2)))
        self.assertFalse(IsType('INSERT')(tokenize(self.sql2)))
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Run all TestCase classes in this module via unittest's CLI entry point.
    main()
|
KousikaGanesh/purchaseandInventory | refs/heads/master | openerp/addons/point_of_sale/wizard/__init__.py | 61 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pos_confirm
import pos_discount
import pos_open_statement
import pos_details
import pos_sales_user
import pos_sales_user_today
import pos_receipt
import pos_payment_report_user
import pos_payment_report
import pos_payment
import pos_session_opening
import pos_box
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jonchang/CloudForest | refs/heads/master | build/rpy2/rpy/robjects/tests/__init__.py | 3 | import unittest
import testRObject
import testVector
import testArray
import testDataFrame
import testFormula
import testFunction
import testEnvironment
import testRobjects
import testMethods
import testPackages
import testHelp
import testLanguage
# wrap this nicely so a warning is issued if no numpy present
import testNumpyConversions
def suite():
    """Assemble the suites of every robjects test module into one TestSuite.

    Module order is preserved so results stay comparable across runs.
    """
    test_modules = (
        testRObject,
        testVector,
        testArray,
        testDataFrame,
        testFunction,
        testEnvironment,
        testFormula,
        testRobjects,
        testMethods,
        testNumpyConversions,
        testPackages,
        testHelp,
        testLanguage,
    )
    return unittest.TestSuite(module.suite() for module in test_modules)
def main():
    """Run the full suite and return the populated TestResult."""
    result = unittest.TestResult()
    suite().run(result)
    return result
if __name__ == '__main__':
    tr = unittest.TextTestRunner(verbosity = 2)
    # NOTE: rebinding 'suite' to the TestSuite instance shadows the suite()
    # function above; harmless here because it is not called again afterwards.
    suite = suite()
    tr.run(suite)
|
vetalypp/e2openplugin-CrossEPG | refs/heads/master | scripts/lib/__init__.py | 3 | # empty file
|
mayank-johri/LearnSeleniumUsingPython | refs/heads/master | Section 2 - Advance Python/Chapter S2.06 - Web Development/code/flask/jinja_variable.py | 2 | """."""
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def రేమకు_స్వాగతం():
    """Render the root page, passing a Telugu welcome string to the template.

    The function name and the 'welcome' value are Telugu for the greeting;
    the template interpolates the variable (Jinja variable demo).
    """
    return render_template('jinja_variable.html', welcome="రేమకు స్వాగతం" )
if __name__ == '__main__':
    # Start Flask's built-in development server with default settings.
    app.run()
|
wakermahmud/sync-engine | refs/heads/master | tests/api/test_files.py | 1 | # -*- coding: utf-8 -*-
import os
import md5
import json
from datetime import datetime
import pytest
# Sample attachments shipped under tests/data; two of these names are
# rewritten to non-ASCII forms at upload time (see uploaded_file_ids below).
FILENAMES = ['muir.jpg', 'LetMeSendYouEmail.wav', 'piece-jointe.jpg',
             'andra-moi-ennepe.txt']


@pytest.fixture
def draft(db, default_account):
    """Return a minimal draft payload addressed back to the test account.

    The timestamped subject keeps repeated runs distinguishable.
    """
    return {
        'subject': 'Draft test at {}'.format(datetime.utcnow()),
        'body': '<html><body><h2>Sea, birds and sand.</h2></body></html>',
        'to': [{'name': 'The red-haired mermaid',
                'email': default_account.email_address}]
    }
@pytest.fixture(scope='function')
def files(db):
    """Return (filename, utf-8-encoded absolute path) pairs for the fixtures."""
    data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..',
                            'data')
    return [(name, os.path.join(data_dir, name).encode('utf-8'))
            for name in FILENAMES]
@pytest.fixture(scope='function')
def uploaded_file_ids(api_client, files):
    """Upload every sample file and return the server-assigned file ids."""
    file_ids = []
    upload_path = api_client.full_path('/files')
    for filename, path in files:
        # Mac and linux fight over filesystem encodings if we store this
        # filename on the fs. Work around by changing the filename we upload
        # instead.
        if filename == 'piece-jointe.jpg':
            filename = u'pièce-jointe.jpg'
        elif filename == 'andra-moi-ennepe.txt':
            filename = u'ἄνδρα μοι ἔννεπε'
        data = {'file': (open(path, 'rb'), filename)}
        r = api_client.client.post(upload_path, data=data)
        assert r.status_code == 200
        # The upload endpoint returns a JSON list; take the new file's id.
        file_id = json.loads(r.data)[0]['id']
        file_ids.append(file_id)
    return file_ids
def test_file_filtering(api_client, uploaded_file_ids, draft):
    """Exercise /files filters: message_id, limit/offset, filename, content_type."""
    # Attach the files to a draft and search there
    draft['file_ids'] = uploaded_file_ids
    r = api_client.post_data('/drafts', draft)
    assert r.status_code == 200
    draft_resp = json.loads(r.data)
    assert len(draft_resp['files']) == 4
    d_id = draft_resp['id']

    # Every uploaded file should now be associated with the new draft.
    results = api_client.get_data('/files?message_id={}'
                                  .format(d_id))
    assert all([d_id in f['message_ids'] for f in results])
    assert len(results) == 4

    # limit / offset paging over the same result set.
    results = api_client.get_data('/files?message_id={}&limit=1'
                                  .format(d_id))
    assert len(results) == 1
    results = api_client.get_data('/files?message_id={}&offset=2'
                                  .format(d_id))
    assert len(results) == 2

    # Filter by exact filename, then by URL-encoded MIME type.
    results = api_client.get_data('/files?filename=LetMeSendYouEmail.wav')
    assert len(results) == 1
    results = api_client.get_data('/files?content_type=audio%2Fx-wav')
    assert len(results) == 1
    results = api_client.get_data('/files?content_type=image%2Fjpeg')
    assert len(results) == 2

    # 'view' parameter variants: aggregate count and bare ids.
    results = api_client.get_data('/files?content_type=image%2Fjpeg&view=count')
    assert results["count"] == 2
    results = api_client.get_data('/files?content_type=image%2Fjpeg&view=ids')
    assert len(results) == 2
def test_attachment_has_same_id(api_client, uploaded_file_ids, draft):
    """A posted draft must echo each attachment back under its upload id."""
    file_id = uploaded_file_ids.pop()
    draft['file_ids'] = [file_id]
    response = api_client.post_data('/drafts', draft)
    assert response.status_code == 200
    returned_ids = [f['id'] for f in json.loads(response.data)['files']]
    assert file_id in returned_ids
def test_delete(api_client, uploaded_file_ids, draft):
    """Files not referenced by a message can be deleted; attachments cannot."""
    non_attachment_id = uploaded_file_ids.pop()
    attachment_id = uploaded_file_ids.pop()
    draft['file_ids'] = [attachment_id]
    r = api_client.post_data('/drafts', draft)
    assert r.status_code == 200

    # Test that we can delete a non-attachment
    r = api_client.delete('/files/{}'.format(non_attachment_id))
    assert r.status_code == 200

    # Once deleted, the file must no longer be retrievable.
    data = api_client.get_data('/files/{}'.format(non_attachment_id))
    assert data['message'].startswith("Couldn't find file")

    # Make sure that we cannot delete attachments
    r = api_client.delete('/files/{}'.format(attachment_id))
    assert r.status_code == 400

    # The attachment must still exist after the rejected delete.
    data = api_client.get_data('/files/{}'.format(attachment_id))
    assert data['id'] == attachment_id
@pytest.mark.parametrize("filename", FILENAMES)
def test_get_with_id(api_client, uploaded_file_ids, filename):
    """GET /files/<id> returns metadata whose filename matches the upload."""
    # Mirror the rename performed at upload time (see uploaded_file_ids()).
    aliases = {
        'piece-jointe.jpg': u'pièce-jointe.jpg',
        'andra-moi-ennepe.txt': u'ἄνδρα μοι ἔννεπε',
    }
    filename = aliases.get(filename, filename)
    matches = api_client.get_data(u'/files?filename={}'.format(filename))
    data = api_client.get_data('/files/{}'.format(matches[0]['id']))
    assert data['filename'] == filename
def test_get_invalid(api_client, uploaded_file_ids):
    """Unknown and malformed file ids must yield the standard error payloads."""
    missing_id = '0000000000000000000000000'
    for endpoint in ('/files/{}', '/files/{}/download'):
        data = api_client.get_data(endpoint.format(missing_id))
        assert data['message'].startswith("Couldn't find file")
        data = api_client.get_data(endpoint.format('!'))
        assert data['message'].startswith("Invalid id")

    assert api_client.delete('/files/' + missing_id).status_code == 404
    assert api_client.delete('/files/!').status_code == 400
@pytest.mark.parametrize("filename", FILENAMES)
def test_download(api_client, uploaded_file_ids, filename):
    """Downloaded file content must be byte-identical to the local fixture."""
    # Local import: the top-level 'md5' module is deprecated; hashlib is the
    # supported API (available since Python 2.5).
    import hashlib
    # See comment in uploaded_file_ids(): some names are rewritten at upload.
    original_filename = filename
    if filename == 'piece-jointe.jpg':
        filename = u'pièce-jointe.jpg'
    elif filename == 'andra-moi-ennepe.txt':
        filename = u'ἄνδρα μοι ἔννεπε'

    in_file = api_client.get_data(u'/files?filename={}'.format(filename))[0]
    data = api_client.get_raw('/files/{}/download'.format(in_file['id'])).data

    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..',
                        'data', original_filename.encode('utf-8'))
    # FIX: close the fixture file deterministically instead of leaking the
    # handle opened by open(...).read().
    with open(path, 'rb') as fixture:
        local_data = fixture.read()

    # Compare digests so a mismatch doesn't dump whole binary payloads.
    assert hashlib.md5(local_data).digest() == hashlib.md5(data).digest()
|
maxamillion/ansible | refs/heads/devel | test/units/module_utils/facts/network/test_fc_wwn.py | 14 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.facts.network import fc_wwn
from units.compat.mock import Mock
# AIX lsdev
LSDEV_OUTPUT = """
fcs0 Defined 00-00 8Gb PCI Express Dual Port FC Adapter (df1000f114108a03)
fcs1 Available 04-00 8Gb PCI Express Dual Port FC Adapter (df1000f114108a03)
"""
# a bit cutted output of lscfg (from Z0 to ZC)
LSCFG_OUTPUT = """
fcs1 U78CB.001.WZS00ZS-P1-C9-T1 8Gb PCI Express Dual Port FC Adapter (df1000f114108a03)
Part Number.................00E0806
Serial Number...............1C4090830F
Manufacturer................001C
EC Level.................... D77161
Customer Card ID Number.....577D
FRU Number..................00E0806
Device Specific.(ZM)........3
Network Address.............10000090FA551508
ROS Level and ID............027820B7
Device Specific.(Z0)........31004549
Device Specific.(ZC)........00000000
Hardware Location Code......U78CB.001.WZS00ZS-P1-C9-T1
"""
# Solaris
FCINFO_OUTPUT = """
HBA Port WWN: 10000090fa1658de
Port Mode: Initiator
Port ID: 30100
OS Device Name: /dev/cfg/c13
Manufacturer: Emulex
Model: LPe12002-S
Firmware Version: LPe12002-S 2.01a12
FCode/BIOS Version: Boot:5.03a0 Fcode:3.01a1
Serial Number: 4925381+13090001ER
Driver Name: emlxs
Driver Version: 3.3.00.1 (2018.01.05.16.30)
Type: N-port
State: online
Supported Speeds: 2Gb 4Gb 8Gb
Current Speed: 8Gb
Node WWN: 20000090fa1658de
NPIV Not Supported
"""
IOSCAN_OUT = """
Class I H/W Path Driver S/W State H/W Type Description
==================================================================
fc 0 2/0/10/1/0 fcd CLAIMED INTERFACE HP AB379-60101 4Gb Dual Port PCI/PCI-X Fibre Channel Adapter (FC Port 1)
/dev/fcd0
"""
FCMSUTIL_OUT = """
Vendor ID is = 0x1077
Device ID is = 0x2422
PCI Sub-system Vendor ID is = 0x103C
PCI Sub-system ID is = 0x12D7
PCI Mode = PCI-X 133 MHz
ISP Code version = 5.4.0
ISP Chip version = 3
Topology = PTTOPT_FABRIC
Link Speed = 4Gb
Local N_Port_id is = 0x010300
Previous N_Port_id is = None
N_Port Node World Wide Name = 0x50060b00006975ed
N_Port Port World Wide Name = 0x50060b00006975ec
Switch Port World Wide Name = 0x200300051e046c0f
Switch Node World Wide Name = 0x100000051e046c0f
N_Port Symbolic Port Name = server1_fcd0
N_Port Symbolic Node Name = server1_HP-UX_B.11.31
Driver state = ONLINE
Hardware Path is = 2/0/10/1/0
Maximum Frame Size = 2048
Driver-Firmware Dump Available = NO
Driver-Firmware Dump Timestamp = N/A
TYPE = PFC
NPIV Supported = YES
Driver Version = @(#) fcd B.11.31.1103 Dec 6 2010
"""
def mock_get_bin_path(cmd, required=False, opt_dirs=None):
    """Stand-in for get_bin_path covering the tools these tests exercise.

    Returns the canned absolute path for a known tool name, or None
    (matching the original if/elif chain's fall-through) for anything else.
    """
    known_paths = {
        'lsdev': '/usr/sbin/lsdev',
        'lscfg': '/usr/sbin/lscfg',
        'fcinfo': '/usr/sbin/fcinfo',
        'ioscan': '/usr/bin/ioscan',
        'fcmsutil': '/opt/fcms/bin/fcmsutil',
    }
    return known_paths.get(cmd)
def mock_run_command(cmd):
    """Stand-in for run_command returning the canned output for a tool.

    Uses the same substring/membership test ('tool' in cmd) and the same
    check order as the original chain; unknown commands yield (1, 'Error', '').
    """
    canned_outputs = (
        ('lsdev', LSDEV_OUTPUT),
        ('lscfg', LSCFG_OUTPUT),
        ('fcinfo', FCINFO_OUTPUT),
        ('ioscan', IOSCAN_OUT),
        ('fcmsutil', FCMSUTIL_OUT),
    )
    for tool, output in canned_outputs:
        if tool in cmd:
            return (0, output, '')
    return (1, 'Error', '')
def test_get_fc_wwn_info(mocker):
    """collect() must extract the initiator WWN from each platform's tool output."""
    module = Mock()
    inst = fc_wwn.FcWwnInitiatorFactCollector()
    # Route tool discovery and execution through the canned mocks above.
    mocker.patch.object(module, 'get_bin_path', side_effect=mock_get_bin_path)
    mocker.patch.object(module, 'run_command', side_effect=mock_run_command)
    # Expected WWN list per sys.platform value (AIX, Solaris, HP-UX).
    d = {'aix6': ['10000090FA551508'], 'sunos5': ['10000090fa1658de'], 'hp-ux11': ['0x50060b00006975ec']}
    for key, value in d.items():
        mocker.patch('sys.platform', key)
        wwn_expected = {"fibre_channel_wwn": value}
        assert wwn_expected == inst.collect(module=module)
|
darjeeling/django | refs/heads/master | django/db/backends/sqlite3/introspection.py | 9 | import re
import warnings
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.db.models.indexes import Index
from django.utils.deprecation import RemovedInDjango21Warning
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')


def get_field_size(name):
    """Extract the size from a "varchar(11)"-style type name, or None."""
    match = field_size_re.search(name)
    if match is None:
        return None
    return int(match.group(1))
# A dict-like wrapper rather than a plain dict: SQLite stores column types
# verbatim (e.g. "varchar(30)"), so lookups may need to parse a size out of
# the key instead of matching it exactly.
class FlexibleFieldLookupDict:
    """Map SQLite column-type strings to Django field class names."""

    # Maps SQL types to Django Field types. Several SQL types map to the same
    # field because SQLite accepts anything and doesn't normalize the type;
    # it keeps whatever the CREATE statement said.
    base_data_types_reverse = {
        'bool': 'BooleanField',
        'boolean': 'BooleanField',
        'smallint': 'SmallIntegerField',
        'smallint unsigned': 'PositiveSmallIntegerField',
        'smallinteger': 'SmallIntegerField',
        'int': 'IntegerField',
        'integer': 'IntegerField',
        'bigint': 'BigIntegerField',
        'integer unsigned': 'PositiveIntegerField',
        'decimal': 'DecimalField',
        'real': 'FloatField',
        'text': 'TextField',
        'char': 'CharField',
        'blob': 'BinaryField',
        'date': 'DateField',
        'datetime': 'DateTimeField',
        'time': 'TimeField',
    }

    def __getitem__(self, key):
        normalized = key.lower()
        if normalized in self.base_data_types_reverse:
            return self.base_data_types_reverse[normalized]
        # Not a plain type name: try to parse a sized (var)char declaration.
        size = get_field_size(normalized)
        if size is not None:
            return ('CharField', {'max_length': size})
        raise KeyError
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """SQLite implementation of database introspection.

    SQLite exposes no information schema; where PRAGMAs are insufficient,
    these methods parse the original CREATE statements stored verbatim in
    sqlite_master, so the regex parsing below is deliberately tied to the
    exact SQL SQLite echoes back.
    """
    data_types_reverse = FlexibleFieldLookupDict()

    def get_table_list(self, cursor):
        """Return a list of table and view names in the current database."""
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation.
        cursor.execute("""
            SELECT name, type FROM sqlite_master
            WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
            ORDER BY name""")
        # row[1][0] is 't' for tables, 'v' for views.
        return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        # The None slots are FieldInfo fields SQLite cannot supply.
        return [
            FieldInfo(
                info['name'],
                info['type'],
                None,
                info['size'],
                None,
                None,
                info['null_ok'],
                info['default'],
            ) for info in self._table_info(cursor, table_name)
        ]

    def column_name_converter(self, name):
        """
        SQLite will in some cases, e.g. when returning columns from views and
        subselects, return column names in 'alias."column"' format instead of
        simply 'column'.

        Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c
        """
        # TODO: remove when SQLite < 3.7.15 is sufficiently old.
        # 3.7.13 ships in Debian stable as of 2014-03-21.
        if self.connection.Database.sqlite_version_info < (3, 7, 15):
            return name.split('.')[-1].strip('"')
        else:
            return name

    def get_relations(self, cursor, table_name):
        """
        Return a dictionary of {field_name: (field_name_other_table, other_table)}
        representing all relationships to the given table.
        """
        # Dictionary of relations to return
        relations = {}

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        try:
            results = cursor.fetchone()[0].strip()
        except TypeError:
            # It might be a view, then no results will be returned
            return relations
        # Keep only the column definitions between the outermost parentheses.
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
            if not m:
                continue
            table, column = [s.strip('"') for s in m.groups()]

            if field_desc.startswith("FOREIGN KEY"):
                # Find name of the target FK field
                m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I)
                field_name = m.groups()[0].strip('"')
            else:
                # Inline REFERENCES: the column name opens the definition.
                field_name = field_desc.split()[0].strip('"')

            # Confirm the referenced column exists in the other table's SQL.
            cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
            result = cursor.fetchall()[0]
            other_table_results = result[0].strip()
            li, ri = other_table_results.index('('), other_table_results.rindex(')')
            other_table_results = other_table_results[li + 1:ri]

            for other_desc in other_table_results.split(','):
                other_desc = other_desc.strip()
                if other_desc.startswith('UNIQUE'):
                    continue

                other_name = other_desc.split(' ', 1)[0].strip('"')
                if other_name == column:
                    relations[field_name] = (other_name, table)
                    break

        return relations

    def get_key_columns(self, cursor, table_name):
        """
        Return a list of (column_name, referenced_table_name, referenced_column_name)
        for all key columns in given table.
        """
        key_columns = []

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        results = cursor.fetchone()[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(',')):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue

            # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
            key_columns.append(tuple(s.strip('"') for s in m.groups()))

        return key_columns

    def get_indexes(self, cursor, table_name):
        # Deprecated entry point kept for backwards compatibility; new code
        # should use get_constraints() instead.
        warnings.warn(
            "get_indexes() is deprecated in favor of get_constraints().",
            RemovedInDjango21Warning, stacklevel=2
        )
        indexes = {}
        for info in self._table_info(cursor, table_name):
            if info['pk'] != 0:
                indexes[info['name']] = {'primary_key': True,
                                         'unique': False}
        cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
        # seq, name, unique
        for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            info = cursor.fetchall()
            # Skip indexes across multiple fields
            if len(info) != 1:
                continue
            name = info[0][2]  # seqno, cid, name
            indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
                             'unique': unique}
        return indexes

    def get_primary_key_column(self, cursor, table_name):
        """Return the column name of the primary key for the given table."""
        # Don't use PRAGMA because that causes issues with some transactions
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        row = cursor.fetchone()
        if row is None:
            raise ValueError("Table %s does not exist" % table_name)
        results = row[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?', field_desc)
            if m:
                return m.groups()[0]
        return None

    def _table_info(self, cursor, name):
        """Return one dict per column, decoded from PRAGMA table_info."""
        cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
        # cid, name, type, notnull, default_value, pk
        return [{
            'name': field[1],
            'type': field[2],
            'size': get_field_size(field[2]),
            'null_ok': not field[3],
            'default': field[4],
            'pk': field[5],  # undocumented
        } for field in cursor.fetchall()]

    def get_constraints(self, cursor, table_name):
        """
        Retrieve any constraints or keys (unique, pk, fk, check, index) across
        one or more columns.
        """
        constraints = {}
        # Get the index info
        cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
        for row in cursor.fetchall():
            # Sqlite3 3.8.9+ has 5 columns, however older versions only give 3
            # columns. Discard last 2 columns if there.
            number, index, unique = row[:3]
            # Get the index info for that index
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            for index_rank, column_rank, column in cursor.fetchall():
                if index not in constraints:
                    constraints[index] = {
                        "columns": [],
                        "primary_key": False,
                        "unique": bool(unique),
                        "foreign_key": False,
                        "check": False,
                        "index": True,
                    }
                constraints[index]['columns'].append(column)
            # Add type and column orders for indexes
            if constraints[index]['index'] and not constraints[index]['unique']:
                # SQLite doesn't support any index type other than b-tree
                constraints[index]['type'] = Index.suffix
                cursor.execute(
                    "SELECT sql FROM sqlite_master "
                    "WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
                )
                orders = []
                # There would be only 1 row to loop over
                for sql, in cursor.fetchall():
                    # Parse per-column ASC/DESC out of the index's CREATE sql.
                    order_info = sql.split('(')[-1].split(')')[0].split(',')
                    orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info]
                constraints[index]['orders'] = orders
        # Get the PK
        pk_column = self.get_primary_key_column(cursor, table_name)
        if pk_column:
            # SQLite doesn't actually give a name to the PK constraint,
            # so we invent one. This is fine, as the SQLite backend never
            # deletes PK constraints by name, as you can't delete constraints
            # in SQLite; we remake the table with a new PK instead.
            constraints["__primary__"] = {
                "columns": [pk_column],
                "primary_key": True,
                "unique": False,  # It's not actually a unique constraint.
                "foreign_key": False,
                "check": False,
                "index": False,
            }
        # Get foreign keys
        cursor.execute('PRAGMA foreign_key_list(%s)' % self.connection.ops.quote_name(table_name))
        for row in cursor.fetchall():
            # Remaining on_update/on_delete/match values are of no interest here
            id_, seq, table, from_, to = row[:5]
            constraints['fk_%d' % id_] = {
                'columns': [from_],
                'primary_key': False,
                'unique': False,
                'foreign_key': (table, to),
                'check': False,
                'index': False,
            }
        return constraints
|
abhattad4/Digi-Menu | refs/heads/master | digimenu2/tests/force_insert_update/models.py | 581 | """
Tests for forcing insert and update queries (instead of Django's normal
automatic behavior).
"""
from django.db import models
class Counter(models.Model):
    """Simple concrete model used as the base for the test models below."""
    name = models.CharField(max_length=10)
    value = models.IntegerField()
class InheritedCounter(Counter):
    """Multi-table inheritance child of Counter (gets its own table)."""
    tag = models.CharField(max_length=10)
class ProxyCounter(Counter):
    """Proxy for Counter: same table, different Python class."""
    class Meta:
        proxy = True
class SubCounter(Counter):
    """Plain multi-table inheritance child with no extra fields."""
    pass
class WithCustomPK(models.Model):
    """Model with an explicit (non-auto) primary key field."""
    # Using the integer itself as the PK lets tests force inserts with
    # caller-chosen primary key values.
    name = models.IntegerField(primary_key=True)
    value = models.IntegerField()
|
lvchaqiu/meizu-mx-kernel | refs/heads/master | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n"

# Optional single argument: either a pid (numeric) or a comm (process
# name) to filter the report on.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except ValueError:
        # Argument is not a number, so treat it as a process name.
        for_comm = sys.argv[1]

# Nested autovivifying dict: comm -> pid -> syscall id -> errno -> count.
syscalls = autodict()
def trace_begin():
    # Called by perf once before event processing starts.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf after the last event; emit the aggregated report.
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    # Per-event hook invoked by perf for every sys_exit tracepoint.
    # Apply the optional comm/pid filter supplied on the command line.
    if for_comm and common_comm != for_comm:
        return
    if for_pid and common_pid != for_pid:
        return
    # Only failed syscalls (negative return value) are counted.
    if ret >= 0:
        return
    try:
        syscalls[common_comm][common_pid][id][ret] += 1
    except TypeError:
        # First failure seen for this (comm, pid, id, ret) combination.
        syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
    # Report header: per-comm if a filter was given, system-wide otherwise.
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",
    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
        "----------"),
    # Walk the nested comm -> pid -> syscall id -> errno -> count dict,
    # printing a section per (comm, pid) pair.
    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print " syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                # Sort errno entries by count (descending) so the most
                # frequent failures come first.
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print " err = %-20s %10d\n" % (strerror(ret), val),
|
Fokko/incubator-airflow | refs/heads/master | airflow/contrib/operators/emr_add_steps_operator.py | 1 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.emr_hook import EmrHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class EmrAddStepsOperator(BaseOperator):
    """
    An operator that adds steps to an existing EMR job_flow.

    :param job_flow_id: id of the JobFlow to add steps to. (templated)
    :type job_flow_id: str
    :param job_flow_name: name of the JobFlow to add steps to. Use as an alternative to passing
        job_flow_id. will search for id of JobFlow with matching name in one of the states in
        param cluster_states. Exactly one cluster like this should exist or will fail. (templated)
    :type job_flow_name: str
    :param cluster_states: Acceptable cluster states when searching for JobFlow id by job_flow_name.
        (templated)
    :type cluster_states: list
    :param aws_conn_id: aws connection to uses
    :type aws_conn_id: str
    :param steps: boto3 style steps to be added to the jobflow. (templated)
    :type steps: list
    :param do_xcom_push: if True, job_flow_id is pushed to XCom with key job_flow_id.
    :type do_xcom_push: bool
    """
    template_fields = ['job_flow_id', 'job_flow_name', 'cluster_states', 'steps']
    template_ext = ()
    ui_color = '#f9c915'

    @apply_defaults
    def __init__(
            self,
            job_flow_id=None,
            job_flow_name=None,
            cluster_states=None,
            aws_conn_id='aws_default',
            steps=None,
            *args, **kwargs):
        if kwargs.get('xcom_push') is not None:
            raise AirflowException("'xcom_push' was deprecated, use 'do_xcom_push' instead")
        # Exactly one of job_flow_id / job_flow_name must be provided.
        if not ((job_flow_id is None) ^ (job_flow_name is None)):
            raise AirflowException('Exactly one of job_flow_id or job_flow_name must be specified.')
        super().__init__(*args, **kwargs)
        steps = steps or []
        self.aws_conn_id = aws_conn_id
        self.job_flow_id = job_flow_id
        self.job_flow_name = job_flow_name
        self.cluster_states = cluster_states
        self.steps = steps

    def execute(self, context):
        """Add the configured steps to the job flow and return their step ids."""
        emr = EmrHook(aws_conn_id=self.aws_conn_id).get_conn()

        # Resolve the job flow id by name if an explicit id was not given.
        job_flow_id = self.job_flow_id
        if not job_flow_id:
            job_flow_id = emr.get_cluster_id_by_name(self.job_flow_name, self.cluster_states)

        if self.do_xcom_push:
            context['ti'].xcom_push(key='job_flow_id', value=job_flow_id)

        self.log.info('Adding steps to %s', job_flow_id)
        response = emr.add_job_flow_steps(JobFlowId=job_flow_id, Steps=self.steps)

        # boto3 does not raise on a non-200 HTTP status, so check explicitly.
        if response['ResponseMetadata']['HTTPStatusCode'] != 200:
            raise AirflowException('Adding steps failed: %s' % response)
        else:
            self.log.info('Steps %s added to JobFlow', response['StepIds'])
            return response['StepIds']
|
doctormo/gtkme | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
#
# Copyright (C) 2010 Martin Owens
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import sys
sys.path.insert(1, 'gtkme')
from setuptools import setup
from version import __version__, __pkgname__
# remove MANIFEST. distutils doesn't properly update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
setup(
name = __pkgname__,
version = __version__,
description = 'Amazing interface for wrapping python-gtk applications and make programming fun again.',
long_description = "Manages an Application with Gtk windows, forms, lists and other complex items easily.",
url = 'https://code.launchpad.net/~doctormo',
author = 'Martin Owens',
author_email = 'doctormo@gmail.com',
install_requires = ['pygobject'],
platforms = 'linux',
license = 'GPLv3',
packages = [ 'gtkme' ],
)
|
stinos/micropython | refs/heads/master | tests/basics/builtin_exec.py | 23 | # test builtin exec
# NOTE: this is an expected-output test; the exact print calls and their
# order must not change, only these comments are additions.

# The exec builtin is optional in some MicroPython ports; skip if absent.
try:
    exec
except NameError:
    print("SKIP")
    raise SystemExit

# exec returns None; the defined function lands in the current globals.
print(exec("def foo(): return 42"))
print(foo())

# Definitions go into the supplied globals dict, not the caller's scope.
d = {}
exec("def bar(): return 84", d)
print(d["bar"]())

# passing None/dict as args to globals/locals
foo = 11
exec('print(foo)')
exec('print(foo)', None)
exec('print(foo)', {'foo':3}, None)
exec('print(foo)', None, {'foo':3})
exec('print(foo)', None, {'bar':3})
exec('print(foo)', {'bar':3}, locals())
try:
    exec('print(foo)', {'bar':3}, None)
except NameError:
    print('NameError')

# invalid arg passed to globals
try:
    exec('print(1)', 'foo')
except TypeError:
    print('TypeError')

# invalid arg passed to locals
try:
    exec('print(1)', None, 123)
except TypeError:
    print('TypeError')
|
catapult-project/catapult | refs/heads/master | third_party/gsutil/third_party/pyu2f/pyu2f/convenience/customauthenticator.py | 7 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to offload the end to end flow of U2F signing."""
import base64
import hashlib
import json
import os
import struct
import subprocess
import sys
from pyu2f import errors
from pyu2f import model
from pyu2f.convenience import baseauthenticator
SK_SIGNING_PLUGIN_ENV_VAR = 'SK_SIGNING_PLUGIN'
U2F_SIGNATURE_TIMEOUT_SECONDS = 5
SK_SIGNING_PLUGIN_NO_ERROR = 0
SK_SIGNING_PLUGIN_TOUCH_REQUIRED = 0x6985
SK_SIGNING_PLUGIN_WRONG_DATA = 0x6A80
class CustomAuthenticator(baseauthenticator.BaseAuthenticator):
  """Offloads U2F signing to a pluggable command-line tool.

  Offloads U2F signing to a signing plugin which takes the form of a
  command-line tool. The command-line tool is configurable via the
  SK_SIGNING_PLUGIN environment variable.

  The signing plugin should implement the following interface:

  Communication occurs over stdin/stdout, and messages are both sent and
  received in the form:

  [4 bytes - payload size (little-endian)][variable bytes - json payload]

  Signing Request JSON
  {
    "type": "sign_helper_request",
    "signData": [{
        "keyHandle": <url-safe base64-encoded key handle>,
        "appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
        "challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
        "version": U2F protocol version (usually "U2F_V2")
        },...],
    "timeoutSeconds": <security key touch timeout>
  }

  Signing Response JSON
  {
    "type": "sign_helper_reply",
    "code": <result code>.
    "errorDetail": <text description of error>,
    "responseData": {
      "appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
      "challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
      "keyHandle": <url-safe base64-encoded key handle>,
      "version": <U2F protocol version>,
      "signatureData": <url-safe base64-encoded signature>
    }
  }

  Possible response error codes are:
    NoError = 0
    UnknownError = -127
    TouchRequired = 0x6985
    WrongData = 0x6a80
  """

  def __init__(self, origin):
    self.origin = origin

  def Authenticate(self, app_id, challenge_data,
                   print_callback=sys.stderr.write):
    """See base class."""

    # Ensure environment variable is present
    plugin_cmd = os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR)
    if plugin_cmd is None:
      raise errors.PluginError('{} env var is not set'
                               .format(SK_SIGNING_PLUGIN_ENV_VAR))

    # Prepare input to signer
    client_data_map, signing_input = self._BuildPluginRequest(
        app_id, challenge_data, self.origin)

    # Call plugin
    print_callback('Please insert and touch your security key\n')
    response = self._CallPlugin([plugin_cmd], signing_input)

    # Handle response: look up the ClientData that matches the key/challenge
    # pair the plugin actually signed.
    key_challenge_pair = (response['keyHandle'], response['challengeHash'])
    client_data_json = client_data_map[key_challenge_pair]
    client_data = client_data_json.encode()
    return self._BuildAuthenticatorResponse(app_id, client_data, response)

  def IsAvailable(self):
    """See base class."""
    return os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR) is not None

  def _BuildPluginRequest(self, app_id, challenge_data, origin):
    """Builds a JSON request in the form that the plugin expects.

    Returns a (client_data_map, request_json) tuple, where client_data_map
    maps (key handle, challenge hash) pairs back to the ClientData JSON that
    produced them.
    """
    client_data_map = {}
    encoded_challenges = []
    app_id_hash_encoded = self._Base64Encode(self._SHA256(app_id))
    for challenge_item in challenge_data:
      key = challenge_item['key']
      key_handle_encoded = self._Base64Encode(key.key_handle)

      raw_challenge = challenge_item['challenge']
      client_data_json = model.ClientData(
          model.ClientData.TYP_AUTHENTICATION,
          raw_challenge,
          origin).GetJson()
      challenge_hash_encoded = self._Base64Encode(
          self._SHA256(client_data_json))

      # Populate challenges list
      encoded_challenges.append({
          'appIdHash': app_id_hash_encoded,
          'challengeHash': challenge_hash_encoded,
          'keyHandle': key_handle_encoded,
          'version': key.version,
      })

      # Populate ClientData map
      key_challenge_pair = (key_handle_encoded, challenge_hash_encoded)
      client_data_map[key_challenge_pair] = client_data_json

    signing_request = {
        'type': 'sign_helper_request',
        'signData': encoded_challenges,
        'timeoutSeconds': U2F_SIGNATURE_TIMEOUT_SECONDS,
        'localAlways': True
    }

    return client_data_map, json.dumps(signing_request)

  def _BuildAuthenticatorResponse(self, app_id, client_data, plugin_response):
    """Builds the response to return to the caller."""
    encoded_client_data = self._Base64Encode(client_data)
    signature_data = str(plugin_response['signatureData'])
    key_handle = str(plugin_response['keyHandle'])

    response = {
        'clientData': encoded_client_data,
        'signatureData': signature_data,
        'applicationId': app_id,
        'keyHandle': key_handle,
    }

    return response

  def _CallPlugin(self, cmd, input_json):
    """Calls the plugin and validates the response.

    Raises:
      errors.PluginError: if the plugin output is malformed.
      errors.U2FError: for touch-required / wrong-data result codes.
    """
    # Calculate length of input
    input_length = len(input_json)
    length_bytes_le = struct.pack('<I', input_length)
    request = length_bytes_le + input_json.encode()

    # Call plugin
    sign_process = subprocess.Popen(cmd,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)

    stdout = sign_process.communicate(request)[0]
    exit_status = sign_process.wait()

    # Parse and validate response size
    response_len_le = stdout[:4]
    response_len = struct.unpack('<I', response_len_le)[0]
    response = stdout[4:]
    if response_len != len(response):
      raise errors.PluginError(
          'Plugin response length {} does not match data {} (exit_status={})'
          .format(response_len, len(response), exit_status))

    # Ensure valid json
    try:
      json_response = json.loads(response.decode())
    except ValueError:
      raise errors.PluginError('Plugin returned invalid output (exit_status={})'
                               .format(exit_status))

    # Ensure response type
    if json_response.get('type') != 'sign_helper_reply':
      raise errors.PluginError('Plugin returned invalid response type '
                               '(exit_status={})'
                               .format(exit_status))

    # Parse response codes
    result_code = json_response.get('code')
    if result_code is None:
      raise errors.PluginError('Plugin missing result code (exit_status={})'
                               .format(exit_status))

    # Handle errors
    if result_code == SK_SIGNING_PLUGIN_TOUCH_REQUIRED:
      raise errors.U2FError(errors.U2FError.TIMEOUT)
    elif result_code == SK_SIGNING_PLUGIN_WRONG_DATA:
      raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
    elif result_code != SK_SIGNING_PLUGIN_NO_ERROR:
      raise errors.PluginError(
          'Plugin failed with error {} - {} (exit_status={})'
          .format(result_code,
                  json_response.get('errorDetail'),
                  exit_status))

    # Ensure response data is present
    response_data = json_response.get('responseData')
    if response_data is None:
      # BUG FIX: was errors.PluginErrors (nonexistent name), which would have
      # raised AttributeError instead of the intended PluginError.
      raise errors.PluginError(
          'Plugin returned output with missing responseData (exit_status={})'
          .format(exit_status))

    return response_data

  def _SHA256(self, string):
    """Helper method to perform SHA256."""
    md = hashlib.sha256()
    md.update(string.encode())
    return md.digest()

  def _Base64Encode(self, bytes_data):
    """Helper method to base64 encode, strip padding, and return str
       result."""
    return base64.urlsafe_b64encode(bytes_data).decode().rstrip('=')
|
azatoth/scons | refs/heads/master | src/engine/SCons/Tool/f90.py | 6 | """engine.SCons.Tool.f90
Tool-specific initialization for the generic Posix f90 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
from SCons.Tool.FortranCommon import add_all_to_env, add_f90_to_env
compilers = ['f90']
def generate(env):
    """Add Builders and construction variables for the generic Posix f90
    Fortran compiler to the given Environment."""
    add_all_to_env(env)
    add_f90_to_env(env)

    # Use the first detected f90 compiler, falling back to plain 'f90'.
    compiler = env.Detect(compilers) or 'f90'
    for variable in ('F90', 'SHF90', 'FORTRAN', 'SHFORTRAN'):
        env[variable] = compiler
def exists(env):
    # The tool is usable only when one of the known f90 compilers is found.
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
kspviswa/personfinder | refs/heads/master | tools/babel/messages/pofile.py | 54 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Reading and writing of files in the ``gettext`` PO (portable object)
format.
:see: `The Format of PO Files
<http://www.gnu.org/software/gettext/manual/gettext.html#PO-Files>`_
"""
from datetime import date, datetime
import os
import re
from babel import __version__ as VERSION
from babel.messages.catalog import Catalog, Message
from babel.util import set, wraptext, LOCALTZ
__all__ = ['read_po', 'write_po']
__docformat__ = 'restructuredtext en'
def unescape(string):
    r"""Reverse `escape` the given string.

    The input is a double-quoted PO string; the surrounding quotes are
    stripped and the escape sequences ``\\``, ``\t``, ``\r``, ``\n`` and
    ``\"`` are decoded.

    The decoding is done in a single regex pass. The previous chain of
    ``str.replace`` calls was order-dependent and corrupted escaped
    backslash sequences: ``"\\t"`` (a literal backslash followed by 't')
    first became ``\t`` and was then wrongly turned into a tab character.

    :param string: the string to unescape
    :return: the unescaped string
    :rtype: `str` or `unicode`
    """
    def replace_escapes(match):
        m = match.group(1)
        if m == 'n':
            return '\n'
        elif m == 't':
            return '\t'
        elif m == 'r':
            return '\r'
        # m is one of \ or ", both of which unescape to themselves
        return m
    return re.sub(r'\\([\\trn"])', replace_escapes, string[1:-1])
def denormalize(string):
    r"""Reverse the normalization done by the `normalize` function.

    A normalized value is either a single quoted string, or an empty ``""``
    marker followed by one quoted continuation line per original line; in
    the latter case the unescaped continuation lines are concatenated.

    :param string: the string to denormalize
    :return: the denormalized string
    :rtype: `unicode` or `str`
    """
    if not string.startswith('""'):
        # Single-line form: just strip quotes and decode escapes.
        return unescape(string)
    # Multi-line form: skip the leading '""' marker and join the
    # unescaped continuation lines.
    parts = [unescape(line) for line in string.splitlines()[1:]]
    return ''.join(parts)
def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False):
    """Read messages from a ``gettext`` PO (portable object) file from the given
    file-like object and return a `Catalog`.

    >>> from StringIO import StringIO
    >>> buf = StringIO('''
    ... #: main.py:1
    ... #, fuzzy, python-format
    ... msgid "foo %(name)s"
    ... msgstr ""
    ...
    ... # A user comment
    ... #. An auto comment
    ... #: main.py:3
    ... msgid "bar"
    ... msgid_plural "baz"
    ... msgstr[0] ""
    ... msgstr[1] ""
    ... ''')
    >>> catalog = read_po(buf)
    >>> catalog.revision_date = datetime(2007, 04, 01)

    >>> for message in catalog:
    ...     if message.id:
    ...         print (message.id, message.string)
    ...         print ' ', (message.locations, message.flags)
    ...         print ' ', (message.user_comments, message.auto_comments)
    (u'foo %(name)s', '')
    ([(u'main.py', 1)], set([u'fuzzy', u'python-format']))
    ([], [])
    ((u'bar', u'baz'), ('', ''))
    ([(u'main.py', 3)], set([]))
    ([u'A user comment'], [u'An auto comment'])

    :param fileobj: the file-like object to read the PO file from
    :param locale: the locale identifier or `Locale` object, or `None`
                   if the catalog is not bound to a locale (which basically
                   means it's a template)
    :param domain: the message domain
    :param ignore_obsolete: whether to ignore obsolete messages in the input
    :return: an iterator over ``(message, translation, location)`` tuples
    :rtype: ``iterator``
    """
    catalog = Catalog(locale=locale, domain=domain)

    # Single-element lists emulate ``nonlocal`` for the nested helper
    # functions below (this module predates Python 3's nonlocal keyword).
    counter = [0]
    offset = [0]
    messages = []
    translations = []
    locations = []
    flags = []
    user_comments = []
    auto_comments = []
    obsolete = [False]
    in_msgid = [False]
    in_msgstr = [False]

    def _add_message():
        # Flush the currently accumulated msgid/msgstr state into the
        # catalog as one Message, then reset all accumulators.
        translations.sort()
        if len(messages) > 1:
            # More than one msgid means a plural entry (msgid + msgid_plural).
            msgid = tuple([denormalize(m) for m in messages])
        else:
            msgid = denormalize(messages[0])
        if isinstance(msgid, (list, tuple)):
            # Pad missing plural translations with empty strings so the
            # string tuple always has num_plurals entries.
            string = []
            for idx in range(catalog.num_plurals):
                try:
                    string.append(translations[idx])
                except IndexError:
                    string.append((idx, ''))
            string = tuple([denormalize(t[1]) for t in string])
        else:
            string = denormalize(translations[0][1])
        message = Message(msgid, string, list(locations), set(flags),
                          auto_comments, user_comments, lineno=offset[0] + 1)
        if obsolete[0]:
            if not ignore_obsolete:
                catalog.obsolete[msgid] = message
        else:
            catalog[msgid] = message
        del messages[:]; del translations[:]; del locations[:];
        del flags[:]; del auto_comments[:]; del user_comments[:]
        obsolete[0] = False
        counter[0] += 1

    def _process_message_line(lineno, line):
        # Dispatch on the keyword at the start of a non-comment line;
        # bare quoted strings continue the preceding msgid/msgstr.
        if line.startswith('msgid_plural'):
            in_msgid[0] = True
            msg = line[12:].lstrip()
            messages.append(msg)
        elif line.startswith('msgid'):
            in_msgid[0] = True
            offset[0] = lineno
            txt = line[5:].lstrip()
            if messages:
                # A new msgid ends the previous entry.
                _add_message()
            messages.append(txt)
        elif line.startswith('msgstr'):
            in_msgid[0] = False
            in_msgstr[0] = True
            msg = line[6:].lstrip()
            if msg.startswith('['):
                # Plural form: msgstr[N] "..."
                idx, msg = msg[1:].split(']', 1)
                translations.append([int(idx), msg.lstrip()])
            else:
                translations.append([0, msg])
        elif line.startswith('"'):
            # Continuation line for a multi-line msgid or msgstr.
            if in_msgid[0]:
                messages[-1] += u'\n' + line.rstrip()
            elif in_msgstr[0]:
                translations[-1][1] += u'\n' + line.rstrip()

    for lineno, line in enumerate(fileobj.readlines()):
        line = line.strip()
        if not isinstance(line, unicode):
            line = line.decode(catalog.charset)
        if line.startswith('#'):
            # Comment lines: locations (#:), flags (#,), obsolete (#~),
            # auto comments (#.) or plain user comments.
            in_msgid[0] = in_msgstr[0] = False
            if messages and translations:
                _add_message()
            if line[1:].startswith(':'):
                for location in line[2:].lstrip().split():
                    pos = location.rfind(':')
                    if pos >= 0:
                        try:
                            lineno = int(location[pos + 1:])
                        except ValueError:
                            continue
                        locations.append((location[:pos], lineno))
            elif line[1:].startswith(','):
                for flag in line[2:].lstrip().split(','):
                    flags.append(flag.strip())
            elif line[1:].startswith('~'):
                obsolete[0] = True
                _process_message_line(lineno, line[2:].lstrip())
            elif line[1:].startswith('.'):
                # These are called auto-comments
                comment = line[2:].strip()
                if comment: # Just check that we're not adding empty comments
                    auto_comments.append(comment)
            else:
                # These are called user comments
                user_comments.append(line[1:].strip())
        else:
            _process_message_line(lineno, line)

    if messages:
        _add_message()

    # No actual messages found, but there was some info in comments, from which
    # we'll construct an empty header message
    elif not counter[0] and (flags or user_comments or auto_comments):
        messages.append(u'')
        translations.append([0, u''])
        _add_message()

    return catalog
# Pattern used by `normalize` to split a line into wrappable chunks:
# whitespace runs, hyphenated words, and em-dashes are valid break points.
WORD_SEP = re.compile('('
    r'\s+|' # any whitespace
    r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
    r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash
')')
def escape(string):
    r"""Escape the given string so that it can be included in double-quoted
    strings in ``PO`` files.

    Backslashes are escaped before the other characters so that the
    backslashes introduced for tab/CR/LF/quote are not themselves doubled.

    :param string: the string to escape
    :return: the escaped string, wrapped in double quotes
    :rtype: `str` or `unicode`
    """
    escaped = string
    for char, sequence in (('\\', '\\\\'), ('\t', '\\t'), ('\r', '\\r'),
                           ('\n', '\\n'), ('\"', '\\"')):
        escaped = escaped.replace(char, sequence)
    return '"%s"' % escaped
def normalize(string, prefix='', width=76):
    r"""Convert a string into a format that is appropriate for .po files.

    >>> print normalize('''Say:
    ...   "hello, world!"
    ... ''', width=None)
    ""
    "Say:\n"
    "  \"hello, world!\"\n"

    >>> print normalize('''Say:
    ...   "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
    ... ''', width=32)
    ""
    "Say:\n"
    "  \"Lorem ipsum dolor sit "
    "amet, consectetur adipisicing"
    " elit, \"\n"

    :param string: the string to normalize
    :param prefix: a string that should be prepended to every line
    :param width: the maximum line width; use `None`, 0, or a negative number
                  to completely disable line wrapping
    :return: the normalized string
    :rtype: `unicode`
    """
    if width and width > 0:
        prefixlen = len(prefix)
        lines = []
        for idx, line in enumerate(string.splitlines(True)):
            # Only wrap lines whose escaped form would exceed the width.
            if len(escape(line)) + prefixlen > width:
                chunks = WORD_SEP.split(line)
                chunks.reverse()
                # Greedily pack chunks (popping from the reversed list) into
                # output lines until the width budget is exhausted.
                while chunks:
                    buf = []
                    size = 2  # account for the surrounding double quotes
                    while chunks:
                        l = len(escape(chunks[-1])) - 2 + prefixlen
                        if size + l < width:
                            buf.append(chunks.pop())
                            size += l
                        else:
                            if not buf:
                                # handle long chunks by putting them on a
                                # separate line
                                buf.append(chunks.pop())
                            break
                    lines.append(u''.join(buf))
            else:
                lines.append(line)
    else:
        lines = string.splitlines(True)

    if len(lines) <= 1:
        # Single-line values don't need the leading '""' marker.
        return escape(string)

    # Remove empty trailing line
    if lines and not lines[-1]:
        del lines[-1]
        lines[-1] += '\n'
    return u'""\n' + u'\n'.join([(prefix + escape(l)) for l in lines])
def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False,
             sort_output=False, sort_by_file=False, ignore_obsolete=False,
             include_previous=False):
    r"""Write a ``gettext`` PO (portable object) template file for a given
    message catalog to the provided file-like object.

    >>> catalog = Catalog()
    >>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
    ...             flags=('fuzzy',))
    >>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
    >>> from StringIO import StringIO
    >>> buf = StringIO()
    >>> write_po(buf, catalog, omit_header=True)
    >>> print buf.getvalue()
    #: main.py:1
    #, fuzzy, python-format
    msgid "foo %(name)s"
    msgstr ""
    <BLANKLINE>
    #: main.py:3
    msgid "bar"
    msgid_plural "baz"
    msgstr[0] ""
    msgstr[1] ""
    <BLANKLINE>
    <BLANKLINE>

    :param fileobj: the file-like object to write to
    :param catalog: the `Catalog` instance
    :param width: the maximum line width for the generated output; use `None`,
                  0, or a negative number to completely disable line wrapping
    :param no_location: do not emit a location comment for every message
    :param omit_header: do not include the ``msgid ""`` entry at the top of the
                        output
    :param sort_output: whether to sort the messages in the output by msgid
    :param sort_by_file: whether to sort the messages in the output by their
                         locations
    :param ignore_obsolete: whether to ignore obsolete messages and not include
                            them in the output; by default they are included as
                            comments
    :param include_previous: include the old msgid as a comment when
                             updating the catalog
    """
    def _normalize(key, prefix=''):
        # Wrap/escape a value and encode it in the catalog's charset.
        return normalize(key, prefix=prefix, width=width) \
            .encode(catalog.charset, 'backslashreplace')

    def _write(text):
        # All output goes through here so unicode is encoded exactly once.
        if isinstance(text, unicode):
            text = text.encode(catalog.charset)
        fileobj.write(text)

    def _write_comment(comment, prefix=''):
        # xgettext always wraps comments even if --no-wrap is passed;
        # provide the same behaviour
        if width and width > 0:
            _width = width
        else:
            _width = 76
        for line in wraptext(comment, _width):
            _write('#%s %s\n' % (prefix, line.strip()))

    def _write_message(message, prefix=''):
        # Emit msgid/msgstr lines; tuple ids are plural entries.
        if isinstance(message.id, (list, tuple)):
            _write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
            _write('%smsgid_plural %s\n' % (
                prefix, _normalize(message.id[1], prefix)
            ))
            # One msgstr[N] per plural form; missing forms become empty.
            for idx in range(catalog.num_plurals):
                try:
                    string = message.string[idx]
                except IndexError:
                    string = ''
                _write('%smsgstr[%d] %s\n' % (
                    prefix, idx, _normalize(string, prefix)
                ))
        else:
            _write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
            _write('%smsgstr %s\n' % (
                prefix, _normalize(message.string or '', prefix)
            ))

    messages = list(catalog)
    if sort_output:
        messages.sort()
    elif sort_by_file:
        messages.sort(lambda x,y: cmp(x.locations, y.locations))

    for message in messages:
        if not message.id: # This is the header "message"
            if omit_header:
                continue
            comment_header = catalog.header_comment
            if width and width > 0:
                lines = []
                for line in comment_header.splitlines():
                    lines += wraptext(line, width=width,
                                      subsequent_indent='# ')
                comment_header = u'\n'.join(lines) + u'\n'
            _write(comment_header)

        for comment in message.user_comments:
            _write_comment(comment)
        for comment in message.auto_comments:
            _write_comment(comment, prefix='.')

        if not no_location:
            locs = u' '.join([u'%s:%d' % (filename.replace(os.sep, '/'), lineno)
                              for filename, lineno in message.locations])
            _write_comment(locs, prefix=':')
        if message.flags:
            # The leading '' yields the "#, flag1, flag2" comma format.
            _write('#%s\n' % ', '.join([''] + list(message.flags)))

        if message.previous_id and include_previous:
            _write_comment('msgid %s' % _normalize(message.previous_id[0]),
                           prefix='|')
            if len(message.previous_id) > 1:
                _write_comment('msgid_plural %s' % _normalize(
                    message.previous_id[1]
                ), prefix='|')

        _write_message(message)
        _write('\n')

    if not ignore_obsolete:
        # Obsolete messages are emitted at the end with the '#~ ' prefix.
        for message in catalog.obsolete.values():
            for comment in message.user_comments:
                _write_comment(comment)
            _write_message(message, prefix='#~ ')
            _write('\n')
|
sgraham/nope | refs/heads/master | tools/gyp/test/hello/gyptest-target.py | 351 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simplest-possible build of a "Hello, world!" program
using an explicit build target of 'hello'.
"""
import TestGyp
# Use a dedicated work directory so concurrent test runs don't collide.
test = TestGyp.TestGyp(workdir='workarea_target')

# Generate build files, then build the explicitly named 'hello' target.
test.run_gyp('hello.gyp')

test.build('hello.gyp', 'hello')

# The built program must emit the canonical greeting.
test.run_built_executable('hello', stdout="Hello, world!\n")

# A rebuild immediately afterwards should be a no-op.
test.up_to_date('hello.gyp', 'hello')

test.pass_test()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.