| repo_name | ref | path | copies | content |
|---|---|---|---|---|
xuxiao19910803/edx
|
refs/heads/master
|
common/test/acceptance/tests/video/__init__.py
|
12133432
| |
ashhher3/invenio
|
refs/heads/master
|
modules/bibauthorid/lib/__init__.py
|
12133432
| |
jasonwee/asus-rt-n14uhp-mrtg
|
refs/heads/master
|
tmp/ve_asus-rt-n14uhp-mrtg/lib/python3.4/site-packages/pip/models/index.py
|
917
|
from pip._vendor.six.moves.urllib import parse as urllib_parse
class Index(object):
def __init__(self, url):
self.url = url
self.netloc = urllib_parse.urlsplit(url).netloc
self.simple_url = self.url_to_path('simple')
self.pypi_url = self.url_to_path('pypi')
self.pip_json_url = self.url_to_path('pypi/pip/json')
def url_to_path(self, path):
return urllib_parse.urljoin(self.url, path)
PyPI = Index('https://pypi.python.org/')
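# Illustrative usage (not part of the original module): Index simply joins
# relative paths onto its base URL, so a hypothetical mirror would expose the
# same derived URLs.
#
#     mirror = Index('https://mirror.example.org/')   # hypothetical base URL
#     mirror.simple_url   # -> 'https://mirror.example.org/simple'
#     mirror.pypi_url     # -> 'https://mirror.example.org/pypi'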
|
CreativeMachinesLab/aracna
|
refs/heads/master
|
RaspberryPypose/commander.py
|
2
|
#!/usr/bin/env python
"""
PyPose: Bioloid pose system for arbotiX robocontroller
Copyright (c) 2009,2010 Michael E. Ferguson. All right reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import time, sys, serial
import wx
# Commander definitions
BUT_R1 = 1
BUT_R2 = 2
BUT_R3 = 4
BUT_L4 = 8
BUT_L5 = 16
BUT_L6 = 32
BUT_RT = 64
BUT_LT = 128
width = 300
class Commander(wx.Frame):
TIMER_ID = 100
def __init__(self, parent, ser, debug = False):
wx.Frame.__init__(self, parent, -1, "ArbotiX Commander", style = wx.DEFAULT_FRAME_STYLE & ~ (wx.RESIZE_BORDER | wx.MAXIMIZE_BOX))
self.ser = ser
sizer = wx.GridBagSizer(10,10)
self.drive = wx.Panel(self,size=(width,width-20))
self.drive.SetBackgroundColour('WHITE')
self.drive.Bind(wx.EVT_MOTION, self.onMove)
wx.StaticLine(self.drive, -1, (width/2, 0), (1,width), style=wx.LI_VERTICAL)
wx.StaticLine(self.drive, -1, (0, width/2), (width,1))
sizer.Add(self.drive,(0,0),wx.GBSpan(2,1),wx.EXPAND|wx.ALL,5)
self.forward = 0
self.turn = 0
# Selection for horizontal movement
horiz = wx.StaticBox(self, -1, 'Horizontal Movement')
horiz.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
horizBox = wx.StaticBoxSizer(horiz,orient=wx.VERTICAL)
self.selTurn = wx.RadioButton(self, -1, 'Turn', style=wx.RB_GROUP)
horizBox.Add(self.selTurn)
self.selStrafe = wx.RadioButton(self, -1, 'Strafe')
horizBox.Add(self.selStrafe)
sizer.Add(horizBox, (0,1), wx.GBSpan(1,1), wx.EXPAND|wx.TOP|wx.RIGHT,5)
# Body rotations
body = wx.StaticBox(self, -1, 'Body Movement')
body.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
bodyBox = wx.StaticBoxSizer(body,orient=wx.VERTICAL)
bodySizer = wx.GridBagSizer(5,5)
bodySizer.Add(wx.StaticText(self, -1, "Pan:"),(0,0), wx.GBSpan(1,1),wx.ALIGN_CENTER_VERTICAL)
self.pan = wx.Slider(self, -1, 0, -100, 100, wx.DefaultPosition, (200, -1), wx.SL_HORIZONTAL | wx.SL_LABELS)
bodySizer.Add(self.pan,(0,1))
bodySizer.Add(wx.StaticText(self, -1, "Tilt:"),(1,0), wx.GBSpan(1,1),wx.ALIGN_CENTER_VERTICAL)
self.tilt = wx.Slider(self, -1, 0, -100, 100, wx.DefaultPosition, (200, -1), wx.SL_HORIZONTAL | wx.SL_LABELS)
bodySizer.Add(self.tilt,(1,1))
bodySizer.Add(wx.StaticText(self, -1, "Roll:"),(2,0), wx.GBSpan(1,1),wx.ALIGN_CENTER_VERTICAL)
self.roll = wx.Slider(self, -1, 0, -100, 100, wx.DefaultPosition, (200, -1), wx.SL_HORIZONTAL | wx.SL_LABELS)
self.roll.Disable()
bodySizer.Add(self.roll,(2,1))
bodyBox.Add(bodySizer)
sizer.Add(bodyBox, (1,1), wx.GBSpan(1,1), wx.EXPAND|wx.BOTTOM|wx.RIGHT,5)
# timer for output
self.timer = wx.Timer(self, self.TIMER_ID)
self.timer.Start(33)
wx.EVT_CLOSE(self, self.onClose)
wx.EVT_TIMER(self, self.TIMER_ID, self.onTimer)
self.SetSizerAndFit(sizer)
self.Show(True)
def onClose(self, event):
self.timer.Stop()
self.sendPacket(128,128,128,128,0)
self.Destroy()
def onMove(self, event=None):
if event.LeftIsDown():
pt = event.GetPosition()
self.forward = ((width/2)-pt[1])/2
self.turn = (pt[0]-(width/2))/2
else:
self.forward = 0
self.turn = 0
pass
def onTimer(self, event=None):
# configure output
Buttons = 0
if self.selStrafe.GetValue():
Buttons = BUT_LT
self.sendPacket(self.tilt.GetValue(), self.pan.GetValue(), self.forward, self.turn, Buttons)
while self.ser.inWaiting() > 0:
print self.ser.read(),
self.timer.Start(50)
def sendPacket(self, right_vertical, right_horizontal, left_vertical, left_horizontal, Buttons):
# send output
self.ser.write('\xFF')
self.ser.write(chr(right_vertical+128))
self.ser.write(chr(right_horizontal+128))
self.ser.write(chr(left_vertical+128))
self.ser.write(chr(left_horizontal+128))
self.ser.write(chr(Buttons))
self.ser.write(chr(0))
self.ser.write(chr(255 - ((right_vertical+right_horizontal+left_vertical+left_horizontal+Buttons)%256)))
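# Wire format as implemented above (a reading of this code, not an official
# protocol reference): one 0xFF start byte, the four axis values each offset
# by +128 into the 0-255 range, the buttons byte, a constant 0x00 byte, and a
# checksum byte of 255 - ((right_vertical + right_horizontal + left_vertical
# + left_horizontal + Buttons) % 256), i.e. computed over the un-offset values.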
if __name__ == "__main__":
# commander.py <serialport>
ser = serial.Serial()
ser.baudrate = 38400
ser.port = sys.argv[1]
ser.timeout = 0.5
ser.open()
app = wx.PySimpleApp()
frame = Commander(None, ser, True)
app.MainLoop()
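# Example invocation (illustrative; the serial port name below is a
# placeholder for whatever port the ArbotiX board enumerates as):
#
#     python commander.py /dev/ttyUSB0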
|
alx-eu/django
|
refs/heads/stable/1.5.x
|
django/contrib/gis/feeds.py
|
225
|
from __future__ import unicode_literals
from django.contrib.syndication.views import Feed as BaseFeed
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
class GeoFeedMixin(object):
"""
This mixin provides the necessary routines for SyndicationFeed subclasses
to produce simple GeoRSS or W3C Geo elements.
"""
def georss_coords(self, coords):
"""
In GeoRSS coordinate pairs are ordered by lat/lon and separated by
a single white space. Given a tuple of coordinates, this will return
a unicode GeoRSS representation.
"""
return ' '.join(['%f %f' % (coord[1], coord[0]) for coord in coords])
def add_georss_point(self, handler, coords, w3c_geo=False):
"""
Adds a GeoRSS point with the given coords using the given handler.
Handles the differences between simple GeoRSS and the more popular
W3C Geo specification.
"""
if w3c_geo:
lon, lat = coords[:2]
handler.addQuickElement('geo:lat', '%f' % lat)
handler.addQuickElement('geo:lon', '%f' % lon)
else:
handler.addQuickElement('georss:point', self.georss_coords((coords,)))
def add_georss_element(self, handler, item, w3c_geo=False):
"""
This routine adds a GeoRSS XML element using the given item and handler.
"""
# Getting the Geometry object.
geom = item.get('geometry', None)
if not geom is None:
if isinstance(geom, (list, tuple)):
# Special case if a tuple/list was passed in. The tuple may be
# a point or a box
box_coords = None
if isinstance(geom[0], (list, tuple)):
# Box: ( (X0, Y0), (X1, Y1) )
if len(geom) == 2:
box_coords = geom
else:
raise ValueError('Only should be two sets of coordinates.')
else:
if len(geom) == 2:
# Point: (X, Y)
self.add_georss_point(handler, geom, w3c_geo=w3c_geo)
elif len(geom) == 4:
# Box: (X0, Y0, X1, Y1)
box_coords = (geom[:2], geom[2:])
else:
raise ValueError('Only should be 2 or 4 numeric elements.')
# If a GeoRSS box was given via tuple.
if not box_coords is None:
if w3c_geo: raise ValueError('Cannot use simple GeoRSS box in W3C Geo feeds.')
handler.addQuickElement('georss:box', self.georss_coords(box_coords))
else:
# Getting the lower-case geometry type.
gtype = str(geom.geom_type).lower()
if gtype == 'point':
self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)
else:
if w3c_geo: raise ValueError('W3C Geo only supports Point geometries.')
# For formatting consistent w/the GeoRSS simple standard:
# http://georss.org/1.0#simple
if gtype in ('linestring', 'linearring'):
handler.addQuickElement('georss:line', self.georss_coords(geom.coords))
elif gtype in ('polygon',):
# Only support the exterior ring.
handler.addQuickElement('georss:polygon', self.georss_coords(geom[0].coords))
else:
raise ValueError('Geometry type "%s" not supported.' % geom.geom_type)
### SyndicationFeed subclasses ###
class GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super(GeoRSSFeed, self).rss_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super(GeoRSSFeed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super(GeoRSSFeed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class GeoAtom1Feed(Atom1Feed, GeoFeedMixin):
def root_attributes(self):
attrs = super(GeoAtom1Feed, self).root_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super(GeoAtom1Feed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super(GeoAtom1Feed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super(W3CGeoFeed, self).rss_attributes()
attrs['xmlns:geo'] = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
return attrs
def add_item_elements(self, handler, item):
super(W3CGeoFeed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item, w3c_geo=True)
def add_root_elements(self, handler):
super(W3CGeoFeed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed, w3c_geo=True)
### Feed subclass ###
class Feed(BaseFeed):
"""
This is a subclass of the `Feed` from `django.contrib.syndication`.
This allows users to define `geometry(obj)` and/or `item_geometry(item)`
methods on their own subclasses so that geo-referenced information may
be placed in the feed.
"""
feed_type = GeoRSSFeed
def feed_extra_kwargs(self, obj):
return {'geometry' : self.__get_dynamic_attr('geometry', obj)}
def item_extra_kwargs(self, item):
return {'geometry' : self.__get_dynamic_attr('item_geometry', item)}
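# A minimal sketch (not part of Django; the model and field names are
# hypothetical) of how the Feed subclass above is meant to be used: define
# geometry()/item_geometry() and the mixin renders the returned geometries.
#
#     class PlaceFeed(Feed):
#         title = "Recent places"
#         link = "/places/"
#
#         def items(self):
#             return Place.objects.order_by('-created')[:10]
#
#         def item_geometry(self, item):
#             return item.point   # a Point renders as a georss:point element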
|
aganji/vdtp
|
refs/heads/master
|
client.py
|
1
|
import vdtp
LOCAL_IP = '127.0.0.1'
LOCAL_PORT = 5050
# construct packet (key-value pairs)
data = {'a':1,'b':2,'c':'String'}
data['d'] = 'abcdefghijklmnopqrstuvwxyz'
vdtp.send(data,(LOCAL_IP,LOCAL_PORT),1)
|
anntzer/numpy
|
refs/heads/master
|
numpy/ma/tests/test_subclassing.py
|
3
|
# pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
import numpy as np
from numpy.testing import assert_, assert_raises
from numpy.ma.testutils import assert_equal
from numpy.ma.core import (
array, arange, masked, MaskedArray, masked_array, log, add, hypot,
divide, asarray, asanyarray, nomask
)
# from numpy.ma.core import (
def assert_startswith(a, b):
# produces a better error message than assert_(a.startswith(b))
assert_equal(a[:len(b)], b)
class SubArray(np.ndarray):
# Defines a generic np.ndarray subclass, that stores some metadata
# in the dictionary `info`.
def __new__(cls,arr,info={}):
x = np.asanyarray(arr).view(cls)
x.info = info.copy()
return x
def __array_finalize__(self, obj):
if callable(getattr(super(SubArray, self),
'__array_finalize__', None)):
super(SubArray, self).__array_finalize__(obj)
self.info = getattr(obj, 'info', {}).copy()
return
def __add__(self, other):
result = super(SubArray, self).__add__(other)
result.info['added'] = result.info.get('added', 0) + 1
return result
def __iadd__(self, other):
result = super(SubArray, self).__iadd__(other)
result.info['iadded'] = result.info.get('iadded', 0) + 1
return result
subarray = SubArray
class SubMaskedArray(MaskedArray):
"""Pure subclass of MaskedArray, keeping some info on subclass."""
def __new__(cls, info=None, **kwargs):
obj = super(SubMaskedArray, cls).__new__(cls, **kwargs)
obj._optinfo['info'] = info
return obj
class MSubArray(SubArray, MaskedArray):
def __new__(cls, data, info={}, mask=nomask):
subarr = SubArray(data, info)
_data = MaskedArray.__new__(cls, data=subarr, mask=mask)
_data.info = subarr.info
return _data
@property
def _series(self):
_view = self.view(MaskedArray)
_view._sharedmask = False
return _view
msubarray = MSubArray
# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing
# setting to non-class values (and thus np.ma.core.masked_print_option)
# and overrides __array_wrap__, updating the info dict, to check that this
# doesn't get destroyed by MaskedArray._update_from. But this one also needs
# its own iterator...
class CSAIterator:
"""
Flat iterator object that uses its own setter/getter
(works around ndarray.flat not propagating subclass setters/getters
see https://github.com/numpy/numpy/issues/4564)
roughly following MaskedIterator
"""
def __init__(self, a):
self._original = a
self._dataiter = a.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
if not isinstance(out, np.ndarray):
out = out.__array__()
out = out.view(type(self._original))
return out
def __setitem__(self, index, value):
self._dataiter[index] = self._original._validate_input(value)
def __next__(self):
return next(self._dataiter).__array__().view(type(self._original))
class ComplicatedSubArray(SubArray):
def __str__(self):
return f'myprefix {self.view(SubArray)} mypostfix'
def __repr__(self):
# Return a repr that does not start with 'name('
return f'<{self.__class__.__name__} {self}>'
def _validate_input(self, value):
if not isinstance(value, ComplicatedSubArray):
raise ValueError("Can only set to MySubArray values")
return value
def __setitem__(self, item, value):
# validation ensures direct assignment with ndarray or
# masked_print_option will fail
super(ComplicatedSubArray, self).__setitem__(
item, self._validate_input(value))
def __getitem__(self, item):
# ensure getter returns our own class also for scalars
value = super(ComplicatedSubArray, self).__getitem__(item)
if not isinstance(value, np.ndarray): # scalar
value = value.__array__().view(ComplicatedSubArray)
return value
@property
def flat(self):
return CSAIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
def __array_wrap__(self, obj, context=None):
obj = super(ComplicatedSubArray, self).__array_wrap__(obj, context)
if context is not None and context[0] is np.multiply:
obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1
return obj
class TestSubclassing:
# Test suite for masked subclasses of ndarray.
def setup(self):
x = np.arange(5, dtype='float')
mx = msubarray(x, mask=[0, 1, 0, 0, 0])
self.data = (x, mx)
def test_data_subclassing(self):
# Tests whether the subclass is kept.
x = np.arange(5)
m = [0, 0, 1, 0, 0]
xsub = SubArray(x)
xmsub = masked_array(xsub, mask=m)
assert_(isinstance(xmsub, MaskedArray))
assert_equal(xmsub._data, xsub)
assert_(isinstance(xmsub._data, SubArray))
def test_maskedarray_subclassing(self):
# Tests subclassing MaskedArray
(x, mx) = self.data
assert_(isinstance(mx._data, subarray))
def test_masked_unary_operations(self):
# Tests masked_unary_operation
(x, mx) = self.data
with np.errstate(divide='ignore'):
assert_(isinstance(log(mx), msubarray))
assert_equal(log(x), np.log(x))
def test_masked_binary_operations(self):
# Tests masked_binary_operation
(x, mx) = self.data
# Result should be a msubarray
assert_(isinstance(add(mx, mx), msubarray))
assert_(isinstance(add(mx, x), msubarray))
# Result should work
assert_equal(add(mx, x), mx+x)
assert_(isinstance(add(mx, mx)._data, subarray))
assert_(isinstance(add.outer(mx, mx), msubarray))
assert_(isinstance(hypot(mx, mx), msubarray))
assert_(isinstance(hypot(mx, x), msubarray))
def test_masked_binary_operations2(self):
# Tests domained_masked_binary_operation
(x, mx) = self.data
xmx = masked_array(mx.data.__array__(), mask=mx.mask)
assert_(isinstance(divide(mx, mx), msubarray))
assert_(isinstance(divide(mx, x), msubarray))
assert_equal(divide(mx, mx), divide(xmx, xmx))
def test_attributepropagation(self):
x = array(arange(5), mask=[0]+[1]*4)
my = masked_array(subarray(x))
ym = msubarray(x)
#
z = (my+1)
assert_(isinstance(z, MaskedArray))
assert_(not isinstance(z, MSubArray))
assert_(isinstance(z._data, SubArray))
assert_equal(z._data.info, {})
#
z = (ym+1)
assert_(isinstance(z, MaskedArray))
assert_(isinstance(z, MSubArray))
assert_(isinstance(z._data, SubArray))
assert_(z._data.info['added'] > 0)
# Test that inplace methods from data get used (gh-4617)
ym += 1
assert_(isinstance(ym, MaskedArray))
assert_(isinstance(ym, MSubArray))
assert_(isinstance(ym._data, SubArray))
assert_(ym._data.info['iadded'] > 0)
#
ym._set_mask([1, 0, 0, 0, 1])
assert_equal(ym._mask, [1, 0, 0, 0, 1])
ym._series._set_mask([0, 0, 0, 0, 1])
assert_equal(ym._mask, [0, 0, 0, 0, 1])
#
xsub = subarray(x, info={'name':'x'})
mxsub = masked_array(xsub)
assert_(hasattr(mxsub, 'info'))
assert_equal(mxsub.info, xsub.info)
def test_subclasspreservation(self):
# Checks that masked_array(...,subok=True) preserves the class.
x = np.arange(5)
m = [0, 0, 1, 0, 0]
xinfo = [(i, j) for (i, j) in zip(x, m)]
xsub = MSubArray(x, mask=m, info={'xsub':xinfo})
#
mxsub = masked_array(xsub, subok=False)
assert_(not isinstance(mxsub, MSubArray))
assert_(isinstance(mxsub, MaskedArray))
assert_equal(mxsub._mask, m)
#
mxsub = asarray(xsub)
assert_(not isinstance(mxsub, MSubArray))
assert_(isinstance(mxsub, MaskedArray))
assert_equal(mxsub._mask, m)
#
mxsub = masked_array(xsub, subok=True)
assert_(isinstance(mxsub, MSubArray))
assert_equal(mxsub.info, xsub.info)
assert_equal(mxsub._mask, xsub._mask)
#
mxsub = asanyarray(xsub)
assert_(isinstance(mxsub, MSubArray))
assert_equal(mxsub.info, xsub.info)
assert_equal(mxsub._mask, m)
def test_subclass_items(self):
"""test that getter and setter go via baseclass"""
x = np.arange(5)
xcsub = ComplicatedSubArray(x)
mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
# getter should return a ComplicatedSubArray, even for single item
# first check we wrote ComplicatedSubArray correctly
assert_(isinstance(xcsub[1], ComplicatedSubArray))
assert_(isinstance(xcsub[1,...], ComplicatedSubArray))
assert_(isinstance(xcsub[1:4], ComplicatedSubArray))
# now that it propagates inside the MaskedArray
assert_(isinstance(mxcsub[1], ComplicatedSubArray))
assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray))
assert_(mxcsub[0] is masked)
assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray))
assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray))
# also for flattened version (which goes via MaskedIterator)
assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray))
assert_(mxcsub.flat[0] is masked)
assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray))
# setter should only work with ComplicatedSubArray input
# first check we wrote ComplicatedSubArray correctly
assert_raises(ValueError, xcsub.__setitem__, 1, x[4])
# now that it propagates inside the MaskedArray
assert_raises(ValueError, mxcsub.__setitem__, 1, x[4])
assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4])
mxcsub[1] = xcsub[4]
mxcsub[1:4] = xcsub[1:4]
# also for flattened version (which goes via MaskedIterator)
assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4])
assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4])
mxcsub.flat[1] = xcsub[4]
mxcsub.flat[1:4] = xcsub[1:4]
def test_subclass_nomask_items(self):
x = np.arange(5)
xcsub = ComplicatedSubArray(x)
mxcsub_nomask = masked_array(xcsub)
assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray))
assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray))
assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray))
assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray))
def test_subclass_repr(self):
"""test that repr uses the name of the subclass
and 'array' for np.ndarray"""
x = np.arange(5)
mx = masked_array(x, mask=[True, False, True, False, False])
assert_startswith(repr(mx), 'masked_array')
xsub = SubArray(x)
mxsub = masked_array(xsub, mask=[True, False, True, False, False])
assert_startswith(repr(mxsub),
f'masked_{SubArray.__name__}(data=[--, 1, --, 3, 4]')
def test_subclass_str(self):
"""test str with subclass that has overridden str, setitem"""
# first without override
x = np.arange(5)
xsub = SubArray(x)
mxsub = masked_array(xsub, mask=[True, False, True, False, False])
assert_equal(str(mxsub), '[-- 1 -- 3 4]')
xcsub = ComplicatedSubArray(x)
assert_raises(ValueError, xcsub.__setitem__, 0,
np.ma.core.masked_print_option)
mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix')
def test_pure_subclass_info_preservation(self):
# Test that ufuncs and methods conserve extra information consistently;
# see gh-7122.
arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6])
arr2 = SubMaskedArray(data=[0,1,2,3,4,5])
diff1 = np.subtract(arr1, arr2)
assert_('info' in diff1._optinfo)
assert_(diff1._optinfo['info'] == 'test')
diff2 = arr1 - arr2
assert_('info' in diff2._optinfo)
assert_(diff2._optinfo['info'] == 'test')
|
EvanK/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_alertemailconfig.py
|
31
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_alertemailconfig
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of AlertEmailConfig Avi RESTful Object
description:
- This module is used to configure AlertEmailConfig object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
cc_emails:
description:
- Alerts are copied to the comma separated list of email recipients.
description:
description:
- User defined description for the object.
name:
description:
- A user-friendly name of the email notification service.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
to_emails:
description:
- Alerts are sent to the comma separated list of email recipients.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create AlertEmailConfig object
avi_alertemailconfig:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_alertemailconfig
"""
RETURN = '''
obj:
description: AlertEmailConfig (api/alertemailconfig) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
cc_emails=dict(type='str',),
description=dict(type='str',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
to_emails=dict(type='str', required=True),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'alertemailconfig',
set([]))
if __name__ == '__main__':
main()
|
tquizzle/Sick-Beard
|
refs/heads/development
|
sickbeard/notifiers/synologynotifier.py
|
48
|
# Author: Nyaran <nyayukko@gmail.com>
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
import sickbeard
from sickbeard import logger
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from sickbeard import common
class synologyNotifier:
def notify_snatch(self, ep_name):
if sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH:
self._send_synologyNotifier(ep_name, common.notifyStrings[common.NOTIFY_SNATCH])
def notify_download(self, ep_name):
if sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD:
self._send_synologyNotifier(ep_name, common.notifyStrings[common.NOTIFY_DOWNLOAD])
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD:
self._send_synologyNotifier(ep_name + ": " + lang, common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD])
def _send_synologyNotifier(self, message, title):
synodsmnotify_cmd = ["/usr/syno/bin/synodsmnotify", "@administrators", title, message]
logger.log(u"Executing command "+str(synodsmnotify_cmd))
logger.log(u"Absolute path to command: "+ek.ek(os.path.abspath, synodsmnotify_cmd[0]), logger.DEBUG)
try:
p = subprocess.Popen(synodsmnotify_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR)
out, err = p.communicate() #@UnusedVariable
logger.log(u"Script result: "+str(out), logger.DEBUG)
except OSError, e:
logger.log(u"Unable to run synodsmnotify: "+ex(e))
notifier = synologyNotifier
|
phretor/django-academic
|
refs/heads/master
|
academic/apps/projects/urls.py
|
1
|
from django.conf.urls.defaults import *
from django.views.decorators.cache import cache_page
from django.views.generic.list import ListView
from django.views.generic.list_detail import object_list, object_detail
from academic.projects.models import *
urlpatterns = patterns(
'',
url(r'^topics/(?P<slug>[-\w]+)/$',
cache_page(object_detail),
{'template_name': 'academic/topic_detail.html',
'queryset': Topic.objects.all() },
name='academic_projects_topic_detail'),
url(r'^topics/$',
cache_page(object_list),
{'template_name': 'academic/topic_list.html',
'queryset': Topic.objects.all() },
name='academic_projects_topic_list'),
url(r'^projects/(?P<slug>[-\w]+)/$',
cache_page(object_detail),
{'template_name': 'academic/project_detail.html',
'queryset': Project.objects.all() },
name='academic_projects_project_detail'),
url(r'^$',
cache_page(ListView.as_view(
queryset=Project.objects.order_by('topic'),
template_name='academic/project_list.html')),
name='academic_projects_project_list'),
)
|
7kbird/chrome
|
refs/heads/master
|
third_party/closure_linter/closure_linter/fixjsstyle.py
|
107
|
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automatically fix simple style guide violations."""
__author__ = 'robbyw@google.com (Robert Walker)'
import sys
import gflags as flags
from closure_linter import error_fixer
from closure_linter import runner
from closure_linter.common import simplefileflags as fileflags
FLAGS = flags.FLAGS
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
'extensions (not js) that should be treated as '
'JavaScript files.')
def main(argv=None):
"""Main function.
Args:
argv: Sequence of command line arguments.
"""
if argv is None:
argv = flags.FLAGS(sys.argv)
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
fixer = error_fixer.ErrorFixer()
# Check the list of files.
for filename in files:
runner.Run(filename, fixer)
if __name__ == '__main__':
main()
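# Typical command line (illustrative; the file path is a placeholder). The
# --additional_extensions flag defined above is a comma-separated list, so
# e.g. ".es6" files could be fixed alongside ".js":
#
#     python fixjsstyle.py --additional_extensions=es6 path/to/script.js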
|
hammerlab/cohorts
|
refs/heads/master
|
cohorts/variant_stats.py
|
1
|
# Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unsubscriptable-object
from collections import namedtuple
import vcf
VariantStats = namedtuple("VariantStats",
["depth", "alt_depth", "variant_allele_frequency"])
SomaticVariantStats = namedtuple("SomaticVariantStats",
["tumor_stats", "normal_stats"])
def strelka_somatic_variant_stats(variant, variant_metadata):
"""Parse out the variant calling statistics for a given variant from a Strelka VCF
Parameters
----------
variant : varcode.Variant
variant_metadata : dict
Metadata for this variant; its "sample_info" entry maps the sample columns
in a Strelka VCF to their variant calling statistics
Returns
-------
SomaticVariantStats
"""
sample_info = variant_metadata["sample_info"]
# Ensure there are exactly two samples in the VCF, a tumor and normal
assert len(sample_info) == 2, "More than two samples found in the somatic VCF"
tumor_stats = _strelka_variant_stats(variant, sample_info["TUMOR"])
normal_stats = _strelka_variant_stats(variant, sample_info["NORMAL"])
return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
def _strelka_variant_stats(variant, sample_info):
"""Parse a single sample"s variant calling statistics based on Strelka VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Strelka-specific variant calling fields
Returns
-------
VariantStats
"""
if variant.is_deletion or variant.is_insertion:
# ref: https://sites.google.com/site/strelkasomaticvariantcaller/home/somatic-variant-output
ref_depth = int(sample_info['TAR'][0]) # number of reads supporting ref allele (non-deletion)
alt_depth = int(sample_info['TIR'][0]) # number of reads supporting alt allele (deletion)
depth = ref_depth + alt_depth
else:
# Retrieve the Tier 1 counts from Strelka
ref_depth = int(sample_info[variant.ref+"U"][0])
alt_depth = int(sample_info[variant.alt+"U"][0])
depth = alt_depth + ref_depth
if depth > 0:
vaf = float(alt_depth) / depth
else:
# unclear how to define vaf if no reads support variant
# up to user to interpret this (hopefully filtered out in QC settings)
vaf = None
return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
def mutect_somatic_variant_stats(variant, variant_metadata):
"""Parse out the variant calling statistics for a given variant from a Mutect VCF
Parameters
----------
variant : varcode.Variant
variant_metadata : dict
Metadata for this variant; its "sample_info" entry maps the sample columns
in a Mutect VCF to their variant calling statistics
Returns
-------
SomaticVariantStats
"""
sample_info = variant_metadata["sample_info"]
# Ensure there are exactly two samples in the VCF, a tumor and normal
assert len(sample_info) == 2, "More than two samples found in the somatic VCF"
# Find the sample with the genotype field set to variant in the VCF
tumor_sample_infos = [info for info in sample_info.values() if info["GT"] == "0/1"]
# Ensure there is only one such sample
assert len(tumor_sample_infos) == 1, "More than one tumor sample found in the VCF file"
tumor_sample_info = tumor_sample_infos[0]
normal_sample_info = [info for info in sample_info.values() if info["GT"] != "0/1"][0]
tumor_stats = _mutect_variant_stats(variant, tumor_sample_info)
normal_stats = _mutect_variant_stats(variant, normal_sample_info)
return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
def _mutect_variant_stats(variant, sample_info):
"""Parse a single sample"s variant calling statistics based on Mutect"s (v1) VCF output
Parameters
----------
variant : varcode.Variant
sample_info : dict
Dictionary of Mutect-specific variant calling fields
Returns
-------
VariantStats
"""
# Parse out the AD (or allele depth field), which is an array of [REF_DEPTH, ALT_DEPTH]
ref_depth, alt_depth = sample_info["AD"]
depth = int(ref_depth) + int(alt_depth)
vaf = float(alt_depth) / depth
return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
def _maf_variant_stats(variant, variant_metadata, prefix="t"):
ref_depth = variant_metadata["%s_ref_count" % prefix]
alt_depth = variant_metadata["%s_alt_count" % prefix]
depth = int(ref_depth) + int(alt_depth)
vaf = float(alt_depth) / depth
return VariantStats(depth=depth, alt_depth=alt_depth, variant_allele_frequency=vaf)
def maf_somatic_variant_stats(variant, variant_metadata):
"""
Parse out the variant calling statistics for a given variant from a MAF file
Assumes the MAF format described here: https://www.biostars.org/p/161298/#161777
Parameters
----------
variant : varcode.Variant
variant_metadata : dict
Dictionary of metadata for this variant
Returns
-------
SomaticVariantStats
"""
tumor_stats = None
normal_stats = None
if "t_ref_count" in variant_metadata:
tumor_stats = _maf_variant_stats(variant, variant_metadata, prefix="t")
if "n_ref_count" in variant_metadata:
normal_stats = _maf_variant_stats(variant, variant_metadata, prefix="n")
return SomaticVariantStats(tumor_stats=tumor_stats, normal_stats=normal_stats)
def _vcf_is_strelka(variant_file, variant_metadata):
"""Return True if variant_file given is in strelka format
"""
if "strelka" in variant_file.lower():
return True
elif "NORMAL" in variant_metadata["sample_info"].keys():
return True
else:
vcf_reader = vcf.Reader(open(variant_file, "r"))
try:
vcf_type = vcf_reader.metadata["content"]
except KeyError:
vcf_type = ""
if "strelka" in vcf_type.lower():
return True
return False
def _vcf_is_maf(variant_file):
"""Retrun True if variant_file given is in .maf format
"""
return ".maf" in variant_file.lower()
def _vcf_is_mutect(variant_file, variant_metadata):
"""Return True if variant_file give is in mutect format
"""
if "mutect" in variant_file.lower():
return True
elif "GT" in variant_metadata["sample_info"].keys():
return True
else:
vcf_reader = vcf.Reader(open(variant_file, "r"))
try:
vcf_type = vcf_reader.metadata["GATKCommandLine"][0]["ID"]
except KeyError:
vcf_type = ""
if "mutect" in vcf_type.lower():
return True
return False
def variant_stats_from_variant(variant,
metadata,
merge_fn=(lambda all_stats: \
max(all_stats, key=(lambda stats: stats.tumor_stats.depth)))):
"""Parse the variant calling stats from a variant called from multiple variant files. The stats are merged
based on `merge_fn`
Parameters
----------
variant : varcode.Variant
metadata : dict
Dictionary of variant file to variant calling metadata from that file
merge_fn : function
Function from list of SomaticVariantStats to single SomaticVariantStats.
This is used if a variant is called by multiple callers or appears in multiple VCFs.
By default, this uses the data from the caller that had a higher tumor depth.
Returns
-------
SomaticVariantStats
"""
all_stats = []
for (variant_file, variant_metadata) in metadata.items():
if _vcf_is_maf(variant_file=variant_file):
stats = maf_somatic_variant_stats(variant, variant_metadata)
elif _vcf_is_strelka(variant_file=variant_file,
variant_metadata=variant_metadata):
stats = strelka_somatic_variant_stats(variant, variant_metadata)
elif _vcf_is_mutect(variant_file=variant_file,
variant_metadata=variant_metadata):
stats = mutect_somatic_variant_stats(variant, variant_metadata)
else:
raise ValueError("Cannot parse sample fields, variant file {} is from an unsupported caller.".format(variant_file))
all_stats.append(stats)
return merge_fn(all_stats)
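# Rough usage sketch (not from the cohorts docs; the metadata layout is
# illustrative): `metadata` maps each variant file to the metadata parsed
# from it, and the default merge_fn keeps the stats from whichever caller
# reported the greatest tumor depth.
#
#     stats = variant_stats_from_variant(variant, metadata)
#     stats.tumor_stats.depth, stats.tumor_stats.variant_allele_frequency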
|
fyfcauc/android_external_chromium-org
|
refs/heads/du44
|
tools/idl_parser/idl_lexer_test.py
|
116
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from idl_lexer import IDLLexer
from idl_ppapi_lexer import IDLPPAPILexer
#
# FileToTokens
#
# From a source file generate a list of tokens.
#
def FileToTokens(lexer, filename):
with open(filename, 'rb') as srcfile:
lexer.Tokenize(srcfile.read(), filename)
return lexer.GetTokens()
#
# TextToTokens
#
# From a source text generate a list of tokens.
#
def TextToTokens(lexer, text):
lexer.Tokenize(text)
return lexer.GetTokens()
class WebIDLLexer(unittest.TestCase):
def setUp(self):
self.lexer = IDLLexer()
self.filenames = [
'test_lexer/values.in',
'test_lexer/keywords.in'
]
#
# testRebuildText
#
# From a set of tokens, generate a new source text by joining with a
# single space. The new source is then tokenized and compared against the
# old set.
#
def testRebuildText(self):
for filename in self.filenames:
tokens1 = FileToTokens(self.lexer, filename)
to_text = '\n'.join(['%s' % t.value for t in tokens1])
tokens2 = TextToTokens(self.lexer, to_text)
count1 = len(tokens1)
count2 = len(tokens2)
self.assertEqual(count1, count2)
for i in range(count1):
msg = 'Value %s does not match original %s on line %d of %s.' % (
tokens2[i].value, tokens1[i].value, tokens1[i].lineno, filename)
self.assertEqual(tokens1[i].value, tokens2[i].value, msg)
#
# testExpectedType
#
# From a set of tokens pairs, verify the type field of the second matches
# the value of the first, so that:
# integer 123 float 1.1 ...
# will generate a passing test, when the first token has both the type and
# value of the keyword integer and the second has the type of integer and
# value of 123 and so on.
#
def testExpectedType(self):
for filename in self.filenames:
tokens = FileToTokens(self.lexer, filename)
count = len(tokens)
self.assertTrue(count > 0)
self.assertFalse(count & 1)
index = 0
while index < count:
expect_type = tokens[index].value
actual_type = tokens[index + 1].type
msg = 'Type %s does not match expected %s on line %d of %s.' % (
actual_type, expect_type, tokens[index].lineno, filename)
index += 2
self.assertEqual(expect_type, actual_type, msg)
class PepperIDLLexer(WebIDLLexer):
def setUp(self):
self.lexer = IDLPPAPILexer()
self.filenames = [
'test_lexer/values_ppapi.in',
'test_lexer/keywords_ppapi.in'
]
if __name__ == '__main__':
unittest.main()
|
alexus37/AugmentedRealityChess
|
refs/heads/master
|
pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/sre_compile.py
|
4
|
/usr/lib/python2.7/sre_compile.py
|
ryfeus/lambda-packs
|
refs/heads/master
|
Keras_tensorflow/source/google/protobuf/internal/more_extensions_pb2.py
|
43
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/more_extensions.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/more_extensions.proto',
package='google.protobuf.internal',
syntax='proto2',
serialized_pb=_b('\n.google/protobuf/internal/more_extensions.proto\x12\x18google.protobuf.internal\"P\n\x0fTopLevelMessage\x12=\n\nsubmessage\x18\x01 \x01(\x0b\x32).google.protobuf.internal.ExtendedMessage\"\x1b\n\x0f\x45xtendedMessage*\x08\x08\x01\x10\x80\x80\x80\x80\x02\"-\n\x0e\x46oreignMessage\x12\x1b\n\x13\x66oreign_message_int\x18\x01 \x01(\x05:I\n\x16optional_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x01 \x01(\x05:w\n\x1aoptional_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x02 \x01(\x0b\x32(.google.protobuf.internal.ForeignMessage:I\n\x16repeated_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x03 \x03(\x05:w\n\x1arepeated_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x04 \x03(\x0b\x32(.google.protobuf.internal.ForeignMessage')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
OPTIONAL_INT_EXTENSION_FIELD_NUMBER = 1
optional_int_extension = _descriptor.FieldDescriptor(
name='optional_int_extension', full_name='google.protobuf.internal.optional_int_extension', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
OPTIONAL_MESSAGE_EXTENSION_FIELD_NUMBER = 2
optional_message_extension = _descriptor.FieldDescriptor(
name='optional_message_extension', full_name='google.protobuf.internal.optional_message_extension', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
REPEATED_INT_EXTENSION_FIELD_NUMBER = 3
repeated_int_extension = _descriptor.FieldDescriptor(
name='repeated_int_extension', full_name='google.protobuf.internal.repeated_int_extension', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
REPEATED_MESSAGE_EXTENSION_FIELD_NUMBER = 4
repeated_message_extension = _descriptor.FieldDescriptor(
name='repeated_message_extension', full_name='google.protobuf.internal.repeated_message_extension', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_TOPLEVELMESSAGE = _descriptor.Descriptor(
name='TopLevelMessage',
full_name='google.protobuf.internal.TopLevelMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='submessage', full_name='google.protobuf.internal.TopLevelMessage.submessage', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=76,
serialized_end=156,
)
_EXTENDEDMESSAGE = _descriptor.Descriptor(
name='ExtendedMessage',
full_name='google.protobuf.internal.ExtendedMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1, 536870912), ],
oneofs=[
],
serialized_start=158,
serialized_end=185,
)
_FOREIGNMESSAGE = _descriptor.Descriptor(
name='ForeignMessage',
full_name='google.protobuf.internal.ForeignMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='foreign_message_int', full_name='google.protobuf.internal.ForeignMessage.foreign_message_int', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=187,
serialized_end=232,
)
_TOPLEVELMESSAGE.fields_by_name['submessage'].message_type = _EXTENDEDMESSAGE
DESCRIPTOR.message_types_by_name['TopLevelMessage'] = _TOPLEVELMESSAGE
DESCRIPTOR.message_types_by_name['ExtendedMessage'] = _EXTENDEDMESSAGE
DESCRIPTOR.message_types_by_name['ForeignMessage'] = _FOREIGNMESSAGE
DESCRIPTOR.extensions_by_name['optional_int_extension'] = optional_int_extension
DESCRIPTOR.extensions_by_name['optional_message_extension'] = optional_message_extension
DESCRIPTOR.extensions_by_name['repeated_int_extension'] = repeated_int_extension
DESCRIPTOR.extensions_by_name['repeated_message_extension'] = repeated_message_extension
TopLevelMessage = _reflection.GeneratedProtocolMessageType('TopLevelMessage', (_message.Message,), dict(
DESCRIPTOR = _TOPLEVELMESSAGE,
__module__ = 'google.protobuf.internal.more_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TopLevelMessage)
))
_sym_db.RegisterMessage(TopLevelMessage)
ExtendedMessage = _reflection.GeneratedProtocolMessageType('ExtendedMessage', (_message.Message,), dict(
DESCRIPTOR = _EXTENDEDMESSAGE,
__module__ = 'google.protobuf.internal.more_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.ExtendedMessage)
))
_sym_db.RegisterMessage(ExtendedMessage)
ForeignMessage = _reflection.GeneratedProtocolMessageType('ForeignMessage', (_message.Message,), dict(
DESCRIPTOR = _FOREIGNMESSAGE,
__module__ = 'google.protobuf.internal.more_extensions_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.ForeignMessage)
))
_sym_db.RegisterMessage(ForeignMessage)
ExtendedMessage.RegisterExtension(optional_int_extension)
optional_message_extension.message_type = _FOREIGNMESSAGE
ExtendedMessage.RegisterExtension(optional_message_extension)
ExtendedMessage.RegisterExtension(repeated_int_extension)
repeated_message_extension.message_type = _FOREIGNMESSAGE
ExtendedMessage.RegisterExtension(repeated_message_extension)
# @@protoc_insertion_point(module_scope)
|
kriberg/stationspinner
|
refs/heads/master
|
stationspinner/corporation/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
avoinsystems/stock-logistics-warehouse
|
refs/heads/8.0
|
stock_available_immediately/tests/test_stock_available_immediately.py
|
5
|
# -*- coding: utf-8 -*-
# © 2015 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests.common import TransactionCase
class TestStockLogisticsWarehouse(TransactionCase):
def test01_stock_levels(self):
"""checking that immediately_usable_qty actually reflects \
the variations in stock, both on product and template"""
moveObj = self.env['stock.move']
productObj = self.env['product.product']
templateObj = self.env['product.template']
supplier_location = self.env.ref('stock.stock_location_suppliers')
stock_location = self.env.ref('stock.stock_location_stock')
customer_location = self.env.ref('stock.stock_location_customers')
uom_unit = self.env.ref('product.product_uom_unit')
# Create product template
templateAB = templateObj.create(
{'name': 'templAB',
'uom_id': uom_unit.id,
})
# Create product A and B
productA = productObj.create(
{'name': 'product A',
'standard_price': 1,
'type': 'product',
'uom_id': uom_unit.id,
'default_code': 'A',
'product_tmpl_id': templateAB.id,
})
productB = productObj.create(
{'name': 'product B',
'standard_price': 1,
'type': 'product',
'uom_id': uom_unit.id,
'default_code': 'B',
'product_tmpl_id': templateAB.id,
})
# Create a stock move from INCOMING to STOCK
stockMoveInA = moveObj.create(
{'location_id': supplier_location.id,
'location_dest_id': stock_location.id,
'name': 'MOVE INCOMING -> STOCK ',
'product_id': productA.id,
'product_uom': productA.uom_id.id,
'product_uom_qty': 2,
})
stockMoveInB = moveObj.create(
{'location_id': supplier_location.id,
'location_dest_id': stock_location.id,
'name': 'MOVE INCOMING -> STOCK ',
'product_id': productB.id,
'product_uom': productB.uom_id.id,
'product_uom_qty': 3,
})
def compare_product_usable_qty(product, value):
# Refresh, because the function field is not recalculated between
# transactions
product.refresh()
self.assertEqual(product.immediately_usable_qty, value)
compare_product_usable_qty(productA, 0)
compare_product_usable_qty(templateAB, 0)
stockMoveInA.action_confirm()
compare_product_usable_qty(productA, 0)
compare_product_usable_qty(templateAB, 0)
stockMoveInA.action_assign()
compare_product_usable_qty(productA, 0)
compare_product_usable_qty(templateAB, 0)
stockMoveInA.action_done()
compare_product_usable_qty(productA, 2)
compare_product_usable_qty(templateAB, 2)
# will directly trigger action_done on productB
stockMoveInB.action_done()
compare_product_usable_qty(productA, 2)
compare_product_usable_qty(productB, 3)
compare_product_usable_qty(templateAB, 5)
# Create a stock move from STOCK to CUSTOMER
stockMoveOutA = moveObj.create(
{'location_id': stock_location.id,
'location_dest_id': customer_location.id,
'name': ' STOCK --> CUSTOMER ',
'product_id': productA.id,
'product_uom': productA.uom_id.id,
'product_uom_qty': 1,
'state': 'confirmed',
})
stockMoveOutA.action_done()
compare_product_usable_qty(productA, 1)
compare_product_usable_qty(templateAB, 4)
|
django-nonrel/django-nonrel
|
refs/heads/develop
|
tests/regressiontests/forms/localflavor/fr.py
|
89
|
from django.contrib.localflavor.fr.forms import (FRZipCodeField,
FRPhoneNumberField, FRDepartmentSelect)
from utils import LocalFlavorTestCase
class FRLocalFlavorTests(LocalFlavorTestCase):
def test_FRZipCodeField(self):
error_format = [u'Enter a zip code in the format XXXXX.']
valid = {
'75001': '75001',
'93200': '93200',
}
invalid = {
'2A200': error_format,
'980001': error_format,
}
self.assertFieldOutput(FRZipCodeField, valid, invalid)
def test_FRPhoneNumberField(self):
error_format = [u'Phone numbers must be in 0X XX XX XX XX format.']
valid = {
'01 55 44 58 64': '01 55 44 58 64',
'0155445864': '01 55 44 58 64',
'01 5544 5864': '01 55 44 58 64',
'01 55.44.58.64': '01 55 44 58 64',
'01.55.44.58.64': '01 55 44 58 64',
}
invalid = {
'01,55,44,58,64': error_format,
'555 015 544': error_format,
}
self.assertFieldOutput(FRPhoneNumberField, valid, invalid)
def test_FRDepartmentSelect(self):
f = FRDepartmentSelect()
out = u'''<select name="dep">
<option value="01">01 - Ain</option>
<option value="02">02 - Aisne</option>
<option value="03">03 - Allier</option>
<option value="04">04 - Alpes-de-Haute-Provence</option>
<option value="05">05 - Hautes-Alpes</option>
<option value="06">06 - Alpes-Maritimes</option>
<option value="07">07 - Ardeche</option>
<option value="08">08 - Ardennes</option>
<option value="09">09 - Ariege</option>
<option value="10">10 - Aube</option>
<option value="11">11 - Aude</option>
<option value="12">12 - Aveyron</option>
<option value="13">13 - Bouches-du-Rhone</option>
<option value="14">14 - Calvados</option>
<option value="15">15 - Cantal</option>
<option value="16">16 - Charente</option>
<option value="17">17 - Charente-Maritime</option>
<option value="18">18 - Cher</option>
<option value="19">19 - Correze</option>
<option value="21">21 - Cote-d'Or</option>
<option value="22">22 - Cotes-d'Armor</option>
<option value="23">23 - Creuse</option>
<option value="24">24 - Dordogne</option>
<option value="25">25 - Doubs</option>
<option value="26">26 - Drome</option>
<option value="27">27 - Eure</option>
<option value="28">28 - Eure-et-Loire</option>
<option value="29">29 - Finistere</option>
<option value="2A">2A - Corse-du-Sud</option>
<option value="2B">2B - Haute-Corse</option>
<option value="30">30 - Gard</option>
<option value="31">31 - Haute-Garonne</option>
<option value="32">32 - Gers</option>
<option value="33">33 - Gironde</option>
<option value="34">34 - Herault</option>
<option value="35">35 - Ille-et-Vilaine</option>
<option value="36">36 - Indre</option>
<option value="37">37 - Indre-et-Loire</option>
<option value="38">38 - Isere</option>
<option value="39">39 - Jura</option>
<option value="40">40 - Landes</option>
<option value="41">41 - Loir-et-Cher</option>
<option value="42">42 - Loire</option>
<option value="43">43 - Haute-Loire</option>
<option value="44">44 - Loire-Atlantique</option>
<option value="45">45 - Loiret</option>
<option value="46">46 - Lot</option>
<option value="47">47 - Lot-et-Garonne</option>
<option value="48">48 - Lozere</option>
<option value="49">49 - Maine-et-Loire</option>
<option value="50">50 - Manche</option>
<option value="51">51 - Marne</option>
<option value="52">52 - Haute-Marne</option>
<option value="53">53 - Mayenne</option>
<option value="54">54 - Meurthe-et-Moselle</option>
<option value="55">55 - Meuse</option>
<option value="56">56 - Morbihan</option>
<option value="57">57 - Moselle</option>
<option value="58">58 - Nievre</option>
<option value="59">59 - Nord</option>
<option value="60">60 - Oise</option>
<option value="61">61 - Orne</option>
<option value="62">62 - Pas-de-Calais</option>
<option value="63">63 - Puy-de-Dome</option>
<option value="64">64 - Pyrenees-Atlantiques</option>
<option value="65">65 - Hautes-Pyrenees</option>
<option value="66">66 - Pyrenees-Orientales</option>
<option value="67">67 - Bas-Rhin</option>
<option value="68">68 - Haut-Rhin</option>
<option value="69">69 - Rhone</option>
<option value="70">70 - Haute-Saone</option>
<option value="71">71 - Saone-et-Loire</option>
<option value="72">72 - Sarthe</option>
<option value="73">73 - Savoie</option>
<option value="74">74 - Haute-Savoie</option>
<option value="75">75 - Paris</option>
<option value="76">76 - Seine-Maritime</option>
<option value="77">77 - Seine-et-Marne</option>
<option value="78">78 - Yvelines</option>
<option value="79">79 - Deux-Sevres</option>
<option value="80">80 - Somme</option>
<option value="81">81 - Tarn</option>
<option value="82">82 - Tarn-et-Garonne</option>
<option value="83">83 - Var</option>
<option value="84">84 - Vaucluse</option>
<option value="85">85 - Vendee</option>
<option value="86">86 - Vienne</option>
<option value="87">87 - Haute-Vienne</option>
<option value="88">88 - Vosges</option>
<option value="89">89 - Yonne</option>
<option value="90">90 - Territoire de Belfort</option>
<option value="91">91 - Essonne</option>
<option value="92">92 - Hauts-de-Seine</option>
<option value="93">93 - Seine-Saint-Denis</option>
<option value="94">94 - Val-de-Marne</option>
<option value="95">95 - Val-d'Oise</option>
<option value="971">971 - Guadeloupe</option>
<option value="972">972 - Martinique</option>
<option value="973">973 - Guyane</option>
<option value="974">974 - La Reunion</option>
<option value="975">975 - Saint-Pierre-et-Miquelon</option>
<option value="976">976 - Mayotte</option>
<option value="984">984 - Terres Australes et Antarctiques</option>
<option value="986">986 - Wallis et Futuna</option>
<option value="987">987 - Polynesie Francaise</option>
<option value="988">988 - Nouvelle-Caledonie</option>
</select>'''
self.assertEqual(f.render('dep', 'Paris'), out)
|
tmuelle2/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/createbug.py
|
125
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
class CreateBug(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.cc,
Options.component,
Options.blocks,
]
def run(self, state):
# No need to create a bug if we already have one.
if state.get("bug_id"):
return
cc = self._options.cc
if not cc:
cc = state.get("bug_cc")
blocks = self._options.blocks
if not blocks:
blocks = state.get("bug_blocked")
state["bug_id"] = self._tool.bugs.create_bug(state["bug_title"], state["bug_description"], blocked=blocks, component=self._options.component, cc=cc)
if blocks:
status = self._tool.bugs.fetch_bug(blocks).status()
if status == 'RESOLVED':
self._tool.bugs.reopen_bug(blocks, "Re-opened since this is blocked by bug %s" % state["bug_id"])
|
stuartcampbell/gitinspector
|
refs/heads/master
|
gitinspector/filtering.py
|
49
|
# coding: utf-8
#
# Copyright © 2012-2014 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
from localization import N_
from outputable import Outputable
import re
import terminal
import textwrap
__filters__ = {"file": [[], set()], "author": [[], set()], "email": [[], set()], "revision": [[], set()]}
class InvalidRegExpError(ValueError):
def __init__(self, msg):
super(InvalidRegExpError, self).__init__(msg)
self.msg = msg
def get():
return __filters__
def __add_one__(string):
for i in __filters__:
if (i + ":").lower() == string[0:len(i) + 1].lower():
__filters__[i][0].append(string[len(i) + 1:])
return
__filters__["file"][0].append(string)
def add(string):
rules = string.split(",")
for rule in rules:
__add_one__(rule)
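# Illustrative usage of add() (comment added for clarity, not in the original file):
#   add("author:John,file:.*\.xml") appends the regexp "John" to the author
#   filters and ".*\.xml" to the file filters; a rule without a "<type>:" prefix
#   is treated as a file filter.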
def clear():
for i in __filters__:
__filters__[i][0] = []
def get_filered(filter_type="file"):
return __filters__[filter_type][1]
def has_filtered():
for i in __filters__:
if __filters__[i][1]:
return True
return False
def set_filtered(string, filter_type="file"):
string = string.strip()
if len(string) > 0:
for i in __filters__[filter_type][0]:
try:
if re.search(i, string) != None:
__filters__[filter_type][1].add(string)
return True
except:
raise InvalidRegExpError(_("invalid regular expression specified"))
return False
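# Illustrative behaviour (added comment): after add("file:.*\.c"),
# set_filtered("src/main.c") returns True and records "src/main.c" in the set
# returned by get_filered("file").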
FILTERING_INFO_TEXT = N_("The following files were excluded from the statistics due to the specified exclusion patterns")
FILTERING_AUTHOR_INFO_TEXT = N_("The following authors were excluded from the statistics due to the specified exclusion patterns")
FILTERING_EMAIL_INFO_TEXT = N_("The authors with the following emails were excluded from the statistics due to the specified " \
"exclusion patterns")
FILTERING_EMAIL_INFO_TEXT = N_("The following commit revisions were excluded from the statistics due to the specified " \
"exclusion patterns")
class Filtering(Outputable):
@staticmethod
def __output_html_section__(info_string, filtered):
filtering_xml = ""
if filtered:
filtering_xml += "<p>" + info_string + "."+ "</p>"
for i in filtered:
filtering_xml += "<p>" + i + "</p>"
return filtering_xml
def output_html(self):
if has_filtered():
filtering_xml = "<div><div class=\"box\">"
filtering_xml += Filtering.__output_html_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
filtering_xml += Filtering.__output_html_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
filtering_xml += Filtering.__output_html_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])
filtering_xml += Filtering.__output_html_section__(_(FILTERING_COMMIT_INFO_TEXT), __filters__["revision"][1])
filtering_xml += "</div></div>"
print(filtering_xml)
@staticmethod
def __output_text_section__(info_string, filtered):
if filtered:
print("\n" + textwrap.fill(info_string + ":", width=terminal.get_size()[0]))
for i in filtered:
(width, _unused) = terminal.get_size()
print("...%s" % i[-width+3:] if len(i) > width else i)
def output_text(self):
Filtering.__output_text_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1])
Filtering.__output_text_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1])
Filtering.__output_text_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1])
Filtering.__output_text_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["revision"][1])
@staticmethod
def __output_xml_section__(info_string, filtered, container_tagname):
if filtered:
message_xml = "\t\t\t<message>" +info_string + "</message>\n"
filtering_xml = ""
for i in filtered:
filtering_xml += "\t\t\t\t<entry>".format(container_tagname) + i + "</entry>\n".format(container_tagname)
print("\t\t<{0}>".format(container_tagname))
print(message_xml + "\t\t\t<entries>\n" + filtering_xml + "\t\t\t</entries>\n")
print("\t\t</{0}>".format(container_tagname))
def output_xml(self):
if has_filtered():
print("\t<filtering>")
Filtering.__output_xml_section__(_(FILTERING_INFO_TEXT), __filters__["file"][1], "files")
Filtering.__output_xml_section__(_(FILTERING_AUTHOR_INFO_TEXT), __filters__["author"][1], "authors")
Filtering.__output_xml_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["email"][1], "emails")
Filtering.__output_xml_section__(_(FILTERING_EMAIL_INFO_TEXT), __filters__["revision"][1].union(), "revisions")
print("\t</filtering>")
|
hassanibi/erpnext
|
refs/heads/develop
|
erpnext/schools/doctype/student_admission/student_admission.py
|
25
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.website.website_generator import WebsiteGenerator
from frappe import _
class StudentAdmission(WebsiteGenerator):
website = frappe._dict(
template = "templates/generators/student_admission.html",
condition_field = "publish",
page_title_field = "title"
)
def autoname(self):
if not self.title:
self.title = self.get_title()
self.name = self.title
def get_context(self, context):
context.parents = [{'name': 'admissions', 'title': _('All Student Admissions') }]
def get_title(self):
return _("Admissions for {0}").format(self.academic_year)
def get_list_context(context):
context.title = _("Student Admissions")
|
T-B-F/pyBioUtils
|
refs/heads/master
|
scripts/jackhmmer_search_and_rescue.py
|
1
|
#!/usr/bin/env python
""" from initial sequence use jackhmmer to retrieve remote target
filter target based on the presence of the non degenerated walker A
"""
import os, sys, argparse
import shlex, subprocess
import re
from Bio import AlignIO
def get_cmd():
parser = argparse.ArgumentParser()
parser.add_argument("-i", action="store", dest="initialseq")
parser.add_argument("-w", action="store", dest="workdir")
parser.add_argument("-d", action="store", dest="dbtarget")
parser.add_argument("-n", action="store", dest="niter", type=int)
parser.add_argument("-o", action="store", dest="output")
params = parser.parse_args()
return params
def run_phmmer(inpath, outpath, target):
""" from an initial protein sequence query a database
"""
ali_out = outpath+"_msa.fasta"
res_out = outpath+"_res.txt"
log_file = outpath+"_plog.txt"
command = "phmmer -A {} -o {} {} {}".format(ali_out, res_out, inpath, target)
#print(command)
cmd = shlex.split(command)
with open(log_file, "w") as logf:
#try:
subprocess.check_call(cmd, stdout=logf, stderr=subprocess.PIPE)
#except:
# print("Error, unable to run {}".format(command), file=sys.stderr)
# sys.exit(1)
return ali_out
def run_hmmbuild(inpath, outpath):
""" build an hmm out of a fasta file
"""
hmmout = outpath+".hmm"
log_file = outpath+"_hmmlog.txt"
command = "hmmbuild {} {}".format(hmmout, inpath)
cmd = shlex.split(command)
with open(log_file, "w") as logf:
#try:
subprocess.check_call(cmd, stdout=logf, stderr=subprocess.PIPE)
#except:
# print("Error, unable to run {}".format(command), file=sys.stderr)
# sys.exit(1)
return hmmout
def run_hmmsearch(inpath, outpath, target):
""" run hmmsearch and keep aligned hits
"""
ali_out = outpath+"_msa.fasta"
res_out = outpath+"_res.txt"
log_file = outpath+"_hlog.txt"
command = "hmmsearch --noali -A {} -o {} {} {}".format(ali_out, res_out, inpath, target)
#print(command)
cmd = shlex.split(command)
with open(log_file, "w") as logf:
#try:
subprocess.check_call(cmd, stdout=logf, stderr=subprocess.PIPE)
#except:
# print("Error, unable to run {}".format(command), file=sys.stderr)
# sys.exit(1)
return ali_out
def read_results(path):
""" read aligned sequences of hmmsearch in stockholm format.
"""
proteins = dict()
with open(path) as inf:
msa = AlignIO.read(inf, format="stockholm")
for record in msa:
proteins[record.id] = str(record.seq)
return proteins
def read_input(path):
""" read initial input
"""
proteins = set()
with open(path) as inf:
for line in inf:
if line[0] == ">":
proteins.add(line[1:].strip().split()[0])
return proteins
def filter_results(proteins, name, workdir, mean_size):
""" filter results based on hit coverage of initial input and on regexp presence/absence
"""
outpath = os.path.join(workdir, name+"_filteredmsa.fasta")
r = re.compile("G.{4}GK[TS]")#.{20,100}[YIMLFWV]{3}[YIMLFWVN]D[DE]")
kept = set()
with open(outpath, "w") as outf:
for prot in proteins:
seq_msa = proteins[prot]
seq = seq_msa.replace("-", "").replace(".", "").upper()
# regexp match
match = r.search(seq)
if not match:
if (len(seq) / mean_size) >= 0.8:
outf.write(">{}\n{}\n".format(prot, seq_msa))
kept.add(prot.split()[0])
return kept, outpath
def read_results_and_filter(ali_results, name, workdir, n, mean_size):
""" apply read and filter to results
"""
# read results
hit_proteins = read_results(ali_results)
# filter results based on walker A regexp
res_proteins, filtered_ali = filter_results(hit_proteins, name+"_iter_{}".format(n), workdir, mean_size)
return res_proteins, filtered_ali
def compute_mean_length(path):
""" compute initial mean sequence compute_mean_length
"""
mean_size = 0
fasta = dict()
with open(path) as inf:
for line in inf:
if line[0] == ">":
prot = line[1:-1]
fasta[prot] = ""
else:
fasta[prot] += line.strip().replace("-", "").replace(".", "")
for prot in fasta:
mean_size += len(fasta[prot])
mean_size /= len(fasta)
return mean_size
def count_differences(query_proteins, targets):
notfound = 0
overlapping = set()
for prot in query_proteins:
tmp = prot.split("/")
name = tmp[0]
start, stop = tmp[1].split("-")
init_start, init_stop = int(start)-1, int(stop)
if name in targets:
found = False
for new_start, new_stop in targets[name]:
start = max(init_start, new_start)
stop = min(init_stop, new_stop)
diff = stop - start
if diff > 0:
c = diff / max(new_stop-new_start, init_stop-init_start)
if c > 0.9:
found = True
overlapping.add(prot)
if not found:
notfound += 1
else:
notfound += 1
return notfound, overlapping
def check_set_differences(new_proteins, prev_proteins, cov=0.9):
""" check hits, count number of new sequences and dropped sequences
"""
init = dict()
overlapping = set()
for prot in prev_proteins:
tmp = prot.split("/")
start, stop = tmp[1].split("-")
init.setdefault(tmp[0], list()).append((int(start)-1, int(stop)))
new = dict()
for prot in new_proteins:
tmp = prot.split("/")
start, stop = tmp[1].split("-")
new.setdefault(tmp[0], list()).append((int(start)-1, int(stop)))
nbnew, overlapping = count_differences(new_proteins, init)
dropped, _ = count_differences(prev_proteins, new)
print(nbnew, dropped, len(new_proteins), len(prev_proteins), len(overlapping))
inboth = len(overlapping)
if len(overlapping) == len(new_proteins):
return True
return False
def main():
params = get_cmd()
if not os.path.isdir(params.workdir):
os.makedirs(params.workdir)
stop = False
init_proteins = read_input(params.initialseq)
init_size = len(init_proteins)
init_length = compute_mean_length(params.initialseq)
inpath = params.initialseq
name = os.path.splitext(os.path.basename(inpath))[0]
if init_size == 0:
print("Error, no protein found in input fasta file", file=sys.stderr)
sys.exit(0)
elif init_size == 1:
# single sequence
outpath = os.path.join(params.workdir, name+"_iter_0")
# run phmmer
ali_results = run_phmmer(inpath, outpath, params.dbtarget)
res_proteins, filtered_ali_results = read_results_and_filter(ali_results, name, params.workdir, 0, init_length)
if len(res_proteins.intersection(init_proteins)) == len(res_proteins):
# no new proteins
stop = True
else:
init_proteins = res_proteins
# output alignment of jackhmmer is on stockholm format, convert to fasta
#with open(filtered_ali_results) as inf:
# msa = AlignIO.read(inf, format="stockholm")
#with open(filtered_ali_results, "w") as ouf:
# AlignIO.write(msa, outf, format="fasta")
inpath = filtered_ali_results
if not stop:
niter = params.niter +1 if init_size == 1 else params.niter
for n in range(1, niter):
outpath = os.path.join(params.workdir, name+"_iter_{}".format(n))
# convert fasta inputfile to hmm file
hmmpath = run_hmmbuild(inpath, outpath)
# run hmmsearch
ali_results = run_hmmsearch(hmmpath, outpath, params.dbtarget)
res_proteins, filtered_ali_results = read_results_and_filter(ali_results, name, params.workdir, n, init_length)
if check_set_differences(res_proteins, init_proteins):
# no new proteins
break
if len(res_proteins) == 0:
print("Error, no protein found", file=sys.stderr)
break
else:
init_proteins = res_proteins
# output alignment of jackhmmer is on stockholm format, convert to fasta
#with open(filtered_ali_results) as inf:
# msa = AlignIO.read(inf, format="stockholm")
#with open(filtered_ali_results, "w") as ouf:
# AlignIO.write(msa, outf, format="fasta")
inpath = filtered_ali_results
if len(init_proteins) != 0:
if params.initialseq == inpath:
print("No new sequences found")
else:
with open(params.output, "w") as outf, open(inpath) as inf:
for line in inf:
outf.write(line)
else:
print("Exit, without proteins")
sys.exit(0)
if __name__ == "__main__":
main()
|
leeseuljeong/leeseulstack_neutron
|
refs/heads/master
|
neutron/tests/unit/metaplugin/test_metaplugin.py
|
14
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
import testtools
from neutron.common import exceptions as exc
from neutron.common import topics
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import models_v2
from neutron.extensions import flavor as ext_flavor
from neutron.openstack.common import uuidutils
from neutron.plugins.metaplugin import meta_neutron_plugin
from neutron.tests.unit import testlib_api
from neutron.tests.unit import testlib_plugin
CONF_FILE = ""
META_PATH = "neutron.plugins.metaplugin"
FAKE_PATH = "neutron.tests.unit.metaplugin"
PROXY_PATH = "%s.proxy_neutron_plugin.ProxyPluginV2" % META_PATH
PLUGIN_LIST = """
fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2,proxy:%s
""".strip() % (FAKE_PATH, FAKE_PATH, PROXY_PATH)
L3_PLUGIN_LIST = """
fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2
""".strip() % (FAKE_PATH, FAKE_PATH)
def setup_metaplugin_conf(has_l3=True):
cfg.CONF.set_override('auth_url', 'http://localhost:35357/v2.0',
'PROXY')
cfg.CONF.set_override('auth_region', 'RegionOne', 'PROXY')
cfg.CONF.set_override('admin_user', 'neutron', 'PROXY')
cfg.CONF.set_override('admin_password', 'password', 'PROXY')
cfg.CONF.set_override('admin_tenant_name', 'service', 'PROXY')
cfg.CONF.set_override('plugin_list', PLUGIN_LIST, 'META')
if has_l3:
cfg.CONF.set_override('l3_plugin_list', L3_PLUGIN_LIST, 'META')
else:
cfg.CONF.set_override('l3_plugin_list', "", 'META')
cfg.CONF.set_override('default_flavor', 'fake2', 'META')
cfg.CONF.set_override('default_l3_flavor', 'fake1', 'META')
cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
#TODO(nati) remove this after subnet quota change is merged
cfg.CONF.set_override('max_dns_nameservers', 10)
# Hooks registered by metaplugin must not exist for other plugins UT.
# So hooks must be unregistered (overwrite to None in fact).
def unregister_meta_hooks():
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Network, 'metaplugin_net', None, None, None)
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Port, 'metaplugin_port', None, None, None)
class MetaNeutronPluginV2Test(testlib_api.SqlTestCase,
testlib_plugin.PluginSetupHelper):
"""Class conisting of MetaNeutronPluginV2 unit tests."""
has_l3 = True
def setUp(self):
super(MetaNeutronPluginV2Test, self).setUp()
self.fake_tenant_id = uuidutils.generate_uuid()
self.context = context.get_admin_context()
self.addCleanup(unregister_meta_hooks)
setup_metaplugin_conf(self.has_l3)
self.client_cls_p = mock.patch('neutronclient.v2_0.client.Client')
client_cls = self.client_cls_p.start()
self.client_inst = mock.Mock()
client_cls.return_value = self.client_inst
self.client_inst.create_network.return_value = \
{'id': 'fake_id'}
self.client_inst.create_port.return_value = \
{'id': 'fake_id'}
self.client_inst.create_subnet.return_value = \
{'id': 'fake_id'}
self.client_inst.update_network.return_value = \
{'id': 'fake_id'}
self.client_inst.update_port.return_value = \
{'id': 'fake_id'}
self.client_inst.update_subnet.return_value = \
{'id': 'fake_id'}
self.client_inst.delete_network.return_value = True
self.client_inst.delete_port.return_value = True
self.client_inst.delete_subnet.return_value = True
plugin = (meta_neutron_plugin.MetaPluginV2.__module__ + '.'
+ meta_neutron_plugin.MetaPluginV2.__name__)
self.setup_coreplugin(plugin)
self.plugin = meta_neutron_plugin.MetaPluginV2(configfile=None)
def _fake_network(self, flavor):
data = {'network': {'name': flavor,
'admin_state_up': True,
'shared': False,
'router:external': [],
'tenant_id': self.fake_tenant_id,
ext_flavor.FLAVOR_NETWORK: flavor}}
return data
def _fake_port(self, net_id):
return {'port': {'name': net_id,
'network_id': net_id,
'admin_state_up': True,
'device_id': 'bad_device_id',
'device_owner': 'bad_device_owner',
'admin_state_up': True,
'host_routes': [],
'fixed_ips': [],
'mac_address':
self.plugin._generate_mac(self.context, net_id),
'tenant_id': self.fake_tenant_id}}
def _fake_subnet(self, net_id):
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.254'}]
return {'subnet': {'name': net_id,
'network_id': net_id,
'gateway_ip': '10.0.0.1',
'dns_nameservers': ['10.0.0.2'],
'host_routes': [],
'cidr': '10.0.0.0/24',
'allocation_pools': allocation_pools,
'enable_dhcp': True,
'ip_version': 4}}
def _fake_router(self, flavor):
data = {'router': {'name': flavor, 'admin_state_up': True,
'tenant_id': self.fake_tenant_id,
ext_flavor.FLAVOR_ROUTER: flavor,
'external_gateway_info': None}}
return data
def test_create_delete_network(self):
network1 = self._fake_network('fake1')
ret1 = self.plugin.create_network(self.context, network1)
self.assertEqual('fake1', ret1[ext_flavor.FLAVOR_NETWORK])
network2 = self._fake_network('fake2')
ret2 = self.plugin.create_network(self.context, network2)
self.assertEqual('fake2', ret2[ext_flavor.FLAVOR_NETWORK])
network3 = self._fake_network('proxy')
ret3 = self.plugin.create_network(self.context, network3)
self.assertEqual('proxy', ret3[ext_flavor.FLAVOR_NETWORK])
db_ret1 = self.plugin.get_network(self.context, ret1['id'])
self.assertEqual('fake1', db_ret1['name'])
db_ret2 = self.plugin.get_network(self.context, ret2['id'])
self.assertEqual('fake2', db_ret2['name'])
db_ret3 = self.plugin.get_network(self.context, ret3['id'])
self.assertEqual('proxy', db_ret3['name'])
db_ret4 = self.plugin.get_networks(self.context)
self.assertEqual(3, len(db_ret4))
db_ret5 = self.plugin.get_networks(
self.context,
{ext_flavor.FLAVOR_NETWORK: ['fake1']})
self.assertEqual(1, len(db_ret5))
self.assertEqual('fake1', db_ret5[0]['name'])
self.plugin.delete_network(self.context, ret1['id'])
self.plugin.delete_network(self.context, ret2['id'])
self.plugin.delete_network(self.context, ret3['id'])
def test_create_delete_port(self):
network1 = self._fake_network('fake1')
network_ret1 = self.plugin.create_network(self.context, network1)
network2 = self._fake_network('fake2')
network_ret2 = self.plugin.create_network(self.context, network2)
network3 = self._fake_network('proxy')
network_ret3 = self.plugin.create_network(self.context, network3)
port1 = self._fake_port(network_ret1['id'])
port2 = self._fake_port(network_ret2['id'])
port3 = self._fake_port(network_ret3['id'])
port1_ret = self.plugin.create_port(self.context, port1)
port2_ret = self.plugin.create_port(self.context, port2)
port3_ret = self.plugin.create_port(self.context, port3)
ports_all = self.plugin.get_ports(self.context)
self.assertEqual(network_ret1['id'], port1_ret['network_id'])
self.assertEqual(network_ret2['id'], port2_ret['network_id'])
self.assertEqual(network_ret3['id'], port3_ret['network_id'])
self.assertEqual(3, len(ports_all))
port1_dict = self.plugin._make_port_dict(port1_ret)
port2_dict = self.plugin._make_port_dict(port2_ret)
port3_dict = self.plugin._make_port_dict(port3_ret)
self.assertEqual(port1_dict, port1_ret)
self.assertEqual(port2_dict, port2_ret)
self.assertEqual(port3_dict, port3_ret)
port1['port']['admin_state_up'] = False
port2['port']['admin_state_up'] = False
port3['port']['admin_state_up'] = False
self.plugin.update_port(self.context, port1_ret['id'], port1)
self.plugin.update_port(self.context, port2_ret['id'], port2)
self.plugin.update_port(self.context, port3_ret['id'], port3)
port_in_db1 = self.plugin.get_port(self.context, port1_ret['id'])
port_in_db2 = self.plugin.get_port(self.context, port2_ret['id'])
port_in_db3 = self.plugin.get_port(self.context, port3_ret['id'])
self.assertEqual(False, port_in_db1['admin_state_up'])
self.assertEqual(False, port_in_db2['admin_state_up'])
self.assertEqual(False, port_in_db3['admin_state_up'])
self.plugin.delete_port(self.context, port1_ret['id'])
self.plugin.delete_port(self.context, port2_ret['id'])
self.plugin.delete_port(self.context, port3_ret['id'])
self.plugin.delete_network(self.context, network_ret1['id'])
self.plugin.delete_network(self.context, network_ret2['id'])
self.plugin.delete_network(self.context, network_ret3['id'])
def test_create_delete_subnet(self):
# for this test we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
network1 = self._fake_network('fake1')
network_ret1 = self.plugin.create_network(self.context, network1)
network2 = self._fake_network('fake2')
network_ret2 = self.plugin.create_network(self.context, network2)
network3 = self._fake_network('proxy')
network_ret3 = self.plugin.create_network(self.context, network3)
subnet1 = self._fake_subnet(network_ret1['id'])
subnet2 = self._fake_subnet(network_ret2['id'])
subnet3 = self._fake_subnet(network_ret3['id'])
subnet1_ret = self.plugin.create_subnet(self.context, subnet1)
subnet2_ret = self.plugin.create_subnet(self.context, subnet2)
subnet3_ret = self.plugin.create_subnet(self.context, subnet3)
self.assertEqual(network_ret1['id'], subnet1_ret['network_id'])
self.assertEqual(network_ret2['id'], subnet2_ret['network_id'])
self.assertEqual(network_ret3['id'], subnet3_ret['network_id'])
subnet_in_db1 = self.plugin.get_subnet(self.context, subnet1_ret['id'])
subnet_in_db2 = self.plugin.get_subnet(self.context, subnet2_ret['id'])
subnet_in_db3 = self.plugin.get_subnet(self.context, subnet3_ret['id'])
subnet1['subnet']['allocation_pools'].pop()
subnet2['subnet']['allocation_pools'].pop()
subnet3['subnet']['allocation_pools'].pop()
self.plugin.update_subnet(self.context,
subnet1_ret['id'], subnet1)
self.plugin.update_subnet(self.context,
subnet2_ret['id'], subnet2)
self.plugin.update_subnet(self.context,
subnet3_ret['id'], subnet3)
subnet_in_db1 = self.plugin.get_subnet(self.context, subnet1_ret['id'])
subnet_in_db2 = self.plugin.get_subnet(self.context, subnet2_ret['id'])
subnet_in_db3 = self.plugin.get_subnet(self.context, subnet3_ret['id'])
self.assertEqual(4, subnet_in_db1['ip_version'])
self.assertEqual(4, subnet_in_db2['ip_version'])
self.assertEqual(4, subnet_in_db3['ip_version'])
self.plugin.delete_subnet(self.context, subnet1_ret['id'])
self.plugin.delete_subnet(self.context, subnet2_ret['id'])
self.plugin.delete_subnet(self.context, subnet3_ret['id'])
self.plugin.delete_network(self.context, network_ret1['id'])
self.plugin.delete_network(self.context, network_ret2['id'])
self.plugin.delete_network(self.context, network_ret3['id'])
def test_create_delete_router(self):
router1 = self._fake_router('fake1')
router_ret1 = self.plugin.create_router(self.context, router1)
router2 = self._fake_router('fake2')
router_ret2 = self.plugin.create_router(self.context, router2)
self.assertEqual('fake1', router_ret1[ext_flavor.FLAVOR_ROUTER])
self.assertEqual('fake2', router_ret2[ext_flavor.FLAVOR_ROUTER])
router_in_db1 = self.plugin.get_router(self.context, router_ret1['id'])
router_in_db2 = self.plugin.get_router(self.context, router_ret2['id'])
self.assertEqual('fake1', router_in_db1[ext_flavor.FLAVOR_ROUTER])
self.assertEqual('fake2', router_in_db2[ext_flavor.FLAVOR_ROUTER])
self.plugin.delete_router(self.context, router_ret1['id'])
self.plugin.delete_router(self.context, router_ret2['id'])
with testtools.ExpectedException(meta_neutron_plugin.FlavorNotFound):
self.plugin.get_router(self.context, router_ret1['id'])
def test_extension_method(self):
self.assertEqual('fake1', self.plugin.fake_func())
self.assertEqual('fake2', self.plugin.fake_func2())
def test_extension_not_implemented_method(self):
try:
self.plugin.not_implemented()
except AttributeError:
return
except Exception:
self.fail("AttributeError Error is not raised")
self.fail("No Error is not raised")
def test_create_network_flavor_fail(self):
with mock.patch('neutron.plugins.metaplugin.meta_db_v2.'
'add_network_flavor_binding',
side_effect=Exception):
network = self._fake_network('fake1')
self.assertRaises(meta_neutron_plugin.FaildToAddFlavorBinding,
self.plugin.create_network,
self.context,
network)
count = self.plugin.get_networks_count(self.context)
self.assertEqual(count, 0)
def test_create_router_flavor_fail(self):
with mock.patch('neutron.plugins.metaplugin.meta_db_v2.'
'add_router_flavor_binding',
side_effect=Exception):
router = self._fake_router('fake1')
self.assertRaises(meta_neutron_plugin.FaildToAddFlavorBinding,
self.plugin.create_router,
self.context,
router)
count = self.plugin.get_routers_count(self.context)
self.assertEqual(count, 0)
class MetaNeutronPluginV2TestWithoutL3(MetaNeutronPluginV2Test):
"""Tests without l3_plugin_list configration."""
has_l3 = False
def test_supported_extension_aliases(self):
self.assertEqual(self.plugin.supported_extension_aliases,
['flavor', 'external-net'])
def test_create_delete_router(self):
self.skipTest("Test case without router")
def test_create_router_flavor_fail(self):
self.skipTest("Test case without router")
class MetaNeutronPluginV2TestRpcFlavor(testlib_api.SqlTestCase):
"""Tests for rpc_flavor."""
def setUp(self):
super(MetaNeutronPluginV2TestRpcFlavor, self).setUp()
self.addCleanup(unregister_meta_hooks)
def test_rpc_flavor(self):
setup_metaplugin_conf()
cfg.CONF.set_override('rpc_flavor', 'fake1', 'META')
self.plugin = meta_neutron_plugin.MetaPluginV2()
self.assertEqual(topics.PLUGIN, 'q-plugin')
ret = self.plugin.rpc_workers_supported()
self.assertFalse(ret)
def test_invalid_rpc_flavor(self):
setup_metaplugin_conf()
cfg.CONF.set_override('rpc_flavor', 'fake-fake', 'META')
self.assertRaises(exc.Invalid,
meta_neutron_plugin.MetaPluginV2)
self.assertEqual(topics.PLUGIN, 'q-plugin')
def test_rpc_flavor_multiple_rpc_workers(self):
setup_metaplugin_conf()
cfg.CONF.set_override('rpc_flavor', 'fake2', 'META')
self.plugin = meta_neutron_plugin.MetaPluginV2()
self.assertEqual(topics.PLUGIN, 'q-plugin')
ret = self.plugin.rpc_workers_supported()
self.assertTrue(ret)
ret = self.plugin.start_rpc_listeners()
self.assertEqual('OK', ret)
|
DueLaser/due_rasp
|
refs/heads/master
|
src/octoprint/gcodegenerator/simplepath.py
|
9
|
#!/usr/bin/env python
"""
simplepath.py
functions for digesting paths into a simple list structure
Copyright (C) 2005 Aaron Spike, aaron@ekips.org
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re, math
def lexPath(d):
"""
returns an iterator that breaks path data
identifies command and parameter tokens
"""
offset = 0
length = len(d)
delim = re.compile(r'[ \t\r\n,]+')
command = re.compile(r'[MLHVCSQTAZmlhvcsqtaz]')
parameter = re.compile(r'(([-+]?[0-9]+(\.[0-9]*)?|[-+]?\.[0-9]+)([eE][-+]?[0-9]+)?)')
while 1:
m = delim.match(d, offset)
if m:
offset = m.end()
if offset >= length:
break
m = command.match(d, offset)
if m:
yield [d[offset:m.end()], True]
offset = m.end()
continue
m = parameter.match(d, offset)
if m:
yield [d[offset:m.end()], False]
offset = m.end()
continue
#TODO: create new exception
raise Exception, 'Invalid path data!'
'''
pathdefs = {commandfamily:
[
implicitnext,
#params,
[casts,cast,cast],
[coord type,x,y,0]
]}
'''
pathdefs = {
'M':['L', 2, [float, float], ['x','y']],
'L':['L', 2, [float, float], ['x','y']],
'H':['H', 1, [float], ['x']],
'V':['V', 1, [float], ['y']],
'C':['C', 6, [float, float, float, float, float, float], ['x','y','x','y','x','y']],
'S':['S', 4, [float, float, float, float], ['x','y','x','y']],
'Q':['Q', 4, [float, float, float, float], ['x','y','x','y']],
'T':['T', 2, [float, float], ['x','y']],
'A':['A', 7, [float, float, float, int, int, float, float], ['r','r','a',0,'s','x','y']],
'Z':['L', 0, [], []]
}
def parsePath(d):
"""
Parse SVG path and return an array of segments.
Removes all shorthand notation.
Converts coordinates to absolute.
"""
retval = []
lexer = lexPath(d)
pen = (0.0,0.0)
subPathStart = pen
lastControl = pen
lastCommand = ''
while 1:
try:
token, isCommand = lexer.next()
except StopIteration:
break
params = []
needParam = True
if isCommand:
if not lastCommand and token.upper() != 'M':
raise Exception, 'Invalid path, must begin with moveto.'
else:
command = token
else:
#command was omitted
#use last command's implicit next command
needParam = False
if lastCommand:
if lastCommand.isupper():
command = pathdefs[lastCommand][0]
else:
command = pathdefs[lastCommand.upper()][0].lower()
else:
raise Exception, 'Invalid path, no initial command.'
numParams = pathdefs[command.upper()][1]
while numParams > 0:
if needParam:
try:
token, isCommand = lexer.next()
if isCommand:
raise Exception, 'Invalid number of parameters'
except StopIteration:
raise Exception, 'Unexpected end of path'
cast = pathdefs[command.upper()][2][-numParams]
param = cast(token)
if command.islower():
if pathdefs[command.upper()][3][-numParams]=='x':
param += pen[0]
elif pathdefs[command.upper()][3][-numParams]=='y':
param += pen[1]
params.append(param)
needParam = True
numParams -= 1
#segment is now absolute so
outputCommand = command.upper()
#Flesh out shortcut notation
if outputCommand in ('H','V'):
if outputCommand == 'H':
params.append(pen[1])
if outputCommand == 'V':
params.insert(0,pen[0])
outputCommand = 'L'
if outputCommand in ('S','T'):
params.insert(0,pen[1]+(pen[1]-lastControl[1]))
params.insert(0,pen[0]+(pen[0]-lastControl[0]))
if outputCommand == 'S':
outputCommand = 'C'
if outputCommand == 'T':
outputCommand = 'Q'
#current values become "last" values
if outputCommand == 'M':
subPathStart = tuple(params[0:2])
pen = subPathStart
if outputCommand == 'Z':
pen = subPathStart
else:
pen = tuple(params[-2:])
if outputCommand in ('Q','C'):
lastControl = tuple(params[-4:-2])
else:
lastControl = pen
lastCommand = command
retval.append([outputCommand,params])
return retval
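# Illustrative example of parsePath() output (added comment, not in the original):
#   parsePath("M 10 10 l 5 0") -> [['M', [10.0, 10.0]], ['L', [15.0, 10.0]]]
# i.e. relative commands are made absolute and shorthand commands are expanded.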
def formatPath(a):
"""Format SVG path data from an array"""
return "".join([cmd + " ".join([str(p) for p in params]) for cmd, params in a])
def translatePath(p, x, y):
for cmd,params in p:
defs = pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'x':
params[i] += x
elif defs[3][i] == 'y':
params[i] += y
def scalePath(p, x, y):
for cmd,params in p:
defs = pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'x':
params[i] *= x
elif defs[3][i] == 'y':
params[i] *= y
elif defs[3][i] == 'r': # radius parameter
params[i] *= x
elif defs[3][i] == 's': # sweep-flag parameter
if x*y < 0:
params[i] = 1 - params[i]
elif defs[3][i] == 'a': # x-axis-rotation angle
if y < 0:
params[i] = - params[i]
def rotatePath(p, a, cx = 0, cy = 0):
if a == 0:
return p
for cmd,params in p:
defs = pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'x':
x = params[i] - cx
y = params[i + 1] - cy
r = math.sqrt((x**2) + (y**2))
if r != 0:
theta = math.atan2(y, x) + a
params[i] = (r * math.cos(theta)) + cx
params[i + 1] = (r * math.sin(theta)) + cy
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 encoding=utf-8 textwidth=99
|
kalicodextu/djangoblog
|
refs/heads/master
|
blog/tests.py
|
873
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
# Create your tests here.
|
noiselabs/box-linux-sync
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup, find_packages
import sys, os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
NEWS = open(os.path.join(here, 'NEWS.txt')).read()
version = '0.1.0'
install_requires = [
# List your project dependencies here.
# For more details, see:
# http://packages.python.org/distribute/setuptools.html#declaring-dependencies
]
setup(name='box-linux-sync',
version=version,
description="Linux client for Box.com",
long_description=README + '\n\n' + NEWS,
classifiers=[
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Communications :: File Sharing',
'Topic :: Utilities'
],
keywords='box sync noiselabs',
author='V\xc3\xadtor Brand\xc3\xa3o',
author_email='noisebleed@noiselabs.org',
url='https://github.com/noisebleed/box-linux-sync',
license='LGPL-3',
packages=find_packages('src'),
package_dir = {'': 'src'},
namespace_packages = ['noiselabs'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
entry_points={
'console_scripts':
['box-linux-sync=noiselabs.box:main']
}
)
|
kthordarson/CouchPotatoServer
|
refs/heads/master
|
libs/dateutil/tzwin.py
|
227
|
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
import winreg
__all__ = ["tzwin", "tzwinlocal"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
global TZKEYNAME
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
winreg.OpenKey(handle, TZKEYNAMENT).Close()
TZKEYNAME = TZKEYNAMENT
except WindowsError:
TZKEYNAME = TZKEYNAME9X
handle.Close()
_settzkeyname()
class tzwinbase(datetime.tzinfo):
"""tzinfo class based on win32's timezones available in the registry."""
def utcoffset(self, dt):
if self._isdst(dt):
return datetime.timedelta(minutes=self._dstoffset)
else:
return datetime.timedelta(minutes=self._stdoffset)
def dst(self, dt):
if self._isdst(dt):
minutes = self._dstoffset - self._stdoffset
return datetime.timedelta(minutes=minutes)
else:
return datetime.timedelta(0)
def tzname(self, dt):
if self._isdst(dt):
return self._dstname
else:
return self._stdname
def list():
"""Return a list of all time zones known to the system."""
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzkey = winreg.OpenKey(handle, TZKEYNAME)
result = [winreg.EnumKey(tzkey, i)
for i in range(winreg.QueryInfoKey(tzkey)[0])]
tzkey.Close()
handle.Close()
return result
list = staticmethod(list)
def display(self):
return self._display
def _isdst(self, dt):
dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
self._dsthour, self._dstminute,
self._dstweeknumber)
dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
self._stdhour, self._stdminute,
self._stdweeknumber)
if dston < dstoff:
return dston <= dt.replace(tzinfo=None) < dstoff
else:
return not dstoff <= dt.replace(tzinfo=None) < dston
class tzwin(tzwinbase):
def __init__(self, name):
self._name = name
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzkey = winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name))
keydict = valuestodict(tzkey)
tzkey.Close()
handle.Close()
self._stdname = keydict["Std"].encode("iso-8859-1")
self._dstname = keydict["Dlt"].encode("iso-8859-1")
self._display = keydict["Display"]
# See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=3l16h", keydict["TZI"])
self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[4:9]
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[12:17]
def __repr__(self):
return "tzwin(%s)" % repr(self._name)
def __reduce__(self):
return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
def __init__(self):
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzlocalkey = winreg.OpenKey(handle, TZLOCALKEYNAME)
keydict = valuestodict(tzlocalkey)
tzlocalkey.Close()
self._stdname = keydict["StandardName"].encode("iso-8859-1")
self._dstname = keydict["DaylightName"].encode("iso-8859-1")
try:
tzkey = winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname))
_keydict = valuestodict(tzkey)
self._display = _keydict["Display"]
tzkey.Close()
except OSError:
self._display = None
handle.Close()
self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
self._dstoffset = self._stdoffset-keydict["DaylightBias"]
# See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=8h", keydict["StandardStart"])
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[1:6]
tup = struct.unpack("=8h", keydict["DaylightStart"])
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[1:6]
def __reduce__(self):
return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
"""dayofweek == 0 means Sunday, whichweek 5 means last instance"""
first = datetime.datetime(year, month, 1, hour, minute)
weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1))
for n in range(whichweek):
dt = weekdayone+(whichweek-n)*ONEWEEK
if dt.month == month:
return dt
def valuestodict(key):
"""Convert a registry key's values to a dictionary."""
dict = {}
size = winreg.QueryInfoKey(key)[1]
for i in range(size):
data = winreg.EnumValue(key, i)
dict[data[0]] = data[1]
return dict
|
gregdek/ansible
|
refs/heads/devel
|
lib/ansible/modules/remote_management/oneview/oneview_network_set_facts.py
|
125
|
#!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_network_set_facts
short_description: Retrieve facts about the OneView Network Sets
description:
- Retrieve facts about the Network Sets from OneView.
version_added: "2.4"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
name:
description:
- Network Set name.
options:
description:
- "List with options to gather facts about Network Set.
Option allowed: C(withoutEthernet).
The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Network Sets
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather paginated, filtered, and sorted facts about Network Sets
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
params:
start: 0
count: 3
sort: 'name:descending'
filter: name='netset001'
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather facts about all Network Sets, excluding Ethernet networks
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
options:
- withoutEthernet
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather facts about a Network Set by name
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
name: Name of the Network Set
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather facts about a Network Set by name, excluding Ethernet networks
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
name: Name of the Network Set
options:
- withoutEthernet
no_log: true
delegate_to: localhost
- debug: var=network_sets
'''
RETURN = '''
network_sets:
description: Has all the OneView facts about the Network Sets.
returned: Always, but can be empty.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class NetworkSetFactsModule(OneViewModuleBase):
argument_spec = dict(
name=dict(type='str'),
options=dict(type='list'),
params=dict(type='dict'),
)
def __init__(self):
super(NetworkSetFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
def execute_module(self):
name = self.module.params.get('name')
if 'withoutEthernet' in self.options:
filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
elif name:
network_sets = self.oneview_client.network_sets.get_by('name', name)
else:
network_sets = self.oneview_client.network_sets.get_all(**self.facts_params)
return dict(changed=False,
ansible_facts=dict(network_sets=network_sets))
def main():
NetworkSetFactsModule().run()
if __name__ == '__main__':
main()
|
kartikgupta0909/gaia
|
refs/heads/master
|
test/unittest/test_transfo.py
|
3
|
#!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Gaia
#
# Gaia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from gaia2 import *
import unittest
import testdata
def search(dataset, id, n):
v = View(dataset)
dist = MetricFactory.create('euclidean', dataset.layout())
return v.nnSearch(id, dist).get(n)
class TestTransfo(unittest.TestCase):
def compareResults(self, r1, r2):
self.assertEqual(len(r1), len(r2))
for i in range(len(r1)):
self.assertEqual(r1[i][0], r2[i][0])
self.assertAlmostEqual(r1[i][1], r2[i][1], 5)
def setUp(self):
cvar.verbose = False
def tearDown(self):
testdata.resetSettings()
def testDoesntBlowup(self):
ds = testdata.loadTestDB()
ignored_descs = testdata.TEST_DATABASE_VARLENGTH_REAL
ds = transform(ds, 'fixlength', { 'except': ignored_descs })
dsc = transform(ds, 'cleaner', { 'except': ignored_descs })
dsr = transform(dsc, 'remove', { 'descriptorNames': '*mfcc*' })
dsr2 = transform(dsc, 'remove', { 'descriptorNames': [ '*mfcc*' ] })
del dsr2
del dsc
del ds
dsn = transform(dsr, 'normalize', { 'except': ignored_descs })
dspca = transform(dsn, 'pca', { 'resultName': 'pca30',
'dimension': 30,
'descriptorNames': [ '*.mean', '*.var' ] })
def testQt46FloatParameterBug(self):
# Note: this was triggered by Qt 4.6 introducing a QVariant(float) constructor, which resulted
# in pmapToPython to fail with an unknown type error (followed by a segfault)...
ds = testdata.loadTestDB()
ds = transform(ds, 'fixlength')
ds = transform(ds, 'removevl')
ds = transform(ds, 'normalize')
self.assertEqual(ds.history().toPython()[-1]['Applier parameters']['coeffs']['.barkbands.mean']['a'][0],
24.922689437866211)
def testRegressionGaia14(self):
ds = testdata.loadSmallDB()
ds = transform(ds, 'fixlength')
to_remove = testdata.TEST_SMALLDB_VARLENGTH
dsr = transform(ds, 'remove', { 'descriptorNames': to_remove })
self.compareResults(search(dsr, '1_ethno.wav', 5), testdata.SMALL_DB_RAW_RESULTS)
dsc = transform(dsr, 'cleaner')
self.compareResults(search(dsc, '1_ethno.wav', 5), testdata.SMALL_DB_CLEAN_RESULTS)
dsn = transform(dsc, 'normalize')
self.compareResults(search(dsn, '1_ethno.wav', 5), testdata.SMALL_DB_NORM_RESULTS)
dspca = transform(dsn, 'pca', { 'resultName': 'pca30',
'dimension': 30,
'descriptorNames': '*' })
self.compareResults(search(dspca, '1_ethno.wav', 5), testdata.SMALL_DB_PCA_RESULTS)
def testWrongArgument(self):
ds = testdata.loadTestDB()
ds = transform(ds, 'fixlength')
ds = transform(ds, 'removevl')
ds = transform(ds, 'cleaner')
ds = transform(ds, 'normalize')
# missing param: className
self.assertRaises(Exception, transform, ds, 'svmtrain', { 'descriptorNames': '*.mean' })
# wrong param: descriptorName
self.assertRaises(Exception, transform, ds, 'svmtrain', { 'className': 'kloug',
'descriptorName': '*.mean' })
# missing param: resultName
self.assertRaises(Exception, transform, ds, 'pca', { 'dimension': 15, 'resultName': '' })
def testSimplifyHistory(self):
ds = testdata.createSimpleDataSet()
p = Point()
p.setName('p2')
p.setLayout(ds.layout())
p['a.2'] = [ 1.2, 2.3 ]
ds.addPoint(p)
ds0 = ds.copy()
ds1 = ds.copy()
ds1.simplifyHistory()
self.assertEqual(ds1.history().size(), 0)
ds = transform(ds, 'removevl')
ds2 = ds.copy()
ds2.simplifyHistory()
self.assertEqual(ds2.history().toPython(),
[{ 'Analyzer name': 'remove',
'Analyzer parameters': { 'descriptorNames': [ '.a.2' ] },
'Applier name': 'removedesc',
'Applier parameters': {'descriptorNames': [ '.a.2' ] },
'Additional info': {}
}])
ds = transform(ds, 'fixlength')
ds3 = ds.copy()
ds3.simplifyHistory()
self.assertEqual(ds3.history().toPython(),
[{ 'Analyzer name': 'remove',
'Analyzer parameters': { 'descriptorNames': [ '.a.2' ] },
'Applier name': 'removedesc',
'Applier parameters': {'descriptorNames': [ '.a.2' ] },
'Additional info': {}
},
{ 'Analyzer name': 'fixlength',
'Analyzer parameters': { 'descriptorNames': [ '.a.1', '.b', '.c', '.d' ] },
'Applier name': 'fixlengthapplier',
'Applier parameters': {'descriptorNames': [ '.a.1', '.b', '.c', '.d' ] },
'Additional info': {}
}])
ds = transform(ds, 'remove', { 'descriptorNames': 'a.*' })
ds4 = ds.copy()
ds4.simplifyHistory()
self.assertEqual(ds4.history().toPython(),
[{ 'Analyzer name': 'remove',
'Analyzer parameters': { 'descriptorNames': [ '.a.1', '.a.2' ] },
'Applier name': 'removedesc',
'Applier parameters': {'descriptorNames': [ '.a.1', '.a.2' ] },
'Additional info': {}
},
{ 'Analyzer name': 'fixlength',
'Analyzer parameters': { 'descriptorNames': [ '.b', '.c', '.d' ] },
'Applier name': 'fixlengthapplier',
'Applier parameters': {'descriptorNames': [ '.b', '.c', '.d' ] },
'Additional info': {}
}])
ds = transform(ds, 'select', { 'descriptorNames': [ 'b', 'c' ] })
ds5 = ds.copy()
ds5.simplifyHistory()
self.assertEqual(ds5.history().toPython(),
[{ 'Analyzer name': 'remove',
'Analyzer parameters': { 'descriptorNames': [ '.a.1', '.a.2', '.d' ] },
'Applier name': 'removedesc',
'Applier parameters': {'descriptorNames': [ '.a.1', '.a.2', '.d' ] },
'Additional info': {}
},
{ 'Analyzer name': 'fixlength',
'Analyzer parameters': { 'descriptorNames': [ '.b', '.c' ] },
'Applier name': 'fixlengthapplier',
'Applier parameters': {'descriptorNames': [ '.b', '.c' ] },
'Additional info': {}
}])
p2 = Point()
p2.setLayout(ds0.layout())
p2['b'] = 23
p2['c'] = 78
p2['a.2'] = [ 1, 2, 3, 4 ]
p2m = ds5.history().mapPoint(p2)
self.assertEqual(p2m.layout().descriptorNames(), ('.b', '.c'))
self.assertEqual(p2m['b'], 23.)
self.assertEqual(p2m['c'], 78.)
def testFixLength(self):
testdata.useFixedLength = True
self.testDoesntBlowup()
self.testRegressionGaia14()
def testEnumerate(self):
testdata.useEnumerate = True
self.testDoesntBlowup()
self.testRegressionGaia14()
def testEnumerateFixLength(self):
testdata.useEnumerate = True
self.testFixLength()
suite = unittest.TestLoader().loadTestsFromTestCase(TestTransfo)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
irdumbs/Dumb-Cogs
|
refs/heads/master
|
welcome/welcome.py
|
1
|
import discord
from discord.ext import commands
from .utils.dataIO import dataIO
from .utils import checks
from .utils.chat_formatting import pagify
from __main__ import send_cmd_help
from copy import deepcopy
import os
import asyncio
from random import choice as rand_choice
default_greeting = "Welcome {0.name} to {1.name}!"
default_settings = {"GREETING": [default_greeting], "ON": False,
"CHANNEL": None, "WHISPER": False,
"BOTS_MSG": None, "BOTS_ROLE": None}
settings_path = "data/welcome/settings.json"
class Welcome:
"""Welcomes new members to the server in the default channel"""
def __init__(self, bot):
self.bot = bot
self.settings = dataIO.load_json(settings_path)
@commands.group(pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def welcomeset(self, ctx):
"""Sets welcome module settings"""
server = ctx.message.server
if server.id not in self.settings:
self.settings[server.id] = deepcopy(default_settings)
self.settings[server.id]["CHANNEL"] = server.default_channel.id
dataIO.save_json(settings_path, self.settings)
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
msg = "```"
msg += "Random GREETING: {}\n".format(rand_choice(self.settings[server.id]["GREETING"]))
msg += "CHANNEL: #{}\n".format(self.get_welcome_channel(server))
msg += "ON: {}\n".format(self.settings[server.id]["ON"])
msg += "WHISPER: {}\n".format(self.settings[server.id]["WHISPER"])
msg += "BOTS_MSG: {}\n".format(self.settings[server.id]["BOTS_MSG"])
msg += "BOTS_ROLE: {}\n".format(self.settings[server.id]["BOTS_ROLE"])
msg += "```"
await self.bot.say(msg)
@welcomeset.group(pass_context=True, name="msg")
async def welcomeset_msg(self, ctx):
"""Manage welcome messages
"""
if ctx.invoked_subcommand is None or \
isinstance(ctx.invoked_subcommand, commands.Group):
await send_cmd_help(ctx)
return
@welcomeset_msg.command(pass_context=True, name="add", no_pm=True)
async def welcomeset_msg_add(self, ctx, *, format_msg):
"""Adds a welcome message format for the server to be chosen at random
{0} is user
{1} is server
Default is set to:
Welcome {0.name} to {1.name}!
Example formats:
{0.mention}.. What are you doing here?
{1.name} has a new member! {0.name}#{0.discriminator} - {0.id}
Someone new joined! Who is it?! D: IS HE HERE TO HURT US?!"""
server = ctx.message.server
self.settings[server.id]["GREETING"].append(format_msg)
dataIO.save_json(settings_path, self.settings)
await self.bot.say("Welcome message added for the server.")
await self.send_testing_msg(ctx, msg=format_msg)
@welcomeset_msg.command(pass_context=True, name="del", no_pm=True)
async def welcomeset_msg_del(self, ctx):
"""Removes a welcome message from the random message list
"""
server = ctx.message.server
author = ctx.message.author
msg = 'Choose a welcome message to delete:\n\n'
for c, m in enumerate(self.settings[server.id]["GREETING"]):
msg += " {}. {}\n".format(c, m)
for page in pagify(msg, ['\n', ' '], shorten_by=20):
await self.bot.say("```\n{}\n```".format(page))
answer = await self.bot.wait_for_message(timeout=120, author=author)
try:
num = int(answer.content)
choice = self.settings[server.id]["GREETING"].pop(num)
except:
await self.bot.say("That's not a number in the list :/")
return
if not self.settings[server.id]["GREETING"]:
self.settings[server.id]["GREETING"] = [default_greeting]
dataIO.save_json(settings_path, self.settings)
await self.bot.say("**This message was deleted:**\n{}".format(choice))
@welcomeset_msg.command(pass_context=True, name="list", no_pm=True)
async def welcomeset_msg_list(self, ctx):
"""Lists the welcome messages of this server
"""
server = ctx.message.server
msg = 'Welcome messages:\n\n'
for c, m in enumerate(self.settings[server.id]["GREETING"]):
msg += " {}. {}\n".format(c, m)
for page in pagify(msg, ['\n', ' '], shorten_by=20):
await self.bot.say("```\n{}\n```".format(page))
@welcomeset.command(pass_context=True)
async def toggle(self, ctx):
"""Turns on/off welcoming new users to the server"""
server = ctx.message.server
self.settings[server.id]["ON"] = not self.settings[server.id]["ON"]
if self.settings[server.id]["ON"]:
await self.bot.say("I will now welcome new users to the server.")
await self.send_testing_msg(ctx)
else:
await self.bot.say("I will no longer welcome new users.")
dataIO.save_json(settings_path, self.settings)
@welcomeset.command(pass_context=True)
async def channel(self, ctx, channel : discord.Channel=None):
"""Sets the channel to send the welcome message
If channel isn't specified, the server's default channel will be used"""
server = ctx.message.server
if channel is None:
channel = ctx.message.server.default_channel
if not server.get_member(self.bot.user.id
).permissions_in(channel).send_messages:
await self.bot.say("I do not have permissions to send "
"messages to {0.mention}".format(channel))
return
self.settings[server.id]["CHANNEL"] = channel.id
dataIO.save_json(settings_path, self.settings)
channel = self.get_welcome_channel(server)
await self.bot.send_message(channel, "I will now send welcome "
"messages to {0.mention}".format(channel))
await self.send_testing_msg(ctx)
@welcomeset.group(pass_context=True, name="bot", no_pm=True)
async def welcomeset_bot(self, ctx):
"""Special welcome for bots"""
if ctx.invoked_subcommand is None or \
isinstance(ctx.invoked_subcommand, commands.Group):
await send_cmd_help(ctx)
return
@welcomeset_bot.command(pass_context=True, name="msg", no_pm=True)
async def welcomeset_bot_msg(self, ctx, *, format_msg=None):
"""Set the welcome msg for bots.
Leave blank to reset to regular user welcome"""
server = ctx.message.server
self.settings[server.id]["BOTS_MSG"] = format_msg
dataIO.save_json(settings_path, self.settings)
if format_msg is None:
await self.bot.say("Bot message reset. Bots will now be welcomed as regular users.")
else:
await self.bot.say("Bot welcome message set for the server.")
await self.send_testing_msg(ctx, bot=True)
# TODO: Check if have permissions
@welcomeset_bot.command(pass_context=True, name="role", no_pm=True)
async def welcomeset_bot_role(self, ctx, role: discord.Role=None):
"""Set the role to put bots in when they join.
Leave blank to not give them a role."""
server = ctx.message.server
self.settings[server.id]["BOTS_ROLE"] = role.name if role else role
dataIO.save_json(settings_path, self.settings)
await self.bot.say("Bots that join this server will "
"now be put into the {} role".format(role.name))
@welcomeset.command(pass_context=True)
async def whisper(self, ctx, choice: str=None):
"""Sets whether or not a DM is sent to the new user
Options:
off - turns off DMs to users
only - only send a DM to the user, don't send a welcome to the channel
both - send a message to both the user and the channel
If Option isn't specified, toggles between 'off' and 'only'
DMs will not be sent to bots"""
options = {"off": False, "only": True, "both": "BOTH"}
server = ctx.message.server
if choice is None:
self.settings[server.id]["WHISPER"] = not self.settings[server.id]["WHISPER"]
elif choice.lower() not in options:
await send_cmd_help(ctx)
return
else:
self.settings[server.id]["WHISPER"] = options[choice.lower()]
dataIO.save_json(settings_path, self.settings)
channel = self.get_welcome_channel(server)
if not self.settings[server.id]["WHISPER"]:
await self.bot.say("I will no longer send DMs to new users")
elif self.settings[server.id]["WHISPER"] == "BOTH":
await self.bot.send_message(channel, "I will now send welcome "
"messages to {0.mention} as well as to "
"the new user in a DM".format(channel))
else:
await self.bot.send_message(channel, "I will now only send "
"welcome messages to the new user "
"as a DM".format(channel))
await self.send_testing_msg(ctx)
async def member_join(self, member):
server = member.server
if server.id not in self.settings:
self.settings[server.id] = deepcopy(default_settings)
self.settings[server.id]["CHANNEL"] = server.default_channel.id
dataIO.save_json(settings_path, self.settings)
if not self.settings[server.id]["ON"]:
return
if server is None:
print("Server is None. Private Message or some new fangled "
"Discord thing?.. Anyways there be an error, "
"the user was {}".format(member.name))
return
only_whisper = self.settings[server.id]["WHISPER"] is True
bot_welcome = member.bot and self.settings[server.id]["BOTS_MSG"]
bot_role = member.bot and self.settings[server.id]["BOTS_ROLE"]
msg = bot_welcome or rand_choice(self.settings[server.id]["GREETING"])
# whisper the user if needed
if not member.bot and self.settings[server.id]["WHISPER"]:
try:
await self.bot.send_message(member, msg.format(member, server))
except:
print("welcome.py: unable to whisper {}. Probably "
"doesn't want to be PM'd".format(member))
# grab the welcome channel
channel = self.get_welcome_channel(server)
if channel is None: # complain even if only whisper
print('welcome.py: Channel not found. It was most '
'likely deleted. User joined: {}'.format(member.name))
return
# we can stop here
if only_whisper and not bot_welcome:
return
if not self.speak_permissions(server):
print("Permissions Error. User that joined: "
"{0.name}".format(member))
print("Bot doesn't have permissions to send messages to "
"{0.name}'s #{1.name} channel".format(server, channel))
return
# try to add role if needed
failed_to_add_role = False
if bot_role:
try:
role = discord.utils.get(server.roles, name=bot_role)
await self.bot.add_roles(member, role)
except:
print('welcome.py: unable to add {} role to {}. '
'Role was deleted, network error, or lacking '
'permissions. Trying again in 5 seconds.'
.format(bot_role, member))
failed_to_add_role = True
else:
print('welcome.py: added {} role to '
'bot, {}'.format(role, member))
# finally, welcome them
await self.bot.send_message(channel, msg.format(member, server))
if failed_to_add_role:
await asyncio.sleep(5)
try:
await self.bot.add_roles(member, role)
except:
print('welcome.py: Still unable to add {} role to {}.'
.format(bot_role, member))
else:
print('welcome.py: added {} role to '
'bot, {}'.format(role, member))
def get_welcome_channel(self, server):
try:
return server.get_channel(self.settings[server.id]["CHANNEL"])
except:
return None
def speak_permissions(self, server):
channel = self.get_welcome_channel(server)
if channel is None:
return False
return server.get_member(self.bot.user.id
).permissions_in(channel).send_messages
async def send_testing_msg(self, ctx, bot=False, msg=None):
server = ctx.message.server
channel = self.get_welcome_channel(server)
rand_msg = msg or rand_choice(self.settings[server.id]["GREETING"])
if channel is None:
await self.bot.send_message(ctx.message.channel,
"I can't find the specified channel. "
"It might have been deleted.")
return
await self.bot.send_message(ctx.message.channel,
"`Sending a testing message to "
"`{0.mention}".format(channel))
if self.speak_permissions(server):
msg = self.settings[server.id]["BOTS_MSG"] if bot else rand_msg
if not bot and self.settings[server.id]["WHISPER"]:
await self.bot.send_message(ctx.message.author,
msg.format(ctx.message.author,server))
if bot or self.settings[server.id]["WHISPER"] is not True:
await self.bot.send_message(channel,
msg.format(ctx.message.author, server))
else:
await self.bot.send_message(ctx.message.channel,
"I do not have permissions "
"to send messages to "
"{0.mention}".format(channel))
def check_folders():
if not os.path.exists("data/welcome"):
print("Creating data/welcome folder...")
os.makedirs("data/welcome")
def check_files():
f = settings_path
if not dataIO.is_valid_json(f):
print("Creating welcome settings.json...")
dataIO.save_json(f, {})
else: # consistency check
current = dataIO.load_json(f)
for k, v in current.items():
if v.keys() != default_settings.keys():
for key in default_settings.keys():
if key not in v.keys():
current[k][key] = deepcopy(default_settings)[key]
print("Adding " + str(key) +
" field to welcome settings.json")
# upgrade. Before GREETING was 1 string
for server in current.values():
if isinstance(server["GREETING"], str):
server["GREETING"] = [server["GREETING"]]
dataIO.save_json(f, current)
def setup(bot):
check_folders()
check_files()
n = Welcome(bot)
bot.add_listener(n.member_join, "on_member_join")
bot.add_cog(n)
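# A minimal sketch of the per-server settings shape this cog reads and writes,
# inferred from the keys used above (values shown are illustrative only; the
# real defaults come from `default_settings`/`default_greeting`, defined
# earlier in the file and not shown here):
#
#     {
#         "<server id>": {
#             "GREETING": ["Welcome {0.name} to {1.name}!"],  # list of formats
#             "ON": False,          # master toggle (see `toggle`)
#             "CHANNEL": None,      # channel id set by `channel`
#             "WHISPER": False,     # False / True / "BOTH" (see `whisper`)
#             "BOTS_MSG": None,     # optional bot-specific greeting
#             "BOTS_ROLE": None     # optional role name for joining bots
#         }
#     }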
|
Slezhuk/ansible
|
refs/heads/devel
|
lib/ansible/modules/notification/campfire.py
|
70
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: campfire
version_added: "1.2"
short_description: Send a message to Campfire
description:
- Send a message to Campfire.
- Messages with newlines will result in a "Paste" message being sent.
options:
subscription:
description:
- The subscription name to use.
required: true
token:
description:
- API token.
required: true
room:
description:
- Room number to which the message should be sent.
required: true
msg:
description:
- The message body.
required: true
notify:
description:
- Send a notification sound before the message.
required: false
choices: ["56k", "bell", "bezos", "bueller", "clowntown",
"cottoneyejoe", "crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama", "greatjob", "greyjoy",
"guarantee", "heygirl", "horn", "horror",
"inconceivable", "live", "loggins", "makeitso", "noooo",
"nyan", "ohmy", "ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret", "sexyback",
"story", "tada", "tmyk", "trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah", "yodel"]
# informational: requirements for nodes
requirements: [ ]
author: "Adam Garside (@fabulops)"
'''
EXAMPLES = '''
- campfire:
subscription: foo
token: 12345
room: 123
msg: Task completed.
- campfire:
subscription: foo
token: 12345
room: 123
notify: loggins
msg: Task completed ... with feeling.
'''
import cgi
def main():
module = AnsibleModule(
argument_spec=dict(
subscription=dict(required=True),
token=dict(required=True, no_log=True),
room=dict(required=True),
msg=dict(required=True),
notify=dict(required=False,
choices=["56k", "bell", "bezos", "bueller",
"clowntown", "cottoneyejoe",
"crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama",
"greatjob", "greyjoy", "guarantee",
"heygirl", "horn", "horror",
"inconceivable", "live", "loggins",
"makeitso", "noooo", "nyan", "ohmy",
"ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret",
"sexyback", "story", "tada", "tmyk",
"trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah",
"yodel"]),
),
supports_check_mode=False
)
subscription = module.params["subscription"]
token = module.params["token"]
room = module.params["room"]
msg = module.params["msg"]
notify = module.params["notify"]
URI = "https://%s.campfirenow.com" % subscription
NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
MSTR = "<message><body>%s</body></message>"
AGENT = "Ansible/1.2"
# Hack to add basic auth username and password the way fetch_url expects
module.params['url_username'] = token
module.params['url_password'] = 'X'
target_url = '%s/room/%s/speak.xml' % (URI, room)
headers = {'Content-Type': 'application/xml',
'User-agent': AGENT}
# Send some audible notification if requested
if notify:
response, info = fetch_url(module, target_url, data=NSTR % cgi.escape(notify), headers=headers)
if info['status'] not in [200, 201]:
module.fail_json(msg="unable to send msg: '%s', campfire api"
" returned error code: '%s'" %
(notify, info['status']))
# Send the message
    response, info = fetch_url(module, target_url, data=MSTR % cgi.escape(msg), headers=headers)
if info['status'] not in [200, 201]:
module.fail_json(msg="unable to send msg: '%s', campfire api"
" returned error code: '%s'" %
(msg, info['status']))
module.exit_json(changed=True, room=room, msg=msg, notify=notify)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
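# A rough sketch of the request this module ends up making for the first
# EXAMPLES entry above (values taken from that playbook snippet; the token is
# sent as the basic-auth username with the literal password 'X'):
#
#     POST https://foo.campfirenow.com/room/123/speak.xml
#     Content-Type: application/xml
#     User-agent: Ansible/1.2
#
#     <message><body>Task completed.</body></message>
#
# When `notify` is set, a SoundMessage body such as
# <message><type>SoundMessage</type><body>loggins</body></message> is posted
# first, followed by the text message.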
|
feist/pcs
|
refs/heads/master
|
pcs/lib/commands/test/cluster/__init__.py
|
12133432
| |
jk1/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/markup/models.py
|
12133432
| |
tjghs/ghstracker
|
refs/heads/master
|
ghswebsite/apps/announcements/models.py
|
3
|
from django.db import models
class Announcement(models.Model):
date = models.DateField(auto_now=True)
title = models.CharField(max_length=100)
content = models.TextField(max_length=500)
|
pbanaszkiewicz/website
|
refs/heads/gh-pages
|
_posts/2015/02/per-capita.py
|
9
|
import sys
import csv
import numpy as np
from matplotlib import pyplot as plt
countries = []
populations = []
attendees = []
instructors = []
with open(sys.argv[1], 'r') as raw:
cooked = csv.reader(raw)
for (c, p, a, i) in cooked:
countries.append(c)
populations.append(float(p))
attendees.append(float(a))
instructors.append(float(i))
populations = np.array(populations) / 1e6
attendees = np.array(attendees) / populations
instructors = np.array(instructors) / populations
plt.scatter(attendees, instructors)
plt.xlabel('Attendees per million pop')
plt.ylabel('Instructors per million pop')
for (label, x, y) in zip(countries, attendees, instructors):
plt.annotate(label, xy = (x, y))
plt.show()
|
jcpowermac/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigiq_regkey_pool.py
|
28
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigiq_regkey_pool import ModuleParameters
from library.bigiq_regkey_pool import ApiParameters
from library.bigiq_regkey_pool import ModuleManager
from library.bigiq_regkey_pool import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigiq_regkey_pool import ModuleParameters
from ansible.modules.network.f5.bigiq_regkey_pool import ApiParameters
from ansible.modules.network.f5.bigiq_regkey_pool import ModuleManager
from ansible.modules.network.f5.bigiq_regkey_pool import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
description='this is a description'
)
p = ModuleParameters(params=args)
assert p.description == 'this is a description'
def test_api_parameters(self):
args = load_fixture('load_regkey_license_pool.json')
p = ApiParameters(params=args)
assert p.description == 'this is a description'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
set_module_args(dict(
name='foo',
description='bar baz',
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
|
mweb/python
|
refs/heads/master
|
exercises/house/example.py
|
7
|
parts = [('lay in', 'the house that Jack built'),
('ate', 'the malt'),
('killed', 'the rat'),
('worried', 'the cat'),
('tossed', 'the dog'),
('milked', 'the cow with the crumpled horn'),
('kissed', 'the maiden all forlorn'),
('married', 'the man all tattered and torn'),
('woke', 'the priest all shaven and shorn'),
('kept', 'the rooster that crowed in the morn'),
('belonged to', 'the farmer sowing his corn'),
('', 'the horse and the hound and the horn')]
def verse(n):
v = ['This is {}'.format(parts[n][1])]
v.extend(['that {0} {1}'.format(parts[i][0], parts[i][1])
for i in range(n - 1, -1, -1)])
v[-1] += '.'
return '\n'.join(v)
def rhyme():
return "\n\n".join(verse(n) for n in range(len(parts)))
|
skyshe/oms
|
refs/heads/master
|
asset/form.py
|
13
|
# -*- coding: utf-8 -*-
from django import forms
from asset.models import *
class HostsListForm(forms.ModelForm):
class Meta:
model = HostList
widgets = {
'ip': forms.TextInput(attrs={'class': 'form-control'}),
'hostname': forms.TextInput(attrs={'class': 'form-control'}),
'product': forms.TextInput(attrs={'class': 'form-control'}),
'application': forms.TextInput(attrs={'class': 'form-control'}),
'idc_jg': forms.TextInput(attrs={'class': 'form-control'}),
'status': forms.TextInput(attrs={'class': 'form-control'}),
'remark': forms.TextInput(attrs={'class': 'form-control'}),
}
class NetworkAssetForm(forms.ModelForm):
class Meta:
model = NetworkAsset
widgets = {
'ip': forms.TextInput(attrs={'class': 'form-control'}),
'hostname': forms.TextInput(attrs={'class': 'form-control'}),
'manufacturer': forms.TextInput(attrs={'class': 'form-control'}),
'productname': forms.TextInput(attrs={'class': 'form-control'}),
'idc_jg': forms.TextInput(attrs={'class': 'form-control'}),
'service_tag': forms.TextInput(attrs={'class': 'form-control'}),
'remark': forms.TextInput(attrs={'class': 'form-control'}),
}
class IdcAssetForm(forms.ModelForm):
class Meta:
model = IdcAsset
widgets = {
'idc_name': forms.TextInput(attrs={'class': 'form-control'}),
'idc_type': forms.TextInput(attrs={'class': 'form-control'}),
'idc_location': forms.TextInput(attrs={'class': 'form-control'}),
'contract_date': forms.TextInput(attrs={'class': 'form-control'}),
'idc_contacts': forms.TextInput(attrs={'class': 'form-control'}),
'remark': forms.TextInput(attrs={'class': 'form-control'}),
}
|
markmsmith/LeapWebVisualizer
|
refs/heads/master
|
websocketserver.py
|
1
|
import os
import time
import Queue
import logging
import threading
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import Leap
import simplejson as json
logger = logging.getLogger(__name__)
class LeapJSONEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that knows how to serialize leap swig things
"""
def default(self, o):
if isinstance(o, Leap.Vector):
return {
"x": o.x,
"y": o.y,
"z": o.z,
}
elif isinstance(o, Leap.Ray):
return {
"position": o.position,
"direction": o.direction,
}
elif isinstance(o, Leap.Ball):
return {
"position": o.position,
"radius": o.radius,
}
elif isinstance(o, Leap.Finger):
return {
'id': o.id(),
'tip': o.tip(),
'velocity': o.velocity(),
'width': o.width(),
'length': o.length(),
'isTool': o.isTool(),
}
elif isinstance(o, Leap.Hand):
return {
'id': o.id(),
'fingers': o.fingers(),
'palm': o.palm(),
'velocity': o.velocity(),
'normal': o.normal(),
'ball': o.ball(),
}
elif isinstance(o, Leap.Frame):
return {
'id': o.id(),
'timestamp': o.timestamp(),
'hands': o.hands(),
}
else:
return super(LeapJSONEncoder, self).default(o)
class LListener(Leap.Listener):
"""
Listener to throw things on a queue
"""
def __init__(self, event_queue, *args, **kwargs):
super(LListener, self).__init__(*args, **kwargs)
self.event_queue = event_queue
def try_put(self, msg):
assert isinstance(msg, dict)
try:
self.event_queue.put(msg, block=False)
except Queue.Full:
pass
def onInit(self, controller):
self.try_put(
{
"state": "initialized"
}
)
def onConnect(self, controller):
self.try_put(
{
"state": "connected"
}
)
def onDisconnect(self, controller):
self.try_put(
{
"state": "disconnected"
}
)
def onFrame(self, controller):
self.try_put(
{
"state": "frame",
"frame": controller.frame(),
}
)
class LeapThread(threading.Thread):
def __init__(self, event_queue):
threading.Thread.__init__(self)
self.daemon = True
self.event_queue = event_queue
def run(self):
self.listener = LListener(self.event_queue)
self.controller = Leap.Controller(self.listener)
while 1:
time.sleep(1)
self.controller = None
class PlaybackThread(threading.Thread):
def __init__(self, event_queue, playback, loop, playbackDelay):
threading.Thread.__init__(self)
self.daemon = True
self.event_queue = event_queue
self.playback = playback
self.loop = loop
self.playbackDelay = playbackDelay
def try_put(self, msg):
assert isinstance(msg, dict)
try:
self.event_queue.put(msg, block=False)
except Queue.Full:
pass
def run(self):
# give them time to connect
print "Delaying playback for %d seconds" % self.playbackDelay
time.sleep(self.playbackDelay)
print "Playing back recording %s" % self.playback
self.playback_recording()
if(self.loop):
print "looping recording"
while(True):
self.playback_recording()
def playback_recording(self):
with open(self.playback, 'r') as f:
for line in f:
lineJson = json.loads(line)
print line
self.try_put(lineJson)
time.sleep(0.01)
print "Playback complete"
class Application(tornado.web.Application):
def __init__(self, options):
self.options = options
self.recording = False
self.lsh = LeapSocketHandler
handlers = [
(r"/", MainHandler),
(r"/leapsocket", self.lsh),
]
settings = {
'static_path': os.path.join(os.path.dirname(__file__), "static"),
}
tornado.web.Application.__init__(self, handlers, **settings)
self.event_queue = Queue.Queue()
if(options.playback):
self.playback_thread = PlaybackThread(self.event_queue, options.playback, options.loop, options.playbackDelay)
self.playback_thread.start()
else:
self.leap_thread = LeapThread(self.event_queue)
self.leap_thread.start()
self.startTime = time.time()
tornado.ioloop.PeriodicCallback(self._poll_for_leap_events, 1).start()
def _poll_for_leap_events(self):
try:
d = self.event_queue.get(block=False)
logger.debug("pending event queue size: %i", self.event_queue.qsize())
frameJson = json.dumps(d, cls=LeapJSONEncoder)
if(self.options.record):
now = time.time()
if(self.recording or
((now - self.startTime) >= self.options.recordingDelay)):
if(not self.recording):
print "Starting recording to %s" % self.options.record
self.recording = True
with open(self.options.record, 'a') as f:
f.write(frameJson)
f.write('\n')
self.lsh.send_updates(frameJson)
self.event_queue.task_done()
except Queue.Empty:
pass
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.redirect("/static/html/index.html")
class LeapSocketHandler(tornado.websocket.WebSocketHandler):
waiters = set()
def allow_draft76(self):
# for iOS 5.0 Safari
return True
def open(self):
LeapSocketHandler.waiters.add(self)
def on_close(self):
LeapSocketHandler.waiters.remove(self)
@classmethod
def send_updates(cls, chat):
if cls.waiters:
logger.debug("sending message to %d waiters", len(cls.waiters))
for waiter in cls.waiters:
try:
waiter.write_message(chat)
except:
logger.error("Error sending message", exc_info=True)
def main():
logging.basicConfig(level=logging.INFO)
tornado.options.define("port", default=8888, help="run on the given port", type=int)
tornado.options.define("playback", default=None, help="A frame data recording file (in json format) to playback isntead of getting data from the Leap", type=str)
tornado.options.define("playbackDelay", default=5.0, help="How long to wait (in seconds) before playing back the recording (only relevant when using --playback)", type=float)
tornado.options.define("loop", default=False, help="Whether to loop playback of the recording (only relevant when using --playback)", type=bool)
tornado.options.define("record", default=None, help="The name of a file to record frame data to. Can be played back with --playback=<file name>", type=str)
tornado.options.define("recordingDelay", default=5.0, help="How long to wait (in seconds) before starting to record (only relevant when using --record)", type=float)
tornado.options.parse_command_line()
app = Application(tornado.options.options)
app.listen(tornado.options.options.port)
print "%s listening on http://%s:%s" % (__file__, "0.0.0.0", tornado.options.options.port)
print "ctrl-c to stop!"
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
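# A brief usage sketch based on the options defined in main() above (the file
# name is made up):
#
#     python websocketserver.py                         # serve live Leap data on :8888
#     python websocketserver.py --record=frames.json    # also record frames to a file
#     python websocketserver.py --playback=frames.json --loop
#                                                       # replay a recording instead of
#                                                       # reading from the device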
|
FHannes/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/mercurial/scmposix.py
|
94
|
import sys, os
import osutil
def _rcfiles(path):
rcs = [os.path.join(path, 'hgrc')]
rcdir = os.path.join(path, 'hgrc.d')
try:
rcs.extend([os.path.join(rcdir, f)
for f, kind in osutil.listdir(rcdir)
if f.endswith(".rc")])
except OSError:
pass
return rcs
def systemrcpath():
path = []
if sys.platform == 'plan9':
root = 'lib/mercurial'
else:
root = 'etc/mercurial'
# old mod_python does not set sys.argv
if len(getattr(sys, 'argv', [])) > 0:
p = os.path.dirname(os.path.dirname(sys.argv[0]))
path.extend(_rcfiles(os.path.join(p, root)))
path.extend(_rcfiles('/' + root))
return path
def userrcpath():
if sys.platform == 'plan9':
return [os.environ['home'] + '/lib/hgrc']
else:
return [os.path.expanduser('~/.hgrc')]
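# A hedged illustration of what these helpers return on a typical Linux
# install (the "<prefix>" entries depend on where the hg script lives, since
# that path is derived from sys.argv[0]; "foo.rc" is a made-up file name):
#
#     systemrcpath() -> ['<prefix>/etc/mercurial/hgrc',
#                        '<prefix>/etc/mercurial/hgrc.d/foo.rc', ...,
#                        '/etc/mercurial/hgrc',
#                        '/etc/mercurial/hgrc.d/foo.rc', ...]
#     userrcpath()   -> ['/home/<user>/.hgrc']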
|
pythonpackages/vanity_app
|
refs/heads/master
|
debug.py
|
2
|
import os
import sys
import redis
import readline
import rlcompleter
import urlparse
readline.parse_and_bind('tab: complete')
if 'REDISTOGO_URL' in os.environ:
urlparse.uses_netloc.append('redis')
url = urlparse.urlparse(os.environ['REDISTOGO_URL'])
db = redis.Redis(host=url.hostname, port=url.port, db=0, password=url.password)
else:
db = redis.Redis()
_interactive = True
if len(sys.argv) > 1:
_options, _args = __import__("getopt").getopt(sys.argv[1:], 'ic:m:')
_interactive = False
for (_opt, _val) in _options:
if _opt == '-i':
_interactive = True
elif _opt == '-c':
exec _val
elif _opt == '-m':
sys.argv[1:] = _args
_args = []
__import__("runpy").run_module(
_val, {}, "__main__", alter_sys=True)
if _args:
sys.argv[:] = _args
__file__ = _args[0]
del _options, _args
execfile(__file__)
if _interactive:
del _interactive
__import__("code").interact(banner="", local=globals())
|
ncultra/qemu
|
refs/heads/master
|
scripts/tracetool/backend/simple.py
|
97
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple built-in backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
PUBLIC = True
def is_string(arg):
strtype = ('const char*', 'char*', 'const char *', 'char *')
if arg.lstrip().startswith(strtype):
return True
else:
return False
def generate_h_begin(events):
for event in events:
out('void _simple_%(api)s(%(args)s);',
api=event.api(),
args=event.args)
out('')
def generate_h(event):
out(' _simple_%(api)s(%(args)s);',
api=event.api(),
args=", ".join(event.args.names()))
def generate_c_begin(events):
out('#include "trace.h"',
'#include "trace/control.h"',
'#include "trace/simple.h"',
'')
def generate_c(event):
out('void _simple_%(api)s(%(args)s)',
'{',
' TraceBufferRecord rec;',
api=event.api(),
args=event.args)
sizes = []
for type_, name in event.args:
if is_string(type_):
out(' size_t arg%(name)s_len = %(name)s ? MIN(strlen(%(name)s), MAX_TRACE_STRLEN) : 0;',
name=name)
strsizeinfo = "4 + arg%s_len" % name
sizes.append(strsizeinfo)
else:
sizes.append("8")
sizestr = " + ".join(sizes)
if len(event.args) == 0:
sizestr = '0'
out('',
' if (!trace_event_get_state(%(event_id)s)) {',
' return;',
' }',
'',
' if (trace_record_start(&rec, %(event_id)s, %(size_str)s)) {',
' return; /* Trace Buffer Full, Event Dropped ! */',
' }',
event_id='TRACE_' + event.name.upper(),
size_str=sizestr)
if len(event.args) > 0:
for type_, name in event.args:
# string
if is_string(type_):
out(' trace_record_write_str(&rec, %(name)s, arg%(name)s_len);',
name=name)
# pointer var (not string)
elif type_.endswith('*'):
out(' trace_record_write_u64(&rec, (uintptr_t)(uint64_t *)%(name)s);',
name=name)
# primitive data type
else:
out(' trace_record_write_u64(&rec, (uint64_t)%(name)s);',
name=name)
out(' trace_record_finish(&rec);',
'}',
'')
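# A small illustration of the size string built above, for a hypothetical
# event with arguments (uint64_t addr, const char *name); the full generated
# function body depends on the event object tracetool passes in (its .api()
# and .args), which is not defined in this backend:
#
#     sizes:   ["8", "4 + argname_len"]
#     sizestr: "8 + 4 + argname_len"
#
# i.e. every non-string argument is recorded as a fixed 8-byte value, while a
# string is recorded as a 4-byte length prefix plus at most MAX_TRACE_STRLEN
# bytes of data.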
|
github-account-because-they-want-it/django
|
refs/heads/master
|
django/dispatch/dispatcher.py
|
171
|
import sys
import threading
import warnings
import weakref
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.inspect import func_accepts_kwargs
from django.utils.six.moves import range
if six.PY2:
from .weakref_backports import WeakMethod
else:
from weakref import WeakMethod
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.configured and settings.DEBUG:
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if six.PY3:
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.
Arguments:
receiver
                The registered receiver to disconnect. May be None if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if weak is not None:
warnings.warn("Passing `weak` to disconnect has no effect.",
RemovedInDjango20Warning, stacklevel=2)
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
        receivers called if a receiver raises an error.
Arguments:
sender
                The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver. The traceback is always attached to the error at
``__traceback__``.
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
if not hasattr(err, '__traceback__'):
err.__traceback__ = sys.exc_info()[2]
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
        This checks for weak references and resolves them, then returns only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
        # idea, _remove_receiver() will be called as a side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
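# A minimal usage sketch (hypothetical signal and receiver names; in Django
# itself, built-in signals such as post_save are instances of this Signal
# class):
#
#     pizza_done = Signal(providing_args=["size", "toppings"])
#
#     @receiver(pizza_done)
#     def callback(sender, **kwargs):
#         print("pizza from %r: %s" % (sender, kwargs.get("toppings")))
#
#     pizza_done.send(sender=object(), size=12, toppings=["cheese"])
#     # returns [(callback, None)] after calling callback with
#     # signal=pizza_done, sender=<object>, size=12, toppings=["cheese"]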
|
leiferikb/bitpop
|
refs/heads/master
|
src/third_party/webpagereplay/third_party/dns/rdtypes/ANY/ISDN.py
|
248
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.tokenizer
class ISDN(dns.rdata.Rdata):
"""ISDN record
@ivar address: the ISDN address
@type address: string
@ivar subaddress: the ISDN subaddress (or '' if not present)
@type subaddress: string
@see: RFC 1183"""
__slots__ = ['address', 'subaddress']
def __init__(self, rdclass, rdtype, address, subaddress):
super(ISDN, self).__init__(rdclass, rdtype)
self.address = address
self.subaddress = subaddress
def to_text(self, origin=None, relativize=True, **kw):
if self.subaddress:
return '"%s" "%s"' % (dns.rdata._escapify(self.address),
dns.rdata._escapify(self.subaddress))
else:
return '"%s"' % dns.rdata._escapify(self.address)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
address = tok.get_string()
t = tok.get()
if not t.is_eol_or_eof():
tok.unget(t)
subaddress = tok.get_string()
else:
tok.unget(t)
subaddress = ''
tok.get_eol()
return cls(rdclass, rdtype, address, subaddress)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
l = len(self.address)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.address)
l = len(self.subaddress)
if l > 0:
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.subaddress)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
address = wire[current : current + l]
current += l
rdlen -= l
if rdlen > 0:
l = ord(wire[current])
current += 1
rdlen -= 1
if l != rdlen:
raise dns.exception.FormError
subaddress = wire[current : current + l]
else:
subaddress = ''
return cls(rdclass, rdtype, address, subaddress)
from_wire = classmethod(from_wire)
def _cmp(self, other):
v = cmp(self.address, other.address)
if v == 0:
v = cmp(self.subaddress, other.subaddress)
return v
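# A hedged sketch of the RFC 1183 wire form produced by to_wire() above, using
# a made-up address/subaddress pair: each field is written as a one-byte
# length followed by the bytes themselves, and the subaddress is simply
# omitted when empty.
#
#     ISDN(rdclass, rdtype, 'isdn-address', '004').to_wire(f)
#     # writes: '\x0c' 'isdn-address' '\x03' '004'
#     ISDN(rdclass, rdtype, 'isdn-address', '').to_wire(f)
#     # writes: '\x0c' 'isdn-address'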
|
bepitulaz/huntingdimana
|
refs/heads/master
|
env/Lib/encodings/utf_16.py
|
404
|
""" Python 'utf-16' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs, sys
### Codec APIs
encode = codecs.utf_16_encode
def decode(input, errors='strict'):
return codecs.utf_16_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
codecs.IncrementalEncoder.__init__(self, errors)
self.encoder = None
def encode(self, input, final=False):
if self.encoder is None:
result = codecs.utf_16_encode(input, self.errors)[0]
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
return result
return self.encoder(input, self.errors)[0]
def reset(self):
codecs.IncrementalEncoder.reset(self)
self.encoder = None
def getstate(self):
# state info we return to the caller:
# 0: stream is in natural order for this platform
# 2: endianness hasn't been determined yet
# (we're never writing in unnatural order)
return (2 if self.encoder is None else 0)
def setstate(self, state):
if state:
self.encoder = None
else:
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def __init__(self, errors='strict'):
codecs.BufferedIncrementalDecoder.__init__(self, errors)
self.decoder = None
def _buffer_decode(self, input, errors, final):
if self.decoder is None:
(output, consumed, byteorder) = \
codecs.utf_16_ex_decode(input, errors, 0, final)
if byteorder == -1:
self.decoder = codecs.utf_16_le_decode
elif byteorder == 1:
self.decoder = codecs.utf_16_be_decode
elif consumed >= 2:
raise UnicodeError("UTF-16 stream does not start with BOM")
return (output, consumed)
return self.decoder(input, self.errors, final)
def reset(self):
codecs.BufferedIncrementalDecoder.reset(self)
self.decoder = None
class StreamWriter(codecs.StreamWriter):
def __init__(self, stream, errors='strict'):
codecs.StreamWriter.__init__(self, stream, errors)
self.encoder = None
def reset(self):
codecs.StreamWriter.reset(self)
self.encoder = None
def encode(self, input, errors='strict'):
if self.encoder is None:
result = codecs.utf_16_encode(input, errors)
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
return result
else:
return self.encoder(input, errors)
class StreamReader(codecs.StreamReader):
def reset(self):
codecs.StreamReader.reset(self)
try:
del self.decode
except AttributeError:
pass
def decode(self, input, errors='strict'):
(object, consumed, byteorder) = \
codecs.utf_16_ex_decode(input, errors, 0, False)
if byteorder == -1:
self.decode = codecs.utf_16_le_decode
elif byteorder == 1:
self.decode = codecs.utf_16_be_decode
elif consumed>=2:
raise UnicodeError,"UTF-16 stream does not start with BOM"
return (object, consumed)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
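# A small usage sketch (Python 2 style, matching the rest of this module):
# feeding the incremental decoder a BOM first pins the byte order, and later
# chunks are then decoded with the matching UTF-16-LE/BE codec.
#
#     dec = IncrementalDecoder()
#     dec.decode('\xff\xfe')                    # BOM only -> u''
#     dec.decode('h\x00i\x00', final=True)      # -> u'hi'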
|
Unidata/MetPy
|
refs/heads/gh-pages
|
v0.8/_downloads/Four_Panel_Map.py
|
6
|
# Copyright (c) 2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Four Panel Map
===============
By reading model output data from a netCDF file, we can create a four panel plot showing:
* 300 hPa heights and winds
* 500 hPa heights and absolute vorticity
* Surface temperatures
* Precipitable water
"""
###########################################
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage as ndimage
import xarray as xr
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo
###########################################
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
###########################################
# Function used to create the map subplots
def plot_background(ax):
ax.set_extent([235., 290., 20., 55.])
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.5)
ax.add_feature(cfeature.STATES, linewidth=0.5)
ax.add_feature(cfeature.BORDERS, linewidth=0.5)
return ax
###########################################
# Open the example netCDF data
ds = xr.open_dataset(get_test_data('gfs_output.nc', False))
print(ds)
###########################################
# Combine 1D latitude and longitudes into a 2D grid of locations
lon_2d, lat_2d = np.meshgrid(ds['lon'], ds['lat'])
###########################################
# Pull out the data
vort_500 = ds['vort_500'][0]
surface_temp = ds['temp'][0]
precip_water = ds['precip_water'][0]
winds_300 = ds['winds_300'][0]
###########################################
# Do unit conversions to what we wish to plot
vort_500 = vort_500 * 1e5
surface_temp.metpy.convert_units('degF')
precip_water.metpy.convert_units('inches')
winds_300.metpy.convert_units('knots')
###########################################
# Smooth the height data
heights_300 = ndimage.gaussian_filter(ds['heights_300'][0], sigma=1.5, order=0)
heights_500 = ndimage.gaussian_filter(ds['heights_500'][0], sigma=1.5, order=0)
###########################################
# Create the figure and plot background on different axes
fig, axarr = plt.subplots(nrows=2, ncols=2, figsize=(20, 13), constrained_layout=True,
subplot_kw={'projection': crs})
add_metpy_logo(fig, 140, 120, size='large')
axlist = axarr.flatten()
for ax in axlist:
plot_background(ax)
# Upper left plot - 300-hPa winds and geopotential heights
cf1 = axlist[0].contourf(lon_2d, lat_2d, winds_300, cmap='cool', transform=ccrs.PlateCarree())
c1 = axlist[0].contour(lon_2d, lat_2d, heights_300, colors='black', linewidths=2,
transform=ccrs.PlateCarree())
axlist[0].clabel(c1, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
axlist[0].set_title('300-hPa Wind Speeds and Heights', fontsize=16)
cb1 = fig.colorbar(cf1, ax=axlist[0], orientation='horizontal', shrink=0.74, pad=0)
cb1.set_label('knots', size='x-large')
# Upper right plot - 500mb absolute vorticity and geopotential heights
cf2 = axlist[1].contourf(lon_2d, lat_2d, vort_500, cmap='BrBG', transform=ccrs.PlateCarree(),
zorder=0, norm=plt.Normalize(-32, 32))
c2 = axlist[1].contour(lon_2d, lat_2d, heights_500, colors='k', linewidths=2,
transform=ccrs.PlateCarree())
axlist[1].clabel(c2, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
axlist[1].set_title('500-hPa Absolute Vorticity and Heights', fontsize=16)
cb2 = fig.colorbar(cf2, ax=axlist[1], orientation='horizontal', shrink=0.74, pad=0)
cb2.set_label(r'$10^{-5}$ s$^{-1}$', size='x-large')
# Lower left plot - surface temperatures
cf3 = axlist[2].contourf(lon_2d, lat_2d, surface_temp, cmap='YlOrRd',
transform=ccrs.PlateCarree(), zorder=0)
axlist[2].set_title('Surface Temperatures', fontsize=16)
cb3 = fig.colorbar(cf3, ax=axlist[2], orientation='horizontal', shrink=0.74, pad=0)
cb3.set_label(u'\N{DEGREE FAHRENHEIT}', size='x-large')
# Lower right plot - precipitable water entire atmosphere
cf4 = axlist[3].contourf(lon_2d, lat_2d, precip_water, cmap='Greens',
transform=ccrs.PlateCarree(), zorder=0)
axlist[3].set_title('Precipitable Water', fontsize=16)
cb4 = fig.colorbar(cf4, ax=axlist[3], orientation='horizontal', shrink=0.74, pad=0)
cb4.set_label('in.', size='x-large')
# Set height padding for plots
fig.set_constrained_layout_pads(w_pad=0., h_pad=0.1, hspace=0., wspace=0.)
# Set figure title
fig.suptitle(ds['time'][0].dt.strftime('%d %B %Y %H:%MZ'), fontsize=24)
# Display the plot
plt.show()
|
pitch-sands/i-MPI
|
refs/heads/master
|
flask/Lib/site-packages/lamson/bounce.py
|
1
|
"""
Bounce analysis module for Lamson. It uses an algorithm that tries
to simply collect the headers that are most likely found in a bounce
message, and then determine a probability based on what it finds.
"""
import re
from functools import wraps
BOUNCE_MATCHERS = {
'Action': re.compile(r'(failed|delayed|delivered|relayed|expanded)', re.IGNORECASE | re.DOTALL),
'Content-Description': re.compile(r'(Notification|Undelivered Message|Delivery Report)', re.IGNORECASE | re.DOTALL),
'Diagnostic-Code': re.compile(r'(.+);\s*([0-9\-\.]+)?\s*(.*)', re.IGNORECASE | re.DOTALL),
'Final-Recipient': re.compile(r'(.+);\s*(.*)', re.IGNORECASE | re.DOTALL),
'Received': re.compile(r'(.+)', re.IGNORECASE | re.DOTALL),
'Remote-Mta': re.compile(r'(.+);\s*(.*)', re.IGNORECASE | re.DOTALL),
'Reporting-Mta': re.compile(r'(.+);\s*(.*)', re.IGNORECASE | re.DOTALL),
'Status': re.compile(r'([0-9]+)\.([0-9]+)\.([0-9]+)', re.IGNORECASE | re.DOTALL)
}
BOUNCE_MAX = len(BOUNCE_MATCHERS) * 2.0
PRIMARY_STATUS_CODES = {
u'1': u'Unknown Status Code 1',
u'2': u'Success',
u'3': u'Temporary Failure',
u'4': u'Persistent Transient Failure',
u'5': u'Permanent Failure'
}
SECONDARY_STATUS_CODES = {
u'0': u'Other or Undefined Status',
u'1': u'Addressing Status',
u'2': u'Mailbox Status',
u'3': u'Mail System Status',
u'4': u'Network and Routing Status',
u'5': u'Mail Delivery Protocol Status',
u'6': u'Message Content or Media Status',
u'7': u'Security or Policy Status',
}
COMBINED_STATUS_CODES = {
u'00': u'Not Applicable',
u'10': u'Other address status',
u'11': u'Bad destination mailbox address',
u'12': u'Bad destination system address',
u'13': u'Bad destination mailbox address syntax',
u'14': u'Destination mailbox address ambiguous',
u'15': u'Destination mailbox address valid',
u'16': u'Mailbox has moved',
u'17': u'Bad sender\'s mailbox address syntax',
u'18': u'Bad sender\'s system address',
u'20': u'Other or undefined mailbox status',
u'21': u'Mailbox disabled, not accepting messages',
u'22': u'Mailbox full',
u'23': u'Message length exceeds administrative limit.',
u'24': u'Mailing list expansion problem',
u'30': u'Other or undefined mail system status',
u'31': u'Mail system full',
u'32': u'System not accepting network messages',
u'33': u'System not capable of selected features',
u'34': u'Message too big for system',
u'40': u'Other or undefined network or routing status',
u'41': u'No answer from host',
u'42': u'Bad connection',
u'43': u'Routing server failure',
u'44': u'Unable to route',
u'45': u'Network congestion',
u'46': u'Routing loop detected',
u'47': u'Delivery time expired',
u'50': u'Other or undefined protocol status',
u'51': u'Invalid command',
u'52': u'Syntax error',
u'53': u'Too many recipients',
u'54': u'Invalid command arguments',
u'55': u'Wrong protocol version',
u'60': u'Other or undefined media error',
u'61': u'Media not supported',
u'62': u'Conversion required and prohibited',
u'63': u'Conversion required but not supported',
u'64': u'Conversion with loss performed',
u'65': u'Conversion failed',
u'70': u'Other or undefined security status',
u'71': u'Delivery not authorized, message refused',
u'72': u'Mailing list expansion prohibited',
u'73': u'Security conversion required but not possible',
u'74': u'Security features not supported',
u'75': u'Cryptographic failure',
u'76': u'Cryptographic algorithm not supported',
u'77': u'Message integrity failure',
}
def match_bounce_headers(msg):
"""
Goes through the headers in a potential bounce message recursively
and collects all the answers for the usual bounce headers.
"""
matches = {'Content-Description-Parts': {}}
for part in msg.base.walk():
for k in BOUNCE_MATCHERS:
if k in part.headers:
if k not in matches:
matches[k] = set()
# kind of an odd place to put this, but it's the easiest way
if k == 'Content-Description':
matches['Content-Description-Parts'][part.headers[k].lower()] = part
matches[k].add(part.headers[k])
return matches
def detect(msg):
"""
Given a message, this will calculate a probability score based on
possible bounce headers it finds and return a lamson.bounce.BounceAnalyzer
object for further analysis.
The detection algorithm is very simple but still accurate. For each header
it finds it adds a point to the score. It then uses the regex in BOUNCE_MATCHERS
to see if the value of that header is parseable, and if it is it adds another
point to the score. The final probability is based on how many headers and matchers
were found out of the total possible.
Finally, a header will be included in the score if it doesn't match in value, but
it WILL NOT be included in the headers used by BounceAnalyzer to give you meanings
like remote_mta and such.
Because this algorithm is very dumb, you are free to add to BOUNCE_MATCHERS in your
    boot files if there are special headers you need to detect in your own code.
"""
originals = match_bounce_headers(msg)
results = {'Content-Description-Parts':
originals['Content-Description-Parts']}
score = 0
del originals['Content-Description-Parts']
for key in originals:
score += 1 # score still goes up, even if value doesn't parse
r = BOUNCE_MATCHERS[key]
scan = (r.match(v) for v in originals[key])
matched = [m.groups() for m in scan if m]
# a key is counted in the score, but only added if it matches
if len(matched) > 0:
score += len(matched) / len(originals[key])
results[key] = matched
return BounceAnalyzer(results, score / BOUNCE_MAX)
class BounceAnalyzer(object):
"""
BounceAnalyzer collects up the score and the headers and gives more
meaningful interaction with them. You can keep it simple and just use
is_hard, is_soft, and probable methods to see if there was a bounce.
If you need more information then attributes are set for each of the following:
* primary_status -- The main status number that determines hard vs soft.
* secondary_status -- Advice status.
* combined_status -- the 2nd and 3rd number combined gives more detail.
* remote_mta -- The MTA that you sent mail to and aborted.
* reporting_mta -- The MTA that was sending the mail and has to report to you.
* diagnostic_codes -- Human readable codes usually with info from the provider.
* action -- Usually 'failed', and turns out to be not too useful.
* content_parts -- All the attachments found as a hash keyed by the type.
* original -- The original message, if it's found.
* report -- All report elements, as lamson.encoding.MailBase raw messages.
* notification -- Usually the detailed reason you bounced.
"""
def __init__(self, headers, score):
"""
Initializes all the various attributes you can use to analyze the bounce
results.
"""
self.headers = headers
self.score = score
if 'Status' in self.headers:
status = self.headers['Status'][0]
self.primary_status = int(status[0]), PRIMARY_STATUS_CODES[status[0]]
self.secondary_status = int(status[1]), SECONDARY_STATUS_CODES[status[1]]
combined = "".join(status[1:])
self.combined_status = int(combined), COMBINED_STATUS_CODES[combined]
else:
self.primary_status = (None, None)
self.secondary_status = (None, None)
self.combined_status = (None, None)
if 'Remote-Mta' in self.headers:
self.remote_mta = self.headers['Remote-Mta'][0][1]
else:
self.remote_mta = None
if 'Reporting-Mta' in self.headers:
self.reporting_mta = self.headers['Reporting-Mta'][0][1]
else:
self.reporting_mta = None
if 'Final-Recipient' in self.headers:
self.final_recipient = self.headers['Final-Recipient'][0][1]
else:
self.final_recipient = None
if 'Diagnostic-Code' in self.headers:
self.diagnostic_codes = self.headers['Diagnostic-Code'][0][1:]
else:
self.diagnostic_codes = [None, None]
if 'Action' in self.headers:
self.action = self.headers['Action'][0][0]
else:
self.action = None
# these are forced lowercase because they're so damn random
self.content_parts = self.headers['Content-Description-Parts']
# and of course, this isn't the original original, it's the wrapper
self.original = self.content_parts.get('undelivered message', None)
if self.original and self.original.parts:
self.original = self.original.parts[0]
self.report = self.content_parts.get('delivery report', None)
if self.report and self.report.parts:
self.report = self.report.parts
self.notification = self.content_parts.get('notification', None)
def is_hard(self):
"""
Tells you if this was a hard bounce, which is determined by the message
        being a probable bounce with a primary_status greater than 4.
"""
return self.probable() and self.primary_status[0] > 4
def is_soft(self):
"""Basically the inverse of is_hard()"""
return self.probable() and self.primary_status[0] <= 4
def probable(self, threshold=0.3):
"""
Determines if this is probably a bounce based on the score
probability. Default threshold is 0.3 which is conservative.
"""
return self.score > threshold
def error_for_humans(self):
"""
Constructs an error from the status codes that you can print to
a user.
"""
if self.primary_status[0]:
return "%s, %s, %s" % (self.primary_status[1],
self.secondary_status[1],
self.combined_status[1])
else:
return "No status codes found in bounce message."
class bounce_to(object):
"""
Used to route bounce messages to a handler for either soft or hard bounces.
Set the soft/hard parameters to the function that represents the handler.
The function should take one argument of the message that it needs to handle
and should have a route that handles everything.
WARNING: You should only place this on the START of modules that will
receive bounces, and every bounce handler should return START. The reason
is that the bounce emails come from *mail daemons* not the actual person
who bounced. You can find out who that person is using
message.bounce.final_recipient. But the bounce handler is *actually*
interacting with a message from something like MAILER-DAEMON@somehost.com.
If you don't go back to start immediately then you will mess with the state
for this address, which can be bad.
"""
def __init__(self, soft=None, hard=None):
self.soft = soft
self.hard = hard
        assert self.soft and self.hard, "You must provide both soft and hard handlers"
def __call__(self, func):
@wraps(func)
def bounce_wrapper(message, *args, **kw):
if message.is_bounce():
if message.bounce.is_soft():
return self.soft(message)
else:
return self.hard(message)
else:
return func(message, *args, **kw)
return bounce_wrapper
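# Illustrative sketch (not part of the original file): the bounce_to docstring
# above describes decorating a module's START handler. The handler names and
# the lamson @route pattern below are assumptions used only to show that shape.
#
#     @route("(to)@(host)")
#     @bounce_to(soft=SOFT_BOUNCE, hard=HARD_BOUNCE)
#     def START(message, to=None, host=None):
#         ...
#
#     def SOFT_BOUNCE(message):
#         # temporary failure; the real recipient is message.bounce.final_recipient
#         return START
#
#     def HARD_BOUNCE(message):
#         # permanent failure; drop message.bounce.final_recipient and go back to START
#         return START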
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-3.2/Lib/test/json_tests/test_scanstring.py
|
2
|
import sys
import decimal
from unittest import TestCase
import json
import json.decoder
class TestScanString(TestCase):
def test_py_scanstring(self):
self._test_scanstring(json.decoder.py_scanstring)
def test_c_scanstring(self):
if json.decoder.c_scanstring is not None:
self._test_scanstring(json.decoder.c_scanstring)
def _test_scanstring(self, scanstring):
self.assertEqual(
scanstring('"z\\ud834\\udd20x"', 1, True),
('z\U0001d120x', 16))
if sys.maxunicode == 65535:
self.assertEqual(
scanstring('"z\U0001d120x"', 1, True),
('z\U0001d120x', 6))
else:
self.assertEqual(
scanstring('"z\U0001d120x"', 1, True),
('z\U0001d120x', 5))
self.assertEqual(
scanstring('"\\u007b"', 1, True),
('{', 8))
self.assertEqual(
scanstring('"A JSON payload should be an object or array, not a string."', 1, True),
('A JSON payload should be an object or array, not a string.', 60))
self.assertEqual(
scanstring('["Unclosed array"', 2, True),
('Unclosed array', 17))
self.assertEqual(
scanstring('["extra comma",]', 2, True),
('extra comma', 14))
self.assertEqual(
scanstring('["double extra comma",,]', 2, True),
('double extra comma', 21))
self.assertEqual(
scanstring('["Comma after the close"],', 2, True),
('Comma after the close', 24))
self.assertEqual(
scanstring('["Extra close"]]', 2, True),
('Extra close', 14))
self.assertEqual(
scanstring('{"Extra comma": true,}', 2, True),
('Extra comma', 14))
self.assertEqual(
scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, True),
('Extra value after close', 26))
self.assertEqual(
scanstring('{"Illegal expression": 1 + 2}', 2, True),
('Illegal expression', 21))
self.assertEqual(
scanstring('{"Illegal invocation": alert()}', 2, True),
('Illegal invocation', 21))
self.assertEqual(
scanstring('{"Numbers cannot have leading zeroes": 013}', 2, True),
('Numbers cannot have leading zeroes', 37))
self.assertEqual(
scanstring('{"Numbers cannot be hex": 0x14}', 2, True),
('Numbers cannot be hex', 24))
self.assertEqual(
scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, True),
('Too deep', 30))
self.assertEqual(
scanstring('{"Missing colon" null}', 2, True),
('Missing colon', 16))
self.assertEqual(
scanstring('{"Double colon":: null}', 2, True),
('Double colon', 15))
self.assertEqual(
scanstring('{"Comma instead of colon", null}', 2, True),
('Comma instead of colon', 25))
self.assertEqual(
scanstring('["Colon instead of comma": false]', 2, True),
('Colon instead of comma', 25))
self.assertEqual(
scanstring('["Bad value", truth]', 2, True),
('Bad value', 12))
def test_overflow(self):
self.assertRaises(OverflowError, json.decoder.scanstring, b"xxx", sys.maxsize+1)
|
Serebriakov/python-llfuse
|
refs/heads/master
|
setup.py
|
2
|
#!/usr/bin/env python
'''
setup.py
Installation script for LLFUSE.
Copyright (C) Nikolaus Rath <Nikolaus@rath.org>
This file is part of LLFUSE (http://python-llfuse.googlecode.com).
LLFUSE can be distributed under the terms of the GNU LGPL.
'''
from __future__ import division, print_function, absolute_import
import sys
import os
import subprocess
try:
import setuptools
except ImportError:
raise SystemExit('Setuptools package not found. Please install from '
'https://pypi.python.org/pypi/setuptools')
from setuptools import Extension
# Add util to load path
basedir = os.path.abspath(os.path.dirname(sys.argv[0]))
sys.path.insert(0, os.path.join(basedir, 'util'))
# Add src to load path, important for Sphinx autodoc
# to work properly
sys.path.insert(0, os.path.join(basedir, 'src'))
LLFUSE_VERSION = '0.40'
def main():
try:
from sphinx.application import Sphinx #pylint: disable-msg=W0612
except ImportError:
pass
else:
fix_docutils()
with open(os.path.join(basedir, 'rst', 'about.rst'), 'r') as fh:
long_desc = fh.read()
compile_args = pkg_config('fuse', cflags=True, ldflags=False, min_ver='2.8.0')
compile_args += ['-DFUSE_USE_VERSION=28', '-Wall',
'-DLLFUSE_VERSION="%s"' % LLFUSE_VERSION]
# Enable fatal warnings only when compiling from Mercurial tip.
# Otherwise, this breaks both forward and backward compatibility
# (because compilation with newer compiler may fail if additional
# warnings are added, and compilation with older compiler may fail
# if it doesn't know about a newer -Wno-* option).
if os.path.exists(os.path.join(basedir, 'MANIFEST.in')):
print('MANIFEST.in exists, compiling with developer options')
compile_args += [ '-Werror', '-Wextra', '-Wconversion',
'-Wno-sign-conversion' ]
# http://bugs.python.org/issue7576
if sys.version_info[0] == 3 and sys.version_info[1] < 2:
compile_args.append('-Wno-missing-field-initializers')
# http://trac.cython.org/cython_trac/ticket/811
compile_args.append('-Wno-unused-but-set-variable')
# http://trac.cython.org/cython_trac/ticket/813
compile_args.append('-Wno-maybe-uninitialized')
# http://bugs.python.org/issue969718
if sys.version_info[0] == 2:
compile_args.append('-fno-strict-aliasing')
link_args = pkg_config('fuse', cflags=False, ldflags=True, min_ver='2.8.0')
link_args.append('-lpthread')
if os.uname()[0] == 'Linux':
link_args.append('-lrt')
compile_args.append('-DHAVE_STRUCT_STAT_ST_ATIM')
elif os.uname()[0] in ('Darwin', 'FreeBSD'):
compile_args.append('-DHAVE_STRUCT_STAT_ST_ATIMESPEC')
else:
print("NOTE: unknown system (%s), nanosecond resolution file times "
"will not be available" % os.uname()[0])
setuptools.setup(
name='llfuse',
zip_safe=True,
version=LLFUSE_VERSION,
description='Python bindings for the low-level FUSE API',
long_description=long_desc,
author='Nikolaus Rath',
author_email='Nikolaus@rath.org',
url='http://python-llfuse.googlecode.com/',
download_url='http://code.google.com/p/python-llfuse/downloads/',
license='LGPL',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Filesystems',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: BSD :: FreeBSD'],
platforms=[ 'Linux', 'FreeBSD', 'OS X' ],
keywords=['FUSE', 'python' ],
package_dir={'': 'src'},
packages=setuptools.find_packages('src'),
provides=['llfuse'],
ext_modules=[Extension('llfuse.capi', ['src/llfuse/capi.c'],
extra_compile_args=compile_args,
extra_link_args=link_args)],
cmdclass={'build_cython': build_cython,
'upload_docs': upload_docs },
command_options={
'build_sphinx': {
'version': ('setup.py', LLFUSE_VERSION),
'release': ('setup.py', LLFUSE_VERSION),
}}
)
def pkg_config(pkg, cflags=True, ldflags=False, min_ver=None):
'''Frontend to ``pkg-config``'''
if min_ver:
cmd = ['pkg-config', pkg, '--atleast-version', min_ver ]
if subprocess.call(cmd) != 0:
cmd = ['pkg-config', '--modversion', pkg ]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
version = proc.communicate()[0].strip()
if not version:
raise SystemExit() # pkg-config generates error message already
else:
raise SystemExit('%s version too old (found: %s, required: %s)'
% (pkg, version, min_ver))
cmd = ['pkg-config', pkg ]
if cflags:
cmd.append('--cflags')
if ldflags:
cmd.append('--libs')
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
cflags = proc.stdout.readline().rstrip()
proc.stdout.close()
if proc.wait() != 0:
        raise SystemExit('Failed to execute pkg-config. Exit code: %d.\n'
                         'Check that the %s development package has been installed properly.'
                         % (proc.returncode, pkg))
return cflags.decode('us-ascii').split()
class build_cython(setuptools.Command):
user_options = []
boolean_options = []
description = "Compile .pyx to .c"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
from Cython.Compiler.Main import compile as cython_compile
from Cython.Compiler.Options import extra_warnings
except ImportError:
raise SystemExit('Cython needs to be installed for this command')
directives = dict(extra_warnings)
directives['embedsignature'] = True
directives['language_level'] = 3
# http://trac.cython.org/cython_trac/ticket/714
directives['warn.maybe_uninitialized'] = False
options = {'include_path': [ os.path.join(basedir, 'Include') ],
'recursive': False, 'verbose': True, 'timestamps': False,
'compiler_directives': directives, 'warning_errors': True,
'compile_time_env': {} }
for sysname in ('linux', 'freebsd', 'darwin'):
print('compiling capi.pyx to capi_%s.c...' % (sysname,))
options['compile_time_env']['TARGET_PLATFORM'] = sysname
options['output_file'] = os.path.join(basedir, 'src', 'llfuse',
'capi_%s.c' % (sysname,))
res = cython_compile(os.path.join(basedir, 'src', 'llfuse', 'capi.pyx'),
full_module_name='llfuse.capi', **options)
if res.num_errors != 0:
raise SystemExit('Cython encountered errors.')
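# Usage note (assumption based on the cmdclass mapping passed to setup() above):
# this command would typically be invoked as
#     python setup.py build_cython
# to regenerate the per-platform capi_<sysname>.c sources before building.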
class upload_docs(setuptools.Command):
user_options = []
boolean_options = []
description = "Upload documentation"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
subprocess.check_call(['rsync', '-aHv', '--del', os.path.join(basedir, 'doc', 'html') + '/',
'ebox.rath.org:/srv/www.rath.org/public_html/llfuse-docs/'])
def fix_docutils():
'''Work around https://bitbucket.org/birkenfeld/sphinx/issue/1154/'''
import docutils.parsers
from docutils.parsers import rst
old_getclass = docutils.parsers.get_parser_class
# Check if bug is there
try:
old_getclass('rst')
except AttributeError:
pass
else:
return
def get_parser_class(parser_name):
"""Return the Parser class from the `parser_name` module."""
if parser_name in ('rst', 'restructuredtext'):
return rst.Parser
else:
return old_getclass(parser_name)
docutils.parsers.get_parser_class = get_parser_class
assert docutils.parsers.get_parser_class('rst') is rst.Parser
if __name__ == '__main__':
main()
|
rszalski/python-gae-spike
|
refs/heads/master
|
lib/flask/signals.py
|
783
|
# -*- coding: utf-8 -*-
"""
flask.signals
~~~~~~~~~~~~~
Implements signals based on blinker if available, otherwise
falls silently back to a noop
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
signals_available = False
try:
from blinker import Namespace
signals_available = True
except ImportError:
class Namespace(object):
def signal(self, name, doc=None):
return _FakeSignal(name, doc)
class _FakeSignal(object):
"""If blinker is unavailable, create a fake class with the same
interface that allows sending of signals but will fail with an
        error on anything else.  Instead of doing anything on send, it
        will just ignore the arguments and do nothing.
"""
def __init__(self, name, doc=None):
self.name = name
self.__doc__ = doc
def _fail(self, *args, **kwargs):
raise RuntimeError('signalling support is unavailable '
'because the blinker library is '
'not installed.')
send = lambda *a, **kw: None
connect = disconnect = has_receivers_for = receivers_for = \
temporarily_connected_to = connected_to = _fail
del _fail
# the namespace for code signals. If you are not flask code, do
# not put signals in here. Create your own namespace instead.
_signals = Namespace()
# core signals. For usage examples grep the sourcecode or consult
# the API documentation in docs/api.rst as well as docs/signals.rst
template_rendered = _signals.signal('template-rendered')
request_started = _signals.signal('request-started')
request_finished = _signals.signal('request-finished')
request_tearing_down = _signals.signal('request-tearing-down')
got_request_exception = _signals.signal('got-request-exception')
appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
appcontext_pushed = _signals.signal('appcontext-pushed')
appcontext_popped = _signals.signal('appcontext-popped')
message_flashed = _signals.signal('message-flashed')
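# Illustrative sketch (not part of the original file): receivers subscribe to
# the core signals above with connect().  When blinker is missing, _FakeSignal
# makes send() a no-op but connect() raises RuntimeError, which is why the
# signals_available guard matters.  The function and app names are assumptions.
#
#     def log_template_renders(sender, template, context, **extra):
#         sender.logger.debug('Rendered %s', template.name)
#
#     if signals_available:
#         template_rendered.connect(log_template_renders, app)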
|
sjuxax/raggregate
|
refs/heads/master
|
raggregate/tests/test_epistle.py
|
1
|
from pyramid import testing
from raggregate.tests import BaseTest
from raggregate.queries import epistle as epistle_queries
from raggregate.queries import users
from raggregate.models.epistle import Epistle
from raggregate.views.epistle import _unwrap_list
class TestEpistles(BaseTest):
def create_epistle(self, recipient = None, sender = None, body = None, subject = None):
if not recipient:
recipient = users.create_user(username = 't1', password='test')
if not sender:
sender = users.create_user(username = 't2', password='test')
if not body:
body = u'test epistle'
if not subject:
subject = u'a simple test'
epistle = Epistle(recipient.id, sender.id, body, subject = subject)
self.dbsession.add(epistle)
self.dbsession.flush()
return epistle
def test_get_epistle_by_id(self):
epistle = self.create_epistle()
result = epistle_queries.get_epistle_by_id(epistle.id)
assert epistle.id == result.id
def test_send_epistle_by_id(self):
#@TODO: as this test illustrates, we should make a "send_epistle" function in queries
u1 = users.create_user(username = 't1', password='test')
u2 = users.create_user(username = 't2', password='test')
ep = self.create_epistle(u1, u2, u'test epistle', subject = u'a simple test')
epd = epistle_queries.get_epistle_by_recipient_id(u1.id)[0]
assert ep.id == epd.id
def test_unwrap_list_generator(self):
test_list = [[[1]], [2], 3, [5], [[6]], [[9]]]
        ulist = list(_unwrap_list(test_list))
assert ulist == [1, 2, 3, 5, 6, 9]
def test_get_epistle_by_sender_id(self):
epistle = self.create_epistle()
result = epistle_queries.get_epistle_by_sender_id(epistle.sender)[0]
assert epistle.id == result.id
def test_get_epistle_by_sender_name(self):
user = users.create_user(username = 'test', password = 'test')
epistle = self.create_epistle(sender = user)
result = epistle_queries.get_epistle_by_sender_name(user.name)[0]
assert epistle.id == result.id
def test_get_epistle_by_recipient_name(self):
user = users.create_user(username = 'test', password = 'test')
epistle = self.create_epistle(recipient = user)
result = epistle_queries.get_epistle_by_recipient_name(user.name)[0]
assert epistle.id == result.id
def test_get_new_message_num(self):
recipient = users.create_user(username = 'test', password = 'test')
epistle = self.create_epistle(recipient = recipient)
result = epistle_queries.get_new_message_num(recipient.id)
assert result == 1
def test_mark_epistle_read(self):
#@TODO: Make query accept a dbsession variable so this can be tested
#epistle = self.create_epistle()
#epistle = epistle_queries.mark_epistle_read(epistle)
#assert epistle.unread == False
assert True
def test_get_unread_epistles_by_recipient_id(self):
epistle = self.create_epistle()
result = epistle_queries.get_unread_epistles_by_recipient_id(epistle.recipient)[0]
assert epistle.id == result.id
|
mikegraham/dask
|
refs/heads/master
|
dask/tests/__init__.py
|
12133432
| |
michailbrynard/django-skeleton
|
refs/heads/master
|
src/config/__init__.py
|
12133432
| |
eregs/regulations-core
|
refs/heads/master
|
regcore/migrations/0009_auto_20160322_1646.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('regcore', '0008_auto_20160314_1144'),
]
operations = [
migrations.RenameField(
model_name='layer',
old_name='reference',
new_name='doc_id',
),
migrations.AddField(
model_name='layer',
name='doc_type',
field=models.SlugField(default='cfr', max_length=20),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='layer',
unique_together=set([('name', 'doc_type', 'doc_id')]),
),
migrations.AlterIndexTogether(
name='layer',
index_together=set([('name', 'doc_type', 'doc_id')]),
),
]
|
luotao1/Paddle
|
refs/heads/develop
|
python/paddle/fluid/tests/unittests/dist_ctr.py
|
2
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import os
import dist_ctr_reader
from test_dist_base import TestDistRunnerBase, runtime_main
IS_SPARSE = True
os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
class TestDistCTR2x2(TestDistRunnerBase):
def get_model(self, batch_size=2):
dnn_input_dim, lr_input_dim = dist_ctr_reader.load_data_meta()
""" network definition """
dnn_data = fluid.layers.data(
name="dnn_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False)
lr_data = fluid.layers.data(
name="lr_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False)
label = fluid.layers.data(
name="click",
shape=[-1, 1],
dtype="int64",
lod_level=0,
append_batch_size=False)
# build dnn model
dnn_layer_dims = [128, 64, 32, 1]
dnn_embedding = fluid.layers.embedding(
is_distributed=False,
input=dnn_data,
size=[dnn_input_dim, dnn_layer_dims[0]],
param_attr=fluid.ParamAttr(
name="deep_embedding",
initializer=fluid.initializer.Constant(value=0.01)),
is_sparse=IS_SPARSE)
dnn_pool = fluid.layers.sequence_pool(
input=dnn_embedding, pool_type="sum")
dnn_out = dnn_pool
for i, dim in enumerate(dnn_layer_dims[1:]):
fc = fluid.layers.fc(
input=dnn_out,
size=dim,
act="relu",
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01)),
name='dnn-fc-%d' % i)
dnn_out = fc
# build lr model
        lr_embedding = fluid.layers.embedding(
is_distributed=False,
input=lr_data,
size=[lr_input_dim, 1],
param_attr=fluid.ParamAttr(
name="wide_embedding",
initializer=fluid.initializer.Constant(value=0.01)),
is_sparse=IS_SPARSE)
        lr_pool = fluid.layers.sequence_pool(input=lr_embedding, pool_type="sum")
merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax')
acc = fluid.layers.accuracy(input=predict, label=label)
auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict,
label=label)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
inference_program = paddle.fluid.default_main_program().clone()
regularization = None
use_l2_decay = bool(os.getenv('USE_L2_DECAY', 0))
if use_l2_decay:
regularization = fluid.regularizer.L2DecayRegularizer(
regularization_coeff=1e-1)
use_lr_decay = bool(os.getenv('LR_DECAY', 0))
lr = 0.0001
if use_lr_decay:
lr = fluid.layers.exponential_decay(
learning_rate=0.0001,
decay_steps=10000,
decay_rate=0.999,
staircase=True)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=lr,
regularization=regularization)
sgd_optimizer.minimize(avg_cost)
dataset = dist_ctr_reader.Dataset()
train_reader = paddle.batch(dataset.train(), batch_size=batch_size)
test_reader = paddle.batch(dataset.test(), batch_size=batch_size)
return inference_program, avg_cost, train_reader, test_reader, None, predict
if __name__ == "__main__":
runtime_main(TestDistCTR2x2)
|
ZhangXinNan/tensorflow
|
refs/heads/master
|
tensorflow/python/saved_model/saved_model_test.py
|
4
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import main_op
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training import training
from tensorflow.python.util import compat
SAVED_MODEL_PATH = ("cc/saved_model/testdata/half_plus_two/00000123")
def tearDownModule():
file_io.delete_recursively(test.get_temp_dir())
class SavedModelTest(test.TestCase):
def _get_export_dir(self, label):
return os.path.join(test.get_temp_dir(), label)
def _init_and_validate_variable(self, sess, variable_name, variable_value):
v = variables.Variable(variable_value, name=variable_name)
sess.run(variables.global_variables_initializer())
self.assertEqual(variable_value, v.eval())
def _build_asset_collection(self, asset_file_name, asset_file_contents,
asset_file_tensor_name, asset_subdir=""):
parent_dir = os.path.join(
compat.as_bytes(test.get_temp_dir()), compat.as_bytes(asset_subdir))
file_io.recursive_create_dir(parent_dir)
asset_filepath = os.path.join(
compat.as_bytes(parent_dir), compat.as_bytes(asset_file_name))
file_io.write_string_to_file(asset_filepath, asset_file_contents)
asset_file_tensor = constant_op.constant(
asset_filepath, name=asset_file_tensor_name)
ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset_file_tensor)
asset_collection = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
return asset_collection
def _validate_asset_collection(self, export_dir, graph_collection_def,
expected_asset_file_name,
expected_asset_file_contents,
expected_asset_tensor_name,
asset_id=0):
assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value
asset = meta_graph_pb2.AssetFileDef()
assets_any[asset_id].Unpack(asset)
assets_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes(expected_asset_file_name))
actual_asset_contents = file_io.read_file_to_string(assets_path)
self.assertEqual(expected_asset_file_contents,
compat.as_text(actual_asset_contents))
self.assertEqual(expected_asset_file_name, asset.filename)
self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name)
def _validate_inputs_tensor_info_fail(self, builder, tensor_info):
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def({
"foo_inputs": tensor_info
}, dict(), "foo")
self.assertRaises(
AssertionError,
builder.add_meta_graph_and_variables,
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_inputs_tensor_info_accept(self, builder, tensor_info):
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def({
"foo_inputs": tensor_info
}, dict(), "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_outputs_tensor_info_fail(self, builder, tensor_info):
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_outputs": tensor_info}, "foo")
self.assertRaises(
AssertionError,
builder.add_meta_graph_and_variables,
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_outputs_tensor_info_accept(self, builder, tensor_info):
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_outputs": tensor_info}, "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def testMaybeSavedModelDir(self):
base_path = test.test_src_dir_path("/python/saved_model")
self.assertFalse(loader.maybe_saved_model_directory(base_path))
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
self.assertTrue(loader.maybe_saved_model_directory(base_path))
base_path = "complete_garbage"
self.assertFalse(loader.maybe_saved_model_directory(base_path))
def testBadSavedModelFileFormat(self):
export_dir = self._get_export_dir("test_bad_saved_model_file_format")
# Attempt to load a SavedModel from an export directory that does not exist.
with self.test_session(graph=ops.Graph()) as sess:
with self.assertRaisesRegexp(IOError,
"SavedModel file does not exist at: %s" %
export_dir):
loader.load(sess, ["foo"], export_dir)
os.makedirs(export_dir)
# Write an invalid binary proto to saved_model.pb.
path_to_pb = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
with open(path_to_pb, "w") as f:
f.write("invalid content")
with self.test_session(graph=ops.Graph()) as sess:
with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
constants.SAVED_MODEL_FILENAME_PB):
loader.load(sess, ["foo"], export_dir)
# Cleanup the directory and start again.
file_io.delete_recursively(export_dir)
os.makedirs(export_dir)
# Write an invalid text proto to saved_model.pbtxt
path_to_pbtxt = os.path.join(export_dir,
constants.SAVED_MODEL_FILENAME_PBTXT)
with open(path_to_pbtxt, "w") as f:
f.write("invalid content")
with self.test_session(graph=ops.Graph()) as sess:
with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
constants.SAVED_MODEL_FILENAME_PBTXT):
loader.load(sess, ["foo"], export_dir)
def testVerifySessionGraphUsage(self):
export_dir = self._get_export_dir("test_verify_session_graph_usage")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
# Save the SavedModel to disk.
builder.save()
# Build a session and supply it to the load operation.
sess = session.Session(graph=ops.Graph())
loader.load(sess, [tag_constants.TRAINING], export_dir)
# Check the variable within the scope of the session and its graph.
with sess:
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
def testSequence(self):
export_dir = self._get_export_dir("test_sequence")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Expect an assertion error since add_meta_graph_and_variables() should be
# invoked before any add_meta_graph() calls.
with self.test_session(graph=ops.Graph()) as sess:
self.assertRaises(AssertionError, builder.add_meta_graph, ["foo"])
# Expect an assertion error for multiple calls of
# add_meta_graph_and_variables() since weights should be saved exactly once.
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, ["bar"])
self.assertRaises(AssertionError, builder.add_meta_graph_and_variables,
sess, ["baz"])
def testTags(self):
export_dir = self._get_export_dir("test_tags")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
# - a single tag (from predefined constants).
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - a single tag (from predefined constants).
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
builder.add_meta_graph([tag_constants.SERVING])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - multiple tags (from predefined constants).
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 45)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - multiple tags (from predefined constants for serving on TPU).
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 45)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU])
# Graph that updates the single variable. SavedModel is invoked:
# - to add the model (weights are not updated).
# - multiple custom tags.
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 44)
builder.add_meta_graph(["foo", "bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with a single predefined tag whose variables were saved.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.TRAINING], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with a single predefined tag whose variables were not
# saved.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with multiple predefined tags whose variables were not
# saved.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING, tag_constants.GPU], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with multiple predefined tags (for serving on TPU)
# whose variables were not saved.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING, tag_constants.TPU], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with multiple tags. Provide duplicate tags to test set
# semantics.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo", "bar", "foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Try restoring a graph with a non-existent tag. This should yield a runtime
# error.
with self.test_session(graph=ops.Graph()) as sess:
self.assertRaises(RuntimeError, loader.load, sess, ["INVALID"],
export_dir)
# Try restoring a graph where a subset of the tags match. Since tag matching
# for meta graph defs follows "all" semantics, this should yield a runtime
# error.
with self.test_session(graph=ops.Graph()) as sess:
self.assertRaises(RuntimeError, loader.load, sess, ["foo", "baz"],
export_dir)
def testVariables(self):
export_dir = self._get_export_dir("test_variables")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with two variables. SavedModel invoked to:
# - add with weights.
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v1", 1)
self._init_and_validate_variable(sess, "v2", 2)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with a single variable (subset of the variables from the previous
# graph whose weights were saved). SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v2", 3)
builder.add_meta_graph(["bar"])
# Graph with a single variable (disjoint set of variables from the previous
# graph whose weights were saved). SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v3", 4)
builder.add_meta_graph(["baz"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo", whose variables were saved.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(collection_vars), 2)
self.assertEqual(1, collection_vars[0].eval())
self.assertEqual(2, collection_vars[1].eval())
# Restore the graph with tag "bar", whose variables were not saved. Only the
# subset of the variables added to the graph will be restored with the
# checkpointed value.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(collection_vars), 1)
self.assertEqual(2, collection_vars[0].eval())
# Try restoring the graph with tag "baz", whose variables were not saved.
# Since this graph has a disjoint set of variables from the set that was
# saved, this should raise an error.
with self.test_session(graph=ops.Graph()) as sess:
self.assertRaises(errors.NotFoundError, loader.load, sess, ["baz"],
export_dir)
def testGraphWithoutVariables(self):
export_dir = self._get_export_dir("test_graph_has_variables")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with no variables.
with self.test_session(graph=ops.Graph()) as sess:
constant_5_name = constant_op.constant(5.0).name
builder.add_meta_graph_and_variables(sess, ["foo"])
# Second graph with no variables
with self.test_session(graph=ops.Graph()) as sess:
constant_6_name = constant_op.constant(6.0).name
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo".
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
# Read the constant a from the graph.
a = ops.get_default_graph().get_tensor_by_name(constant_5_name)
b = constant_op.constant(6.0)
c = a * b
self.assertEqual(30.0, sess.run(c))
# Restore the graph with tag "bar".
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
# Read the constant a from the graph.
a = ops.get_default_graph().get_tensor_by_name(constant_6_name)
b = constant_op.constant(5.0)
c = a * b
self.assertEqual(30.0, sess.run(c))
def testNoOverwrite(self):
export_dir = self._get_export_dir("test_no_overwrite")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Restore the graph with tag "foo", whose variables were saved.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# An attempt to create another builder with the same export directory should
# result in an assertion error.
self.assertRaises(AssertionError, saved_model_builder.SavedModelBuilder,
export_dir)
def testSaveAsText(self):
export_dir = self._get_export_dir("test_astext")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with the same single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Restore the graph with tag "foo", whose variables were saved.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with tag "bar", whose variables were not saved.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
def testCollections(self):
export_dir = self._get_export_dir("test_collections")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with a single variable added to a collection. SavedModel invoked to:
# - add with weights.
with self.test_session(graph=ops.Graph()) as sess:
v = variables.Variable(42, name="v")
ops.add_to_collection("foo_vars", v)
sess.run(variables.global_variables_initializer())
self.assertEqual(42, v.eval())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with the same single variable added to a different collection.
# SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.test_session(graph=ops.Graph()) as sess:
v = variables.Variable(43, name="v")
ops.add_to_collection("bar_vars", v)
sess.run(variables.global_variables_initializer())
self.assertEqual(43, v.eval())
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo", whose variables were saved. The
# collection 'foo_vars' should contain a single element. The collection
# 'bar_vars' should not be found.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
collection_foo_vars = ops.get_collection("foo_vars")
self.assertEqual(len(collection_foo_vars), 1)
self.assertEqual(42, collection_foo_vars[0].eval())
self.assertEqual(len(ops.get_collection("bar_vars")), 0)
# Restore the graph with tag "bar", whose variables were not saved. The
# collection-def exported as part of the meta graph def is updated to
# reflect the new collection. The value of the variable in the
# collection-def corresponds to the saved value (from the previous graph
# with tag "foo").
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
collection_bar_vars = ops.get_collection("bar_vars")
self.assertEqual(len(collection_bar_vars), 1)
self.assertEqual(42, collection_bar_vars[0].eval())
self.assertEqual(len(ops.get_collection("foo_vars")), 0)
def testSignatureDefs(self):
export_dir = self._get_export_dir("test_signature_defs")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Graph with a single variable and a single entry in the signature def map.
# SavedModel is invoked to add with weights.
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build and populate an empty SignatureDef for testing.
foo_signature = signature_def_utils.build_signature_def(dict(),
dict(), "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"], signature_def_map={"foo_key": foo_signature})
# Graph with the same single variable and multiple entries in the signature
# def map. No weights are saved by SavedModel.
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
# Build and populate a different SignatureDef for testing.
bar_signature = signature_def_utils.build_signature_def(dict(),
dict(), "bar")
# Also, build a different SignatureDef corresponding to "foo_key" defined
# in the previous graph.
foo_new_signature = signature_def_utils.build_signature_def(dict(),
dict(),
"foo_new")
builder.add_meta_graph(
["bar"],
signature_def_map={
"bar_key": bar_signature,
"foo_key": foo_new_signature
})
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo". The single entry in the SignatureDef map
# corresponding to "foo_key" should exist.
with self.test_session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
foo_signature = foo_graph.signature_def
self.assertEqual(len(foo_signature), 1)
self.assertEqual("foo", foo_signature["foo_key"].method_name)
# Restore the graph with tag "bar". The SignatureDef map should have two
# entries. One corresponding to "bar_key" and another corresponding to the
# new value of "foo_key".
with self.test_session(graph=ops.Graph()) as sess:
bar_graph = loader.load(sess, ["bar"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
bar_signature = bar_graph.signature_def
self.assertEqual(len(bar_signature), 2)
self.assertEqual("bar", bar_signature["bar_key"].method_name)
self.assertEqual("foo_new", bar_signature["foo_key"].method_name)
def testSignatureDefValidationFails(self):
export_dir = self._get_export_dir("test_signature_def_validation_fail")
builder = saved_model_builder.SavedModelBuilder(export_dir)
tensor_without_encoding = meta_graph_pb2.TensorInfo()
tensor_without_encoding.dtype = types_pb2.DT_FLOAT
self._validate_inputs_tensor_info_fail(builder, tensor_without_encoding)
self._validate_outputs_tensor_info_fail(builder, tensor_without_encoding)
tensor_without_dtype = meta_graph_pb2.TensorInfo()
tensor_without_dtype.name = "x"
self._validate_inputs_tensor_info_fail(builder, tensor_without_dtype)
self._validate_outputs_tensor_info_fail(builder, tensor_without_dtype)
tensor_empty = meta_graph_pb2.TensorInfo()
self._validate_inputs_tensor_info_fail(builder, tensor_empty)
self._validate_outputs_tensor_info_fail(builder, tensor_empty)
def testSignatureDefValidationSucceedsWithName(self):
tensor_with_name = meta_graph_pb2.TensorInfo()
tensor_with_name.name = "foo"
tensor_with_name.dtype = types_pb2.DT_FLOAT
export_dir = self._get_export_dir("test_signature_def_validation_name_1")
builder = saved_model_builder.SavedModelBuilder(export_dir)
self._validate_inputs_tensor_info_accept(builder, tensor_with_name)
export_dir = self._get_export_dir("test_signature_def_validation_name_2")
builder = saved_model_builder.SavedModelBuilder(export_dir)
self._validate_outputs_tensor_info_accept(builder, tensor_with_name)
def testSignatureDefValidationSucceedsWithCoo(self):
tensor_with_coo = meta_graph_pb2.TensorInfo()
# TODO(soergel) test validation of each of the fields of coo_sparse
tensor_with_coo.coo_sparse.values_tensor_name = "foo"
tensor_with_coo.dtype = types_pb2.DT_FLOAT
export_dir = self._get_export_dir("test_signature_def_validation_coo_1")
builder = saved_model_builder.SavedModelBuilder(export_dir)
self._validate_inputs_tensor_info_accept(builder, tensor_with_coo)
export_dir = self._get_export_dir("test_signature_def_validation_coo_2")
builder = saved_model_builder.SavedModelBuilder(export_dir)
self._validate_outputs_tensor_info_accept(builder, tensor_with_coo)
def testAssets(self):
export_dir = self._get_export_dir("test_assets")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection.
ignored_filepath = os.path.join(
compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
file_io.write_string_to_file(ignored_filepath, "will be ignored")
asset_collection = self._build_asset_collection("hello42.txt",
"foo bar baz",
"asset_file_tensor")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_collection=asset_collection)
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"hello42.txt", "foo bar baz",
"asset_file_tensor:0")
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("ignored.txt"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
def testAssetsNameCollisionDiffFile(self):
export_dir = self._get_export_dir("test_assets_name_collision_diff_file")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_collection = self._build_asset_collection(
"hello42.txt", "foo bar bak", "asset_file_tensor",
asset_subdir="1")
asset_collection = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor_1",
asset_subdir="2")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_collection=asset_collection)
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"hello42.txt", "foo bar bak",
"asset_file_tensor:0")
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"hello42.txt_1", "foo bar baz",
"asset_file_tensor_1:0",
asset_id=1)
def testAssetsNameCollisionSameFilepath(self):
export_dir = self._get_export_dir("test_assets_name_collision_same_path")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_collection = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor")
asset_collection = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor_1")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_collection=asset_collection)
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"hello42.txt", "foo bar baz",
"asset_file_tensor:0")
      # The second tensor should also be recorded, pointing to the same asset file.
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"hello42.txt", "foo bar baz",
"asset_file_tensor_1:0",
asset_id=1)
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("hello42.txt_1"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
def testAssetsNameCollisionSameFile(self):
export_dir = self._get_export_dir("test_assets_name_collision_same_file")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_collection = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor",
asset_subdir="1")
asset_collection = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor_1",
asset_subdir="2")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_collection=asset_collection)
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"hello42.txt", "foo bar baz",
"asset_file_tensor:0")
      # The second tensor should also be recorded, pointing to the same asset file.
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"hello42.txt", "foo bar baz",
"asset_file_tensor_1:0",
asset_id=1)
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("hello42.txt_1"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
def testAssetsNameCollisionManyFiles(self):
export_dir = self._get_export_dir("test_assets_name_collision_many_files")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
for i in range(5):
idx = str(i)
asset_collection = self._build_asset_collection(
"hello42.txt", "foo bar baz " + idx, "asset_file_tensor_" + idx,
asset_subdir=idx)
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_collection=asset_collection)
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
for i in range(1, 5):
idx = str(i)
self._validate_asset_collection(
export_dir, foo_graph.collection_def, "hello42.txt_" + idx,
"foo bar baz " + idx, "asset_file_tensor_{}:0".format(idx),
asset_id=i)
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"hello42.txt", "foo bar baz 0",
"asset_file_tensor_0:0")
def testCustomMainOp(self):
export_dir = self._get_export_dir("test_main_op")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.Variable(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.Variable(2, name="v2")
ops.add_to_collection("v", v2)
# Initialize another variable `v3` to 42.
v3 = variables.Variable(42, name="v3")
ops.add_to_collection("v", v3)
# Set up an assignment op to be run as part of the main_op.
with ops.control_dependencies([main_op.main_op()]):
add_v1_v2 = math_ops.add(v1._ref(), v2._ref())
custom_main_op = control_flow_ops.group(state_ops.assign(v3, add_v1_v2))
sess.run(custom_main_op)
builder.add_meta_graph_and_variables(
sess, ["foo"], main_op=custom_main_op)
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
# Evaluates to the sum of the first two variables and assigned as part of
# the main_op, following a restore.
self.assertEqual(3, ops.get_collection("v")[2].eval())
def testLegacyInitOp(self):
export_dir = self._get_export_dir("test_legacy_init_op")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.Variable(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.Variable(2, name="v2")
ops.add_to_collection("v", v2)
# Initialize another variable `v3` to 42.
v3 = variables.Variable(42, name="v3", trainable=False, collections=[])
ops.add_to_collection("v", v3)
# Set up an assignment op to be run as part of the legacy_init_op.
assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
legacy_init_op = control_flow_ops.group(assign_v3, name="legacy_init_op")
sess.run(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, ["foo"], legacy_init_op=legacy_init_op)
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
# Evaluates to the sum of the first two variables and assigned as part of
# the legacy_init_op, following a restore.
self.assertEqual(3, ops.get_collection("v")[2].eval())
def testLegacyInitOpWithNonEmptyCollection(self):
export_dir = self._get_export_dir(
"test_legacy_init_op_with_non_empty_collection")
self._testInitOpsWithNonEmptyCollection(
export_dir, constants.LEGACY_INIT_OP_KEY)
def testMainOpWithNonEmptyCollection(self):
export_dir = self._get_export_dir(
"test_main_op_with_non_empty_collection")
self._testInitOpsWithNonEmptyCollection(export_dir, constants.MAIN_OP_KEY)
def _testInitOpsWithNonEmptyCollection(self, export_dir, key):
builder = saved_model_builder.SavedModelBuilder(export_dir)
g = ops.Graph()
with self.test_session(graph=g) as sess:
# Initialize variable `v1` to 1.
v1 = variables.Variable(1, name="v1")
ops.add_to_collection("v", v1)
# Initialize another variable `v2` to 42.
v2 = variables.Variable(42, name="v2", trainable=False, collections=[])
ops.add_to_collection("v", v2)
# Set up an assignment op to be run as part of the init op.
assign_v2 = state_ops.assign(v2, v1)
init_op = control_flow_ops.group(assign_v2, name="init_op")
sess.run(variables.global_variables_initializer())
ops.add_to_collection(key, control_flow_ops.no_op())
# ValueError should be raised since the LEGACY_INIT_OP_KEY collection
# is not empty and we don't support multiple init ops.
with self.assertRaisesRegexp(ValueError, "Graph already contains"):
builder.add_meta_graph_and_variables(
sess, ["foo"], legacy_init_op=init_op)
# We shouldn't be able to add as MAIN_OP, either.
with self.assertRaisesRegexp(ValueError, "Graph already contains"):
builder.add_meta_graph_and_variables(sess, ["foo"], main_op=init_op)
def testTrainOp(self):
export_dir = self._get_export_dir("test_train_op")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.Variable(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.Variable(2, name="v2")
ops.add_to_collection("v", v2)
sess.run(variables.global_variables_initializer())
train_op = state_ops.assign_add(v1, v2)
sess.run(train_op)
# TODO(karmel): remove explicit call when in the public method.
builder._add_train_op(train_op)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(3, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
self.assertIsInstance(
ops.get_collection(constants.TRAIN_OP_KEY)[0], ops.Tensor)
def testTrainOpGroup(self):
export_dir = self._get_export_dir("test_train_op_group")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.Variable(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.Variable(2, name="v2")
ops.add_to_collection("v", v2)
sess.run(variables.global_variables_initializer())
train_op = control_flow_ops.group()
sess.run(train_op)
# TODO(karmel): remove explicit call when in the public method.
builder._add_train_op(train_op)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
self.assertIsInstance(
ops.get_collection(constants.TRAIN_OP_KEY)[0], ops.Operation)
def testTrainOpAfterVariables(self):
export_dir = self._get_export_dir("test_train_op_after_variables")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.Variable(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.Variable(2, name="v2")
ops.add_to_collection("v", v2)
sess.run(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(sess, ["pre_foo"])
train_op = state_ops.assign_add(v1, v2)
sess.run(train_op)
# TODO(karmel): remove explicit call when in the public method.
builder._add_train_op(train_op)
builder.add_meta_graph(["foo"])
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertIsInstance(
ops.get_collection(constants.TRAIN_OP_KEY)[0], ops.Tensor)
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, ["pre_foo"], export_dir)
self.assertFalse(ops.get_collection(constants.TRAIN_OP_KEY))
def testMultipleAssets(self):
export_dir = self._get_export_dir("test_multiple_assets")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection specific to `foo` graph.
asset_collection = self._build_asset_collection("foo.txt", "content_foo",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "foo".
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_collection=asset_collection)
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection specific to `bar` graph.
asset_collection = self._build_asset_collection("bar.txt", "content_bar",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "bar".
builder.add_meta_graph(["bar"], assets_collection=asset_collection)
# Save the SavedModel to disk.
builder.save()
# Check assets restored for graph with tag "foo".
with self.test_session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"foo.txt", "content_foo",
"asset_file_tensor:0")
# Check assets restored for graph with tag "bar".
with self.test_session(graph=ops.Graph()) as sess:
bar_graph = loader.load(sess, ["bar"], export_dir)
self._validate_asset_collection(export_dir, bar_graph.collection_def,
"bar.txt", "content_bar",
"asset_file_tensor:0")
def testDuplicateAssets(self):
export_dir = self._get_export_dir("test_duplicate_assets")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection with `foo.txt` that has `foo` specific
# content.
asset_collection = self._build_asset_collection("foo.txt", "content_foo",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "foo".
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_collection=asset_collection)
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection with `foo.txt` that has `bar` specific
# content.
asset_collection = self._build_asset_collection("foo.txt", "content_bar",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "bar".
builder.add_meta_graph(["bar"], assets_collection=asset_collection)
# Save the SavedModel to disk.
builder.save()
# Check assets restored for graph with tag "foo".
with self.test_session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"foo.txt", "content_foo",
"asset_file_tensor:0")
# Check assets restored for graph with tag "bar".
with self.test_session(graph=ops.Graph()) as sess:
bar_graph = loader.load(sess, ["bar"], export_dir)
# Validate the assets for `bar` graph. `foo.txt` should contain the
# original contents corresponding to `foo` graph since an asset with the
# same name across multiple graphs is only stored the first time.
self._validate_asset_collection(export_dir, bar_graph.collection_def,
"foo.txt", "content_foo",
"asset_file_tensor:0")
def testOp(self):
export_dir = self._get_export_dir("test_op")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = variables.Variable(1, name="v1")
with sess.graph.device("/cpu:1"):
v2 = variables.Variable(2, name="v2")
# v3 is an unsaved variable derived from v1 and v2. It is used to
# exercise the ability to run an init op when restoring a graph.
v3 = variables.Variable(1, name="v3", trainable=False, collections=[])
assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
init_op = control_flow_ops.group(assign_v3, name="init_op")
ops.add_to_collection("v", v1)
ops.add_to_collection("v", v2)
ops.add_to_collection("v", v3)
ops.add_to_collection("init_op", init_op)
sess.run(variables.global_variables_initializer())
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk.
builder.save()
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
loader.load(sess, ["foo"], export_dir)
# Validate variables, run the init op and verify result.
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
ops.get_collection("init_op")[0].run()
self.assertEqual(3, ops.get_collection("v")[2].eval())
def testCustomSaveable(self):
export_dir = self._get_export_dir("custom_saveable")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
# CheckpointedOp is a key-value table that can be saved across sessions.
# The table registers itself in the SAVEABLE_OBJECTS collection.
v1 = saver_test_utils.CheckpointedOp(name="v1")
variables.global_variables_initializer().run()
v1.insert("k1", 3.0).run()
# Once the table is restored, we can access it through this reference.
ops.add_to_collection("table_ref", v1.table_ref)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk.
builder.save()
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
loader.load(sess, ["foo"], export_dir)
# Instantiate a wrapper object from the checkpointed reference.
v1 = saver_test_utils.CheckpointedOp(
name="v1", table_ref=ops.get_collection("table_ref")[0])
self.assertEqual(b"k1", v1.keys().eval())
self.assertEqual(3.0, v1.values().eval())
def testCustomSaver(self):
export_dir = self._get_export_dir("test_custom_saver")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
variables.Variable(1, name="v1")
sess.run(variables.global_variables_initializer())
custom_saver = training.Saver(name="my_saver")
builder.add_meta_graph_and_variables(sess, ["tag"], saver=custom_saver)
# Save the SavedModel to disk.
builder.save()
with ops.Graph().as_default() as graph:
with self.test_session(graph=graph) as sess:
saved_graph = loader.load(sess, ["tag"], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue("my_saver/restore_all" in graph_ops)
self.assertFalse("save/restore_all" in graph_ops)
self.assertEqual(
saved_graph.saver_def.restore_op_name, "my_saver/restore_all")
def testNoCustomSaver(self):
export_dir = self._get_export_dir("test_no_custom_saver")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
variables.Variable(1, name="v1")
sess.run(variables.global_variables_initializer())
training.Saver(name="my_saver")
builder.add_meta_graph_and_variables(sess, ["tag"])
# Save the SavedModel to disk.
builder.save()
with ops.Graph().as_default() as graph:
with self.test_session(graph=graph) as sess:
saved_graph = loader.load(sess, ["tag"], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue("my_saver/restore_all" in graph_ops)
self.assertTrue("save/restore_all" in graph_ops)
self.assertEqual(
saved_graph.saver_def.restore_op_name, "save/restore_all")
def testMultipleCustomSavers(self):
export_dir = self._get_export_dir("test_multiple_custom_savers")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.test_session(graph=ops.Graph()) as sess:
variables.Variable(1, name="v1")
sess.run(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(sess, ["tag_0"])
saver_1 = training.Saver()
builder.add_meta_graph(["tag_1"], saver=saver_1)
saver_2 = training.Saver()
builder.add_meta_graph(["tag_2"], saver=saver_2)
# Save the SavedModel to disk.
builder.save()
def _validate_custom_saver(tag_name, saver_name):
with ops.Graph().as_default() as graph:
with self.test_session(graph=graph) as sess:
saved_graph = loader.load(sess, [tag_name], export_dir)
self.assertEqual(
saved_graph.saver_def.restore_op_name,
saver_name)
_validate_custom_saver("tag_0", "save/restore_all")
_validate_custom_saver("tag_1", "save_1/restore_all")
_validate_custom_saver("tag_2", "save_2/restore_all")
def testImportScope(self):
export_dir = self._get_export_dir("test_scoped_assets")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Build a SavedModel with a variable, an asset, and a constant tensor.
with self.test_session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_collection = self._build_asset_collection("foo.txt", "content_foo",
"asset_file_tensor")
constant_op.constant("constant value", name="constant_tensor_name")
builder.add_meta_graph_and_variables(
sess, ["tag_name"], assets_collection=asset_collection)
# Save the asset file path for later comparison.
asset_file_path = asset_collection[0].eval()
# Save the SavedModel to disk.
builder.save()
with self.test_session(graph=ops.Graph()) as sess:
# Restore the SavedModel under an import_scope in a new graph/session.
graph_proto = loader.load(
sess, ["tag_name"], export_dir, import_scope="scope_name")
# The loaded variable tensor should be scoped, but its contents should be
# unchanged.
self.assertEqual(
"scope_name/v:0",
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].name)
self.assertEqual(
42,
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# The loaded asset tensor should be scoped, but the asset file path and
# contents should be unchanged.
asset_collection = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
self.assertEqual(1, len(asset_collection))
self.assertEqual(asset_file_path, asset_collection[0].eval())
self.assertEqual("scope_name/asset_file_tensor:0",
asset_collection[0].name)
# The static asset data inside graph_proto.collection_def should not be
# scoped.
self._validate_asset_collection(export_dir, graph_proto.collection_def,
"foo.txt", "content_foo",
"asset_file_tensor:0")
# The constant tensor should be scoped, but its contents should be
# unchanged.
self.assertEqual(
compat.as_bytes("constant value"),
ops.get_default_graph().get_tensor_by_name(
"scope_name/constant_tensor_name:0").eval())
def testClearDevices(self):
export_dir = self._get_export_dir("test_clear_devices")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Specify a device and save a variable.
ops.reset_default_graph()
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(
sess, [tag_constants.TRAINING], clear_devices=True)
# Save the SavedModel to disk.
builder.save()
# Restore the graph with a single predefined tag whose variables were saved
# without any device information.
with self.test_session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.TRAINING], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
def testStripDefaultAttrs(self):
export_dir = self._get_export_dir("test_strip_default_attrs")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Add a graph with two float32 variables and a Complex Op composing them
# with strip_default_attrs enabled.
with session.Session(graph=ops.Graph()) as sess:
real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
sess.run(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, ["foo"], strip_default_attrs=True)
# Add a graph with the same float32 variables and a Complex Op composing
# them with strip_default_attrs disabled.
with session.Session(graph=ops.Graph()) as sess:
real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
sess.run(variables.global_variables_initializer())
builder.add_meta_graph(["bar"], strip_default_attrs=False)
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Loading graph "foo" via the loader must restore the defaults for the
# "Complex" node based on the "Complex" OpDef in the Op registry.
sess = session.Session(graph=ops.Graph())
meta_graph_def = loader.load(sess, ["foo"], export_dir)
complex_node = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", complex_node.attr)
self.assertIn("Tout", complex_node.attr)
# Load graph "foo" from disk as-is to verify default attrs are stripped.
# pylint: disable=protected-access
saved_model_pb = loader_impl._parse_saved_model(export_dir)
self.assertIsNotNone(saved_model_pb)
# pylint: enable=protected-access
meta_graph_foo_def = None
meta_graph_bar_def = None
for meta_graph_def in saved_model_pb.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set(["foo"]):
meta_graph_foo_def = meta_graph_def
elif set(meta_graph_def.meta_info_def.tags) == set(["bar"]):
meta_graph_bar_def = meta_graph_def
self.assertIsNotNone(meta_graph_foo_def)
self.assertIsNotNone(meta_graph_bar_def)
# "Complex" Op has 2 attributes with defaults:
# o "T" : float32. (input type)
# o "Tout" : complex64. (output type)
# "Complex" Op in graph "foo" shouldn't have attributes "T" and "Tout".
# Graph "foo" was saved with strip_default_attrs set to True.
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_foo_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
# "Complex" Op in graph "bar" must have attributes "T" and "Tout".
# Graph "bar" was saved with strip_default_attrs set to False.
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_bar_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
# Tests the behavior of loading SavedModels that have missing attrs or attrs
# with incorrect types.
def testInconsistentConsumerDefaultAttrs(self):
export_dir = self._get_export_dir(
"test_strip_default_attrs_no_consumer_defaults")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Add a graph with a single variable and a test op with a defaultless
# float32 attr, "test_attr".
with session.Session(graph=ops.Graph()) as sess:
variables.Variable(1.0, dtype=dtypes.float64, name="var")
test_ops.test_attr(T=dtypes.float32, name="test_attr")
sess.run(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Rewrite the SavedModel to remove the T attr from "test_attr".
saved_model_file = os.path.join(
export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
with open(saved_model_file) as f:
original_saved_model = f.read()
no_attr_saved_model = original_saved_model.replace("""
attr {
key: "T"
value {
type: DT_FLOAT
}
}""", "")
with open(saved_model_file, "w") as f:
f.write(no_attr_saved_model)
# Loading the SavedModel via the loader must fail because the SavedModel
# does not have any attr values for the "TestAttr" node, and there is no
# default specified in the TestAttr OpDef.
sess = session.Session(graph=ops.Graph())
with self.assertRaisesRegexp(
ValueError, "NodeDef missing attr 'T' from Op<name=TestAttr"):
loader.load(sess, ["foo"], export_dir)
# Rewrite the SavedModel to change the type of the T attr in "test_attr"
bad_type_saved_model = original_saved_model.replace("""
attr {
key: "T"
value {
type: DT_FLOAT
}
}""", """
attr {
key: "T"
value {
type: DT_DOUBLE
}
}""")
with open(saved_model_file, "w") as f:
f.write(bad_type_saved_model)
# Loading the SavedModel via the loader must fail because there is no
# OpKernel registered to handle T = double.
sess = session.Session(graph=ops.Graph())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
".*No OpKernel was registered to support Op \'TestAttr\' with these "
"attrs..*"):
loader.load(sess, ["foo"], export_dir)
if __name__ == "__main__":
test.main()
|
PeterWangIntel/chromium-crosswalk
|
refs/heads/master
|
third_party/markdown/util.py
|
109
|
# -*- coding: utf-8 -*-
# markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
import re
import sys
"""
Python 3 Stuff
=============================================================================
"""
PY3 = sys.version_info[0] == 3
if PY3:
string_type = str
text_type = str
int2str = chr
else:
string_type = basestring
text_type = unicode
int2str = unichr
"""
Constants you might want to modify
-----------------------------------------------------------------------------
"""
BLOCK_LEVEL_ELEMENTS = re.compile("^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
"|script|noscript|form|fieldset|iframe|math"
"|hr|hr/|style|li|dt|dd|thead|tbody"
"|tr|th|td|section|footer|header|group|figure"
"|figcaption|aside|article|canvas|output"
"|progress|video)$", re.IGNORECASE)
# Placeholders
STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]{4})')
AMP_SUBSTITUTE = STX+"amp"+ETX
"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""
RTL_BIDI_RANGES = ( ('\u0590', '\u07FF'),
# Hebrew (0590-05FF), Arabic (0600-06FF),
# Syriac (0700-074F), Arabic supplement (0750-077F),
# Thaana (0780-07BF), Nko (07C0-07FF).
('\u2D30', '\u2D7F'), # Tifinagh
)
# Extensions should use "markdown.util.etree" instead of "etree" (or do `from
# markdown.util import etree`). Do not import it yourself.
try: # Is the C implementation of ElementTree available?
import xml.etree.cElementTree as etree
from xml.etree.ElementTree import Comment
# Serializers (including ours) test with non-c Comment
etree.test_comment = Comment
if etree.VERSION < "1.0.5":
raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
except (ImportError, RuntimeError):
# Use the Python implementation of ElementTree?
import xml.etree.ElementTree as etree
if etree.VERSION < "1.1":
raise RuntimeError("ElementTree version 1.1 or higher is required")
"""
AUXILIARY GLOBAL FUNCTIONS
=============================================================================
"""
def isBlockLevel(tag):
"""Check if the tag is a block level HTML tag."""
if isinstance(tag, string_type):
return BLOCK_LEVEL_ELEMENTS.match(tag)
# Some ElementTree tags are not strings, so return False.
return False
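# Illustrative note (not part of the original module): isBlockLevel("div") or
# isBlockLevel("p") returns a truthy match object, isBlockLevel("span") returns
# None, and a non-string tag such as an ElementTree Comment returns False.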
"""
MISC AUXILIARY CLASSES
=============================================================================
"""
class AtomicString(text_type):
"""A string which should not be further processed."""
pass
class Processor(object):
def __init__(self, markdown_instance=None):
if markdown_instance:
self.markdown = markdown_instance
class HtmlStash(object):
"""
This class is used for stashing HTML objects that we extract
in the beginning and replace with place-holders.
"""
def __init__ (self):
""" Create a HtmlStash. """
self.html_counter = 0 # for counting inline html segments
self.rawHtmlBlocks=[]
def store(self, html, safe=False):
"""
Saves an HTML segment for later reinsertion. Returns a
placeholder string that needs to be inserted into the
document.
Keyword arguments:
* html: an html segment
* safe: label an html segment as safe for safemode
Returns : a placeholder string
"""
self.rawHtmlBlocks.append((html, safe))
placeholder = self.get_placeholder(self.html_counter)
self.html_counter += 1
return placeholder
def reset(self):
self.html_counter = 0
self.rawHtmlBlocks = []
def get_placeholder(self, key):
return "%swzxhzdk:%d%s" % (STX, key, ETX)
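# Minimal usage sketch (added for illustration; assumes nothing beyond the
# class above): store() appends the raw segment and hands back an STX/ETX
# delimited placeholder that a later pass can swap for the original HTML.
#
#   stash = HtmlStash()
#   token = stash.store("<b>raw</b>", safe=True)  # e.g. "\x02wzxhzdk:0\x03"
#   html, safe = stash.rawHtmlBlocks[0]           # ("<b>raw</b>", True)
#   stash.reset()                                 # clears counter and blocks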
|
alexandrucoman/vbox-nova-driver
|
refs/heads/master
|
nova/tests/unit/virt/test_images.py
|
67
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_concurrency import processutils
from nova import exception
from nova import test
from nova import utils
from nova.virt import images
class QemuTestCase(test.NoDBTestCase):
def test_qemu_info_with_bad_path(self):
self.assertRaises(exception.InvalidDiskInfo,
images.qemu_img_info,
'/path/that/does/not/exist')
@mock.patch.object(os.path, 'exists', return_value=True)
def test_qemu_info_with_errors(self, path_exists):
self.assertRaises(processutils.ProcessExecutionError,
images.qemu_img_info,
'/fake/path')
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(utils, 'execute',
return_value=('stdout', None))
def test_qemu_info_with_no_errors(self, path_exists,
utils_execute):
image_info = images.qemu_img_info('/fake/path')
self.assertTrue(image_info)
self.assertTrue(str(image_info))
|
petecummings/django-cms
|
refs/heads/develop
|
cms/tests/test_views.py
|
16
|
from __future__ import with_statement
from copy import deepcopy
import re
import sys
from django.core.cache import cache
from django.conf import settings
from django.contrib.auth.models import Permission
from django.core.urlresolvers import clear_url_caches
from django.http import Http404
from django.template import Variable
from django.test.utils import override_settings
from cms.api import create_page, create_title, publish_page
from cms.apphook_pool import apphook_pool
from cms.models import PagePermission, UserSettings, Placeholder
from cms.page_rendering import _handle_no_page
from cms.test_utils.testcases import CMSTestCase, ClearURLs
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils.compat import DJANGO_1_7
from cms.utils.conf import get_cms_setting
from cms.views import details
from menus.menu_pool import menu_pool
APP_NAME = 'SampleApp'
APP_MODULE = "cms.test_utils.project.sampleapp.cms_apps"
@override_settings(
CMS_PERMISSION=True,
ROOT_URLCONF='cms.test_utils.project.urls',
)
class ViewTests(CMSTestCase):
def setUp(self):
clear_url_caches()
def test_handle_no_page(self):
"""
Test that _handle_no_page works correctly with DEBUG=True
"""
request = self.get_request('/')
slug = ''
self.assertRaises(Http404, _handle_no_page, request, slug)
with self.settings(DEBUG=True):
request = self.get_request('/en/')
slug = ''
response = _handle_no_page(request, slug)
self.assertEqual(response.status_code, 200)
def test_apphook_not_hooked(self):
"""
Test details view when apphook pool has apphooks, but they're not
actually hooked
"""
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
apphooks = (
'%s.%s' % (APP_MODULE, APP_NAME),
)
create_page("page2", "nav_playground.html", "en", published=True)
with self.settings(CMS_APPHOOKS=apphooks):
apphook_pool.clear()
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
apphook_pool.clear()
def test_external_redirect(self):
# test external redirect
redirect_one = 'https://www.django-cms.org/'
one = create_page("one", "nav_playground.html", "en", published=True,
redirect=redirect_one)
url = one.get_absolute_url()
request = self.get_request(url)
response = details(request, one.get_path("en"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], redirect_one)
def test_internal_neutral_redirect(self):
# test internal language neutral redirect
redirect_one = 'https://www.django-cms.org/'
redirect_two = '/'
one = create_page("one", "nav_playground.html", "en", published=True,
redirect=redirect_one)
two = create_page("two", "nav_playground.html", "en", parent=one,
published=True, redirect=redirect_two)
url = two.get_absolute_url()
request = self.get_request(url)
response = details(request, two.get_path())
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '/en/')
def test_internal_forced_redirect(self):
# test internal forced language redirect
redirect_one = 'https://www.django-cms.org/'
redirect_three = '/en/'
one = create_page("one", "nav_playground.html", "en", published=True,
redirect=redirect_one)
three = create_page("three", "nav_playground.html", "en", parent=one,
published=True, redirect=redirect_three)
url = three.get_slug()
request = self.get_request(url)
response = details(request, url.strip('/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], redirect_three)
def test_redirect_to_self(self):
one = create_page("one", "nav_playground.html", "en", published=True,
redirect='/')
url = one.get_absolute_url()
request = self.get_request(url)
response = details(request, one.get_path())
self.assertEqual(response.status_code, 200)
def test_redirect_to_self_with_host(self):
one = create_page("one", "nav_playground.html", "en", published=True,
redirect='http://testserver/en/')
url = one.get_absolute_url()
request = self.get_request(url)
response = details(request, one.get_path())
self.assertEqual(response.status_code, 200)
def test_redirect_with_toolbar(self):
create_page("one", "nav_playground.html", "en", published=True,
redirect='/en/page2')
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
def test_login_required(self):
create_page("page", "nav_playground.html", "en", published=True,
login_required=True)
plain_url = '/accounts/'
login_rx = re.compile("%s\?(signin=|next=/en/)&" % plain_url)
with self.settings(LOGIN_URL=plain_url + '?signin'):
request = self.get_request('/en/')
response = details(request, '')
self.assertEqual(response.status_code, 302)
self.assertTrue(login_rx.search(response['Location']))
login_rx = re.compile("%s\?(signin=|next=/)&" % plain_url)
with self.settings(USE_I18N=False, LOGIN_URL=plain_url + '?signin'):
request = self.get_request('/')
response = details(request, '')
self.assertEqual(response.status_code, 302)
self.assertTrue(login_rx.search(response['Location']))
def test_edit_permission(self):
page = create_page("page", "nav_playground.html", "en", published=True)
# Anon user
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertNotContains(response, "cms_toolbar-item-switch-save-edit", 200)
# Superuser
user = self.get_superuser()
with self.login_user_context(user):
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertContains(response, "cms-toolbar-item-switch-save-edit", 1, 200)
# Admin but with no permission
user = self.get_staff_user_with_no_permissions()
user.user_permissions.add(Permission.objects.get(codename='change_page'))
with self.login_user_context(user):
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertNotContains(response, "cms-toolbar-item-switch-save-edit", 200)
PagePermission.objects.create(can_change=True, user=user, page=page)
with self.login_user_context(user):
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertContains(response, "cms-toolbar-item-switch-save-edit", 1, 200)
def test_toolbar_switch_urls(self):
user = self.get_superuser()
user_settings = UserSettings(language="en", user=user)
placeholder = Placeholder(slot="clipboard")
placeholder.save()
user_settings.clipboard = placeholder
user_settings.save()
page = create_page("page", "nav_playground.html", "en", published=True)
create_title("fr", "french home", page)
publish_page(page, user, "fr")
with self.login_user_context(user):
response = self.client.get("/fr/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertContains(response, "/fr/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'), 1, 200)
response = self.client.get("/fr/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
self.assertContains(response, "/fr/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'), 1, 200)
@override_settings(ROOT_URLCONF='cms.test_utils.project.urls')
class ContextTests(ClearURLs, CMSTestCase):
def test_context_current_page(self):
"""
Asserts the number of queries triggered by
`cms.context_processors.cms_settings` and `cms.middleware.page`
"""
from django.template import context
page_template = "nav_playground.html"
if DJANGO_1_7:
original_context = {'TEMPLATE_CONTEXT_PROCESSORS': settings.TEMPLATE_CONTEXT_PROCESSORS}
override = {'TEMPLATE_CONTEXT_PROCESSORS': list(settings.TEMPLATE_CONTEXT_PROCESSORS)}
override['TEMPLATE_CONTEXT_PROCESSORS'].remove("cms.context_processors.cms_settings")
else:
original_context = {'TEMPLATES': settings.TEMPLATES}
override = {'TEMPLATES': deepcopy(settings.TEMPLATES)}
override['TEMPLATES'][0]['OPTIONS']['context_processors'].remove("cms.context_processors.cms_settings")
page = create_page("page", page_template, "en", published=True)
page_2 = create_page("page-2", page_template, "en", published=True,
parent=page)
# Tests for standard django applications
# 1 query is executed in get_app_patterns(), not related
# to cms.context_processors.cms_settings.
# Executing this outside the queries assertion context ensures
# repeatability
self.client.get("/en/plain_view/")
cache.clear()
menu_pool.clear()
context._standard_context_processors = None
# Number of queries when context processors is not enabled
with self.settings(**override):
with self.assertNumQueries(FuzzyInt(0, 12)) as context:
response = self.client.get("/en/plain_view/")
num_queries = len(context.captured_queries)
self.assertFalse('CMS_TEMPLATE' in response.context)
cache.clear()
menu_pool.clear()
# Number of queries when context processor is enabled
with self.settings(**original_context):
# no extra query is run when accessing urls managed by standard
# django applications
with self.assertNumQueries(FuzzyInt(0, num_queries)):
response = self.client.get("/en/plain_view/")
# One query when determining current page
with self.assertNumQueries(FuzzyInt(0, 1)):
self.assertFalse(response.context['request'].current_page)
self.assertFalse(response.context['request']._current_page_cache)
# Zero more queries when determining the current template
with self.assertNumQueries(0):
# Template is the first in the CMS_TEMPLATES list
template = Variable('CMS_TEMPLATE').resolve(response.context)
self.assertEqual(template, get_cms_setting('TEMPLATES')[0][0])
cache.clear()
menu_pool.clear()
# Number of queries when context processors is not enabled
with self.settings(**override):
# Baseline number of queries
with self.assertNumQueries(FuzzyInt(13, 20)) as context:
response = self.client.get("/en/page-2/")
num_queries_page = len(context.captured_queries)
cache.clear()
menu_pool.clear()
# Number of queries when context processors is enabled
with self.settings(**original_context):
# Exactly the same number of queries are executed with and without
# the context_processor
with self.assertNumQueries(num_queries_page):
response = self.client.get("/en/page-2/")
template = Variable('CMS_TEMPLATE').resolve(response.context)
self.assertEqual(template, page_template)
cache.clear()
menu_pool.clear()
page_2.template = 'INHERIT'
page_2.save()
page_2.publish('en')
with self.settings(**original_context):
# One more query is triggered as the page inherits its template from an ancestor
with self.assertNumQueries(num_queries_page + 1):
response = self.client.get("/en/page-2/")
template = Variable('CMS_TEMPLATE').resolve(response.context)
self.assertEqual(template, page_template)
|
iskandr/fancyimpute
|
refs/heads/master
|
test/low_rank_data.py
|
2
|
import numpy as np
def create_rank_k_dataset(
n_rows=5,
n_cols=5,
k=3,
fraction_missing=0.1,
symmetric=False,
random_seed=0):
np.random.seed(random_seed)
x = np.random.randn(n_rows, k)
y = np.random.randn(k, n_cols)
XY = np.dot(x, y)
if symmetric:
assert n_rows == n_cols
XY = 0.5 * XY + 0.5 * XY.T
missing_raw_values = np.random.uniform(0, 1, (n_rows, n_cols))
missing_mask = missing_raw_values < fraction_missing
XY_incomplete = XY.copy()
# fill missing entries with NaN
XY_incomplete[missing_mask] = np.nan
return XY, XY_incomplete, missing_mask
# create some default data to be shared across tests
XY, XY_incomplete, missing_mask = create_rank_k_dataset(
n_rows=500,
n_cols=10,
k=3,
fraction_missing=0.25)
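# Note added for clarity: with these defaults XY and XY_incomplete are 500x10
# matrices of rank 3, and roughly 25% of the entries in XY_incomplete are NaN,
# exactly where missing_mask is True.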
|
ebagdasa/tempest
|
refs/heads/master
|
tempest/api/data_processing/test_plugins.py
|
2
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.data_processing import base as dp_base
from tempest import config
from tempest import test
CONF = config.CONF
class PluginsTest(dp_base.BaseDataProcessingTest):
def _list_all_plugin_names(self):
"""Returns all enabled plugin names.
It ensures that the main plugins are available.
"""
_, plugins = self.client.list_plugins()
plugins_names = [plugin['name'] for plugin in plugins]
for enabled_plugin in CONF.data_processing_feature_enabled.plugins:
self.assertIn(enabled_plugin, plugins_names)
return plugins_names
@test.attr(type='smoke')
def test_plugin_list(self):
self._list_all_plugin_names()
@test.attr(type='smoke')
def test_plugin_get(self):
for plugin_name in self._list_all_plugin_names():
_, plugin = self.client.get_plugin(plugin_name)
self.assertEqual(plugin_name, plugin['name'])
for plugin_version in plugin['versions']:
_, detailed_plugin = self.client.get_plugin(plugin_name,
plugin_version)
self.assertEqual(plugin_name, detailed_plugin['name'])
# check that the required image tags contain the plugin name and version
image_tags = detailed_plugin['required_image_tags']
self.assertIn(plugin_name, image_tags)
self.assertIn(plugin_version, image_tags)
|
fe11x/pythondotorg
|
refs/heads/master
|
pages/search_indexes.py
|
14
|
from django.template.defaultfilters import truncatewords_html, striptags
from haystack import indexes
from .models import Page
class PageIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
path = indexes.CharField(model_attr='path')
include_template = indexes.CharField()
def get_model(self):
return Page
def prepare_include_template(self, obj):
return "search/includes/pages.page.html"
def prepare_description(self, obj):
""" Create a description if none exists """
if obj.description:
return obj.description
else:
return striptags(truncatewords_html(obj.content.rendered, 50))
def index_queryset(self, using=None):
""" Only index published pages """
return self.get_model().objects.filter(is_published=True)
|
caktus/rapidsms-appointments
|
refs/heads/master
|
appointments/tests/test_tasks.py
|
1
|
from __future__ import unicode_literals
import datetime
from .base import AppointmentDataTestCase, Appointment, Milestone, Notification, now
from ..tasks import generate_appointments, send_appointment_notifications, APPT_REMINDER
class GenerateAppointmentsTestCase(AppointmentDataTestCase):
"Task to generate future appointments"
def setUp(self):
self.timeline = self.create_timeline(name='Test', slug='foo')
self.offsets = [1, 3, 7, 14, 30]
for offset in self.offsets:
self.create_milestone(name='{0} day(s)'.format(offset), offset=offset, timeline=self.timeline)
self.sub = self.create_timeline_subscription(timeline=self.timeline)
self.cnx = self.sub.connection
def test_generate_appointments(self):
"Test the default task"
self.assertEqual(0, Appointment.objects.filter(subscription__connection=self.cnx).count())
generate_appointments()
self.assertEqual(4, Appointment.objects.filter(subscription__connection=self.cnx).count())
def test_generate_appointments_already_exists(self):
"The task should generate no appointments if the series already exists for the user"
self.cnx = self.sub.connection
for offset in self.offsets:
date = now() + datetime.timedelta(days=offset)
milestone = Milestone.objects.get(offset=offset)
self.create_appointment(subscription=self.sub, date=date, milestone=milestone)
self.assertEqual(5, Appointment.objects.filter(subscription__connection=self.cnx).count())
generate_appointments()
self.assertEqual(5, Appointment.objects.filter(subscription__connection=self.cnx).count())
def test_generate_appointments_out_of_range(self):
"The task should generate no appointments if the milestones are out of range"
Milestone.objects.all().delete()
offsets = [15, 17]
for offset in offsets:
self.create_milestone(name='{0} day(s)'.format(offset), offset=offset, timeline=self.timeline)
self.assertEqual(0, Appointment.objects.filter(subscription__connection=self.cnx).count())
generate_appointments()
self.assertEqual(0, Appointment.objects.filter(subscription__connection=self.cnx).count())
def test_generate_appointments_multiple_subscriptions(self):
"The task should generate appointments for all applicable subscriptions"
self.assertEqual(0, Appointment.objects.all().count())
self.create_timeline_subscription(timeline=self.timeline)
generate_appointments()
self.assertEqual(8, Appointment.objects.all().count())
def test_generate_appointments_for_n_days(self):
"The task should generate appointments when supplied N days as an argument"
self.assertEqual(0, Appointment.objects.all().count())
generate_appointments(30)
self.assertEqual(5, Appointment.objects.all().count())
class SendAppointmentNotificationsTestCase(AppointmentDataTestCase):
"Task to send notifications for upcoming Appointments"
def setUp(self):
self.backend = self.create_backend(name='mockbackend')
self.cnx = self.create_connection(backend=self.backend)
self.timeline = self.create_timeline()
self.subscription = self.create_timeline_subscription(connection=self.cnx, timeline=self.timeline)
self.appointment = self.create_appointment(subscription=self.subscription)
def create_milestone(self, **kwargs):
"Ensure milestones are created on the default timeline."
kwargs['timeline'] = self.timeline
return super(SendAppointmentNotificationsTestCase, self).create_milestone(**kwargs)
def test_send_notifications(self):
"Test the default task"
self.assertEqual(0, Notification.objects.filter(appointment=self.appointment).count())
send_appointment_notifications()
self.assertEqual(1, Notification.objects.filter(appointment=self.appointment).count())
msg = APPT_REMINDER % {'date': self.appointment.date}
self.assertEqual(self.outbound[0].text, msg)
self.assertEqual(self.outbound[0].connection, self.cnx)
def test_send_notifications_not_notified(self):
"The task should generate no notifications if a reminder has already been sent"
self.create_notification(appointment=self.appointment, status=1)
self.assertEqual(1, Notification.objects.filter(appointment=self.appointment).count())
send_appointment_notifications()
self.assertEqual(1, Notification.objects.filter(appointment=self.appointment).count())
self.assertEqual(0, len(self.outbound))
def test_send_notifications_out_of_range(self):
"The task should generate no notifications if the appointment(s) are out of range"
self.appointment.date = self.appointment.date + datetime.timedelta(days=10)
self.appointment.save()
self.assertEqual(0, Notification.objects.filter(appointment=self.appointment).count())
send_appointment_notifications()
self.assertEqual(0, Notification.objects.filter(appointment=self.appointment).count())
self.assertEqual(0, len(self.outbound))
def test_send_notifications_multiple_users(self):
"The task should generate notifications for all applicable appointments"
self.cnx2 = self.create_connection(identity='johndoe', backend=self.backend)
self.sub2 = self.create_timeline_subscription(connection=self.cnx2, timeline=self.timeline)
self.create_appointment(subscription=self.sub2)
self.assertEqual(0, Notification.objects.all().count())
send_appointment_notifications()
self.assertEqual(2, Notification.objects.all().count())
self.assertEqual(2, len(self.outbound))
def test_send_notifications_for_n_days(self):
"The task should generate appointments when supplied N days as an argument"
self.create_appointment(subscription=self.subscription, date=now() + datetime.timedelta(days=10))
self.assertEqual(0, Notification.objects.all().count())
send_appointment_notifications(30)
self.assertEqual(2, Notification.objects.all().count())
self.assertEqual(2, len(self.outbound))
|
matsumoto-r/synciga
|
refs/heads/master
|
src/talk/site_scons/site_tools/talk_noops.py
|
96
|
# Copyright 2010 Google Inc.
# All Rights Reserved.
# Author: thaloun@google.com (Tim Haloun)
"""Noop tool that defines builder functions for non-default platforms to
avoid errors when scanning SConscript files."""
import SCons.Builder
def generate(env):
"""SCons method."""
if not env.Bit('windows'):
builder = SCons.Builder.Builder(
action=''
)
env.Append(BUILDERS={'RES': builder, 'Grit': builder})
def exists(env):
return 1
|
Zarthus/Reconcile
|
refs/heads/master
|
.travis/test_module.py
|
1
|
"""
The MIT License (MIT)
Copyright (c) 2014 - 2015 Jos "Zarthus" Ahrens and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Validate a module
"""
import sys
import os
import re
check_modules = []
failcount = 0
checklist = []
errorlist = []
if len(sys.argv) == 1:
print("No module specified, checking them all.")
# Check the disabled modules; those that are not empty are appended to the list.
dmods = os.listdir("modules_disabled")
dismods = []
for dismod in dmods:
if os.stat(os.path.join("modules_disabled", dismod)).st_size:
dismods.append(dismod)
for file in sorted(os.listdir("modules") + dismods):
if (file == "__init__.py" or file.endswith(".pyc") or os.path.isdir(os.path.join("modules", file))):
continue
check_modules.append(file)
else:
for mod in sorted(sys.argv[1:]):
check_modules.append(mod)
def check_module(module_name):
if (not os.path.exists(os.path.join("modules", module_name)) and
not os.path.exists(os.path.join("modules_disabled", module_name))):
print("Module {} does not exist.".format(module_name))
return 1
requirements = {
"imports_template": False,
"extends_template": False,
"utilises_callback": False,
"utilises_logger": True, # Check against use of print() instead of self.logger.log
"does_not_call_logger_directly": True # Check if self.log over self.logger.log was used.
}
optional = {
"requires_api_key": False,
"registers_commands": False,
"has_configuration_block": False,
"uses_rate_limitation": False
}
confblock_func = []
ratelimited_commands = []
error_count = 0
f = None
if os.path.exists(os.path.join("modules", module_name)):
f = open(os.path.join("modules", module_name)).read().split("\n")
elif os.path.exists(os.path.join("modules_disabled", module_name)):
f = open(os.path.join("modules_disabled", module_name)).read().split("\n")
else:
print("Module {} does not exist".format(module_name))
return 1
print("\nChecking module {} for any errors".format(module_name))
ratelimit_regex = re.compile(r"self\.ratelimit\((.*)\)")
line_num = 0
for line in f:
line_num += 1
line = line.lower().strip()
if line.startswith("from core import moduletemplate"):
requirements["imports_template"] = True
if line.endswith("(moduletemplate.botmodule):"):
requirements["extends_template"] = True
if line.startswith("def on_"):
requirements["utilises_callback"] = True
if line.startswith("print("):
requirements["utilises_logger"] = False
if line.startswith("self.logger."):
requirements["does_not_call_logger_directly"] = False
if line.startswith("self.requireapikey"):
optional["requires_api_key"] = True
if line.startswith("self.register_command("):
optional["registers_commands"] = True
if ratelimit_regex.search(line):
optional["uses_rate_limitation"] = True
optional["has_configuration_block"] = True
cmdname = ""
params = ratelimit_regex.search(line).group(1)
if "," in params:
params = params.split(",")
if len(params) > 1:
cmdname = params[0]
confblock_func.append(params[1])
else:
cmdname = params[0]
confblock_func.append("rate_limit_delay")
else:
cmdname = params
confblock_func.append("rate_limit_delay")
if ((cmdname.startswith("\"") or cmdname.startswith("'")) and
(cmdname.endswith("\"") or cmdname.endswith("'"))):
ratelimited_commands.append(cmdname.replace("\"", "").replace("'", ""))
else:
ratelimited_commands.append("line:{} var:{}".format(line_num, cmdname))
if "self.module_data" in line:
optional["has_configuration_block"] = True
for word in line.split():
if word.startswith("self.module_data["):
funcname = ''
try:
funcname = word.split("\"")[1]
except Exception:
funcname = word.split("'")[1]
confblock_func.append(funcname)
ratelimited_commands = list(set(ratelimited_commands))
confblock_func = list(set(confblock_func))
for requirement in requirements.items():
if requirement[1]:
print(" [x] Requirement satisfied: {}".format(requirement[0].replace("_", " ")))
else:
print(" [ ] Requirement NOT met: {}".format(requirement[0].replace("_", " ")))
error_count += 1
for opt in optional.items():
if opt[1]:
print(" [x] Optional checks: This module: {}".format(opt[0].replace("_", " ")))
for func in confblock_func:
print(" Introduces module block configuration: {}".format(func))
for cmd in ratelimited_commands:
print(" Ratelimitation on: {}".format(cmd))
print("check_module('{}') ran with {} errors.\n"
.format(module_name, error_count))
return error_count
for module in check_modules:
if not module.endswith(".py"):
module += ".py"
check = check_module(module)
if check > 0:
failcount += check
errorlist.append(module)
checklist.append(module)
print("A total of {}/{} modules errored with a total of {} errors.".format(len(errorlist), len(checklist), failcount))
print("The following modules were checked: " + str(checklist))
print("The following modules were errored: " + str(errorlist) + "\n")
sys.exit(failcount)
|
smolix/incubator-mxnet
|
refs/heads/master
|
tools/coreml/converter/utils.py
|
46
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
def load_model(model_name, epoch_num, data_shapes, label_shapes, label_names, gpus=''):
"""Returns a module loaded with the provided model.
Parameters
----------
model_name: str
Prefix of the MXNet model name as stored on the local directory.
epoch_num : int
Epoch number of model we would like to load.
input_shape: tuple
The shape of the input data in the form of (batch_size, channels, height, width)
files: list of strings
List of URLs pertaining to files that need to be downloaded in order to use the model.
data_shapes: list of tuples.
List of tuples where each tuple is a pair of input variable name and its shape.
label_shapes: list of (str, tuple)
Typically is ``data_iter.provide_label``.
label_names: list of str
Name of the output labels in the MXNet symbolic graph.
gpus: str
Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.
If empty, we use CPU.
Returns
-------
MXNet module
"""
sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, epoch_num)
mod = create_module(sym, data_shapes, label_shapes, label_names, gpus)
mod.set_params(
arg_params=arg_params,
aux_params=aux_params,
allow_missing=True
)
return mod
def create_module(sym, data_shapes, label_shapes, label_names, gpus=''):
"""Creates a new MXNet module.
Parameters
----------
sym : Symbol
An MXNet symbol.
input_shape: tuple
The shape of the input data in the form of (batch_size, channels, height, width)
files: list of strings
List of URLs pertaining to files that need to be downloaded in order to use the model.
data_shapes: list of tuples.
List of tuples where each tuple is a pair of input variable name and its shape.
label_shapes: list of (str, tuple)
Typically is ``data_iter.provide_label``.
label_names: list of str
Name of the output labels in the MXNet symbolic graph.
gpus: str
Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.
If empty, we use CPU.
Returns
-------
MXNet module
"""
if gpus == '':
devices = mx.cpu()
else:
devices = [mx.gpu(int(i)) for i in gpus.split(',')]
data_names = [data_shape[0] for data_shape in data_shapes]
mod = mx.mod.Module(
symbol=sym,
data_names=data_names,
context=devices,
label_names=label_names
)
mod.bind(
for_training=False,
data_shapes=data_shapes,
label_shapes=label_shapes
)
return mod
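# Illustrative usage sketch (hypothetical prefix and shapes, not part of the
# original file): load epoch 0 of a checkpoint saved under the prefix
# "mymodel" and bind it for batch-1 CPU inference.
#
#   mod = load_model(
#       model_name="mymodel",                       # hypothetical prefix
#       epoch_num=0,
#       data_shapes=[("data", (1, 3, 224, 224))],   # hypothetical input shape
#       label_shapes=None,
#       label_names=["softmax_label"],
#       gpus="")                                    # empty string -> mx.cpu()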
|
israeltobias/DownMedia
|
refs/heads/master
|
youtube-dl/youtube_dl/extractor/flickr.py
|
37
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
int_or_none,
qualities,
)
class FlickrIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/[\w\-_@]+/(?P<id>\d+)'
_TEST = {
'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
'md5': '164fe3fa6c22e18d448d4d5af2330f31',
'info_dict': {
'id': '5645318632',
'ext': 'mpg',
'description': 'Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.',
'title': 'Dark Hollow Waterfalls',
'duration': 19,
'timestamp': 1303528740,
'upload_date': '20110423',
'uploader_id': '10922353@N03',
'uploader': 'Forest Wander',
'uploader_url': 'https://www.flickr.com/photos/forestwander-nature-pictures/',
'comment_count': int,
'view_count': int,
'tags': list,
'license': 'Attribution-ShareAlike',
}
}
_API_BASE_URL = 'https://api.flickr.com/services/rest?'
# https://help.yahoo.com/kb/flickr/SLN25525.html
_LICENSES = {
'0': 'All Rights Reserved',
'1': 'Attribution-NonCommercial-ShareAlike',
'2': 'Attribution-NonCommercial',
'3': 'Attribution-NonCommercial-NoDerivs',
'4': 'Attribution',
'5': 'Attribution-ShareAlike',
'6': 'Attribution-NoDerivs',
'7': 'No known copyright restrictions',
'8': 'United States government work',
'9': 'Public Domain Dedication (CC0)',
'10': 'Public Domain Work',
}
def _call_api(self, method, video_id, api_key, note, secret=None):
query = {
'photo_id': video_id,
'method': 'flickr.%s' % method,
'api_key': api_key,
'format': 'json',
'nojsoncallback': 1,
}
if secret:
query['secret'] = secret
data = self._download_json(self._API_BASE_URL + compat_urllib_parse_urlencode(query), video_id, note)
if data['stat'] != 'ok':
raise ExtractorError(data['message'])
return data
def _real_extract(self, url):
video_id = self._match_id(url)
api_key = self._download_json(
'https://www.flickr.com/hermes_error_beacon.gne', video_id,
'Downloading api key')['site_key']
video_info = self._call_api(
'photos.getInfo', video_id, api_key, 'Downloading video info')['photo']
if video_info['media'] == 'video':
streams = self._call_api(
'video.getStreamInfo', video_id, api_key,
'Downloading streams info', video_info['secret'])['streams']
preference = qualities(
['288p', 'iphone_wifi', '100', '300', '700', '360p', 'appletv', '720p', '1080p', 'orig'])
formats = []
for stream in streams['stream']:
stream_type = str(stream.get('type'))
formats.append({
'format_id': stream_type,
'url': stream['_content'],
'preference': preference(stream_type),
})
self._sort_formats(formats)
owner = video_info.get('owner', {})
uploader_id = owner.get('nsid')
uploader_path = owner.get('path_alias') or uploader_id
uploader_url = 'https://www.flickr.com/photos/%s/' % uploader_path if uploader_path else None
return {
'id': video_id,
'title': video_info['title']['_content'],
'description': video_info.get('description', {}).get('_content'),
'formats': formats,
'timestamp': int_or_none(video_info.get('dateuploaded')),
'duration': int_or_none(video_info.get('video', {}).get('duration')),
'uploader_id': uploader_id,
'uploader': owner.get('realname'),
'uploader_url': uploader_url,
'comment_count': int_or_none(video_info.get('comments', {}).get('_content')),
'view_count': int_or_none(video_info.get('views')),
'tags': [tag.get('_content') for tag in video_info.get('tags', {}).get('tag', [])],
'license': self._LICENSES.get(video_info.get('license')),
}
else:
raise ExtractorError('not a video', expected=True)
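# Flow summary (comment added for clarity): _real_extract() first fetches a
# site key from hermes_error_beacon.gne, then calls flickr.photos.getInfo for
# metadata and, for videos, flickr.video.getStreamInfo for the stream URLs,
# which are ranked with the qualities() preference list defined above.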
|
janusnic/21v-python
|
refs/heads/master
|
unit_12/pad/p1.py
|
1
|
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
class Main(QtGui.QMainWindow):
def __init__(self, parent = None):
QtGui.QMainWindow.__init__(self,parent)
self.initUI()
def initUI(self):
# x and y coordinates on the screen, width, height
self.setGeometry(100,100,1030,800)
self.setWindowTitle("Writer")
def main():
app = QtGui.QApplication(sys.argv)
main = Main()
main.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
171121130/SWI
|
refs/heads/master
|
venv/Lib/site-packages/pymysql/tests/test_converters.py
|
19
|
import datetime
from unittest import TestCase
from pymysql._compat import PY2
from pymysql import converters
__all__ = ["TestConverter"]
class TestConverter(TestCase):
def test_escape_string(self):
self.assertEqual(
converters.escape_string(u"foo\nbar"),
u"foo\\nbar"
)
if PY2:
def test_escape_string_bytes(self):
self.assertEqual(
converters.escape_string(b"foo\nbar"),
b"foo\\nbar"
)
def test_convert_datetime(self):
expected = datetime.datetime(2007, 2, 24, 23, 6, 20)
dt = converters.convert_datetime('2007-02-24 23:06:20')
self.assertEqual(dt, expected)
def test_convert_datetime_with_fsp(self):
expected = datetime.datetime(2007, 2, 24, 23, 6, 20, 511581)
dt = converters.convert_datetime('2007-02-24 23:06:20.511581')
self.assertEqual(dt, expected)
def _test_convert_timedelta(self, with_negate=False, with_fsp=False):
d = {'hours': 789, 'minutes': 12, 'seconds': 34}
s = '%(hours)s:%(minutes)s:%(seconds)s' % d
if with_fsp:
d['microseconds'] = 511581
s += '.%(microseconds)s' % d
expected = datetime.timedelta(**d)
if with_negate:
expected = -expected
s = '-' + s
tdelta = converters.convert_timedelta(s)
self.assertEqual(tdelta, expected)
def test_convert_timedelta(self):
self._test_convert_timedelta(with_negate=False, with_fsp=False)
self._test_convert_timedelta(with_negate=True, with_fsp=False)
def test_convert_timedelta_with_fsp(self):
self._test_convert_timedelta(with_negate=False, with_fsp=True)
        self._test_convert_timedelta(with_negate=True, with_fsp=True)
def test_convert_time(self):
expected = datetime.time(23, 6, 20)
time_obj = converters.convert_time('23:06:20')
self.assertEqual(time_obj, expected)
def test_convert_time_with_fsp(self):
expected = datetime.time(23, 6, 20, 511581)
time_obj = converters.convert_time('23:06:20.511581')
self.assertEqual(time_obj, expected)
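# Editor's sketch (not part of the original test module): the converters exercised
# above can also be called directly; the interactive session below simply mirrors
# the expected values asserted in the tests.
#
#     >>> from pymysql import converters
#     >>> converters.escape_string(u"foo\nbar")
#     u'foo\\nbar'
#     >>> converters.convert_datetime('2007-02-24 23:06:20')
#     datetime.datetime(2007, 2, 24, 23, 6, 20)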
|
zuotingbing/spark
|
refs/heads/master
|
python/pyspark/sql/tests/test_dataframe.py
|
2
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pydoc
import time
import unittest
from pyspark.sql import SparkSession, Row
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException, IllegalArgumentException
from pyspark.testing.sqlutils import ReusedSQLTestCase, SQLTestUtils, have_pyarrow, have_pandas, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
class DataFrameTests(ReusedSQLTestCase):
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange("name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail if when received unexpected type
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
with self.assertRaisesRegexp(
TypeError,
'value argument is required when to_replace is not a dictionary.'):
self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
# add tests for SPARK-23647 (test more types for hint)
def test_extended_hint_types(self):
from pyspark.sql import DataFrame
df = self.spark.range(10e10).toDF("id")
such_a_nice_list = ["itworks1", "itworks2", "itworks3"]
hinted_df = df.hint("my awesome hint", 1.2345, "what", such_a_nice_list)
logical_plan = hinted_df._jdf.queryExecution().logical()
self.assertEqual(1, logical_plan.toString().count("1.2345"))
self.assertEqual(1, logical_plan.toString().count("what"))
self.assertEqual(3, logical_plan.toString().count("itworks"))
def test_sample(self):
self.assertRaisesRegexp(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for invalid join methods when on is None, Spark-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_cache(self):
spark = self.spark
with self.tempView("tab1", "tab2"):
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
self.assertEquals(types[4], np.object) # datetime.date
self.assertEquals(types[5], 'datetime64[ns]')
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_with_duplicated_column_names(self):
import numpy as np
sql = "select 1 v, 1 v"
for arrowEnabled in [False, True]:
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrowEnabled}):
df = self.spark.sql(sql)
pdf = df.toPandas()
types = pdf.dtypes
self.assertEquals(types.iloc[0], np.int32)
self.assertEquals(types.iloc[1], np.int32)
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_on_cross_join(self):
import numpy as np
sql = """
select t1.*, t2.* from (
select explode(sequence(1, 3)) v
) t1 left join (
select explode(sequence(1, 3)) v
) t2
"""
for arrowEnabled in [False, True]:
with self.sql_conf({"spark.sql.crossJoin.enabled": True,
"spark.sql.execution.arrow.pyspark.enabled": arrowEnabled}):
df = self.spark.sql(sql)
pdf = df.toPandas()
types = pdf.dtypes
self.assertEquals(types.iloc[0], np.int32)
self.assertEquals(types.iloc[1], np.int32)
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.float64)
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_empty_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on an empty dataframe has the correct dtypes
import numpy as np
sql = """
SELECT CAST(1 AS TINYINT) AS tinyint,
CAST(1 AS SMALLINT) AS smallint,
CAST(1 AS INT) AS int,
CAST(1 AS BIGINT) AS bigint,
CAST(0 AS FLOAT) AS float,
CAST(0 AS DOUBLE) AS double,
CAST(1 AS BOOLEAN) AS boolean,
CAST('foo' AS STRING) AS string,
CAST('2019-01-01' AS TIMESTAMP) AS timestamp
"""
dtypes_when_nonempty_df = self.spark.sql(sql).toPandas().dtypes
dtypes_when_empty_df = self.spark.sql(sql).filter("False").toPandas().dtypes
self.assertTrue(np.all(dtypes_when_empty_df == dtypes_when_nonempty_df))
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_null_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on a dataframe with only nulls has correct dtypes
import numpy as np
sql = """
SELECT CAST(NULL AS TINYINT) AS tinyint,
CAST(NULL AS SMALLINT) AS smallint,
CAST(NULL AS INT) AS int,
CAST(NULL AS BIGINT) AS bigint,
CAST(NULL AS FLOAT) AS float,
CAST(NULL AS DOUBLE) AS double,
CAST(NULL AS BOOLEAN) AS boolean,
CAST(NULL AS STRING) AS string,
CAST(NULL AS TIMESTAMP) AS timestamp
"""
pdf = self.spark.sql(sql).toPandas()
types = pdf.dtypes
self.assertEqual(types[0], np.float64)
self.assertEqual(types[1], np.float64)
self.assertEqual(types[2], np.float64)
self.assertEqual(types[3], np.float64)
self.assertEqual(types[4], np.float32)
self.assertEqual(types[5], np.float64)
self.assertEqual(types[6], np.object)
self.assertEqual(types[7], np.object)
self.assertTrue(np.can_cast(np.datetime64, types[8]))
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_mixed_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on a dataframe with some nulls has correct dtypes
import numpy as np
sql = """
SELECT CAST(col1 AS TINYINT) AS tinyint,
CAST(col2 AS SMALLINT) AS smallint,
CAST(col3 AS INT) AS int,
CAST(col4 AS BIGINT) AS bigint,
CAST(col5 AS FLOAT) AS float,
CAST(col6 AS DOUBLE) AS double,
CAST(col7 AS BOOLEAN) AS boolean,
CAST(col8 AS STRING) AS string,
CAST(col9 AS TIMESTAMP) AS timestamp
FROM VALUES (1, 1, 1, 1, 1, 1, 1, 1, 1),
(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
"""
pdf_with_some_nulls = self.spark.sql(sql).toPandas()
pdf_with_only_nulls = self.spark.sql(sql).filter('tinyint is null').toPandas()
self.assertTrue(np.all(pdf_with_only_nulls.dtypes == pdf_with_some_nulls.dtypes))
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]}, columns=["d", "ts"])
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test with schema will accept pdf as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_create_dataframe_from_pandas_with_dst(self):
import pandas as pd
from pandas.util.testing import assert_frame_equal
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
finally:
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
def test_repr_behaviors(self):
import re
pattern = re.compile(r'^ *\|', re.MULTILINE)
df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
# test when eager evaluation is enabled and _repr_html_ will not be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """+-----+-----+
|| key|value|
|+-----+-----+
|| 1| 1|
||22222|22222|
|+-----+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected1), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
||222| 222|
|+---+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected2), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df.__repr__())
# test when eager evaluation is enabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>22222</td><td>22222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected1), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>222</td><td>222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected2), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|</table>
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df._repr_html_())
# test when eager evaluation is disabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
expected = "DataFrame[key: bigint, value: string]"
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
def test_to_local_iterator(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator()
self.assertEqual(expected, list(it))
# Test DataFrame with empty partition
df = self.spark.range(3, numPartitions=4)
it = df.toLocalIterator()
expected = df.collect()
self.assertEqual(expected, list(it))
def test_to_local_iterator_prefetch(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator(prefetchPartitions=True)
self.assertEqual(expected, list(it))
def test_to_local_iterator_not_fully_consumed(self):
# SPARK-23961: toLocalIterator throws exception when not fully consumed
# Create a DataFrame large enough so that write to socket will eventually block
df = self.spark.range(1 << 20, numPartitions=2)
it = df.toLocalIterator()
self.assertEqual(df.take(1)[0], next(it))
with QuietTest(self.sc):
it = None # remove iterator from scope, socket is closed when cleaned up
# Make sure normal df operations still work
result = []
for i, row in enumerate(df.toLocalIterator()):
result.append(row)
if i == 7:
break
self.assertEqual(df.take(8), result)
def test_same_semantics_error(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, "should be of DataFrame.*int"):
self.spark.range(10).sameSemantics(1)
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
# These tests are separate because it uses 'spark.sql.queryExecutionListeners' which is
# static and immutable. This can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
if __name__ == "__main__":
from pyspark.sql.tests.test_dataframe import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
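# Editor's sketch (not part of the original suite): the dropna()/fillna() behaviour
# covered by DataFrameTests, shown as a standalone PySpark snippet. The app name,
# schema string, and column values are assumptions chosen to mirror the tests above.
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.appName("dropna-fillna-demo").getOrCreate()
#     df = spark.createDataFrame([(u'Alice', None, 80.1)],
#                                "name string, age int, height double")
#     df.dropna(how='any').count()   # 0 -- dropped, at least one value is null
#     df.dropna(how='all').count()   # 1 -- kept, not every value is null
#     df.fillna(50).first().age      # 50 -- numeric nulls filled with the literal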
|
fdr/double-boiler
|
refs/heads/master
|
hellodjango/__init__.py
|
12133432
| |
vvtam/virtualenv
|
refs/heads/develop
|
virtualenv_support/__init__.py
|
12133432
| |
ctrlaltdel/neutrinator
|
refs/heads/master
|
vendor/dogpile/__init__.py
|
3
|
__version__ = '0.7.1'
from .lock import Lock # noqa
from .lock import NeedRegenerationException # noqa
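# Editor's note (not part of the original package): Lock implements the "dogpile"
# pattern -- when a cached value expires, a single thread holds the lock and
# regenerates the value while other threads continue serving the stale one;
# NeedRegenerationException is raised by the value-retrieval callable to signal
# that no usable value exists yet and generation must happen before returning.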
|
xvedejas/limbo4
|
refs/heads/master
|
Lib/unittest/test/testmock/testpatch.py
|
4
|
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
import unittest
from unittest.test.testmock import support
from unittest.test.testmock.support import SomeClass, is_instance
from unittest.mock import (
NonCallableMock, CallableMixin, patch, sentinel,
    MagicMock, Mock, NonCallableMagicMock, _patch,
DEFAULT, call, _get_target
)
builtin_string = 'builtins'
PTModule = sys.modules[__name__]
MODNAME = '%s.PTModule' % __name__
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
# for use in the test
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
foo_name = '%s.Foo' % __name__
def function(a, b=Foo):
pass
class Container(object):
def __init__(self):
self.values = {}
def __getitem__(self, name):
return self.values[name]
def __setitem__(self, name, value):
self.values[name] = value
def __delitem__(self, name):
del self.values[name]
def __iter__(self):
return iter(self.values)
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
        test()
        self.assertEqual(PTModule.something, sentinel.Something,
                         "patch not restored")
        self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
                         "patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
            self.assertIsInstance(mock1, MagicMock,
                "patch with two arguments did not create a mock")
            self.assertIsInstance(mock2, MagicMock,
                "patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
            self.assertNotEqual(outerMock2, mock2, "unexpected value for mock2")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_object_with_spec_as_boolean(self):
@patch.object(PTModule, 'SomeClass', spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_class_acts_with_spec_is_inherited(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertTrue(is_instance(MockSomeClass, MagicMock))
instance = MockSomeClass()
self.assertNotCallable(instance)
# Should not raise attribute error
instance.wibble
self.assertRaises(AttributeError, lambda: instance.not_wibble)
test()
def test_patch_with_create_mocks_non_existent_attributes(self):
@patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
@patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
def test():
self.assertEqual(SomeClass.frooble, sentinel.Frooble)
test()
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
try:
@patch.object(SomeClass, 'frooble', sentinel.Frooble)
def test():
self.fail('Patching non existent attributes should fail')
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_with_static_methods(self):
class Foo(object):
@staticmethod
def woot():
return sentinel.Static
@patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
def anonymous():
self.assertEqual(Foo.woot(), sentinel.Patched)
anonymous()
self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
foo = sentinel.Foo
@patch.object(sentinel, 'Foo', 'Foo')
def anonymous():
self.assertEqual(sentinel.Foo, 'Foo')
anonymous()
self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
class Foo(object):
__slots__ = ('Foo',)
foo = Foo()
foo.Foo = sentinel.Foo
@patch.object(foo, 'Foo', 'Foo')
def anonymous():
self.assertEqual(foo.Foo, 'Foo')
anonymous()
self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
def not_test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Original,
"non-test method patched")
Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self, mock_something):
self.assertEqual(PTModule.something, mock_something,
"unpatched")
def not_test_method(other_self):
self.assertEqual(PTModule.something, sentinel.Something,
"non-test method patched")
Foo = patch('%s.something' % __name__)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
def test_patchobject_twice(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_dict(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')])
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo.values), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')], clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo.values, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo.values, {'a': 'b'})
test()
self.assertEqual(foo.values, original)
def test_name_preserved(self):
foo = {}
@patch('%s.SomeClass' % __name__, object())
@patch('%s.SomeClass' % __name__, object(), autospec=True)
@patch.object(SomeClass, object())
@patch.dict(foo)
def some_name():
pass
self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
foo = {}
@patch.dict(foo, {'a': 'b'})
def test():
raise NameError('Konrad')
try:
test()
except NameError:
pass
else:
self.fail('NameError not raised by test')
self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
@patch.dict('os.environ', {'konrad_delong': 'some value'})
def test():
self.assertIn('konrad_delong', os.environ)
test()
def test_patch_descriptor(self):
# would be some effort to fix this - we could special case the
# builtin descriptors: classmethod, property, staticmethod
return
class Nothing(object):
foo = None
class Something(object):
foo = {}
@patch.object(Nothing, 'foo', 2)
@classmethod
def klass(cls):
self.assertIs(cls, Something)
@patch.object(Nothing, 'foo', 2)
@staticmethod
def static(arg):
return arg
@patch.dict(foo)
@classmethod
def klass_dict(cls):
self.assertIs(cls, Something)
@patch.dict(foo)
@staticmethod
def static_dict(arg):
return arg
# these will raise exceptions if patching descriptors is broken
self.assertEqual(Something.static('f00'), 'f00')
Something.klass()
self.assertEqual(Something.static_dict('f00'), 'f00')
Something.klass_dict()
something = Something()
self.assertEqual(something.static('f00'), 'f00')
something.klass()
self.assertEqual(something.static_dict('f00'), 'f00')
something.klass_dict()
def test_patch_spec_set(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
instance = MockClass()
instance.z = 'foo'
self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
original = something
patcher = patch('%s.something' % __name__)
self.assertIs(something, original)
mock = patcher.start()
try:
self.assertIsNot(mock, original)
self.assertIs(something, mock)
finally:
patcher.stop()
self.assertIs(something, original)
def test_stop_without_start(self):
patcher = patch(foo_name, 'bar', 3)
# calling stop without start used to produce a very obscure error
self.assertRaises(RuntimeError, patcher.stop)
def test_patchobject_start_stop(self):
original = something
patcher = patch.object(PTModule, 'something', 'foo')
self.assertIs(something, original)
replaced = patcher.start()
try:
self.assertEqual(replaced, 'foo')
self.assertIs(something, replaced)
finally:
patcher.stop()
self.assertIs(something, original)
def test_patch_dict_start_stop(self):
d = {'foo': 'bar'}
original = d.copy()
patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
self.assertEqual(d, original)
patcher.start()
try:
self.assertEqual(d, {'spam': 'eggs'})
finally:
patcher.stop()
self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
this = self
d = {'spam': 'eggs'}
original = d.copy()
class Test(object):
def test_first(self):
this.assertEqual(d, {'foo': 'bar'})
def test_second(self):
this.assertEqual(d, {'foo': 'bar'})
Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
def test_get_only_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
        for thing in Something, SomethingElse, Something(), SomethingElse():
proxy = _get_proxy(thing)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
        for thing in Something, SomethingElse, Something(), SomethingElse():
            proxy = _get_proxy(thing, get_only=False)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch(foo_name, **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch.object(Foo, 'f', **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
original = {'foo': 'bar'}
copy = original.copy()
patcher = patch.dict(original, foo=3, bar=4, baz=5)
patcher.start()
try:
self.assertEqual(original, dict(foo=3, bar=4, baz=5))
finally:
patcher.stop()
self.assertEqual(original, copy)
def test_autospec(self):
class Boo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
def _test(mock):
mock(1)
mock.assert_called_with(1)
self.assertRaises(TypeError, mock)
def _test2(mock):
mock.f(1)
mock.f.assert_called_with(1)
self.assertRaises(TypeError, mock.f)
mock.g()
mock.g.assert_called_with()
self.assertRaises(TypeError, mock.g, 1)
self.assertRaises(AttributeError, getattr, mock, 'h')
mock.foo.lower()
mock.foo.lower.assert_called_with()
self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
mock.Bar()
mock.Bar.assert_called_with()
mock.Bar.a()
mock.Bar.a.assert_called_with()
self.assertRaises(TypeError, mock.Bar.a, 1)
mock.Bar().a()
mock.Bar().a.assert_called_with()
self.assertRaises(TypeError, mock.Bar().a, 1)
self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
def function(mock):
_test(mock)
_test2(mock)
_test2(mock(1))
self.assertIs(mock, Foo)
return mock
test = patch(foo_name, autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
module = sys.modules[__name__]
test = patch.object(module, 'Foo', autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
def test_autospec_function(self):
@patch('%s.function' % __name__, autospec=True)
def test(mock):
function(1)
function.assert_called_with(1)
function(2, 3)
function.assert_called_with(2, 3)
self.assertRaises(TypeError, function)
self.assertRaises(AttributeError, getattr, function, 'foo')
test()
def test_autospec_keywords(self):
@patch('%s.function' % __name__, autospec=True,
return_value=3)
def test(mock_function):
#self.assertEqual(function.abc, 'foo')
return function(1, 2)
result = test()
self.assertEqual(result, 3)
def test_autospec_with_new(self):
patcher = patch('%s.function' % __name__, new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
module = sys.modules[__name__]
patcher = patch.object(module, 'function', new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
class Bar(Foo):
extra = []
patcher = patch(foo_name, autospec=Bar)
mock = patcher.start()
try:
self.assertIsInstance(mock, Bar)
self.assertIsInstance(mock.extra, list)
finally:
patcher.stop()
def test_autospec_inherits(self):
FooClass = Foo
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIsInstance(mock, FooClass)
self.assertIsInstance(mock(3), FooClass)
finally:
patcher.stop()
def test_autospec_name(self):
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIn(" name='Foo'", repr(mock))
self.assertIn(" name='Foo.f'", repr(mock.f))
self.assertIn(" name='Foo()'", repr(mock(None)))
self.assertIn(" name='Foo().f'", repr(mock(None).f))
finally:
patcher.stop()
def test_tracebacks(self):
@patch.object(Foo, 'f', object())
def test():
raise AssertionError
try:
test()
except:
err = sys.exc_info()
result = unittest.TextTestResult(None, None, 0)
traceback = result._exc_info_to_string(err, self)
self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
patcher = patch(foo_name, new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
            self.assertNotCallable(mock)
def test_new_callable_patch_object(self):
patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
            self.assertNotCallable(mock)
def test_new_callable_keyword_arguments(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
m = patcher.start()
try:
self.assertIs(type(m), Bar)
self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
finally:
patcher.stop()
def test_new_callable_spec(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, spec=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec=Bar))
finally:
patcher.stop()
patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
finally:
patcher.stop()
def test_new_callable_create(self):
non_existent_attr = '%s.weeeee' % foo_name
p = patch(non_existent_attr, new_callable=NonCallableMock)
self.assertRaises(AttributeError, p.start)
p = patch(non_existent_attr, new_callable=NonCallableMock,
create=True)
m = p.start()
try:
self.assertNotCallable(m, magic=False)
finally:
p.stop()
def test_new_callable_incompatible_with_new(self):
self.assertRaises(
ValueError, patch, foo_name, new=object(), new_callable=MagicMock
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new=object(),
new_callable=MagicMock
)
def test_new_callable_incompatible_with_autospec(self):
self.assertRaises(
ValueError, patch, foo_name, new_callable=MagicMock,
autospec=True
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
autospec=True
)
def test_new_callable_inherit_for_mocks(self):
class MockSub(Mock):
pass
MockClasses = (
NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
)
for Klass in MockClasses:
for arg in 'spec', 'spec_set':
kwargs = {arg: True}
p = patch(foo_name, new_callable=Klass, **kwargs)
m = p.start()
try:
instance = m.return_value
self.assertRaises(AttributeError, getattr, instance, 'x')
finally:
p.stop()
def test_new_callable_inherit_non_mock(self):
class NotAMock(object):
def __init__(self, spec):
self.spec = spec
p = patch(foo_name, new_callable=NotAMock, spec=True)
m = p.start()
try:
self.assertTrue(is_instance(m, NotAMock))
self.assertRaises(AttributeError, getattr, m, 'return_value')
finally:
p.stop()
self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
test = self
original = Foo
class SomeTest(object):
def _test(self, mock_foo):
test.assertIsNot(Foo, original)
test.assertIs(Foo, mock_foo)
test.assertIsInstance(Foo, SomeClass)
def test_two(self, mock_foo):
self._test(mock_foo)
def test_one(self, mock_foo):
self._test(mock_foo)
SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
SomeTest().test_one()
SomeTest().test_two()
self.assertIs(Foo, original)
def test_patch_multiple(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher1 = patch.multiple(foo_name, f=1, g=2)
patcher2 = patch.multiple(Foo, f=1, g=2)
for patcher in patcher1, patcher2:
patcher.start()
try:
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 1)
self.assertEqual(Foo.g, 2)
finally:
patcher.stop()
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
@patch.multiple(foo_name, f=3, g=4)
def test():
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 3)
self.assertEqual(Foo.g, 4)
test()
def test_patch_multiple_no_kwargs(self):
self.assertRaises(ValueError, patch.multiple, foo_name)
self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
def test(f, foo):
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertEqual(Foo.g, 3)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
# bug revealed by Jython!
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 3)
patcher.attribute_name = 'f'
other = patch.object(Foo, 'g', DEFAULT)
other.attribute_name = 'g'
patcher.additional_patchers = [other]
@patcher
def test(g):
self.assertIs(Foo.g, g)
self.assertEqual(Foo.f, 3)
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
@patch(foo_name + '.g')
def test1(g, **kwargs):
_test(g, **kwargs)
@patch.multiple(foo_name, f=DEFAULT)
@patch(foo_name + '.g')
@patch.multiple(foo_name, foo=DEFAULT)
def test2(g, **kwargs):
_test(g, **kwargs)
@patch(foo_name + '.g')
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
def test3(g, **kwargs):
_test(g, **kwargs)
def _test(g, **kwargs):
f = kwargs.pop('f')
foo = kwargs.pop('foo')
self.assertFalse(kwargs)
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.g, g)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(g, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test1()
test2()
test3()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
result = patcher.start()
try:
f = result['f']
foo = result['foo']
self.assertEqual(set(result), set(['f', 'foo']))
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
finally:
patcher.stop()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
test = self
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
class SomeTest(object):
def _test(self, f, foo):
test.assertIs(Foo, original_foo)
test.assertIs(Foo.f, f)
test.assertEqual(Foo.g, 3)
test.assertIs(Foo.foo, foo)
test.assertTrue(is_instance(f, MagicMock))
test.assertTrue(is_instance(foo, MagicMock))
def test_two(self, f, foo):
self._test(f, foo)
def test_one(self, f, foo):
self._test(f, foo)
SomeTest = patch.multiple(
foo_name, f=DEFAULT, g=3, foo=DEFAULT
)(SomeTest)
thing = SomeTest()
thing.test_one()
thing.test_two()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
patcher = patch.multiple(Foo, blam='blam')
self.assertRaises(AttributeError, patcher.start)
patcher = patch.multiple(Foo, blam='blam', create=True)
patcher.start()
try:
self.assertEqual(Foo.blam, 'blam')
finally:
patcher.stop()
self.assertFalse(hasattr(Foo, 'blam'))
def test_patch_multiple_spec_set(self):
# if spec_set works then we can assume that spec and autospec also
# work as the underlying machinery is the same
patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
result = patcher.start()
try:
self.assertEqual(Foo.foo, result['foo'])
Foo.foo.a(1)
Foo.foo.b(2)
Foo.foo.a.assert_called_with(1)
Foo.foo.b.assert_called_with(2)
self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
finally:
patcher.stop()
def test_patch_multiple_new_callable(self):
class Thing(object):
pass
patcher = patch.multiple(
Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
)
result = patcher.start()
try:
self.assertIs(Foo.f, result['f'])
self.assertIs(Foo.g, result['g'])
self.assertIsInstance(Foo.f, Thing)
self.assertIsInstance(Foo.g, Thing)
self.assertIsNot(Foo.f, Foo.g)
finally:
patcher.stop()
def test_nested_patch_failure(self):
original_f = Foo.f
original_g = Foo.g
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'missing', 1)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'missing', 1)
bad.attribute_name = 'missing'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'foo', new_callable=crasher)
bad.attribute_name = 'foo'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
Foo = type('Foo', (str,), {'fish': 'tasty'})
foo = Foo()
@patch.multiple(foo, fish='nearly gone')
def test():
self.assertEqual(foo.fish, 'nearly gone')
test()
self.assertEqual(foo.fish, 'tasty')
@patch('unittest.mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
class Foo(object):
thing = 'original'
def foo_one(self):
return self.thing
def foo_two(self):
return self.thing
def test_one(self):
return self.thing
def test_two(self):
return self.thing
Foo = patch.object(Foo, 'thing', 'changed')(Foo)
foo = Foo()
self.assertEqual(foo.foo_one(), 'changed')
self.assertEqual(foo.foo_two(), 'changed')
self.assertEqual(foo.test_one(), 'original')
self.assertEqual(foo.test_two(), 'original')
@patch('unittest.mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
class Foo(object):
def bar_one(self):
return dict(the_dict)
def bar_two(self):
return dict(the_dict)
def test_one(self):
return dict(the_dict)
def test_two(self):
return dict(the_dict)
the_dict = {'key': 'original'}
Foo = patch.dict(the_dict, key='changed')(Foo)
        foo = Foo()
self.assertEqual(foo.bar_one(), {'key': 'changed'})
self.assertEqual(foo.bar_two(), {'key': 'changed'})
self.assertEqual(foo.test_one(), {'key': 'original'})
self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
self.assertIn(" name='SomeClass'", repr(m))
self.assertIn(" name='SomeClass.class_attribute'",
repr(m.class_attribute))
self.assertIn(" name='SomeClass()'", repr(m()))
self.assertIn(" name='SomeClass().class_attribute'",
repr(m().class_attribute))
finally:
p.stop()
def test_patch_nested_autospec_repr(self):
with patch('unittest.test.testmock.support', autospec=True) as m:
self.assertIn(" name='support.SomeClass.wibble()'",
repr(m.SomeClass.wibble()))
self.assertIn(" name='support.SomeClass().wibble()'",
repr(m.SomeClass().wibble()))
def test_mock_calls_with_patch(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
m.wibble()
kalls = [call.wibble()]
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(m.method_calls, kalls)
self.assertEqual(m.wibble.mock_calls, [call()])
result = m()
kalls.append(call())
self.assertEqual(m.mock_calls, kalls)
result.wibble()
kalls.append(call().wibble())
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(result.mock_calls, [call.wibble()])
self.assertEqual(result.wibble.mock_calls, [call()])
self.assertEqual(result.method_calls, [call.wibble()])
finally:
p.stop()
def test_patch_imports_lazily(self):
sys.modules.pop('squizz', None)
p1 = patch('squizz.squozz')
self.assertRaises(ImportError, p1.start)
squizz = Mock()
squizz.squozz = 6
sys.modules['squizz'] = squizz
p1 = patch('squizz.squozz')
squizz.squozz = 3
p1.start()
p1.stop()
self.assertEqual(squizz.squozz, 3)
    def test_patch_propagates_exc_on_exit(self):
class holder:
exc_info = None, None, None
class custom_patch(_patch):
def __exit__(self, etype=None, val=None, tb=None):
_patch.__exit__(self, etype, val, tb)
holder.exc_info = etype, val, tb
stop = __exit__
def with_custom_patch(target):
getter, attribute = _get_target(target)
return custom_patch(
getter, attribute, DEFAULT, None, False, None,
None, None, {}
)
@with_custom_patch('squizz.squozz')
def test(mock):
raise RuntimeError
self.assertRaises(RuntimeError, test)
self.assertIs(holder.exc_info[0], RuntimeError)
        self.assertIsNotNone(holder.exc_info[1],
                             'exception value not propagated')
        self.assertIsNotNone(holder.exc_info[2],
                             'exception traceback not propagated')
def test_create_and_specs(self):
for kwarg in ('spec', 'spec_set', 'autospec'):
p = patch('%s.doesnotexist' % __name__, create=True,
**{kwarg: True})
self.assertRaises(TypeError, p.start)
self.assertRaises(NameError, lambda: doesnotexist)
# check that spec with create is innocuous if the original exists
p = patch(MODNAME, create=True, **{kwarg: True})
p.start()
p.stop()
def test_multiple_specs(self):
original = PTModule
for kwarg in ('spec', 'spec_set'):
p = patch(MODNAME, autospec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec_set', 'autospec'):
p = patch(MODNAME, spec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
def test_specs_false_instead_of_none(self):
p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
mock = p.start()
try:
# no spec should have been set, so attribute access should not fail
mock.does_not_exist
mock.does_not_exist = 3
finally:
p.stop()
def test_falsey_spec(self):
for kwarg in ('spec', 'autospec', 'spec_set'):
p = patch(MODNAME, **{kwarg: 0})
m = p.start()
try:
                self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_spec_set_true(self):
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=True, **{kwarg: True})
m = p.start()
try:
self.assertRaises(AttributeError, setattr, m,
'doesnotexist', 'something')
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_callable_spec_as_list(self):
spec = ('__call__',)
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertTrue(callable(m))
finally:
p.stop()
def test_not_callable_spec_as_list(self):
spec = ('foo', 'bar')
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertFalse(callable(m))
finally:
p.stop()
def test_patch_stopall(self):
unlink = os.unlink
chdir = os.chdir
path = os.path
patch('os.unlink', something).start()
patch('os.chdir', something_else).start()
@patch('os.path')
def patched(mock_path):
patch.stopall()
self.assertIs(os.path, mock_path)
self.assertIs(os.unlink, unlink)
self.assertIs(os.chdir, chdir)
patched()
self.assertIs(os.path, path)
if __name__ == '__main__':
unittest.main()
|
avatar29A/adbook
|
refs/heads/master
|
adbook/addressbook.py
|
1
|
# coding=utf-8
from typing import NewType, Dict, List, Type
from adbook.drivers.json_store_driver import JsonStoreDriver
from adbook.orm.default_collections.groups import GroupCollection
from adbook.orm.default_collections.persons import PersonCollection
from adbook.abs_store_driver import AbsStoreDriver
from adbook.orm.collection_manager import CollectionManager, Collection
Collections = NewType("CollectionList", Dict[str, Collection])
class AddressBook(CollectionManager):
"""
    AddressBook is the main class that users work with.
    Use example:
        with AddressBook() as ab:
            ...
    or
        connection = MongoDBConnectionDriver(host="localhost", port="20199")
        with AddressBook(connection) as ab:
            ...
    Note: If AddressBook is created with no arguments, it uses the default db driver
    (JsonStoreDriver unless overridden).
"""
    # The user can override this driver to avoid a dependency on 'jsonpickle'.
#
# AddressBook.default_store_driver = CustomDefaultDriver
default_store_driver = JsonStoreDriver
def __init__(self, driver: AbsStoreDriver=None, collections: List[Type[Collection]] = None):
super().__init__()
self.__collections = Collections({})
if driver is None:
self.__driver = self.default_store_driver("address_book", "")
else:
self.__driver = driver
if collections is None:
self.register(self.PERSON_COLLECTION_NAME, PersonCollection(self))
self.register(self.GROUP_COLLECTION_NAME, GroupCollection(self))
else:
for collection in collections:
self.register(collection.name, collection(self))
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return True
def __getitem__(self, item):
if self.exists(item):
return self.get(item)
else:
return self.__dict__[item]
@property
def persons(self) -> PersonCollection:
persons_collection = self.get(self.PERSON_COLLECTION_NAME)
return self.default_is_none(persons_collection, PersonCollection)
@property
def groups(self) -> GroupCollection:
groups_collection = self.get(self.GROUP_COLLECTION_NAME)
return self.default_is_none(groups_collection, GroupCollection)
def default_is_none(self, collection: Collection, default: Type[Collection]) -> Collection:
if collection is None:
default_collection = default(self)
            # register the newly created default collection
self.register(default_collection.name, default_collection)
return default_collection
return collection
#
# Implements CollectionManager
def get(self, collection_name: str) -> Collection:
if not self.exists(collection_name):
return None
return self.__collections[collection_name]
def register(self, collection_name: str, collection: Collection, override: bool = False) -> Collection:
if self.exists(collection_name) and not override:
raise Exception("Collection with same name '{}' is exists".format(collection_name))
self.__collections[collection_name] = collection
return collection
def exists(self, collection_name: str) -> bool:
return collection_name in self.__collections
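    # A minimal usage sketch (CustomCollection is a hypothetical name; it only
    # assumes the Collection interface used in __init__ above, i.e. a constructor
    # taking the manager and a ``name`` attribute):
    #
    #   with AddressBook() as ab:
    #       ab.register("custom", CustomCollection(ab), override=True)
    #       if ab.exists("custom"):
    #           collection = ab.get("custom")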
#
def open(self):
"""
Open connection to db
:return:
"""
self.__driver.open()
def save(self):
"""
Save to db
"""
self.__driver.save(self)
def close(self):
"""
Close connection with database
"""
self.save()
self.__driver.close()
|
galaxyfreak/android_kernel_htc_z4u
|
refs/heads/master
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
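# Illustrative sketch of how test lines are interpreted (the "cmd: opcode:
# thread-id: data" layout is inferred from the parser below; the concrete
# values are hypothetical and require the rt-tester sysfs files to exist):
#
#   C: locknowait: 0: 0   -> writes "4:0" to /sys/devices/system/rttest/rttest0/command
#   W: locked: 0: 0       -> polls rttest0/status until the "M" digit for lock 0 equals 4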
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
smallyear/linuxLearn
|
refs/heads/master
|
salt/salt/cli/run.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from salt.utils import parsers
from salt.utils import activate_profile
from salt.utils import output_profile
from salt.utils.verify import check_user, verify_log
from salt.exceptions import SaltClientError
import salt.defaults.exitcodes # pylint: disable=W0611
class SaltRun(parsers.SaltRunOptionParser):
'''
Used to execute Salt runners
'''
def run(self):
'''
Execute salt-run
'''
import salt.runner
self.parse_args()
# Setup file logging!
self.setup_logfile_logger()
verify_log(self.config)
profiling_enabled = self.options.profiling_enabled
runner = salt.runner.Runner(self.config)
if self.options.doc:
runner.print_docs()
self.exit(salt.defaults.exitcodes.EX_OK)
# Run this here so SystemExit isn't raised anywhere else when
# someone tries to use the runners via the python API
try:
if check_user(self.config['user']):
pr = activate_profile(profiling_enabled)
try:
runner.run()
finally:
output_profile(
pr,
stats_path=self.options.profiling_path,
stop=True)
except SaltClientError as exc:
raise SystemExit(str(exc))
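# Minimal driver sketch: in the real distribution a separate console-script
# wrapper constructs and runs this parser, roughly equivalent to:
#
#   if __name__ == '__main__':
#       SaltRun().run()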
|
godstale/HomePy
|
refs/heads/master
|
src/tg/bot/MessageBox.py
|
1
|
#-*- coding: utf-8 -*-
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# Set language
# en: English, kr: Korean
msg_lang = 'en'
def set_msg_lang(lang_code):
global msg_lang
msg_lang = lang_code
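# Minimal usage sketch (module name assumed to match this file, MessageBox):
#
#   import MessageBox
#   MessageBox.set_msg_lang('kr')
#   print MessageBox.msg_welcome()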
def msg_welcome():
print msg_lang
if msg_lang == 'kr':
return '안녕하세요!!'
else:
return 'Hello!!'
def msg_invalid_param():
if msg_lang == 'kr':
return '파리미터에 오류가 있습니다!!'
else:
return 'Invalid parameter!!'
def msg_devnum_error():
if msg_lang == 'kr':
return '장치넘버에 오류가 있습니다.'
else:
return 'Wrong device number'
def msg_device_not_found():
if msg_lang == 'kr':
return '장치를 찾을 수 없습니다.'
else:
return 'Cannot find device'
def msg_current_lang():
if msg_lang == 'kr':
return '현재언어설정'
else:
return 'Current language'
def msg_lang_changed():
if msg_lang == 'kr':
return '언어가 변경되었습니다.'
else:
return 'Language setting changed.'
def msg_cctv_on():
if msg_lang == 'kr':
return 'CCTV가 활성화 되었습니다.'
else:
return 'CCTV started!!'
def msg_cctv_already_on():
if msg_lang == 'kr':
return '이미 CCTV가 활성화 되어있습니다.'
else:
return 'CCTV is already active.'
def msg_cctv_off():
if msg_lang == 'kr':
return 'CCTV가 종료되었습니다.'
else:
return 'CCTV stopped.'
def msg_turnoff_cctv():
    if msg_lang == 'kr':
        return 'CCTV가 종료되었습니다.'
    else:
        return 'CCTV stopped.'
def msg_remove_pictures():
if msg_lang == 'kr':
return '이미지 파일을 모두 삭제하였습니다.'
else:
return 'Removed pictures.'
def msg_device_number():
if msg_lang == 'kr':
return '장치넘버'
else:
return 'Device number'
def msg_location():
if msg_lang == 'kr':
return '위치'
else:
return 'Located at'
def msg_category():
if msg_lang == 'kr':
return '분류'
else:
return 'Category'
def msg_devdesc_command():
if msg_lang == 'kr':
return '아래 명령어로 각 장치의 세부 내용을 볼 수 있습니다.\n장치 상세 (장치넘버)'
else:
return 'See the details with below command:\ndevice desc (device_number)'
def msg_update_time():
if msg_lang == 'kr':
return '등록시간'
else:
return 'Update time'
def msg_control_signal():
if msg_lang == 'kr':
return '제어신호'
else:
return 'Control signal'
def msg_data_type():
if msg_lang == 'kr':
return '데이터타입'
else:
return 'Data type'
def msg_ctrlsignal_desc():
if msg_lang == 'kr':
return '아래 명령어로 원격조종 신호를 보낼 수 있습니다.\n제어 (장치넘버) (신호1) (신호2) (신호3) (신호4)'
else:
return 'Control remote device with below command:\ncontrol (device_number) (signal1) (signal2) (signal3) (signal4)'
def msg_device_removed():
if msg_lang == 'kr':
return '장치 %d이 제거되었습니다.'
else:
return 'Device %d removed.'
def msg_device_remove_error():
if msg_lang == 'kr':
return '장치 %d 제거에 문제가 발생했습니다!!'
else:
return 'Failed in removing device %d!!'
def msg_every_device_removed():
if msg_lang == 'kr':
return '등록된 장치 정보가 모두 삭제되었습니다.'
else:
return 'Every device information is removed.'
def msg_every_device_remove_error():
if msg_lang == 'kr':
return '모든 장치정보 삭제에 문제가 발생했습니다!!'
else:
return 'Failed in removing every device!!'
def msg_sensordata_removed():
if msg_lang == 'kr':
return '장치 %d의 센서 정보가 제거되었습니다.'
else:
return 'Removed sensor data of device %d.'
def msg_sensordata_remove_error():
if msg_lang == 'kr':
return '장치 %d의 센서정보 제거에 문제가 발생했습니다!!'
else:
return 'Failed in removing sensor data of device %d!!'
def msg_every_sensordata_removed():
if msg_lang == 'kr':
return '센서 정보가 모두 삭제되었습니다.'
else:
return 'Removed every sensor data.'
def msg_every_sensordata_remove_error():
if msg_lang == 'kr':
return '모든 장치정보 삭제에 문제가 발생했습니다!!'
else:
return 'Failed in removing every sensor data!!'
def msg_need_devnum():
if msg_lang == 'kr':
return '장치넘버 파라미터가 필요합니다!!'
else:
return 'Need device number!!'
def msg_no_matching_result():
if msg_lang == 'kr':
return '장치에 해당하는 정보가 없습니다.'
else:
return 'No matching result.'
def msg_cannot_open_graph():
if msg_lang == 'kr':
return '그래프 파일을 열 수 없습니다!!'
else:
return 'Cannot open graph file!!'
def msg_wrong_device():
if msg_lang == 'kr':
return '잘못된 장치를 지정하셨습니다!!'
else:
return 'Wrong device number!!'
def msg_wrong_param1():
if msg_lang == 'kr':
return '파라미터1 값이 잘못되었습니다!!'
else:
return 'Param1 value is missing or not valid!!'
def msg_sent_signal():
if msg_lang == 'kr':
return '장치 %d에 제어 신호를 보냈습니다.'
else:
return 'Sent control signal to %d.'
def msg_recv_ping_response():
if msg_lang == 'kr':
return 'Ping 응답 신호를 받았습니다.'
else:
return 'Received ping response.'
def msg_ctrlsignal_response():
if msg_lang == 'kr':
return '제어신호에 대한 응답을 받았습니다.'
else:
return 'Received response:'
def msg_noti():
if msg_lang == 'kr':
return '알림'
else:
return 'Notification'
def msg_no_noti():
if msg_lang == 'kr':
return '알림 설정이 없습니다.'
else:
return 'Notification setting is not exist.'
def msg_type_noti_del_param():
if msg_lang == 'kr':
return '알림 삭제에 필요한 파라미터에 오류가 있습니다!! 아래와 같은 명령으로 삭제할 수 있습니다.\n알림 삭제 (알림ID)\n알림 삭제 (분류1) (분류2) (장치ID)'
else:
return 'Invalid parameters!! Use command like below:\nnoti del (noti-ID)\nnoti del (category1) (category2) (device ID)'
def msg_noti_del_success():
if msg_lang == 'kr':
return '알림을 삭제 했습니다.'
else:
return 'Removed notifications.'
def msg_noti_del_fail():
if msg_lang == 'kr':
return '알림 삭제에 실패했습니다!!'
else:
return 'Cannot remove notification!!'
def msg_add_noti_param():
if msg_lang == 'kr':
return '알림 추가 파라미터에 오류가 있습니다!! 아래와 같은 명령으로 추가할 수 있습니다.\n\n알림 추가 (장치넘버) (data1비교문) (data2비교문) (data3비교문) (data4비교문)\n\n예) 알림 추가 1 data1<10 data2==12 data3>=13 data4!=0\n\ndata1, data2, data3, data4 중 하나만 입력 가능'
else:
return 'Invalid parameters!! Use command like below:\n\nnoti add (device ID) (data1 comparison) (data2 comparison) ... (data4 comparison)\n\nEx) noti add 1 data1<7 data2==3 data3>=12 data4!=0\n\nAt least one comparison is needed.'
def msg_add_noti_comp_error():
if msg_lang == 'kr':
return '데이터 비교문에 오류가 있습니다!! 아래와 같은 형식을 사용하세요.\n\n알림 추가 (장치넘버) (data1비교문) (data2비교문) (data3비교문) (data4비교문)\n\n예) 알림 추가 1 data1<10 data2==12 data3>=13 data4!=0\n\ndata1, data2, data3, data4 중 하나이상 입력 필요'
else:
return 'Invalid comparison format!! Use command like below:\n\nnoti add (device ID) (data1 comparison) (data2 comparison) ... (data4 comparison)\n\nEx) noti add 1 data1<7 data2==3 data3>=12 data4!=0\n\nAt least one comparison is needed.'
def msg_noti_received():
if msg_lang == 'kr':
return '설정한 조건에 맞는 데이터가 업데이트 되었습니다!!'
else:
return 'HomePy received new update data that you want!!'
def msg_add_noti_success():
if msg_lang == 'kr':
return '알림을 추가했습니다.'
else:
return 'Added a new notification.'
def msg_add_noti_failed():
if msg_lang == 'kr':
return '알림을 DB에 추가하는 중 오류가 발생했습니다!!'
else:
        return 'Error occurred while inserting into DB!!'
def msg_invalid_noti_cmd():
if msg_lang == 'kr':
return '알림 커맨드에 오류가 있습니다!!'
else:
return 'Invalid notification command!!'
def msg_macro():
if msg_lang == 'kr':
return '매크로'
else:
return 'Macro'
def msg_no_macro():
if msg_lang == 'kr':
return '매크로 설정이 없습니다.'
else:
return 'Macro setting is not exist.'
def msg_type_macro_del_param():
if msg_lang == 'kr':
return '매크로 삭제에 필요한 파라미터에 오류가 있습니다!! 아래와 같은 명령으로 삭제할 수 있습니다.\n매크로 삭제 (매크로ID)\n매크로 삭제 (분류1) (분류2) (장치ID)'
else:
return 'Invalid parameters!! Use command like below:\nmacro del (macro-ID)\nmacro del (category1) (category2) (device ID)'
def msg_macro_del_success():
if msg_lang == 'kr':
return '매크로를 삭제 했습니다.'
else:
return 'Removed macro.'
def msg_macro_del_fail():
if msg_lang == 'kr':
return '매크로 삭제에 실패했습니다!!'
else:
return 'Cannot remove macro!!'
def msg_add_macro_param():
if msg_lang == 'kr':
return '매크로 추가 파라미터에 오류가 있습니다!! 아래와 같은 명령으로 추가할 수 있습니다.\n\n매크로 추가 (알림넘버) (명령문)\n\n예) 매크로 추가 1 제어 1 10'
else:
return 'Invalid parameters!! Use command like below:\n\nmacro add (noti ID) (command)\n\nEx) macro add 1 send 1 10'
def msg_invalid_noti_id():
if msg_lang == 'kr':
return '잘못된 알림 ID 입니다!!'
else:
return 'Invalid notification ID!!'
def msg_add_macro_success():
if msg_lang == 'kr':
return '매크로를 추가 했습니다.'
else:
return 'Added macro.'
def msg_add_macro_fail():
if msg_lang == 'kr':
return '매크로 추가에 실패했습니다!!'
else:
return 'Cannot add macro!!'
def msg_timer():
if msg_lang == 'kr':
return '타이머'
else:
return 'Timer'
def msg_no_timer():
if msg_lang == 'kr':
return '타이머 설정이 없습니다.'
else:
return 'Timer setting is not exist.'
def msg_type_timer_del_param():
if msg_lang == 'kr':
return '타이머 삭제에 필요한 파라미터에 오류가 있습니다!! 아래와 같은 명령으로 삭제할 수 있습니다.\n타이머 삭제 (타이머ID)'
else:
return 'Invalid parameters!! Use command like below:\ntimer del (timer-ID)'
def msg_timer_del_success():
if msg_lang == 'kr':
return '타이머를 삭제 했습니다.'
else:
return 'Removed timer.'
def msg_timer_del_fail():
if msg_lang == 'kr':
return '타이머 삭제에 실패했습니다!!'
else:
return 'Cannot remove timer!!'
def msg_add_timer_param():
if msg_lang == 'kr':
return '타이머 추가 파라미터에 오류가 있습니다!! 아래와 같은 명령으로 추가할 수 있습니다.\n\n타이머 추가 (시간간격/분) (명령문)\n\n예) 타이머 추가 10 제어 1 5\n 타이머 추가 13:55 제어 1 5'
else:
        return 'Invalid parameters!! Use command like below:\n\ntimer add (interval/min) (command)\n\nEx) timer add 10 send 1 5\n    timer add 13:55 send 1 5'
def msg_invalid_timer_id():
if msg_lang == 'kr':
return '잘못된 타이머 ID 입니다!!'
else:
return 'Invalid timer ID!!'
def msg_add_timer_success():
if msg_lang == 'kr':
return '타이머를 추가 했습니다.'
else:
return 'Added timer.'
def msg_add_timer_fail():
if msg_lang == 'kr':
return '타이머 추가에 실패했습니다!!'
else:
return 'Cannot add timer!!'
def msg_current_queue():
if msg_lang == 'kr':
return '재생목록'
else:
return 'Current queue'
def msg_invalid_playlist():
if msg_lang == 'kr':
return '플레이리스트를 잘못 지정하셨습니다!!'
else:
return 'Invalid playlist number!!'
def msg_changed_to():
if msg_lang == 'kr':
return '변경'
else:
return 'changed to'
def msg_control_panel_closed():
if msg_lang == 'kr':
return '컨트롤 패널을 닫습니다.'
else:
return 'control panel closed.'
def msg_control_panel():
if msg_lang == 'kr':
return '컨트롤 패널'
else:
return 'control panel'
def msg_keypad():
if msg_lang == 'kr':
return '키패드'
else:
        return 'keypad'
def msg_keypad_set_dev():
if msg_lang == 'kr':
return '키패드 컨트롤 장치를 설정하였습니다.'
else:
return 'Set target device for keypad.'
def msg_keypad_cur_dev():
if msg_lang == 'kr':
return '키패드 컨트롤 장치'
else:
return 'Target device for keypad'
def msg_help_text():
if msg_lang == 'kr':
msg = """hello
홈파이 서버가 응답 가능한지 확인.
chat (메시지)
현재 활성화된 대화창에 메시지를 출력
ping (장치넘버)
지정한 장치에 ping 신호를 보내 응답이 오는지 확인. 장치가 동작중인지 확인.
lang (en, kr)
언어 설정을 변경. 영어(en), 한글(kr)만 지원
cctv (on, off)
카메라를 동작시키고 JPG 스트리밍을 on/off. CCTV를 볼 수 있는 URL을 전달
pic
사진을 촬영해서 이미지 파일을 전송해줌. 기존에 cctv가 동작중인 경우 cctv 동작이 중단됨
pic del
서버에 저장된 이미지 파일을 모두 삭제
dev
현재까지 감지되었던 장치들의 리스트를 보여줌. 동작이 중단된 장치가 있을 수 있음. 각 장치는 장치넘버를 가짐
dev desc (장치넘버)
해당하는 장치의 상세 정보를 보여줌. 각 장치가 처리할 수 있는 제어신호(control signal) 정보도 함께 출력.
dev del (장치넘버)
지정한 장치를 삭제. 장치가 보내준 센서 데이터도 모두 삭제됨
dev delall
등록된 장치들을 모두 삭제, 모든 센서 데이터도 삭제됨
sensor (장치넘버) [갯수]
지정한 장치가 보내준 센서 데이터를 [갯수] 만큼 출력. 최대 100개까지 출력. [갯수]가 생략된 경우 최신 데이터 1개만 출력
sensor del (장치넘버) [시간]
지정한 장치의 센서 데이터를 [시간] 단위로 삭제. 해당 장치의 지정한 시간 이전의 데이터는 모두 삭제됨. [시간] 항목을 생략하면 해당 장치의 센서 데이터 모두를 삭제
sensor delall [시간]
모든 센서 데이터를 [시간] 단위로 삭제. 지정한 시간 이전의 데이터는 모두 삭제됨. [시간] 항목을 생략하면 센서 데이터 모두를 삭제
graph (장치넘버) [갯수]
지정한 장치의 센서 데이터를 [갯수]만큼 추출해서 그래프로 그린 뒤 이미지 파일로 전송해줌. 최대 100개까지 출력. [갯수]가 생략된 경우 최신 데이터 10개를 사용.
graph del
서버에 저장된 그래프 파일을 모두 삭제
send (장치넘버) (제어신호1) [제어신호2] ... [제어신호4]
지정한 장치로 제어신호를 전송. 제어신호1 은 필수며 2, 3, 4는 생략가능. 반드시 제어신호는 순서대로 기재해야 함. 사용가능한 제어신호의 정수값은 "장치 상세 (장치넘버)" 명령으로 확인가능.
noti
사용자가 설정한 알림 설정을 모두 보여줌. 각각의 알림 설정은 알림 ID를 가짐.
noti add (장치넘버) (조건식1) [조건식2] ... [조건식4]
지정한 장치에서 조건식에 맞는 센서 데이터를 보낼 경우 메시지로 알려주도록 설정함. 조건식 한 개는 필수. 반드시 조건식은 data1, data2, data3, data4 로 센서 데이터를 구분해야 하며 공백없이 기재.
data1>=1 data2<=99 data3!=0 data4==1
noti del (알림ID)
지정한 알림ID 에 해당하는 설정을 삭제. 알림ID 대신 분류1, 분류2, 장치ID 를 사용할 경우 해당 장치의 알림 모두를 삭제. 알림 삭제시 연결된 매크로도 모두 삭제.
macro
현재 설정된 매크로를 모두 보여줌.
macro add (알림ID) (명령어)
특정 알림이 실행될 때 챗을 보내는대신 명령어를 실행.\n예) macro add 1 pic => 알림 1 조건에 맞는 센서값이 들어오면 사진 촬영 후 전송
macro del (매크로ID)
지정한 매크로 삭제
timer
현재 설정된 타이머를 모두 보여줌.
timer add (시간간격/분) (명령어)
일정 시간 간격으로 명령어를 자동 실행하도록 설정
timer add (시):(분) (명령어)
특정 시간에 명령어를 실행하도록 설정
timer del (타이머ID)
지정한 타이머 삭제
music
현재 재생목록에 포함된 음악들을 표시
music play (음악넘버)
선택한 음악을 재생. 음악넘버를 생략할 경우 처음부터 재생.
music stop
음악 재생을 중단
music playlist
저장된 playlist들을 출력.
music playlist (playlist 넘버)
재생목록을 지우고 선택한 playlist의 음악들을 추가
music control (on/off)
음악 컨트롤 패널을 표시, 제거.
"""
return msg
else:
msg = """hello
: Check if HomePy is available
chat (message)
: Send message to activated chat room
ping (device_number)
: Check if specified device is active or not
lang (en, kr)
: Change language setting
cctv (on, off)
: Turn on cctv. Also sends streaming URL message.
pic
: Take a picture and sends image file. This command stops cctv if its running.
pic del
: Remove pictures in picture directory.
dev
: Show registered devices. Every device has its own device number
dev desc (device_number)
: Show details of specified device. Also prints (control signal) info.
dev del (device_number)
: Remove selected device. This command removes sensor data also.
dev delall
: Remove every device.
sensor (device_number) [count]
: Print sensor data from specified device. Max 100, shows most recent 1 if count is missing.
sensor del (device_number) [hour_unit]
: Remove device's sensor data which is older than [hour_unit]. If [hour_unit] is missing, delete all data of the device.
sensor delall [hour_unit]
: Remove all sensor data which is older than [hour_unit].
graph (device_number) [count]
: Draw graph with sensor data and send it as image file. Max 100, use 10 if count is missing.
graph del
: Remove graph file in graph directory.
send (device_number) (control1) [control2] ... [control4]
: Send control signal to specified device. Control1 value is mandatory and others are optional. Check available control value with "dev desc (device_number)" command.
noti
: Show every notification settings.
noti add (device_number) (comp_str1) [comp_str1] ... [comp_str1]
: Add a new notification setting. If HomePy receives sensor data which is matching with noti setting, HomePy alerts you with a message. Check comp_str format:
data1>=1 data2<=99 data3!=0 data4==1
noti del (noti_ID)
: Remove notification setting.
macro
Show every macro.
macro add (noti_ID) (command)
Add a macro.\nex) macro add 1 pic => Take a picture when HomePy enables noti 1.
macro del (macro_ID)
delete a macro.
timer
Show every timer.
timer add (interval/min) (command)
Add an interval based timer.\nex) timer add 10 pic => Take a picture every 10 minute.
timer add (hour):(min) (command)
Add a time based timer.\nex) timer add 13:55 pic => Take a picture at 13:55.
timer del (timer_ID)
Delete a timer.
music
Show music files in queue.
music play (music number)
Play selected music. If music number is not specified, starts at first.
music stop
Stop playing.
music playlist
Show playlists.
music playlist (playlist number)
Delete all queued files and add files in selected playlist.
music control (on/off)
Enable/Disable music control panel.
"""
return msg
|
lynxis/pyLogicSniffer
|
refs/heads/master
|
simple_test_frame.py
|
2
|
# -*- coding: UTF-8 -*-
'''Frame class to run a dialog for testing.
Copyright © 2011, Mel Wilson mwilson@melwilsonsoftware.ca
This file is part of pyLogicSniffer.
pyLogicSniffer is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyLogicSniffer is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyLogicSniffer. If not, see <http://www.gnu.org/licenses/>.
'''
import wx
class SimpleTestFrame (wx.Frame):
'''Top application frame.'''
def __init__ (self, title=None, about_caption=None, about_text=None):
if title is None:
title = 'SimpleTestFrame title'
if about_caption is None:
about_caption = 'SimpleTestFrame about_caption'
if about_text is None:
about_text = 'SimpleTestFrame about_text'
wx.Frame.__init__ (self, None, wx.ID_ANY, title)
self.about_caption = about_caption
self.about_text = about_text
self.SetMenuBar (self._main_menu())
button = wx.Button (self, wx.ID_ANY, '&Test')
wx.EVT_MENU (self, wx.ID_ABOUT, self.OnHelpAbout)
wx.EVT_MENU (self, wx.ID_EXIT, self.OnFileExit)
wx.EVT_MENU (self, wx.ID_NEW, self.OnTest)
button.Bind (wx.EVT_BUTTON, self.OnTest)
top_sizer = wx.BoxSizer (wx.VERTICAL)
top_sizer.Add (button, 0, wx.CENTER)
self.SetAutoLayout (True)
self.SetSizer (top_sizer)
top_sizer.Fit (self)
top_sizer.SetSizeHints (self)
def _main_menu (self):
'''Quasi-boilerplate to create the main menu.'''
menu = wx.MenuBar ()
filemenu = wx.Menu()
filemenu.Append (wx.ID_NEW, '&Test')
filemenu.AppendSeparator()
filemenu.Append (wx.ID_EXIT, 'E&xit')
menu.Append (filemenu, '&File')
helpmenu = wx.Menu()
helpmenu.Append (wx.ID_ABOUT, '&About')
menu.Append (helpmenu, '&Help')
return menu
def OnFileExit (self, evt):
self.Destroy()
def OnHelpAbout (self, evt):
wx.MessageBox (self.about_text, self.about_caption, style=wx.ICON_INFORMATION|wx.OK)
def OnTest (self, evt):
'''Overridable method to display a dialog.'''
wx.MessageBox ('Override the OnTest method to display your dialog.', 'SimpleTestFrame')
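# Usage sketch (MyTestFrame and MyDialog are hypothetical subclass/dialog names):
#
#   class MyTestFrame (SimpleTestFrame):
#       def OnTest (self, evt):
#           dlg = MyDialog (self)
#           dlg.ShowModal ()
#           dlg.Destroy ()
#
#   app = wx.App (False)
#   MyTestFrame ('My test').Show ()
#   app.MainLoop ()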
|
sharad/calibre
|
refs/heads/master
|
src/calibre/gui2/preferences/keyboard.py
|
1
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from PyQt5.Qt import QVBoxLayout
from calibre.gui2.preferences import (ConfigWidgetBase, test_widget)
from calibre.gui2.keyboard import ShortcutConfig
class ConfigWidget(ConfigWidgetBase):
def genesis(self, gui):
self.gui = gui
self.conf_widget = ShortcutConfig(self)
self.conf_widget.changed_signal.connect(self.changed_signal)
self._layout = l = QVBoxLayout()
self.setLayout(l)
l.addWidget(self.conf_widget)
def initialize(self):
ConfigWidgetBase.initialize(self)
self.conf_widget.initialize(self.gui.keyboard)
def restore_defaults(self):
ConfigWidgetBase.restore_defaults(self)
self.conf_widget.restore_defaults()
def commit(self):
self.conf_widget.commit()
return ConfigWidgetBase.commit(self)
def refresh_gui(self, gui):
gui.keyboard.finalize()
def highlight_group(self, group_name):
self.conf_widget.highlight_group(group_name)
if __name__ == '__main__':
from PyQt5.Qt import QApplication
app = QApplication([])
test_widget('Advanced', 'Keyboard')
|
TeamEOS/external_chromium_org
|
refs/heads/lp5.0
|
chrome/test/chromedriver/client/webelement.py
|
43
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from command_executor import Command
class WebElement(object):
"""Represents an HTML element."""
def __init__(self, chromedriver, id_):
self._chromedriver = chromedriver
self._id = id_
def _Execute(self, command, params=None):
if params is None:
params = {}
    params['id'] = self._id
return self._chromedriver.ExecuteCommand(command, params)
def FindElement(self, strategy, target):
return self._Execute(
Command.FIND_CHILD_ELEMENT, {'using': strategy, 'value': target})
def FindElements(self, strategy, target):
return self._Execute(
Command.FIND_CHILD_ELEMENTS, {'using': strategy, 'value': target})
def GetText(self):
return self._Execute(Command.GET_ELEMENT_TEXT)
def HoverOver(self):
self._Execute(Command.HOVER_OVER_ELEMENT)
def Click(self):
self._Execute(Command.CLICK_ELEMENT)
def SingleTap(self):
self._Execute(Command.TOUCH_SINGLE_TAP)
def Clear(self):
self._Execute(Command.CLEAR_ELEMENT)
def SendKeys(self, *values):
typing = []
for value in values:
if isinstance(value, int):
value = str(value)
for i in range(len(value)):
typing.append(value[i])
self._Execute(Command.SEND_KEYS_TO_ELEMENT, {'value': typing})
def GetLocation(self):
return self._Execute(Command.GET_ELEMENT_LOCATION)
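# Usage sketch: WebElement instances are normally returned by the client's
# find-element calls rather than constructed directly (the driver object and
# its FindElement signature below are assumptions):
#
#   element = driver.FindElement('css selector', '#search')
#   element.SendKeys('query')
#   print element.GetText()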
|
mdj2/django
|
refs/heads/master
|
tests/admin_validation/models.py
|
192
|
"""
Tests of ModelAdmin validation logic.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Album(models.Model):
title = models.CharField(max_length=150)
@python_2_unicode_compatible
class Song(models.Model):
title = models.CharField(max_length=150)
album = models.ForeignKey(Album)
original_release = models.DateField(editable=False)
class Meta:
ordering = ('title',)
def __str__(self):
return self.title
def readonly_method_on_model(self):
# does nothing
pass
class TwoAlbumFKAndAnE(models.Model):
album1 = models.ForeignKey(Album, related_name="album1_set")
album2 = models.ForeignKey(Album, related_name="album2_set")
e = models.CharField(max_length=1)
class Author(models.Model):
name = models.CharField(max_length=100)
class Book(models.Model):
name = models.CharField(max_length=100)
subtitle = models.CharField(max_length=100)
price = models.FloatField()
authors = models.ManyToManyField(Author, through='AuthorsBooks')
class AuthorsBooks(models.Model):
author = models.ForeignKey(Author)
book = models.ForeignKey(Book)
class State(models.Model):
name = models.CharField(max_length=15)
class City(models.Model):
state = models.ForeignKey(State)
|
blaggacao/OpenUpgrade
|
refs/heads/8.0
|
addons/crm_profiling/crm_profiling.py
|
333
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.osv import orm
from openerp.tools.translate import _
def _get_answers(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs """
query = """
select distinct(answer)
from profile_question_yes_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_yes = [x[0] for x in cr.fetchall()]
query = """
select distinct(answer)
from profile_question_no_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_no = [x[0] for x in cr.fetchall()]
return [ans_yes, ans_no]
def _get_parents(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
@return: Get parents's Id """
ids_to_check = ids
cr.execute("""
select distinct(parent_id)
from crm_segmentation
where parent_id is not null
and id IN %s""",(tuple(ids),))
parent_ids = [x[0] for x in cr.fetchall()]
trigger = False
for x in parent_ids:
if x not in ids_to_check:
ids_to_check.append(x)
trigger = True
if trigger:
ids_to_check = _get_parents(cr, uid, ids_to_check)
return ids_to_check
def test_prof(cr, uid, seg_id, pid, answers_ids=None):
""" return True if the partner pid fetch the segmentation rule seg_id
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param seg_id: Segmentaion's ID
@param pid: partner's ID
@param answers_ids: Answers's IDs
"""
ids_to_check = _get_parents(cr, uid, [seg_id])
[yes_answers, no_answers] = _get_answers(cr, uid, ids_to_check)
temp = True
for y_ans in yes_answers:
if y_ans not in answers_ids:
temp = False
break
if temp:
for ans in answers_ids:
if ans in no_answers:
temp = False
break
if temp:
return True
return False
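# Worked example for test_prof (hypothetical IDs): if the profile chain yields
# yes_answers = [1, 2] and no_answers = [3], a partner whose answers_ids include
# both 1 and 2 and exclude 3 matches the profile; lacking a "yes" answer or
# holding a "no" answer makes the function return False.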
def _recompute_categ(self, cr, uid, pid, answers_ids):
""" Recompute category
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param pid: partner's ID
@param answers_ids: Answers's IDs
"""
ok = []
cr.execute('''
select r.category_id
from res_partner_res_partner_category_rel r left join crm_segmentation s on (r.category_id = s.categ_id)
where r.partner_id = %s and (s.exclusif = false or s.exclusif is null)
''', (pid,))
for x in cr.fetchall():
ok.append(x[0])
query = '''
select id, categ_id
from crm_segmentation
where profiling_active = true'''
if ok != []:
query = query +''' and categ_id not in(%s)'''% ','.join([str(i) for i in ok ])
query = query + ''' order by id '''
cr.execute(query)
segm_cat_ids = cr.fetchall()
for (segm_id, cat_id) in segm_cat_ids:
if test_prof(cr, uid, segm_id, pid, answers_ids):
ok.append(cat_id)
return ok
class question(osv.osv):
""" Question """
_name="crm_profiling.question"
_description= "Question"
_columns={
'name': fields.char("Question", required=True),
'answers_ids': fields.one2many("crm_profiling.answer", "question_id", "Available Answers", copy=True),
}
class questionnaire(osv.osv):
""" Questionnaire """
_name="crm_profiling.questionnaire"
_description= "Questionnaire"
_columns = {
'name': fields.char("Questionnaire", required=True),
'description':fields.text("Description", required=True),
'questions_ids': fields.many2many('crm_profiling.question','profile_questionnaire_quest_rel',\
'questionnaire', 'question', "Questions"),
}
class answer(osv.osv):
_name="crm_profiling.answer"
_description="Answer"
_columns={
"name": fields.char("Answer", required=True),
"question_id": fields.many2one('crm_profiling.question',"Question"),
}
class partner(osv.osv):
_inherit="res.partner"
_columns={
"answers_ids": fields.many2many("crm_profiling.answer","partner_question_rel",\
"partner","answer","Answers"),
}
def _questionnaire_compute(self, cr, uid, answers, context=None):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param data: Get Data
@param context: A standard dictionary for contextual values """
partner_id = context.get('active_id')
query = "select answer from partner_question_rel where partner=%s"
cr.execute(query, (partner_id,))
for x in cr.fetchall():
answers.append(x[0])
self.write(cr, uid, [partner_id], {'answers_ids': [[6, 0, answers]]}, context=context)
return {}
def write(self, cr, uid, ids, vals, context=None):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
@param context: A standard dictionary for contextual values """
if 'answers_ids' in vals:
vals['category_id']=[[6, 0, _recompute_categ(self, cr, uid, ids[0], vals['answers_ids'][0][2])]]
return super(partner, self).write(cr, uid, ids, vals, context=context)
class crm_segmentation(osv.osv):
""" CRM Segmentation """
_inherit="crm.segmentation"
_columns={
"answer_yes": fields.many2many("crm_profiling.answer","profile_question_yes_rel",\
"profile","answer","Included Answers"),
"answer_no": fields.many2many("crm_profiling.answer","profile_question_no_rel",\
"profile","answer","Excluded Answers"),
'parent_id': fields.many2one('crm.segmentation', 'Parent Profile'),
'child_ids': fields.one2many('crm.segmentation', 'parent_id', 'Child Profiles'),
'profiling_active': fields.boolean('Use The Profiling Rules', help='Check\
this box if you want to use this tab as part of the \
segmentation rule. If not checked, the criteria beneath will be ignored')
}
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive profiles.', ['parent_id'])
]
def process_continue(self, cr, uid, ids, start=False):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm segmentation’s IDs """
partner_obj = self.pool.get('res.partner')
categs = self.read(cr,uid,ids,['categ_id','exclusif','partner_id', \
'sales_purchase_active', 'profiling_active'])
for categ in categs:
if start:
if categ['exclusif']:
cr.execute('delete from res_partner_res_partner_category_rel where \
category_id=%s', (categ['categ_id'][0],))
partner_obj.invalidate_cache(cr, uid, ['category_id'])
id = categ['id']
cr.execute('select id from res_partner order by id ')
partners = [x[0] for x in cr.fetchall()]
if categ['sales_purchase_active']:
to_remove_list=[]
cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
line_ids = [x[0] for x in cr.fetchall()]
for pid in partners:
if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
to_remove_list.append(pid)
for pid in to_remove_list:
partners.remove(pid)
if categ['profiling_active']:
to_remove_list = []
for pid in partners:
cr.execute('select distinct(answer) from partner_question_rel where partner=%s',(pid,))
answers_ids = [x[0] for x in cr.fetchall()]
if (not test_prof(cr, uid, id, pid, answers_ids)):
to_remove_list.append(pid)
for pid in to_remove_list:
partners.remove(pid)
for partner in partner_obj.browse(cr, uid, partners):
category_ids = [categ_id.id for categ_id in partner.category_id]
if categ['categ_id'][0] not in category_ids:
cr.execute('insert into res_partner_res_partner_category_rel (category_id,partner_id) values (%s,%s)', (categ['categ_id'][0],partner.id))
partner_obj.invalidate_cache(cr, uid, ['category_id'], [partner.id])
self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
iulian787/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/py-docopt/package.py
|
5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDocopt(PythonPackage):
"""Command-line interface description language."""
homepage = "http://docopt.org/"
url = "https://pypi.io/packages/source/d/docopt/docopt-0.6.2.tar.gz"
import_modules = ['docopt']
version('0.6.2', sha256='49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491')
depends_on('py-setuptools', type='build')
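    # Typical command-line usage (standard Spack workflow, not specific to this
    # package): ``spack install py-docopt``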
|
BT-ojossen/odoo
|
refs/heads/8.0
|
addons/l10n_es/migrations/8.0.5.0/pre-migration.py
|
52
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 Serv. Tecnol. Avanz. (<http://www.serviciosbaeza.com>)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
# FactorLibre (<http://factorlibre.com>)
# Hugo santos <hugo.santos@factorlibre.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
__name__ = u"Renombrar impuestos, códigos de impuestos y posiciones fiscales"
def rename_fiscal_positions(cr):
cr.execute("""
UPDATE account_fiscal_position
SET name='Régimen Extracomunitario / Canarias, Ceuta y Melilla'
WHERE name='Régimen Extracomunitario'
""")
def rename_tax_codes(cr):
tax_code_mapping = [
# IVA devengado. Base
{'previous_code': '--',
'previous_name': 'IVA devengado. Base imponible', 'code': 'IDBI'},
{'previous_code': '--',
'previous_name': 'IVA Devengado Base Imponible', 'code': 'IDBI'},
{'previous_code': '[01]',
'code': 'RGIDBI4'},
{'previous_code': '[04]',
'previous_name': 'Régimen general IVA devengado. Base imponible 10%',
'code': 'RGIDBI10'},
{'previous_code': '[04]',
'previous_name': 'Régimen general IVA Devengado. Base Imponible 10%',
'code': 'RGIDBI10'},
{'previous_code': '[07]',
'previous_name': 'Régimen general IVA devengado. Base imponible 21%',
'code': 'RGIDBI21'},
{'previous_code': '[07]',
'previous_name': 'Régimen general IVA Devengado. Base Imponible 21%',
'code': 'RGIDBI21'},
# IVA devengado. Cuota
{'previous_code': '[21]', 'code': 'ITDC'},
{'previous_code': '[03]', 'code': 'RGIDC4'},
{'previous_code': '[06]',
'previous_name': 'Régimen general IVA devengado. Cuota 10%',
'code': 'RGIDC10'},
{'previous_code': '[06]',
'previous_name': 'Régimen general IVA Devengado. Cuota 10%',
'code': 'RGIDC10'},
{'previous_code': '[09]',
'previous_name': 'Régimen general IVA devengado. Cuota 21%',
'code': 'RGIDC21'},
{'previous_code': '[09]',
'previous_name': 'Régimen general IVA Devengado. Cuota 21%',
'code': 'RGIDC21'},
# Adquisiciones intracomunitarias
{'previous_code': '[19]', 'code': 'AIDBYSBI'},
{'previous_code': '[20]', 'code': 'AIDBYSC'},
# IVA deducible. Base Imponible
{'previous_code': '--',
'previous_name': 'IVA deducible. Base imponible', 'code': 'ISBI'},
{'previous_code': '--',
'previous_name': 'IVA Deducible Base Imponible', 'code': 'ISBI'},
{'previous_code': '--',
'previous_name': 'Base de compensaciones Régimen Especial A., G. y'
' P. 12%', 'code': 'CREAGYPBI12'},
# Base operaciones interiores corrientes
{'previous_code': '[22]', 'code': 'OICBI'},
{'previous_code': '--',
'previous_name': 'Base operaciones interiores corrientes (4%)',
'code': 'OICBI4'},
{'previous_code': '--',
'previous_name': 'Base operaciones interiores corrientes (10%)',
'code': 'OICBI10'},
{'previous_code': '--',
'previous_name': 'Base operaciones interiores corrientes (21%)',
'code': 'OICBI21'},
# Base operaciones interiores bienes de inversión
{'previous_code': '[24]', 'code': 'OIBIBI'},
{'previous_code': '--',
'previous_name': 'Base operaciones interiores bienes inversión (4%)',
'code': 'OIBIBI4'},
{'previous_code': '--',
'previous_name': 'Base operaciones interiores bienes inversión (10%)',
'code': 'OIBIBI10'},
{'previous_code': '--',
'previous_name': 'Base operaciones interiores bienes inversión (21%)',
'code': 'OIBIBI21'},
# Base importaciones de bienes corrientes
{'previous_code': '[26]', 'code': 'IBCBI'},
{'previous_code': '--',
'previous_name': 'Base importaciones bienes y servicios corrientes'
' (4%)', 'code': 'IBYSCBI4'},
{'previous_code': '--',
'previous_name': 'Base importaciones bienes y servicios corrientes'
' (10%)', 'code': 'IBYSCBI10'},
{'previous_code': '--',
'previous_name': 'Base importaciones bienes y servicios corrientes'
' (21%)', 'code': 'IBYSCBI21'},
# Base importaciones de bienes de inversión
{'previous_code': '[28]', 'code': 'IBIBI'},
{'previous_code': '--',
'previous_name': 'Base importaciones bienes inversión (4%)',
'code': 'IBIBI4'},
{'previous_code': '--',
'previous_name': 'Base importaciones bienes inversión (10%)',
'code': 'IBIBI10'},
{'previous_code': '--',
'previous_name': 'Base importaciones bienes inversión (21%)',
'code': 'IBIBI21'},
# Adquisiciones intracomunitarias de bienes corrientes
{'previous_code': '[30]', 'code': 'AIBYSCBI'},
{'previous_code': '--',
'previous_name': 'Base adquisiciones intracomunitarias bienes y'
' serv. corr. (4%)', 'code': 'AIBYSCBI4'},
{'previous_code': '--',
'previous_name': 'Base adquisiciones intracomunitarias bienes y'
' serv. corr. (10%)', 'code': 'AIBYSCBI10'},
{'previous_code': '--',
'previous_name': 'Base adquisiciones intracomunitarias bienes y'
' serv. corr. (21%)', 'code': 'AIBYSCBI21'},
# Adquisiciones intracomunitarias de bienes de inversión
{'previous_code': '[32]', 'code': 'AIBIBI'},
{'previous_code': '--',
'previous_name': 'Base adquisiciones intracomunitarias bienes'
' inversión (4%)', 'code': 'AIBIBI4'},
{'previous_code': '--',
'previous_name': 'Base adquisiciones intracomunitarias bienes'
' inversión (10%)', 'code': 'AIBIBI10'},
{'previous_code': '--',
'previous_name': 'Base adquisiciones intracomunitarias bienes'
' inversión (21%)', 'code': 'AIBIBI21'},
# Base recargo de equivalencia
{'previous_code': '--',
'previous_name': 'Recargo equivalencia ded. Base imponible 0.5%',
'code': 'REDBI05'},
{'previous_code': '--',
'previous_name': 'Recargo equivalencia ded. Base imponible 1.4%',
'code': 'REDBI014'},
{'previous_code': '--',
'previous_name': 'Recargo equivalencia ded. Base imponible 5.2%',
'code': 'REDBI52'},
# Iva deducible cuotas
{'previous_code': '[37]', 'code': 'ITADC'},
{'previous_code': '34', 'code': 'CREAGYP12'},
# Cuotas operaciones interiores corrientes
{'previous_code': '[23]', 'code': 'SOICC'},
{'previous_code': '--',
'previous_name': 'Cuotas soportadas operaciones interiores corrientes'
' (4%)', 'code': 'SOICC4'},
{'previous_code': '--',
'previous_name': 'Cuotas soportadas operaciones interiores corrientes'
' (10%)', 'code': 'SOICC10'},
{'previous_code': '--',
'previous_name': 'Cuotas soportadas operaciones interiores corrientes'
' (21%)', 'code': 'SOICC21'},
# Cuotas operaciones interiores con bienes de inversión
{'previous_code': '[25]', 'code': 'SOIBIC'},
{'previous_code': '--',
'previous_name': 'Cuotas soportadas operaciones interiores bienes'
' inversión (4%)', 'code': 'SOIBIC4'},
{'previous_code': '--',
'previous_name': 'Cuotas soportadas operaciones interiores bienes'
' inversión (10%)', 'code': 'SOIBIC10'},
{'previous_code': '--',
'previous_name': 'Cuotas soportadas operaciones interiores bienes'
' inversión (21%)', 'code': 'SOIBIC21'},
# Cuotas devengadas en importaciones de bienes y serv. corr.
{'previous_code': '[27]', 'code': 'DIBCC'},
{'previous_code': '--',
'previous_name': 'Cuotas devengadas importaciones bienes y serv.'
' corr. (4%)', 'code': 'DIBYSCC4'},
{'previous_code': '--',
'previous_name': 'Cuotas devengadas importaciones bienes y serv.'
' corr. (10%)', 'code': 'DIBYSCC10'},
{'previous_code': '--',
'previous_name': 'Cuotas devengadas importaciones bienes y serv.'
' corr. (21%)', 'code': 'DIBYSCC21'},
# Cuotas devengadas en importaciones de bienes de inversión
{'previous_code': '[29]', 'code': 'DIBIC'},
{'previous_code': '--',
'previous_name': 'Cuotas devengadas importaciones bienes inversión'
' (4%)', 'code': 'DIBIC4'},
{'previous_code': '--',
'previous_name': 'Cuotas devengadas importaciones bienes inversión'
' (10%)', 'code': 'DIBIC10'},
{'previous_code': '--',
'previous_name': 'Cuotas devengadas importaciones bienes inversión'
' (21%)', 'code': 'DIBIC21'},
# Adquisiciones intracomunitarias de bienes corrientes - Cuota
{'previous_code': '[31]', 'code': 'AIBYSCC'},
{'previous_code': '--',
'previous_name': 'En adquisiciones intracomunitarias bienes y serv.'
' corr. (4%)', 'code': 'AIBYSCC4'},
{'previous_code': '--',
'previous_name': 'En adquisiciones intracomunitarias bienes y serv.'
' corr. (10%)', 'code': 'AIBYSCC10'},
{'previous_code': '--',
'previous_name': 'En adquisiciones intracomunitarias bienes y serv.'
' corr. (21%)', 'code': 'AIBYSCC21'},
# Adquisiciones intracomunitarias bienes de inversión - Cuota
{'previous_code': '[33]', 'code': 'AIBIC'},
{'previous_code': '--',
'previous_name': 'En adquisiciones intracomunitarias bienes inversión'
' (4%)', 'code': 'AIBIC4'},
{'previous_code': '--',
'previous_name': 'En adquisiciones intracomunitarias bienes inversión'
' (10%)', 'code': 'AIBIC10'},
{'previous_code': '--',
'previous_name': 'En adquisiciones intracomunitarias bienes inversión'
' (21%)', 'code': 'AIBIC21'},
# Otros códigos de impuestos
{'previous_code': '[42]', 'code': 'EIDBYS'},
{'previous_code': '[43]', 'code': 'EYOA'},
# Recargo equivalencia Cuota
{'previous_code': '[12]',
'previous_name': 'Recargo equivalencia. Cuota 0.5%',
'code': 'REC05'},
{'previous_code': '[15]',
'previous_name': 'Recargo equivalencia. Cuota 1.4%',
'code': 'REC014'},
{'previous_code': '[18]',
'previous_name': 'Recargo equivalencia. Cuota 5.2%',
'code': 'REC52'},
# Recargo equivalencia ded. Cuota
{'previous_code': '[12]',
'previous_name': 'Recargo equivalencia ded. Cuota 0.5%',
'code': 'REDC05'},
{'previous_code': '[15]',
'previous_name': 'Recargo equivalencia ded. Cuota 1.4%',
'code': 'REDC014'},
{'previous_code': '[18]',
'previous_name': 'Recargo equivalencia ded. Cuota 5.2%',
'code': 'REDC52'},
# Recargo equivalencia base imponible
{'previous_code': '[10]', 'code': 'REBI05'},
{'previous_code': '[13]', 'code': 'REBI014'},
{'previous_code': '[16]', 'code': 'REBI52'},
# IRPF Retenciones a cuenta
{'previous_code': 'B.IRPF AC', 'code': 'IRACBI'},
{'previous_code': 'B.IRPF1 AC', 'code': 'IRACBI1'},
{'previous_code': 'B.IRPF2 AC', 'code': 'IRACBI2'},
{'previous_code': 'B.IRPF7 AC', 'code': 'IRACBI7'},
{'previous_code': 'B.IRPF9 AC', 'code': 'IRACBI9'},
{'previous_code': 'B.IRPF15 AC', 'code': 'IRACBI15'},
{'previous_code': 'B.IRPF20 AC', 'code': 'IRACBI20'},
{'previous_code': 'B.IRPF21 AC', 'code': 'IRACBI21'},
# IRPF total retenciones a cuenta
{'previous_code': 'IRPF AC', 'code': 'ITRACC'},
{'previous_code': 'IRPF1 AC', 'code': 'IRACC1'},
{'previous_code': 'IRPF2 AC', 'code': 'IRACC2'},
{'previous_code': 'IRPF7 AC', 'code': 'IRACC7'},
{'previous_code': 'IRPF9 AC', 'code': 'IRACC9'},
{'previous_code': 'IRPF15 AC', 'code': 'IRACC15'},
{'previous_code': 'IRPF20 AC', 'code': 'IRACC20'},
{'previous_code': 'IRPF21 AC', 'code': 'IRACC21'},
# IRPF retenciones practicadas. base imponible
{'previous_code': 'B.IRPF', 'code': 'IRPBI'},
{'previous_code': 'B.IRPF1', 'code': 'IRPBI1'},
{'previous_code': 'B.IRPF2', 'code': 'IRPBI2'},
{'previous_code': 'B.IRPF7', 'code': 'IRPBI7'},
{'previous_code': 'B.IRPF9', 'code': 'IRPBI9'},
{'previous_code': 'B.IRPF15', 'code': 'IRPBI15'},
{'previous_code': 'B.IRPF20', 'code': 'IRPBI20'},
{'previous_code': 'B.IRPF21', 'code': 'IRPBI21'},
# IRPF retenciones practicadas. total cuota
{'previous_code': 'IRPF', 'code': 'ITRPC'},
{'previous_code': 'IRPF1', 'code': 'IRPC1'},
{'previous_code': 'IRPF2', 'code': 'IRPC2'},
{'previous_code': 'IRPF7', 'code': 'IRPC7'},
{'previous_code': 'IRPF9', 'code': 'IRPC9'},
{'previous_code': 'IRPF15', 'code': 'IRPC15'},
{'previous_code': 'IRPF20', 'code': 'IRPC20'},
{'previous_code': 'IRPF21', 'code': 'IRPC21'},
# IVA exento
{'previous_code': '--',
'previous_name': 'Base adquisiciones exentas',
'code': 'AEBI'},
{'previous_code': '--',
'previous_name': 'Base ventas exentas',
'code': 'OESDAD'},
]
for mapping in tax_code_mapping:
sql = """
UPDATE account_tax_code
SET code=%s
WHERE code=%s"""
if mapping.get('previous_name'):
sql += " AND name=%s"
cr.execute(sql, (mapping['code'], mapping['previous_code'],
mapping['previous_name']))
else:
cr.execute(sql, (mapping['code'], mapping['previous_code']))
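# Worked example (added comment, not part of the original script): for the
# mapping {'previous_code': '[01]', 'code': 'RGIDBI4'} the statement executed
# above is equivalent to
#     UPDATE account_tax_code SET code='RGIDBI4' WHERE code='[01]'
# and entries carrying a 'previous_name' simply append "AND name=%s" so that
# two codes sharing the same previous code can be told apart by name.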
def rename_taxes(cr):
tax_mapping = {
'S_IVA4': 'S_IVA4B',
'S_IVA10': 'S_IVA10B',
'S_IVA21': 'S_IVA21B',
'P_IVA21_IC_SV': 'P_IVA21_SP_IN',
'P_IVA21_IC_SV_1': 'P_IVA21_SP_IN_1',
'P_IVA21_IC_SV_2': 'P_IVA21_SP_IN_2',
}
for old_description, new_description in tax_mapping.iteritems():
sql = """
UPDATE account_tax
SET description=%s
WHERE description=%s"""
cr.execute(sql, (new_description, old_description))
def change_refunds_tax_codes(cr):
"""Cambia los códigos de impuestos de los abonos posteriores a 2014 para
que vayan a la parte de modificación de bases/cuotas en lugar de minorar
las bases/cuotas normales.
"""
refund_tax_codes = {
# IVA repercutido
'RGIDBI4': 'MBYCRBI',
'RGIDBI10': 'MBYCRBI',
'RGIDBI21': 'MBYCRBI',
'RGIDC4': 'MBYCRC',
'RGIDC10': 'MBYCRC',
'RGIDC21': 'MBYCRC',
# Recargo equivalencia compras
'REDBI05': 'RDDSBI',
'REDBI014': 'RDDSBI',
'REDBI52': 'RDDSBI',
'REDC05': 'RDDSC',
'REDC014': 'RDDSC',
'REDC52': 'RDDSC',
# Recargo equivalencia ventas
'REBI05': 'MBYCDRDERBI',
'REBI014': 'MBYCDRDERBI',
'REBI52': 'MBYCDRDERBI',
'REC05': 'MBYCDRDERC',
'REC014': 'MBYCDRDERC',
'REC52': 'MBYCDRDERC',
# IVA soportado
'OICBI4': 'RDDSBI',
'OIBIBI4': 'RDDSBI',
'OICBI10': 'RDDSBI',
'OIBIBI10': 'RDDSBI',
'OICBI21': 'RDDSBI',
'OIBIBI21': 'RDDSBI',
'SOICC4': 'RDDSC',
'SOIBIC4': 'RDDSC',
'SOICC10': 'RDDSC',
'SOIBIC10': 'RDDSC',
'SOICC21': 'RDDSC',
'SOIBIC21': 'RDDSC',
# Importaciones
'IBYSCBI4': 'RDDSBI',
'IBYSCBI10': 'RDDSBI',
'IBYSCBI21': 'RDDSBI',
'IBIBI4': 'RDDSBI',
'IBIBI10': 'RDDSBI',
'IBIBI21': 'RDDSBI',
'DIBYSCC4': 'RDDSC',
'DIBYSCC10': 'RDDSC',
'DIBYSCC21': 'RDDSC',
'DIBIC4': 'RDDSC',
'DIBIC10': 'RDDSC',
'DIBIC21': 'RDDSC',
# Intracomunitario
'AIBYSCBI4': 'RDDSBI',
'AIBYSCBI10': 'RDDSBI',
'AIBYSCBI21': 'RDDSBI',
'AISCBI4': 'RDDSBI',
'AISCBI10': 'RDDSBI',
'AISCBI21': 'RDDSBI',
'AIBIBI4': 'RDDSBI',
'AIBIBI10': 'RDDSBI',
'AIBIBI21': 'RDDSBI',
'AIBYSCC4': 'RDDSC',
'AIBYSCC10': 'RDDSC',
'AIBYSCC21': 'RDDSC',
'AISCC4': 'RDDSC',
'AISCC10': 'RDDSC',
'AISCC21': 'RDDSC',
'AIBIC4': 'RDDSC',
'AIBIC10': 'RDDSC',
'AIBIC21': 'RDDSC',
'AIDBYSBI': 'MBYCRBI',
'AIBBI': 'MBYCRBI',
'AIBIBIA': 'MBYCRBI',
'OCIDSPEAIBI': 'MBYCRBI',
'AISBI': 'MBYCRBI',
'AIDBYSC': 'MBYCRC',
'AIBC': 'MBYCRC',
'AIBICA': 'MBYCRC',
'OOCIDSPEAIC': 'MBYCRC',
'AISC': 'MBYCRC',
}
cr.execute("SELECT id FROM res_company")
for record in cr.fetchall():
company_id = record[0]
for old_tax_code, new_tax_code in refund_tax_codes.iteritems():
cr.execute(
"SELECT id FROM account_tax_code WHERE code=%s",
(new_tax_code, ))
new_tax_code_id = cr.fetchone()
if not new_tax_code_id:
# Create fake tax code
cr.execute(
"""
INSERT INTO account_tax_code
(code, name, sign, company_id)
VALUES (%s, %s, %s, %s)
RETURNING id
""", (new_tax_code, new_tax_code, 1.0, company_id))
new_tax_code_id = cr.fetchone()[0]
cr.execute(
"""
UPDATE account_move_line aml
SET tax_code_id=%s
FROM account_tax_code atc
WHERE aml.tax_code_id=atc.id
AND atc.code=%s
AND aml.tax_amount < 0
AND aml.date>='2014-01-01'
AND aml.company_id=%s
""", (new_tax_code_id, old_tax_code, company_id))
def migrate(cr, version):
if not version:
return
rename_fiscal_positions(cr)
rename_tax_codes(cr)
rename_taxes(cr)
change_refunds_tax_codes(cr)
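# Usage note (added comment, not part of the original script): OpenERP runs
# migrate(cr, version) automatically while upgrading l10n_es to 8.0.5.0; the
# early return on a falsy version means a fresh install, which has nothing to
# rename, is left untouched.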
|
kitsunde/ansible
|
refs/heads/devel
|
lib/ansible/playbook/helpers.py
|
11
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from types import NoneType
from ansible.errors import AnsibleParserError
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleSequence
def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
'''
Given a list of mixed task/block data (parsed from YAML),
return a list of Block() objects, where implicit blocks
are created for each bare Task.
'''
# we import here to prevent a circular dependency with imports
from ansible.playbook.block import Block
if not isinstance(ds, (list, type(None))):
raise AnsibleParserError('block has bad type: "%s". Expecting "list"' % type(ds).__name__, obj=ds)
block_list = []
if ds:
for block in ds:
b = Block.load(
block,
play=play,
parent_block=parent_block,
role=role,
task_include=task_include,
use_handlers=use_handlers,
variable_manager=variable_manager,
loader=loader
)
            # Implicit blocks are created by bare tasks listed in a play without
# an explicit block statement. If we have two implicit blocks in a row,
# squash them down to a single block to save processing time later.
if b._implicit and len(block_list) > 0 and block_list[-1]._implicit:
block_list[-1].block.extend(b.block)
else:
block_list.append(b)
return block_list
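# Illustrative sketch (added comment, not part of the original module): two
# consecutive bare tasks in a play, e.g.
#
#     tasks:
#       - command: /bin/true
#       - command: /bin/false
#
# each load as an implicit Block, and the squashing above folds the second
# one's task list into the first, so block_list ends up holding a single
# implicit Block that contains both tasks.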
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
'''
Given a list of task datastructures (parsed from YAML),
return a list of Task() or TaskInclude() objects.
'''
# we import here to prevent a circular dependency with imports
from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
if not isinstance(ds, list):
raise AnsibleParserError('task has bad type: "%s". Expected "list"' % type(ds).__name__, obj=ds)
task_list = []
for task in ds:
if not isinstance(task, dict):
raise AnsibleParserError('task/handler has bad type: "%s". Expected "dict"' % type(task).__name__, obj=task)
if 'block' in task:
t = Block.load(
task,
play=play,
parent_block=block,
role=role,
task_include=task_include,
use_handlers=use_handlers,
variable_manager=variable_manager,
loader=loader,
)
else:
if use_handlers:
t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
else:
t = Task.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
task_list.append(t)
return task_list
def load_list_of_roles(ds, play, current_role_path=None, variable_manager=None, loader=None):
'''
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions
'''
# we import here to prevent a circular dependency with imports
from ansible.playbook.role.include import RoleInclude
if not isinstance(ds, list):
        raise AnsibleParserError('roles has bad type: "%s". Expected "list"' % type(ds).__name__, obj=ds)
roles = []
for role_def in ds:
i = RoleInclude.load(role_def, play=play, current_role_path=current_role_path, variable_manager=variable_manager, loader=loader)
roles.append(i)
return roles
|
githubmlai/numpy
|
refs/heads/master
|
numpy/fft/fftpack.py
|
72
|
"""
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
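# Illustrative sketch of the cache life-cycle described above (added comment,
# not part of the original module): the first call for a given n builds a
# twiddle-factor buffer via init_function(n); once the transform finishes the
# buffer is appended to fft_cache[n], so a later call with the same n pops and
# reuses it instead of recomputing, while concurrent callers that find the
# list empty simply build their own buffer.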
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
        If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
a = asarray(a).astype(complex)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
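# Worked example (added comment, not part of the original module): for an
# input of shape (4, 6) with s=None and axes=None the defaults become
# s == [4, 6] and axes == [-2, -1]; with invreal=1 the last entry is rewritten
# to (6 - 1) * 2 == 10, which is how irfftn recovers its default output length.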
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
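# Doctest-style sketch (added comment, not part of the original module),
# assuming the usual `import numpy as np`:
#
#     >>> np.fft.rfft2(np.ones((2, 2)))
#     array([[ 4.+0.j,  0.+0.j],
#            [ 0.+0.j,  0.+0.j]])
#
# Only n//2 + 1 columns of the last axis are kept, which here is still 2.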
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
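

# --- Illustrative usage sketch (not part of the module above; assumes only NumPy) ---
# Shows the round-trip property documented in `irfftn` and why `s` must be passed to
# recover an odd-length final axis: rfftn keeps only the non-redundant half of the
# spectrum, so the original length cannot be inferred from the transformed shape alone.
if __name__ == "__main__":  # pragma: no cover - example only
    import numpy as np

    x = np.random.rand(4, 5)                               # final axis has odd length 5
    X = np.fft.rfftn(x)                                    # shape (4, 3): 5 // 2 + 1 = 3 bins
    assert np.allclose(np.fft.irfftn(X, s=x.shape), x)     # exact round trip
    assert np.fft.irfftn(X).shape == (4, 4)                # without `s`, length 2*(3-1) = 4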
|
Bindupriya/nuxeo-drive
|
refs/heads/master
|
nuxeo-drive-client/nxdrive/engine/engine.py
|
1
|
from PyQt4.QtCore import QObject, QCoreApplication
from PyQt4.QtCore import pyqtSlot, pyqtSignal
from nxdrive.logging_config import get_logger
from nxdrive.commandline import DEFAULT_REMOTE_WATCHER_DELAY
from nxdrive.commandline import DEFAULT_UPDATE_SITE_URL
from nxdrive.client.common import DEFAULT_REPOSITORY_NAME
from nxdrive.client.common import NotFound
from nxdrive.client import LocalClient
from nxdrive.client import RemoteFileSystemClient
from nxdrive.client import RemoteFilteredFileSystemClient
from nxdrive.client import RemoteDocumentClient
from nxdrive.utils import normalized_path
from nxdrive.engine.processor import Processor
from threading import current_thread
from nxdrive.osi import AbstractOSIntegration
from nxdrive.engine.workers import Worker, ThreadInterrupt, PairInterrupt
from nxdrive.engine.activity import Action, FileAction
from time import sleep
WindowsError = None
try:
from exceptions import WindowsError
except ImportError:
pass # this will never be raised under unix
import os
import datetime
from cookielib import CookieJar
from nxdrive.gui.resources import find_icon
import urllib2
log = get_logger(__name__)
class InvalidDriveException(Exception):
pass
class RootAlreadyBindWithDifferentAccount(Exception):
def __init__(self, username, url):
self._username = username
self._url = url
def get_username(self):
return self._username
def get_url(self):
return self._url
class FsMarkerException(Exception):
pass
class EngineLogger(QObject):
def __init__(self, engine):
super(EngineLogger, self).__init__()
self._dao = engine.get_dao()
self._engine = engine
self._engine.logger = self
self._level = 10
self._engine.syncStarted.connect(self.logSyncStart)
self._engine.syncCompleted.connect(self.logSyncComplete)
self._engine.newConflict.connect(self.logConflict)
self._engine.newSync.connect(self.logSync)
self._engine.newError.connect(self.logError)
self._engine.newQueueItem.connect(self.logQueueItem)
def _log_pair(self, row_id, msg, handler=None):
pair = self._dao.get_state_from_id(row_id)
if handler is not None:
log.log(self._level, msg, pair, handler)
else:
log.log(self._level, msg, pair)
@pyqtSlot()
def logSyncComplete(self):
log.log(self._level, "Synchronization is complete for engine %s",
self.sender().get_uid())
@pyqtSlot(object)
    def logSyncStart(self, size):
        log.log(self._level, "Synchronization starts (%d items)", size)
@pyqtSlot(object)
def logConflict(self, row_id):
self._log_pair(row_id, "Conflict on %r")
@pyqtSlot(object, object)
def logSync(self, row, metrics):
log.log(self._level, "Sync on %r with %r", row, metrics)
@pyqtSlot(object)
def logError(self, row_id):
self._log_pair(row_id, "Error on %r")
@pyqtSlot(object)
def logQueueItem(self, row_id):
self._log_pair(row_id, "QueueItem on %r")
'''
' Used for threads interaction
'''
class Engine(QObject):
BATCH_MODE_UPLOAD = "upload"
BATCH_MODE_FOLDER = "folder"
BATCH_MODE_DOWNLOAD = "download"
BATCH_MODE_SYNC = "sync"
_start = pyqtSignal()
_stop = pyqtSignal()
_scanPair = pyqtSignal(str)
syncStarted = pyqtSignal(object)
syncCompleted = pyqtSignal()
syncSuspended = pyqtSignal()
syncResumed = pyqtSignal()
rootDeleted = pyqtSignal()
rootMoved = pyqtSignal(str)
invalidAuthentication = pyqtSignal()
invalidClientsCache = pyqtSignal()
newConflict = pyqtSignal(object)
newSync = pyqtSignal(object, object)
newError = pyqtSignal(object)
newQueueItem = pyqtSignal(object)
offline = pyqtSignal()
online = pyqtSignal()
def __init__(self, manager, definition, binder=None, processors=5,
remote_watcher_delay=DEFAULT_REMOTE_WATCHER_DELAY,
remote_doc_client_factory=RemoteDocumentClient,
remote_fs_client_factory=RemoteFileSystemClient,
remote_filtered_fs_client_factory=RemoteFilteredFileSystemClient):
super(Engine, self).__init__()
self.version = manager.get_version()
self._remote_clients = dict()
# Used for binding server / roots and managing tokens
self.remote_doc_client_factory = remote_doc_client_factory
# Used for FS synchronization operations
self.remote_fs_client_factory = remote_fs_client_factory
# Used for FS synchronization operations
self.remote_filtered_fs_client_factory = remote_filtered_fs_client_factory
# Stop if invalid credentials
self.invalidAuthentication.connect(self.stop)
        # Folder locker - the LocalFolder processor can prevent other processors from operating on a folder
self._folder_lock = None
# Case sensitive partition
self._case_sensitive = None
self.timeout = 30
self._handshake_timeout = 60
# Make all the automation client related to this manager
# share cookies using threadsafe jar
self.cookie_jar = CookieJar()
self._manager = manager
# Remove remote client cache on proxy update
self._manager.proxyUpdated.connect(self.invalidate_client_cache)
self._local_folder = definition.local_folder
self._type = "NXDRIVE"
self._uid = definition.uid
self._name = definition.name
self._stopped = True
self._pause = False
self._sync_started = False
self._invalid_credentials = False
self._offline_state = False
self._threads = list()
self._client_cache_timestamps = dict()
self._dao = self._create_dao()
if binder is not None:
self.bind(binder)
self._load_configuration()
self._local_watcher = self._create_local_watcher()
self.create_thread(worker=self._local_watcher)
self._remote_watcher = self._create_remote_watcher(remote_watcher_delay)
self.create_thread(worker=self._remote_watcher, start_connect=False)
# Launch remote_watcher after first local scan
self._local_watcher.rootDeleted.connect(self.rootDeleted)
self._local_watcher.rootMoved.connect(self.rootMoved)
self._local_watcher.localScanFinished.connect(self._remote_watcher.run)
self._queue_manager = self._create_queue_manager(processors)
# Launch queue processors after first remote_watcher pass
self._remote_watcher.initiate.connect(self._queue_manager.init_processors)
self._remote_watcher.remoteWatcherStopped.connect(self._queue_manager.shutdown_processors)
# Connect last_sync checked
self._remote_watcher.updated.connect(self._check_last_sync)
# Connect for sync start
self.newQueueItem.connect(self._check_sync_start)
self._queue_manager.newItem.connect(self._check_sync_start)
# Connect components signals to engine signals
self._queue_manager.newItem.connect(self.newQueueItem)
self._queue_manager.newError.connect(self.newError)
self._dao.newConflict.connect(self.conflict_resolver)
# Scan in remote_watcher thread
self._scanPair.connect(self._remote_watcher.scan_pair)
# Set the root icon
self._set_root_icon()
# Set user full name
self._user_cache = dict()
@pyqtSlot(object)
def _check_sync_start(self, row_id):
if not self._sync_started:
queue_size = self._queue_manager.get_overall_size()
if queue_size > 0:
self._sync_started = True
self.syncStarted.emit(queue_size)
def reinit(self):
started = not self._stopped
if started:
self.stop()
self._dao.reinit_states()
self._dao.delete_config("remote_last_sync_date")
self._dao.delete_config("remote_last_event_log_id")
self._dao.delete_config("remote_last_event_last_root_definitions")
self._dao.delete_config("remote_last_full_scan")
self._dao.delete_config("last_sync_date")
self._check_root()
if started:
self.start()
def stop_processor_on(self, path):
for worker in self.get_queue_manager().get_processors_on(path, exact_match=True):
log.trace("Quitting processor: %r as requested to stop on %s", worker, path)
worker.quit()
def set_local_folder_lock(self, path):
self._folder_lock = path
# Check for each processor
log.debug("Local Folder locking on '%s'", path)
while self.get_queue_manager().has_file_processors_on(path):
log.trace("Local folder locking wait for file processor to finish")
sleep(1)
log.debug("Local Folder lock setup completed on '%s'", path)
def release_folder_lock(self):
self._folder_lock = None
def get_last_files(self, number, direction=None):
return self._dao.get_last_files(number, direction)
def set_offline(self, value=True):
if value == self._offline_state:
return
self._offline_state = value
if value:
log.debug("Engine %s goes offline", self._uid)
self._queue_manager.suspend()
self.offline.emit()
else:
log.debug("Engine %s goes online", self._uid)
self._queue_manager.resume()
self.online.emit()
def is_offline(self):
return self._offline_state
def add_filter(self, path):
remote_ref = os.path.basename(path)
remote_parent_path = os.path.dirname(path)
if remote_ref is None:
return
self._dao.add_filter(path)
pair = self._dao.get_state_from_remote_with_path(remote_ref, remote_parent_path)
if pair is None:
log.debug("Can't find the pair: %s (%s)", remote_ref, remote_parent_path)
return
self._dao.delete_remote_state(pair)
def remove_filter(self, path):
self.get_dao().remove_filter(path)
# Scan the "new" pair, use signal/slot to not block UI
self._scanPair.emit(path)
def get_document_id(self, remote_ref):
remote_ref_segments = remote_ref.split("#", 2)
return remote_ref_segments[2]
def get_metadata_url(self, remote_ref):
DRIVE_METADATA_VIEW = 'view_drive_metadata'
metadata_url = self.get_server_url()
remote_ref_segments = remote_ref.split("#", 2)
repo = remote_ref_segments[1]
doc_id = remote_ref_segments[2]
metadata_url += ("nxdoc/" + repo + "/" + doc_id +
"/" + DRIVE_METADATA_VIEW)
return metadata_url
def is_syncing(self):
return self._sync_started
def is_paused(self):
return self._pause
def open_edit(self, remote_ref, remote_name):
doc_ref = remote_ref
if "#" in doc_ref:
doc_ref = doc_ref[doc_ref.rfind('#') + 1:]
log.debug("Will try to open edit : %s", doc_ref)
# TODO Implement a TemporaryWorker
from threading import Thread
def run():
self._manager.get_drive_edit().edit(self._server_url,
doc_ref, remote_name, user=self._remote_user)
self._edit_thread = Thread(target=run)
self._edit_thread.start()
def open_remote(self, url=None):
if url is None:
url = self.get_remote_url()
self._manager.open_local_file(url)
def get_previous_file(self, ref, mode):
if mode == Engine.BATCH_MODE_FOLDER:
return self._dao.get_previous_folder_file(ref)
if mode == Engine.BATCH_MODE_SYNC:
mode = None
return self._dao.get_previous_sync_file(ref, sync_mode=mode)
def get_next_file(self, ref, mode):
if mode == Engine.BATCH_MODE_FOLDER:
return self._dao.get_next_folder_file(ref)
if mode == Engine.BATCH_MODE_SYNC:
mode = None
return self._dao.get_next_sync_file(ref, sync_mode=mode)
def resume(self):
# If stopped then start the engine
if self._stopped:
self.start()
return
self._pause = False
self._queue_manager.resume()
for thread in self._threads:
if thread.isRunning():
thread.worker.resume()
else:
thread.start()
self.syncResumed.emit()
def suspend(self):
if self._pause:
return
self._pause = True
self._queue_manager.suspend()
for thread in self._threads:
thread.worker.suspend()
self.syncSuspended.emit()
def unbind(self):
self.stop()
try:
            # Don't fail if it is not possible to remove the token
doc_client = self.get_remote_doc_client()
doc_client.revoke_token()
except Exception as e:
log.exception(e)
self.dispose_db()
# Remove DB
log.debug("Remove DB file %s", self._get_db_file())
try:
os.remove(self._get_db_file())
except (IOError, WindowsError) as ioe:
log.exception(ioe)
return
def check_fs_marker(self):
tag = 'drive-fs-test'
tag_value = 'NXDRIVE_VERIFICATION'
client = self.get_local_client()
if not client.exists('/'):
self.rootDeleted.emit()
return False
client.set_remote_id('/', tag_value, tag)
if client.get_remote_id('/', tag) != tag_value:
return False
client.remove_remote_id('/', tag)
        if client.get_remote_id('/', tag) is not None:
return False
return True
def _normalize_url(self, url):
"""Ensure that user provided url always has a trailing '/'"""
if url is None or not url:
raise ValueError("Invalid url: %r" % url)
if not url.endswith(u'/'):
return url + u'/'
return url
def _load_configuration(self):
self._web_authentication = self._dao.get_config("web_authentication", "0") == "1"
self._server_url = self._dao.get_config("server_url")
self._remote_user = self._dao.get_config("remote_user")
self._remote_password = self._dao.get_config("remote_password")
self._remote_token = self._dao.get_config("remote_token")
self._device_id = self._manager.device_id
if self._remote_password is None and self._remote_token is None:
self.set_invalid_credentials(reason="found no password nor token in engine configuration")
def get_server_url(self):
return self._dao.get_config("server_url")
def get_remote_user(self):
return self._dao.get_config("remote_user")
def get_remote_token(self):
return self._dao.get_config("remote_token")
def _create_queue_manager(self, processors):
from nxdrive.engine.queue_manager import QueueManager
if self._manager.is_debug():
return QueueManager(self, self._dao, max_file_processors=2)
return QueueManager(self, self._dao)
def _create_remote_watcher(self, delay):
from nxdrive.engine.watcher.remote_watcher import RemoteWatcher
return RemoteWatcher(self, self._dao, delay)
def _create_local_watcher(self):
from nxdrive.engine.watcher.local_watcher import LocalWatcher
return LocalWatcher(self, self._dao)
def _get_db_file(self):
return os.path.join(normalized_path(self._manager.get_configuration_folder()),
"ndrive_" + self._uid + ".db")
def _create_dao(self):
from nxdrive.engine.dao.sqlite import EngineDAO
return EngineDAO(self._get_db_file())
def get_remote_url(self):
server_link = self._dao.get_config("server_url", "")
repository = DEFAULT_REPOSITORY_NAME
if not server_link.endswith('/'):
server_link += '/'
url_suffix = ('@view_home?tabIds=MAIN_TABS:home,'
'USER_CENTER:userCenterNuxeoDrive')
server_link += 'nxhome/' + repository + url_suffix
return server_link
def get_abspath(self, path):
return self.get_local_client()._abspath(path)
def get_binder(self):
from nxdrive.manager import ServerBindingSettings
return ServerBindingSettings(server_url=self._server_url,
web_authentication=self._web_authentication,
server_version=None,
username=self._remote_user,
local_folder=self._local_folder,
initialized=True,
pwd_update_required=self.has_invalid_credentials())
def get_local_folder(self):
return self._local_folder
def get_uid(self):
return self._uid
def set_invalid_credentials(self, value=True, reason=None, exception=None):
changed = self._invalid_credentials != value
self._invalid_credentials = value
if value and changed:
msg = 'Setting invalid credentials'
if reason is not None:
msg += ', reason is: %s' % reason
log.error(msg, exc_info=exception is not None)
self.invalidAuthentication.emit()
def has_invalid_credentials(self):
return self._invalid_credentials
def get_queue_manager(self):
return self._queue_manager
def get_remote_watcher(self):
return self._remote_watcher
def get_dao(self):
return self._dao
def local_rollback(self):
return False
def create_thread(self, worker=None, name=None, start_connect=True):
if worker is None:
worker = Worker(self, name=name)
# If subclass of Processor then connect the newSync signal
from nxdrive.engine.processor import Processor
if isinstance(worker, Processor):
worker.pairSync.connect(self.newSync)
thread = worker.get_thread()
if start_connect:
thread.started.connect(worker.run)
self._stop.connect(worker.quit)
thread.finished.connect(self._thread_finished)
self._threads.append(thread)
return thread
def retry_pair(self, row_id):
state = self._dao.get_state_from_id(row_id)
self._dao.reset_error(state)
def resolve_with_local(self, row_id):
row = self._dao.get_state_from_id(row_id)
self._dao.force_local(row)
def resolve_with_remote(self, row_id):
row = self._dao.get_state_from_id(row_id)
self._dao.force_remote(row)
def resolve_with_duplicate(self, row_id):
row = self._dao.get_state_from_id(row_id)
self._dao.increase_error(row, "DUPLICATING")
from threading import Thread
def run():
local_client = self.get_local_client()
# Duplicate the file
local_client.duplicate_file(row.local_path)
# Force the remote
self._dao.force_remote(row)
self._duplicate_thread = Thread(target=run)
self._duplicate_thread.start()
def get_last_sync(self):
return self._dao.get_config("last_sync_date", None)
@pyqtSlot()
def _check_last_sync(self):
from nxdrive.engine.watcher.local_watcher import WIN_MOVE_RESOLUTION_PERIOD
qm_active = self._queue_manager.active()
qm_size = self._queue_manager.get_overall_size()
empty_polls = self._remote_watcher.get_metrics()["empty_polls"]
log.debug('Checking sync completed: queue manager is %s, overall size = %d, empty polls count = %d',
'active' if qm_active else 'inactive', qm_size, empty_polls)
local_metrics = self._local_watcher.get_metrics()
if (qm_size == 0 and not qm_active and empty_polls > 0
and self._local_watcher.empty_events()
and (
not AbstractOSIntegration.is_windows()
or
self._local_watcher.win_queue_empty())):
self._dao.update_config("last_sync_date", datetime.datetime.utcnow())
if local_metrics['last_event'] == 0:
log.warn("No watchdog event detected but sync is completed")
if self._sync_started:
self._sync_started = False
log.debug('Emitting syncCompleted for engine %s', self.get_uid())
self.syncCompleted.emit()
def _thread_finished(self):
for thread in self._threads:
if thread == self._local_watcher._thread:
continue
if thread == self._remote_watcher._thread:
continue
if thread.isFinished():
self._threads.remove(thread)
def is_started(self):
return not self._stopped
def start(self):
if not self.check_fs_marker():
raise FsMarkerException()
self._stopped = False
Processor.soft_locks = dict()
log.debug("Engine start")
for thread in self._threads:
thread.start()
self.syncStarted.emit(0)
self._start.emit()
def get_threads(self):
return self._threads
def get_status(self):
QCoreApplication.processEvents()
log.debug("Engine status")
for thread in self._threads:
log.debug("%r" % thread.worker.get_metrics())
log.debug("%r" % self._queue_manager.get_metrics())
def get_metrics(self):
metrics = dict()
metrics["sync_folders"] = self._dao.get_sync_count(filetype="folder")
metrics["sync_files"] = self._dao.get_sync_count(filetype="file")
metrics["error_files"] = self._dao.get_error_count()
metrics["conflicted_files"] = self._dao.get_conflict_count()
metrics["files_size"] = self._dao.get_global_size()
metrics["invalid_credentials"] = self._invalid_credentials
return metrics
def get_conflicts(self):
return self._dao.get_conflicts()
def conflict_resolver(self, row_id):
try:
pair = self._dao.get_state_from_id(row_id)
local_client = self.get_local_client()
parent_ref = local_client.get_remote_id(pair.local_parent_path)
if (pair.remote_name == pair.local_name
and local_client.is_equal_digests(pair.local_digest, pair.remote_digest, pair.local_path)
and pair.remote_parent_ref == parent_ref):
self._dao.synchronize_state(pair)
else:
# Raise conflict only if not resolvable
self.newConflict.emit(row_id)
except Exception:
pass
def get_errors(self):
return self._dao.get_errors()
def is_stopped(self):
return self._stopped
def stop(self):
self._stopped = True
log.debug("Engine %s stopping", self._uid)
self._stop.emit()
for thread in self._threads:
if not thread.wait(5000):
log.warn("Thread is not responding - terminate it")
thread.terminate()
if not self._local_watcher._thread.wait(5000):
self._local_watcher._thread.terminate()
if not self._remote_watcher._thread.wait(5000):
self._remote_watcher._thread.terminate()
for thread in self._threads:
if thread.isRunning():
thread.wait(5000)
if not self._remote_watcher._thread.isRunning():
self._remote_watcher._thread.wait(5000)
if not self._local_watcher._thread.isRunning():
self._local_watcher._thread.wait(5000)
        # Soft locks need to be reinitialized in case of thread termination
Processor.soft_locks = dict()
log.debug("Engine %s stopped", self._uid)
def _get_client_cache(self):
return self._remote_clients
def use_trash(self):
return True
def get_update_infos(self, client=None):
if client is None:
client = self.get_remote_doc_client()
update_info = client.get_update_info()
log.debug("Fetched update info for engine [%s] from server %s: %r", self._name, self._server_url, update_info)
self._dao.update_config("server_version", update_info.get("serverVersion"))
self._dao.update_config("update_url", update_info.get("updateSiteURL"))
beta_update_site_url = update_info.get("betaUpdateSiteURL")
# Consider empty string as None
if not beta_update_site_url:
beta_update_site_url = None
self._dao.update_config("beta_update_url", beta_update_site_url)
def update_password(self, password):
self._load_configuration()
nxclient = self.remote_doc_client_factory(
self._server_url, self._remote_user, self._manager.device_id,
self._manager.client_version, proxies=self._manager.proxies,
proxy_exceptions=self._manager.proxy_exceptions,
password=str(password), timeout=self._handshake_timeout)
self._remote_token = nxclient.request_token()
if self._remote_token is None:
            raise Exception("Failed to request a new token from the server")
self._dao.update_config("remote_token", self._remote_token)
self.set_invalid_credentials(False)
# In case of a binding
self._check_root()
self.start()
def update_token(self, token):
self._load_configuration()
self._remote_token = token
self._dao.update_config("remote_token", self._remote_token)
self.set_invalid_credentials(False)
self.start()
def bind(self, binder):
check_credential = True
if hasattr(binder, 'no_check') and binder.no_check:
check_credential = False
check_fs = True
if hasattr(binder, 'no_fscheck') and binder.no_fscheck:
check_fs = False
self._server_url = self._normalize_url(binder.url)
self._remote_user = binder.username
self._remote_password = binder.password
self._remote_token = binder.token
self._web_authentication = self._remote_token is not None
if check_fs:
created_folder = False
try:
if not os.path.exists(os.path.dirname(self._local_folder)):
raise NotFound()
if not os.path.exists(self._local_folder):
os.mkdir(self._local_folder)
created_folder = True
self._check_fs(self._local_folder)
except Exception as e:
try:
if created_folder:
os.rmdir(self._local_folder)
except:
pass
raise e
nxclient = None
if check_credential:
nxclient = self.remote_doc_client_factory(
self._server_url, self._remote_user, self._manager.device_id,
self._manager.client_version, proxies=self._manager.proxies,
proxy_exceptions=self._manager.proxy_exceptions,
password=self._remote_password, token=self._remote_token,
timeout=self._handshake_timeout)
if self._remote_token is None:
self._remote_token = nxclient.request_token()
if self._remote_token is not None:
# The server supports token based identification: do not store the
# password in the DB
self._remote_password = None
# Save the configuration
self._dao.update_config("web_authentication", self._web_authentication)
self._dao.update_config("server_url", self._server_url)
self._dao.update_config("remote_user", self._remote_user)
self._dao.update_config("remote_password", self._remote_password)
self._dao.update_config("remote_token", self._remote_token)
if nxclient:
self.get_update_infos(nxclient)
# Check for the root
# If the top level state for the server binding doesn't exist,
# create the local folder and the top level state.
self._check_root()
def _check_fs(self, path):
if not self._manager.get_osi().is_partition_supported(path):
raise InvalidDriveException()
if os.path.exists(path):
local_client = self.get_local_client()
root_id = local_client.get_root_id()
if root_id is not None:
# server_url|user|device_id|uid
token = root_id.split("|")
if (self._server_url != token[0] or self._remote_user != token[1]):
raise RootAlreadyBindWithDifferentAccount(token[1], token[0])
def _check_root(self):
root = self._dao.get_state_from_local("/")
if root is None:
from nxdrive.client.common import BaseClient
if os.path.exists(self._local_folder):
BaseClient.unset_path_readonly(self._local_folder)
self._make_local_folder(self._local_folder)
self._add_top_level_state()
self._set_root_icon()
BaseClient.set_path_readonly(self._local_folder)
def _make_local_folder(self, local_folder):
if not os.path.exists(local_folder):
os.makedirs(local_folder)
# OSI package
# TODO self.register_folder_link(local_folder)
# Put the ROOT in readonly
def cancel_action_on(self, pair_id):
for thread in self._threads:
if hasattr(thread, "worker") and isinstance(thread.worker, Processor):
pair = thread.worker._current_doc_pair
if pair is not None and pair.id == pair_id:
thread.worker.quit()
def get_local_client(self):
client = LocalClient(self._local_folder, case_sensitive=self._case_sensitive)
if self._case_sensitive is None:
self._case_sensitive = client.is_case_sensitive()
return client
def get_server_version(self):
return self._dao.get_config("server_version")
def get_update_url(self):
return self._dao.get_config("update_url", DEFAULT_UPDATE_SITE_URL)
def get_beta_update_url(self):
return self._dao.get_config("beta_update_url")
@pyqtSlot()
def invalidate_client_cache(self):
log.debug("Invalidate client cache")
self._remote_clients.clear()
self.invalidClientsCache.emit()
def _set_root_icon(self):
local_client = self.get_local_client()
if local_client.has_folder_icon('/'):
return
if AbstractOSIntegration.is_mac():
if AbstractOSIntegration.os_version_below("10.10"):
icon = find_icon("NuxeoDrive_Mac_Folder.dat")
else:
icon = find_icon("NuxeoDrive_Mac_Yosemite_Folder.dat")
elif AbstractOSIntegration.is_windows():
if AbstractOSIntegration.os_version_below("5.2"):
icon = find_icon("NuxeoDrive_Windows_Xp_Folder.ico")
else:
icon = find_icon("NuxeoDrive_Windows_Folder.ico")
else:
# No implementation on Linux
return
locker = local_client.unlock_ref('/', unlock_parent=False)
try:
local_client.set_folder_icon('/', icon)
finally:
local_client.lock_ref('/', locker)
def _add_top_level_state(self):
local_client = self.get_local_client()
local_info = local_client.get_info(u'/')
remote_client = self.get_remote_client()
remote_info = remote_client.get_filesystem_root_info()
self._dao.insert_local_state(local_info, '')
row = self._dao.get_state_from_local('/')
self._dao.update_remote_state(row, remote_info, '', versionned=False)
local_client.set_root_id(self._server_url + "|" + self._remote_user +
"|" + self._manager.device_id + "|" + self._uid)
self._dao.synchronize_state(row)
        # The root should also be synced
def suspend_client(self, reason):
if self.is_paused() or self._stopped:
raise ThreadInterrupt
# Verify thread status
thread_id = current_thread().ident
for thread in self._threads:
if hasattr(thread, "worker") and isinstance(thread.worker, Processor):
if (thread.worker._thread_id == thread_id and
                        thread.worker._continue is False):
raise ThreadInterrupt
# Get action
current_file = None
action = Action.get_current_action()
if isinstance(action, FileAction):
client = self.get_local_client()
current_file = client.get_path(action.filepath)
if (current_file is not None and self._folder_lock is not None
and current_file.startswith(self._folder_lock)):
log.debug("PairInterrupt '%s' because lock on '%s'",
current_file, self._folder_lock)
raise PairInterrupt
def complete_binder(self, row):
# Add more information
row.server_url = self._server_url
row.username = self._remote_user
row.has_invalid_credentials = self.has_invalid_credentials
def get_remote_client(self, filtered=True):
"""Return a client for the FileSystem abstraction."""
if self._invalid_credentials:
return None
cache = self._get_client_cache()
cache_key = (self._manager.device_id, filtered)
remote_client = cache.get(cache_key)
if remote_client is None:
if filtered:
remote_client = self.remote_filtered_fs_client_factory(
self._server_url, self._remote_user,
self._manager.device_id, self.version, self._dao,
proxies=self._manager.proxies,
proxy_exceptions=self._manager.proxy_exceptions,
password=self._remote_password,
timeout=self.timeout, cookie_jar=self.cookie_jar,
token=self._remote_token, check_suspended=self.suspend_client)
else:
remote_client = self.remote_fs_client_factory(
self._server_url, self._remote_user,
self._manager.device_id, self.version,
proxies=self._manager.proxies,
proxy_exceptions=self._manager.proxy_exceptions,
password=self._remote_password,
timeout=self.timeout, cookie_jar=self.cookie_jar,
token=self._remote_token, check_suspended=self.suspend_client)
cache[cache_key] = remote_client
return remote_client
def get_remote_doc_client(self, repository=DEFAULT_REPOSITORY_NAME, base_folder=None):
if self._invalid_credentials:
return None
cache = self._get_client_cache()
cache_key = (self._manager.device_id, 'remote_doc')
remote_client = cache.get(cache_key)
if remote_client is None:
remote_client = self.remote_doc_client_factory(
self._server_url, self._remote_user,
self._manager.device_id, self.version,
proxies=self._manager.proxies,
proxy_exceptions=self._manager.proxy_exceptions,
password=self._remote_password, token=self._remote_token,
repository=repository, base_folder=base_folder,
timeout=self._handshake_timeout, cookie_jar=self.cookie_jar, check_suspended=self.suspend_client)
cache[cache_key] = remote_client
return remote_client
def create_processor(self, item_getter, name=None):
from nxdrive.engine.processor import Processor
return Processor(self, item_getter, name=name)
def dispose_db(self):
if self._dao is not None:
self._dao.dispose()
def get_rest_api_client(self):
from nxdrive.client.rest_api_client import RestAPIClient
rest_client = RestAPIClient(self.get_server_url(), self.get_remote_user(),
self._manager.get_device_id(), self._manager.client_version, None,
self.get_remote_token(), timeout=self.timeout, cookie_jar=self.cookie_jar)
return rest_client
def get_user_full_name(self, userid):
"""
Get the last contributor full name
"""
fullname = userid
try:
if userid in self._user_cache:
fullname = self._user_cache[userid]
else:
rest_client = self.get_rest_api_client()
response = rest_client.get_user_full_name(userid)
if response and 'properties' in response:
fullname = " ".join([response['properties']['firstName'],
response['properties']['lastName']]).strip()
self._user_cache[userid] = fullname
except urllib2.URLError as e:
log.exception(e)
return fullname
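

# --- Illustrative sketch (not part of the original engine.py) ---
# get_document_id()/get_metadata_url() above assume remote refs of the form
# "factory#repository#doc_id"; the doc id below is a made-up placeholder.
if __name__ == "__main__":  # pragma: no cover - example only
    sample_ref = "defaultFileSystemItemFactory#default#12345678-abcd-efgh"
    factory, repository, doc_id = sample_ref.split("#", 2)
    print("repository=%s doc_id=%s" % (repository, doc_id))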
|
tylertian/Openstack
|
refs/heads/master
|
openstack F/django_openstack_auth/openstack_auth/tests/__init__.py
|
12133432
| |
juanshishido/info290-dds
|
refs/heads/master
|
assignments/assignment02/code/tests/__init__.py
|
12133432
| |
laserjock/tittle
|
refs/heads/master
|
python_rtm/__init__.py
|
12133432
| |
fangxingli/hue
|
refs/heads/master
|
desktop/core/ext-py/django-extensions-1.5.0/django_extensions/db/fields/json.py
|
35
|
"""
JSONField automatically serializes most Python terms to JSON data.
Creates a TEXT field with a default value of "{}". See test_json.py for
more information.
from django.db import models
from django_extensions.db.fields import json
class LOL(models.Model):
extra = json.JSONField()
"""
from __future__ import absolute_import
import six
from decimal import Decimal
from django.db import models
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
try:
# Django >= 1.7
import json
except ImportError:
# Django <= 1.6 backwards compatibility
from django.utils import simplejson as json
def dumps(value):
return DjangoJSONEncoder().encode(value)
def loads(txt):
value = json.loads(
txt,
parse_float=Decimal,
encoding=settings.DEFAULT_CHARSET
)
return value
class JSONDict(dict):
"""
Hack so repr() called by dumpdata will output JSON instead of
Python formatted data. This way fixtures will work!
"""
def __repr__(self):
return dumps(self)
class JSONUnicode(six.text_type):
"""
As above
"""
def __repr__(self):
return dumps(self)
class JSONList(list):
"""
As above
"""
def __repr__(self):
return dumps(self)
class JSONField(six.with_metaclass(models.SubfieldBase, models.TextField)):
"""JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly. Main thingy must be a dict object."""
def __init__(self, *args, **kwargs):
default = kwargs.get('default', None)
if default is None:
kwargs['default'] = '{}'
elif isinstance(default, (list, dict)):
kwargs['default'] = dumps(default)
models.TextField.__init__(self, *args, **kwargs)
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if value is None or value == '':
return {}
elif isinstance(value, six.string_types):
res = loads(value)
if isinstance(res, dict):
return JSONDict(**res)
elif isinstance(res, six.string_types):
return JSONUnicode(res)
elif isinstance(res, list):
return JSONList(res)
return res
else:
return value
def get_db_prep_save(self, value, connection, **kwargs):
"""Convert our JSON object to a string before we save"""
if value is None and self.null:
return None
# default values come in as strings; only non-strings should be
# run through `dumps`
if not isinstance(value, six.string_types):
value = dumps(value)
return super(JSONField, self).get_db_prep_save(value, connection=connection)
def south_field_triple(self):
"""Returns a suitable description of this field for South."""
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.TextField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(JSONField, self).deconstruct()
if self.default == '{}':
del kwargs['default']
return name, path, args, kwargs
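

# --- Illustrative sketch (not part of the upstream django-extensions module) ---
# Shows why the JSONDict/JSONList/JSONUnicode wrappers exist: their repr() emits JSON,
# so dumpdata fixtures stay loadable. Assumes the same old-Django environment the
# module itself targets.
if __name__ == "__main__":  # pragma: no cover - example only
    d = JSONDict(a=1, b=[1, 2])
    print(repr(d))        # -> {"a": 1, "b": [1, 2]}
    print(dumps(d))       # same JSON, produced via DjangoJSONEncoder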
|
zchking/odoo
|
refs/heads/8.0
|
addons/website/__openerp__.py
|
311
|
{
'name': 'Website Builder',
'category': 'Website',
'summary': 'Build Your Enterprise Website',
'website': 'https://www.odoo.com/page/website-builder',
'version': '1.0',
'description': """
OpenERP Website CMS
===================
""",
'author': 'OpenERP SA',
'depends': ['web', 'share', 'mail'],
'installable': True,
'data': [
'data/data.xml',
'security/ir.model.access.csv',
'security/ir_ui_view.xml',
'views/website_templates.xml',
'views/website_views.xml',
'views/snippets.xml',
'views/themes.xml',
'views/res_config.xml',
'views/ir_actions.xml',
'views/website_backend_navbar.xml',
],
'demo': [
'data/demo.xml',
],
'qweb': ['static/src/xml/website.backend.xml'],
'application': True,
}
|
gregdek/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_profile_udp.py
|
21
|
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_profile_udp import ApiParameters
from library.modules.bigip_profile_udp import ModuleParameters
from library.modules.bigip_profile_udp import ModuleManager
from library.modules.bigip_profile_udp import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_profile_udp import ApiParameters
from ansible.modules.network.f5.bigip_profile_udp import ModuleParameters
from ansible.modules.network.f5.bigip_profile_udp import ModuleManager
from ansible.modules.network.f5.bigip_profile_udp import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='bar',
idle_timeout='500',
datagram_load_balancing=False
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/bar'
assert p.idle_timeout == 500
assert p.datagram_load_balancing is False
def test_api_parameters(self):
args = load_fixture('load_ltm_profile_udp_1.json')
p = ApiParameters(params=args)
assert p.name == 'foo'
assert p.idle_timeout == 60
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
name='foo',
parent='bar',
idle_timeout=500,
datagram_load_balancing=True,
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['idle_timeout'] == 500
|
lukas-hetzenecker/home-assistant
|
refs/heads/dev
|
homeassistant/components/climate/radiotherm.py
|
5
|
"""
Support for Radio Thermostat wifi-enabled home thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.radiotherm/
"""
import datetime
import logging
import voluptuous as vol
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, STATE_IDLE, STATE_OFF,
ClimateDevice, PLATFORM_SCHEMA)
from homeassistant.const import CONF_HOST, TEMP_FAHRENHEIT, ATTR_TEMPERATURE
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['radiotherm==1.2']
_LOGGER = logging.getLogger(__name__)
ATTR_FAN = 'fan'
ATTR_MODE = 'mode'
CONF_HOLD_TEMP = 'hold_temp'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_HOLD_TEMP, default=False): cv.boolean,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Radio Thermostat."""
import radiotherm
hosts = []
if CONF_HOST in config:
hosts = config[CONF_HOST]
else:
hosts.append(radiotherm.discover.discover_address())
    if not hosts or None in hosts:
_LOGGER.error("No Radiotherm Thermostats detected")
return False
hold_temp = config.get(CONF_HOLD_TEMP)
tstats = []
for host in hosts:
try:
tstat = radiotherm.get_thermostat(host)
tstats.append(RadioThermostat(tstat, hold_temp))
except OSError:
_LOGGER.exception("Unable to connect to Radio Thermostat: %s",
host)
add_devices(tstats)
class RadioThermostat(ClimateDevice):
"""Representation of a Radio Thermostat."""
def __init__(self, device, hold_temp):
"""Initialize the thermostat."""
self.device = device
self.set_time()
self._target_temperature = None
self._current_temperature = None
self._current_operation = STATE_IDLE
self._name = None
self._fmode = None
self._tmode = None
self.hold_temp = hold_temp
self.update()
self._operation_list = [STATE_AUTO, STATE_COOL, STATE_HEAT, STATE_OFF]
@property
def name(self):
"""Return the name of the Radio Thermostat."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
return {
ATTR_FAN: self._fmode,
ATTR_MODE: self._tmode,
}
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def current_operation(self):
"""Return the current operation. head, cool idle."""
return self._current_operation
@property
def operation_list(self):
"""Return the operation modes list."""
return self._operation_list
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
def update(self):
"""Update the data from the thermostat."""
self._current_temperature = self.device.temp['raw']
self._name = self.device.name['raw']
self._fmode = self.device.fmode['human']
self._tmode = self.device.tmode['human']
if self._tmode == 'Cool':
self._target_temperature = self.device.t_cool['raw']
self._current_operation = STATE_COOL
elif self._tmode == 'Heat':
self._target_temperature = self.device.t_heat['raw']
self._current_operation = STATE_HEAT
else:
self._current_operation = STATE_IDLE
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
if self._current_operation == STATE_COOL:
self.device.t_cool = round(temperature * 2.0) / 2.0
elif self._current_operation == STATE_HEAT:
self.device.t_heat = round(temperature * 2.0) / 2.0
if self.hold_temp:
self.device.hold = 1
else:
self.device.hold = 0
def set_time(self):
"""Set device time."""
now = datetime.datetime.now()
self.device.time = {
'day': now.weekday(),
'hour': now.hour,
'minute': now.minute
}
def set_operation_mode(self, operation_mode):
"""Set operation mode (auto, cool, heat, off)."""
if operation_mode == STATE_OFF:
self.device.tmode = 0
elif operation_mode == STATE_AUTO:
self.device.tmode = 3
elif operation_mode == STATE_COOL:
self.device.t_cool = round(self._target_temperature * 2.0) / 2.0
elif operation_mode == STATE_HEAT:
self.device.t_heat = round(self._target_temperature * 2.0) / 2.0
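

# --- Illustrative note (not part of the upstream platform code) ---
# The thermostat only accepts half-degree Fahrenheit setpoints, hence the
# round(temperature * 2.0) / 2.0 pattern in set_temperature()/set_operation_mode().
if __name__ == "__main__":  # pragma: no cover - example only
    for t in (71.2, 71.3, 71.75):
        print("%.2f -> %.1f" % (t, round(t * 2.0) / 2.0))   # 71.0, 71.5, 72.0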
|
Manabu-GT/DesignOverlay-Android
|
refs/heads/master
|
appium/android_sauce_labs.py
|
4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author : Manabu Shimobe
"""
__author__ = "Manabu Shimobe"
from appium import webdriver
from appium import SauceTestCase, on_platforms
from time import sleep
from logging import getLogger, StreamHandler, Formatter, DEBUG
from os import environ
import json
# load default platform configurations
json_file = open('appium/config_sauce_labs.json')
platforms = json.load(json_file)
for platform in platforms:
platform['app'] = "sauce-storage:%s" % environ.get('SAUCE_APK_FILE')
platform['customData'] = {'commit': environ.get('TRAVIS_COMMIT', environ.get('SAUCE_COMMIT')),
'versionName': environ.get('SAUCE_APK_VERSION_NAME'),
'versionCode': environ.get('SAUCE_APK_VERSION_CODE')}
platform['build'] = "build-%s" % environ.get('TRAVIS_BUILD_NUMBER', 'local')
json_file.close()
# set up logger
logger = getLogger(__name__)
logger.setLevel(DEBUG)
handler = StreamHandler()
handler.setFormatter(Formatter('%(asctime)s- %(name)s - %(levelname)s - %(message)s'))
handler.setLevel(DEBUG)
logger.addHandler(handler)
# the emulator is sometimes slow
SLEEP_TIME = 1
@on_platforms(platforms)
class SimpleAndroidSauceTests(SauceTestCase):
def test_settings(self):
sleep(SLEEP_TIME)
# Check if successfully started SettingsActivity
self.assertEqual('.activity.SettingsActivity_', self.driver.current_activity)
el_switch = self.driver.find_element_by_accessibility_id('Grid Switch')
self.assertIsNotNone(el_switch)
# Grid should be shown now
el_switch.click()
logger.info('Clicked Grid Switch')
sleep(SLEEP_TIME)
|
Francis-Liu/animated-broccoli
|
refs/heads/master
|
nova/api/openstack/compute/schemas/user_data.py
|
88
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
server_create = {
'user_data': {
'type': 'string',
'format': 'base64'
},
}
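

# --- Minimal validation sketch (illustrative; nova wires this fragment into its own
# request-body validator). Assumes the plain `jsonschema` package: without a
# FormatChecker the custom 'base64' format is ignored, so only the string type is checked.
if __name__ == "__main__":  # pragma: no cover - example only
    import jsonschema

    body_schema = {'type': 'object', 'properties': server_create}
    jsonschema.validate({'user_data': 'aGVsbG8gd29ybGQ='}, body_schema)   # passes
    # jsonschema.validate({'user_data': 123}, body_schema)                # would raise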
|
biddisco/VTK
|
refs/heads/master
|
ThirdParty/Twisted/twisted/conch/manhole.py
|
52
|
# -*- test-case-name: twisted.conch.test.test_manhole -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Line-input oriented interactive interpreter loop.
Provides classes for handling Python source input and arbitrary output
interactively from a Twisted application. Also included is syntax coloring
code with support for VT102 terminals, control code handling (^C, ^D, ^Q),
and reasonable handling of Deferreds.
@author: Jp Calderone
"""
import code, sys, StringIO, tokenize
from twisted.conch import recvline
from twisted.internet import defer
from twisted.python.htmlizer import TokenPrinter
class FileWrapper:
"""Minimal write-file-like object.
Writes are translated into addOutput calls on an object passed to
__init__. Newlines are also converted from network to local style.
"""
softspace = 0
state = 'normal'
def __init__(self, o):
self.o = o
def flush(self):
pass
def write(self, data):
self.o.addOutput(data.replace('\r\n', '\n'))
def writelines(self, lines):
self.write(''.join(lines))
class ManholeInterpreter(code.InteractiveInterpreter):
"""Interactive Interpreter with special output and Deferred support.
Aside from the features provided by L{code.InteractiveInterpreter}, this
class captures sys.stdout output and redirects it to the appropriate
location (the Manhole protocol instance). It also treats Deferreds
which reach the top-level specially: each is formatted to the user with
a unique identifier and a new callback and errback added to it, each of
which will format the unique identifier and the result with which the
Deferred fires and then pass it on to the next participant in the
callback chain.
"""
numDeferreds = 0
def __init__(self, handler, locals=None, filename="<console>"):
code.InteractiveInterpreter.__init__(self, locals)
self._pendingDeferreds = {}
self.handler = handler
self.filename = filename
self.resetBuffer()
def resetBuffer(self):
"""Reset the input buffer."""
self.buffer = []
def push(self, line):
"""Push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()).
"""
self.buffer.append(line)
source = "\n".join(self.buffer)
more = self.runsource(source, self.filename)
if not more:
self.resetBuffer()
return more
def runcode(self, *a, **kw):
orighook, sys.displayhook = sys.displayhook, self.displayhook
try:
origout, sys.stdout = sys.stdout, FileWrapper(self.handler)
try:
code.InteractiveInterpreter.runcode(self, *a, **kw)
finally:
sys.stdout = origout
finally:
sys.displayhook = orighook
def displayhook(self, obj):
self.locals['_'] = obj
if isinstance(obj, defer.Deferred):
# XXX Ick, where is my "hasFired()" interface?
if hasattr(obj, "result"):
self.write(repr(obj))
elif id(obj) in self._pendingDeferreds:
self.write("<Deferred #%d>" % (self._pendingDeferreds[id(obj)][0],))
else:
d = self._pendingDeferreds
k = self.numDeferreds
d[id(obj)] = (k, obj)
self.numDeferreds += 1
obj.addCallbacks(self._cbDisplayDeferred, self._ebDisplayDeferred,
callbackArgs=(k, obj), errbackArgs=(k, obj))
self.write("<Deferred #%d>" % (k,))
elif obj is not None:
self.write(repr(obj))
def _cbDisplayDeferred(self, result, k, obj):
self.write("Deferred #%d called back: %r" % (k, result), True)
del self._pendingDeferreds[id(obj)]
return result
def _ebDisplayDeferred(self, failure, k, obj):
self.write("Deferred #%d failed: %r" % (k, failure.getErrorMessage()), True)
del self._pendingDeferreds[id(obj)]
return failure
def write(self, data, async=False):
self.handler.addOutput(data, async)
CTRL_C = '\x03'
CTRL_D = '\x04'
CTRL_BACKSLASH = '\x1c'
CTRL_L = '\x0c'
CTRL_A = '\x01'
CTRL_E = '\x05'
class Manhole(recvline.HistoricRecvLine):
"""Mediator between a fancy line source and an interactive interpreter.
This accepts lines from its transport and passes them on to a
L{ManholeInterpreter}. Control commands (^C, ^D, ^\) are also handled
with something approximating their normal terminal-mode behavior. It
can optionally be constructed with a dict which will be used as the
local namespace for any code executed.
"""
namespace = None
def __init__(self, namespace=None):
recvline.HistoricRecvLine.__init__(self)
if namespace is not None:
self.namespace = namespace.copy()
def connectionMade(self):
recvline.HistoricRecvLine.connectionMade(self)
self.interpreter = ManholeInterpreter(self, self.namespace)
self.keyHandlers[CTRL_C] = self.handle_INT
self.keyHandlers[CTRL_D] = self.handle_EOF
self.keyHandlers[CTRL_L] = self.handle_FF
self.keyHandlers[CTRL_A] = self.handle_HOME
self.keyHandlers[CTRL_E] = self.handle_END
self.keyHandlers[CTRL_BACKSLASH] = self.handle_QUIT
def handle_INT(self):
"""
Handle ^C as an interrupt keystroke by resetting the current input
variables to their initial state.
"""
self.pn = 0
self.lineBuffer = []
self.lineBufferIndex = 0
self.interpreter.resetBuffer()
self.terminal.nextLine()
self.terminal.write("KeyboardInterrupt")
self.terminal.nextLine()
self.terminal.write(self.ps[self.pn])
def handle_EOF(self):
if self.lineBuffer:
self.terminal.write('\a')
else:
self.handle_QUIT()
def handle_FF(self):
"""
Handle a 'form feed' byte - generally used to request a screen
refresh/redraw.
"""
self.terminal.eraseDisplay()
self.terminal.cursorHome()
self.drawInputLine()
def handle_QUIT(self):
self.terminal.loseConnection()
def _needsNewline(self):
w = self.terminal.lastWrite
return not w.endswith('\n') and not w.endswith('\x1bE')
def addOutput(self, bytes, async=False):
if async:
self.terminal.eraseLine()
self.terminal.cursorBackward(len(self.lineBuffer) + len(self.ps[self.pn]))
self.terminal.write(bytes)
if async:
if self._needsNewline():
self.terminal.nextLine()
self.terminal.write(self.ps[self.pn])
if self.lineBuffer:
oldBuffer = self.lineBuffer
self.lineBuffer = []
self.lineBufferIndex = 0
self._deliverBuffer(oldBuffer)
def lineReceived(self, line):
more = self.interpreter.push(line)
self.pn = bool(more)
if self._needsNewline():
self.terminal.nextLine()
self.terminal.write(self.ps[self.pn])
class VT102Writer:
"""Colorizer for Python tokens.
A series of tokens are written to instances of this object. Each is
colored in a particular way. The final line of the result of this is
generally added to the output.
"""
typeToColor = {
'identifier': '\x1b[31m',
'keyword': '\x1b[32m',
'parameter': '\x1b[33m',
'variable': '\x1b[1;33m',
'string': '\x1b[35m',
'number': '\x1b[36m',
'op': '\x1b[37m'}
normalColor = '\x1b[0m'
def __init__(self):
self.written = []
def color(self, type):
r = self.typeToColor.get(type, '')
return r
def write(self, token, type=None):
if token and token != '\r':
c = self.color(type)
if c:
self.written.append(c)
self.written.append(token)
if c:
self.written.append(self.normalColor)
def __str__(self):
s = ''.join(self.written)
return s.strip('\n').splitlines()[-1]
def lastColorizedLine(source):
"""Tokenize and colorize the given Python source.
Returns a VT102-format colorized version of the last line of C{source}.
"""
w = VT102Writer()
p = TokenPrinter(w.write).printtoken
s = StringIO.StringIO(source)
tokenize.tokenize(s.readline, p)
return str(w)
class ColoredManhole(Manhole):
"""A REPL which syntax colors input as users type it.
"""
def getSource(self):
"""Return a string containing the currently entered source.
This is only the code which will be considered for execution
next.
"""
return ('\n'.join(self.interpreter.buffer) +
'\n' +
''.join(self.lineBuffer))
def characterReceived(self, ch, moreCharactersComing):
if self.mode == 'insert':
self.lineBuffer.insert(self.lineBufferIndex, ch)
else:
self.lineBuffer[self.lineBufferIndex:self.lineBufferIndex+1] = [ch]
self.lineBufferIndex += 1
if moreCharactersComing:
# Skip it all, we'll get called with another character in
# like 2 femtoseconds.
return
if ch == ' ':
# Don't bother to try to color whitespace
self.terminal.write(ch)
return
source = self.getSource()
# Try to write some junk
try:
coloredLine = lastColorizedLine(source)
except tokenize.TokenError:
# We couldn't do it. Strange. Oh well, just add the character.
self.terminal.write(ch)
else:
# Success! Clear the source on this line.
self.terminal.eraseLine()
self.terminal.cursorBackward(len(self.lineBuffer) + len(self.ps[self.pn]) - 1)
# And write a new, colorized one.
self.terminal.write(self.ps[self.pn] + coloredLine)
# And move the cursor to where it belongs
n = len(self.lineBuffer) - self.lineBufferIndex
if n:
self.terminal.cursorBackward(n)
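

# --- Illustrative sketch (not part of the upstream module; assumes the same
# Python 2 / Twisted environment the module itself requires) ---
# lastColorizedLine() tokenizes a source buffer and returns only its last line,
# wrapped in VT102 color escapes, which is what ColoredManhole redraws as you type.
if __name__ == '__main__':  # pragma: no cover - example only
    print(repr(lastColorizedLine("x = 1\ny = 'hi'\n")))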
|
dav1x/ansible
|
refs/heads/devel
|
lib/ansible/modules/messaging/rabbitmq_binding.py
|
69
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_binding
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
short_description: This module manages rabbitMQ bindings
description:
- This module uses rabbitMQ Rest API to create/delete bindings
requirements: [ "requests >= 1.0.0" ]
options:
state:
description:
- Whether the exchange should be present or absent
- Only present implemented atm
choices: [ "present", "absent" ]
required: false
default: present
name:
description:
- source exchange to create binding on
required: true
aliases: [ "src", "source" ]
login_user:
description:
- rabbitMQ user for connection
required: false
default: guest
login_password:
description:
- rabbitMQ password for connection
required: false
        default: guest
login_host:
description:
- rabbitMQ host for connection
required: false
default: localhost
login_port:
description:
- rabbitMQ management api port
required: false
default: 15672
vhost:
description:
- rabbitMQ virtual host
- default vhost is /
required: false
default: "/"
destination:
description:
- destination exchange or queue for the binding
required: true
aliases: [ "dst", "dest" ]
destination_type:
description:
- Either queue or exchange
required: true
choices: [ "queue", "exchange" ]
aliases: [ "type", "dest_type" ]
routing_key:
description:
- routing key for the binding
- default is #
required: false
default: "#"
arguments:
description:
- extra arguments for exchange. If defined this argument is a key/value dictionary
required: false
default: {}
'''
EXAMPLES = '''
# Bind myQueue to directExchange with routing key info
- rabbitmq_binding:
name: directExchange
destination: myQueue
type: queue
routing_key: info
# Bind directExchange to topicExchange with routing key *.info
- rabbitmq_binding:
name: topicExchange
destination: topicExchange
type: exchange
routing_key: '*.info'
'''
import requests
import urllib
import json
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent'], type='str'),
name = dict(required=True, aliases=[ "src", "source" ], type='str'),
login_user = dict(default='guest', type='str'),
login_password = dict(default='guest', type='str', no_log=True),
login_host = dict(default='localhost', type='str'),
login_port = dict(default='15672', type='str'),
vhost = dict(default='/', type='str'),
destination = dict(required=True, aliases=[ "dst", "dest"], type='str'),
destination_type = dict(required=True, aliases=[ "type", "dest_type"], choices=[ "queue", "exchange" ],type='str'),
routing_key = dict(default='#', type='str'),
arguments = dict(default=dict(), type='dict')
),
supports_check_mode = True
)
if module.params['destination_type'] == "queue":
dest_type="q"
else:
dest_type="e"
if module.params['routing_key'] == "":
props = "~"
else:
props = urllib.quote(module.params['routing_key'],'')
url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % (
module.params['login_host'],
module.params['login_port'],
urllib.quote(module.params['vhost'],''),
urllib.quote(module.params['name'],''),
dest_type,
urllib.quote(module.params['destination'],''),
props
)
# Check if exchange already exists
r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
if r.status_code==200:
binding_exists = True
response = r.json()
elif r.status_code==404:
binding_exists = False
response = r.text
else:
module.fail_json(
msg = "Invalid response from RESTAPI when trying to check if exchange exists",
details = r.text
)
if module.params['state']=='present':
change_required = not binding_exists
else:
change_required = binding_exists
# Exit if check_mode
if module.check_mode:
module.exit_json(
changed= change_required,
name = module.params['name'],
details = response,
arguments = module.params['arguments']
)
# Do changes
if change_required:
if module.params['state'] == 'present':
url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s" % (
module.params['login_host'],
module.params['login_port'],
urllib.quote(module.params['vhost'],''),
urllib.quote(module.params['name'],''),
dest_type,
urllib.quote(module.params['destination'],'')
)
r = requests.post(
url,
auth = (module.params['login_user'],module.params['login_password']),
headers = { "content-type": "application/json"},
data = json.dumps({
"routing_key": module.params['routing_key'],
"arguments": module.params['arguments']
})
)
elif module.params['state'] == 'absent':
r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))
if r.status_code == 204 or r.status_code == 201:
module.exit_json(
changed = True,
name = module.params['name'],
destination = module.params['destination']
)
else:
module.fail_json(
msg = "Error creating exchange",
status = r.status_code,
details = r.text
)
else:
module.exit_json(
changed = False,
name = module.params['name']
)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
eharney/nova
|
refs/heads/master
|
nova/api/openstack/compute/contrib/baremetal_ext_status.py
|
13
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
class Baremetal_ext_status(extensions.ExtensionDescriptor):
"""Add extended status in Baremetal Nodes v2 API."""
name = "BareMetalExtStatus"
alias = "os-baremetal-ext-status"
namespace = ("http://docs.openstack.org/compute/ext/"
"baremetal_ext_status/api/v2")
updated = "2013-08-27T00:00:00+00:00"
|