repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
jansohn/pyload | refs/heads/stable | module/plugins/hoster/FreeWayMe.py | 3 | # -*- coding: utf-8 -*-
from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo
class FreeWayMe(MultiHoster):
    """pyLoad multi-hoster plugin for the free-way.me / free-way.bz service.

    Written against the Python 2 pyLoad 0.4.9 plugin API (note ``xrange``
    and the dunder metadata attributes read by the plugin loader).
    """
    __name__ = "FreeWayMe"
    __type__ = "hoster"
    __version__ = "0.20"
    __status__ = "testing"

    __pattern__ = r'https?://(?:www\.)?free-way\.(bz|me)/.+'
    __config__ = [("use_premium" , "bool", "Use premium account if available" , True),
                  ("revertfailed", "bool", "Revert to standard download if fails", True)]

    __description__ = """FreeWayMe multi-hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Nicolas Giese", "james@free-way.me")]

    def setup(self):
        # Download behaviour: no resume support, parallel downloads only for
        # premium accounts, and a single chunk per download.
        self.resume_download = False
        self.multiDL = self.premium
        self.chunk_limit = 1

    def handle_premium(self, pyfile):
        """Resolve `pyfile.url` through the FreeWayMe API and download it.

        Performs up to five attempts.  Each attempt asks load.php for a
        redirect; if a 'location' header comes back it is probed with a
        HEAD-style request, and only a non-500 answer triggers the actual
        download (which ends the retry loop).  Failures at either stage are
        logged and retried; errors are not otherwise handled yet (see TODO).
        """
        user, data = self.account.select()

        for _i in xrange(5):
            #: Try it five times
            header = self.load("http://www.free-way.bz/load.php",  #@TODO: Revert to `https` in 0.4.10
                               get={'multiget': 7,
                                    'url' : pyfile.url,
                                    'user' : user,
                                    'pw' : self.account.get_login('password'),
                                    'json' : ""},
                               just_header=True)

            if 'location' in header:
                # Stage 2: verify the redirect target answers before
                # committing to the download.
                headers = self.load(header['location'], just_header=True)
                if headers['code'] == 500:
                    #: Error on 2nd stage
                    self.log_error(_("Error [stage2]"))
                else:
                    #: Seems to work..
                    self.download(header['location'])
                    break
            else:
                #: Error page first stage
                self.log_error(_("Error [stage1]"))

            #@TODO: handle errors
getInfo = create_getInfo(FreeWayMe)
|
loserxc/huhamhire-hosts | refs/heads/master | tui/hostsutil.py | 24 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# hostsutil.py: Start a TUI session of `Hosts Setup Utility`.
#
# Copyleft (C) 2014 - huhamhire <me@huhamhire.com>
# =====================================================================
# Licensed under the GNU General Public License, version 3. You should
# have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# =====================================================================
__author__ = "huhamhire <me@huhamhire.com>"
import os
from zipfile import BadZipfile
from curses_d import CursesDaemon
import sys
sys.path.append("..")
from util import CommonUtil, RetrieveData
class HostsUtil(CursesDaemon):
    """
    HostsUtil class in :mod:`tui` module is the main entrance to the
    Text-based User Interface (TUI) mode of `Hosts Setup Utility`. This class
    contains methods to start a TUI session of `Hosts Setup Utility`.

    .. note:: This class is subclass of :class:`~tui.curses_d.CursesDaemon`
        class.

    .. inheritance-diagram:: tui.hostsutil.HostsUtil
        :parts: 2

    Typical usage to start a TUI session::

        import tui
        util = tui.HostsUtil()
        util.start()

    :ivar str platform: Platform of current operating system. The value could
        be `Windows`, `Linux`, `Unix`, `OS X`, and of course `Unknown`.
    :ivar str hostname: The hostname of current operating system.

        .. note:: This attribute would only be used on linux.

    :ivar str hosts_path: The absolute path to the hosts file on current
        operating system.

    .. seealso:: :attr:`platform`, :attr:`hostname`, :attr:`hosts_path` in
        :class:`~tui.curses_d.CursesDaemon` class.

    :ivar str sys_eol: The End-Of-Line marker. This marker could typically be
        one of `CR`, `LF`, or `CRLF`.

        .. seealso:: :attr:`sys_eol` in :class:`~tui.curses_ui.CursesUI`
            class.
    """
    # Class-level defaults; filled in per-session by set_platform().
    platform = ""
    hostname = ""
    hosts_path = ""
    sys_eol = ""

    def __init__(self):
        """
        Initialize a new TUI session.

        * Load server list from a configuration file under working directory.
        * Try to load the hosts data file under working directory if it
          exists.

        .. note:: If the hosts data file does not exist correctly in current
            working directory, a warning message box would popup. And
            operations to change the hosts file on current system could be
            done only until a new data file has been downloaded.

        .. seealso:: :meth:`~tui.curses_d.CursesDaemon.session_daemon` method
            in :class:`~tui.curses_d.CursesDaemon`.

        .. seealso:: :meth:`~gui.hostsutil.HostsUtil.init_main` in
            :class:`~gui.hostsutil.HostsUtil` class.
        """
        super(HostsUtil, self).__init__()
        # Set mirrors
        self.settings[0][2] = CommonUtil.set_network("network.conf")
        # Read data file and set function list
        try:
            self.set_platform()
            RetrieveData.unpack()
            RetrieveData.connect_db()
            self.set_info()
            self.set_func_list()
        except IOError:
            # Data file missing: session continues, but hosts operations are
            # unavailable until the user fetches a new data file (F6).
            self.messagebox("No data file found! Press F6 to get data file "
                            "first.", 1)
        except BadZipfile:
            self.messagebox("Incorrect Data file! Press F6 to get a new data "
                            "file first.", 1)

    def __del__(self):
        """
        Reset the terminal and clear up the temporary data file while TUI
        session is finished.
        """
        super(HostsUtil, self).__del__()
        try:
            RetrieveData.clear()
        except:
            # Best-effort cleanup during finalization; any failure here is
            # deliberately ignored (interpreter may be shutting down).
            pass

    def start(self):
        """
        Start the TUI session.

        .. note:: This method is the trigger to start a TUI session of
            `Hosts Setup Utility`.
        """
        while True:
            # Reload: a truthy return from session_daemon() requests a
            # restart of the session (tear down and re-initialize).
            if self.session_daemon():
                self.__del__()
                self.__init__()
            else:
                break

    def set_platform(self):
        """
        Set the information about current operating system.
        """
        system, hostname, path, encode, flag = CommonUtil.check_platform()
        # flag indicates whether the platform is supported; shown as a
        # GREEN/RED status colour in the UI.
        color = "GREEN" if flag else "RED"
        self.platform = system
        self.statusinfo[1][1] = system
        self.hostname = hostname
        self.hosts_path = path
        self.statusinfo[1][2] = color
        if encode == "win_ansi":
            self.sys_eol = "\r\n"
        else:
            self.sys_eol = "\n"

    def set_func_list(self):
        """
        Set the function selection list in TUI session.
        """
        # Two IP-version slots (presumably 0 = IPv4, 1 = IPv6 — TODO confirm
        # against RetrieveData.get_choice).
        for ip in range(2):
            choice, defaults, slices = RetrieveData.get_choice(ip)
            # If a custom hosts file exists, prepend a "customize" entry and
            # shift the slice offsets accordingly.
            # NOTE(review): self.custom is defined in a parent class — verify.
            if os.path.isfile(self.custom):
                choice.insert(0, [4, 1, 0, "customize"])
                defaults[0x04] = [1]
                for i in range(len(slices)):
                    slices[i] += 1
                slices.insert(0, 0)
            self.choice[ip] = choice
            self.slices[ip] = slices
            funcs = []
            # Mark each function as selected (1) when it is in the defaults
            # for its group, otherwise unselected (0).
            for func in choice:
                if func[1] in defaults[func[0]]:
                    funcs.append(1)
                else:
                    funcs.append(0)
            self._funcs[ip] = funcs

    def set_info(self):
        """
        Set the information of the current local data file.
        """
        info = RetrieveData.get_info()
        build = info["Buildtime"]
        self.hostsinfo["Version"] = info["Version"]
        self.hostsinfo["Release"] = CommonUtil.timestamp_to_date(build)
if __name__ == "__main__":
    # Script entry point: launch an interactive TUI session.
    main = HostsUtil()
    main.start()
|
euphorie/Euphorie | refs/heads/master | src/euphorie/content/tests/test_survey.py | 1 | # coding=utf-8
from ..survey import View
from euphorie.content.module import IModule
from euphorie.content.profilequestion import IProfileQuestion
from euphorie.content.survey import handleSurveyUnpublish
from euphorie.content.survey import Survey
from euphorie.testing import EuphorieIntegrationTestCase
from plone.app.layout.globals.context import ContextState
from plone.folder.default import DefaultOrdering
from zope.annotation.attribute import AttributeAnnotations
from zope.annotation.interfaces import IAttributeAnnotatable
from zope.component import provideAdapter
from zope.interface import alsoProvides
from zope.interface import Interface
from zope.publisher.browser import TestRequest
import Acquisition
import mock
import unittest
class Mock(Acquisition.Explicit):
    """Minimal acquisition-aware stand-in used as survey/child fixture."""

    def __init__(self, **attrs):
        # Mirror every keyword argument onto the instance as an attribute.
        for name in attrs:
            setattr(self, name, attrs[name])

    def absolute_url(self):
        # Fake URL derived from the object's id, as Plone content would have.
        return "http://nohost/%s" % self.id

    def manage_fixupOwnershipAfterAdd(self):
        # No-op: real content fixes ownership on add; fixtures need nothing.
        pass
class ViewTests(EuphorieIntegrationTestCase):
    """Integration tests for the survey ``View`` browser view."""

    def setUp(self):
        super(ViewTests, self).setUp()
        # Register the minimal component adapters the view relies on.
        provideAdapter(AttributeAnnotations)
        provideAdapter(DefaultOrdering)
        provideAdapter(
            ContextState,
            adapts=(Interface, Interface),
            provides=Interface,
            name="plone_context_state",
        )
        # grok makes unit testing extremely painful
        View.__view_name__ = "View"
        View.module_info = Mock()
        View.module_info.package_dotted_name = "euphorie.content.survey.View"

    def tearDown(self):
        super(ViewTests, self).tearDown()
        # Remove the class-level grok attributes patched in setUp().
        del View.__view_name__
        del View.module_info

    def _request(self):
        # Build a test request that supports annotations, as the view needs.
        req = TestRequest()
        alsoProvides(req, IAttributeAnnotatable)
        return req

    def test_update_no_children(self):
        # A survey with no content yields an empty children list.
        survey = Survey()
        view = View(survey, self._request())
        view.update()
        self.assertEqual(view.children, [])

    def test_update_with_profile(self):
        # Children providing IProfileQuestion are collected via _morph().
        survey = Survey()
        child = Mock(id="child", title=u"Child")
        alsoProvides(child, IProfileQuestion)
        survey["child"] = child
        view = View(survey, self._request())
        view._morph = mock.Mock(return_value="info")
        view.update()
        self.assertEqual(view.children, ["info"])

    def test_update_with_module(self):
        # Children providing IModule are collected via _morph().
        survey = Survey()
        child = Mock(id="child", title=u"Child")
        alsoProvides(child, IModule)
        survey["child"] = child
        view = View(survey, self._request())
        view._morph = mock.Mock(return_value="info")
        view.update()
        self.assertEqual(view.children, ["info"])

    def test_update_other_child(self):
        # Children that are neither profile questions nor modules are ignored.
        survey = Survey()
        view = View(survey, self._request())
        child = Mock(id="child", title=u"Child")
        survey["child"] = child
        view.update()
        self.assertEqual(view.children, [])

    def test_moprh(self):
        # _morph() summarises a child as an id/title/url mapping.
        # NOTE(review): test name looks like a typo of "morph"; renaming it
        # would change the public test id, so it is only flagged here.
        child = Mock(id="child", title=u"Child")
        view = View(None, self._request())
        self.assertEqual(
            view._morph(child),
            {"id": "child", "title": u"Child", "url": "http://nohost/child"},
        )
class HandleSurveyUnpublishTests(unittest.TestCase):
    """Unit tests for the ``handleSurveyUnpublish`` event handler."""

    def handleSurveyUnpublish(self, *a, **kw):
        # Thin indirection so subclasses could substitute the handler.
        return handleSurveyUnpublish(*a, **kw)

    def testRemovePublishedFromSurvey(self):
        # The handler removes the `published` marker from the survey itself.
        surveygroup = Mock(published=None)
        surveygroup.survey = Mock(id="survey", published="yes")
        self.handleSurveyUnpublish(surveygroup.survey, None)
        self.assertTrue(not hasattr(surveygroup.survey, "published"))

    def testUpdatateSurveygroupIfCurrentlyPublished(self):
        # If the group's published pointer names this survey, it is cleared.
        surveygroup = Mock(published="survey")
        surveygroup.survey = Mock(id="survey", published="yes")
        self.handleSurveyUnpublish(surveygroup.survey, None)
        self.assertEqual(surveygroup.published, None)

    def testUpdatateSurveygroupIfOtherPublished(self):
        # A pointer to a different survey is left untouched.
        surveygroup = Mock(published="other")
        surveygroup.survey = Mock(id="survey", published="yes")
        self.handleSurveyUnpublish(surveygroup.survey, None)
        self.assertEqual(surveygroup.published, "other")
|
spinellic/Mission-Planner | refs/heads/master | Lib/site-packages/scipy/ndimage/tests/test_ndimage.py | 53 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import numpy as np
from numpy import fft
from numpy.testing import assert_, assert_equal, assert_array_equal, \
TestCase, run_module_suite, \
assert_array_almost_equal, assert_almost_equal
import scipy.ndimage as ndimage
# Absolute tolerance shared by the float comparisons in this module.
eps = 1e-12

def sumsq(a, b):
    """Return the Euclidean distance (root of summed squared differences)."""
    diff = a - b
    return math.sqrt((diff * diff).sum())
class TestNdimage(TestCase):
    def setUp(self):
        # Shared fixtures: the dtypes and boundary modes exercised by the
        # parametrised filter tests below.
        # list of numarray data types
        self.types = [numpy.int8, numpy.uint8, numpy.int16,
                      numpy.uint16, numpy.int32, numpy.uint32,
                      numpy.int64, numpy.uint64,
                      numpy.float32, numpy.float64]

        # list of boundary modes:
        self.modes = ['nearest', 'wrap', 'reflect', 'mirror', 'constant']
def test_correlate01(self):
"correlation 1"
array = numpy.array([1, 2])
weights = numpy.array([2])
expected = [2, 4]
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, expected)
def test_correlate02(self):
"correlation 2"
array = numpy.array([1, 2, 3])
kernel = numpy.array([1])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.correlate1d(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve1d(array, kernel)
assert_array_almost_equal(array, output)
def test_correlate03(self):
"correlation 3"
array = numpy.array([1])
weights = numpy.array([1, 1])
expected = [2]
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, expected)
def test_correlate04(self):
"correlation 4"
array = numpy.array([1, 2])
tcor = [2, 3]
tcov = [3, 4]
weights = numpy.array([1, 1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, tcov)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, tcov)
def test_correlate05(self):
"correlation 5"
array = numpy.array([1, 2, 3])
tcor = [2, 3, 5]
tcov = [3, 5, 6]
kernel = numpy.array([1, 1])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(tcov, output)
output = ndimage.correlate1d(array, kernel)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve1d(array, kernel)
assert_array_almost_equal(tcov, output)
def test_correlate06(self):
"correlation 6"
array = numpy.array([1, 2, 3])
tcor = [9, 14, 17]
tcov = [7, 10, 15]
weights = numpy.array([1, 2, 3])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, tcov)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, tcov)
def test_correlate07(self):
"correlation 7"
array = numpy.array([1, 2, 3])
expected = [5, 8, 11]
weights = numpy.array([1, 2, 1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, expected)
def test_correlate08(self):
"correlation 8"
array = numpy.array([1, 2, 3])
tcor = [1, 2, 5]
tcov = [3, 6, 7]
weights = numpy.array([1, 2, -1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, tcov)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, tcov)
    def test_correlate09(self):
        "correlation 9"
        # Edge case: an empty input sequence (a plain Python list) must pass
        # through every correlate/convolve variant unchanged as an empty
        # result.
        array = []
        kernel = numpy.array([1, 1])
        output = ndimage.correlate(array, kernel)
        assert_array_almost_equal(array, output)
        output = ndimage.convolve(array, kernel)
        assert_array_almost_equal(array, output)
        output = ndimage.correlate1d(array, kernel)
        assert_array_almost_equal(array, output)
        output = ndimage.convolve1d(array, kernel)
        assert_array_almost_equal(array, output)
def test_correlate10(self):
"correlation 10"
array = [[]]
kernel = numpy.array([[1, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(array, output)
def test_correlate11(self):
"correlation 11"
array = numpy.array([[1, 2, 3],
[4, 5, 6]])
kernel = numpy.array([[1, 1],
[1, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output)
def test_correlate12(self):
"correlation 12"
array = numpy.array([[1, 2, 3],
[4, 5, 6]])
kernel = numpy.array([[1, 0],
[0, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
def test_correlate13(self):
"correlation 13"
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
for type2 in self.types:
output = ndimage.correlate(array, kernel,
output=type2)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, type2)
output = ndimage.convolve(array, kernel,
output=type2)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, type2)
def test_correlate14(self):
"correlation 14"
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
for type2 in self.types:
output = numpy.zeros(array.shape, type2)
ndimage.correlate(array, kernel,
output=output)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, type2)
ndimage.convolve(array, kernel, output=output)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, type2)
def test_correlate15(self):
"correlation 15"
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
output = ndimage.correlate(array, kernel,
output=numpy.float32)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate16(self):
"correlation 16"
kernel = numpy.array([[0.5, 0 ],
[0, 0.5]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
output = ndimage.correlate(array, kernel,
output=numpy.float32)
assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32)
assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate17(self):
"correlation 17"
array = numpy.array([1, 2, 3])
tcor = [3, 5, 6]
tcov = [2, 3, 5]
kernel = numpy.array([1, 1])
output = ndimage.correlate(array, kernel, origin=-1)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve(array, kernel, origin=-1)
assert_array_almost_equal(tcov, output)
output = ndimage.correlate1d(array, kernel, origin=-1)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve1d(array, kernel, origin=-1)
assert_array_almost_equal(tcov, output)
def test_correlate18(self):
"correlation 18"
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
output = ndimage.correlate(array, kernel,
output=numpy.float32,
mode='nearest', origin=-1)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32,
mode='nearest', origin=-1)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate19(self):
"correlation 19"
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
output = ndimage.correlate(array, kernel,
output=numpy.float32,
mode='nearest', origin=[-1, 0])
assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32,
mode='nearest', origin=[-1, 0])
assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate20(self):
"correlation 20"
weights = numpy.array([1, 2, 1])
expected = [[5, 10, 15], [7, 14, 21]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0,
output=output)
assert_array_almost_equal(output, expected)
def test_correlate21(self):
"correlation 21"
array = numpy.array([[1, 2, 3],
[2, 4, 6]])
expected = [[5, 10, 15], [7, 14, 21]]
weights = numpy.array([1, 2, 1])
output = ndimage.correlate1d(array, weights, axis=0)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights, axis=0)
assert_array_almost_equal(output, expected)
def test_correlate22(self):
"correlation 22"
weights = numpy.array([1, 2, 1])
expected = [[6, 12, 18], [6, 12, 18]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='wrap', output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0,
mode='wrap', output=output)
assert_array_almost_equal(output, expected)
def test_correlate23(self):
"correlation 23"
weights = numpy.array([1, 2, 1])
expected = [[5, 10, 15], [7, 14, 21]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output)
assert_array_almost_equal(output, expected)
def test_correlate24(self):
"correlation 24"
weights = numpy.array([1, 2, 1])
tcor = [[7, 14, 21], [8, 16, 24]]
tcov = [[4, 8, 12], [5, 10, 15]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output, origin=-1)
assert_array_almost_equal(output, tcor)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output, origin=-1)
assert_array_almost_equal(output, tcov)
def test_correlate25(self):
"correlation 25"
weights = numpy.array([1, 2, 1])
tcor = [[4, 8, 12], [5, 10, 15]]
tcov = [[7, 14, 21], [8, 16, 24]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output, origin=1)
assert_array_almost_equal(output, tcor)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output, origin=1)
assert_array_almost_equal(output, tcov)
def test_gauss01(self):
"gaussian filter 1"
input = numpy.array([[1, 2, 3],
[2, 4, 6]], numpy.float32)
output = ndimage.gaussian_filter(input, 0)
assert_array_almost_equal(output, input)
def test_gauss02(self):
"gaussian filter 2"
input = numpy.array([[1, 2, 3],
[2, 4, 6]], numpy.float32)
output = ndimage.gaussian_filter(input, 1.0)
assert_equal(input.dtype, output.dtype)
assert_equal(input.shape, output.shape)
def test_gauss03(self):
"gaussian filter 3 - single precision data"
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
output = ndimage.gaussian_filter(input, [1.0, 1.0])
assert_equal(input.dtype, output.dtype)
assert_equal(input.shape, output.shape)
# input.sum() is 49995000.0. With single precision floats, we can't
# expect more than 8 digits of accuracy, so use decimal=0 in this test.
assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'), decimal=0)
assert_(sumsq(input, output) > 1.0)
def test_gauss04(self):
"gaussian filter 4"
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output = ndimage.gaussian_filter(input, [1.0, 1.0],
output=otype)
assert_equal(output.dtype.type, numpy.float64)
assert_equal(input.shape, output.shape)
assert_(sumsq(input, output) > 1.0)
def test_gauss05(self):
"gaussian filter 5"
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output = ndimage.gaussian_filter(input, [1.0, 1.0],
order=1, output=otype)
assert_equal(output.dtype.type, numpy.float64)
assert_equal(input.shape, output.shape)
assert_(sumsq(input, output) > 1.0)
def test_gauss06(self):
"gaussian filter 6"
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output1 = ndimage.gaussian_filter(input, [1.0, 1.0],
output=otype)
output2 = ndimage.gaussian_filter(input, 1.0,
output=otype)
assert_array_almost_equal(output1, output2)
def test_prewitt01(self):
"prewitt filter 1"
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
output = ndimage.prewitt(array, 0)
assert_array_almost_equal(t, output)
def test_prewitt02(self):
"prewitt filter 2"
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
output = numpy.zeros(array.shape, type)
ndimage.prewitt(array, 0, output)
assert_array_almost_equal(t, output)
def test_prewitt03(self):
"prewitt filter 3"
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0)
output = ndimage.prewitt(array, 1)
assert_array_almost_equal(t, output)
def test_prewitt04(self):
"prewitt filter 4"
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.prewitt(array, -1)
output = ndimage.prewitt(array, 1)
assert_array_almost_equal(t, output)
def test_sobel01(self):
"sobel filter 1"
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
output = ndimage.sobel(array, 0)
assert_array_almost_equal(t, output)
def test_sobel02(self):
"sobel filter 2"
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
output = numpy.zeros(array.shape, type)
ndimage.sobel(array, 0, output)
assert_array_almost_equal(t, output)
def test_sobel03(self):
"sobel filter 3"
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0)
output = numpy.zeros(array.shape, type)
output = ndimage.sobel(array, 1)
assert_array_almost_equal(t, output)
def test_sobel04(self):
"sobel filter 4"
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.sobel(array, -1)
output = ndimage.sobel(array, 1)
assert_array_almost_equal(t, output)
def test_laplace01(self):
"laplace filter 1"
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
output = ndimage.laplace(array)
assert_array_almost_equal(tmp1 + tmp2, output)
def test_laplace02(self):
"laplace filter 2"
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
output = numpy.zeros(array.shape, type)
ndimage.laplace(array, output=output)
assert_array_almost_equal(tmp1 + tmp2, output)
def test_gaussian_laplace01(self):
"gaussian laplace filter 1"
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
output = ndimage.gaussian_laplace(array, 1.0)
assert_array_almost_equal(tmp1 + tmp2, output)
def test_gaussian_laplace02(self):
"gaussian laplace filter 2"
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
output = numpy.zeros(array.shape, type)
ndimage.gaussian_laplace(array, 1.0, output)
assert_array_almost_equal(tmp1 + tmp2, output)
def test_generic_laplace01(self):
"generic laplace filter 1"
def derivative2(input, axis, output, mode, cval, a, b):
sigma = [a, b / 2.0]
input = numpy.asarray(input)
order = [0] * input.ndim
order[axis] = 2
return ndimage.gaussian_filter(input, sigma, order,
output, mode, cval)
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
output = numpy.zeros(array.shape, type)
tmp = ndimage.generic_laplace(array, derivative2,
extra_arguments=(1.0,), extra_keywords={'b': 2.0})
ndimage.gaussian_laplace(array, 1.0, output)
assert_array_almost_equal(tmp, output)
def test_gaussian_gradient_magnitude01(self):
"gaussian gradient magnitude filter 1"
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
output = ndimage.gaussian_gradient_magnitude(array,
1.0)
expected = tmp1 * tmp1 + tmp2 * tmp2
numpy.sqrt(expected, expected)
assert_array_almost_equal(expected, output)
def test_gaussian_gradient_magnitude02(self):
"gaussian gradient magnitude filter 2"
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
output = numpy.zeros(array.shape, type)
ndimage.gaussian_gradient_magnitude(array, 1.0,
output)
expected = tmp1 * tmp1 + tmp2 * tmp2
numpy.sqrt(expected, expected)
assert_array_almost_equal(expected, output)
def test_generic_gradient_magnitude01(self):
"generic gradient magnitude 1"
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], numpy.float64)
def derivative(input, axis, output, mode, cval, a, b):
sigma = [a, b / 2.0]
input = numpy.asarray(input)
order = [0] * input.ndim
order[axis] = 1
return ndimage.gaussian_filter(input, sigma, order,
output, mode, cval)
tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0)
tmp2 = ndimage.generic_gradient_magnitude(array,
derivative, extra_arguments=(1.0,),
extra_keywords={'b': 2.0})
assert_array_almost_equal(tmp1, tmp2)
def test_uniform01(self):
"uniform filter 1"
array = numpy.array([2, 4, 6])
size = 2
output = ndimage.uniform_filter1d(array, size,
origin=-1)
assert_array_almost_equal([3, 5, 6], output)
def test_uniform02(self):
"uniform filter 2"
array = numpy.array([1, 2, 3])
filter_shape = [0]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal(array, output)
def test_uniform03(self):
"uniform filter 3"
array = numpy.array([1, 2, 3])
filter_shape = [1]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal(array, output)
def test_uniform04(self):
"uniform filter 4"
array = numpy.array([2, 4, 6])
filter_shape = [2]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal([2, 3, 5], output)
def test_uniform05(self):
"uniform filter 5"
array = []
filter_shape = [1]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal([], output)
    def test_uniform06(self):
        """2-D uniform filter over all input/output dtype combinations."""
        filter_shape = [2, 2]
        for type1 in self.types:
            array = numpy.array([[4, 8, 12],
                                 [16, 20, 24]], type1)
            for type2 in self.types:
                # output=type2 requests a freshly allocated array of that dtype
                output = ndimage.uniform_filter(array,
                                        filter_shape, output=type2)
                assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
                assert_equal(output.dtype.type, type2)
def test_minimum_filter01(self):
"minimum filter 1"
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([2])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([1, 1, 2, 3, 4], output)
def test_minimum_filter02(self):
"minimum filter 2"
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([3])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([1, 1, 2, 3, 4], output)
def test_minimum_filter03(self):
"minimum filter 3"
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([2])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([3, 2, 2, 1, 1], output)
def test_minimum_filter04(self):
"minimum filter 4"
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([3])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([2, 2, 1, 1, 1], output)
    def test_minimum_filter05(self):
        """2-D minimum filter with a rectangular 2x3 window."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        filter_shape = numpy.array([2, 3])
        output = ndimage.minimum_filter(array, filter_shape)
        assert_array_almost_equal([[2, 2, 1, 1, 1],
                                   [2, 2, 1, 1, 1],
                                   [5, 3, 3, 1, 1]], output)
    def test_minimum_filter06(self):
        """A full 2x3 footprint must behave like a 2x3 size (same result as test 5)."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 1, 1], [1, 1, 1]]
        output = ndimage.minimum_filter(array,
                                                 footprint=footprint)
        assert_array_almost_equal([[2, 2, 1, 1, 1],
                                   [2, 2, 1, 1, 1],
                                   [5, 3, 3, 1, 1]], output)
    def test_minimum_filter07(self):
        """Minimum filter with a sparse (non-rectangular) footprint."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.minimum_filter(array,
                                                 footprint=footprint)
        assert_array_almost_equal([[2, 2, 1, 1, 1],
                                   [2, 3, 1, 3, 1],
                                   [5, 5, 3, 3, 1]], output)
    def test_minimum_filter08(self):
        """Sparse footprint combined with a scalar origin shift of -1 on every axis."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.minimum_filter(array,
                                        footprint=footprint, origin=-1)
        assert_array_almost_equal([[3, 1, 3, 1, 1],
                                   [5, 3, 3, 1, 1],
                                   [3, 3, 1, 1, 1]], output)
    def test_minimum_filter09(self):
        """Sparse footprint with a per-axis origin ([-1, 0]): only axis 0 is shifted."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.minimum_filter(array,
                                        footprint=footprint, origin=[-1, 0])
        assert_array_almost_equal([[2, 3, 1, 3, 1],
                                   [5, 5, 3, 3, 1],
                                   [5, 3, 3, 1, 1]], output)
def test_maximum_filter01(self):
"maximum filter 1"
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([2])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([1, 2, 3, 4, 5], output)
def test_maximum_filter02(self):
"maximum filter 2"
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([3])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([2, 3, 4, 5, 5], output)
def test_maximum_filter03(self):
"maximum filter 3"
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([2])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([3, 3, 5, 5, 4], output)
def test_maximum_filter04(self):
"maximum filter 4"
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([3])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([3, 5, 5, 5, 4], output)
    def test_maximum_filter05(self):
        """2-D maximum filter with a rectangular 2x3 window."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        filter_shape = numpy.array([2, 3])
        output = ndimage.maximum_filter(array, filter_shape)
        assert_array_almost_equal([[3, 5, 5, 5, 4],
                                   [7, 9, 9, 9, 5],
                                   [8, 9, 9, 9, 7]], output)
    def test_maximum_filter06(self):
        """A full 2x3 footprint must behave like a 2x3 size (same result as test 5)."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 1, 1], [1, 1, 1]]
        output = ndimage.maximum_filter(array,
                                                 footprint=footprint)
        assert_array_almost_equal([[3, 5, 5, 5, 4],
                                   [7, 9, 9, 9, 5],
                                   [8, 9, 9, 9, 7]], output)
    def test_maximum_filter07(self):
        """Maximum filter with a sparse (non-rectangular) footprint."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.maximum_filter(array,
                                                 footprint=footprint)
        assert_array_almost_equal([[3, 5, 5, 5, 4],
                                   [7, 7, 9, 9, 5],
                                   [7, 9, 8, 9, 7]], output)
    def test_maximum_filter08(self):
        """Sparse footprint combined with a scalar origin shift of -1 on every axis."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.maximum_filter(array,
                                        footprint=footprint, origin=-1)
        assert_array_almost_equal([[7, 9, 9, 5, 5],
                                   [9, 8, 9, 7, 5],
                                   [8, 8, 7, 7, 7]], output)
    def test_maximum_filter09(self):
        """Sparse footprint with a per-axis origin ([-1, 0]): only axis 0 is shifted."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.maximum_filter(array,
                                        footprint=footprint, origin=[-1, 0])
        assert_array_almost_equal([[7, 7, 9, 9, 5],
                                   [7, 9, 8, 9, 7],
                                   [8, 8, 8, 7, 7]], output)
def test_rank01(self):
"rank filter 1"
array = numpy.array([1, 2, 3, 4, 5])
output = ndimage.rank_filter(array, 1, size=2)
assert_array_almost_equal(array, output)
output = ndimage.percentile_filter(array, 100, size=2)
assert_array_almost_equal(array, output)
output = ndimage.median_filter(array, 2)
assert_array_almost_equal(array, output)
def test_rank02(self):
"rank filter 2"
array = numpy.array([1, 2, 3, 4, 5])
output = ndimage.rank_filter(array, 1, size=[3])
assert_array_almost_equal(array, output)
output = ndimage.percentile_filter(array, 50, size=3)
assert_array_almost_equal(array, output)
output = ndimage.median_filter(array, (3,))
assert_array_almost_equal(array, output)
def test_rank03(self):
"rank filter 3"
array = numpy.array([3, 2, 5, 1, 4])
output = ndimage.rank_filter(array, 1, size=[2])
assert_array_almost_equal([3, 3, 5, 5, 4], output)
output = ndimage.percentile_filter(array, 100, size=2)
assert_array_almost_equal([3, 3, 5, 5, 4], output)
def test_rank04(self):
"rank filter 4"
array = numpy.array([3, 2, 5, 1, 4])
expected = [3, 3, 2, 4, 4]
output = ndimage.rank_filter(array, 1, size=3)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 50, size=3)
assert_array_almost_equal(expected, output)
output = ndimage.median_filter(array, size=3)
assert_array_almost_equal(expected, output)
def test_rank05(self):
"rank filter 5"
array = numpy.array([3, 2, 5, 1, 4])
expected = [3, 3, 2, 4, 4]
output = ndimage.rank_filter(array, -2, size=3)
assert_array_almost_equal(expected, output)
    def test_rank06(self):
        """2-D rank filter; percentile 17 of a 6-element window picks the same element as rank 1."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [5, 8, 3, 7, 1],
                             [5, 6, 9, 3, 5]])
        expected = [[2, 2, 1, 1, 1],
                    [3, 3, 2, 1, 1],
                    [5, 5, 3, 3, 1]]
        output = ndimage.rank_filter(array, 1, size=[2, 3])
        assert_array_almost_equal(expected, output)
        output = ndimage.percentile_filter(array, 17,
                                                    size=(2, 3))
        assert_array_almost_equal(expected, output)
    def test_rank07(self):
        """2-D rank filter with a negative rank (-2 = second largest in the window)."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [5, 8, 3, 7, 1],
                             [5, 6, 9, 3, 5]])
        expected = [[3, 5, 5, 5, 4],
                    [5, 5, 7, 5, 4],
                    [6, 8, 8, 7, 5]]
        output = ndimage.rank_filter(array, -2, size=[2, 3])
        assert_array_almost_equal(expected, output)
def test_rank08(self):
"median filter 8"
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]])
expected = [[3, 3, 2, 4, 4],
[5, 5, 5, 4, 4],
[5, 6, 7, 5, 5]]
kernel = numpy.array([2, 3])
output = ndimage.percentile_filter(array, 50.0,
size=(2, 3))
assert_array_almost_equal(expected, output)
output = ndimage.rank_filter(array, 3, size=(2, 3))
assert_array_almost_equal(expected, output)
output = ndimage.median_filter(array, size=(2, 3))
assert_array_almost_equal(expected, output)
    def test_rank09(self):
        """Rank filter with a sparse cross-shaped footprint, over all dtypes."""
        expected = [[3, 3, 2, 4, 4],
                    [3, 5, 2, 5, 1],
                    [5, 5, 8, 3, 5]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        for type in self.types:
            array = numpy.array([[3, 2, 5, 1, 4],
                                 [5, 8, 3, 7, 1],
                                 [5, 6, 9, 3, 5]], type)
            output = ndimage.rank_filter(array, 1,
                                                  footprint=footprint)
            assert_array_almost_equal(expected, output)
            # percentile 35 of 3 elements selects the same rank-1 element
            output = ndimage.percentile_filter(array, 35,
                                                        footprint=footprint)
            assert_array_almost_equal(expected, output)
    def test_rank10(self):
        """Rank 0 / percentile 0 with a footprint equal the minimum filter."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        expected = [[2, 2, 1, 1, 1],
                    [2, 3, 1, 3, 1],
                    [5, 5, 3, 3, 1]]
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.rank_filter(array, 0,
                                              footprint=footprint)
        assert_array_almost_equal(expected, output)
        output = ndimage.percentile_filter(array, 0.0,
                                                    footprint=footprint)
        assert_array_almost_equal(expected, output)
    def test_rank11(self):
        """Rank -1 / percentile 100 with a footprint equal the maximum filter."""
        array = numpy.array([[3, 2, 5, 1, 4],
                             [7, 6, 9, 3, 5],
                             [5, 8, 3, 7, 1]])
        expected = [[3, 5, 5, 5, 4],
                    [7, 7, 9, 9, 5],
                    [7, 9, 8, 9, 7]]
        footprint = [[1, 0, 1], [1, 1, 0]]
        output = ndimage.rank_filter(array, -1,
                                              footprint=footprint)
        assert_array_almost_equal(expected, output)
        output = ndimage.percentile_filter(array, 100.0,
                                                    footprint=footprint)
        assert_array_almost_equal(expected, output)
    def test_rank12(self):
        """Rank 1 / percentile 50 / median over a 3-element footprint agree, for all dtypes."""
        expected = [[3, 3, 2, 4, 4],
                    [3, 5, 2, 5, 1],
                    [5, 5, 8, 3, 5]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        for type in self.types:
            array = numpy.array([[3, 2, 5, 1, 4],
                                 [5, 8, 3, 7, 1],
                                 [5, 6, 9, 3, 5]], type)
            output = ndimage.rank_filter(array, 1,
                                                  footprint=footprint)
            assert_array_almost_equal(expected, output)
            output = ndimage.percentile_filter(array, 50.0,
                                                        footprint=footprint)
            assert_array_almost_equal(expected, output)
            output = ndimage.median_filter(array,
                                                    footprint=footprint)
            assert_array_almost_equal(expected, output)
    def test_rank13(self):
        """Rank filter with a footprint and a scalar origin of -1, for all dtypes."""
        expected = [[5, 2, 5, 1, 1],
                    [5, 8, 3, 5, 5],
                    [6, 6, 5, 5, 5]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        for type in self.types:
            array = numpy.array([[3, 2, 5, 1, 4],
                                 [5, 8, 3, 7, 1],
                                 [5, 6, 9, 3, 5]], type)
            output = ndimage.rank_filter(array, 1,
                                         footprint=footprint, origin=-1)
            assert_array_almost_equal(expected, output)
    def test_rank14(self):
        """Rank filter with a footprint and per-axis origin [-1, 0], for all dtypes."""
        expected = [[3, 5, 2, 5, 1],
                    [5, 5, 8, 3, 5],
                    [5, 6, 6, 5, 5]]
        footprint = [[1, 0, 1], [0, 1, 0]]
        for type in self.types:
            array = numpy.array([[3, 2, 5, 1, 4],
                                 [5, 8, 3, 7, 1],
                                 [5, 6, 9, 3, 5]], type)
            output = ndimage.rank_filter(array, 1,
                                         footprint=footprint, origin=[-1, 0])
            assert_array_almost_equal(expected, output)
    def test_generic_filter1d01(self):
        """generic_filter1d with a Python callback matches correlate1d."""
        weights = numpy.array([1.1, 2.2, 3.3])
        def _filter_func(input, output, fltr, total):
            # normalize the weights passed through extra_arguments
            fltr = fltr / total
            for ii in range(input.shape[0] - 2):
                output[ii] = input[ii] * fltr[0]
                output[ii] += input[ii + 1] * fltr[1]
                output[ii] += input[ii + 2] * fltr[2]
        for type in self.types:
            a = numpy.arange(12, dtype=type)
            a.shape = (3,4)
            r1 = ndimage.correlate1d(a, weights / weights.sum(), 0,
                                                origin=-1)
            r2 = ndimage.generic_filter1d(a, _filter_func, 3,
                      axis=0, origin=-1, extra_arguments=(weights,),
                      extra_keywords={'total': weights.sum()})
            assert_array_almost_equal(r1, r2)
    def test_generic_filter01(self):
        """generic_filter with a Python callback matches correlate over a footprint."""
        filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
        footprint = numpy.array([[1, 0], [0, 1]])
        cf = numpy.array([1., 4.])
        def _filter_func(buffer, weights, total=1.0):
            # NOTE(review): the `weights` parameter is immediately shadowed by
            # the closed-over `cf`; extra_arguments passes cf anyway, so the
            # result is the same either way.
            weights = cf / total
            return (buffer * weights).sum()
        for type in self.types:
            a = numpy.arange(12, dtype=type)
            a.shape = (3,4)
            r1 = ndimage.correlate(a, filter_ * footprint)
            r1 /= 5
            r2 = ndimage.generic_filter(a, _filter_func,
                      footprint=footprint, extra_arguments=(cf,),
                      extra_keywords={'total': cf.sum()})
            assert_array_almost_equal(r1, r2)
    def test_extend01(self):
        """Boundary extension for correlate1d with a left-reaching 2-tap kernel."""
        array = numpy.array([1, 2, 3])
        weights = numpy.array([1, 0])
        # one expected row per boundary mode in self.modes
        expected_values = [[1, 1, 2],
                           [3, 1, 2],
                           [1, 1, 2],
                           [2, 1, 2],
                           [0, 1, 2]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate1d(array, weights, 0,
                                         mode=mode, cval=0)
            assert_array_equal(output,expected_value)
    def test_extend02(self):
        """Boundary extension with a kernel much longer than the input."""
        array = numpy.array([1, 2, 3])
        weights = numpy.array([1, 0, 0, 0, 0, 0, 0, 0])
        # one expected row per boundary mode in self.modes
        expected_values = [[1, 1, 1],
                           [3, 1, 2],
                           [3, 3, 2],
                           [1, 2, 3],
                           [0, 0, 0]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate1d(array, weights, 0,
                                         mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend03(self):
        """Boundary extension with a right-reaching 3-tap kernel."""
        array = numpy.array([1, 2, 3])
        weights = numpy.array([0, 0, 1])
        # one expected row per boundary mode in self.modes
        expected_values = [[2, 3, 3],
                           [2, 3, 1],
                           [2, 3, 3],
                           [2, 3, 2],
                           [2, 3, 0]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate1d(array, weights, 0,
                                         mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend04(self):
        """Boundary extension with a long kernel reaching far to the right."""
        array = numpy.array([1, 2, 3])
        weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
        # one expected row per boundary mode in self.modes
        expected_values = [[3, 3, 3],
                           [2, 3, 1],
                           [2, 1, 1],
                           [1, 2, 3],
                           [0, 0, 0]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate1d(array, weights, 0,
                                         mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend05(self):
        """2-D boundary extension with a kernel reaching past the top-left corner."""
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6],
                             [7, 8, 9]])
        weights = numpy.array([[1, 0], [0, 0]])
        # one expected 2-D result per boundary mode in self.modes
        expected_values = [[[1, 1, 2], [1, 1, 2], [4, 4, 5]],
                           [[9, 7, 8], [3, 1, 2], [6, 4, 5]],
                           [[1, 1, 2], [1, 1, 2], [4, 4, 5]],
                           [[5, 4, 5], [2, 1, 2], [5, 4, 5]],
                           [[0, 0, 0], [0, 1, 2], [0, 4, 5]]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights,
                                       mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend06(self):
        """2-D boundary extension with a kernel reaching past the bottom-right corner."""
        array = numpy.array([[1, 2, 3],
                             [4, 5, 6],
                             [7, 8, 9]])
        weights = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])
        # one expected 2-D result per boundary mode in self.modes
        expected_values = [[[5, 6, 6], [8, 9, 9], [8, 9, 9]],
                           [[5, 6, 4], [8, 9, 7], [2, 3, 1]],
                           [[5, 6, 6], [8, 9, 9], [8, 9, 9]],
                           [[5, 6, 5], [8, 9, 8], [5, 6, 5]],
                           [[5, 6, 0], [8, 9, 0], [0, 0, 0]]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights,
                                       mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend07(self):
        """Boundary extension for n-D correlate with a long 1-D kernel."""
        # NOTE(review): this test is identical to test_extend09 — consider
        # removing one of them.
        array = numpy.array([1, 2, 3])
        weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
        expected_values = [[3, 3, 3],
                           [2, 3, 1],
                           [2, 1, 1],
                           [1, 2, 3],
                           [0, 0, 0]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights,
                                       mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend08(self):
        """Boundary extension for a column vector with a long column kernel."""
        # NOTE(review): this test is identical to test_extend10 — consider
        # removing one of them.
        array = numpy.array([[1], [2], [3]])
        weights = numpy.array([[0], [0], [0], [0], [0], [0], [0],
                               [0], [1]])
        expected_values = [[[3], [3], [3]],
                           [[2], [3], [1]],
                           [[2], [1], [1]],
                           [[1], [2], [3]],
                           [[0], [0], [0]]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights,
                                       mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend09(self):
        """Boundary extension for n-D correlate with a long 1-D kernel."""
        # NOTE(review): duplicate of test_extend07.
        array = numpy.array([1, 2, 3])
        weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
        expected_values = [[3, 3, 3],
                           [2, 3, 1],
                           [2, 1, 1],
                           [1, 2, 3],
                           [0, 0, 0]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights,
                                       mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_extend10(self):
        """Boundary extension for a column vector with a long column kernel."""
        # NOTE(review): duplicate of test_extend08.
        array = numpy.array([[1], [2], [3]])
        weights = numpy.array([[0], [0], [0], [0], [0], [0], [0],
                               [0], [1]])
        expected_values = [[[3], [3], [3]],
                           [[2], [3], [1]],
                           [[2], [1], [1]],
                           [[1], [2], [3]],
                           [[0], [0], [0]]]
        for mode, expected_value in zip(self.modes, expected_values):
            output = ndimage.correlate(array, weights,
                                       mode=mode, cval=0)
            assert_array_equal(output, expected_value)
    def test_boundaries(self):
        """geometric_transform boundary modes when sampling past the right edge."""
        def shift(x):
            # sample half a pixel to the right of each output coordinate
            return (x[0] + 0.5,)
        data = numpy.array([1,2,3,4.])
        # output is longer than the input, so indices 3..6 read outside it
        expected = {'constant': [1.5,2.5,3.5,-1,-1,-1,-1],
                    'wrap': [1.5,2.5,3.5,1.5,2.5,3.5,1.5],
                    'mirror' : [1.5,2.5,3.5,3.5,2.5,1.5,1.5],
                    'nearest' : [1.5,2.5,3.5,4,4,4,4]}
        for mode in expected.keys():
            assert_array_equal(expected[mode],
                               ndimage.geometric_transform(data,shift,
                                                           cval=-1,mode=mode,
                                                           output_shape=(7,),
                                                           order=1))
    def test_boundaries2(self):
        """geometric_transform boundary modes when sampling past the left edge."""
        def shift(x):
            # sample 0.9 pixels to the left; index 0 reads outside the input
            return (x[0] - 0.9,)
        data = numpy.array([1,2,3,4])
        expected = {'constant': [-1,1,2,3],
                    'wrap': [3,1,2,3],
                    'mirror' : [2,1,2,3],
                    'nearest' : [1,1,2,3]}
        for mode in expected.keys():
            assert_array_equal(expected[mode],
                               ndimage.geometric_transform(data,shift,
                                                           cval=-1,mode=mode,
                                                           output_shape=(4,)))
    def test_fourier_gaussian_real01(self):
        """fourier_gaussian on a real FFT of an impulse preserves total mass."""
        for shape in [(32, 16), (31, 15)]:
            for type in [numpy.float32, numpy.float64]:
                a = numpy.zeros(shape, type)
                a[0, 0] = 1.0  # unit impulse
                # real transform on axis 0, complex on axis 1
                a = fft.rfft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_gaussian(a, [5.0, 2.5],
                                                       shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                # a Gaussian blur must not change the sum
                assert_almost_equal(ndimage.sum(a), 1)
    def test_fourier_gaussian_complex01(self):
        """fourier_gaussian on a complex FFT of an impulse preserves total mass."""
        for shape in [(32, 16), (31, 15)]:
            for type in [numpy.complex64, numpy.complex128]:
                a = numpy.zeros(shape, type)
                a[0, 0] = 1.0  # unit impulse
                a = fft.fft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                # n=-1 signals a full complex transform (no real axis)
                a = ndimage.fourier_gaussian(a, [5.0, 2.5], -1,
                                                       0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a.real), 1.0)
    def test_fourier_uniform_real01(self):
        """fourier_uniform on a real FFT of an impulse preserves total mass."""
        for shape in [(32, 16), (31, 15)]:
            for type in [numpy.float32, numpy.float64]:
                a = numpy.zeros(shape, type)
                a[0, 0] = 1.0  # unit impulse
                a = fft.rfft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_uniform(a, [5.0, 2.5],
                                                      shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a), 1.0)
    def test_fourier_uniform_complex01(self):
        """fourier_uniform on a complex FFT of an impulse preserves total mass."""
        for shape in [(32, 16), (31, 15)]:
            for type in [numpy.complex64, numpy.complex128]:
                a = numpy.zeros(shape, type)
                a[0, 0] = 1.0  # unit impulse
                a = fft.fft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                # n=-1 signals a full complex transform (no real axis)
                a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a.real), 1.0)
    def test_fourier_shift_real01(self):
        """fourier_shift by (1, 1) via a real transform shifts the array by one pixel."""
        for shape in [(32, 16), (31, 15)]:
            for dtype in [numpy.float32, numpy.float64]:
                expected = numpy.arange(shape[0] * shape[1], dtype=dtype)
                expected.shape = shape
                a = fft.rfft(expected, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_shift(a, [1, 1], shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                # shifted-by-one content matches; imaginary part vanishes
                assert_array_almost_equal(a[1:, 1:], expected[:-1, :-1])
                assert_array_almost_equal(a.imag, numpy.zeros(shape))
    def test_fourier_shift_complex01(self):
        """fourier_shift by (1, 1) via a complex transform shifts the array by one pixel."""
        for shape in [(32, 16), (31, 15)]:
            for type in [numpy.complex64, numpy.complex128]:
                expected = numpy.arange(shape[0] * shape[1],
                                        dtype=type)
                expected.shape = shape
                a = fft.fft(expected, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                # n=-1 signals a full complex transform (no real axis)
                a = ndimage.fourier_shift(a, [1, 1], -1, 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_array_almost_equal(a.real[1:, 1:], expected[:-1, :-1])
                assert_array_almost_equal(a.imag, numpy.zeros(shape))
    def test_fourier_ellipsoid_real01(self):
        """fourier_ellipsoid on a real FFT of an impulse preserves total mass."""
        for shape in [(32, 16), (31, 15)]:
            for type in [numpy.float32, numpy.float64]:
                a = numpy.zeros(shape, type)
                a[0, 0] = 1.0  # unit impulse
                a = fft.rfft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_ellipsoid(a, [5.0, 2.5],
                                                        shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a), 1.0)
    def test_fourier_ellipsoid_complex01(self):
        """fourier_ellipsoid on a complex FFT of an impulse preserves total mass."""
        for shape in [(32, 16), (31, 15)]:
            for type in [numpy.complex64, numpy.complex128]:
                a = numpy.zeros(shape, type)
                a[0, 0] = 1.0  # unit impulse
                a = fft.fft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                # n=-1 signals a full complex transform (no real axis)
                a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1,
                                                        0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a.real), 1.0)
    def test_spline01(self):
        """Spline filter on a 0-d array of ones is the identity, for all dtypes and orders."""
        for type in self.types:
            data = numpy.ones([], type)
            for order in range(2, 6):
                out = ndimage.spline_filter(data, order=order)
                assert_array_almost_equal(out, 1)
def test_spline02(self):
"spline filter 2"
for type in self.types:
data = numpy.array([1])
for order in range(2, 6):
out = ndimage.spline_filter(data, order=order)
assert_array_almost_equal(out, [1])
    def test_spline03(self):
        """Spline filter with an explicit output dtype on a 0-d array."""
        for type in self.types:
            data = numpy.ones([], type)
            for order in range(2, 6):
                out = ndimage.spline_filter(data, order,
                                            output=type)
                assert_array_almost_equal(out, 1)
    def test_spline04(self):
        """Spline filter on a constant 1-D array is the identity."""
        for type in self.types:
            data = numpy.ones([4], type)
            for order in range(2, 6):
                out = ndimage.spline_filter(data, order)
                assert_array_almost_equal(out, [1, 1, 1, 1])
    def test_spline05(self):
        """Spline filter on a constant 2-D array is the identity."""
        for type in self.types:
            data = numpy.ones([4, 4], type)
            for order in range(2, 6):
                out = ndimage.spline_filter(data, order=order)
                assert_array_almost_equal(out, [[1, 1, 1, 1],
                                                [1, 1, 1, 1],
                                                [1, 1, 1, 1],
                                                [1, 1, 1, 1]])
def test_geometric_transform01(self):
"geometric transform 1"
data = numpy.array([1])
def mapping(x):
return x
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape,
order=order)
assert_array_almost_equal(out, [1])
def test_geometric_transform02(self):
"geometric transform 2"
data = numpy.ones([4])
def mapping(x):
return x
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape, order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
def test_geometric_transform03(self):
"geometric transform 3"
data = numpy.ones([4])
def mapping(x):
return (x[0] - 1,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape, order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
def test_geometric_transform04(self):
"geometric transform 4"
data = numpy.array([4, 1, 3, 2])
def mapping(x):
return (x[0] - 1,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape, order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
    def test_geometric_transform05(self):
        """Column shift by one on a constant 2-D array."""
        data = numpy.array([[1, 1, 1, 1],
                            [1, 1, 1, 1],
                            [1, 1, 1, 1]])
        def mapping(x):
            return (x[0], x[1] - 1)
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                                              data.shape, order=order)
            assert_array_almost_equal(out, [[0, 1, 1, 1],
                                            [0, 1, 1, 1],
                                            [0, 1, 1, 1]])
    def test_geometric_transform06(self):
        """Column shift by one on a 2-D array with distinct values."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        def mapping(x):
            return (x[0], x[1] - 1)
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                                              data.shape, order=order)
            assert_array_almost_equal(out, [[0, 4, 1, 3],
                                            [0, 7, 6, 8],
                                            [0, 3, 5, 3]])
    def test_geometric_transform07(self):
        """Row shift by one on a 2-D array with distinct values."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        def mapping(x):
            return (x[0] - 1, x[1])
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                                              data.shape, order=order)
            assert_array_almost_equal(out, [[0, 0, 0, 0],
                                            [4, 1, 3, 2],
                                            [7, 6, 8, 5]])
    def test_geometric_transform08(self):
        """Diagonal shift by (1, 1) on a 2-D array with distinct values."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        def mapping(x):
            return (x[0] - 1, x[1] - 1)
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                                              data.shape, order=order)
            assert_array_almost_equal(out, [[0, 0, 0, 0],
                                            [0, 4, 1, 3],
                                            [0, 7, 6, 8]])
    def test_geometric_transform10(self):
        """Same as test 8 but pre-filtering manually and passing prefilter=False."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        def mapping(x):
            return (x[0] - 1, x[1] - 1)
        for order in range(0, 6):
            if (order > 1):
                # spline prefiltering is only needed for order > 1
                filtered = ndimage.spline_filter(data,
                                                 order=order)
            else:
                filtered = data
            out = ndimage.geometric_transform(filtered, mapping,
                           data.shape, order=order, prefilter=False)
            assert_array_almost_equal(out, [[0, 0, 0, 0],
                                            [0, 4, 1, 3],
                                            [0, 7, 6, 8]])
def test_geometric_transform13(self):
"geometric transform 13"
data = numpy.ones([2], numpy.float64)
def mapping(x):
return (x[0] // 2,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
[4], order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
def test_geometric_transform14(self):
"geometric transform 14"
data = [1, 5, 2, 6, 3, 7, 4, 4]
def mapping(x):
return (2 * x[0],)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
[4], order=order)
assert_array_almost_equal(out, [1, 2, 3, 4])
def test_geometric_transform15(self):
"geometric transform 15"
data = [1, 2, 3, 4]
def mapping(x):
return (x[0] / 2,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
[8], order=order)
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
    def test_geometric_transform16(self):
        """Column-doubling mapping downsamples columns by two."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9.0, 10, 11, 12]]
        def mapping(x):
            return (x[0], x[1] * 2)
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                                              (3, 2), order=order)
            assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
    def test_geometric_transform17(self):
        """Row-doubling mapping downsamples rows by two."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (x[0] * 2, x[1])
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                                              (1, 4), order=order)
            assert_array_almost_equal(out, [[1, 2, 3, 4]])
    def test_geometric_transform18(self):
        """Doubling both coordinates downsamples rows and columns by two."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (x[0] * 2, x[1] * 2)
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                                              (1, 2), order=order)
            assert_array_almost_equal(out, [[1, 3]])
    def test_geometric_transform19(self):
        """Halving the column coordinate upsamples columns by two."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (x[0], x[1] / 2)
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                                              (3, 8), order=order)
            assert_array_almost_equal(out[..., ::2], data)
    def test_geometric_transform20(self):
        """Halving the row coordinate upsamples rows by two."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (x[0] / 2, x[1])
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                                              (6, 4), order=order)
            assert_array_almost_equal(out[::2, ...], data)
    def test_geometric_transform21(self):
        """Halving both coordinates upsamples rows and columns by two."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (x[0] / 2, x[1] / 2)
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                                              (6, 8), order=order)
            assert_array_almost_equal(out[::2, ::2], data)
    def test_geometric_transform22(self):
        """Upsampling followed by the inverse downsampling is a round trip."""
        data = numpy.array([[1, 2, 3, 4],
                            [5, 6, 7, 8],
                            [9, 10, 11, 12]], numpy.float64)
        def mapping1(x):
            return (x[0] / 2, x[1] / 2)
        def mapping2(x):
            return (x[0] * 2, x[1] * 2)
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping1,
                                              (6, 8), order=order)
            out = ndimage.geometric_transform(out, mapping2,
                                              (3, 4), order=order)
            assert_array_almost_equal(out, data)
    def test_geometric_transform23(self):
        """A 2-D -> 1-D mapping: extract every other element of row 1."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x):
            return (1, x[0] * 2)
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                                              (2,), order=order)
            out = out.astype(numpy.int32)
            assert_array_almost_equal(out, [5, 7])
    def test_geometric_transform24(self):
        """Mapping parameters supplied via extra_arguments and extra_keywords."""
        data = [[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12]]
        def mapping(x, a, b):
            # a fixes the row index, b scales the column index
            return (a, x[0] * b)
        for order in range(0, 6):
            out = ndimage.geometric_transform(data, mapping,
                        (2,), order=order, extra_arguments=(1,),
                        extra_keywords={'b': 2})
            assert_array_almost_equal(out, [5, 7])
    def test_map_coordinates01(self):
        """map_coordinates with integer indices shifted by one equals a (1, 1) shift."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        idx = numpy.indices(data.shape)
        idx -= 1  # look one pixel up and to the left
        for order in range(0, 6):
            out = ndimage.map_coordinates(data, idx, order=order)
            assert_array_almost_equal(out, [[0, 0, 0, 0],
                                            [0, 4, 1, 3],
                                            [0, 7, 6, 8]])
    def test_map_coordinates02(self):
        """map_coordinates with fractional indices matches ndimage.shift."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        idx = numpy.indices(data.shape, numpy.float64)
        idx -= 0.5  # sample half a pixel up and to the left
        for order in range(0, 6):
            out1 = ndimage.shift(data, 0.5, order=order)
            out2 = ndimage.map_coordinates(data, idx,
                                           order=order)
            assert_array_almost_equal(out1, out2)
def test_affine_transform01(self):
"affine_transform 1"
data = numpy.array([1])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]],
order=order)
assert_array_almost_equal(out, [1])
def test_affine_transform02(self):
"affine transform 2"
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]],
order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
def test_affine_transform03(self):
"affine transform 3"
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]], -1,
order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
def test_affine_transform04(self):
"affine transform 4"
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]], -1,
order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
    def test_affine_transform05(self):
        """Identity matrix with a column offset of -1 on a constant 2-D array."""
        data = numpy.array([[1, 1, 1, 1],
                            [1, 1, 1, 1],
                            [1, 1, 1, 1]])
        for order in range(0, 6):
            out = ndimage.affine_transform(data, [[1, 0],
                                                  [0, 1]],
                                           [0, -1], order=order)
            assert_array_almost_equal(out, [[0, 1, 1, 1],
                                            [0, 1, 1, 1],
                                            [0, 1, 1, 1]])
    def test_affine_transform06(self):
        """Identity matrix with a column offset of -1 on distinct 2-D data."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        for order in range(0, 6):
            out = ndimage.affine_transform(data, [[1, 0],
                                                  [0, 1]],
                                           [0, -1], order=order)
            assert_array_almost_equal(out, [[0, 4, 1, 3],
                                            [0, 7, 6, 8],
                                            [0, 3, 5, 3]])
    def test_affine_transform07(self):
        """Identity matrix with a row offset of -1 on distinct 2-D data."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        for order in range(0, 6):
            out = ndimage.affine_transform(data, [[1, 0],
                                                  [0, 1]],
                                           [-1, 0], order=order)
            assert_array_almost_equal(out, [[0, 0, 0, 0],
                                            [4, 1, 3, 2],
                                            [7, 6, 8, 5]])
    def test_affine_transform08(self):
        """Identity matrix with a diagonal offset of (-1, -1)."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        for order in range(0, 6):
            out = ndimage.affine_transform(data, [[1, 0],
                                                  [0, 1]],
                                           [-1, -1], order=order)
            assert_array_almost_equal(out, [[0, 0, 0, 0],
                                            [0, 4, 1, 3],
                                            [0, 7, 6, 8]])
    def test_affine_transform09(self):
        """Same as test 8 but pre-filtering manually and passing prefilter=False."""
        data = numpy.array([[4, 1, 3, 2],
                            [7, 6, 8, 5],
                            [3, 5, 3, 6]])
        for order in range(0, 6):
            if (order > 1):
                # spline prefiltering is only needed for order > 1
                filtered = ndimage.spline_filter(data,
                                                 order=order)
            else:
                filtered = data
            out = ndimage.affine_transform(filtered,[[1, 0],
                                                     [0, 1]],
                                  [-1, -1], order=order, prefilter=False)
            assert_array_almost_equal(out, [[0, 0, 0, 0],
                                            [0, 4, 1, 3],
                                            [0, 7, 6, 8]])
def test_affine_transform10(self):
"affine transform 10"
data = numpy.ones([2], numpy.float64)
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5]],
output_shape=(4,), order=order)
assert_array_almost_equal(out, [1, 1, 1, 0])
def test_affine_transform11(self):
"affine transform 11"
data = [1, 5, 2, 6, 3, 7, 4, 4]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2]], 0, (4,),
order=order)
assert_array_almost_equal(out, [1, 2, 3, 4])
def test_affine_transform12(self):
"affine transform 12"
data = [1, 2, 3, 4]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5]], 0,
(8,), order=order)
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
def test_affine_transform13(self):
"affine transform 13"
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9.0, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0],
[0, 2]], 0,
(3, 2), order=order)
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
def test_affine_transform14(self):
"affine transform 14"
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2, 0],
[0, 1]], 0,
(1, 4), order=order)
assert_array_almost_equal(out, [[1, 2, 3, 4]])
def test_affine_transform15(self):
"affine transform 15"
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2, 0],
[0, 2]], 0,
(1, 2), order=order)
assert_array_almost_equal(out, [[1, 3]])
def test_affine_transform16(self):
"affine transform 16"
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0.0],
[0, 0.5]], 0,
(3, 8), order=order)
assert_array_almost_equal(out[..., ::2], data)
def test_affine_transform17(self):
"affine transform 17"
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5, 0],
[0, 1]], 0,
(6, 4), order=order)
assert_array_almost_equal(out[::2, ...], data)
def test_affine_transform18(self):
"affine transform 18"
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data,
[[0.5, 0],
[0, 0.5]], 0,
(6, 8), order=order)
assert_array_almost_equal(out[::2, ::2], data)
def test_affine_transform19(self):
"affine transform 19"
data = numpy.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], numpy.float64)
for order in range(0, 6):
out = ndimage.affine_transform(data,
[[0.5, 0],
[0, 0.5]], 0,
(6, 8), order=order)
out = ndimage.affine_transform(out,
[[2.0, 0],
[0, 2.0]], 0,
(3, 4), order=order)
assert_array_almost_equal(out, data)
def test_affine_transform20(self):
"affine transform 20"
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0], [2]], 0,
(2,), order=order)
assert_array_almost_equal(out, [1, 3])
def test_affine_transform21(self):
"affine transform 21"
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2], [0]], 0,
(2,), order=order)
assert_array_almost_equal(out, [1, 9])
def test_shift01(self):
"shift 1"
data = numpy.array([1])
for order in range(0, 6):
out = ndimage.shift(data, [1], order=order)
assert_array_almost_equal(out, [0])
def test_shift02(self):
"shift 2"
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.shift(data, [1], order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
def test_shift03(self):
"shift 3"
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.shift(data, -1, order=order)
assert_array_almost_equal(out, [1, 1, 1, 0])
def test_shift04(self):
"shift 4"
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
out = ndimage.shift(data, 1, order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
def test_shift05(self):
"shift 5"
data = numpy.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
for order in range(0, 6):
out = ndimage.shift(data, [0, 1], order=order)
assert_array_almost_equal(out, [[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
def test_shift06(self):
"shift 6"
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.shift(data, [0, 1], order=order)
assert_array_almost_equal(out, [[0, 4, 1, 3],
[0, 7, 6, 8],
[0, 3, 5, 3]])
def test_shift07(self):
"shift 7"
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.shift(data, [1, 0], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[4, 1, 3, 2],
[7, 6, 8, 5]])
def test_shift08(self):
"shift 8"
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.shift(data, [1, 1], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_shift09(self):
"shift 9"
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
if (order > 1):
filtered = ndimage.spline_filter(data,
order=order)
else:
filtered = data
out = ndimage.shift(filtered, [1, 1], order=order,
prefilter=False)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_zoom1(self):
"zoom 1"
for order in range(0,6):
for z in [2,[2,2]]:
arr = numpy.array(range(25)).reshape((5,5)).astype(float)
arr = ndimage.zoom(arr, z, order=order)
assert_equal(arr.shape,(10,10))
assert_(numpy.all(arr[-1,:] != 0))
assert_(numpy.all(arr[-1,:] >= (20 - eps)))
assert_(numpy.all(arr[0,:] <= (5 + eps)))
assert_(numpy.all(arr >= (0 - eps)))
assert_(numpy.all(arr <= (24 + eps)))
def test_zoom2(self):
"zoom 2"
arr = numpy.arange(12).reshape((3,4))
out = ndimage.zoom(ndimage.zoom(arr,2),0.5)
assert_array_equal(out,arr)
def test_zoom_affine01(self):
"zoom by affine transformation 1"
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [0.5, 0.5], 0,
(6, 8), order=order)
assert_array_almost_equal(out[::2, ::2], data)
def test_rotate01(self):
"rotate 1"
data = numpy.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 0)
assert_array_almost_equal(out, data)
def test_rotate02(self):
"rotate 2"
data = numpy.array([[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 90)
assert_array_almost_equal(out, expected)
def test_rotate03(self):
"rotate 3"
data = numpy.array([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 90)
assert_array_almost_equal(out, expected)
def test_rotate04(self):
"rotate 4"
data = numpy.array([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 90, reshape=False)
assert_array_almost_equal(out, expected)
def test_rotate05(self):
"rotate 5"
data = numpy.empty((4,3,3))
for i in range(3):
data[:,:,i] = numpy.array([[0,0,0],
[0,1,0],
[0,1,0],
[0,0,0]], dtype=numpy.float64)
expected = numpy.array([[0,0,0,0],
[0,1,1,0],
[0,0,0,0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 90)
for i in range(3):
assert_array_almost_equal(out[:,:,i], expected)
def test_rotate06(self):
"rotate 6"
data = numpy.empty((3,4,3))
for i in range(3):
data[:,:,i] = numpy.array([[0,0,0,0],
[0,1,1,0],
[0,0,0,0]], dtype=numpy.float64)
expected = numpy.array([[0,0,0],
[0,1,0],
[0,1,0],
[0,0,0]], dtype=numpy.float64)
for order in range(0, 6):
out = ndimage.rotate(data, 90)
for i in range(3):
assert_array_almost_equal(out[:,:,i], expected)
def test_rotate07(self):
"rotate 7"
data = numpy.array([[[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2,
dtype=numpy.float64)
data = data.transpose()
expected = numpy.array([[[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]] * 2, dtype=numpy.float64)
expected = expected.transpose([2,1,0])
for order in range(0, 6):
out = ndimage.rotate(data, 90, axes=(0, 1))
assert_array_almost_equal(out, expected)
def test_rotate08(self):
"rotate 8"
data = numpy.array([[[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2,
dtype=numpy.float64)
data = data.transpose()
expected = numpy.array([[[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2,
dtype=numpy.float64)
expected = expected.transpose()
for order in range(0, 6):
out = ndimage.rotate(data, 90, axes=(0, 1),
reshape=False)
assert_array_almost_equal(out, expected)
def test_watershed_ift01(self):
"watershed_ift 1"
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[ -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0]],
numpy.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1,1,1],
[1,1,1],
[1,1,1]])
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift02(self):
"watershed_ift 2"
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[ -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0]],
numpy.int8)
out = ndimage.watershed_ift(data, markers)
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, 1, 1, 1, -1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, 1, 1, 1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift03(self):
"watershed_ift 3"
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 2, 0, 3, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, -1]],
numpy.int8)
out = ndimage.watershed_ift(data, markers)
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, 2, -1, 3, -1, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, -1, 2, -1, 3, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift04(self):
"watershed_ift 4"
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 2, 0, 3, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, -1]],
numpy.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1,1,1],
[1,1,1],
[1,1,1]])
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift05(self):
"watershed_ift 5"
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 3, 0, 2, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, -1]],
numpy.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1,1,1],
[1,1,1],
[1,1,1]])
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift06(self):
"watershed_ift 6"
data = numpy.array([[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[ -1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0]],
numpy.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1,1,1],
[1,1,1],
[1,1,1]])
expected = [[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift07(self):
"watershed_ift 7"
shape = (7, 6)
data = numpy.zeros(shape, dtype=numpy.uint8)
data = data.transpose()
data[...] = numpy.array([[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0]],
numpy.int8)
out = numpy.zeros(shape, dtype = numpy.int16)
out = out.transpose()
ndimage.watershed_ift(data, markers,
structure=[[1,1,1],
[1,1,1],
[1,1,1]],
output=out)
expected = [[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
    def test_distance_transform_bf01(self):
        "brute force distance transform 1"
        # Euclidean metric: the distance image is checked via its square
        # (out * out) so the expected table stays integral; the feature
        # transform ft holds, per axis, the index of the nearest background
        # pixel for every position.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            out, ft = ndimage.distance_transform_bf(data, 'euclidean',
                                                        return_indices=True)
            expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 1, 2, 4, 2, 1, 0, 0],
                        [0, 0, 1, 4, 8, 4, 1, 0, 0],
                        [0, 0, 1, 2, 4, 2, 1, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0]]
            assert_array_almost_equal(out * out, expected)
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 2, 1, 2, 2, 2, 2],
                         [3, 3, 3, 2, 1, 2, 3, 3, 3],
                         [4, 4, 4, 4, 6, 4, 4, 4, 4],
                         [5, 5, 6, 6, 7, 6, 6, 5, 5],
                         [6, 6, 6, 7, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 1, 2, 4, 6, 7, 7, 8],
                         [0, 1, 1, 1, 6, 7, 7, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_bf02(self):
        "brute force distance transform 2"
        # Same input as test 1 with the cityblock (L1) metric; distances
        # are integral so they are compared directly.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            out, ft = ndimage.distance_transform_bf(data, 'cityblock',
                                                        return_indices=True)
            expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 1, 2, 2, 2, 1, 0, 0],
                        [0, 0, 1, 2, 3, 2, 1, 0, 0],
                        [0, 0, 1, 2, 2, 2, 1, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0]]
            assert_array_almost_equal(out, expected)
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 2, 1, 2, 2, 2, 2],
                         [3, 3, 3, 3, 1, 3, 3, 3, 3],
                         [4, 4, 4, 4, 7, 4, 4, 4, 4],
                         [5, 5, 6, 7, 7, 7, 6, 5, 5],
                         [6, 6, 6, 7, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 1, 1, 4, 7, 7, 7, 8],
                         [0, 1, 1, 1, 4, 7, 7, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(expected, ft)
    def test_distance_transform_bf03(self):
        "brute force distance transform 3"
        # Same input with the chessboard (L-infinity) metric.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            out, ft = ndimage.distance_transform_bf(data, 'chessboard',
                                                        return_indices=True)
            expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 1, 1, 2, 1, 1, 0, 0],
                        [0, 0, 1, 2, 2, 2, 1, 0, 0],
                        [0, 0, 1, 1, 2, 1, 1, 0, 0],
                        [0, 0, 0, 1, 1, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0]]
            assert_array_almost_equal(out, expected)
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 2, 1, 2, 2, 2, 2],
                         [3, 3, 4, 2, 2, 2, 4, 3, 3],
                         [4, 4, 5, 6, 6, 6, 5, 4, 4],
                         [5, 5, 6, 6, 7, 6, 6, 5, 5],
                         [6, 6, 6, 7, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 5, 6, 6, 7, 8],
                         [0, 1, 1, 2, 6, 6, 7, 7, 8],
                         [0, 1, 1, 2, 6, 7, 7, 7, 8],
                         [0, 1, 2, 2, 6, 6, 7, 7, 8],
                         [0, 1, 2, 4, 5, 6, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_bf04(self):
        "brute force distance transform 4"
        # Exercises every combination of the distances/indices output
        # arguments (preallocated vs returned) and checks that all of them
        # agree with the plain two-return-value call.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            # Reference result for the comparisons below.
            tdt, tft = ndimage.distance_transform_bf(data,
                                                     return_indices=1)
            dts = []
            fts = []
            # Preallocated distances only.
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ndimage.distance_transform_bf(data, distances=dt)
            dts.append(dt)
            # Indices only, returned.
            ft = ndimage.distance_transform_bf(data,
                                return_distances=False, return_indices=1)
            fts.append(ft)
            # Indices only, written into a preallocated array.
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_bf(data,
                     return_distances=False, return_indices=True, indices=ft)
            fts.append(ft)
            # Both returned.
            dt, ft = ndimage.distance_transform_bf(data,
                                                   return_indices=1)
            dts.append(dt)
            fts.append(ft)
            # Preallocated distances, returned indices.
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ft = ndimage.distance_transform_bf(data, distances=dt,
                                               return_indices=True)
            dts.append(dt)
            fts.append(ft)
            # Returned distances, preallocated indices.
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            dt = ndimage.distance_transform_bf(data,
                                        return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            # Both preallocated.
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_bf(data, distances=dt,
                                          return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            for dt in dts:
                assert_array_almost_equal(tdt, dt)
            for ft in fts:
                assert_array_almost_equal(tft, ft)
    def test_distance_transform_bf05(self):
        "brute force distance transform 5"
        # Euclidean metric with isotropic sampling [2, 2]: squared
        # distances scale by 4 relative to test 1, the feature transform
        # indices are unchanged.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            out, ft = ndimage.distance_transform_bf(data,
                     'euclidean', return_indices=True, sampling=[2, 2])
            expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 4, 4, 4, 0, 0, 0],
                        [0, 0, 4, 8, 16, 8, 4, 0, 0],
                        [0, 0, 4, 16, 32, 16, 4, 0, 0],
                        [0, 0, 4, 8, 16, 8, 4, 0, 0],
                        [0, 0, 0, 4, 4, 4, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0]]
            assert_array_almost_equal(out * out, expected)
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 2, 1, 2, 2, 2, 2],
                         [3, 3, 3, 2, 1, 2, 3, 3, 3],
                         [4, 4, 4, 4, 6, 4, 4, 4, 4],
                         [5, 5, 6, 6, 7, 6, 6, 5, 5],
                         [6, 6, 6, 7, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 1, 2, 4, 6, 7, 7, 8],
                         [0, 1, 1, 1, 6, 7, 7, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_bf06(self):
        "brute force distance transform 6"
        # Euclidean metric with anisotropic sampling [2, 1]: rows count
        # double, so the nearest background pixel often changes axis.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            out, ft = ndimage.distance_transform_bf(data,
                     'euclidean', return_indices=True, sampling=[2, 1])
            expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 1, 4, 1, 0, 0, 0],
                        [0, 0, 1, 4, 8, 4, 1, 0, 0],
                        [0, 0, 1, 4, 9, 4, 1, 0, 0],
                        [0, 0, 1, 4, 8, 4, 1, 0, 0],
                        [0, 0, 0, 1, 4, 1, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0]]
            assert_array_almost_equal(out * out, expected)
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 2, 2, 2, 2, 2, 2],
                         [3, 3, 3, 3, 2, 3, 3, 3, 3],
                         [4, 4, 4, 4, 4, 4, 4, 4, 4],
                         [5, 5, 5, 5, 6, 5, 5, 5, 5],
                         [6, 6, 6, 6, 7, 6, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 6, 6, 6, 7, 8],
                         [0, 1, 1, 1, 6, 7, 7, 7, 8],
                         [0, 1, 1, 1, 7, 7, 7, 7, 8],
                         [0, 1, 1, 1, 6, 7, 7, 7, 8],
                         [0, 1, 2, 2, 4, 6, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8]]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_cdt01(self):
        "chamfer type distance transform 1"
        # Cityblock chamfer transform must match the brute-force
        # cityblock distances; the feature transform may legitimately
        # pick different (equally near) pixels, so it has its own table.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            out, ft = ndimage.distance_transform_cdt(data,
                                     'cityblock', return_indices=True)
            bf = ndimage.distance_transform_bf(data, 'cityblock')
            assert_array_almost_equal(bf, out)
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 1, 1, 1, 2, 2, 2],
                         [3, 3, 2, 1, 1, 1, 2, 3, 3],
                         [4, 4, 4, 4, 1, 4, 4, 4, 4],
                         [5, 5, 5, 5, 7, 7, 6, 5, 5],
                         [6, 6, 6, 6, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 1, 1, 4, 7, 7, 7, 8],
                         [0, 1, 1, 1, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_cdt02(self):
        "chamfer type distance transform 2"
        # Chessboard chamfer transform must match brute-force chessboard
        # distances; the feature-transform table is implementation
        # specific (ties can be broken differently).
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            out, ft = ndimage.distance_transform_cdt(data, 'chessboard',
                                                     return_indices=True)
            bf = ndimage.distance_transform_bf(data, 'chessboard')
            assert_array_almost_equal(bf, out)
            expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [2, 2, 2, 1, 1, 1, 2, 2, 2],
                         [3, 3, 2, 2, 1, 2, 2, 3, 3],
                         [4, 4, 3, 2, 2, 2, 3, 4, 4],
                         [5, 5, 4, 6, 7, 6, 4, 5, 5],
                         [6, 6, 6, 6, 7, 7, 6, 6, 6],
                         [7, 7, 7, 7, 7, 7, 7, 7, 7],
                         [8, 8, 8, 8, 8, 8, 8, 8, 8]],
                        [[0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 2, 3, 4, 6, 7, 8],
                         [0, 1, 1, 2, 2, 6, 6, 7, 8],
                         [0, 1, 1, 1, 2, 6, 7, 7, 8],
                         [0, 1, 1, 2, 6, 6, 7, 7, 8],
                         [0, 1, 2, 2, 5, 6, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],
                         [0, 1, 2, 3, 4, 5, 6, 7, 8],]]
            assert_array_almost_equal(ft, expected)
    def test_distance_transform_cdt03(self):
        "chamfer type distance transform 3"
        # Exercises every combination of preallocated/returned distances
        # and indices arguments and checks all results agree with the
        # plain two-return-value call.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            # Reference result for the comparisons below.
            tdt, tft = ndimage.distance_transform_cdt(data,
                                                      return_indices=True)
            dts = []
            fts = []
            # Preallocated distances only (cdt distances are int32).
            dt = numpy.zeros(data.shape, dtype = numpy.int32)
            ndimage.distance_transform_cdt(data, distances = dt)
            dts.append(dt)
            # Indices only, returned.
            ft = ndimage.distance_transform_cdt(data,
                               return_distances=False, return_indices=True)
            fts.append(ft)
            # Indices only, preallocated.
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_cdt(data,
                    return_distances=False, return_indices=True, indices=ft)
            fts.append(ft)
            # Both returned.
            dt, ft = ndimage.distance_transform_cdt(data,
                                                    return_indices=True)
            dts.append(dt)
            fts.append(ft)
            # Preallocated distances, returned indices.
            dt = numpy.zeros(data.shape, dtype=numpy.int32)
            ft = ndimage.distance_transform_cdt(data, distances=dt,
                                                return_indices = True)
            dts.append(dt)
            fts.append(ft)
            # Returned distances, preallocated indices.
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            dt = ndimage.distance_transform_cdt(data,
                                       return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            # Both preallocated.
            dt = numpy.zeros(data.shape, dtype=numpy.int32)
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_cdt(data, distances=dt,
                                   return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            for dt in dts:
                assert_array_almost_equal(tdt, dt)
            for ft in fts:
                assert_array_almost_equal(tft, ft)
    def test_distance_transform_edt01(self):
        "euclidean distance transform 1"
        # The exact EDT must match brute force, and the distances must be
        # consistent with the feature transform: reconstruct the distance
        # to each pixel's nearest background pixel from ft and compare.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            out, ft = ndimage.distance_transform_edt(data,
                                                     return_indices=True)
            bf = ndimage.distance_transform_bf(data, 'euclidean')
            assert_array_almost_equal(bf, out)
            # Rebuild distances from ft: offset vectors, squared norm, sqrt.
            dt = ft - numpy.indices(ft.shape[1:], dtype=ft.dtype)
            dt = dt.astype(numpy.float64)
            numpy.multiply(dt, dt, dt)
            dt = numpy.add.reduce(dt, axis=0)
            numpy.sqrt(dt, dt)
            assert_array_almost_equal(bf, dt)
    def test_distance_transform_edt02(self):
        "euclidean distance transform 2"
        # Exercises every combination of preallocated/returned distances
        # and indices arguments and checks all results agree with the
        # plain two-return-value call.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            # Reference result for the comparisons below.
            tdt, tft = ndimage.distance_transform_edt(data,
                                                      return_indices=True)
            dts = []
            fts = []
            # Preallocated distances only.
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ndimage.distance_transform_edt(data, distances=dt)
            dts.append(dt)
            # Indices only, returned.
            ft = ndimage.distance_transform_edt(data,
                                    return_distances=0, return_indices=True)
            fts.append(ft)
            # Indices only, preallocated.
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_edt(data,
                          return_distances=False,return_indices=True, indices=ft)
            fts.append(ft)
            # Both returned.
            dt, ft = ndimage.distance_transform_edt(data,
                                                    return_indices=True)
            dts.append(dt)
            fts.append(ft)
            # Preallocated distances, returned indices.
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ft = ndimage.distance_transform_edt(data, distances=dt,
                                                return_indices=True)
            dts.append(dt)
            fts.append(ft)
            # Returned distances, preallocated indices.
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            dt = ndimage.distance_transform_edt(data,
                                       return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            # Both preallocated.
            dt = numpy.zeros(data.shape, dtype=numpy.float64)
            ft = numpy.indices(data.shape, dtype=numpy.int32)
            ndimage.distance_transform_edt(data, distances=dt,
                                   return_indices=True, indices=ft)
            dts.append(dt)
            fts.append(ft)
            for dt in dts:
                assert_array_almost_equal(tdt, dt)
            for ft in fts:
                assert_array_almost_equal(tft, ft)
    def test_distance_transform_edt03(self):
        "euclidean distance transform 3"
        # Isotropic sampling [2, 2]: the exact EDT must match brute force
        # computed with the same sampling.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            ref = ndimage.distance_transform_bf(data, 'euclidean',
                                                sampling=[2, 2])
            out = ndimage.distance_transform_edt(data,
                                                 sampling=[2, 2])
            assert_array_almost_equal(ref, out)
    def test_distance_transform_edt4(self):
        "euclidean distance transform 4"
        # Anisotropic sampling [2, 1]: the exact EDT must match brute
        # force computed with the same sampling.
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
            ref = ndimage.distance_transform_bf(data, 'euclidean',
                                                sampling=[2, 1])
            out = ndimage.distance_transform_edt(data,
                                                 sampling=[2, 1])
            assert_array_almost_equal(ref, out)
def test_generate_structure01(self):
"generation of a binary structure 1"
struct = ndimage.generate_binary_structure(0, 1)
assert_array_almost_equal(struct, 1)
def test_generate_structure02(self):
"generation of a binary structure 2"
struct = ndimage.generate_binary_structure(1, 1)
assert_array_almost_equal(struct, [1, 1, 1])
def test_generate_structure03(self):
"generation of a binary structure 3"
struct = ndimage.generate_binary_structure(2, 1)
assert_array_almost_equal(struct, [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
def test_generate_structure04(self):
"generation of a binary structure 4"
struct = ndimage.generate_binary_structure(2, 2)
assert_array_almost_equal(struct, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
def test_iterate_structure01(self):
"iterating a structure 1"
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
out = ndimage.iterate_structure(struct, 2)
assert_array_almost_equal(out, [[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]])
def test_iterate_structure02(self):
"iterating a structure 2"
struct = [[0, 1],
[1, 1],
[0, 1]]
out = ndimage.iterate_structure(struct, 2)
assert_array_almost_equal(out, [[0, 0, 1],
[0, 1, 1],
[1, 1, 1],
[0, 1, 1],
[0, 0, 1]])
def test_iterate_structure03(self):
"iterating a structure 3"
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
out = ndimage.iterate_structure(struct, 2, 1)
expected = [[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]]
assert_array_almost_equal(out[0], expected)
assert_equal(out[1], [2, 2])
def test_binary_erosion01(self):
"binary erosion 1"
for type in self.types:
data = numpy.ones([], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, 1)
def test_binary_erosion02(self):
"binary erosion 2"
for type in self.types:
data = numpy.ones([], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, 1)
def test_binary_erosion03(self):
"binary erosion 3"
for type in self.types:
data = numpy.ones([1], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0])
def test_binary_erosion04(self):
"binary erosion 4"
for type in self.types:
data = numpy.ones([1], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1])
def test_binary_erosion05(self):
"binary erosion 5"
for type in self.types:
data = numpy.ones([3], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 1, 0])
def test_binary_erosion06(self):
"binary erosion 6"
for type in self.types:
data = numpy.ones([3], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 1, 1])
def test_binary_erosion07(self):
"binary erosion 7"
for type in self.types:
data = numpy.ones([5], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 1, 1, 1, 0])
def test_binary_erosion08(self):
"binary erosion 8"
for type in self.types:
data = numpy.ones([5], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 1, 1, 1, 1])
def test_binary_erosion09(self):
"binary erosion 9"
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 0, 0, 0, 0])
def test_binary_erosion10(self):
"binary erosion 10"
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 0, 0, 0, 1])
def test_binary_erosion11(self):
"binary erosion 11"
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct,
border_value=1)
assert_array_almost_equal(out, [1, 0, 1, 0, 1])
def test_binary_erosion12(self):
"binary erosion 12"
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct,
border_value=1,
origin=-1)
assert_array_almost_equal(out, [0, 1, 0, 1, 1])
def test_binary_erosion13(self):
"binary erosion 13"
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct,
border_value=1,
origin=1)
assert_array_almost_equal(out, [1, 1, 0, 1, 0])
def test_binary_erosion14(self):
"binary erosion 14"
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
struct = [1, 1]
out = ndimage.binary_erosion(data, struct,
border_value=1)
assert_array_almost_equal(out, [1, 1, 0, 0, 1])
def test_binary_erosion15(self):
"binary erosion 15"
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
struct = [1, 1]
out = ndimage.binary_erosion(data, struct,
border_value=1,
origin=-1)
assert_array_almost_equal(out, [1, 0, 0, 1, 1])
def test_binary_erosion16(self):
"binary erosion 16"
for type in self.types:
data = numpy.ones([1, 1], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1]])
def test_binary_erosion17(self):
"binary erosion 17"
for type in self.types:
data = numpy.ones([1, 1], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0]])
def test_binary_erosion18(self):
"binary erosion 18"
for type in self.types:
data = numpy.ones([1, 3], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0, 0, 0]])
def test_binary_erosion19(self):
"binary erosion 19"
for type in self.types:
data = numpy.ones([1, 3], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1, 1, 1]])
def test_binary_erosion20(self):
"binary erosion 20"
for type in self.types:
data = numpy.ones([3, 3], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
def test_binary_erosion21(self):
"binary erosion 21"
for type in self.types:
data = numpy.ones([3, 3], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
    def test_binary_erosion22(self):
        """Default-structure erosion of an 8x8 pattern with border_value=1,
        for each dtype in self.types."""
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 1, 0, 0, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_erosion(data, border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_erosion23(self):
        """Erosion with the fully-connected 3x3 structure
        (generate_binary_structure(2, 2)) and border_value=1."""
        struct = ndimage.generate_binary_structure(2, 2)
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_erosion(data, struct,
                                         border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_erosion24(self):
        """Erosion with an asymmetric 2x2 L-shaped structure and
        border_value=1."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 1, 1],
                    [0, 0, 0, 1, 1, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 0, 1, 0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 1, 1],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_erosion(data, struct,
                                         border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_erosion25(self):
        """Erosion with a hollow cross structure (center element 0) and
        border_value=1; the center pixel of the input is not required."""
        struct = [[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 1, 1],
                                [0, 0, 1, 1, 1, 0, 1, 1],
                                [0, 0, 1, 0, 1, 1, 0, 0],
                                [0, 1, 0, 1, 1, 1, 1, 0],
                                [0, 1, 1, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_erosion(data, struct,
                                         border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_erosion26(self):
        """Same hollow-cross structure as test 25 but with
        origin=(-1, -1), shifting the result up and to the left."""
        struct = [[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 1, 0, 0, 1],
                    [0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 1]]
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 1, 1],
                                [0, 0, 1, 1, 1, 0, 1, 1],
                                [0, 0, 1, 0, 1, 1, 0, 0],
                                [0, 1, 0, 1, 1, 1, 1, 0],
                                [0, 1, 1, 0, 0, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_erosion(data, struct,
                                         border_value=1, origin=(-1, -1))
            assert_array_almost_equal(out, expected)
    def test_binary_erosion27(self):
        """Iterated erosion (iterations=2) of a diamond with the cross
        structure and border_value=1 leaves only the center pixel."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, iterations=2)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion28(self):
        """Same as test 27 but writing the result into a preallocated
        boolean output array via the output= keyword."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=2, output=out)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion29(self):
        """Three iterations of cross-structure erosion on a larger diamond
        (border_value=1) leave only the center pixel."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, iterations=3)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion30(self):
        """Same as test 29 but writing into a preallocated output array."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=3, output=out)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion31(self):
        """Single-iteration erosion with output array and
        origin=(-1, -1); the shift wraps expected values around."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 1, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0, 0, 0],
                    [1, 1, 1, 1, 1, 0, 1],
                    [0, 1, 1, 1, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 1]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=1, output=out, origin=(-1, -1))
        assert_array_almost_equal(out, expected)
    def test_binary_erosion32(self):
        """Duplicate of test 27: iterations=2 with the cross structure and
        border_value=1 leaves only the center pixel."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, iterations=2)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion33(self):
        """Masked erosion with iterations=-1: pixels where mask is 0 keep
        their input value, the rest are eroded repeatedly."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 1, 1],
                    [0, 0, 0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        mask = [[1, 1, 1, 1, 1, 0, 0],
                [1, 1, 1, 1, 1, 1, 0],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1]]
        data = numpy.array([[0, 0, 0, 0, 0, 1, 1],
                            [0, 0, 0, 1, 0, 0, 1],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, mask=mask, iterations=-1)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion34(self):
        """Masked single-pass erosion: only pixels inside the small ring
        mask are modified; everything else keeps its input value."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 1, 1, 1, 1, 1, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]]
        mask = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 1, 0, 1, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_erosion(data, struct,
                                     border_value=1, mask=mask)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion35(self):
        """Masked erosion with output array and origin=(-1, -1); the
        expected result is composed from the unmasked erosion (tmp) inside
        the mask and the untouched input outside it."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        mask = [[0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 1, 0, 1, 0, 0],
                [0, 0, 1, 1, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 1, 1, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 1, 1, 0],
                            [0, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 1, 0, 0, 0]], bool)
        # tmp is the result of the equivalent unmasked erosion (see test 31).
        tmp = [[0, 0, 1, 0, 0, 0, 0],
               [0, 1, 1, 1, 0, 0, 0],
               [1, 1, 1, 1, 1, 0, 1],
               [0, 1, 1, 1, 0, 0, 0],
               [0, 0, 1, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 1, 0, 0, 0, 1]]
        expected = numpy.logical_and(tmp, mask)
        tmp = numpy.logical_and(data, numpy.logical_not(mask))
        expected = numpy.logical_or(expected, tmp)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_erosion(data, struct, border_value=1,
                               iterations=1, output=out,
                               origin=(-1, -1), mask=mask)
        assert_array_almost_equal(out, expected)
    def test_binary_erosion36(self):
        """Masked erosion with the hollow-cross structure and
        origin=(-1, -1); expected is composed from the unmasked result
        (tmp, see test 26) inside the mask and the input outside it."""
        struct = [[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]]
        mask = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 1, 0, 1, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 1, 1, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
        tmp = [[0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 1],
               [0, 0, 0, 0, 1, 0, 0, 1],
               [0, 0, 1, 0, 0, 0, 0, 0],
               [0, 1, 0, 0, 1, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 1]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 1, 1],
                            [0, 0, 1, 1, 1, 0, 1, 1],
                            [0, 0, 1, 0, 1, 1, 0, 0],
                            [0, 1, 0, 1, 1, 1, 1, 0],
                            [0, 1, 1, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]])
        expected = numpy.logical_and(tmp, mask)
        tmp = numpy.logical_and(data, numpy.logical_not(mask))
        expected = numpy.logical_or(expected, tmp)
        out = ndimage.binary_erosion(data, struct, mask=mask,
                                     border_value=1, origin=(-1, -1))
        assert_array_almost_equal(out, expected)
def test_binary_dilation01(self):
"binary dilation 1"
for type in self.types:
data = numpy.ones([], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, 1)
def test_binary_dilation02(self):
"binary dilation 2"
for type in self.types:
data = numpy.zeros([], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, 0)
def test_binary_dilation03(self):
"binary dilation 3"
for type in self.types:
data = numpy.ones([1], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1])
def test_binary_dilation04(self):
"binary dilation 4"
for type in self.types:
data = numpy.zeros([1], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [0])
def test_binary_dilation05(self):
"binary dilation 5"
for type in self.types:
data = numpy.ones([3], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1])
def test_binary_dilation06(self):
"binary dilation 6"
for type in self.types:
data = numpy.zeros([3], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [0, 0, 0])
def test_binary_dilation07(self):
"binary dilation 7"
struct = ndimage.generate_binary_structure(1, 1)
for type in self.types:
data = numpy.zeros([3], type)
data[1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1])
def test_binary_dilation08(self):
"binary dilation 8"
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
data[3] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1, 1, 1])
def test_binary_dilation09(self):
"binary dilation 9"
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1, 0, 0])
def test_binary_dilation10(self):
"binary dilation 10"
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
out = ndimage.binary_dilation(data, origin=-1)
assert_array_almost_equal(out, [0, 1, 1, 1, 0])
def test_binary_dilation11(self):
"binary dilation 11"
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
out = ndimage.binary_dilation(data, origin=1)
assert_array_almost_equal(out, [1, 1, 0, 0, 0])
def test_binary_dilation12(self):
"binary dilation 12"
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, [1, 0, 1, 0, 0])
def test_binary_dilation13(self):
"binary dilation 13"
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct,
border_value=1)
assert_array_almost_equal(out, [1, 0, 1, 0, 1])
def test_binary_dilation14(self):
"binary dilation 14"
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct,
origin=-1)
assert_array_almost_equal(out, [0, 1, 0, 1, 0])
def test_binary_dilation15(self):
"binary dilation 15"
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct,
origin=-1, border_value=1)
assert_array_almost_equal(out, [1, 1, 0, 1, 0])
def test_binary_dilation16(self):
"binary dilation 16"
for type in self.types:
data = numpy.ones([1, 1], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1]])
def test_binary_dilation17(self):
"binary dilation 17"
for type in self.types:
data = numpy.zeros([1, 1], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[0]])
def test_binary_dilation18(self):
"binary dilation 18"
for type in self.types:
data = numpy.ones([1, 3], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1, 1, 1]])
def test_binary_dilation19(self):
"binary dilation 19"
for type in self.types:
data = numpy.ones([3, 3], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
def test_binary_dilation20(self):
"binary dilation 20"
for type in self.types:
data = numpy.zeros([3, 3], type)
data[1, 1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
def test_binary_dilation21(self):
"binary dilation 21"
struct = ndimage.generate_binary_structure(2, 2)
for type in self.types:
data = numpy.zeros([3, 3], type)
data[1, 1] = 1
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
    def test_binary_dilation22(self):
        """Default-structure dilation of a sparse 8x8 pattern, for each
        dtype in self.types."""
        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 1, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_dilation(data)
            assert_array_almost_equal(out, expected)
    def test_binary_dilation23(self):
        """Same pattern as test 22 but with border_value=1, which also
        fills the rim of the array."""
        expected = [[1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 0, 0, 0, 0, 1],
                    [1, 1, 0, 0, 0, 1, 0, 1],
                    [1, 0, 0, 1, 1, 1, 1, 1],
                    [1, 0, 1, 1, 1, 1, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 0, 1, 0, 0, 1, 0, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1]]
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_dilation(data, border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_dilation24(self):
        """Same pattern as test 22 but with origin=(1, 1), shifting the
        dilated result up and to the left."""
        expected = [[1, 1, 0, 0, 0, 0, 0, 0],
                    [1, 0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 1, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_dilation(data, origin=(1, 1))
            assert_array_almost_equal(out, expected)
    def test_binary_dilation25(self):
        """Same pattern as test 24 (origin=(1, 1)) but additionally with
        border_value=1."""
        expected = [[1, 1, 0, 0, 0, 0, 1, 1],
                    [1, 0, 0, 0, 1, 0, 1, 1],
                    [0, 0, 1, 1, 1, 1, 1, 1],
                    [0, 1, 1, 1, 1, 0, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1],
                    [0, 1, 0, 0, 1, 0, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1]]
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_dilation(data, origin=(1, 1),
                                          border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_dilation26(self):
        """Dilation with the fully-connected 3x3 structure
        (generate_binary_structure(2, 2))."""
        struct = ndimage.generate_binary_structure(2, 2)
        expected = [[1, 1, 1, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 1, 1, 1, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_dilation(data, struct)
            assert_array_almost_equal(out, expected)
    def test_binary_dilation27(self):
        """Dilation with an asymmetric 2x2 L-shaped structure."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                    [1, 1, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 0, 1, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_dilation(data, struct)
            assert_array_almost_equal(out, expected)
def test_binary_dilation28(self):
"binary dilation 28"
expected = [[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], type)
out = ndimage.binary_dilation(data, border_value=1)
assert_array_almost_equal(out, expected)
    def test_binary_dilation29(self):
        """Two iterations of dilation with the L-shaped 2x2 structure grow
        a single pixel into a triangle."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0],
                    [0, 0, 1, 1, 0],
                    [0, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_dilation(data, struct,
                                      iterations=2)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation30(self):
        """Same as test 29 but writing into a preallocated output array."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0],
                    [0, 0, 1, 1, 0],
                    [0, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_dilation(data, struct, iterations=2,
                                output=out)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation31(self):
        """Three iterations of L-shaped dilation grow the triangle to the
        array edge."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 0, 0, 1, 0],
                    [0, 0, 1, 1, 0],
                    [0, 1, 1, 1, 0],
                    [1, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_dilation(data, struct,
                                      iterations=3)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation32(self):
        """Same as test 31 but writing into a preallocated output array."""
        struct = [[0, 1],
                  [1, 1]]
        expected = [[0, 0, 0, 1, 0],
                    [0, 0, 1, 1, 0],
                    [0, 1, 1, 1, 0],
                    [1, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0]]
        data = numpy.array([[0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0]], bool)
        out = numpy.zeros(data.shape, bool)
        ndimage.binary_dilation(data, struct, iterations=3,
                                output=out)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation33(self):
        """Masked dilation with iterations=-1 and border_value=0: growth
        is confined to pixels where mask is set."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 1, 1, 0, 1, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 1, 1, 0, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_dilation(data, struct,
                                      iterations=-1, mask=mask, border_value=0)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation34(self):
        """Masked dilation of an all-zero array with border_value=1 and
        iterations=-1: only the mask region connected to the border fills."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                    [0, 1, 1, 0, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 1, 1, 0, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.zeros(mask.shape, bool)
        out = ndimage.binary_dilation(data, struct,
                                      iterations=-1, mask=mask, border_value=1)
        assert_array_almost_equal(out, expected)
    def test_binary_dilation35(self):
        """Masked dilation with origin=(1, 1) and border_value=1; the
        expected result is composed from the unmasked dilation (tmp, see
        test 25) inside the mask and the untouched input outside it."""
        tmp = [[1, 1, 0, 0, 0, 0, 1, 1],
               [1, 0, 0, 0, 1, 0, 1, 1],
               [0, 0, 1, 1, 1, 1, 1, 1],
               [0, 1, 1, 1, 1, 0, 1, 1],
               [1, 1, 1, 1, 1, 1, 1, 1],
               [0, 1, 0, 0, 1, 0, 1, 1],
               [1, 1, 1, 1, 1, 1, 1, 1],
               [1, 1, 1, 1, 1, 1, 1, 1]]
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]])
        mask = [[0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 1, 1, 1, 0, 0],
                [0, 0, 1, 1, 1, 1, 0, 0],
                [0, 0, 1, 1, 1, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0]]
        expected = numpy.logical_and(tmp, mask)
        tmp = numpy.logical_and(data, numpy.logical_not(mask))
        expected = numpy.logical_or(expected, tmp)
        for type in self.types:
            data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_dilation(data, mask=mask,
                                          origin=(1, 1), border_value=1)
            assert_array_almost_equal(out, expected)
    def test_binary_propagation01(self):
        """binary_propagation with border_value=0 spreads the seed pixels
        through the mask; mirrors the iterations=-1 dilation of test 33."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 0, 0, 0],
                                [0, 1, 1, 0, 1, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 1, 1, 0, 0],
                            [0, 0, 1, 1, 1, 0, 0, 0],
                            [0, 1, 1, 0, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_propagation(data, struct,
                                         mask=mask, border_value=0)
        assert_array_almost_equal(out, expected)
    def test_binary_propagation02(self):
        """binary_propagation from an empty seed with border_value=1 fills
        the mask region connected to the border; mirrors test 34."""
        struct = [[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]]
        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                    [0, 1, 1, 0, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                            [0, 1, 1, 0, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.zeros(mask.shape, bool)
        out = ndimage.binary_propagation(data, struct,
                                         mask=mask, border_value=1)
        assert_array_almost_equal(out, expected)
    def test_binary_opening01(self):
        """binary_opening with the default structure removes isolated
        protrusions while preserving larger features."""
        expected = [[0, 1, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 1, 1, 1, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                                [1, 1, 1, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 1, 0],
                                [0, 0, 1, 1, 0, 1, 0, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_opening(data)
            assert_array_almost_equal(out, expected)
    def test_binary_opening02(self):
        """binary_opening with the fully-connected 3x3 structure keeps
        only regions that contain a full 3x3 square."""
        struct = ndimage.generate_binary_structure(2, 2)
        expected = [[1, 1, 1, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0],
                    [1, 1, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
                                [1, 1, 1, 0, 0, 0, 0, 0],
                                [1, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 1, 0, 1, 1, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_opening(data, struct)
            assert_array_almost_equal(out, expected)
    def test_binary_closing01(self):
        """binary_closing with the default structure fills small gaps and
        holes in the pattern."""
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 1, 1, 0, 0, 0, 0, 0],
                    [0, 1, 1, 1, 0, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0],
                    [0, 0, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 0, 1, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
                                [1, 1, 1, 0, 0, 0, 0, 0],
                                [0, 1, 0, 0, 0, 1, 0, 0],
                                [0, 0, 0, 1, 1, 1, 1, 0],
                                [0, 0, 1, 1, 0, 1, 0, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 1, 0, 0, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_closing(data)
            assert_array_almost_equal(out, expected)
    def test_binary_closing02(self):
        """binary_closing with the fully-connected 3x3 structure fills
        the interior hole and smooths the outline."""
        struct = ndimage.generate_binary_structure(2, 2)
        expected = [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 1, 1, 0, 0, 0, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 1, 1, 1, 1, 1, 1, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0]]
        for type in self.types:
            data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
                                [1, 1, 1, 0, 0, 0, 0, 0],
                                [1, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 1, 0, 1, 1, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 1, 1, 1, 1, 1, 1, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], type)
            out = ndimage.binary_closing(data, struct)
            assert_array_almost_equal(out, expected)
    def test_binary_fill_holes01(self):
        """binary_fill_holes fills the interior of a closed rectangle."""
        expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_fill_holes(data)
        assert_array_almost_equal(out, expected)
    def test_binary_fill_holes02(self):
        """binary_fill_holes fills the interior of a closed octagon-like
        outline (corners open diagonally but still 4-connected closed)."""
        expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 1, 1, 1, 1, 0, 0],
                                [0, 0, 0, 1, 1, 0, 0, 0],
                                [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 1, 0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0]], bool)
        out = ndimage.binary_fill_holes(data)
        assert_array_almost_equal(out, expected)
def test_binary_fill_holes03(self):
"binary fill holes 3"
expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 1, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 1, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_fill_holes(data)
assert_array_almost_equal(out, expected)
def test_grey_erosion01(self):
"grey erosion 1"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.grey_erosion(array,
footprint=footprint)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
def test_grey_erosion02(self):
"grey erosion 2"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
output = ndimage.grey_erosion(array,
footprint=footprint, structure=structure)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
def test_grey_erosion03(self):
"grey erosion 3"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[1, 1, 1], [1, 1, 1]]
output = ndimage.grey_erosion(array,
footprint=footprint, structure=structure)
assert_array_almost_equal([[1, 1, 0, 0, 0],
[1, 2, 0, 2, 0],
[4, 4, 2, 2, 0]], output)
def test_grey_dilation01(self):
"grey dilation 1"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
output = ndimage.grey_dilation(array,
footprint=footprint)
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
def test_grey_dilation02(self):
"grey dilation 2"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
structure = [[0, 0, 0], [0, 0, 0]]
output = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
def test_grey_dilation03(self):
"grey dilation 3"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
structure = [[1, 1, 1], [1, 1, 1]]
output = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
assert_array_almost_equal([[8, 8, 10, 10, 6],
[8, 10, 9, 10, 8],
[9, 9, 9, 8, 8]], output)
def test_grey_opening01(self):
"grey opening 1"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
tmp = ndimage.grey_erosion(array, footprint=footprint)
expected = ndimage.grey_dilation(tmp, footprint=footprint)
output = ndimage.grey_opening(array,
footprint=footprint)
assert_array_almost_equal(expected, output)
def test_grey_opening02(self):
"grey opening 2"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = ndimage.grey_dilation(tmp, footprint=footprint,
structure=structure)
output = ndimage.grey_opening(array,
footprint=footprint, structure=structure)
assert_array_almost_equal(expected, output)
def test_grey_closing01(self):
"grey closing 1"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
tmp = ndimage.grey_dilation(array, footprint=footprint)
expected = ndimage.grey_erosion(tmp, footprint=footprint)
output = ndimage.grey_closing(array,
footprint=footprint)
assert_array_almost_equal(expected, output)
def test_grey_closing02(self):
"grey closing 2"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
expected = ndimage.grey_erosion(tmp, footprint=footprint,
structure=structure)
output = ndimage.grey_closing(array,
footprint=footprint, structure=structure)
assert_array_almost_equal(expected, output)
def test_morphological_gradient01(self):
"morphological gradient 1"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 - tmp2
output = numpy.zeros(array.shape, array.dtype)
ndimage.morphological_gradient(array,
footprint=footprint, structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_morphological_gradient02(self):
"morphological gradient 2"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 - tmp2
output =ndimage.morphological_gradient(array,
footprint=footprint, structure=structure)
assert_array_almost_equal(expected, output)
def test_morphological_laplace01(self):
"morphological laplace 1"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 + tmp2 - 2 * array
output = numpy.zeros(array.shape, array.dtype)
ndimage.morphological_laplace(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_morphological_laplace02(self):
"morphological laplace 2"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 + tmp2 - 2 * array
output = ndimage.morphological_laplace(array,
footprint=footprint, structure=structure)
assert_array_almost_equal(expected, output)
def test_white_tophat01(self):
"white tophat 1"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
expected = array - tmp
output = numpy.zeros(array.shape, array.dtype)
ndimage.white_tophat(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_white_tophat02(self):
"white tophat 2"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
expected = array - tmp
output = ndimage.white_tophat(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_black_tophat01(self):
"black tophat 1"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
expected = tmp - array
output = numpy.zeros(array.shape, array.dtype)
ndimage.black_tophat(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_black_tophat02(self):
"black tophat 2"
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
expected = tmp - array
output = ndimage.black_tophat(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_hit_or_miss01(self):
"binary hit-or-miss transform 1"
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 0, 1, 1],
[0, 0, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 1],
[0, 1, 1, 1, 1],
[0, 0, 0, 0, 0]], type)
out = numpy.zeros(data.shape, bool)
ndimage.binary_hit_or_miss(data, struct,
output=out)
assert_array_almost_equal(expected, out)
def test_hit_or_miss02(self):
"binary hit-or-miss transform 2"
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 1, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_hit_or_miss(data, struct)
assert_array_almost_equal(expected, out)
def test_hit_or_miss03(self):
"binary hit-or-miss transform 3"
struct1 = [[0, 0, 0],
[1, 1, 1],
[0, 0, 0]]
struct2 = [[1, 1, 1],
[0, 0, 0],
[1, 1, 1]]
expected = [[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_hit_or_miss(data, struct1,
struct2)
assert_array_almost_equal(expected, out)
#class NDImageTestResult(unittest.TestResult):
# separator1 = '=' * 70 + '\n'
# separator2 = '-' * 70 + '\n'
#
# def __init__(self, stream, verbose):
# unittest.TestResult.__init__(self)
# self.stream = stream
# self.verbose = verbose
#
# def getDescription(self, test):
# return test.shortDescription() or str(test)
#
# def startTest(self, test):
# unittest.TestResult.startTest(self, test)
# if self.verbose:
# self.stream.write(self.getDescription(test))
# self.stream.write(" ... ")
#
# def addSuccess(self, test):
# unittest.TestResult.addSuccess(self, test)
# if self.verbose:
# self.stream.write("ok\n")
#
# def addError(self, test, err):
# unittest.TestResult.addError(self, test, err)
# if self.verbose:
# self.stream.write("ERROR\n")
#
# def addFailure(self, test, err):
# unittest.TestResult.addFailure(self, test, err)
# if self.verbose:
# self.stream.write("FAIL\n")
#
# def printErrors(self):
# self.printErrorList('ERROR', self.errors)
# self.printErrorList('FAIL', self.failures)
#
# def printErrorList(self, flavour, errors):
# for test, err in errors:
# self.stream.write(self.separator1)
# description = self.getDescription(test)
# self.stream.write("%s: %s\n" % (flavour, description))
# self.stream.write(self.separator2)
# self.stream.write(err)
#
#def test():
# if '-v' in sys.argv[1:]:
# verbose = 1
# else:
# verbose = 0
# suite = unittest.TestSuite()
# suite.addTest(unittest.makeSuite(NDImageTest))
# result = NDImageTestResult(sys.stdout, verbose)
# suite(result)
# result.printErrors()
# return len(result.failures), result.testsRun
# Allow the test module to be executed directly; delegates to the
# run_module_suite runner imported at the top of the file.
if __name__ == "__main__":
    run_module_suite()
|
RenatoGeh/labprog2 | refs/heads/master | mini-ep3/revdns.py | 1 | class Host:
def __init__(self, domain, header):
self.domain = domain[1:]
self.entries = {}
self.header = header
def add_entry(self, name, ip):
self.entries[int(ip.split('.')[-1])] = name
# Parsed zones, in input order.
hosts = []
# Becomes True once the first NS record is seen; lines before it are
# echoed through unchanged.
ignored = False
# Index into `hosts` of the zone currently being filled.
current = -1
while True:
    try:
        s = input()
        s_ = s  # keep the raw line for verbatim echo and zone headers
        s = s.strip()
        spl = s.split()
        # NOTE(review): split(' ') differs from split() when the line has
        # runs of spaces or tabs (spl_ may contain empty strings) --
        # confirm the distinction between spl and spl_ is intentional.
        spl_ = s.split(' ')
        if not ignored and (spl_[0] == "NS"):
            ignored = True
        elif not ignored:
            print(s_)
        if ignored:
            if spl_[0] == "NS":
                # An NS record starts a new zone; its second field is the
                # dotted domain name.
                hosts.append(Host(s.split()[1].split('.'), s_))
                current = current + 1
            if len(spl) > 2:
                # Three-field lines are host records: name ... ip
                h = hosts[current]
                h.add_entry(spl[0], spl[2])
    except(EOFError):
        break

# Emit each zone: header line first, then PTR records ordered by the
# final octet of the address.
for k in hosts:
    sorted_list = sorted(k.entries)
    print(k.header)
    for i in sorted_list:
        entry = k.entries[i]
        print(i, "PTR", end=' ')
        print(entry, *k.domain, sep='.', end=".\n")
|
usc-isi/nova | refs/heads/hpc-trunk | nova/tests/console/__init__.py | 210 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
from nova.tests import *
|
dalitun/os_image_factory | refs/heads/master | test-tools/pytesting_os_fe/cloudinit/__init__.py | 3 | import os, paramiko, time
import openstackutils
# Shared OpenStack helper used by setup() and teardown() below.
cwlib = openstackutils.OpenStackUtils()
# Populated by setup(), consumed by the tests and teardown().
test_resources = {}
def setup():
    """Provision the test fixture: port, keypair, floating IP and a VM
    booted with userdata.yml, then record everything in test_resources.

    The creation order matters (the port and floating IP must exist
    before the server is booted and associated), so keep the sequence.
    """
    global test_resources
    start_chrono = int(round(time.time() * 1000))
    port= cwlib.create_port_with_sg()
    keypair, private_key = cwlib.create_keypair()
    floating_ip = cwlib.create_floating_ip()
    #cwlib.associate_floating_ip_to_port(floating_ip)
    userdata_path = os.path.dirname(os.path.realpath(__file__)) + '/userdata.yml'
    server = cwlib.boot_vm_with_userdata_and_port(userdata_path,keypair,port)
    cwlib.associate_floating_ip_to_server(floating_ip, server)
    # Fixed wait for the instance to boot and cloud-init to run.
    # NOTE(review): an active poll on server status would be more robust
    # than a hard-coded 80 s sleep -- confirm before changing.
    time.sleep(80)
    test_resources['my_port']= port
    test_resources['my_keypair'] = keypair
    test_resources['my_floating'] = floating_ip
    test_resources['my_server'] = server
    test_resources['my_private_key'] = private_key
    test_resources['ssh_connection'] = cwlib.initiate_ssh(floating_ip,private_key)
    stop_chrono = int(round(time.time() * 1000))
    print "Setup 'cloudinit' testsuite in " + str(stop_chrono - start_chrono) + " ms"
def teardown():
    """Release the resources created by setup(), server first so the
    floating IP and keypair are no longer in use when deleted."""
    global test_resources
    cwlib.destroy_server(test_resources['my_server'])
    # Fixed wait for the server deletion to complete before releasing
    # the resources attached to it.
    time.sleep(60)
    cwlib.delete_floating_ip(test_resources['my_floating'])
    cwlib.delete_keypair(test_resources['my_keypair'],test_resources['my_private_key'])
|
caktus/rapidsms | refs/heads/master | rapidsms/router/db/admin.py | 7 | from django.contrib import admin
from rapidsms.router.db.models import Message, Transmission
class MessageAdmin(admin.ModelAdmin):
    """Admin list configuration for router Message records."""
    # Most recently updated messages first.
    ordering = ('-updated',)
    list_display = ('id', 'date', 'direction', 'text', 'status', 'updated',
                    'sent', 'delivered')
    list_filter = ('direction', 'status',)
    search_fields = ('text',)
class TransmissionAdmin(admin.ModelAdmin):
    """Admin list configuration for router Transmission records."""
    # Most recently updated transmissions first.
    ordering = ('-updated',)
    list_display = ('id', 'date', 'message', 'status', 'connection', 'updated',
                    'sent', 'delivered')
    list_filter = ('status',)
    # Raw-id widgets avoid loading every related row in a dropdown.
    raw_id_fields = ('message', 'connection',)
    search_fields = ('message__text',)
# Expose both router models in the Django admin site.
admin.site.register(Message, MessageAdmin)
admin.site.register(Transmission, TransmissionAdmin)
|
weisongchen/flaskapp | refs/heads/master | venv/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/__init__.py | 32 | # mysql/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector, pymysql,\
gaerdbms, cymysql
# default dialect
# Bind the generic MySQL dialect entry point to the mysqldb driver
# implementation imported above.
base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
DECIMAL, DOUBLE, ENUM, DECIMAL,\
FLOAT, INTEGER, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
VARBINARY, VARCHAR, YEAR, dialect
# Public API of the dialect package.  The original tuple listed
# 'DECIMAL' and 'INTEGER' twice; the duplicates are removed here, which
# leaves the exported name set unchanged.
__all__ = (
    'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
    'DECIMAL', 'DOUBLE', 'ENUM', 'FLOAT', 'INTEGER',
    'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
    'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
    'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
    'YEAR', 'dialect'
)
|
factorlibre/OCB | refs/heads/8.0 | addons/hr_payroll_account/wizard/hr_payroll_payslips_by_employees.py | 337 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class hr_payslip_employees(osv.osv_memory):
    _inherit = 'hr.payslip.employees'

    def compute_sheet(self, cr, uid, ids, context=None):
        """Inject the payslip run's journal_id into the context, then
        delegate the actual payslip computation to the parent wizard."""
        run_pool = self.pool.get('hr.payslip.run')
        if context is None:
            context = {}
        active_id = context.get('active_id')
        if active_id:
            run_data = run_pool.read(cr, uid, active_id, ['journal_id'])
            journal = run_data.get('journal_id')
            # read() returns a (id, name) pair for many2one fields.
            journal_id = journal and journal[0] or False
            if journal_id:
                # Copy the context instead of mutating the caller's dict.
                context = dict(context, journal_id=journal_id)
        return super(hr_payslip_employees, self).compute_sheet(
            cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mohierf/shinken | refs/heads/master | test/test_parse_logevent.py | 18 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 - Savoir-Faire Linux inc.
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from shinken_test import *
from shinken.misc.logevent import LogEvent
class TestParseLogEvent(ShinkenTest):
    """Unit tests for shinken.misc.logevent.LogEvent.

    Each test feeds one raw monitoring log line to LogEvent and compares
    the parsed `data` dict field by field.  The two flapping tests were
    originally mis-named (test_host_flapping parsed a SERVICE line and
    vice versa); the names now match the log lines they exercise.
    """

    def test_notification_service(self):
        """A SERVICE NOTIFICATION line is split into its components."""
        log = '[1402515279] SERVICE NOTIFICATION: admin;localhost;check-ssh;CRITICAL;notify-service-by-email;Connection refused'
        expected = {
            'hostname': 'localhost',
            'event_type': 'NOTIFICATION',
            'service_desc': 'check-ssh',
            'state': 'CRITICAL',
            'contact': 'admin',
            'time': 1402515279,
            'notification_method': 'notify-service-by-email',
            'notification_type': 'SERVICE',
            'output': 'Connection refused',
        }
        event = LogEvent(log)
        self.assertEqual(event.data, expected)

    def test_notification_host(self):
        """A HOST NOTIFICATION line parses with service_desc None."""
        log = '[1402515279] HOST NOTIFICATION: admin;localhost;CRITICAL;notify-service-by-email;Connection refused'
        expected = {
            'hostname': 'localhost',
            'event_type': 'NOTIFICATION',
            'service_desc': None,
            'state': 'CRITICAL',
            'contact': 'admin',
            'time': 1402515279,
            'notification_method': 'notify-service-by-email',
            'notification_type': 'HOST',
            'output': 'Connection refused',
        }
        event = LogEvent(log)
        self.assertEqual(event.data, expected)

    def test_alert_service(self):
        """A SERVICE ALERT line yields state, state_type and attempts."""
        log = '[1329144231] SERVICE ALERT: dfw01-is02-006;cpu load maui;WARNING;HARD;4;WARNING - load average: 5.04, 4.67, 5.04'
        expected = {
            'alert_type': 'SERVICE',
            'event_type': 'ALERT',
            'service_desc': 'cpu load maui',
            'attempts': 4,
            'state_type': 'HARD',
            'state': 'WARNING',
            'time': 1329144231,
            'output': 'WARNING - load average: 5.04, 4.67, 5.04',
            'hostname': 'dfw01-is02-006'
        }
        event = LogEvent(log)
        self.assertEqual(event.data, expected)

    def test_alert_host(self):
        """A HOST ALERT line parses with service_desc None."""
        log = '[1329144231] HOST ALERT: dfw01-is02-006;WARNING;HARD;4;WARNING - load average: 5.04, 4.67, 5.04'
        expected = {
            'alert_type': 'HOST',
            'event_type': 'ALERT',
            'service_desc': None,
            'attempts': 4,
            'state_type': 'HARD',
            'state': 'WARNING',
            'time': 1329144231,
            'output': 'WARNING - load average: 5.04, 4.67, 5.04',
            'hostname': 'dfw01-is02-006'
        }
        event = LogEvent(log)
        self.assertEqual(event.data, expected)

    def test_downtime_alert_host(self):
        """A HOST DOWNTIME ALERT line yields downtime_type HOST."""
        log = '[1279250211] HOST DOWNTIME ALERT: maast64;STARTED; Host has entered a period of scheduled downtime'
        expected = {
            'event_type': 'DOWNTIME',
            'hostname': 'maast64',
            'state': 'STARTED',
            'time': 1279250211,
            'output': ' Host has entered a period of scheduled downtime',
            'downtime_type': 'HOST'
        }
        event = LogEvent(log)
        self.assertEqual(event.data, expected)

    def test_service_flapping(self):
        """A SERVICE FLAPPING ALERT line yields alert_type SERVICE."""
        log = '[1375301662] SERVICE FLAPPING ALERT: testhost;check_ssh;STARTED; Service appears to have started flapping (24.2% change >= 20.0% threshold)'
        expected = {
            'alert_type': 'SERVICE',
            'event_type': 'FLAPPING',
            'hostname': 'testhost',
            'output': ' Service appears to have started flapping (24.2% change >= 20.0% threshold)',
            'service_desc': 'check_ssh',
            'state': 'STARTED',
            'time': 1375301662
        }
        event = LogEvent(log)
        self.assertEqual(event.data, expected)

    def test_host_flapping(self):
        """A HOST FLAPPING ALERT line yields alert_type HOST."""
        log = '[1375301662] HOST FLAPPING ALERT: hostbw;STARTED; Host appears to have started flapping (20.1% change > 20.0% threshold)'
        expected = {
            'alert_type': 'HOST',
            'event_type': 'FLAPPING',
            'hostname': 'hostbw',
            'output': ' Host appears to have started flapping (20.1% change > 20.0% threshold)',
            'service_desc': None,
            'state': 'STARTED',
            'time': 1375301662
        }
        event = LogEvent(log)
        self.assertEqual(event.data, expected)
# Allow the test module to be executed directly with unittest.
if __name__ == '__main__':
    unittest.main()
|
ccnmtl/lettuce | refs/heads/master | tests/integration/lib/Django-1.2.5/django/conf/locale/tr/formats.py | 80 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Display formats (Django date-format syntax) for the Turkish locale.
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'd F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'd F'
SHORT_DATE_FORMAT = 'd M Y'
SHORT_DATETIME_FORMAT = 'd M Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Pazartesi

# Accepted input formats (strptime syntax), tried in order.
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
    '%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
    # '%d %B %Y', '%d %b. %Y', # '25 Ekim 2006', '25 Eki. 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M', # '25/10/2006 14:30'
    '%d/%m/%Y', # '25/10/2006'
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
)

# Turkish number formatting: comma decimal mark, dot thousands separator.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
technologiescollege/s2a_fr | refs/heads/portable | s2a/Python/Lib/test/test_shutil.py | 46 | # Copyright (C) 2003 Python Software Foundation
import unittest
import shutil
import tempfile
import sys
import stat
import os
import os.path
import errno
from os.path import splitdrive
from distutils.spawn import find_executable, spawn
from shutil import (_make_tarball, _make_zipfile, make_archive,
register_archive_format, unregister_archive_format,
get_archive_formats)
import tarfile
import warnings
from test import test_support
from test.test_support import TESTFN, check_warnings, captured_stdout
TESTFN2 = TESTFN + "2"
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import zlib
except ImportError:
zlib = None
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
class TestShutil(unittest.TestCase):
    def setUp(self):
        # Track directories created by mkdtemp() so tearDown can remove them.
        super(TestShutil, self).setUp()
        self.tempdirs = []
def tearDown(self):
super(TestShutil, self).tearDown()
while self.tempdirs:
d = self.tempdirs.pop()
shutil.rmtree(d, os.name in ('nt', 'cygwin'))
def write_file(self, path, content='xxx'):
"""Writes a file in the given path.
path can be a string or a sequence.
"""
if isinstance(path, (list, tuple)):
path = os.path.join(*path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
def mkdtemp(self):
"""Create a temporary directory that will be cleaned up.
Returns the path of the directory.
"""
d = tempfile.mkdtemp()
self.tempdirs.append(d)
return d
def test_rmtree_errors(self):
# filename is guaranteed not to exist
filename = tempfile.mktemp()
self.assertRaises(OSError, shutil.rmtree, filename)
    # See bug #1071513 for why we don't run this on cygwin
    # and bug #1076467 for why we don't run this as root.
    if (hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin'
        and not (hasattr(os, 'geteuid') and os.geteuid() == 0)):
        def test_on_error(self):
            """rmtree's onerror callback must fire for unwritable trees."""
            # errorState is advanced by check_args_to_onerror below:
            # 0 -> 1 on the first failure, 1 -> 2 on the rmdir failure.
            self.errorState = 0
            os.mkdir(TESTFN)
            self.childpath = os.path.join(TESTFN, 'a')
            f = open(self.childpath, 'w')
            f.close()
            old_dir_mode = os.stat(TESTFN).st_mode
            old_child_mode = os.stat(self.childpath).st_mode
            # Make unwritable.
            os.chmod(self.childpath, stat.S_IREAD)
            os.chmod(TESTFN, stat.S_IREAD)
            shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
            # Test whether onerror has actually been called.
            self.assertEqual(self.errorState, 2,
                             "Expected call to onerror function did not happen.")
            # Make writable again.
            os.chmod(TESTFN, old_dir_mode)
            os.chmod(self.childpath, old_child_mode)
            # Clean up.
            shutil.rmtree(TESTFN)

        def check_args_to_onerror(self, func, arg, exc):
            """onerror callback: validate the (func, arg, exc) arguments."""
            # test_rmtree_errors deliberately runs rmtree
            # on a directory that is chmod 400, which will fail.
            # This function is run when shutil.rmtree fails.
            # 99.9% of the time it initially fails to remove
            # a file in the directory, so the first time through
            # func is os.remove.
            # However, some Linux machines running ZFS on
            # FUSE experienced a failure earlier in the process
            # at os.listdir.  The first failure may legally
            # be either.
            if self.errorState == 0:
                if func is os.remove:
                    self.assertEqual(arg, self.childpath)
                else:
                    self.assertIs(func, os.listdir,
                                  "func must be either os.remove or os.listdir")
                    self.assertEqual(arg, TESTFN)
                self.assertTrue(issubclass(exc[0], OSError))
                self.errorState = 1
            else:
                self.assertEqual(func, os.rmdir)
                self.assertEqual(arg, TESTFN)
                self.assertTrue(issubclass(exc[0], OSError))
                self.errorState = 2
def test_rmtree_dont_delete_file(self):
# When called on a file instead of a directory, don't delete it.
handle, path = tempfile.mkstemp()
os.fdopen(handle).close()
self.assertRaises(OSError, shutil.rmtree, path)
os.remove(path)
    def test_copytree_simple(self):
        """copytree must replicate files and nested directories with content."""
        def write_data(path, data):
            f = open(path, "w")
            f.write(data)
            f.close()

        def read_data(path):
            f = open(path)
            data = f.read()
            f.close()
            return data

        # Source tree: test.txt plus test_dir/test.txt.
        src_dir = tempfile.mkdtemp()
        dst_dir = os.path.join(tempfile.mkdtemp(), 'destination')
        write_data(os.path.join(src_dir, 'test.txt'), '123')
        os.mkdir(os.path.join(src_dir, 'test_dir'))
        write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
        try:
            shutil.copytree(src_dir, dst_dir)
            self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test.txt')))
            self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'test_dir')))
            self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test_dir',
                                                        'test.txt')))
            actual = read_data(os.path.join(dst_dir, 'test.txt'))
            self.assertEqual(actual, '123')
            actual = read_data(os.path.join(dst_dir, 'test_dir', 'test.txt'))
            self.assertEqual(actual, '456')
        finally:
            # Remove files first, then the directories, ignoring anything
            # that was never created because copytree failed early.
            for path in (
                os.path.join(src_dir, 'test.txt'),
                os.path.join(dst_dir, 'test.txt'),
                os.path.join(src_dir, 'test_dir', 'test.txt'),
                os.path.join(dst_dir, 'test_dir', 'test.txt'),
               ):
                if os.path.exists(path):
                    os.remove(path)
            for path in (src_dir,
                    os.path.dirname(dst_dir)
                ):
                if os.path.exists(path):
                    shutil.rmtree(path)
    def test_copytree_with_exclude(self):
        """copytree's ignore= hook: glob patterns and a custom callable."""
        def write_data(path, data):
            f = open(path, "w")
            f.write(data)
            f.close()

        def read_data(path):
            f = open(path)
            data = f.read()
            f.close()
            return data

        # creating data
        join = os.path.join
        exists = os.path.exists
        src_dir = tempfile.mkdtemp()
        try:
            dst_dir = join(tempfile.mkdtemp(), 'destination')
            write_data(join(src_dir, 'test.txt'), '123')
            write_data(join(src_dir, 'test.tmp'), '123')
            os.mkdir(join(src_dir, 'test_dir'))
            write_data(join(src_dir, 'test_dir', 'test.txt'), '456')
            os.mkdir(join(src_dir, 'test_dir2'))
            write_data(join(src_dir, 'test_dir2', 'test.txt'), '456')
            os.mkdir(join(src_dir, 'test_dir2', 'subdir'))
            os.mkdir(join(src_dir, 'test_dir2', 'subdir2'))
            write_data(join(src_dir, 'test_dir2', 'subdir', 'test.txt'), '456')
            write_data(join(src_dir, 'test_dir2', 'subdir2', 'test.py'), '456')

            # testing glob-like patterns
            try:
                patterns = shutil.ignore_patterns('*.tmp', 'test_dir2')
                shutil.copytree(src_dir, dst_dir, ignore=patterns)
                # checking the result: some elements should not be copied
                self.assertTrue(exists(join(dst_dir, 'test.txt')))
                self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
                self.assertTrue(not exists(join(dst_dir, 'test_dir2')))
            finally:
                if os.path.exists(dst_dir):
                    shutil.rmtree(dst_dir)
            try:
                patterns = shutil.ignore_patterns('*.tmp', 'subdir*')
                shutil.copytree(src_dir, dst_dir, ignore=patterns)
                # checking the result: some elements should not be copied
                self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
                self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2')))
                self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
            finally:
                if os.path.exists(dst_dir):
                    shutil.rmtree(dst_dir)

            # testing callable-style
            try:
                def _filter(src, names):
                    res = []
                    for name in names:
                        path = os.path.join(src, name)
                        # NOTE(review): path.split()[-1] splits on
                        # whitespace, and `in ('.py')` is a substring test
                        # against the string '.py' (not tuple membership,
                        # so an empty extension also matches) -- confirm
                        # both are intentional before relying on them.
                        if (os.path.isdir(path) and
                            path.split()[-1] == 'subdir'):
                            res.append(name)
                        elif os.path.splitext(path)[-1] in ('.py'):
                            res.append(name)
                    return res
                shutil.copytree(src_dir, dst_dir, ignore=_filter)
                # checking the result: some elements should not be copied
                self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2',
                                        'test.py')))
                self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
            finally:
                if os.path.exists(dst_dir):
                    shutil.rmtree(dst_dir)
        finally:
            shutil.rmtree(src_dir)
            shutil.rmtree(os.path.dirname(dst_dir))
    if hasattr(os, "symlink"):
        def test_dont_copy_file_onto_link_to_itself(self):
            """copyfile must refuse to copy a file onto a link to itself."""
            # bug 851123.
            os.mkdir(TESTFN)
            src = os.path.join(TESTFN, 'cheese')
            dst = os.path.join(TESTFN, 'shop')
            try:
                f = open(src, 'w')
                f.write('cheddar')
                f.close()
                # First with a hard link, then with a symlink.
                os.link(src, dst)
                self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
                with open(src, 'r') as f:
                    self.assertEqual(f.read(), 'cheddar')
                os.remove(dst)
                # Using `src` here would mean we end up with a symlink pointing
                # to TESTFN/TESTFN/cheese, while it should point at
                # TESTFN/cheese.
                os.symlink('cheese', dst)
                self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
                with open(src, 'r') as f:
                    self.assertEqual(f.read(), 'cheddar')
                os.remove(dst)
            finally:
                try:
                    shutil.rmtree(TESTFN)
                except OSError:
                    pass

        def test_rmtree_on_symlink(self):
            """rmtree must refuse to operate on a symlink to a directory."""
            # bug 1669.
            os.mkdir(TESTFN)
            try:
                src = os.path.join(TESTFN, 'cheese')
                dst = os.path.join(TESTFN, 'shop')
                os.mkdir(src)
                os.symlink(src, dst)
                self.assertRaises(OSError, shutil.rmtree, dst)
            finally:
                shutil.rmtree(TESTFN, ignore_errors=True)
    if hasattr(os, "mkfifo"):
        # Issue #3002: copyfile and copytree block indefinitely on named pipes
        def test_copyfile_named_pipe(self):
            """copyfile must raise SpecialFileError for named pipes."""
            os.mkfifo(TESTFN)
            try:
                # Pipe as source, then pipe as destination.
                self.assertRaises(shutil.SpecialFileError,
                                  shutil.copyfile, TESTFN, TESTFN2)
                self.assertRaises(shutil.SpecialFileError,
                                  shutil.copyfile, __file__, TESTFN)
            finally:
                os.remove(TESTFN)

        def test_copytree_named_pipe(self):
            """copytree must report a named pipe via shutil.Error."""
            os.mkdir(TESTFN)
            try:
                subdir = os.path.join(TESTFN, "subdir")
                os.mkdir(subdir)
                pipe = os.path.join(subdir, "mypipe")
                os.mkfifo(pipe)
                try:
                    shutil.copytree(TESTFN, TESTFN2)
                except shutil.Error as e:
                    # copytree collects per-file errors in e.args[0].
                    errors = e.args[0]
                    self.assertEqual(len(errors), 1)
                    src, dst, error_msg = errors[0]
                    self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
                else:
                    self.fail("shutil.Error should have been raised")
            finally:
                shutil.rmtree(TESTFN, ignore_errors=True)
                shutil.rmtree(TESTFN2, ignore_errors=True)
    @unittest.skipUnless(hasattr(os, 'chflags') and
                         hasattr(errno, 'EOPNOTSUPP') and
                         hasattr(errno, 'ENOTSUP'),
                         "requires os.chflags, EOPNOTSUPP & ENOTSUP")
    def test_copystat_handles_harmless_chflags_errors(self):
        """copystat() must swallow EOPNOTSUPP/ENOTSUP from os.chflags (the
        filesystem simply doesn't support flags) but propagate other errnos."""
        tmpdir = self.mkdtemp()
        file1 = os.path.join(tmpdir, 'file1')
        file2 = os.path.join(tmpdir, 'file2')
        self.write_file(file1, 'xxx')
        self.write_file(file2, 'xxx')
        def make_chflags_raiser(err):
            # Returns a fake os.chflags that always raises OSError(err).
            ex = OSError()
            def _chflags_raiser(path, flags):
                ex.errno = err
                raise ex
            return _chflags_raiser
        old_chflags = os.chflags
        try:
            # The two "unsupported" errnos must be treated as harmless.
            for err in errno.EOPNOTSUPP, errno.ENOTSUP:
                os.chflags = make_chflags_raiser(err)
                shutil.copystat(file1, file2)
            # assert others errors break it
            # (the sum is just an arbitrary errno that is neither of the two)
            os.chflags = make_chflags_raiser(errno.EOPNOTSUPP + errno.ENOTSUP)
            self.assertRaises(OSError, shutil.copystat, file1, file2)
        finally:
            os.chflags = old_chflags
@unittest.skipUnless(zlib, "requires zlib")
def test_make_tarball(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
os.mkdir(os.path.join(tmpdir, 'sub'))
self.write_file([tmpdir, 'sub', 'file3'], 'xxx')
tmpdir2 = self.mkdtemp()
# force shutil to create the directory
os.rmdir(tmpdir2)
unittest.skipUnless(splitdrive(tmpdir)[0] == splitdrive(tmpdir2)[0],
"source and target should be on same drive")
base_name = os.path.join(tmpdir2, 'archive')
# working with relative paths to avoid tar warnings
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return tuple(names)
finally:
tar.close()
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
self.write_file([dist, 'file1'], 'xxx')
self.write_file([dist, 'file2'], 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
self.write_file([dist, 'sub', 'file3'], 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
return tmpdir, tmpdir2, base_name
    @unittest.skipUnless(zlib, "Requires zlib")
    @unittest.skipUnless(find_executable('tar') and find_executable('gzip'),
                         'Need the tar command to run')
    def test_tarfile_vs_tar(self):
        """An archive produced by _make_tarball must contain the same member
        list as one produced by the external `tar` + `gzip` commands; also
        exercises compress=None and dry_run=True code paths."""
        tmpdir, tmpdir2, base_name = self._create_files()
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            _make_tarball(base_name, 'dist')
        finally:
            os.chdir(old_dir)
        # check if the compressed tarball was created
        tarball = base_name + '.tar.gz'
        self.assertTrue(os.path.exists(tarball))
        # now create another tarball using `tar`
        tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
        tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
        gzip_cmd = ['gzip', '-f9', 'archive2.tar']
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            # capture stdout so spawn() output doesn't pollute the test run
            with captured_stdout() as s:
                spawn(tar_cmd)
                spawn(gzip_cmd)
        finally:
            os.chdir(old_dir)
        self.assertTrue(os.path.exists(tarball2))
        # let's compare both tarballs
        self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
        # trying an uncompressed one
        base_name = os.path.join(tmpdir2, 'archive')
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            _make_tarball(base_name, 'dist', compress=None)
        finally:
            os.chdir(old_dir)
        tarball = base_name + '.tar'
        self.assertTrue(os.path.exists(tarball))
        # now for a dry_run
        base_name = os.path.join(tmpdir2, 'archive')
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            # dry_run=True: the .tar from the previous step must still exist
            # and no new work is performed.
            _make_tarball(base_name, 'dist', compress=None, dry_run=True)
        finally:
            os.chdir(old_dir)
        tarball = base_name + '.tar'
        self.assertTrue(os.path.exists(tarball))
    @unittest.skipUnless(zlib, "Requires zlib")
    @unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
    def test_make_zipfile(self):
        """_make_zipfile must create <base_name>.zip from a directory."""
        # creating something to tar
        tmpdir = self.mkdtemp()
        self.write_file([tmpdir, 'file1'], 'xxx')
        self.write_file([tmpdir, 'file2'], 'xxx')
        tmpdir2 = self.mkdtemp()
        # force shutil to create the directory
        os.rmdir(tmpdir2)
        base_name = os.path.join(tmpdir2, 'archive')
        _make_zipfile(base_name, tmpdir)
        # check if the compressed tarball was created
        tarball = base_name + '.zip'
        self.assertTrue(os.path.exists(tarball))
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
    @unittest.skipUnless(zlib, "Requires zlib")
    def test_make_archive_owner_group(self):
        """make_archive() must accept owner/group arguments for both zip and
        tar formats without failing, whether or not the names are valid."""
        # testing make_archive with owner and group, with various combinations
        # this works even if there's not gid/uid support
        if UID_GID_SUPPORT:
            group = grp.getgrgid(0)[0]
            owner = pwd.getpwuid(0)[0]
        else:
            group = owner = 'root'
        base_dir, root_dir, base_name = self._create_files()
        base_name = os.path.join(self.mkdtemp() , 'archive')
        res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
                           group=group)
        self.assertTrue(os.path.exists(res))
        res = make_archive(base_name, 'zip', root_dir, base_dir)
        self.assertTrue(os.path.exists(res))
        res = make_archive(base_name, 'tar', root_dir, base_dir,
                           owner=owner, group=group)
        self.assertTrue(os.path.exists(res))
        # bogus names: make_archive silently falls back, must still succeed
        res = make_archive(base_name, 'tar', root_dir, base_dir,
                           owner='kjhkjhkjg', group='oihohoh')
        self.assertTrue(os.path.exists(res))
    @unittest.skipUnless(zlib, "Requires zlib")
    @unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
    def test_tarfile_root_owner(self):
        """_make_tarball(owner=<uid0 name>, group=<gid0 name>) must record
        uid/gid 0 on every member of the resulting archive."""
        tmpdir, tmpdir2, base_name = self._create_files()
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        group = grp.getgrgid(0)[0]
        owner = pwd.getpwuid(0)[0]
        try:
            archive_name = _make_tarball(base_name, 'dist', compress=None,
                                         owner=owner, group=group)
        finally:
            os.chdir(old_dir)
        # check if the compressed tarball was created
        self.assertTrue(os.path.exists(archive_name))
        # now checks the rights
        archive = tarfile.open(archive_name)
        try:
            for member in archive.getmembers():
                self.assertEqual(member.uid, 0)
                self.assertEqual(member.gid, 0)
        finally:
            archive.close()
    def test_make_archive_cwd(self):
        """make_archive() must restore the current working directory even
        when the registered archiver callback raises."""
        current_dir = os.getcwd()
        def _breaks(*args, **kw):
            # archiver that always fails
            raise RuntimeError()
        register_archive_format('xxx', _breaks, [], 'xxx file')
        try:
            try:
                make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
            except Exception:
                # the failure itself is expected; only the cwd matters here
                pass
            self.assertEqual(os.getcwd(), current_dir)
        finally:
            unregister_archive_format('xxx')
    def test_register_archive_format(self):
        """register_archive_format() must validate its arguments, and
        registration/unregistration must be reflected by get_archive_formats()."""
        # function must be callable
        self.assertRaises(TypeError, register_archive_format, 'xxx', 1)
        # extra_args must be a sequence of (name, value) pairs
        self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
                          1)
        self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
                          [(1, 2), (1, 2, 3)])
        register_archive_format('xxx', lambda: x, [(1, 2)], 'xxx file')
        formats = [name for name, params in get_archive_formats()]
        self.assertIn('xxx', formats)
        unregister_archive_format('xxx')
        formats = [name for name, params in get_archive_formats()]
        self.assertNotIn('xxx', formats)
class TestMove(unittest.TestCase):
    """Tests for shutil.move() across files, directories and filesystems."""
    def setUp(self):
        """Create a source file plus same-fs and (best effort) other-fs
        destination directories."""
        filename = "foo"
        self.src_dir = tempfile.mkdtemp()
        self.dst_dir = tempfile.mkdtemp()
        self.src_file = os.path.join(self.src_dir, filename)
        self.dst_file = os.path.join(self.dst_dir, filename)
        # Try to create a dir in the current directory, hoping that it is
        # not located on the same filesystem as the system tmp dir.
        try:
            self.dir_other_fs = tempfile.mkdtemp(
                dir=os.path.dirname(__file__))
            self.file_other_fs = os.path.join(self.dir_other_fs,
                filename)
        except OSError:
            # None acts as the "no other filesystem available" sentinel;
            # dependent tests skip themselves when they see it.
            self.dir_other_fs = None
        with open(self.src_file, "wb") as f:
            f.write("spam")
    def tearDown(self):
        """Best-effort cleanup; errors are deliberately ignored since a
        test may already have moved/removed some of these directories."""
        for d in (self.src_dir, self.dst_dir, self.dir_other_fs):
            try:
                if d:
                    shutil.rmtree(d)
            except:
                pass
    def _check_move_file(self, src, dst, real_dst):
        """Move src to dst and verify content landed at real_dst and src is gone."""
        with open(src, "rb") as f:
            contents = f.read()
        shutil.move(src, dst)
        with open(real_dst, "rb") as f:
            self.assertEqual(contents, f.read())
        self.assertFalse(os.path.exists(src))
    def _check_move_dir(self, src, dst, real_dst):
        """Move directory src to dst and verify its listing at real_dst."""
        contents = sorted(os.listdir(src))
        shutil.move(src, dst)
        self.assertEqual(contents, sorted(os.listdir(real_dst)))
        self.assertFalse(os.path.exists(src))
    def test_move_file(self):
        # Move a file to another location on the same filesystem.
        self._check_move_file(self.src_file, self.dst_file, self.dst_file)
    def test_move_file_to_dir(self):
        # Move a file inside an existing dir on the same filesystem.
        self._check_move_file(self.src_file, self.dst_dir, self.dst_file)
    def test_move_file_other_fs(self):
        # Move a file to an existing dir on another filesystem.
        if not self.dir_other_fs:
            # skip
            return
        self._check_move_file(self.src_file, self.file_other_fs,
            self.file_other_fs)
    def test_move_file_to_dir_other_fs(self):
        # Move a file to another location on another filesystem.
        if not self.dir_other_fs:
            # skip
            return
        self._check_move_file(self.src_file, self.dir_other_fs,
            self.file_other_fs)
    def test_move_dir(self):
        # Move a dir to another location on the same filesystem.
        dst_dir = tempfile.mktemp()
        try:
            self._check_move_dir(self.src_dir, dst_dir, dst_dir)
        finally:
            try:
                shutil.rmtree(dst_dir)
            except:
                pass
    def test_move_dir_other_fs(self):
        # Move a dir to another location on another filesystem.
        if not self.dir_other_fs:
            # skip
            return
        dst_dir = tempfile.mktemp(dir=self.dir_other_fs)
        try:
            self._check_move_dir(self.src_dir, dst_dir, dst_dir)
        finally:
            try:
                shutil.rmtree(dst_dir)
            except:
                pass
    def test_move_dir_to_dir(self):
        # Move a dir inside an existing dir on the same filesystem.
        self._check_move_dir(self.src_dir, self.dst_dir,
            os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
    def test_move_dir_to_dir_other_fs(self):
        # Move a dir inside an existing dir on another filesystem.
        if not self.dir_other_fs:
            # skip
            return
        self._check_move_dir(self.src_dir, self.dir_other_fs,
            os.path.join(self.dir_other_fs, os.path.basename(self.src_dir)))
    def test_existing_file_inside_dest_dir(self):
        # A file with the same name inside the destination dir already exists.
        with open(self.dst_file, "wb"):
            pass
        self.assertRaises(shutil.Error, shutil.move, self.src_file, self.dst_dir)
    def test_dont_move_dir_in_itself(self):
        # Moving a dir inside itself raises an Error.
        dst = os.path.join(self.src_dir, "bar")
        self.assertRaises(shutil.Error, shutil.move, self.src_dir, dst)
    def test_destinsrc_false_negative(self):
        """_destinsrc must recognize a dst nested inside src."""
        os.mkdir(TESTFN)
        try:
            for src, dst in [('srcdir', 'srcdir/dest')]:
                src = os.path.join(TESTFN, src)
                dst = os.path.join(TESTFN, dst)
                self.assertTrue(shutil._destinsrc(src, dst),
                             msg='_destinsrc() wrongly concluded that '
                             'dst (%s) is not in src (%s)' % (dst, src))
        finally:
            shutil.rmtree(TESTFN, ignore_errors=True)
    def test_destinsrc_false_positive(self):
        """_destinsrc must not be fooled by shared path prefixes
        (e.g. 'srcdir' vs 'srcdir.new')."""
        os.mkdir(TESTFN)
        try:
            for src, dst in [('srcdir', 'src/dest'), ('srcdir', 'srcdir.new')]:
                src = os.path.join(TESTFN, src)
                dst = os.path.join(TESTFN, dst)
                self.assertFalse(shutil._destinsrc(src, dst),
                            msg='_destinsrc() wrongly concluded that '
                            'dst (%s) is in src (%s)' % (dst, src))
        finally:
            shutil.rmtree(TESTFN, ignore_errors=True)
class TestCopyFile(unittest.TestCase):
    """Tests for copyfile()'s file-handle lifecycle, driven by monkeypatching
    the `open` used inside the shutil module with Faux file objects."""
    # whether tearDown must remove the patched shutil.open
    _delete = False
    class Faux(object):
        """Fake file object recording context-manager usage; optionally
        raises IOError from __exit__ to simulate a failing close."""
        _entered = False
        _exited_with = None
        _raised = False
        def __init__(self, raise_in_exit=False, suppress_at_exit=True):
            self._raise_in_exit = raise_in_exit
            self._suppress_at_exit = suppress_at_exit
        def read(self, *args):
            return ''
        def __enter__(self):
            self._entered = True
        def __exit__(self, exc_type, exc_val, exc_tb):
            self._exited_with = exc_type, exc_val, exc_tb
            if self._raise_in_exit:
                self._raised = True
                raise IOError("Cannot close")
            return self._suppress_at_exit
    def tearDown(self):
        if self._delete:
            del shutil.open
    def _set_shutil_open(self, func):
        # Shadow the builtin `open` as seen from inside shutil.
        shutil.open = func
        self._delete = True
    def test_w_source_open_fails(self):
        """A failure opening the source propagates to the caller."""
        def _open(filename, mode='r'):
            if filename == 'srcfile':
                raise IOError('Cannot open "srcfile"')
            assert 0  # shouldn't reach here.
        self._set_shutil_open(_open)
        self.assertRaises(IOError, shutil.copyfile, 'srcfile', 'destfile')
    def test_w_dest_open_fails(self):
        """A failure opening the destination still exits the source's
        context manager (which suppresses the error here)."""
        srcfile = self.Faux()
        def _open(filename, mode='r'):
            if filename == 'srcfile':
                return srcfile
            if filename == 'destfile':
                raise IOError('Cannot open "destfile"')
            assert 0  # shouldn't reach here.
        self._set_shutil_open(_open)
        shutil.copyfile('srcfile', 'destfile')
        self.assertTrue(srcfile._entered)
        self.assertTrue(srcfile._exited_with[0] is IOError)
        self.assertEqual(srcfile._exited_with[1].args,
                         ('Cannot open "destfile"',))
    def test_w_dest_close_fails(self):
        """A failing destination close is seen by the source's __exit__."""
        srcfile = self.Faux()
        destfile = self.Faux(True)
        def _open(filename, mode='r'):
            if filename == 'srcfile':
                return srcfile
            if filename == 'destfile':
                return destfile
            assert 0  # shouldn't reach here.
        self._set_shutil_open(_open)
        shutil.copyfile('srcfile', 'destfile')
        self.assertTrue(srcfile._entered)
        self.assertTrue(destfile._entered)
        self.assertTrue(destfile._raised)
        self.assertTrue(srcfile._exited_with[0] is IOError)
        self.assertEqual(srcfile._exited_with[1].args,
                         ('Cannot close',))
    def test_w_source_close_fails(self):
        """A failing source close propagates out of copyfile()."""
        srcfile = self.Faux(True)
        destfile = self.Faux()
        def _open(filename, mode='r'):
            if filename == 'srcfile':
                return srcfile
            if filename == 'destfile':
                return destfile
            assert 0  # shouldn't reach here.
        self._set_shutil_open(_open)
        self.assertRaises(IOError,
                          shutil.copyfile, 'srcfile', 'destfile')
        self.assertTrue(srcfile._entered)
        self.assertTrue(destfile._entered)
        self.assertFalse(destfile._raised)
        self.assertTrue(srcfile._exited_with[0] is None)
        self.assertTrue(srcfile._raised)
    # NOTE(review): this test concerns shutil.move, not copyfile — it looks
    # misplaced in this class (it also ignores the class's setUp fixtures
    # and builds its own); consider moving it to TestMove.
    def test_move_dir_caseinsensitive(self):
        # Renames a folder to the same name
        # but a different case.
        self.src_dir = tempfile.mkdtemp()
        dst_dir = os.path.join(
                os.path.dirname(self.src_dir),
                os.path.basename(self.src_dir).upper())
        self.assertNotEqual(self.src_dir, dst_dir)
        try:
            shutil.move(self.src_dir, dst_dir)
            self.assertTrue(os.path.isdir(dst_dir))
        finally:
            if os.path.exists(dst_dir):
                os.rmdir(dst_dir)
def test_main():
    """Entry point used by the regrtest driver: run all three test classes."""
    test_support.run_unittest(TestShutil, TestMove, TestCopyFile)
if __name__ == '__main__':
    test_main()
|
poljeff/odoo | refs/heads/8.0 | openerp/addons/test_impex/models.py | 392 | # -*- coding: utf-8 -*-
from openerp.osv import orm, fields
def selection_fn(obj, cr, uid, context=None):
    """Selection callable for the test selection field: returns the fixed
    (value, label) pairs, with values being the labels' positions."""
    labels = ["Corge", "Grault", "Wheee", "Moog"]
    return [(position, label) for position, label in enumerate(labels)]
def function_fn(model, cr, uid, ids, field_name, arg, context):
    """Getter for the test function field: every requested id maps to 3."""
    return {record_id: 3 for record_id in ids}
def function_fn_write(model, cr, uid, id, field_name, field_value, fnct_inv_arg, context):
    """No-op inverse (setter) for the test function field.
    It exists only so that CreatorCase.export can be used.
    """
    return None
# (name suffix, field definition) pairs: one test model 'export.<suffix>'
# is generated per entry by the loop that follows.
models = [
    ('boolean', fields.boolean()),
    ('integer', fields.integer()),
    ('float', fields.float()),
    ('decimal', fields.float(digits=(16, 3))),
    ('string.bounded', fields.char('unknown', size=16)),
    ('string.required', fields.char('unknown', size=None, required=True)),
    ('string', fields.char('unknown', size=None)),
    ('date', fields.date()),
    ('datetime', fields.datetime()),
    ('text', fields.text()),
    ('selection', fields.selection([(1, "Foo"), (2, "Bar"), (3, "Qux"), (4, '')])),
    # here use size=-1 to store the values as integers instead of strings
    ('selection.function', fields.selection(selection_fn, size=-1)),
    # just relate to an integer
    ('many2one', fields.many2one('export.integer')),
    ('one2many', fields.one2many('export.one2many.child', 'parent_id')),
    ('many2many', fields.many2many('export.many2many.other')),
    ('function', fields.function(function_fn, fnct_inv=function_fn_write, type="integer")),
    # related: specialization of fields.function, should work the same way
    # TODO: reference
]
# Generate one model per (name, field) pair above.  Re-binding the class name
# NewModel each iteration is fine: the OpenERP metaclass registers every class
# under its _name as a side effect of class creation.
for name, field in models:
    class NewModel(orm.Model):
        """Test model with a constant column and one 'value' column of the
        field type under test; name_get/name_search encode the value as
        '<model name>:<value>' for round-tripping in import/export tests."""
        _name = 'export.%s' % name
        _columns = {
            'const': fields.integer(),
            'value': field,
        }
        _defaults = {
            'const': 4,
        }
        def name_get(self, cr, uid, ids, context=None):
            return [(record.id, "%s:%s" % (self._name, record.value))
                    for record in self.browse(cr, uid, ids, context=context)]
        def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
            # only resolve names produced by name_get above
            if isinstance(name, basestring) and name.split(':')[0] == self._name:
                ids = self.search(cr, user, [['value', operator, int(name.split(':')[1])]])
                return self.name_get(cr, user, ids, context=context)
            else:
                return []
class One2ManyChild(orm.Model):
    """Child side of the export.one2many relation; names encode the value
    as '<model name>:<value>' (same convention as the generated models)."""
    _name = 'export.one2many.child'
    # FIXME: orm.py:1161, fix to name_get on m2o field
    _rec_name = 'value'
    _columns = {
        'parent_id': fields.many2one('export.one2many'),
        'str': fields.char('unknown', size=None),
        'value': fields.integer(),
    }
    def name_get(self, cr, uid, ids, context=None):
        return [(record.id, "%s:%s" % (self._name, record.value))
                for record in self.browse(cr, uid, ids, context=context)]
    def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
        # only resolve names produced by name_get above
        if isinstance(name, basestring) and name.split(':')[0] == self._name:
            ids = self.search(cr, user, [['value', operator, int(name.split(':')[1])]])
            return self.name_get(cr, user, ids, context=context)
        else:
            return []
class One2ManyMultiple(orm.Model):
    """Model holding two distinct one2many fields, to test exports that
    traverse several o2m relations at once."""
    _name = 'export.one2many.multiple'
    _columns = {
        'parent_id': fields.many2one('export.one2many.recursive'),
        'const': fields.integer(),
        'child1': fields.one2many('export.one2many.child.1', 'parent_id'),
        'child2': fields.one2many('export.one2many.child.2', 'parent_id'),
    }
    _defaults = {
        'const': 36,
    }
class One2ManyChildMultiple(orm.Model):
    """Shared base for the two child models of export.one2many.multiple."""
    _name = 'export.one2many.multiple.child'
    # FIXME: orm.py:1161, fix to name_get on m2o field
    _rec_name = 'value'
    _columns = {
        'parent_id': fields.many2one('export.one2many.multiple'),
        'str': fields.char('unknown', size=None),
        'value': fields.integer(),
    }
    def name_get(self, cr, uid, ids, context=None):
        return [(record.id, "%s:%s" % (self._name, record.value))
                for record in self.browse(cr, uid, ids, context=context)]
class One2ManyChild1(orm.Model):
    """First concrete child model; inherits columns and name_get."""
    _name = 'export.one2many.child.1'
    _inherit = 'export.one2many.multiple.child'
class One2ManyChild2(orm.Model):
    """Second concrete child model; inherits columns and name_get."""
    _name = 'export.one2many.child.2'
    _inherit = 'export.one2many.multiple.child'
class Many2ManyChild(orm.Model):
    """Target of the export.many2many relation; same '<model>:<value>'
    name_get/name_search convention as the other test models."""
    _name = 'export.many2many.other'
    # FIXME: orm.py:1161, fix to name_get on m2o field
    _rec_name = 'value'
    _columns = {
        'str': fields.char('unknown', size=None),
        'value': fields.integer(),
    }
    def name_get(self, cr, uid, ids, context=None):
        return [(record.id, "%s:%s" % (self._name, record.value))
                for record in self.browse(cr, uid, ids, context=context)]
    def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
        # only resolve names produced by name_get above
        if isinstance(name, basestring) and name.split(':')[0] == self._name:
            ids = self.search(cr, user, [['value', operator, int(name.split(':')[1])]])
            return self.name_get(cr, user, ids, context=context)
        else:
            return []
class SelectionWithDefault(orm.Model):
    """Selection field with a default value, to test export of defaults."""
    _name = 'export.selection.withdefault'
    _columns = {
        'const': fields.integer(),
        'value': fields.selection([(1, "Foo"), (2, "Bar")]),
    }
    _defaults = {
        'const': 4,
        'value': 2,
    }
class RecO2M(orm.Model):
    """Root of a recursive one2many chain (-> multiple -> child.1/child.2)."""
    _name = 'export.one2many.recursive'
    _columns = {
        'value': fields.integer(),
        'child': fields.one2many('export.one2many.multiple', 'parent_id'),
    }
class OnlyOne(orm.Model):
    """Model with a SQL unique constraint, to test constraint violations
    during import."""
    _name = 'export.unique'
    _columns = {
        'value': fields.integer(),
    }
    _sql_constraints = [
        ('value_unique', 'unique (value)', "The value must be unique"),
    ]
|
DongjunLee/kino-bot | refs/heads/master | kino/skills/fitbit.py | 1 | #!/usr/bin/env python
import fitbit
from hbconfig import Config
from kino.slack.slackbot import SlackerAdapter
from kino.utils.data_handler import DataHandler
class Fitbit:
    """Thin wrapper around the Fitbit web API that reports through Slack.

    Credentials are read from the application Config; results are posted
    via a SlackerAdapter (a shared one may be injected for testing).
    """

    def __init__(self, slackbot=None):
        """Build the authenticated API client and the Slack reporter.

        :param slackbot: optional pre-built SlackerAdapter; when omitted a
            new one bound to the configured REPORT channel is created.
        """
        self.api = fitbit.api.Fitbit(
            Config.open_api.fitbit.CLIENT_ID,
            Config.open_api.fitbit.CLIENT_SECRET,
            access_token=Config.open_api.fitbit.ACCESS_TOKEN,
            refresh_token="<refresh>",
        )
        # NOTE: attribute name keeps the historical spelling ("handelr")
        # because other code may reference it.
        self.data_handelr = DataHandler()
        if slackbot is not None:
            self.slackbot = slackbot
        else:
            self.slackbot = SlackerAdapter(
                channel=Config.slack.channel.get("REPORT", "#general")
            )

    def get_sleeps(self):
        """Fetch today's sleep log.

        Returns a tuple ``(entries, summary)`` where each entry is a dict
        with ``is_main``, ``start_time`` and ``end_time`` keys and
        ``summary`` is the API's summary dict, both taken verbatim from
        the ``sleep``/``summary`` keys of the API response.
        """
        response = self.api.sleep()
        entries = [
            {
                "is_main": record["isMainSleep"],
                "start_time": record["startTime"],
                "end_time": record["endTime"],
            }
            for record in response["sleep"]
        ]
        return entries, response["summary"]
|
philanthropy-u/edx-platform | refs/heads/master | lms/djangoapps/email_marketing/migrations/0010_auto_20180425_0800.py | 13 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-25 12:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Sailthru template fields used to notify users about
    passed/failed ID verification."""
    dependencies = [
        ('email_marketing', '0009_remove_emailmarketingconfiguration_sailthru_activation_template'),
    ]
    operations = [
        migrations.AddField(
            model_name='emailmarketingconfiguration',
            name='sailthru_verification_failed_template',
            field=models.CharField(blank=True, help_text='Sailthru send template to use on failed ID verification.', max_length=20),
        ),
        migrations.AddField(
            model_name='emailmarketingconfiguration',
            name='sailthru_verification_passed_template',
            field=models.CharField(blank=True, help_text='Sailthru send template to use on passed ID verification.', max_length=20),
        ),
    ]
|
motion2015/a3 | refs/heads/a3 | lms/djangoapps/courseware/features/lti.py | 49 | # pylint: disable=missing-docstring
import datetime
import os
import pytz
from django.conf import settings
from mock import patch
from pytz import UTC
from splinter.exceptions import ElementDoesNotExist
from selenium.common.exceptions import NoAlertPresentException
from nose.tools import assert_true, assert_equal, assert_in, assert_is_none
from lettuce import world, step
from courseware.tests.factories import InstructorFactory, BetaTesterFactory
from courseware.access import has_access
from student.tests.factories import UserFactory
from common import visit_scenario_item
TEST_COURSE_NAME = "test_course_a"
@step('I view the LTI and error is shown$')
def lti_is_not_rendered(_step):
    """The page must show the LTI error message and neither an iframe nor
    a launch link."""
    # error is shown
    assert world.is_css_present('.error_message', wait_time=0)
    # iframe is not presented
    assert not world.is_css_present('iframe', wait_time=0)
    # link is not presented
    assert not world.is_css_present('.link_lti_new_window', wait_time=0)
def check_lti_iframe_content(text):
    """Assert that the LTI iframe's `.result` element shows exactly *text*."""
    # inside iframe test content is presented
    location = world.scenario_dict['LTI'].location.html_id()
    iframe_name = 'ltiFrame-' + location
    with world.browser.get_iframe(iframe_name) as iframe:
        # iframe does not contain functions from terrain/ui_helpers.py
        assert iframe.is_element_present_by_css('.result', wait_time=0)
        # retry: iframe content may still be loading on first read
        assert (text == world.retry_on_exception(
            lambda: iframe.find_by_css('.result')[0].text,
            max_attempts=5
        ))
@step('I view the LTI and it is rendered in (.*)$')
def lti_is_rendered(_step, rendered_in):
    """Verify that the LTI tool is rendered either inline (iframe) or via a
    launch link that opens a new page, depending on *rendered_in*."""
    if rendered_in.strip() == 'iframe':
        world.wait_for_present('iframe')
        assert world.is_css_present('iframe', wait_time=2)
        assert not world.is_css_present('.link_lti_new_window', wait_time=0)
        assert not world.is_css_present('.error_message', wait_time=0)
        # iframe is visible
        assert world.css_visible('iframe')
        check_lti_iframe_content("This is LTI tool. Success.")
    elif rendered_in.strip() == 'new page':
        assert not world.is_css_present('iframe', wait_time=2)
        assert world.is_css_present('.link_lti_new_window', wait_time=0)
        assert not world.is_css_present('.error_message', wait_time=0)
        click_and_check_lti_popup()
    else:  # incorrect rendered_in parameter
        assert False
@step('I view the permission alert$')
def view_lti_permission_alert(_step):
    """Click the launch link and confirm a JS confirmation alert appears
    without a new window having opened yet."""
    assert not world.is_css_present('iframe', wait_time=2)
    assert world.is_css_present('.link_lti_new_window', wait_time=0)
    assert not world.is_css_present('.error_message', wait_time=0)
    world.css_find('.link_lti_new_window').first.click()
    alert = world.browser.get_alert()
    assert alert is not None
    assert len(world.browser.windows) == 1
def check_no_alert():
    """
    Make sure the alert has gone away.
    Note that the splinter documentation indicates that
    get_alert should return None if no alert is present,
    however that is not the case. Instead a
    NoAlertPresentException is raised.
    """
    try:
        assert_is_none(world.browser.get_alert())
    except NoAlertPresentException:
        # no alert at all is also an acceptable outcome
        pass
@step('I accept the permission alert and view the LTI$')
def accept_lti_permission_alert(_step):
    """Accept the confirmation alert and verify the LTI pop-up opens."""
    parent_window = world.browser.current_window  # Save the parent window
    # To start with you should only have one window/tab
    assert len(world.browser.windows) == 1
    alert = world.browser.get_alert()
    alert.accept()
    check_no_alert()
    # Give it a few seconds for the LTI window to appear
    world.wait_for(
        lambda _: len(world.browser.windows) == 2,
        timeout=5,
        timeout_msg="Timed out waiting for the LTI window to appear."
    )
    # Verify the LTI window
    check_lti_popup(parent_window)
@step('I reject the permission alert and do not view the LTI$')
def reject_lti_permission_alert(_step):
    """Dismiss the confirmation alert and verify no new window opened."""
    alert = world.browser.get_alert()
    alert.dismiss()
    check_no_alert()
    assert len(world.browser.windows) == 1
@step('I view the LTI but incorrect_signature warning is rendered$')
def incorrect_lti_is_rendered(_step):
    """With a bad OAuth signature the iframe still renders, but the tool
    reports the signature failure inside it."""
    assert world.is_css_present('iframe', wait_time=2)
    assert not world.is_css_present('.link_lti_new_window', wait_time=0)
    assert not world.is_css_present('.error_message', wait_time=0)
    # inside iframe test content is presented
    check_lti_iframe_content("Wrong LTI signature")
@step('the course has correct LTI credentials with registered (.*)$')
def set_correct_lti_passport(_step, user='Instructor'):
    """Create the course with a passport whose id/key/secret match the
    test LTI provider, and register/log in the given user role."""
    coursenum = TEST_COURSE_NAME
    metadata = {
        'lti_passports': ["correct_lti_id:test_client_key:test_client_secret"]
    }
    i_am_registered_for_the_course(coursenum, metadata, user)
@step('the course has incorrect LTI credentials$')
def set_incorrect_lti_passport(_step):
    """Create the course with a passport whose secret will not validate
    against the test LTI provider."""
    coursenum = TEST_COURSE_NAME
    metadata = {
        'lti_passports': ["test_lti_id:test_client_key:incorrect_lti_secret_key"]
    }
    i_am_registered_for_the_course(coursenum, metadata)
@step('the course has an LTI component with (.*) fields(?:\:)?$')  # , new_page is(.*), graded is(.*)
def add_correct_lti_to_course(_step, fields):
    """Add an LTI xblock to the test section, optionally degraded (wrong
    lti_id or empty launch_url), apply any extra metadata from the step's
    hash table, and open it in the browser."""
    category = 'lti'
    metadata = {
        'lti_id': 'correct_lti_id',
        'launch_url': 'http://127.0.0.1:{}/correct_lti_endpoint'.format(settings.LTI_PORT),
    }
    if fields.strip() == 'incorrect_lti_id':  # incorrect fields
        metadata.update({
            'lti_id': 'incorrect_lti_id'
        })
    elif fields.strip() == 'correct':  # correct fields
        pass
    elif fields.strip() == 'no_launch_url':
        metadata.update({
            'launch_url': u''
        })
    else:  # incorrect parameter
        assert False
    # step hash rows (if any) override/extend the metadata
    if _step.hashes:
        metadata.update(_step.hashes[0])
    world.scenario_dict['LTI'] = world.ItemFactory.create(
        parent_location=world.scenario_dict['SECTION'].location,
        category=category,
        display_name='LTI',
        metadata=metadata,
    )
    setattr(world.scenario_dict['LTI'], 'TEST_BASE_PATH', '{host}:{port}'.format(
        host=world.browser.host,
        port=world.browser.port,
    ))
    visit_scenario_item('LTI')
def create_course_for_lti(course, metadata):
    """Create a fresh test course (chapter + graded 'Homework' sequential)
    for LTI scenarios, storing the pieces in world.scenario_dict."""
    # First clear the modulestore so we don't try to recreate
    # the same course twice
    # This also ensures that the necessary templates are loaded
    world.clear_courses()
    weight = 0.1
    grading_policy = {
        "GRADER": [
            {
                "type": "Homework",
                "min_count": 1,
                "drop_count": 0,
                "short_label": "HW",
                "weight": weight
            },
        ]
    }
    # Create the course
    # We always use the same org and display name,
    # but vary the course identifier (e.g. 600x or 191x)
    world.scenario_dict['COURSE'] = world.CourseFactory.create(
        org='edx',
        number=course,
        display_name='Test Course',
        metadata=metadata,
        grading_policy=grading_policy,
    )
    # Add a section to the course to contain problems
    world.scenario_dict['CHAPTER'] = world.ItemFactory.create(
        parent_location=world.scenario_dict['COURSE'].location,
        category='chapter',
        display_name='Test Chapter',
    )
    world.scenario_dict['SECTION'] = world.ItemFactory.create(
        parent_location=world.scenario_dict['CHAPTER'].location,
        category='sequential',
        display_name='Test Section',
        metadata={'graded': True, 'format': 'Homework'})
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def i_am_registered_for_the_course(coursenum, metadata, user='Instructor'):
    """Create the course and a user of the requested role (Instructor or
    BetaTester), then enroll and log that user in.  For BetaTester the
    course starts in the future so only beta testers/instructors can load it."""
    # Create user
    if user == 'BetaTester':
        # Create the course
        now = datetime.datetime.now(pytz.UTC)
        tomorrow = now + datetime.timedelta(days=5)
        metadata.update({'days_early_for_beta': 5, 'start': tomorrow})
        create_course_for_lti(coursenum, metadata)
        course_descriptor = world.scenario_dict['COURSE']
        # create beta tester
        user = BetaTesterFactory(course_key=course_descriptor.id)
        normal_student = UserFactory()
        instructor = InstructorFactory(course_key=course_descriptor.id)
        # sanity-check early access: regular students must be locked out
        assert not has_access(normal_student, 'load', course_descriptor)
        assert has_access(user, 'load', course_descriptor)
        assert has_access(instructor, 'load', course_descriptor)
    else:
        metadata.update({'start': datetime.datetime(1970, 1, 1, tzinfo=UTC)})
        create_course_for_lti(coursenum, metadata)
        course_descriptor = world.scenario_dict['COURSE']
        user = InstructorFactory(course_key=course_descriptor.id)
    # Enroll the user in the course and log them in
    if has_access(user, 'load', course_descriptor):
        world.enroll_user(user, course_descriptor.id)
    world.log_in(username=user.username, password='test')
def check_lti_popup(parent_window):
    """Verify the LTI pop-up window: titles of both windows, the pop-up
    URL, and the success message; then close it and return focus to
    *parent_window*."""
    # You should now have 2 browser windows open, the original courseware and the LTI
    windows = world.browser.windows
    assert_equal(len(windows), 2)
    # For verification, iterate through the window titles and make sure that
    # both are there.
    tabs = []
    expected_tabs = [u'LTI | Test Section | {0} Courseware | edX'.format(TEST_COURSE_NAME), u'TEST TITLE']
    for window in windows:
        world.browser.switch_to_window(window)
        tabs.append(world.browser.title)
    assert_equal(tabs, expected_tabs)  # pylint: disable=no-value-for-parameter
    # Now verify the contents of the LTI window (which is the 2nd window/tab)
    # Note: The LTI opens in a new browser window, but Selenium sticks with the
    # current window until you explicitly switch to the context of the new one.
    world.browser.switch_to_window(windows[1])
    url = world.browser.url
    basename = os.path.basename(url)
    pathname = os.path.splitext(basename)[0]
    assert_equal(pathname, u'correct_lti_endpoint')
    result = world.css_find('.result').first.text
    assert_equal(result, u'This is LTI tool. Success.')
    world.browser.driver.close()  # Close the pop-up window
    world.browser.switch_to_window(parent_window)  # Switch to the main window again
def click_and_check_lti_popup():
    """Click the LTI launch link, then verify (and close) the pop-up."""
    main_window = world.browser.current_window
    launch_link = world.css_find('.link_lti_new_window').first
    launch_link.click()
    check_lti_popup(main_window)
@step('visit the LTI component')
def visit_lti_component(_step):
    """Navigate the browser to the scenario's LTI component."""
    visit_scenario_item('LTI')
@step('I see LTI component (.*) with text "([^"]*)"$')
def see_elem_text(_step, elem, text):
    """Assert that the named LTI page element currently displays *text*."""
    selectors = {
        'progress': '.problem-progress',
        'feedback': '.problem-feedback',
        'module title': '.problem-header',
        'button': '.link_lti_new_window',
        'description': '.lti-description',
    }
    assert_in(elem, selectors)
    selector = selectors[elem]
    assert_true(world.css_has_text(selector, text))
@step('I see text "([^"]*)"$')
def check_progress(_step, text):
    """Assert that *text* appears somewhere on the current page."""
    assert world.browser.is_text_present(text)
@step('I see graph with total progress "([^"]*)"$')
def see_graph(_step, progress):
    """Assert the grade-detail graph shows the given total progress label."""
    selector = 'grade-detail-graph'
    xpath = '//div[@id="{parent}"]//div[text()="{progress}"]'.format(
        parent=selector,
        progress=progress,
    )
    node = world.browser.find_by_xpath(xpath)
    assert node
@step('I see in the gradebook table that "([^"]*)" is "([^"]*)"$')
def see_value_in_the_gradebook(_step, label, text):
    """Assert the gradebook column headed *label* contains *text*."""
    table_selector = '.grade-table'
    # NOTE(review): if *label* is not found among the headers, index stays 0
    # and the first column is checked silently — confirm this fallback is
    # intended rather than an assertion-worthy failure.
    index = 0
    table_headers = world.css_find('{0} thead th'.format(table_selector))
    for i, element in enumerate(table_headers):
        if element.text.strip() == label:
            index = i
            break
    assert_true(world.css_has_text('{0} tbody td'.format(table_selector), text, index=index))
@step('I submit answer to LTI (.*) question$')
def click_grade(_step, version):
    """Submit a grade from inside the LTI provider iframe.

    ``version`` selects the LTI grading protocol ('1' or '2'); each version
    uses a different submit button and expected confirmation text.
    """
    version_map = {
        '1': {'selector': 'submit-button', 'expected_text': 'LTI consumer (edX) responded with XML content'},
        '2': {'selector': 'submit-lti2-button', 'expected_text': 'LTI consumer (edX) responded with HTTP 200'},
    }
    assert_in(version, version_map)
    # The LTI provider is rendered in an iframe named after the component id.
    location = world.scenario_dict['LTI'].location.html_id()
    iframe_name = 'ltiFrame-' + location
    with world.browser.get_iframe(iframe_name) as iframe:
        iframe.find_by_name(version_map[version]['selector']).first.click()
        assert iframe.is_text_present(version_map[version]['expected_text'])
@step('LTI provider deletes my grade and feedback$')
def click_delete_button(_step):
    """Click the LTI 2.0 delete button inside the provider iframe."""
    with world.browser.get_iframe(get_lti_frame_name()) as iframe:
        iframe.find_by_name('submit-lti2-delete-button').first.click()
def get_lti_frame_name():
    """Return the name of the iframe that hosts the scenario's LTI provider."""
    component_id = world.scenario_dict['LTI'].location.html_id()
    return 'ltiFrame-' + component_id
@step('I see in iframe that LTI role is (.*)$')
def check_role(_step, role):
    """Assert that the LTI provider iframe reports the expected user role."""
    world.wait_for_present('iframe')
    location = world.scenario_dict['LTI'].location.html_id()
    iframe_name = 'ltiFrame-' + location
    with world.browser.get_iframe(iframe_name) as iframe:
        expected_role = 'Role: ' + role
        # NOTE: `role` is rebound here to the value scraped from the iframe,
        # shadowing the step argument of the same name.
        role = world.retry_on_exception(
            lambda: iframe.find_by_tag('h5').first.value,
            max_attempts=5,
            ignored_exceptions=ElementDoesNotExist
        )
        assert_equal(expected_role, role)
@step('I switch to (.*)$')
def switch_view(_step, view):
    """Switch the staff preview selector to the given view, if not already set."""
    staff_status = world.css_find('#action-preview-select').first.value
    if staff_status != view:
        world.browser.select("select", view)
        world.wait_for_ajax_complete()
        # Confirm the selector actually changed after the AJAX round-trip.
        assert_equal(world.css_find('#action-preview-select').first.value, view)
@step("in the LTI component I do not see (.*)$")
def check_lti_component_no_elem(_step, text):
selector_map = {
'a launch button': '.link_lti_new_window',
'an provider iframe': '.ltiLaunchFrame',
'feedback': '.problem-feedback',
'progress': '.problem-progress',
}
assert_in(text, selector_map)
assert_true(world.is_css_not_present(selector_map[text]))
|
Swall0w/clib | refs/heads/master | tests/utils/test_boolian.py | 1 | import unittest
from clib.utils import randombool
class RandomBoolTest(unittest.TestCase):
    """Tests for :func:`clib.utils.randombool`."""
    def test_randombool(self):
        # randombool() should always return a native bool.
        self.assertIsInstance(randombool(), bool)
|
scherroman/mugen | refs/heads/master | mugen/video/sources/VideoSource.py | 1 | import glob as globber
import os
import random
from typing import Union, List, Optional as Opt, NamedTuple, Tuple
from numpy.random import choice
import mugen.utility as util
from mugen import paths
from mugen.constants import TIME_FORMAT
from mugen.utility import convert_time_to_seconds
from mugen.video.segments.VideoSegment import VideoSegment
from mugen.video.sources.Source import Source, SourceList
class TimeRangeBase(NamedTuple):
    # Raw (start, end) pair in seconds; TimeRange layers time-format
    # conversion and a duration property on top of this.
    start: float
    end: float
class TimeRange(TimeRangeBase):
    """An immutable (start, end) time range, stored in seconds."""
    __slots__ = ()
    @convert_time_to_seconds(['start', 'end'])
    def __new__(cls, start, end):
        # start/end may be given in any TIME_FORMAT; the decorator converts
        # them to seconds before the tuple is created.
        self = super().__new__(cls, start, end)
        return self
    @property
    def duration(self):
        """Length of the range in seconds."""
        return self.end - self.start
class VideoSource(Source):
    """
    A video source for sampling video segments
    """
    # The set of time ranges this source may sample from; an empty list
    # means the whole video is available.
    time_boundaries: List[Tuple[(TIME_FORMAT, TIME_FORMAT)]]
    def __init__(self, file: str, *, time_boundaries: Opt[List[Tuple[(TIME_FORMAT, TIME_FORMAT)]]] = None,
                 **kwargs):
        """
        Parameters
        ----------
        file
            video file to sample from
        time_boundaries
            the set of time ranges to sample from in the video.
            For supported formats, see :data:`~mugen.constants.TIME_FORMAT`.
        """
        super().__init__(**kwargs)
        self.segment = VideoSegment(file)
        self.time_boundaries = time_boundaries if time_boundaries else []
    def __repr__(self):
        return f"<{self.__class__.__name__}: {self.name}, duration: {self.segment.duration_time_code}, " \
               f"weight: {self.weight}>"
    @property
    def file(self):
        """Path of the underlying video file."""
        return self.segment.file
    @property
    def name(self):
        """Display name derived from the video file."""
        return self.segment.name
    def sample(self, duration: float) -> VideoSegment:
        """
        Randomly samples a video segment with the specified duration.
        Parameters
        ----------
        duration
            duration of the video segment to sample
        """
        if self.time_boundaries:
            # Select a random time boundary to sample from, weighted by duration
            time_ranges = [TimeRange(*boundary) for boundary in self.time_boundaries]
            time_ranges = [time_range for time_range in time_ranges if time_range.duration >= duration]
            # NOTE(review): if no boundary is at least `duration` long,
            # time_ranges is empty and `choice` below will raise -- confirm
            # whether callers guarantee a long-enough boundary.
            total_duration = sum([time_range.duration for time_range in time_ranges])
            time_range_weights = [time_range.duration / total_duration for time_range in time_ranges]
            time_range_to_sample = time_ranges[choice(len(time_ranges), p=time_range_weights)]
        else:
            time_range_to_sample = TimeRange(0, self.segment.duration)
        # Pick a random start time that leaves room for `duration` seconds.
        start_time = random.uniform(time_range_to_sample.start, time_range_to_sample.end - duration)
        sampled_clip = self.segment.subclip(start_time, start_time + duration)
        return sampled_clip
class VideoSourceList(SourceList):
    """
    A list of VideoSources
    """
    # Display name, set when the list was built from a directory or glob.
    name: Opt[str]
    def __init__(self, sources: Opt[Union[List[Union[Source, 'VideoSourceList']], str]] = None, **kwargs):
        """
        Parameters
        ----------
        sources
            A list of sources, or a directory / file-glob string.
            Accepts arbitrarily nested video files, file globs, directories, VideoSources, and VideoSourceLists.
        """
        # BUGFIX: `sources` previously had the typing expression
        # Opt[Union[...]] as its *default value* (a typo for an annotation),
        # so omitting the argument handed a typing object to the code below.
        # It is now a proper annotation with a None default.
        self.name = None
        if isinstance(sources, str):
            self.name = paths.filename_from_path(sources)
            # Build list of sources from directory or file glob
            if os.path.isdir(sources):
                sources = self._sources_from_files(util.files_from_directory(sources))
            else:
                sources = self._sources_from_files(globber.glob(sources))
        else:
            # Convert any source files to VideoSources, and any lists, file
            # globs or directories to VideoSourceLists
            sources = self._fill_in_sources(sources)
        super().__init__(sources, **kwargs)
    def list_repr(self):
        """
        Repr for use in lists
        """
        if self.name:
            return f"<{self.__class__.__name__} ({len(self)}): {self.name}, weight: {self.weight}>"
        return super().list_repr()
    @staticmethod
    def _sources_from_files(files: List[str]):
        """Create a VideoSource per readable video file, skipping bad files."""
        sources = []
        for file in files:
            try:
                source = VideoSource(file)
            except IOError:
                # Not a readable video file; skip it.
                continue
            sources.append(source)
        return sources
    @staticmethod
    def _fill_in_sources(sources: list):
        """Normalize entries in-place: strings become VideoSources (or
        VideoSourceLists for globs/directories); nested plain lists become
        VideoSourceLists."""
        for index, source in enumerate(sources):
            if isinstance(source, str):
                try:
                    sources[index] = VideoSource(source)
                except IOError:
                    sources[index] = VideoSourceList(source)
            if isinstance(source, list) and not isinstance(source, VideoSourceList):
                sources[index] = VideoSourceList(source)
        return sources
|
KrzysztofCwalina/coreclr | refs/heads/master | tests/scripts/smarty_parser.py | 127 | #!/usr/bin/env python
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
## See the LICENSE file in the project root for more information.
#
##
# Title :smarty_parser.py
#
# Notes:
#
# Simple class to parse through the smarty .smrt files and individual output
# files.
#
# Expects:
#
# Smarty results directory:
#
# --Smarty.run.xx
# |
# |---
# |
# |--- Smarty.run.xx.fail.smrt (optional, if there are failures)
# |--- Smarty.run.xx.pass.smrt (optional, if there are passes)
# |--- Smarty.rpt.xx.html
# |--- Smarty.rpt.xx.passed.html
# |--- Smarty.xml
# |--- Smrt00000xx
# |
# |---
# |
# |---Tests.lst_<test_name>.cmd_xx.x.y.html
# |---Tests.lst_<test_name>.cmd_xx.x.y.txt
#
################################################################################
from collections import defaultdict
import os
import re
import unittest
import sys
################################################################################
class SmartyParser:
    """Parse a Smarty results directory.

    Reads the pass/fail ``.smrt`` index files under ``path`` and, for each
    test they reference, parses the per-test HTML result file.  After
    ``parse()`` runs:
      - ``m_tests``   all parsed result dictionaries
      - ``m_passed``  results listed in ``*.pass.smrt`` files
      - ``m_failed``  results listed in ``*.fail.smrt`` files
      - ``m_missing`` tests whose result files could not be found
    """
    def __init__(self, path):
        if not os.path.isdir(path):
            raise Exception("Expected a valid path to parse through")
        self.m_path = path
        self.m_missing = []
        self.m_tests = []
        self.m_passed = []
        self.m_failed = []
    def parse(self):
        """Walk the results directory and populate the result lists."""
        files = os.listdir(self.m_path)
        failed_smrt_files = []
        passed_smrt_files = []
        for file in files:
            if "fail.smrt" in file:
                failed_smrt_files.append(file)
            elif "pass.smrt" in file:
                passed_smrt_files.append(file)
        def parse_smrt_files(smrt_list):
            # Extract (subdir, test_name) pairs from the [TESTS] section of
            # each .smrt index file.
            test_list = []
            for smrt_file in smrt_list:
                lines = None
                with open(os.path.join(self.m_path, smrt_file)) as file_handle:
                    lines = file_handle.readlines()
                lines = "\n".join(lines)
                lines = lines.split("[TESTS]")
                tests = lines[1].split("Tests.lst=")[1:]
                test_names = []
                for test in tests:
                    split = test.split(",")
                    dir = split[3].strip()
                    test_name = split[0].strip()
                    # Normalize "cmd" -> "cmd_" to match result file names.
                    test_name = "cmd_".join(test_name.split("cmd"))
                    test_names.append((dir, test_name))
                for test in test_names:
                    test_list.append(test)
            return test_list
        failed_tests = parse_smrt_files(failed_smrt_files)
        passed_tests = parse_smrt_files(passed_smrt_files)
        cached_ls = defaultdict(lambda: None)
        def iterate_tests(test_list):
            # Resolve each (dir, test_name) to its result files; directory
            # listings are cached because many tests share a directory.
            local_tests = []
            for test in test_list:
                smrt_dir = test[0]
                if cached_ls[smrt_dir] is None:
                    cached_ls[smrt_dir] = os.listdir(os.path.join(self.m_path, smrt_dir))
                    ds = defaultdict(lambda: [])
                    file_names = cached_ls[smrt_dir]
                    for file_name in file_names:
                        split = file_name.split(".result")
                        ds[split[0]].append(".result".join(split))
                    cached_ls[smrt_dir] = ds
                result_files = cached_ls[test[0]]["Tests.lst_" + test[1]]
                if len(result_files) == 0:
                    self.m_missing.append(test[0])
                else:
                    for file in result_files:
                        if os.path.splitext(file)[1] == ".html":
                            result = self.parse_smarty_file(os.path.join(self.m_path, test[0], file))
                            local_tests.append(result)
                            self.m_tests.append(result)
            # BUGFIX: iterate_tests previously fell off the end without a
            # return, so m_failed/m_passed were silently set to None instead
            # of the lists initialized in __init__.
            return local_tests
        self.m_failed = iterate_tests(failed_tests)
        self.m_passed = iterate_tests(passed_tests)
    def parse_smarty_file(self, path):
        """Parse one per-test HTML result file into a dict of fields.

        Returns a defaultdict mapping the known tag names (TEST_RESULT,
        TEST_OUTPUT, ...) to their parsed values; tags never encountered in
        the file keep the sentinel value True.
        """
        lines = self.remove_tags(path)
        tags = defaultdict(lambda: False)
        tags["TEST_IDENTIFIER"] = True
        tags["CATEGORIES"] = True
        tags["RELATIVEPATH"] = True
        tags["WORKINGDIR"] = True
        tags["TEST_CMD_LINE"] = True
        tags["TEST_EXPECTED_RETURN_CODE"] = True
        tags["TEST_ACTUAL_RETURN_CODE"] = True
        tags["TEST_START_TIME"] = True
        tags["TEST_END_TIME"] = True
        tags["TEST_RESULT"] = True
        tags["TEST_OUTPUT"] = True
        capturing_output = False
        for line in lines:
            if "TEST OUTPUT" in line:
                capturing_output = True
                tags["TEST_OUTPUT"] = []
            elif capturing_output is True:
                # NOTE(review): this sentinel is spelled "TEXT_..." while the
                # tag above is "TEST_EXPECTED_RETURN_CODE" -- confirm against
                # real Smarty output whether the spelling is intentional.
                if "TEXT_EXPECTED_RETURN_CODE" in line:
                    capturing_output = False
                    tags["TEST_OUTPUT"] = "\n".join(tags["TEST_OUTPUT"])
                else:
                    tags["TEST_OUTPUT"].append(line)
            elif "=" in line:
                split = line.split(" = ")
                if tags[split[0]] is True:
                    value = "=".join(split[1:])
                    tags[split[0]] = value.strip()
            elif ":" in line:
                # TEST_CMD_LINE does not use =
                split = line.split(": ")
                if tags[split[0]] is True:
                    tags[split[0]] = ":".join(split[1:])
        return tags
    def remove_tags(self, path):
        """Strip HTML tags from the file at ``path``; return non-empty lines."""
        tag_re = re.compile(r'<[^>]+>')
        lines = []
        with open(path) as file_handle:
            for line in file_handle:
                line = tag_re.sub('', line)
                # Smarty has a bug such that </BODY will possibly be missing
                # the ending >. Check for this and remove the tag if found.
                if "</BODY" in line and "</BODY>" not in line:
                    line = line.replace("</BODY", "")
                line = line.replace("\r\n", "")
                if len(line) != 0:
                    lines.append(line)
        return lines
|
yangming85/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/conf/urls/static.py | 156 | import re
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ImproperlyConfigured
def static(prefix, view='django.views.static.serve', **kwargs):
    """
    Helper function to return a URL pattern for serving files in debug mode.
    from django.conf import settings
    from django.conf.urls.static import static
    urlpatterns = patterns('',
        # ... the rest of your URLconf goes here ...
    ) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    """
    # Serving files this way is only wired up for local debug runs; a
    # remote (scheme-qualified) prefix is also a no-op.
    is_remote = prefix and '://' in prefix
    if not settings.DEBUG or is_remote:
        return []
    if not prefix:
        raise ImproperlyConfigured("Empty static prefix not permitted")
    pattern = r'^%s(?P<path>.*)$' % re.escape(prefix.lstrip('/'))
    return patterns('', url(pattern, view, kwargs=kwargs))
|
srajag/contrail-controller | refs/heads/master | src/config/svc-monitor/setup.py | 5 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import setuptools, re
def requirements(filename):
    """Read a pip requirements file, dropping comments and blank lines."""
    comment = re.compile(r'\s*#.*')
    with open(filename) as handle:
        raw_lines = handle.read().splitlines()
    # Strip inline/whole-line comments, then surrounding whitespace.
    cleaned = [comment.sub('', raw).strip() for raw in raw_lines]
    # filter(bool, ...) drops the now-empty entries (kept instead of a
    # comprehension so the return type matches the original on Py2 and Py3).
    return filter(bool, cleaned)
# Package definition for the VNC service monitor.
setuptools.setup(
    name='svc-monitor',
    version='0.1dev',
    packages=setuptools.find_packages(),
    # Ship HTML/CSS/XML resources found inside the packages.
    package_data={'': ['*.html', '*.css', '*.xml']},
    # metadata
    author="OpenContrail",
    author_email="dev@lists.opencontrail.org",
    license="Apache Software License",
    url="http://www.opencontrail.org/",
    long_description="VNC Service Monitor",
    install_requires=requirements('requirements.txt'),
    tests_require=requirements('test-requirements.txt'),
    test_suite='svc_monitor.tests',
    entry_points = {
        # Please update sandesh/common/vns.sandesh on process name change
        'console_scripts' : [
            'contrail-svc-monitor = svc_monitor.svc_monitor:server_main',
        ],
    },
)
|
xiangel/hue | refs/heads/master | desktop/core/ext-py/python-openid-2.2.5/openid/extensions/sreg.py | 143 | """Simple registration request and response parsing and object representation
This module contains objects representing simple registration requests
and responses that can be used with both OpenID relying parties and
OpenID providers.
1. The relying party creates a request object and adds it to the
C{L{AuthRequest<openid.consumer.consumer.AuthRequest>}} object
before making the C{checkid_} request to the OpenID provider::
auth_request.addExtension(SRegRequest(required=['email']))
2. The OpenID provider extracts the simple registration request from
the OpenID request using C{L{SRegRequest.fromOpenIDRequest}},
gets the user's approval and data, creates a C{L{SRegResponse}}
object and adds it to the C{id_res} response::
sreg_req = SRegRequest.fromOpenIDRequest(checkid_request)
# [ get the user's approval and data, informing the user that
# the fields in sreg_response were requested ]
sreg_resp = SRegResponse.extractResponse(sreg_req, user_data)
sreg_resp.toMessage(openid_response.fields)
3. The relying party uses C{L{SRegResponse.fromSuccessResponse}} to
extract the data from the OpenID response::
sreg_resp = SRegResponse.fromSuccessResponse(success_response)
@since: 2.0
@var sreg_data_fields: The names of the data fields that are listed in
the sreg spec, and a description of them in English
@var sreg_uri: The preferred URI to use for the simple registration
namespace and XRD Type value
"""
from openid.message import registerNamespaceAlias, \
NamespaceAliasRegistrationError
from openid.extension import Extension
from openid import oidutil
# `basestring` exists on Python 2; on very old interpreters (2.2) define it
# from str/unicode so isinstance checks below keep working.
try:
    basestring #pylint:disable-msg=W0104
except NameError:
    # For Python 2.2
    basestring = (str, unicode) #pylint:disable-msg=W0622
__all__ = [
    'SRegRequest',
    'SRegResponse',
    'data_fields',
    'ns_uri',
    'ns_uri_1_0',
    'ns_uri_1_1',
    'supportsSReg',
    ]
# The data fields that are listed in the sreg spec, mapping the wire field
# name to its English display label.
data_fields = {
    'fullname':'Full Name',
    'nickname':'Nickname',
    'dob':'Date of Birth',
    'email':'E-mail Address',
    'gender':'Gender',
    'postcode':'Postal Code',
    'country':'Country',
    'language':'Language',
    'timezone':'Time Zone',
    }
def checkFieldName(field_name):
    """Check to see that the given value is a valid simple
    registration data field name.

    @raise ValueError: if the field name is not a valid simple
        registration data field name
    """
    # Guard-clause form: valid names fall through, anything else raises.
    if field_name in data_fields:
        return
    raise ValueError('%r is not a defined simple registration field' %
                     (field_name,))
# URI used in the wild for Yadis documents advertising simple
# registration support
ns_uri_1_0 = 'http://openid.net/sreg/1.0'
# URI in the draft specification for simple registration 1.1
# <http://openid.net/specs/openid-simple-registration-extension-1_1-01.html>
ns_uri_1_1 = 'http://openid.net/extensions/sreg/1.1'
# This attribute will always hold the preferred URI to use when adding
# sreg support to an XRDS file or in an OpenID namespace declaration.
ns_uri = ns_uri_1_1
# Register the global 'sreg' alias at import time; failure is logged but
# deliberately not fatal.
try:
    registerNamespaceAlias(ns_uri_1_1, 'sreg')
except NamespaceAliasRegistrationError, e:
    oidutil.log('registerNamespaceAlias(%r, %r) failed: %s' % (ns_uri_1_1,
        'sreg', str(e),))
def supportsSReg(endpoint):
    """Does the given endpoint advertise support for simple
    registration?

    @param endpoint: The endpoint object as returned by OpenID discovery
    @type endpoint: openid.consumer.discover.OpenIDEndpoint
    @returns: Whether an sreg type was advertised by the endpoint
    @rtype: bool
    """
    # Check the 1.1 type URI first, falling back to the legacy 1.0 URI
    # (explicit short-circuit, same semantics as `a or b`).
    advertised = endpoint.usesExtension(ns_uri_1_1)
    if not advertised:
        advertised = endpoint.usesExtension(ns_uri_1_0)
    return advertised
class SRegNamespaceError(ValueError):
    """The simple registration namespace was not found and could not
    be created using the expected name (there's another extension
    using the name 'sreg')
    This is not I{illegal}, for OpenID 2, although it probably
    indicates a problem, since it's not expected that other extensions
    will re-use the alias that is in use for OpenID 1.
    If this is an OpenID 1 request, then there is no recourse. This
    should not happen unless some code has modified the namespaces for
    the message that is being processed.
    Subclasses ValueError, so callers catching ValueError still work.
    """
def getSRegNS(message):
    """Extract the simple registration namespace URI from the given
    OpenID message. Handles OpenID 1 and 2, as well as both sreg
    namespace URIs found in the wild, as well as missing namespace
    definitions (for OpenID 1)
    @param message: The OpenID message from which to parse simple
        registration fields. This may be a request or response message.
    @type message: C{L{openid.message.Message}}
    @returns: the sreg namespace URI for the supplied message. The
        message may be modified to define a simple registration
        namespace.
    @rtype: C{str}
    @raise ValueError: when using OpenID 1 if the message defines
        the 'sreg' alias to be something other than a simple
        registration type.
    """
    # See if there exists an alias for one of the two defined simple
    # registration types.
    for sreg_ns_uri in [ns_uri_1_1, ns_uri_1_0]:
        alias = message.namespaces.getAlias(sreg_ns_uri)
        if alias is not None:
            break
    # for/else: the else branch runs only when the loop was NOT broken,
    # i.e. neither type URI had an alias.
    else:
        # There is no alias for either of the types, so try to add
        # one. We default to using the modern value (1.1)
        sreg_ns_uri = ns_uri_1_1
        try:
            message.namespaces.addAlias(ns_uri_1_1, 'sreg')
        except KeyError, why:
            # An alias for the string 'sreg' already exists, but it's
            # defined for something other than simple registration
            raise SRegNamespaceError(why[0])
    # we know that sreg_ns_uri defined, because it's defined in the
    # else clause of the loop as well, so disable the warning
    return sreg_ns_uri #pylint:disable-msg=W0631
class SRegRequest(Extension):
    """An object to hold the state of a simple registration request.
    @ivar required: A list of the required fields in this simple
        registration request
    @type required: [str]
    @ivar optional: A list of the optional fields in this simple
        registration request
    @type optional: [str]
    @ivar policy_url: The policy URL that was provided with the request
    @type policy_url: str or NoneType
    @group Consumer: requestField, requestFields, getExtensionArgs, addToOpenIDRequest
    @group Server: fromOpenIDRequest, parseExtensionArgs
    """
    ns_alias = 'sreg'
    def __init__(self, required=None, optional=None, policy_url=None,
                 sreg_ns_uri=ns_uri):
        """Initialize an empty simple registration request"""
        Extension.__init__(self)
        self.required = []
        self.optional = []
        self.policy_url = policy_url
        self.ns_uri = sreg_ns_uri
        # strict=True so duplicate or unknown field names raise immediately.
        if required:
            self.requestFields(required, required=True, strict=True)
        if optional:
            self.requestFields(optional, required=False, strict=True)
    # Assign getSRegNS to a static method so that it can be
    # overridden for testing.
    _getSRegNS = staticmethod(getSRegNS)
    def fromOpenIDRequest(cls, request):
        """Create a simple registration request that contains the
        fields that were requested in the OpenID request with the
        given arguments
        @param request: The OpenID request
        @type request: openid.server.CheckIDRequest
        @returns: The newly created simple registration request
        @rtype: C{L{SRegRequest}}
        """
        self = cls()
        # Since we're going to mess with namespace URI mapping, don't
        # mutate the object that was passed in.
        message = request.message.copy()
        self.ns_uri = self._getSRegNS(message)
        args = message.getArgs(self.ns_uri)
        self.parseExtensionArgs(args)
        return self
    # Pre-decorator (Python 2.2/2.3 era) spelling of @classmethod.
    fromOpenIDRequest = classmethod(fromOpenIDRequest)
    def parseExtensionArgs(self, args, strict=False):
        """Parse the unqualified simple registration request
        parameters and add them to this object.
        This method is essentially the inverse of
        C{L{getExtensionArgs}}. This method restores the serialized simple
        registration request fields.
        If you are extracting arguments from a standard OpenID
        checkid_* request, you probably want to use C{L{fromOpenIDRequest}},
        which will extract the sreg namespace and arguments from the
        OpenID request. This method is intended for cases where the
        OpenID server needs more control over how the arguments are
        parsed than that method provides.
        >>> args = message.getArgs(ns_uri)
        >>> request.parseExtensionArgs(args)
        @param args: The unqualified simple registration arguments
        @type args: {str:str}
        @param strict: Whether requests with fields that are not
            defined in the simple registration specification should be
            tolerated (and ignored)
        @type strict: bool
        @returns: None; updates this object
        """
        for list_name in ['required', 'optional']:
            required = (list_name == 'required')
            items = args.get(list_name)
            if items:
                for field_name in items.split(','):
                    try:
                        self.requestField(field_name, required, strict)
                    except ValueError:
                        # In non-strict mode, bad fields are silently skipped.
                        if strict:
                            raise
        self.policy_url = args.get('policy_url')
    def allRequestedFields(self):
        """A list of all of the simple registration fields that were
        requested, whether they were required or optional.
        @rtype: [str]
        """
        return self.required + self.optional
    def wereFieldsRequested(self):
        """Have any simple registration fields been requested?
        @rtype: bool
        """
        return bool(self.allRequestedFields())
    def __contains__(self, field_name):
        """Was this field in the request?"""
        return (field_name in self.required or
                field_name in self.optional)
    def requestField(self, field_name, required=False, strict=False):
        """Request the specified field from the OpenID user
        @param field_name: the unqualified simple registration field name
        @type field_name: str
        @param required: whether the given field should be presented
            to the user as being a required to successfully complete
            the request
        @param strict: whether to raise an exception when a field is
            added to a request more than once
        @raise ValueError: when the field requested is not a simple
            registration field or strict is set and the field was
            requested more than once
        """
        checkFieldName(field_name)
        if strict:
            if field_name in self.required or field_name in self.optional:
                raise ValueError('That field has already been requested')
        else:
            # Non-strict: silently ignore duplicates; a re-request with
            # required=True upgrades an optional field to required.
            if field_name in self.required:
                return
            if field_name in self.optional:
                if required:
                    self.optional.remove(field_name)
                else:
                    return
        if required:
            self.required.append(field_name)
        else:
            self.optional.append(field_name)
    def requestFields(self, field_names, required=False, strict=False):
        """Add the given list of fields to the request
        @param field_names: The simple registration data fields to request
        @type field_names: [str]
        @param required: Whether these values should be presented to
            the user as required
        @param strict: whether to raise an exception when a field is
            added to a request more than once
        @raise ValueError: when a field requested is not a simple
            registration field or strict is set and a field was
            requested more than once
        """
        # Guard against the common mistake of passing a single string,
        # which would otherwise be iterated character by character.
        if isinstance(field_names, basestring):
            raise TypeError('Fields should be passed as a list of '
                            'strings (not %r)' % (type(field_names),))
        for field_name in field_names:
            self.requestField(field_name, required, strict=strict)
    def getExtensionArgs(self):
        """Get a dictionary of unqualified simple registration
        arguments representing this request.
        This method is essentially the inverse of
        C{L{parseExtensionArgs}}. This method serializes the simple
        registration request fields.
        @rtype: {str:str}
        """
        args = {}
        if self.required:
            args['required'] = ','.join(self.required)
        if self.optional:
            args['optional'] = ','.join(self.optional)
        if self.policy_url:
            args['policy_url'] = self.policy_url
        return args
class SRegResponse(Extension):
    """Represents the data returned in a simple registration response
    inside of an OpenID C{id_res} response. This object will be
    created by the OpenID server, added to the C{id_res} response
    object, and then extracted from the C{id_res} message by the
    Consumer.
    @ivar data: The simple registration data, keyed by the unqualified
        simple registration name of the field (i.e. nickname is keyed
        by C{'nickname'})
    @ivar ns_uri: The URI under which the simple registration data was
        stored in the response message.
    @group Server: extractResponse
    @group Consumer: fromSuccessResponse
    @group Read-only dictionary interface: keys, iterkeys, items, iteritems,
        __iter__, get, __getitem__, keys, has_key
    """
    ns_alias = 'sreg'
    def __init__(self, data=None, sreg_ns_uri=ns_uri):
        Extension.__init__(self)
        # A None default avoids the shared-mutable-default pitfall; each
        # instance gets its own fresh dict.
        if data is None:
            self.data = {}
        else:
            self.data = data
        self.ns_uri = sreg_ns_uri
    def extractResponse(cls, request, data):
        """Take a C{L{SRegRequest}} and a dictionary of simple
        registration values and create a C{L{SRegResponse}}
        object containing that data.
        @param request: The simple registration request object
        @type request: SRegRequest
        @param data: The simple registration data for this
            response, as a dictionary from unqualified simple
            registration field name to string (unicode) value. For
            instance, the nickname should be stored under the key
            'nickname'.
        @type data: {str:str}
        @returns: a simple registration response object
        @rtype: SRegResponse
        """
        self = cls()
        self.ns_uri = request.ns_uri
        # Only fields that were actually requested make it into the response.
        for field in request.allRequestedFields():
            value = data.get(field)
            if value is not None:
                self.data[field] = value
        return self
    # Pre-decorator (Python 2.2/2.3 era) spelling of @classmethod.
    extractResponse = classmethod(extractResponse)
    # Assign getSRegArgs to a static method so that it can be
    # overridden for testing
    _getSRegNS = staticmethod(getSRegNS)
    def fromSuccessResponse(cls, success_response, signed_only=True):
        """Create a C{L{SRegResponse}} object from a successful OpenID
        library response
        (C{L{openid.consumer.consumer.SuccessResponse}}) response
        message
        @param success_response: A SuccessResponse from consumer.complete()
        @type success_response: C{L{openid.consumer.consumer.SuccessResponse}}
        @param signed_only: Whether to process only data that was
            signed in the id_res message from the server.
        @type signed_only: bool
        @rtype: SRegResponse
        @returns: A simple registration response containing the data
            that was supplied with the C{id_res} response.
        """
        self = cls()
        self.ns_uri = self._getSRegNS(success_response.message)
        if signed_only:
            args = success_response.getSignedNS(self.ns_uri)
        else:
            args = success_response.message.getArgs(self.ns_uri)
        # No sreg arguments at all: signal "no response" with None.
        if not args:
            return None
        for field_name in data_fields:
            if field_name in args:
                self.data[field_name] = args[field_name]
        return self
    fromSuccessResponse = classmethod(fromSuccessResponse)
    def getExtensionArgs(self):
        """Get the fields to put in the simple registration namespace
        when adding them to an id_res message.
        @see: openid.extension
        """
        return self.data
    # Read-only dictionary interface
    def get(self, field_name, default=None):
        """Like dict.get, except that it checks that the field name is
        defined by the simple registration specification"""
        checkFieldName(field_name)
        return self.data.get(field_name, default)
    def items(self):
        """All of the data values in this simple registration response
        """
        return self.data.items()
    def iteritems(self):
        return self.data.iteritems()
    def keys(self):
        return self.data.keys()
    def iterkeys(self):
        return self.data.iterkeys()
    def has_key(self, key):
        return key in self
    def __contains__(self, field_name):
        # Raises ValueError for names outside the sreg specification.
        checkFieldName(field_name)
        return field_name in self.data
    def __iter__(self):
        return iter(self.data)
    def __getitem__(self, field_name):
        checkFieldName(field_name)
        return self.data[field_name]
    def __nonzero__(self):
        # Truthy only when some sreg data was actually returned.
        return bool(self.data)
|
tonydebling/pythonExamples | refs/heads/master | Numbers0.py | 1 | # Numbers0.py
#
# Run this program
# Do you get the result that you expect?
# Can you explain why?
print("**** First Example ****")
number1 = input("Enter your first number: ")
number2 = input("Enter your second number: ")
print("Answer = ", number1 + number2)
|
MisterTea/HyperNEAT | refs/heads/master | boost_1_57_0/tools/build/test/using.py | 6 | #!/usr/bin/python
# Copyright (C) Vladimir Prus 2005.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", "using some_tool ;")
t.write("some_tool.jam", """\
import project ;
project.initialize $(__name__) ;
rule init ( ) { }
""")
t.write("some_tool.py", """\
from b2.manager import get_manager
get_manager().projects().initialize(__name__)
def init():
pass
""")
t.write("sub/a.cpp", "int main() {}\n")
t.write("sub/jamfile.jam", "exe a : a.cpp ;")
t.run_build_system(subdir="sub")
t.expect_addition("sub/bin/$toolset/debug/a.exe")
t.cleanup()
|
takeshineshiro/nova | refs/heads/master | nova/tests/functional/v3/test_keypairs.py | 3 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from nova.objects import keypair as keypair_obj
from nova.tests.functional.v3 import api_sample_base
from nova.tests.unit import fake_crypto
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
request_api_version = None
sample_dir = "keypairs"
expected_delete_status_code = 202
expected_post_status_code = 200
_api_version = 'v2'
def _get_flags(self):
f = super(KeyPairsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.keypairs.Keypairs')
return f
def generalize_subs(self, subs, vanilla_regexes):
subs['keypair_name'] = 'keypair-[0-9a-f-]+'
return subs
def test_keypairs_post(self):
return self._check_keypairs_post()
def _check_keypairs_post(self, **kwargs):
"""Get api sample of key pairs post request."""
key_name = 'keypair-' + str(uuid.uuid4())
subs = dict(keypair_name=key_name, **kwargs)
response = self._do_post('os-keypairs', 'keypairs-post-req', subs,
api_version=self.request_api_version)
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-post-resp', subs, response,
self.expected_post_status_code)
# NOTE(maurosr): return the key_name is necessary cause the
# verification returns the label of the last compared information in
# the response, not necessarily the key name.
return key_name
def test_keypairs_import_key_post(self):
public_key = fake_crypto.get_ssh_public_key()
self._check_keypairs_import_key_post(public_key)
def _check_keypairs_import_key_post(self, public_key, **kwargs):
# Get api sample of key pairs post to import user's key.
key_name = 'keypair-' + str(uuid.uuid4())
subs = {
'keypair_name': key_name,
'public_key': public_key
}
subs.update(**kwargs)
response = self._do_post('os-keypairs', 'keypairs-import-post-req',
subs, api_version=self.request_api_version)
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-import-post-resp', subs, response,
self.expected_post_status_code)
def test_keypairs_list(self):
# Get api sample of key pairs list request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs',
api_version=self.request_api_version)
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-list-resp', subs, response, 200)
    def test_keypairs_get(self):
        # Get api sample of key pairs get request.
        # Create a keypair first so there is something to fetch.
        key_name = self.test_keypairs_post()
        response = self._do_get('os-keypairs/%s' % key_name,
                                api_version=self.request_api_version)
        subs = self._get_regexes()
        subs['keypair_name'] = '(%s)' % key_name
        self._verify_response('keypairs-get-resp', subs, response, 200)
    def test_keypairs_delete(self):
        # Get api sample of key pairs delete request.
        key_name = self.test_keypairs_post()
        response = self._do_delete('os-keypairs/%s' % key_name,
                                   api_version=self.request_api_version)
        # DELETE has no body; only the status code varies per microversion.
        self.assertEqual(self.expected_delete_status_code,
                         response.status_code)
class KeyPairsV22SampleJsonTest(KeyPairsSampleJsonTest):
    """API samples for os-keypairs at microversion 2.2 (typed keypairs)."""
    request_api_version = '2.2'
    expected_post_status_code = 201
    expected_delete_status_code = 204
    # NOTE(gmann): microversion tests do not need to run for v2 API
    # so defining scenarios only for v2.2 which will run the original tests
    # by appending '(v2_2)' in test_id.
    scenarios = [('v2_2', {})]
    _api_version = 'v2'
    def test_keypairs_post(self):
        # NOTE(claudiub): overrides the method with the same name in
        # KeypairsSampleJsonTest, as it is used by other tests.
        return self._check_keypairs_post(
            keypair_type=keypair_obj.KEYPAIR_TYPE_SSH)
    def test_keypairs_post_x509(self):
        return self._check_keypairs_post(
            keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
    def test_keypairs_post_invalid(self):
        # An unknown keypair type must be rejected with 400.
        key_name = 'keypair-' + str(uuid.uuid4())
        subs = dict(keypair_name=key_name, keypair_type='fakey_type')
        response = self._do_post('os-keypairs', 'keypairs-post-req', subs,
                                 api_version=self.request_api_version)
        self.assertEqual(400, response.status_code)
    def test_keypairs_import_key_post(self):
        # NOTE(claudiub): overrides the method with the same name in
        # KeypairsSampleJsonTest, since the API sample expects a keypair_type.
        public_key = fake_crypto.get_ssh_public_key()
        self._check_keypairs_import_key_post(
            public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_SSH)
    def test_keypairs_import_key_post_x509(self):
        public_key = fake_crypto.get_x509_cert_and_fingerprint()[0]
        # Escape real newlines so the certificate fits the JSON template.
        public_key = public_key.replace('\n', '\\n')
        self._check_keypairs_import_key_post(
            public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
    def _check_keypairs_import_key_post_invalid(self, keypair_type):
        key_name = 'keypair-' + str(uuid.uuid4())
        subs = {
            'keypair_name': key_name,
            'keypair_type': keypair_type,
            'public_key': fake_crypto.get_ssh_public_key()
        }
        response = self._do_post('os-keypairs', 'keypairs-import-post-req',
                                 subs, api_version=self.request_api_version)
        self.assertEqual(400, response.status_code)
    def test_keypairs_import_key_post_invalid_type(self):
        self._check_keypairs_import_key_post_invalid(
            keypair_type='fakey_type')
    def test_keypairs_import_key_post_invalid_combination(self):
        # SSH key material posted with an x509 type must fail validation.
        self._check_keypairs_import_key_post_invalid(
            keypair_type=keypair_obj.KEYPAIR_TYPE_X509)
class KeyPairsV210SampleJsonTest(KeyPairsSampleJsonTest):
    """API samples for microversion 2.10: admin keypair management on
    behalf of other users (user_id parameter)."""
    ADMIN_API = True
    request_api_version = '2.10'
    expected_post_status_code = 201
    expected_delete_status_code = 204
    scenarios = [('v2_10', {})]
    _api_version = 'v2'
    def test_keypair_create_for_user(self):
        # Admins may create a keypair owned by another user.
        subs = {
            'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
            'public_key': fake_crypto.get_ssh_public_key(),
            'user_id': "fake"
        }
        self._check_keypairs_post(**subs)
    def test_keypairs_post(self):
        return self._check_keypairs_post(
            keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
            user_id="admin")
    def test_keypairs_import_key_post(self):
        # NOTE(claudiub): overrides the method with the same name in
        # KeypairsSampleJsonTest, since the API sample expects a keypair_type.
        public_key = fake_crypto.get_ssh_public_key()
        self._check_keypairs_import_key_post(
            public_key, keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
            user_id="fake")
    def test_keypairs_delete_for_user(self):
        # Delete a keypair on behalf of a user
        subs = {
            'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
            'public_key': fake_crypto.get_ssh_public_key(),
            'user_id': "fake"
        }
        key_name = self._check_keypairs_post(**subs)
        response = self._do_delete('os-keypairs/%s?user_id=fake' % key_name,
                                   api_version=self.request_api_version)
        self.assertEqual(self.expected_delete_status_code,
                         response.status_code)
class KeyPairsV210SampleJsonTestNotAdmin(KeyPairsV210SampleJsonTest):
    """Re-runs the 2.10 samples without admin rights."""
    ADMIN_API = False
    def test_keypairs_post(self):
        return self._check_keypairs_post(
            keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
            user_id="fake")
    def test_keypairs_post_for_other_user(self):
        # Non-admins may not create keypairs for another user: expect 403.
        key_name = 'keypair-' + str(uuid.uuid4())
        subs = dict(keypair_name=key_name,
                    keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
                    user_id='fake1')
        response = self._do_post('os-keypairs', 'keypairs-post-req', subs,
                                 api_version=self.request_api_version,
                                 )
        self.assertEqual(403, response.status_code)
|
MatiasBjorling/linux | refs/heads/master | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
# Positional args: [comm] filters by command name, [interval] sets the
# refresh period in seconds; more than two args prints usage and exits.
if len(sys.argv) > 3:
	sys.exit(usage)
if len(sys.argv) > 2:
	for_comm = sys.argv[1]
	interval = int(sys.argv[2])
elif len(sys.argv) > 1:
	try:
		# A lone numeric argument is the interval...
		interval = int(sys.argv[1])
	except ValueError:
		# ...otherwise it is the command name to filter on.
		for_comm = sys.argv[1]
		interval = default_interval
# Nested autodict: syscall id -> running count since the last refresh.
syscalls = autodict()
def trace_begin():
	# Spawn the display thread; it refreshes the totals every `interval`
	# seconds while the main thread keeps counting trace events.
	thread.start_new_thread(print_syscall_totals, (interval,))
	pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# perf callback invoked for every syscall entry event.
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# First hit for this id: the autodict node is not yet a number.
		syscalls[id] = 1
def print_syscall_totals(interval):
	# Display-thread loop: every `interval` seconds clear the screen,
	# print per-syscall counts in descending order, then reset them.
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",
		print "%-40s %10s\n" % ("event", "count"),
		print "%-40s %10s\n" % ("----------------------------------------", \
					 "----------"),
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
					  reverse = True):
			try:
				print "%-40s %10d\n" % (syscall_name(id), val),
			except TypeError:
				# syscall_name() may fail for unknown ids; skip those rows.
				pass
		syscalls.clear()
		time.sleep(interval)
|
meabsence/python-for-android | refs/heads/master | python-build/python-libs/gdata/tests/gdata_tests/docs_test.py | 89 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Module authors (gdata convention: "username (Real Name)").
__author__ = ('api.jfisher (Jeff Fisher), '
              'api.eric@google.com (Eric Bidelman)')
import unittest
from gdata import test_data
import gdata.docs
class DocumentListEntryTest(unittest.TestCase):
  """Round-trips a DocumentListEntry through its XML representation."""
  def setUp(self):
    self.dl_entry = gdata.docs.DocumentListEntryFromString(
        test_data.DOCUMENT_LIST_ENTRY)
  def testToAndFromStringWithData(self):
    # Re-parse the serialized entry; every field must survive the round trip.
    entry = gdata.docs.DocumentListEntryFromString(str(self.dl_entry))
    self.assertEqual(entry.author[0].name.text, 'test.user')
    self.assertEqual(entry.author[0].email.text, 'test.user@gmail.com')
    self.assertEqual(entry.GetDocumentType(), 'spreadsheet')
    self.assertEqual(entry.id.text,
        'http://docs.google.com/feeds/documents/private/full/' +\
        'spreadsheet%3Asupercalifragilisticexpealidocious')
    self.assertEqual(entry.title.text,'Test Spreadsheet')
    self.assertEqual(entry.resourceId.text,
        'spreadsheet:supercalifragilisticexpealidocious')
    self.assertEqual(entry.lastModifiedBy.name.text,'test.user')
    self.assertEqual(entry.lastModifiedBy.email.text,'test.user@gmail.com')
    self.assertEqual(entry.lastViewed.text,'2009-03-05T07:48:21.493Z')
    self.assertEqual(entry.writersCanInvite.value, 'true')
class DocumentListFeedTest(unittest.TestCase):
  """Parsing tests for a two-entry DocumentListFeed."""
  def setUp(self):
    self.dl_feed = gdata.docs.DocumentListFeedFromString(
        test_data.DOCUMENT_LIST_FEED)
  def testToAndFromString(self):
    # Both the parsed feed and its serialize/re-parse copy must contain
    # DocumentListEntry instances.
    self.assert_(len(self.dl_feed.entry) == 2)
    for an_entry in self.dl_feed.entry:
      self.assert_(isinstance(an_entry, gdata.docs.DocumentListEntry))
    new_dl_feed = gdata.docs.DocumentListFeedFromString(str(self.dl_feed))
    for an_entry in new_dl_feed.entry:
      self.assert_(isinstance(an_entry, gdata.docs.DocumentListEntry))
  def testConvertActualData(self):
    for an_entry in self.dl_feed.entry:
      self.assertEqual(an_entry.author[0].name.text, 'test.user')
      self.assertEqual(an_entry.author[0].email.text, 'test.user@gmail.com')
      self.assertEqual(an_entry.lastModifiedBy.name.text, 'test.user')
      self.assertEqual(an_entry.lastModifiedBy.email.text,
                       'test.user@gmail.com')
      self.assertEqual(an_entry.lastViewed.text,'2009-03-05T07:48:21.493Z')
      # The test feed mixes one spreadsheet entry and one document entry.
      if(an_entry.GetDocumentType() == 'spreadsheet'):
        self.assertEqual(an_entry.title.text, 'Test Spreadsheet')
        self.assertEqual(an_entry.writersCanInvite.value, 'true')
      elif(an_entry.GetDocumentType() == 'document'):
        self.assertEqual(an_entry.title.text, 'Test Document')
        self.assertEqual(an_entry.writersCanInvite.value, 'false')
  def testLinkFinderFindsLinks(self):
    for entry in self.dl_feed.entry:
      # All Document List entries should have a self link
      self.assert_(entry.GetSelfLink() is not None)
      # All Document List entries should have an HTML link
      self.assert_(entry.GetHtmlLink() is not None)
      self.assert_(entry.feedLink.href is not None)
class DocumentListAclEntryTest(unittest.TestCase):
  """Construction and round-trip tests for DocumentListAclEntry."""
  def setUp(self):
    self.acl_entry = gdata.docs.DocumentListAclEntryFromString(
        test_data.DOCUMENT_LIST_ACL_ENTRY)
  def testToAndFromString(self):
    self.assert_(isinstance(self.acl_entry, gdata.docs.DocumentListAclEntry))
    self.assert_(isinstance(self.acl_entry.role, gdata.docs.Role))
    self.assert_(isinstance(self.acl_entry.scope, gdata.docs.Scope))
    self.assertEqual(self.acl_entry.scope.value, 'user@gmail.com')
    self.assertEqual(self.acl_entry.scope.type, 'user')
    self.assertEqual(self.acl_entry.role.value, 'writer')
    # Serialize and re-parse; all fields must be preserved.
    acl_entry_str = str(self.acl_entry)
    new_acl_entry = gdata.docs.DocumentListAclEntryFromString(acl_entry_str)
    self.assert_(isinstance(new_acl_entry, gdata.docs.DocumentListAclEntry))
    self.assert_(isinstance(new_acl_entry.role, gdata.docs.Role))
    self.assert_(isinstance(new_acl_entry.scope, gdata.docs.Scope))
    self.assertEqual(new_acl_entry.scope.value, self.acl_entry.scope.value)
    self.assertEqual(new_acl_entry.scope.type, self.acl_entry.scope.type)
    self.assertEqual(new_acl_entry.role.value, self.acl_entry.role.value)
  def testCreateNewAclEntry(self):
    # Build an ACL entry from scratch rather than parsing test data.
    cat = gdata.atom.Category(
        term='http://schemas.google.com/acl/2007#accessRule',
        scheme='http://schemas.google.com/g/2005#kind')
    acl_entry = gdata.docs.DocumentListAclEntry(category=[cat])
    acl_entry.scope = gdata.docs.Scope(value='user@gmail.com', type='user')
    acl_entry.role = gdata.docs.Role(value='writer')
    self.assert_(isinstance(acl_entry, gdata.docs.DocumentListAclEntry))
    self.assert_(isinstance(acl_entry.role, gdata.docs.Role))
    self.assert_(isinstance(acl_entry.scope, gdata.docs.Scope))
    self.assertEqual(acl_entry.scope.value, 'user@gmail.com')
    self.assertEqual(acl_entry.scope.type, 'user')
    self.assertEqual(acl_entry.role.value, 'writer')
class DocumentListAclFeedTest(unittest.TestCase):
  """Parsing tests for a two-entry ACL feed (owner + domain writer)."""
  def setUp(self):
    self.feed = gdata.docs.DocumentListAclFeedFromString(
        test_data.DOCUMENT_LIST_ACL_FEED)
  def testToAndFromString(self):
    for entry in self.feed.entry:
      self.assert_(isinstance(entry, gdata.docs.DocumentListAclEntry))
    feed = gdata.docs.DocumentListAclFeedFromString(str(self.feed))
    for entry in feed.entry:
      self.assert_(isinstance(entry, gdata.docs.DocumentListAclEntry))
  def testConvertActualData(self):
    entries = self.feed.entry
    self.assert_(len(entries) == 2)
    # First entry: per-user owner permission.
    self.assertEqual(entries[0].title.text,
        'Document Permission - user@gmail.com')
    self.assertEqual(entries[0].role.value, 'owner')
    self.assertEqual(entries[0].scope.type, 'user')
    self.assertEqual(entries[0].scope.value, 'user@gmail.com')
    self.assert_(entries[0].GetSelfLink() is not None)
    self.assert_(entries[0].GetEditLink() is not None)
    # Second entry: domain-wide writer permission.
    self.assertEqual(entries[1].title.text,
        'Document Permission - user2@google.com')
    self.assertEqual(entries[1].role.value, 'writer')
    self.assertEqual(entries[1].scope.type, 'domain')
    self.assertEqual(entries[1].scope.value, 'google.com')
    self.assert_(entries[1].GetSelfLink() is not None)
    self.assert_(entries[1].GetEditLink() is not None)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
coderb0t/CouchPotatoServer | refs/heads/master | couchpotato/core/media/_base/providers/nzb/newznab.py | 27 | from urlparse import urlparse
import time
import traceback
import re
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import cleanHost, splitString, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.base import ResultList
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
from requests import HTTPError
# Module-level CouchPotato logger.
log = CPLog(__name__)
class Base(NZBProvider, RSS):
    """Shared implementation for newznab-compatible NZB indexers.

    One instance serves several hosts; each host entry (see getHosts())
    carries its own API key, extra score and custom tag.
    """
    # URL fragments appended to the host's API base (see getUrl()).
    urls = {
        'detail': 'details/%s',
        'download': 't=get&id=%s'
    }
    # Case-insensitive words that introduce a password in a spot description.
    passwords_regex = 'password|wachtwoord'
    # host -> timestamp of the moment its API/download limit was hit.
    limits_reached = {}
    http_time_between_calls = 2 # Seconds
    def search(self, media, quality):
        """Query every enabled host and merge results into one ResultList."""
        hosts = self.getHosts()
        results = ResultList(self, media, quality, imdb_results = True)
        for host in hosts:
            if self.isDisabled(host):
                continue
            self._searchOnHost(host, media, quality, results)
        return results
    def _searchOnHost(self, host, media, quality, results):
        """Fetch the RSS search feed of one host and append parsed releases."""
        query = self.buildUrl(media, host)
        url = '%s%s' % (self.getUrl(host['host']), query)
        nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
        for nzb in nzbs:
            date = None
            spotter = None
            # Scan the newznab attribute elements for the post date and,
            # for Spotweb feeds, the poster name.
            for item in nzb:
                if date and spotter:
                    break
                if item.attrib.get('name') == 'usenetdate':
                    date = item.attrib.get('value')
                    break
                # Get the name of the person who posts the spot
                if item.attrib.get('name') == 'poster':
                    if "@spot.net" in item.attrib.get('value'):
                        spotter = item.attrib.get('value').split("@")[0]
                        continue
            if not date:
                date = self.getTextElement(nzb, 'pubDate')
            name = self.getTextElement(nzb, 'title')
            detail_url = self.getTextElement(nzb, 'guid')
            nzb_id = detail_url.split('/')[-1:].pop()
            # Relative guid: build an absolute detail URL from the host.
            if '://' not in detail_url:
                detail_url = (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id)
            if not name:
                continue
            name_extra = ''
            if spotter:
                name_extra = spotter
            description = ''
            # Spotweb posts may carry an archive password in their extended
            # description; best effort only — failures are just logged.
            if "@spot.net" in nzb_id:
                try:
                    # Get details for extended description to retrieve passwords
                    query = self.buildDetailsUrl(nzb_id, host['api_key'])
                    url = '%s%s' % (self.getUrl(host['host']), query)
                    nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0]
                    description = self.getTextElement(nzb_details, 'description')
                    # Extract a password from the description
                    password = re.search('(?:' + self.passwords_regex + ')(?: *)(?:\:|\=)(?: *)(.*?)\<br\>|\n|$', description, flags = re.I).group(1)
                    if password:
                        name += ' {{%s}}' % password.strip()
                except:
                    log.debug('Error getting details of "%s": %s', (name, traceback.format_exc()))
            results.append({
                'id': nzb_id,
                'provider_extra': urlparse(host['host']).hostname or host['host'],
                'name': toUnicode(name),
                'name_extra': name_extra,
                'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
                'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
                'detail_url': detail_url,
                'content': self.getTextElement(nzb, 'description'),
                'description': description,
                'score': host['extra_score'],
            })
    def getHosts(self):
        """Zip the parallel comma-separated settings into one dict per host.

        The use/host/api_key/extra_score/custom_tag options line up by
        position; missing positions fall back to empty/zero values.
        """
        uses = splitString(str(self.conf('use')), clean = False)
        hosts = splitString(self.conf('host'), clean = False)
        api_keys = splitString(self.conf('api_key'), clean = False)
        extra_score = splitString(self.conf('extra_score'), clean = False)
        custom_tags = splitString(self.conf('custom_tag'), clean = False)
        list = []
        for nr in range(len(hosts)):
            try: key = api_keys[nr]
            except: key = ''
            try: host = hosts[nr]
            except: host = ''
            try: score = tryInt(extra_score[nr])
            except: score = 0
            try: custom_tag = custom_tags[nr]
            except: custom_tag = ''
            list.append({
                'use': uses[nr],
                'host': host,
                'api_key': key,
                'extra_score': score,
                'custom_tag': custom_tag
            })
        return list
    def belongsTo(self, url, provider = None, host = None):
        """Return truthy when `url` belongs to any of the configured hosts."""
        hosts = self.getHosts()
        for host in hosts:
            result = super(Base, self).belongsTo(url, host = host['host'], provider = provider)
            if result:
                return result
    def getUrl(self, host):
        """Return the API base URL for a host, Spotweb-style or newznab-style."""
        if '?page=newznabapi' in host:
            return cleanHost(host)[:-1] + '&'
        return cleanHost(host) + 'api?'
    def isDisabled(self, host = None):
        return not self.isEnabled(host)
    def isEnabled(self, host = None):
        # Return true if at least one is enabled and no host is given
        if host is None:
            for host in self.getHosts():
                if self.isEnabled(host):
                    return True
            return False
        return NZBProvider.isEnabled(self) and host['host'] and host['api_key'] and int(host['use'])
    def getApiExt(self, host):
        """Return the apikey query-string suffix for a host."""
        return '&apikey=%s' % host['api_key']
    def download(self, url = '', nzb_id = ''):
        """Download an NZB; return its data or 'try_next' on (rate) failure."""
        host = urlparse(url).hostname
        if self.limits_reached.get(host):
            # Try again in 3 hours
            if self.limits_reached[host] > time.time() - 10800:
                return 'try_next'
        try:
            data = self.urlopen(url, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
            self.limits_reached[host] = False
            return data
        except HTTPError as e:
            sc = e.response.status_code
            # 429 / 503 with a limit message marks the host rate-limited.
            if sc in [503, 429]:
                response = e.read().lower()
                if sc == 429 or 'maximum api' in response or 'download limit' in response:
                    if not self.limits_reached.get(host):
                        log.error('Limit reached / to many requests for newznab provider: %s', host)
                    self.limits_reached[host] = time.time()
                    return 'try_next'
            log.error('Failed download from %s: %s', (host, traceback.format_exc()))
        return 'try_next'
    def buildDetailsUrl(self, nzb_id, api_key):
        """Build the query string of the `details` API call for one release."""
        query = tryUrlencode({
            't': 'details',
            'id': nzb_id,
            'apikey': api_key,
        })
        return query
config = [{
'name': 'newznab',
'groups': [
{
'tab': 'searcher',
'list': 'nzb_providers',
'name': 'newznab',
'order': 10,
'description': 'Enable <a href="http://newznab.com/" target="_blank">NewzNab</a> such as <a href="https://nzb.su" target="_blank">NZB.su</a>, \
<a href="https://nzbs.org" target="_blank">NZBs.org</a>, <a href="http://dognzb.cr/" target="_blank">DOGnzb.cr</a>, \
<a href="https://github.com/spotweb/spotweb" target="_blank">Spotweb</a>, <a href="https://nzbgeek.info/" target="_blank">NZBGeek</a>, \
<a href="https://www.nzbfinder.ws" target="_blank">NZBFinder</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEVjhwD///86aRovd/sBAAAAMklEQVQI12NgAIPQUCCRmQkjssDEShiRuRIqwZqZGcDAGBrqANUhGgIkWAOABKMDxCAA24UK50b26SAAAAAASUVORK5CYII=',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': True,
},
{
'name': 'use',
'default': '0,0,0,0,0'
},
{
'name': 'host',
'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://api.nzbgeek.info,https://www.nzbfinder.ws',
'description': 'The hostname of your newznab provider',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'default': '0,0,0,0,0',
'description': 'Starting score for each release found via this provider.',
},
{
'name': 'custom_tag',
'advanced': True,
'label': 'Custom tag',
'default': ',,,,',
'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org',
},
{
'name': 'api_key',
'default': ',,,,',
'label': 'Api Key',
'description': 'Can be found on your profile page',
'type': 'combined',
'combine': ['use', 'host', 'api_key', 'extra_score', 'custom_tag'],
},
],
},
],
}]
|
psfblair/FrameworkBenchmarks | refs/heads/master | frameworks/Python/falcon/app.py | 65 | #!/usr/bin/env python
import json
import falcon
# resource endpoints
class JSONResource(object):
    """Falcon resource backing the /json benchmark endpoint."""
    def on_get(self, request, response):
        """Respond with the serialized hello-world message."""
        payload = {'message': "Hello, world!"}
        response.body = json.dumps(payload)
class PlaintextResource(object):
    """Falcon resource backing the /plaintext benchmark endpoint."""
    def on_get(self, request, response):
        """Respond with a raw bytes body, explicitly typed as text/plain."""
        response.body = b'Hello, world!'
        response.set_header('Content-Type', 'text/plain')
# setup: wire both resources into the WSGI application object that the
# benchmark server imports as `app`.
app = falcon.API()
app.add_route("/json", JSONResource())
app.add_route("/plaintext", PlaintextResource())
# entry point for debugging
if __name__ == "__main__":
    from wsgiref import simple_server
    # wsgiref is dev-only; production runs `app` under a real WSGI server.
    httpd = simple_server.make_server('localhost', 8080, app)
    httpd.serve_forever()
|
boooka/GeoPowerOff | refs/heads/master | venv/lib/python2.7/site-packages/annoying/middlewares.py | 12 | import re
from django.conf import settings
from django.views.static import serve
from django.shortcuts import redirect
from .exceptions import Redirect
class StaticServe(object):
    """
    Django middleware for serving static files instead of using urls.py
    (active only while ``settings.DEBUG`` is on).
    """
    regex = re.compile(r'^%s(?P<path>.*)$' % settings.MEDIA_URL)
    def process_request(self, request):
        # Never serve media this way outside of development.
        if not settings.DEBUG:
            return None
        matched = self.regex.search(request.path)
        if matched is None:
            return None
        return serve(request, matched.group(1), settings.MEDIA_ROOT)
class RedirectMiddleware(object):
    """
    Turns the ``Redirect`` exception into an HTTP redirect response.
    Add this middleware to MIDDLEWARE_CLASSES; all arguments passed to
    ``Redirect`` are forwarded to Django's built-in ``redirect`` shortcut.
    """
    def process_exception(self, request, exception):
        # Any other exception type is left for Django's normal handling.
        if isinstance(exception, Redirect):
            return redirect(*exception.args, **exception.kwargs)
        return None
|
Endika/edx-platform | refs/heads/master | lms/djangoapps/course_api/tests/mixins.py | 58 | """
Common mixins for Course API Tests
"""
from datetime import datetime
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import ToyCourseFactory
# Password shared by every user created through CourseApiFactoryMixin.
TEST_PASSWORD = u'edx'
class CourseApiFactoryMixin(object):
    """
    Mixin to allow creation of test courses and users.
    """
    @staticmethod
    def create_course(**kwargs):
        """
        Create a course for use in test cases
        """
        # Fixed dates are deliberately in the past so the course is over
        # and enrollment is closed, giving tests a deterministic state.
        return ToyCourseFactory.create(
            end=datetime(2015, 9, 19, 18, 0, 0),
            enrollment_start=datetime(2015, 6, 15, 0, 0, 0),
            enrollment_end=datetime(2015, 7, 15, 0, 0, 0),
            emit_signals=True,
            **kwargs
        )
    @staticmethod
    def create_user(username, is_staff):
        """
        Create a user as identified by username, email, password and is_staff.
        """
        # Email is derived from the username; password is the shared
        # TEST_PASSWORD constant so tests can log the user in.
        return UserFactory(
            username=username,
            email=u'{}@example.com'.format(username),
            password=TEST_PASSWORD,
            is_staff=is_staff
        )
carlgao/lenga | refs/heads/master | images/lenny64-peon/usr/share/python-support/python-pygments/pygments/styles/emacs.py | 24 | # -*- coding: utf-8 -*-
"""
pygments.styles.emacs
~~~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by Emacs.
:copyright: 2006-2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class EmacsStyle(Style):
    """
    The default style (inspired by Emacs 22).
    """
    background_color = "#f8f8f8"
    default_style = ""
    # Maps Pygments token types to style definition strings
    # ("bold"/"italic" modifiers plus hex colors).
    styles = {
        Whitespace:                "#bbbbbb",
        Comment:                   "italic #008800",
        Comment.Preproc:           "noitalic",
        Comment.Special:           "noitalic bold",
        Keyword:                   "bold #AA22FF",
        Keyword.Pseudo:            "nobold",
        Keyword.Type:              "bold #00BB00",
        Operator:                  "#666666",
        Operator.Word:             "bold #AA22FF",
        Name.Builtin:              "#AA22FF",
        Name.Function:             "#00A000",
        Name.Class:                "#0000FF",
        Name.Namespace:            "bold #0000FF",
        Name.Exception:            "bold #D2413A",
        Name.Variable:             "#B8860B",
        Name.Constant:             "#880000",
        Name.Label:                "#A0A000",
        Name.Entity:               "bold #999999",
        Name.Attribute:            "#BB4444",
        Name.Tag:                  "bold #008000",
        Name.Decorator:            "#AA22FF",
        String:                    "#BB4444",
        String.Doc:                "italic",
        String.Interpol:           "bold #BB6688",
        String.Escape:             "bold #BB6622",
        String.Regex:              "#BB6688",
        String.Symbol:             "#B8860B",
        String.Other:              "#008000",
        Number:                    "#666666",
        Generic.Heading:           "bold #000080",
        Generic.Subheading:        "bold #800080",
        Generic.Deleted:           "#A00000",
        Generic.Inserted:          "#00A000",
        Generic.Error:             "#FF0000",
        Generic.Emph:              "italic",
        Generic.Strong:            "bold",
        Generic.Prompt:            "bold #000080",
        Generic.Output:            "#888",
        Generic.Traceback:         "#04D",
        Error:                     "border:#FF0000"
    }
totcoindev/totcoin | refs/heads/master | qa/rpc-tests/receivedby.py | 32 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_sub_array_from_array(object_array, to_match):
    '''
    Find and return the first element of object_array whose entries match
    every key/value pair in to_match.

    to_match should be a unique identifier of a sub array. Returns [] when
    nothing matches, so callers can keep testing emptiness via len().
    '''
    for item in object_array:
        # all() short-circuits on the first mismatched key, unlike the
        # previous version which kept comparing the remaining keys.
        if all(item[key] == value for key, value in to_match.items()):
            return item
    return []
class ReceivedByTest(BitcoinTestFramework):
    """Exercises the (get|list)receivedby(address|account) wallet RPCs."""
    def setup_nodes(self):
        #This test requires mocktime
        enable_mocktime()
        return start_nodes(4, self.options.tmpdir)
    def run_test(self):
        '''
        listreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()
        #Check not listed in listreceivedbyaddress because has 0 confirmations
        assert_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           { },
                           True)
        #Bury Tx under 10 block so it will be returned by listreceivedbyaddress
        self.nodes[1].generate(10)
        self.sync_all()
        assert_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confidence < 10
        assert_array_result(self.nodes[1].listreceivedbyaddress(5),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confidence > 10, should not find Tx
        assert_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
        #Empty Tx
        addr = self.nodes[1].getnewaddress()
        assert_array_result(self.nodes[1].listreceivedbyaddress(0,True),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
        '''
        getreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()
        #Check balance is 0 because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        #Check balance is 0.1
        balance = self.nodes[1].getreceivedbyaddress(addr,0)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        #Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
        self.nodes[1].generate(10)
        self.sync_all()
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        '''
        listreceivedbyaccount + getreceivedbyaccount Test
        '''
        #set pre-state
        addrArr = self.nodes[1].getnewaddress()
        account = self.nodes[1].getaccount(addrArr)
        received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")
        balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)
        # NOTE(review): this sends to `addr` (leftover from the section
        # above), not to `addrArr`; both land in the default "" account so
        # the checks pass, but confirm this was intentional.
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()
        # listreceivedbyaccount should return received_by_account_json because of 0 confirmations
        assert_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           received_by_account_json)
        # getreceivedbyaddress should return same balance because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account:
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
        self.nodes[1].generate(10)
        self.sync_all()
        # listreceivedbyaccount should return updated account balance
        assert_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           {"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
        # getreceivedbyaddress should return updates balance
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account + Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
        #Create a new account named "mynewaccount" that has a 0 balance
        self.nodes[1].getaccountaddress("mynewaccount")
        received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")
        # Test includeempty of listreceivedbyaccount
        if received_by_account_json["amount"] != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
        # Test getreceivedbyaccount for 0 amount accounts
        balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
# Allow running this functional test directly.
if __name__ == '__main__':
    ReceivedByTest().main()
|
sergiohgz/incubator-airflow | refs/heads/master | tests/utils/test_cli_util.py | 15 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import unittest
from argparse import Namespace
from contextlib import contextmanager
from datetime import datetime
from airflow.utils import cli, cli_action_loggers
class CliUtilTest(unittest.TestCase):
    """Tests for the helpers in ``airflow.utils.cli``."""

    def test_metrics_build(self):
        """``cli._build_metrics`` should capture user, command and task metadata."""
        func_name = 'test'
        exec_date = datetime.utcnow()
        ns = Namespace(dag_id='foo', task_id='bar',
                       subcommand='test', execution_date=exec_date)
        metrics = cli._build_metrics(func_name, ns)

        expected = {'user': os.environ.get('USER'),
                    'sub_command': 'test',
                    'dag_id': 'foo',
                    'task_id': 'bar',
                    'execution_date': exec_date}
        for k, v in expected.items():
            # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
            self.assertEqual(v, metrics.get(k))

        self.assertTrue(metrics.get('start_datetime') <= datetime.utcnow())
        self.assertTrue(metrics.get('full_command'))

        # The metrics carry a Log DAO mirroring the same metadata.
        log_dao = metrics.get('log')
        self.assertTrue(log_dao)
        self.assertEqual(log_dao.dag_id, metrics.get('dag_id'))
        self.assertEqual(log_dao.task_id, metrics.get('task_id'))
        self.assertEqual(log_dao.execution_date, metrics.get('execution_date'))
        self.assertEqual(log_dao.owner, metrics.get('user'))

    def test_fail_function(self):
        """
        Actual function is failing and fail needs to be propagated.
        :return:
        """
        with self.assertRaises(NotImplementedError):
            fail_func(Namespace())

    def test_success_function(self):
        """
        Test success function but with failing callback.
        In this case, failure should not propagate.
        :return:
        """
        with fail_action_logger_callback():
            success_func(Namespace())
@contextmanager
def fail_action_logger_callback():
    """
    Register a failing pre-execution callback, restoring the original
    callback list when the context exits.

    The restore is done in a ``finally`` block so the module-global
    callback registry is not left polluted when the ``with`` body raises
    (the original implementation skipped the restore in that case).

    :return: context manager yielding nothing.
    """
    tmp = cli_action_loggers.__pre_exec_callbacks[:]

    def fail_callback(**_):
        raise NotImplementedError

    cli_action_loggers.register_pre_exec_callback(fail_callback)
    try:
        yield
    finally:
        cli_action_loggers.__pre_exec_callbacks = tmp
# Minimal action-logged functions used as fixtures by the tests above.
@cli.action_logging
def fail_func(_):
    # Always raises so tests can assert that failures propagate through
    # the action_logging decorator.
    raise NotImplementedError


@cli.action_logging
def success_func(_):
    # No-op; succeeds so tests can exercise callback failure handling.
    pass


if __name__ == '__main__':
    unittest.main()
|
TNT-Samuel/Coding-Projects | refs/heads/master | DNS Server/Source/Lib/site-packages/urllib3/connectionpool.py | 24 | from __future__ import absolute_import
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .packages.six.moves import queue
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.request import set_file_position
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host, Url, NORMALIZABLE_SCHEMES
from .util.queue import LifoQueue
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
# Pool objects
class ConnectionPool(object):
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
    """

    #: Subclasses set this to 'http' / 'https'.
    scheme = None
    #: Queue class used to hold idle connections (LIFO favors warm sockets).
    QueueCls = LifoQueue

    def __init__(self, host, port=None):
        if not host:
            raise LocationValueError("No host specified.")

        # Normalize IPv6 literals ("[::1]") before storing; keep the raw
        # lower-cased host separately for proxy tunnelling.
        self.host = _ipv6_host(host, self.scheme)
        self._proxy_host = host.lower()
        self.port = port

    def __str__(self):
        return '%s(host=%r, port=%r)' % (type(self).__name__,
                                         self.host, self.port)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Return False to re-raise any potential exceptions
        return False

    def close(self):
        """
        Close all pooled connections and disable the pool.
        """
        # Base implementation is a no-op; subclasses drain their queues.
        pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = 'http'
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
    def __init__(self, host, port=None, strict=False,
                 timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
                 headers=None, retries=None,
                 _proxy=None, _proxy_headers=None,
                 **conn_kw):
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)

        self.strict = strict

        # Accept a raw int/float timeout for backwards compatibility.
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)

        if retries is None:
            retries = Retry.DEFAULT

        self.timeout = timeout
        self.retries = retries

        self.pool = self.QueueCls(maxsize)
        self.block = block

        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}

        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)

        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
        self.conn_kw = conn_kw

        if self.proxy:
            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
            # We cannot know if the user has added default socket options, so we cannot replace the
            # list.
            self.conn_kw.setdefault('socket_options', [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.debug("Starting new HTTP connection (%d): %s:%s",
self.num_connections, self.host, self.port or "80")
conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return conn
    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError:  # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.")

        except queue.Empty:
            # Non-blocking pools just fall through and build a new connection.
            if self.block:
                raise EmptyPoolError(self,
                                     "Pool reached maximum size and no more "
                                     "connections are allowed.")
            pass  # Oh well, we'll create a new connection then

        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.debug("Resetting dropped connection: %s", self.host)
            conn.close()
            if getattr(conn, 'auto_open', 1) == 0:
                # This is a proxied connection that has been mutated by
                # httplib._tunnel() and cannot be reused (since it would
                # attempt to bypass the proxy)
                conn = None

        return conn or self._new_conn()
    def _put_conn(self, conn):
        """
        Put a connection back into the pool.

        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.

        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.

        If the pool is closed, then the connection will be closed and discarded.
        """
        try:
            self.pool.put(conn, block=False)
            return  # Everything is dandy, done.
        except AttributeError:
            # self.pool is None.
            pass
        except queue.Full:
            # This should never happen if self.block == True
            log.warning(
                "Connection pool is full, discarding connection: %s",
                self.host)

        # Connection never got put back into the pool, close it.
        # (conn may be None when the pool was seeded with placeholders.)
        if conn:
            conn.close()
    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.

        Hook for subclasses; the base implementation does nothing.
        """
        pass
    def _prepare_proxy(self, conn):
        # Nothing to do for HTTP connections; the HTTPS pool overrides this
        # to establish the CONNECT tunnel early.
        pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
    def _raise_timeout(self, err, url, timeout_value):
        """Is the error actually a timeout? Will raise a ReadTimeout or pass"""

        if isinstance(err, SocketTimeout):
            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)

        # See the above comment about EAGAIN in Python 3. In Python 2 we have
        # to specifically catch it and throw the timeout error
        if hasattr(err, 'errno') and err.errno in _blocking_errnos:
            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)

        # Catch possible read timeouts thrown as SSL errors. If not the
        # case, rethrow the original. We need to do this because of:
        # http://bugs.python.org/issue10272
        if 'timed out' in str(err) or 'did not complete (read)' in str(err):  # Python 2.6
            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
        # Otherwise: fall through; the caller re-raises the original error.
    def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
                      **httplib_request_kw):
        """
        Perform a request on a given urllib connection object taken from our
        pool.

        :param conn:
            a connection from one of our connection pools

        :param timeout:
            Socket timeout in seconds for the request. This can be a
            float or integer, which will set the same timeout value for
            the socket connect and the socket read, or an instance of
            :class:`urllib3.util.Timeout`, which gives you more fine-grained
            control over your timeouts.

        :param chunked:
            If True, send the body with chunked transfer encoding via
            ``conn.request_chunked``.
        """
        self.num_requests += 1

        timeout_obj = self._get_timeout(timeout)
        timeout_obj.start_connect()
        conn.timeout = timeout_obj.connect_timeout

        # Trigger any extra validation we need to do.
        try:
            self._validate_conn(conn)
        except (SocketTimeout, BaseSSLError) as e:
            # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
            self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
            raise

        # conn.request() calls httplib.*.request, not the method in
        # urllib3.request. It also calls makefile (recv) on the socket.
        if chunked:
            conn.request_chunked(method, url, **httplib_request_kw)
        else:
            conn.request(method, url, **httplib_request_kw)

        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout

        # App Engine doesn't have a sock attr
        if getattr(conn, 'sock', None):
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url, "Read timed out. (read timeout=%s)" % read_timeout)
            if read_timeout is Timeout.DEFAULT_TIMEOUT:
                conn.sock.settimeout(socket.getdefaulttimeout())
            else:  # None or a value
                conn.sock.settimeout(read_timeout)

        # Receive the response from the server
        try:
            try:  # Python 2.7, use buffering of HTTP responses
                httplib_response = conn.getresponse(buffering=True)
            except TypeError:  # Python 2.6 and older, Python 3
                try:
                    httplib_response = conn.getresponse()
                except Exception as e:
                    # Remove the TypeError from the exception chain in Python 3;
                    # otherwise it looks like a programming error was the cause.
                    six.raise_from(e, None)
        except (SocketTimeout, BaseSSLError, SocketError) as e:
            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
            raise

        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
        log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
                  method, url, http_version, httplib_response.status,
                  httplib_response.length)

        # Bad header lines are logged but do not abort the request.
        try:
            assert_header_parsing(httplib_response.msg)
        except (HeaderParsingError, TypeError) as hpe:  # Platform-specific: Python 3
            log.warning(
                'Failed to parse headers (url=%s): %s',
                self._absolute_url(url), hpe, exc_info=True)

        return httplib_response
    def _absolute_url(self, path):
        # Rebuild a full URL (scheme://host:port/path) for log messages.
        return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
if self.pool is None:
return
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass # Done.
    def is_same_host(self, url):
        """
        Check if the given ``url`` is a member of the same host as this
        connection pool.
        """
        # Relative URLs are, by definition, on the same host.
        if url.startswith('/'):
            return True

        # TODO: Add optional support for socket.gethostbyname checking.
        scheme, host, port = get_host(url)

        host = _ipv6_host(host, self.scheme)

        # Use explicit default port for comparison when none is given
        if self.port and not port:
            port = port_by_scheme.get(scheme)
        elif not self.port and port == port_by_scheme.get(scheme):
            port = None

        return (scheme, host, port) == (self.scheme, self.host, self.port)
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, chunked=False,
                body_pos=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param int body_pos:
            Position to seek to in file-like body in the event of a retry or
            redirect. Typically this won't need to be set because urllib3 will
            auto-populate the value when needed.

        :param \\**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Track whether `conn` needs to be released before
        # returning/raising/recursing. Update this variable if necessary, and
        # leave `release_conn` constant throughout the function. That way, if
        # the function recurses, the original value of `release_conn` will be
        # passed down into the recursive call, and its value will be respected.
        #
        # See issue #651 [1] for details.
        #
        # [1] <https://github.com/shazow/urllib3/issues/651>
        release_this_conn = release_conn

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        # Keep track of whether we cleanly exited the except block. This
        # ensures we do proper cleanup in finally.
        clean_exit = False

        # Rewind body position, if needed. Record current position
        # for future rewinds in the event of a redirect/retry.
        body_pos = set_file_position(body, body_pos)

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers,
                                                  chunked=chunked)

            # If we're going to release the connection in ``finally:``, then
            # the response doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = conn if not release_conn else None

            # Pass method to Response for length checking
            response_kw['request_method'] = method

            # Import httplib's response into our own wrapper object
            response = self.ResponseCls.from_httplib(httplib_response,
                                                     pool=self,
                                                     connection=response_conn,
                                                     retries=retries,
                                                     **response_kw)

            # Everything went great!
            clean_exit = True

        except queue.Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (TimeoutError, HTTPException, SocketError, ProtocolError,
                BaseSSLError, SSLError, CertificateError) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            clean_exit = False
            if isinstance(e, (BaseSSLError, CertificateError)):
                e = SSLError(e)
            elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            # May re-raise (MaxRetryError) if retries are exhausted.
            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if not clean_exit:
                # We hit some kind of exception, handled or otherwise. We need
                # to throw the connection away unless explicitly told not to.
                # Close the connection, set the variable to None, and make sure
                # we put the None back in the pool to avoid leaking it.
                conn = conn and conn.close()
                release_this_conn = True

            if release_this_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s", retries, err, url)
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, body_pos=body_pos,
                                **response_kw)

        # Local helper used before every recursive retry/redirect call.
        def drain_and_release_conn(response):
            try:
                # discard any remaining response body, the connection will be
                # released back to the pool once the entire response is read
                response.read()
            except (TimeoutError, HTTPException, SocketError, ProtocolError,
                    BaseSSLError, SSLError) as e:
                # Best-effort drain; draining failures are deliberately ignored.
                pass

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    # Drain and release the connection for this response, since
                    # we're not returning it to be released manually.
                    drain_and_release_conn(response)
                    raise
                return response

            # drain and return the connection to the pool before recursing
            drain_and_release_conn(response)

            retries.sleep_for_retry(response)
            log.debug("Redirecting %s -> %s", url, redirect_location)
            return self.urlopen(
                method, redirect_location, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn, body_pos=body_pos,
                **response_kw)

        # Check if we should retry the HTTP response.
        has_retry_after = bool(response.getheader('Retry-After'))
        if retries.is_retry(method, response.status, has_retry_after):
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_status:
                    # Drain and release the connection for this response, since
                    # we're not returning it to be released manually.
                    drain_and_release_conn(response)
                    raise
                return response

            # drain and return the connection to the pool before recursing
            drain_and_release_conn(response)

            retries.sleep(response)
            log.debug("Retry: %s", url)
            return self.urlopen(
                method, url, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn,
                body_pos=body_pos, **response_kw)

        return response
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.

    When Python is compiled with the :mod:`ssl` module, then
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:`.HTTPSConnection`.

    :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.

    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
    available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
    the connection socket into an SSL socket.
    """

    scheme = 'https'
    ConnectionCls = HTTPSConnection

    def __init__(self, host, port=None,
                 strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
                 block=False, headers=None, retries=None,
                 _proxy=None, _proxy_headers=None,
                 key_file=None, cert_file=None, cert_reqs=None,
                 ca_certs=None, ssl_version=None,
                 assert_hostname=None, assert_fingerprint=None,
                 ca_cert_dir=None, **conn_kw):

        HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
                                    block, headers, retries, _proxy, _proxy_headers,
                                    **conn_kw)

        # Providing CA certs implies the caller wants verification.
        if ca_certs and cert_reqs is None:
            cert_reqs = 'CERT_REQUIRED'

        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.ca_cert_dir = ca_cert_dir
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def _prepare_conn(self, conn):
        """
        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
        and establish the tunnel if proxy is used.
        """

        if isinstance(conn, VerifiedHTTPSConnection):
            conn.set_cert(key_file=self.key_file,
                          cert_file=self.cert_file,
                          cert_reqs=self.cert_reqs,
                          ca_certs=self.ca_certs,
                          ca_cert_dir=self.ca_cert_dir,
                          assert_hostname=self.assert_hostname,
                          assert_fingerprint=self.assert_fingerprint)
            conn.ssl_version = self.ssl_version
        return conn

    def _prepare_proxy(self, conn):
        """
        Establish tunnel connection early, because otherwise httplib
        would improperly set Host: header to proxy's IP:port.
        """
        # Python 2.7+
        try:
            set_tunnel = conn.set_tunnel
        except AttributeError:  # Platform-specific: Python 2.6
            set_tunnel = conn._set_tunnel

        if sys.version_info <= (2, 6, 4) and not self.proxy_headers:  # Python 2.6.4 and older
            set_tunnel(self._proxy_host, self.port)
        else:
            set_tunnel(self._proxy_host, self.port, self.proxy_headers)

        conn.connect()

    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPSConnection`.
        """
        self.num_connections += 1
        log.debug("Starting new HTTPS connection (%d): %s:%s",
                  self.num_connections, self.host, self.port or "443")

        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
            raise SSLError("Can't connect to HTTPS URL because the SSL "
                           "module is not available.")

        actual_host = self.host
        actual_port = self.port
        if self.proxy is not None:
            # When proxied, the TCP connection is made to the proxy instead.
            actual_host = self.proxy.host
            actual_port = self.proxy.port

        conn = self.ConnectionCls(host=actual_host, port=actual_port,
                                  timeout=self.timeout.connect_timeout,
                                  strict=self.strict, **self.conn_kw)

        return self._prepare_conn(conn)

    def _validate_conn(self, conn):
        """
        Called right before a request is made, after the socket is created.
        """
        super(HTTPSConnectionPool, self)._validate_conn(conn)

        # Force connect early to allow us to validate the connection.
        if not getattr(conn, 'sock', None):  # AppEngine might not have  `.sock`
            conn.connect()

        if not conn.is_verified:
            warnings.warn((
                'Unverified HTTPS request is being made. '
                'Adding certificate verification is strongly advised. See: '
                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
                '#ssl-warnings'),
                InsecureRequestWarning)
def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \\**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.

    Example::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    port = port or port_by_scheme.get(scheme, 80)

    # Pick the pool class by scheme; everything that isn't https is plain HTTP.
    pool_cls = HTTPSConnectionPool if scheme == 'https' else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)
def _ipv6_host(host, scheme):
    """
    Process IPv6 address literals
    """

    # httplib doesn't like it when we include brackets in IPv6 addresses
    # Specifically, if we include brackets but also pass the port then
    # httplib crazily doubles up the square brackets on the Host header.
    # Instead, we need to make sure we never pass ``None`` as the port.
    # However, for backward compatibility reasons we can't actually
    # *assert* that. See http://bugs.python.org/issue28539
    #
    # Also if an IPv6 address literal has a zone identifier, the
    # percent sign might be URIencoded, convert it back into ASCII
    is_bracketed = host.startswith('[') and host.endswith(']')
    if is_bracketed:
        # Undo percent-encoding of the zone-id separator, then drop brackets.
        host = host.replace('%25', '%').strip('[]')

    if scheme in NORMALIZABLE_SCHEMES:
        host = host.lower()

    return host
|
TangHao1987/intellij-community | refs/heads/master | python/helpers/pydev/third_party/wrapped_for_pydev/ctypes/macholib/__init__.py | 320 | """
Enough Mach-O to make your head spin.
See the relevant header files in /usr/include/mach-o
And also Apple's documentation.
"""
__version__ = '1.0'
|
indictranstech/buyback-erp | refs/heads/master | erpnext/hr/doctype/designation/test_designation.py | 40 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
test_records = frappe.get_test_records('Designation') |
zhengyongbo/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py | 121 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.config.ports import DeprecatedPort
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool import steps
class StepsTest(unittest.TestCase):
    def _step_options(self):
        # Build the minimal MockOptions object every step under test expects.
        options = MockOptions()
        options.non_interactive = True
        options.port = 'MOCK port'
        options.quiet = True
        options.test = True
        return options
    def _run_step(self, step, tool=None, options=None, state=None):
        # Run a single step class, substituting mock defaults for any
        # argument the caller did not supply.
        if not tool:
            tool = MockTool()
        if not options:
            options = self._step_options()
        if not state:
            state = {}
        step(tool, options).run(state)
    def test_update_step(self):
        # The Update step should log that it is updating the working directory.
        tool = MockTool()
        options = self._step_options()
        options.update = True
        expected_logs = "Updating working directory\n"
        OutputCapture().assert_outputs(self, self._run_step, [steps.Update, tool, options], expected_logs=expected_logs)
    def test_prompt_for_bug_or_title_step(self):
        # Stub the user prompt to always answer with a bug id.
        tool = MockTool()
        tool.user.prompt = lambda message: 50000
        self._run_step(steps.PromptForBugOrTitle, tool=tool)
    def _post_diff_options(self):
        # Extend the base options with the flags the PostDiff steps read.
        options = self._step_options()
        options.git_commit = None
        options.description = None
        options.comment = None
        options.review = True
        options.request_commit = False
        options.open_bug = True
        return options
def _assert_step_output_with_bug(self, step, bug_id, expected_logs, options=None):
state = {'bug_id': bug_id}
OutputCapture().assert_outputs(self, self._run_step, [step, MockTool(), options, state], expected_logs=expected_logs)
def _assert_post_diff_output_for_bug(self, step, bug_id, expected_logs):
self._assert_step_output_with_bug(step, bug_id, expected_logs, self._post_diff_options())
def test_post_diff(self):
expected_logs = "MOCK add_patch_to_bug: bug_id=78, description=Patch, mark_for_review=True, mark_for_commit_queue=False, mark_for_landing=False\nMOCK: user.open_url: http://example.com/78\n"
self._assert_post_diff_output_for_bug(steps.PostDiff, 78, expected_logs)
def test_post_diff_for_commit(self):
expected_logs = "MOCK add_patch_to_bug: bug_id=78, description=Patch for landing, mark_for_review=False, mark_for_commit_queue=False, mark_for_landing=True\n"
self._assert_post_diff_output_for_bug(steps.PostDiffForCommit, 78, expected_logs)
def test_ensure_bug_is_open_and_assigned(self):
expected_logs = "MOCK reopen_bug 50004 with comment 'Reopening to attach new patch.'\n"
self._assert_step_output_with_bug(steps.EnsureBugIsOpenAndAssigned, 50004, expected_logs)
expected_logs = "MOCK reassign_bug: bug_id=50002, assignee=None\n"
self._assert_step_output_with_bug(steps.EnsureBugIsOpenAndAssigned, 50002, expected_logs)
def test_runtests_args(self):
mock_options = self._step_options()
mock_options.non_interactive = False
step = steps.RunTests(MockTool(log_executive=True), mock_options)
tool = MockTool(log_executive=True)
# FIXME: We shouldn't use a real port-object here, but there is too much to mock at the moment.
tool._deprecated_port = DeprecatedPort()
step = steps.RunTests(tool, mock_options)
expected_logs = """Running Python unit tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/test-webkitpy'], cwd=/mock-checkout
Running Perl unit tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/test-webkitperl'], cwd=/mock-checkout
Running JavaScriptCore tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/run-javascriptcore-tests'], cwd=/mock-checkout
Running bindings generation tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/run-bindings-tests'], cwd=/mock-checkout
Running run-webkit-tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/run-webkit-tests', '--quiet'], cwd=/mock-checkout
"""
OutputCapture().assert_outputs(self, step.run, [{}], expected_logs=expected_logs)
|
chadelofson/vbs2py3 | refs/heads/master | venv/Lib/site-packages/pip/commands/list.py | 84 | from __future__ import absolute_import
import logging
import warnings
from pip.basecommand import Command
from pip.exceptions import DistributionNotFound
from pip.index import PackageFinder
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions, dist_is_editable
from pip.utils.deprecation import RemovedInPip7Warning
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
    """
    List installed packages, including editables.

    Packages are listed in a case-insensitive sorted order.
    """
    name = 'list'
    usage = """
      %prog [options]"""
    summary = 'List installed packages.'

    def __init__(self, *args, **kw):
        """Register the command-line options understood by ``pip list``."""
        super(ListCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts

        cmd_opts.add_option(
            '-o', '--outdated',
            action='store_true',
            default=False,
            help='List outdated packages (excluding editables)')
        cmd_opts.add_option(
            '-u', '--uptodate',
            action='store_true',
            default=False,
            help='List uptodate packages (excluding editables)')
        cmd_opts.add_option(
            '-e', '--editable',
            action='store_true',
            default=False,
            help='List editable projects.')
        cmd_opts.add_option(
            '-l', '--local',
            action='store_true',
            default=False,
            help=('If in a virtualenv that has global access, do not list '
                  'globally-installed packages.'),
        )
        self.cmd_opts.add_option(
            '--user',
            dest='user',
            action='store_true',
            default=False,
            help='Only output packages installed in user-site.')

        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help=("Include pre-release and development versions. By default, "
                  "pip only finds stable versions."),
        )

        # Index-related options (e.g. --index-url) are shared across commands;
        # splice them in ahead of this command's own option group.
        index_opts = make_option_group(index_group, self.parser)

        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this list command.
        """
        return PackageFinder(
            find_links=options.find_links,
            index_urls=index_urls,
            allow_external=options.allow_external,
            allow_unverified=options.allow_unverified,
            allow_all_external=options.allow_all_external,
            allow_all_prereleases=options.pre,
            trusted_hosts=options.trusted_hosts,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )

    def run(self, options, args):
        """Dispatch to the listing mode selected by the mode flags."""
        if options.outdated:
            self.run_outdated(options)
        elif options.uptodate:
            self.run_uptodate(options)
        elif options.editable:
            self.run_editables(options)
        else:
            self.run_listing(options)

    def run_outdated(self, options):
        """Log each installed package whose index version is newer."""
        for dist, version in self.find_packages_latests_versions(options):
            if version > dist.parsed_version:
                logger.info(
                    '%s (Current: %s Latest: %s)',
                    dist.project_name, dist.version, version,
                )

    def find_packages_latests_versions(self, options):
        """Yield ``(dist, remote_version)`` for each installed distribution.

        Distributions whose installed version is already newest, or which
        cannot be located on the configured indexes, are skipped.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []

        if options.use_mirrors:
            warnings.warn(
                "--use-mirrors has been deprecated and will be removed in the "
                "future. Explicit uses of --index-url and/or --extra-index-url"
                " is suggested.",
                RemovedInPip7Warning,
            )

        if options.mirrors:
            warnings.warn(
                "--mirrors has been deprecated and will be removed in the "
                "future. Explicit uses of --index-url and/or --extra-index-url"
                " is suggested.",
                RemovedInPip7Warning,
            )
            index_urls += options.mirrors

        # Honour any dependency_links.txt metadata shipped by installed dists.
        dependency_links = []
        for dist in get_installed_distributions(local_only=options.local,
                                                user_only=options.user):
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(
                    dist.get_metadata_lines('dependency_links.txt'),
                )

        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            finder.add_dependency_links(dependency_links)

            installed_packages = get_installed_distributions(
                local_only=options.local,
                user_only=options.user,
                include_editables=False,
            )
            for dist in installed_packages:
                req = InstallRequirement.from_line(
                    dist.key, None, isolated=options.isolated_mode,
                )
                try:
                    link = finder.find_requirement(req, True)

                    # If link is None, means installed version is most
                    # up-to-date
                    if link is None:
                        continue
                except DistributionNotFound:
                    continue
                else:
                    remote_version = finder._link_package_versions(
                        link, req.name
                    ).version
                    yield dist, remote_version

    def run_listing(self, options):
        """List every installed distribution (default mode)."""
        installed_packages = get_installed_distributions(
            local_only=options.local,
            user_only=options.user,
        )
        self.output_package_listing(installed_packages)

    def run_editables(self, options):
        """List only distributions installed as editable checkouts."""
        installed_packages = get_installed_distributions(
            local_only=options.local,
            user_only=options.user,
            editables_only=True,
        )
        self.output_package_listing(installed_packages)

    def output_package_listing(self, installed_packages):
        """Log one line per package, sorted case-insensitively by name."""
        installed_packages = sorted(
            installed_packages,
            key=lambda dist: dist.project_name.lower(),
        )
        for dist in installed_packages:
            if dist_is_editable(dist):
                # Editable installs additionally show where the checkout lives.
                line = '%s (%s, %s)' % (
                    dist.project_name,
                    dist.version,
                    dist.location,
                )
            else:
                line = '%s (%s)' % (dist.project_name, dist.version)
            logger.info(line)

    def run_uptodate(self, options):
        """List installed packages already matching the newest index version."""
        uptodate = []
        for dist, version in self.find_packages_latests_versions(options):
            if dist.parsed_version == version:
                uptodate.append(dist)
        self.output_package_listing(uptodate)
|
endlessm/chromium-browser | refs/heads/master | tools/perf/core/benchmark_finders.py | 5 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
import inspect
import os
import sys
from core import path_util
from core import perf_benchmark
from telemetry import benchmark as benchmark_module
from py_utils import discover
def GetClassFilePath(clazz):
  """Return the absolute path of the source file that defines |clazz|."""
  assert inspect.isclass(clazz)
  source_path = os.path.abspath(inspect.getfile(clazz))
  # Map a compiled .pyc path back to its .py source file.
  return source_path[:-1] if source_path.endswith('.pyc') else source_path
def GetBenchmarkNamesForFile(top_level_dir, benchmark_file_dir):
  """ Return the list of all benchmark names of benchmarks defined in
      |benchmark_file_dir|.

  Args:
    top_level_dir: Directory temporarily added to sys.path so the benchmark
        module can resolve its own imports while being loaded.
    benchmark_file_dir: Path of the benchmark .py file to inspect.

  Returns:
    Sorted list of benchmark names (strings).
  """
  top_level_dir = os.path.abspath(top_level_dir)
  # Snapshot sys.path once so the modification below can be undone.
  # (The original code performed this copy twice; the first was a dead store.)
  original_sys_path = sys.path[:]
  if top_level_dir not in sys.path:
    sys.path.append(top_level_dir)
  try:
    module = imp.load_source('_tmp_module_name_', benchmark_file_dir)
    benchmark_names = []
    for _, obj in inspect.getmembers(module):
      # Only count benchmark classes defined in this very file, not classes
      # the module merely imports from elsewhere.
      if (inspect.isclass(obj) and issubclass(obj, perf_benchmark.PerfBenchmark)
          and GetClassFilePath(obj) == benchmark_file_dir):
        benchmark_names.append(obj.Name())
    return sorted(benchmark_names)
  finally:
    sys.path = original_sys_path
def GetOfficialBenchmarks():
  """Return all perf-waterfall benchmarks, sorted by benchmark name."""
  discovered = discover.DiscoverClasses(
      start_dir=path_util.GetOfficialBenchmarksDir(),
      top_level_dir=path_util.GetPerfDir(),
      base_class=benchmark_module.Benchmark,
      index_by_class_name=True)
  return sorted(discovered.values(), key=lambda b: b.Name())
def GetContribBenchmarks():
  """Return all contrib benchmarks, sorted by benchmark name."""
  discovered = discover.DiscoverClasses(
      start_dir=path_util.GetContribDir(),
      top_level_dir=path_util.GetPerfDir(),
      base_class=benchmark_module.Benchmark,
      index_by_class_name=True)
  return sorted(discovered.values(), key=lambda b: b.Name())
def GetAllBenchmarks():
  """Return every benchmark in tools/perf, sorted by benchmark name."""
  combined = GetOfficialBenchmarks() + GetContribBenchmarks()
  combined.sort(key=lambda b: b.Name())
  return combined
def GetBenchmarksInSubDirectory(directory):
  """Return the benchmark classes discovered under |directory|."""
  discovered = discover.DiscoverClasses(
      start_dir=directory,
      top_level_dir=path_util.GetPerfDir(),
      base_class=benchmark_module.Benchmark,
      index_by_class_name=True)
  return discovered.values()
|
gpapaz/eve-wspace | refs/heads/develop | evewspace/Jabber/models.py | 10 | from django.conf import settings
from Alerts.models import SubscriptionGroup
from django.db import models
# Create your models here.
# Resolve the project's configured user model once at import time; the
# string from settings is used directly as the ForeignKey target below.
User = settings.AUTH_USER_MODEL
class JabberSubscription(models.Model):
    """Links a user to an alert SubscriptionGroup for jabber delivery."""
    user = models.ForeignKey(User, related_name='jabber_subs')
    group = models.ForeignKey(SubscriptionGroup, related_name='jabber_subs')

    def __unicode__(self):
        return "User {0} Group {1}".format(self.user.username,
                                           self.group.name)
class JabberAccount(models.Model):
    """A jabber endpoint owned by a user. JID is in user@host.tld format."""
    user = models.ForeignKey(User, related_name='jabber_accounts')
    jid = models.CharField(max_length=200)

    def __unicode__(self):
        return "User: {0} JID: {1}".format(self.user.username, self.jid)
|
shekkbuilder/scratch | refs/heads/master | graceful_unlisten/client.py | 2 |
import os
import socket
import sys
# Loopback server endpoint to hammer, and number of worker processes to fork.
addr = ('127.1', 6969)
pool_size = 8
def connect_loop(i):
    """Worker loop: repeatedly connect to `addr` and read from the server.

    Returns 0 when connect() fails (server stopped listening) or -1 when an
    established connection errors during recv(). `i` is only used to label
    this worker's log lines.
    """
    while True:
        s = socket.socket()
        try:
            s.connect(addr)
        except socket.error, e:
            print '%d: exit due to %s' % (i, e)
            return 0
        try:
            ss = s.recv(64)
        except socket.error, e:
            print '%d: %s' % (i, e)
            return -1
# Fork a pool of workers; each child runs connect_loop and exits with
# its return code (0 = server went away, -1 = recv error).
for i in xrange(pool_size):
    if not os.fork():
        sys.exit(connect_loop(i))
# Parent: reap every child and report its (pid, status) pair.
for _ in xrange(pool_size):
    print ['child exit', os.waitpid(-1, 0)]
|
hoheinzollern/tracker | refs/heads/master | tests/functional-tests/common/utils/applicationstest.py | 3 | #!/usr/bin/env python
#
# Copyright (C) 2010, Nokia <ivan.frade@nokia.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from common.utils import configuration as cfg
from common.utils.system import TrackerSystemAbstraction
from common.utils.helpers import log
import unittest2 as ut
from gi.repository import GLib
import shutil
import os
import time
# Temp directory that the miner is configured to monitor during these tests.
APPLICATIONS_TMP_DIR = os.path.join (cfg.TEST_MONITORED_TMP_DIR, "test-applications-monitored")

index_dirs = [APPLICATIONS_TMP_DIR]

# DConf settings handed to the test harness: recursively index only the
# temp dir above, and disable optical/removable media indexing.
CONF_OPTIONS = {
    cfg.DCONF_MINER_SCHEMA: {
        'index-recursive-directories': GLib.Variant.new_strv(index_dirs),
        'index-single-directories': GLib.Variant.new_strv([]),
        'index-optical-discs': GLib.Variant.new_boolean(False),
        'index-removable-devices': GLib.Variant.new_boolean(False),
    }
}

# Copy rate, 10KBps (1024b/100ms)
SLOWCOPY_RATE = 1024
class CommonTrackerApplicationTest (ut.TestCase):
    """Base class for application-monitoring tests.

    Provides a monitored temp directory, sample media file names, and
    helpers that copy files slowly (simulating an application writing a
    file incrementally while the miner watches).
    """

    def get_urn_count_by_url (self, url):
        # Number of resources currently indexed with exactly this nie:url.
        select = """
        SELECT ?u WHERE { ?u nie:url \"%s\" }
        """ % (url)
        return len (self.tracker.query (select))

    def get_test_image (self):
        TEST_IMAGE = "test-image-1.jpg"
        return TEST_IMAGE

    def get_test_video (self):
        TEST_VIDEO = "test-video-1.mp4"
        return TEST_VIDEO

    def get_test_music (self):
        TEST_AUDIO = "test-music-1.mp3"
        return TEST_AUDIO

    def get_data_dir (self):
        return self.datadir

    def get_dest_dir (self):
        return APPLICATIONS_TMP_DIR

    def slowcopy_file_fd (self, src, fdest, rate=SLOWCOPY_RATE):
        """
        Copy *src* into the already-open file object *fdest*, throttled.

        @rate: bytes per 100ms
        """
        log ("Copying slowly\n '%s' to\n '%s'" % (src, fdest.name))
        fsrc = open (src, 'rb')
        buffer_ = fsrc.read (rate)
        while (buffer_ != ""):
            fdest.write (buffer_)
            time.sleep (0.1)
            buffer_ = fsrc.read (rate)
        fsrc.close ()

    def slowcopy_file (self, src, dst, rate=SLOWCOPY_RATE):
        """
        Copy file *src* to path *dst*, throttled via slowcopy_file_fd.

        @rate: bytes per 100ms
        """
        fdest = open (dst, 'wb')
        self.slowcopy_file_fd (src, fdest, rate)
        fdest.close ()

    @classmethod
    def setUp (self):
        # NOTE(review): decorated @classmethod but named/parameterised like an
        # instance method (self); unittest still runs it before each test —
        # confirm the per-class binding is intentional.
        # Create temp directory to monitor
        if (os.path.exists (APPLICATIONS_TMP_DIR)):
            shutil.rmtree (APPLICATIONS_TMP_DIR)
        os.makedirs (APPLICATIONS_TMP_DIR)

        # Use local directory if available. Installation otherwise.
        if os.path.exists (os.path.join (os.getcwd (),
                                         "test-apps-data")):
            self.datadir = os.path.join (os.getcwd (),
                                         "test-apps-data")
        else:
            self.datadir = os.path.join (cfg.DATADIR,
                                         "tracker-tests",
                                         "test-apps-data")

        self.system = TrackerSystemAbstraction ()
        self.system.tracker_all_testing_start (CONF_OPTIONS)

        # Returns when ready
        self.tracker = self.system.store
        log ("Ready to go!")

    @classmethod
    def tearDown (self):
        #print "Stopping the daemon in test mode (Doing nothing now)"
        self.system.tracker_all_testing_stop ()

        # Remove monitored directory
        if (os.path.exists (APPLICATIONS_TMP_DIR)):
            shutil.rmtree (APPLICATIONS_TMP_DIR)
|
radlws/AWS-ElasticBeanstalk-CLI | refs/heads/master | eb/linux/python3/scli/cli_parse.py | 4 | #!/usr/bin/env python
# ==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
import argparse
from contextlib import closing
import logging
from io import StringIO
from lib.utility import misc
from lib.elasticbeanstalk.model import EnvironmentTier
from scli.resources import CLISwitch, CLISwitchMsg, EBSCliAttr
from scli.constants import CommandType, ServiceDefault, ServiceRegionId, \
ParameterName, ParameterSource
from scli.parameter import Parameter
from scli.exception import ArgumentError
log = logging.getLogger('cli')
def _word_join(word_list, separator=''):
    """Join *word_list* into one string, coercing each element to unicode."""
    return separator.join(misc.to_unicode(word) for word in word_list)
def command(string):
    """argparse type: map *string* to its CommandType, case-insensitively.

    Raises AttributeError when the string is not a known command.
    """
    requested = misc.to_unicode(string)
    normalized = requested.lower().strip()
    for candidate in CommandType:
        if candidate.lower() == normalized:
            return candidate
    raise AttributeError(EBSCliAttr.InvalidCommand.format(requested))
def subcommand(string):
    """argparse type: pass the sub-command token through as unicode."""
    return misc.to_unicode(string)
def _init_parser(parser):
    """Register every eb CLI positional argument and switch on *parser*.

    Switch spellings come from CLISwitch and help text from CLISwitchMsg,
    both keyed by ParameterName, so parsed values land under ParameterName
    keys in the argparse namespace.
    """
    commands = ', '.join(map(str.lower, CommandType))
    # Positional arguments: the command and its optional sub-command tokens.
    parser.add_argument(CLISwitch[ParameterName.Command],
                        type=command,
                        metavar='COMMAND', help=commands)
    parser.add_argument(CLISwitch[ParameterName.SubCommand],
                        type=subcommand, nargs='*',
                        default=None,
                        metavar='SUBCOMMAND', )

    # AWS credential
    parser.add_argument('-I', '--' + CLISwitch[ParameterName.AwsAccessKeyId],
                        dest=ParameterName.AwsAccessKeyId,
                        metavar='ACCESS_KEY_ID',
                        help=CLISwitchMsg[ParameterName.AwsAccessKeyId])
    parser.add_argument('-S', '--' + CLISwitch[ParameterName.AwsSecretAccessKey],
                        dest=ParameterName.AwsSecretAccessKey,
                        metavar='SECRET_ACCESS_KEY',
                        help=CLISwitchMsg[ParameterName.AwsSecretAccessKey])
    parser.add_argument('--' + CLISwitch[ParameterName.AwsCredentialFile],
                        dest=ParameterName.AwsCredentialFile,
                        metavar='FILE_PATH_NAME',
                        help=CLISwitchMsg[ParameterName.AwsCredentialFile])

    # Application/environment
    parser.add_argument('-t', '--' + CLISwitch[ParameterName.EnvironmentTier],
                        dest=ParameterName.EnvironmentTier,
                        metavar='ENVIRONMENT_TIER',
                        help=CLISwitchMsg[ParameterName.EnvironmentTier])
    # Solution stack names contain spaces, so collect them as word lists.
    parser.add_argument('-s', '--' + CLISwitch[ParameterName.SolutionStack],
                        dest=ParameterName.SolutionStack, nargs='+',
                        metavar='',
                        help=CLISwitchMsg[ParameterName.SolutionStack])
    parser.add_argument('-a', '--' + CLISwitch[ParameterName.ApplicationName],
                        dest=ParameterName.ApplicationName,
                        metavar='APPLICATION_NAME',
                        help=CLISwitchMsg[ParameterName.ApplicationName])
    parser.add_argument('-l', '--' + CLISwitch[ParameterName.ApplicationVersionName],
                        dest=ParameterName.ApplicationVersionName,
                        metavar='VERSION_LABEL',
                        help=CLISwitchMsg[ParameterName.ApplicationVersionName])
    parser.add_argument('-e', '--' + CLISwitch[ParameterName.EnvironmentName],
                        dest=ParameterName.EnvironmentName,
                        metavar='ENVIRONMENT_NAME',
                        help=CLISwitchMsg[ParameterName.EnvironmentName])

    # Output
    parser.add_argument('--' + CLISwitch[ParameterName.Verbose],
                        action='store_const', const=ServiceDefault.ENABLED,
                        dest=ParameterName.Verbose,
                        metavar='',
                        help=CLISwitchMsg[ParameterName.Verbose])
    parser.add_argument('-f', '--' + CLISwitch[ParameterName.Force],
                        action='store_const', const=ServiceDefault.ENABLED,
                        dest=ParameterName.Force,
                        metavar='',
                        help=CLISwitchMsg[ParameterName.Force])

    # Service
    parser.add_argument('--' + CLISwitch[ParameterName.WaitForFinishTimeout], type=int,
                        dest=ParameterName.WaitForFinishTimeout,
                        metavar='TIMEOUT_IN_SEC',
                        help=str.format(CLISwitchMsg[ParameterName.WaitForFinishTimeout],
                                        ServiceDefault.WAIT_TIMEOUT_IN_SEC))
    parser.add_argument('--' + CLISwitch[ParameterName.Region],
                        dest=ParameterName.Region,
                        metavar='REGION',
                        help=CLISwitchMsg[ParameterName.Region])
    parser.add_argument('--' + CLISwitch[ParameterName.ServiceEndpoint],
                        dest=ParameterName.ServiceEndpoint,
                        metavar='ENDPOINT',
                        help=CLISwitchMsg[ParameterName.ServiceEndpoint])

    # SCli Helper switch
    parser.add_argument('--version', action='version', version=EBSCliAttr.Version)
# List of non string parameters.
# Their parsed values are objects/lists and must not be coerced to
# unicode during post-processing in parse().
NON_STRING_PARAMETERS = [ParameterName.EnvironmentTier]
def parse(parameter_pool, line=None):
    ''' Parse command arguments (from *line* if given, else sys.argv) and
    store each recognized value into *parameter_pool* tagged as a CLI
    argument. Returns the raw argparse result dict.'''
    parser = ArgumentParser(description=EBSCliAttr.Name,
                            usage=EBSCliAttr.Usage)
    _init_parser(parser)

    if line is not None:
        args = vars(parser.parse_args(line.split()))
    else:
        args = vars(parser.parse_args())

    # Post processing
    if args[ParameterName.EnvironmentTier] is not None:
        tier_serialized = args[ParameterName.EnvironmentTier]
        args[ParameterName.EnvironmentTier] = EnvironmentTier.from_serialized_string(tier_serialized)

    if args[ParameterName.SolutionStack] is not None:
        # nargs='+' produced a word list; rebuild the space-separated name.
        solution_stack = _word_join(args[ParameterName.SolutionStack], ' ')
        args[ParameterName.SolutionStack] = solution_stack

    if args[ParameterName.Region] is not None:
        region_id = args[ParameterName.Region]
        # Reverse lookup: map a region id back to its ServiceRegionId key.
        region = list(ServiceRegionId.keys())[list(ServiceRegionId.values()).index(region_id)]
        args[ParameterName.Region] = region

    # Store command line arguments into parameter pool
    for arg, value in args.items():
        arg = misc.to_unicode(arg, convert_none=False)

        # Try to convert string/list-of-string parameters to unicode
        if arg not in NON_STRING_PARAMETERS:
            if isinstance(value, list):
                value = [misc.to_unicode(item) for item in value]
            else:
                value = misc.to_unicode(value, convert_none=False)

        if arg == CLISwitch[ParameterName.Command]:
            parameter_pool.put(Parameter(ParameterName.Command,
                                         value,
                                         ParameterSource.CliArgument))
        elif arg == CLISwitch[ParameterName.SubCommand]:
            parameter_pool.put(Parameter(ParameterName.SubCommand,
                                         value,
                                         ParameterSource.CliArgument))
        elif value is not None:
            # Other switches are stored only when actually supplied.
            parameter_pool.put(Parameter(arg,
                                         value,
                                         ParameterSource.CliArgument))

    log.info('Finished parsing command line arguments')
    if log.isEnabledFor(logging.DEBUG):
        log.debug('Received arguments: {0}'. \
                  format(misc.collection_to_string(parameter_pool.parameter_names)))

    return args
class ArgumentParser(argparse.ArgumentParser):
    '''argparse.ArgumentParser variant that raises ArgumentError on bad
    input instead of printing usage and exiting the process.'''

    def error(self, error_message):
        with closing(StringIO()) as usage:
            # Capture the usage text argparse would normally print.
            self.print_usage(usage)
            full_message = EBSCliAttr.ErrorMsg.format(
                error_message, usage.getvalue(), self.prog)
        raise ArgumentError(full_message)
|
kdeloach/otm-core | refs/heads/develop | opentreemap/treemap/lib/user.py | 3 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import urllib
from django.db.models import Q
from treemap.audit import Audit, Authorizable, get_auditable_class
from treemap.models import Instance, MapFeature, InstanceUser, User
from treemap.util import get_filterable_audit_models
from treemap.lib.object_caches import udf_defs
from treemap.udf import UDFModel
def _instance_ids_edited_by(user):
    """Distinct, non-null ids of instances on which *user* has audits."""
    qs = Audit.objects.filter(user=user)
    qs = qs.values_list('instance_id', flat=True)
    qs = qs.exclude(instance_id=None)
    return qs.distinct()
def get_audits(logged_in_user, instance, query_vars, user, models,
               model_id, page=0, page_size=20, exclude_pending=True,
               should_count=False):
    """Return one page of Audit records by *user* that *logged_in_user*
    may see.

    Returns a dict with keys 'audits' (queryset slice), 'total_count'
    (0 unless should_count), and 'next_page'/'prev_page' (query strings
    or None). query_vars is reused to build the pagination links.
    """
    start_pos = page * page_size
    end_pos = start_pos + page_size

    if instance:
        if instance.is_accessible_by(logged_in_user):
            instances = Instance.objects.filter(pk=instance.pk)
        else:
            instances = Instance.objects.none()
    # If we didn't specify an instance we only want to
    # show audits where the user has permission
    else:
        instances = Instance.objects\
            .filter(pk__in=_instance_ids_edited_by(user))\
            .filter(user_accessible_instance_filter(
                logged_in_user))\
            .distinct()

    if not instances.exists():
        # Force no results
        return {'audits': Audit.objects.none(),
                'total_count': 0,
                'next_page': None,
                'prev_page': None}

    map_feature_models = set(MapFeature.subclass_dict().keys())
    model_filter = Q()
    # We only want to show the TreePhoto's image, not other fields
    # and we want to do it automatically if 'Tree' was specified as
    # a model. The same goes for MapFeature(s) <-> MapFeaturePhoto
    # There is no need to check permissions, because photos are always visible
    if 'Tree' in models:
        model_filter = model_filter | Q(model='TreePhoto', field='image')
    if map_feature_models.intersection(models):
        model_filter = model_filter | Q(model='MapFeaturePhoto', field='image')

    # Build a visibility filter per instance, since permissions and UDF
    # definitions differ between instances.
    for inst in instances:
        eligible_models = ({'Tree', 'TreePhoto', 'MapFeaturePhoto'} |
                           set(inst.map_feature_types)) & set(models)

        if logged_in_user == user:
            eligible_udfs = {'udf:%s' % udf.id for udf in udf_defs(inst)
                             if udf.model_type in eligible_models
                             and udf.iscollection}

            # The logged-in user can see all their own edits
            model_filter = model_filter | Q(
                instance=inst, model__in=(eligible_models | eligible_udfs))
        else:
            # Filter other users' edits by their visibility to the
            # logged-in user
            for model in eligible_models:
                ModelClass = get_auditable_class(model)
                fake_model = ModelClass(instance=inst)
                if issubclass(ModelClass, Authorizable):
                    visible_fields = fake_model.visible_fields(logged_in_user)
                    model_filter = model_filter |\
                        Q(model=model, field__in=visible_fields, instance=inst)
                else:
                    model_filter = model_filter | Q(model=model, instance=inst)

                if issubclass(ModelClass, UDFModel):
                    model_collection_udfs_audit_names = (
                        fake_model.visible_collection_udfs_audit_names(
                            logged_in_user))

                    model_filter = model_filter | (
                        Q(model__in=model_collection_udfs_audit_names))

    # Internal collection-UDF bookkeeping rows are never shown.
    udf_bookkeeping_fields = Q(
        model__startswith='udf:',
        field__in=('id', 'model_id', 'field_definition'))

    audits = (Audit.objects
              .filter(model_filter)
              .filter(instance__in=instances)
              .select_related('instance')
              .exclude(udf_bookkeeping_fields)
              .exclude(user=User.system_user())
              .order_by('-created'))

    if user:
        audits = audits.filter(user=user)
    if model_id:
        audits = audits.filter(model_id=model_id)
    if exclude_pending:
        audits = audits.exclude(requires_auth=True, ref__isnull=True)

    total_count = audits.count() if should_count else 0
    audits = audits[start_pos:end_pos]

    query_vars = {k: v for (k, v) in query_vars.iteritems() if k != 'page'}
    next_page = None
    prev_page = None
    # We are using len(audits) instead of audits.count() because we
    # have already realized the queryset at this point
    if len(audits) == page_size:
        query_vars['page'] = page + 1
        next_page = "?" + urllib.urlencode(query_vars)

    if page > 0:
        query_vars['page'] = page - 1
        prev_page = "?" + urllib.urlencode(query_vars)

    return {'audits': audits,
            'total_count': total_count,
            'next_page': next_page,
            'prev_page': prev_page}
def get_audits_params(request):
    """Extract audit-listing query parameters from *request*.

    Returns a tuple (page, page_size, models, model_id, exclude_pending).
    Raises Exception on an unknown model name, or when model_id is given
    without exactly one model.
    """
    PAGE_MAX = 100
    PAGE_DEFAULT = 20

    r = request.REQUEST

    # Clamp the requested page size so a client cannot demand huge pages.
    page_size = min(int(r.get('page_size', PAGE_DEFAULT)), PAGE_MAX)
    page = int(r.get('page', 0))

    models = []
    allowed_models = get_filterable_audit_models()

    models_param = r.get('models', None)
    if models_param:
        for model in models_param.split(','):
            if model.lower() in allowed_models:
                models.append(allowed_models[model.lower()])
            else:
                raise Exception("Invalid model: %s" % model)
    else:
        models = allowed_models.values()

    model_id = r.get('model_id', None)
    if model_id is not None and len(models) != 1:
        # Fixed grammar of the user-facing error message
        # (was "must specific one and only model").
        raise Exception("You must specify one and only one model "
                        "when looking up by id")

    exclude_pending = r.get('exclude_pending', "false") == "true"

    return (page, page_size, models, model_id, exclude_pending)
def user_accessible_instance_filter(logged_in_user):
    """Q filter selecting instances visible to *logged_in_user*."""
    visible = Q(is_public=True)
    # Authenticated users additionally see private instances they belong to.
    if logged_in_user is not None and not logged_in_user.is_anonymous():
        visible = visible | Q(instanceuser__user=logged_in_user)
    return visible
def get_user_instances(logged_in_user, user, current_instance=None):
    """Instances *user* belongs to that *logged_in_user* is allowed to see,
    ordered by name."""
    # Which instances can the logged-in user see?
    accessible = user_accessible_instance_filter(logged_in_user)

    member_ids = (InstanceUser.objects
                  .filter(user_id=user.pk)
                  .values_list('instance_id', flat=True))

    instance_filter = Q(accessible, Q(pk__in=member_ids))

    # The logged-in user should see the current instance in their own list.
    if current_instance and logged_in_user == user:
        instance_filter = instance_filter | Q(pk=current_instance.id)

    return (Instance.objects
            .filter(instance_filter)
            .distinct()
            .order_by('name'))
|
wsmith323/django | refs/heads/master | tests/migrations/test_graph.py | 99 | import warnings
from django.db.migrations.exceptions import (
CircularDependencyError, NodeNotFoundError,
)
from django.db.migrations.graph import RECURSION_DEPTH_WARNING, MigrationGraph
from django.test import SimpleTestCase
from django.utils.encoding import force_text
class GraphTests(SimpleTestCase):
"""
Tests the digraph structure.
"""
    def test_simple_graph(self):
        """
        Tests a basic dependency graph:

        app_a:  0001 <-- 0002 <--- 0003 <-- 0004
                                 /
        app_b:  0001 <-- 0002 <-/
        """
        # Build graph
        graph = MigrationGraph()
        graph.add_node(("app_a", "0001"), None)
        graph.add_node(("app_a", "0002"), None)
        graph.add_node(("app_a", "0003"), None)
        graph.add_node(("app_a", "0004"), None)
        graph.add_node(("app_b", "0001"), None)
        graph.add_node(("app_b", "0002"), None)
        graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_a", "0003"))
        graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
        graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
        graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
        graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
        # Test root migration case: plan for a root is just the root itself.
        self.assertEqual(
            graph.forwards_plan(("app_a", "0001")),
            [('app_a', '0001')],
        )
        # Test branch B only
        self.assertEqual(
            graph.forwards_plan(("app_b", "0002")),
            [("app_b", "0001"), ("app_b", "0002")],
        )
        # Test whole graph: cross-app dependency pulls app_b in first.
        self.assertEqual(
            graph.forwards_plan(("app_a", "0004")),
            [
                ('app_b', '0001'), ('app_b', '0002'), ('app_a', '0001'),
                ('app_a', '0002'), ('app_a', '0003'), ('app_a', '0004'),
            ],
        )
        # Test reverse to b:0002
        self.assertEqual(
            graph.backwards_plan(("app_b", "0002")),
            [('app_a', '0004'), ('app_a', '0003'), ('app_b', '0002')],
        )
        # Test roots and leaves
        self.assertEqual(
            graph.root_nodes(),
            [('app_a', '0001'), ('app_b', '0001')],
        )
        self.assertEqual(
            graph.leaf_nodes(),
            [('app_a', '0004'), ('app_b', '0002')],
        )
    def test_complex_graph(self):
        """
        Tests a complex dependency graph:

        app_a:  0001 <-- 0002 <--- 0003 <-- 0004
                      \        \ /         /
        app_b:  0001 <-\ 0002 <-X         /
                        \        \       /
        app_c:           \ 0001 <-- 0002 <-
        """
        # Build graph
        graph = MigrationGraph()
        graph.add_node(("app_a", "0001"), None)
        graph.add_node(("app_a", "0002"), None)
        graph.add_node(("app_a", "0003"), None)
        graph.add_node(("app_a", "0004"), None)
        graph.add_node(("app_b", "0001"), None)
        graph.add_node(("app_b", "0002"), None)
        graph.add_node(("app_c", "0001"), None)
        graph.add_node(("app_c", "0002"), None)
        graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_a", "0003"))
        graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
        graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
        graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
        graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
        graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_c", "0002"))
        graph.add_dependency("app_c.0002", ("app_c", "0002"), ("app_c", "0001"))
        graph.add_dependency("app_c.0001", ("app_c", "0001"), ("app_b", "0001"))
        graph.add_dependency("app_c.0002", ("app_c", "0002"), ("app_a", "0002"))
        # Test branch C only
        self.assertEqual(
            graph.forwards_plan(("app_c", "0002")),
            [('app_b', '0001'), ('app_c', '0001'), ('app_a', '0001'), ('app_a', '0002'), ('app_c', '0002')],
        )
        # Test whole graph: all three apps interleave in dependency order.
        self.assertEqual(
            graph.forwards_plan(("app_a", "0004")),
            [
                ('app_b', '0001'), ('app_c', '0001'), ('app_a', '0001'),
                ('app_a', '0002'), ('app_c', '0002'), ('app_b', '0002'),
                ('app_a', '0003'), ('app_a', '0004'),
            ],
        )
        # Test reverse to b:0001
        self.assertEqual(
            graph.backwards_plan(("app_b", "0001")),
            [
                ('app_a', '0004'), ('app_c', '0002'), ('app_c', '0001'),
                ('app_a', '0003'), ('app_b', '0002'), ('app_b', '0001'),
            ],
        )
        # Test roots and leaves
        self.assertEqual(
            graph.root_nodes(),
            [('app_a', '0001'), ('app_b', '0001'), ('app_c', '0001')],
        )
        self.assertEqual(
            graph.leaf_nodes(),
            [('app_a', '0004'), ('app_b', '0002'), ('app_c', '0002')],
        )
def test_circular_graph(self):
"""
Tests a circular dependency graph.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0001", ("app_a", "0001"), ("app_b", "0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency("app_b.0001", ("app_b", "0001"), ("app_a", "0003"))
# Test whole graph
self.assertRaises(
CircularDependencyError,
graph.forwards_plan, ("app_a", "0003"),
)
def test_circular_graph_2(self):
graph = MigrationGraph()
graph.add_node(('A', '0001'), None)
graph.add_node(('C', '0001'), None)
graph.add_node(('B', '0001'), None)
graph.add_dependency('A.0001', ('A', '0001'), ('B', '0001'))
graph.add_dependency('B.0001', ('B', '0001'), ('A', '0001'))
graph.add_dependency('C.0001', ('C', '0001'), ('B', '0001'))
self.assertRaises(
CircularDependencyError,
graph.forwards_plan, ('C', '0001')
)
def test_graph_recursive(self):
graph = MigrationGraph()
root = ("app_a", "1")
graph.add_node(root, None)
expected = [root]
for i in range(2, 750):
parent = ("app_a", str(i - 1))
child = ("app_a", str(i))
graph.add_node(child, None)
graph.add_dependency(str(i), child, parent)
expected.append(child)
leaf = expected[-1]
forwards_plan = graph.forwards_plan(leaf)
self.assertEqual(expected, forwards_plan)
backwards_plan = graph.backwards_plan(root)
self.assertEqual(expected[::-1], backwards_plan)
def test_graph_iterative(self):
graph = MigrationGraph()
root = ("app_a", "1")
graph.add_node(root, None)
expected = [root]
for i in range(2, 1000):
parent = ("app_a", str(i - 1))
child = ("app_a", str(i))
graph.add_node(child, None)
graph.add_dependency(str(i), child, parent)
expected.append(child)
leaf = expected[-1]
with warnings.catch_warnings(record=True) as w:
forwards_plan = graph.forwards_plan(leaf)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(str(w[-1].message), RECURSION_DEPTH_WARNING)
self.assertEqual(expected, forwards_plan)
with warnings.catch_warnings(record=True) as w:
backwards_plan = graph.backwards_plan(root)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(str(w[-1].message), RECURSION_DEPTH_WARNING)
self.assertEqual(expected[::-1], backwards_plan)
def test_plan_invalid_node(self):
"""
Tests for forwards/backwards_plan of nonexistent node.
"""
graph = MigrationGraph()
message = "Node ('app_b', '0001') not a valid node"
with self.assertRaisesMessage(NodeNotFoundError, message):
graph.forwards_plan(("app_b", "0001"))
with self.assertRaisesMessage(NodeNotFoundError, message):
graph.backwards_plan(("app_b", "0001"))
def test_missing_parent_nodes(self):
"""
Tests for missing parent nodes.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
msg = "Migration app_a.0001 dependencies reference nonexistent parent node ('app_b', '0002')"
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.add_dependency("app_a.0001", ("app_a", "0001"), ("app_b", "0002"))
def test_missing_child_nodes(self):
"""
Tests for missing child nodes.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
msg = "Migration app_a.0002 dependencies reference nonexistent child node ('app_a', '0002')"
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
def test_infinite_loop(self):
"""
Tests a complex dependency graph:
app_a: 0001 <-
\
app_b: 0001 <- x 0002 <-
/ \
app_c: 0001<- <------------- x 0002
And apply squashing on app_c.
"""
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_node(("app_c", "0001_squashed_0002"), None)
graph.add_dependency("app_b.0001", ("app_b", "0001"), ("app_c", "0001_squashed_0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_a", "0001"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency("app_c.0001_squashed_0002", ("app_c", "0001_squashed_0002"), ("app_b", "0002"))
with self.assertRaises(CircularDependencyError):
graph.forwards_plan(("app_c", "0001_squashed_0002"))
def test_stringify(self):
graph = MigrationGraph()
self.assertEqual(force_text(graph), "Graph: 0 nodes, 0 edges")
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
self.assertEqual(force_text(graph), "Graph: 5 nodes, 3 edges")
self.assertEqual(repr(graph), "<MigrationGraph: nodes=5, edges=3>")
|
ashwingoldfish/eddy | refs/heads/master | eddy/core/items/nodes/common/label.py | 2 | # -*- coding: utf-8 -*-
##########################################################################
# #
# Eddy: a graphical editor for the specification of Graphol ontologies #
# Copyright (C) 2015 Daniele Pantaleone <danielepantaleone@me.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##################### ##################### #
# #
# Graphol is developed by members of the DASI-lab group of the #
# Dipartimento di Ingegneria Informatica, Automatica e Gestionale #
# A.Ruberti at Sapienza University of Rome: http://www.dis.uniroma1.it #
# #
# - Domenico Lembo <lembo@dis.uniroma1.it> #
# - Valerio Santarelli <santarelli@dis.uniroma1.it> #
# - Domenico Fabio Savo <savo@dis.uniroma1.it> #
# - Daniele Pantaleone <pantaleone@dis.uniroma1.it> #
# - Marco Console <console@dis.uniroma1.it> #
# #
##########################################################################
from PyQt5 import QtCore
from eddy.core.commands.labels import CommandLabelChange
from eddy.core.datatypes.misc import DiagramMode
from eddy.core.functions.misc import isEmpty
from eddy.core.items.common import AbstractLabel
class NodeLabel(AbstractLabel):
"""
This class implements the label to be attached to the graphol nodes.
"""
def __init__(self, template='', pos=None, movable=True, editable=True, parent=None):
"""
Initialize the label.
:type template: str
:type pos: callable
:type movable: bool
:type editable: bool
:type parent: QObject
"""
defaultPos = lambda: QtCore.QPointF(0, 0)
self.defaultPos = pos or defaultPos
super().__init__(template, movable, editable, parent=parent)
self.setPos(self.defaultPos())
#############################################
# EVENTS
#################################
def keyPressEvent(self, keyEvent):
"""
Executed when a key is pressed.
:type keyEvent: QKeyEvent
"""
moved = self.isMoved()
super().keyPressEvent(keyEvent)
self.updatePos(moved)
def mousePressEvent(self, mouseEvent):
"""
Executed when the mouse is pressed on the text item.
:type mouseEvent: QGraphicsSceneMouseEvent
"""
if self.diagram.mode is DiagramMode.LabelEdit:
super().mousePressEvent(mouseEvent)
#############################################
# INTERFACE
#################################
def isMoved(self):
"""
Returns True if the label has been moved from its default location, else False.
:return: bool
"""
return (self.pos() - self.defaultPos()).manhattanLength() >= 1
def setText(self, text):
"""
Set the given text as plain text.
:type text: str.
"""
moved = self.isMoved()
super().setText(text)
self.updatePos(moved)
def updatePos(self, moved=False):
"""
Update the current text position with respect to its parent node.
:type moved: bool.
"""
if not moved:
self.setPos(self.defaultPos())
class FacetQuotedLabel(NodeLabel):
"""
This class implements the quoted label of Facet nodes.
"""
def __init__(self, **kwargs):
"""
Initialize the label.
:type kwargs: dict
"""
super().__init__(**kwargs)
self.focusInFacet = None
#############################################
# EVENTS
#################################
def focusInEvent(self, focusEvent):
"""
Executed when the text item is focused.
:type focusEvent: QFocusEvent
"""
# Make the label focusable only by performing a double click on the
# text: this will exclude any other type of focus action (dunno why
# but sometime the label gets the focus when hovering the mouse cursor
# on the text: mostly happens when loading a diagram from file)
if focusEvent.reason() == QtCore.Qt.OtherFocusReason:
node = self.parentItem()
self.focusInData = self.text()
self.focusInFacet = node.facet
self.setText(self.text().strip('"'))
self.diagram.clearSelection()
self.diagram.setMode(DiagramMode.LabelEdit)
self.setSelectedText(True)
super(AbstractLabel, self).focusInEvent(focusEvent)
else:
self.clearFocus()
def focusOutEvent(self, focusEvent):
"""
Executed when the text item lose the focus.
:type focusEvent: QFocusEvent
"""
if self.diagram.mode is DiagramMode.LabelEdit:
if isEmpty(self.text()):
self.setText(self.template)
focusInData = self.focusInData
currentData = '"{0}"'.format(self.text().strip('"'))
###########################################################
# IMPORTANT! #
# ####################################################### #
# The code below is a bit tricky: to be able to properly #
# update the node in the project index we need to force #
# the value of the label to it's previous one and let the #
# command implementation update the index. #
###########################################################
self.setText(focusInData)
if focusInData and focusInData != currentData:
item = self.parentItem()
undo = item.compose(self.focusInFacet, focusInData)
redo = item.compose(self.focusInFacet, currentData)
command = CommandLabelChange(self.diagram, self.parentItem(), undo, redo)
self.session.undostack.push(command)
self.focusInData = None
self.focusInFacet = None
self.setSelectedText(False)
self.setAlignment(self.alignment())
self.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
self.diagram.setMode(DiagramMode.Idle)
self.diagram.sgnUpdated.emit()
super(AbstractLabel, self).focusOutEvent(focusEvent)
|
jjmleiro/hue | refs/heads/master | desktop/core/ext-py/boto-2.38.0/boto/pyami/scriptbase.py | 153 | import os
import sys
from boto.utils import ShellCommand, get_ts
import boto
import boto.utils
class ScriptBase(object):
def __init__(self, config_file=None):
self.instance_id = boto.config.get('Instance', 'instance-id', 'default')
self.name = self.__class__.__name__
self.ts = get_ts()
if config_file:
boto.config.read(config_file)
def notify(self, subject, body=''):
boto.utils.notify(subject, body)
def mkdir(self, path):
if not os.path.isdir(path):
try:
os.mkdir(path)
except:
boto.log.error('Error creating directory: %s' % path)
def umount(self, path):
if os.path.ismount(path):
self.run('umount %s' % path)
def run(self, command, notify=True, exit_on_error=False, cwd=None):
self.last_command = ShellCommand(command, cwd=cwd)
if self.last_command.status != 0:
boto.log.error('Error running command: "%s". Output: "%s"' % (command, self.last_command.output))
if notify:
self.notify('Error encountered',
'Error running the following command:\n\t%s\n\nCommand output:\n\t%s' % \
(command, self.last_command.output))
if exit_on_error:
sys.exit(-1)
return self.last_command.status
def main(self):
pass
|
mjs/juju | refs/heads/master | acceptancetests/assess_mixed_images.py | 2 | #!/usr/bin/env python
"""Assess mixed deployment of images from two sets of simplestreams."""
from __future__ import print_function
import argparse
import logging
import sys
from deploy_stack import (
assess_juju_relations,
BootstrapManager,
)
from jujucharm import (
local_charm_path,
)
from utility import (
add_basic_testing_arguments,
configure_logging,
)
__metaclass__ = type
log = logging.getLogger("assess_mixed_images")
def assess_mixed_images(client):
charm_path = local_charm_path(charm='dummy-sink', juju_ver=client.version,
series='centos7', platform='centos')
client.deploy(charm_path)
charm_path = local_charm_path(charm='dummy-source',
juju_ver=client.version, series='trusty')
client.deploy(charm_path)
client.juju('add-relation', ('dummy-source', 'dummy-sink'))
# Wait for the deployment to finish.
client.wait_for_started()
assess_juju_relations(client)
def parse_args(argv):
"""Parse all arguments."""
parser = argparse.ArgumentParser(
description="Deploy images from two sets of simplestreams.")
add_basic_testing_arguments(parser)
# Fallback behaviour fails without --bootstrap-series: Bug 1560625
parser.set_defaults(series='trusty')
parser.add_argument('--image-metadata-url')
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
configure_logging(args.verbose)
bs_manager = BootstrapManager.from_args(args)
client = bs_manager.client
if args.image_metadata_url is not None:
client.env.update_config('image-metadata-url',
args.image_metadata_url)
with bs_manager.booted_context(args.upload_tools):
assess_mixed_images(bs_manager.client)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Y--/root | refs/heads/master | interpreter/llvm/src/tools/clang/tools/scan-build-py/tests/unit/test_compilation.py | 52 | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libscanbuild.compilation as sut
import unittest
class CompilerTest(unittest.TestCase):
def test_is_compiler_call(self):
self.assertIsNotNone(sut.compiler_language(['clang']))
self.assertIsNotNone(sut.compiler_language(['clang-3.6']))
self.assertIsNotNone(sut.compiler_language(['clang++']))
self.assertIsNotNone(sut.compiler_language(['clang++-3.5.1']))
self.assertIsNotNone(sut.compiler_language(['cc']))
self.assertIsNotNone(sut.compiler_language(['c++']))
self.assertIsNotNone(sut.compiler_language(['gcc']))
self.assertIsNotNone(sut.compiler_language(['g++']))
self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/gcc']))
self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/g++']))
self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/clang']))
self.assertIsNotNone(
sut.compiler_language(['armv7_neno-linux-gnueabi-g++']))
self.assertIsNone(sut.compiler_language([]))
self.assertIsNone(sut.compiler_language(['']))
self.assertIsNone(sut.compiler_language(['ld']))
self.assertIsNone(sut.compiler_language(['as']))
self.assertIsNone(sut.compiler_language(['/usr/local/bin/compiler']))
class SplitTest(unittest.TestCase):
def test_detect_cxx_from_compiler_name(self):
def test(cmd):
result = sut.split_command([cmd, '-c', 'src.c'])
self.assertIsNotNone(result, "wrong input for test")
return result.compiler == 'c++'
self.assertFalse(test('cc'))
self.assertFalse(test('gcc'))
self.assertFalse(test('clang'))
self.assertTrue(test('c++'))
self.assertTrue(test('g++'))
self.assertTrue(test('g++-5.3.1'))
self.assertTrue(test('clang++'))
self.assertTrue(test('clang++-3.7.1'))
self.assertTrue(test('armv7_neno-linux-gnueabi-g++'))
def test_action(self):
self.assertIsNotNone(sut.split_command(['clang', 'source.c']))
self.assertIsNotNone(sut.split_command(['clang', '-c', 'source.c']))
self.assertIsNotNone(sut.split_command(['clang', '-c', 'source.c',
'-MF', 'a.d']))
self.assertIsNone(sut.split_command(['clang', '-E', 'source.c']))
self.assertIsNone(sut.split_command(['clang', '-c', '-E', 'source.c']))
self.assertIsNone(sut.split_command(['clang', '-c', '-M', 'source.c']))
self.assertIsNone(
sut.split_command(['clang', '-c', '-MM', 'source.c']))
def test_source_file(self):
def test(expected, cmd):
self.assertEqual(expected, sut.split_command(cmd).files)
test(['src.c'], ['clang', 'src.c'])
test(['src.c'], ['clang', '-c', 'src.c'])
test(['src.C'], ['clang', '-x', 'c', 'src.C'])
test(['src.cpp'], ['clang++', '-c', 'src.cpp'])
test(['s1.c', 's2.c'], ['clang', '-c', 's1.c', 's2.c'])
test(['s1.c', 's2.c'], ['cc', 's1.c', 's2.c', '-ldep', '-o', 'a.out'])
test(['src.c'], ['clang', '-c', '-I', './include', 'src.c'])
test(['src.c'], ['clang', '-c', '-I', '/opt/me/include', 'src.c'])
test(['src.c'], ['clang', '-c', '-D', 'config=file.c', 'src.c'])
self.assertIsNone(
sut.split_command(['cc', 'this.o', 'that.o', '-o', 'a.out']))
self.assertIsNone(
sut.split_command(['cc', 'this.o', '-lthat', '-o', 'a.out']))
def test_filter_flags(self):
def test(expected, flags):
command = ['clang', '-c', 'src.c'] + flags
self.assertEqual(expected, sut.split_command(command).flags)
def same(expected):
test(expected, expected)
def filtered(flags):
test([], flags)
same([])
same(['-I', '/opt/me/include', '-DNDEBUG', '-ULIMITS'])
same(['-O', '-O2'])
same(['-m32', '-mmms'])
same(['-Wall', '-Wno-unused', '-g', '-funroll-loops'])
filtered([])
filtered(['-lclien', '-L/opt/me/lib', '-L', '/opt/you/lib'])
filtered(['-static'])
filtered(['-MD', '-MT', 'something'])
filtered(['-MMD', '-MF', 'something'])
class SourceClassifierTest(unittest.TestCase):
def test_sources(self):
self.assertIsNone(sut.classify_source('file.o'))
self.assertIsNone(sut.classify_source('file.exe'))
self.assertIsNone(sut.classify_source('/path/file.o'))
self.assertIsNone(sut.classify_source('clang'))
self.assertEqual('c', sut.classify_source('file.c'))
self.assertEqual('c', sut.classify_source('./file.c'))
self.assertEqual('c', sut.classify_source('/path/file.c'))
self.assertEqual('c++', sut.classify_source('file.c', False))
self.assertEqual('c++', sut.classify_source('./file.c', False))
self.assertEqual('c++', sut.classify_source('/path/file.c', False))
|
android-legacy/android_kernel_exynos | refs/heads/exybase | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
svn2github/kgyp | refs/heads/master | test/win/gyptest-link-aslr.py | 344 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure aslr setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('aslr.gyp', chdir=CHDIR)
test.build('aslr.gyp', test.ALL, chdir=CHDIR)
def HasDynamicBase(exe):
full_path = test.built_file_path(exe, chdir=CHDIR)
output = test.run_dumpbin('/headers', full_path)
return ' Dynamic base' in output
# Default is to be on.
if not HasDynamicBase('test_aslr_default.exe'):
test.fail_test()
if HasDynamicBase('test_aslr_no.exe'):
test.fail_test()
if not HasDynamicBase('test_aslr_yes.exe'):
test.fail_test()
test.pass_test()
|
icereval/osf.io | refs/heads/develop | api_tests/waffle/views/test_waffle_list.py | 6 | import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
AuthUserFactory,
FlagFactory,
SampleFactory,
SwitchFactory
)
from api.base.pagination import MaxSizePagination
@pytest.mark.django_db
class TestWaffleList:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def inactive_flag(self):
return FlagFactory(name='inactive_flag', everyone=False)
@pytest.fixture()
def active_flag(self):
return FlagFactory(name='active_flag')
@pytest.fixture()
def inactive_switch(self):
return SwitchFactory(name='inactive_switch', active=False)
@pytest.fixture()
def active_sample(self):
return SampleFactory(name='active_sample')
@pytest.fixture()
def url(self):
return '/{}_waffle/'.format(API_BASE)
@pytest.fixture()
def flag_url(self, url):
return url + '?flags=active_flag'
def test_waffle_flag_no_filter(self, app, user, url, inactive_flag, active_flag):
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']) == 2
def test_waffle_flag_filter_active(self, app, user, flag_url, active_flag):
res = app.get(flag_url, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['attributes']['name'] == 'active_flag'
assert res.json['data'][0]['attributes']['active'] is True
def test_waffle_flag_filter_does_not_exist(self, app, user, url, inactive_flag, active_flag):
res = app.get(url + '?flags=jibberish', auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']) == 0
def test_filter_waffle_samples_flags_and_switches(self, app, user, url, inactive_flag, active_flag, active_sample, inactive_switch):
res = app.get(url + '?flags=active_flag&samples=active_sample&switches=inactive_switch', auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']) == 3
def test_waffle_switch_logged_out(self, app, user, url, inactive_switch):
res = app.get(url + '?switches=inactive_switch')
assert res.status_code == 200
assert len(res.json['data']) == 1
assert res.json['data'][0]['attributes']['name'] == 'inactive_switch'
assert not res.json['data'][0]['attributes']['active']
def test_page_size(self, app, url, user):
res = app.get(url)
assert res.json['links']['meta']['per_page'] == MaxSizePagination.page_size
|
GavinCruise/flask | refs/heads/master | flask/cli.py | 136 | # -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock, Thread
from functools import update_wrapper
import click
from ._compat import iteritems, reraise
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in 'app', 'application':
app = getattr(module, attr_name, None)
if app is not None and isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for k, v in iteritems(module.__dict__)
if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
raise NoAppException('Failed to find application in module "%s". Are '
'you sure it contains a Flask application? Maybe '
'you wrapped it in a WSGI middleware or you are '
'using a factory function.' % module.__name__)
def prepare_exec_for_file(filename):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
module = []
# Chop off file extensions or package markers
if filename.endswith('.py'):
filename = filename[:-3]
elif os.path.split(filename)[1] == '__init__.py':
filename = os.path.dirname(filename)
else:
raise NoAppException('The file provided (%s) does exist but is not a '
'valid Python file. This means that it cannot '
'be used as application. Please change the '
'extension to .py' % filename)
filename = os.path.realpath(filename)
dirpath = filename
while 1:
dirpath, extra = os.path.split(dirpath)
module.append(extra)
if not os.path.isfile(os.path.join(dirpath, '__init__.py')):
break
sys.path.insert(0, dirpath)
return '.'.join(module[::-1])
def locate_app(app_id):
"""Attempts to locate the application."""
__traceback_hide__ = True
if ':' in app_id:
module, app_obj = app_id.split(':', 1)
else:
module = app_id
app_obj = None
__import__(module)
mod = sys.modules[module]
if app_obj is None:
app = find_best_app(mod)
else:
app = getattr(mod, app_obj, None)
if app is None:
raise RuntimeError('Failed to find application in module "%s"'
% module)
return app
class DispatchingApp(object):
"""Special application that dispatches to a flask application which
is imported by name in a background thread. If an error happens
it is is recorded and shows as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo(object):
"""Help object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click.
"""
def __init__(self, app_import_path=None, debug=None, create_app=None):
#: The application import path
self.app_import_path = app_import_path
#: The debug flag. If this is not None, the application will
#: automatically have it's debug flag overridden with this value.
self.debug = debug
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True
if self._loaded_app is not None:
return self._loaded_app
if self.create_app is not None:
rv = self.create_app(self)
else:
if self.app_import_path is None:
raise NoAppException('Could not locate Flask application. '
'You did not provide FLASK_APP or the '
'--app parameter.')
rv = locate_app(self.app_import_path)
if self.debug is not None:
rv.debug = self.debug
self._loaded_app = rv
return rv
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
def set_debug_value(ctx, param, value):
ctx.ensure_object(ScriptInfo).debug = value
def set_app_value(ctx, param, value):
if value is not None:
if os.path.isfile(value):
value = prepare_exec_for_file(value)
elif '.' not in sys.path:
sys.path.insert(0, '.')
ctx.ensure_object(ScriptInfo).app_import_path = value
debug_option = click.Option(['--debug/--no-debug'],
help='Enable or disable debug mode.',
default=None, callback=set_debug_value)
app_option = click.Option(['-a', '--app'],
help='The application to run',
callback=set_app_value, is_eager=True)
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop('with_appcontext', True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault('cls', AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
    """Special subclass of the :class:`AppGroup` group that supports
    loading more commands from the configured Flask app.  Normally a
    developer does not have to interface with this class but there are
    some very advanced use cases for which it makes sense to create an
    instance of this.

    For information of why this is useful see :ref:`custom-scripts`.

    :param add_default_commands: if this is True then the default run and
                                 shell commands will be added.
    :param add_app_option: adds the default :option:`--app` option.  This gets
                           automatically disabled if a `create_app`
                           callback is defined.
    :param add_debug_option: adds the default :option:`--debug` option.
    :param create_app: an optional callback that is passed the script info
                       and returns the loaded app.
    """

    def __init__(self, add_default_commands=True, add_app_option=None,
                 add_debug_option=True, create_app=None, **extra):
        # Start from any params the caller passed and append the shared
        # --app / --debug options as requested.
        params = list(extra.pop('params', None) or ())
        if add_app_option is None:
            # --app only makes sense when the app is located by import
            # path; an explicit create_app factory supersedes it.
            add_app_option = create_app is None
        if add_app_option:
            params.append(app_option)
        if add_debug_option:
            params.append(debug_option)

        AppGroup.__init__(self, params=params, **extra)
        self.create_app = create_app

        if add_default_commands:
            self.add_command(run_command)
            self.add_command(shell_command)

    def get_command(self, ctx, name):
        # We load built-in commands first as these should always be the
        # same no matter what the app does.  If the app does want to
        # override this it needs to make a custom instance of this group
        # and not attach the default commands.
        #
        # This also means that the script stays functional in case the
        # application completely fails.
        rv = AppGroup.get_command(self, ctx, name)
        if rv is not None:
            return rv
        info = ctx.ensure_object(ScriptInfo)
        try:
            rv = info.load_app().cli.get_command(ctx, name)
            if rv is not None:
                return rv
        except NoAppException:
            # No app configured/loadable: fall through and return None,
            # which click reports as an unknown command.
            pass

    def list_commands(self, ctx):
        # The commands available is the list of both the application (if
        # available) plus the builtin commands.
        rv = set(click.Group.list_commands(self, ctx))
        info = ctx.ensure_object(ScriptInfo)
        try:
            rv.update(info.load_app().cli.list_commands(ctx))
        except Exception:
            # Here we intentionally swallow all exceptions as we don't
            # want the help page to break if the app does not exist.
            # If someone attempts to use the command we try to create
            # the app again and this will give us the error.
            pass
        return sorted(rv)

    def main(self, *args, **kwargs):
        # Ensure a ScriptInfo context object exists and enable
        # FLASK_* environment variables for option defaults.
        obj = kwargs.get('obj')
        if obj is None:
            obj = ScriptInfo(create_app=self.create_app)
        kwargs['obj'] = obj
        kwargs.setdefault('auto_envvar_prefix', 'FLASK')
        return AppGroup.main(self, *args, **kwargs)
def script_info_option(*args, **kwargs):
    """This decorator works exactly like :func:`click.option` but is eager
    by default and stores the value in the :attr:`ScriptInfo.data`.  This
    is useful to further customize an application factory in very complex
    situations.

    :param script_info_key: this is a mandatory keyword argument which
                            defines under which data key the value should
                            be stored.
    """
    try:
        key = kwargs.pop('script_info_key')
    except LookupError:
        raise TypeError('script_info_key not provided.')
    # Preserve a caller-supplied callback so it still runs before the
    # value is stored on the ScriptInfo object.
    real_callback = kwargs.get('callback')

    def callback(ctx, param, value):
        # NOTE(review): real_callback is invoked as (ctx, value) while
        # click's own callback convention is (ctx, param, value) --
        # confirm this two-argument contract is intended before relying
        # on standard click callbacks here.
        if real_callback is not None:
            value = real_callback(ctx, value)
        ctx.ensure_object(ScriptInfo).data[key] = value
        return value
    kwargs['callback'] = callback
    kwargs.setdefault('is_eager', True)
    return click.option(*args, **kwargs)
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
              help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
              help='The port to bind to.')
@click.option('--reload/--no-reload', default=None,
              help='Enable or disable the reloader. By default the reloader '
              'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
              help='Enable or disable the debugger. By default the debugger '
              'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
              help='Enable or disable eager loading. By default eager '
              'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=False,
              help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
                with_threads):
    """Runs a local development server for the Flask application.

    This local server is recommended for development purposes only but it
    can also be used for simple intranet deployments.  By default it will
    not support any sort of concurrency at all to simplify debugging.  This
    can be changed with the --with-threads option which will enable basic
    multithreading.

    The reloader and debugger are by default enabled if the debug flag of
    Flask is enabled and disabled otherwise.
    """
    from werkzeug.serving import run_simple

    # Fill in defaults: reloader and debugger follow the app's debug
    # flag; eager loading defaults to the opposite of the reloader.
    # (Parameter name ``reload`` shadows the builtin but matches the
    # click option name.)
    if reload is None:
        reload = info.debug
    if debugger is None:
        debugger = info.debug
    if eager_loading is None:
        eager_loading = not reload

    app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)

    # Extra startup messages.  This depends a bit on Werkzeug internals to
    # not double execute when the reloader kicks in.
    if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
        # If we have an import path we can print it out now which can help
        # people understand what's being served.  If we do not have an
        # import path because the app was loaded through a callback then
        # we won't print anything.
        if info.app_import_path is not None:
            print(' * Serving Flask app "%s"' % info.app_import_path)
        if info.debug is not None:
            print(' * Forcing debug %s' % (info.debug and 'on' or 'off'))

    run_simple(host, port, app, use_reloader=reload,
               use_debugger=debugger, threaded=with_threads)
@click.command('shell', short_help='Runs a shell in the app context.')
@with_appcontext
def shell_command():
    """Start an interactive Python shell in the context of the current
    Flask application, with the app's shell context preloaded into the
    namespace.

    Handy for executing small bits of management code without wiring up
    the application manually.
    """
    import code
    from flask.globals import _app_ctx_stack
    app = _app_ctx_stack.top.app

    banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % (
        sys.version,
        sys.platform,
        app.import_name,
        ' [debug]' if app.debug else '',
        app.instance_path,
    )

    shell_ctx = {}

    # Honour PYTHONSTARTUP exactly like the plain interpreter does.
    startup_script = os.environ.get('PYTHONSTARTUP')
    if startup_script and os.path.isfile(startup_script):
        with open(startup_script, 'r') as handle:
            exec(compile(handle.read(), startup_script, 'exec'), shell_ctx)

    shell_ctx.update(app.make_shell_context())
    code.interact(banner=banner, local=shell_ctx)
# The default command line interface object used by the ``flask``
# executable (see ``main`` below).  The help text is user-facing.
cli = FlaskGroup(help="""\
This shell command acts as general utility script for Flask applications.
It loads the application configured (either through the FLASK_APP environment
variable or the --app parameter) and then provides commands either provided
by the application or Flask itself.
The most useful commands are the "run" and "shell" command.
Example usage:
flask --app=hello --debug run
""")
def main(as_module=False):
    """Entry point for the ``flask`` executable.

    When *as_module* is true the script was started via
    ``python -m``; the program name and ``sys.argv`` are then adjusted
    so the reloader can re-spawn the process correctly.
    """
    this_module = __package__ + '.cli'
    args = sys.argv[1:]

    if not as_module:
        prog = None
    else:
        if sys.version_info >= (2, 7):
            prog = 'python -m ' + this_module.rsplit('.', 1)[0]
        else:
            prog = 'python -m ' + this_module

        # This module is always executed as "python -m flask.run" and as
        # such we need to ensure that we restore the actual command line
        # so that the reloader can properly operate.
        sys.argv = ['-m', this_module] + sys.argv[1:]

    cli.main(args=args, prog_name=prog)
if __name__ == '__main__':
    # Executed via "python -m"; tells main() to rewrite sys.argv for the
    # reloader.
    main(as_module=True)
|
ltilve/ChromiumGStreamerBackend | refs/heads/master | tools/perf/measurements/memory_multi_tab.py | 22 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Multi tab memory test.
This test is a multi tab test, but we're interested in measurements for
the entire test rather than each single page.
"""
from telemetry.page import page_test
from metrics import memory
class MemoryMultiTab(page_test.PageTest):
  """Memory measurement over an entire multi-tab run.

  Measurements are taken for the whole test rather than per page: the
  metric is started on the first tab and finalized once every story in
  the set has a tab open.
  """

  def __init__(self):
    super(MemoryMultiTab, self).__init__()
    self._memory_metric = None
    # The first tab opened; all measurements are made against it.
    self._first_tab = None

  def DidStartBrowser(self, browser):
    self._memory_metric = memory.MemoryMetric(browser)

  def CustomizeBrowserOptions(self, options):
    memory.MemoryMetric.CustomizeBrowserOptions(options)
    # Memory benchmark: sample memory histograms at a high frequency.
    options.AppendExtraBrowserArgs('--memory-metrics')

  def TabForPage(self, page, browser):
    # Every page gets its own fresh tab.
    return browser.tabs.New()

  def DidNavigateToPage(self, page, tab):
    # Kick off measurement as soon as the first tab exists.
    if not self._first_tab:
      self._memory_metric.Start(page, tab)
      self._first_tab = tab

  def ValidateAndMeasurePage(self, page, tab, results):
    # Only finalize once every story in the set has opened its tab.
    all_tabs_open = len(tab.browser.tabs) == len(page.story_set.stories)
    if all_tabs_open:
      self._memory_metric.Stop(page, self._first_tab)
      self._memory_metric.AddResults(self._first_tab, results)
|
bmos299/fabric | refs/heads/master | bddtests/peer/admin_pb2.py | 2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: peer/admin.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='peer/admin.proto',
package='protos',
syntax='proto3',
serialized_pb=_b('\n\x10peer/admin.proto\x12\x06protos\x1a\x1bgoogle/protobuf/empty.proto\"\x9a\x01\n\x0cServerStatus\x12/\n\x06status\x18\x01 \x01(\x0e\x32\x1f.protos.ServerStatus.StatusCode\"Y\n\nStatusCode\x12\r\n\tUNDEFINED\x10\x00\x12\x0b\n\x07STARTED\x10\x01\x12\x0b\n\x07STOPPED\x10\x02\x12\n\n\x06PAUSED\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x12\x0b\n\x07UNKNOWN\x10\x05\"8\n\x0fLogLevelRequest\x12\x12\n\nlog_module\x18\x01 \x01(\t\x12\x11\n\tlog_level\x18\x02 \x01(\t\"9\n\x10LogLevelResponse\x12\x12\n\nlog_module\x18\x01 \x01(\t\x12\x11\n\tlog_level\x18\x02 \x01(\t2\xd5\x02\n\x05\x41\x64min\x12;\n\tGetStatus\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12=\n\x0bStartServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12<\n\nStopServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12H\n\x11GetModuleLogLevel\x12\x17.protos.LogLevelRequest\x1a\x18.protos.LogLevelResponse\"\x00\x12H\n\x11SetModuleLogLevel\x12\x17.protos.LogLevelRequest\x1a\x18.protos.LogLevelResponse\"\x00\x42+Z)github.com/hyperledger/fabric/protos/peerb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SERVERSTATUS_STATUSCODE = _descriptor.EnumDescriptor(
name='StatusCode',
full_name='protos.ServerStatus.StatusCode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNDEFINED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STARTED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOPPED', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PAUSED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=123,
serialized_end=212,
)
_sym_db.RegisterEnumDescriptor(_SERVERSTATUS_STATUSCODE)
_SERVERSTATUS = _descriptor.Descriptor(
name='ServerStatus',
full_name='protos.ServerStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='protos.ServerStatus.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SERVERSTATUS_STATUSCODE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=212,
)
_LOGLEVELREQUEST = _descriptor.Descriptor(
name='LogLevelRequest',
full_name='protos.LogLevelRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='log_module', full_name='protos.LogLevelRequest.log_module', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_level', full_name='protos.LogLevelRequest.log_level', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=214,
serialized_end=270,
)
_LOGLEVELRESPONSE = _descriptor.Descriptor(
name='LogLevelResponse',
full_name='protos.LogLevelResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='log_module', full_name='protos.LogLevelResponse.log_module', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_level', full_name='protos.LogLevelResponse.log_level', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=272,
serialized_end=329,
)
_SERVERSTATUS.fields_by_name['status'].enum_type = _SERVERSTATUS_STATUSCODE
_SERVERSTATUS_STATUSCODE.containing_type = _SERVERSTATUS
DESCRIPTOR.message_types_by_name['ServerStatus'] = _SERVERSTATUS
DESCRIPTOR.message_types_by_name['LogLevelRequest'] = _LOGLEVELREQUEST
DESCRIPTOR.message_types_by_name['LogLevelResponse'] = _LOGLEVELRESPONSE
ServerStatus = _reflection.GeneratedProtocolMessageType('ServerStatus', (_message.Message,), dict(
DESCRIPTOR = _SERVERSTATUS,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.ServerStatus)
))
_sym_db.RegisterMessage(ServerStatus)
LogLevelRequest = _reflection.GeneratedProtocolMessageType('LogLevelRequest', (_message.Message,), dict(
DESCRIPTOR = _LOGLEVELREQUEST,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.LogLevelRequest)
))
_sym_db.RegisterMessage(LogLevelRequest)
LogLevelResponse = _reflection.GeneratedProtocolMessageType('LogLevelResponse', (_message.Message,), dict(
DESCRIPTOR = _LOGLEVELRESPONSE,
__module__ = 'peer.admin_pb2'
# @@protoc_insertion_point(class_scope:protos.LogLevelResponse)
))
_sym_db.RegisterMessage(LogLevelResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z)github.com/hyperledger/fabric/protos/peer'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
# Generated gRPC client stub; do not hand-edit -- regenerate from
# peer/admin.proto instead.
class AdminStub(object):
  """Interface exported by the server.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Each attribute is a unary-unary callable bound to one RPC of the
    # protos.Admin service.
    self.GetStatus = channel.unary_unary(
        '/protos.Admin/GetStatus',
        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        response_deserializer=ServerStatus.FromString,
        )
    self.StartServer = channel.unary_unary(
        '/protos.Admin/StartServer',
        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        response_deserializer=ServerStatus.FromString,
        )
    self.StopServer = channel.unary_unary(
        '/protos.Admin/StopServer',
        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        response_deserializer=ServerStatus.FromString,
        )
    self.GetModuleLogLevel = channel.unary_unary(
        '/protos.Admin/GetModuleLogLevel',
        request_serializer=LogLevelRequest.SerializeToString,
        response_deserializer=LogLevelResponse.FromString,
        )
    self.SetModuleLogLevel = channel.unary_unary(
        '/protos.Admin/SetModuleLogLevel',
        request_serializer=LogLevelRequest.SerializeToString,
        response_deserializer=LogLevelResponse.FromString,
        )
# Generated gRPC servicer skeleton; subclass and override the methods
# below with real implementations.  Do not hand-edit -- regenerate from
# peer/admin.proto instead.
class AdminServicer(object):
  """Interface exported by the server.
  """

  def GetStatus(self, request, context):
    """Return the serve status.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def StartServer(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def StopServer(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def GetModuleLogLevel(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def SetModuleLogLevel(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
# Generated registration helper: wires an AdminServicer implementation
# into a grpc.Server.  Do not hand-edit -- regenerate from
# peer/admin.proto instead.
def add_AdminServicer_to_server(servicer, server):
  rpc_method_handlers = {
      'GetStatus': grpc.unary_unary_rpc_method_handler(
          servicer.GetStatus,
          request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
          response_serializer=ServerStatus.SerializeToString,
      ),
      'StartServer': grpc.unary_unary_rpc_method_handler(
          servicer.StartServer,
          request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
          response_serializer=ServerStatus.SerializeToString,
      ),
      'StopServer': grpc.unary_unary_rpc_method_handler(
          servicer.StopServer,
          request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
          response_serializer=ServerStatus.SerializeToString,
      ),
      'GetModuleLogLevel': grpc.unary_unary_rpc_method_handler(
          servicer.GetModuleLogLevel,
          request_deserializer=LogLevelRequest.FromString,
          response_serializer=LogLevelResponse.SerializeToString,
      ),
      'SetModuleLogLevel': grpc.unary_unary_rpc_method_handler(
          servicer.SetModuleLogLevel,
          request_deserializer=LogLevelRequest.FromString,
          response_serializer=LogLevelResponse.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'protos.Admin', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
# Generated legacy (beta API) servicer; kept only for grpcio<0.15.0
# transition.  Do not hand-edit -- regenerate from peer/admin.proto.
class BetaAdminServicer(object):
  """The Beta API is deprecated for 0.15.0 and later.

  It is recommended to use the GA API (classes and functions in this
  file not marked beta) for all further purposes. This class was generated
  only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
  # NOTE: the generator emits a second bare string literal here; it is a
  # no-op statement, not part of the docstring.
  """Interface exported by the server.
  """

  def GetStatus(self, request, context):
    """Return the serve status.
    """
    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

  def StartServer(self, request, context):
    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

  def StopServer(self, request, context):
    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

  def GetModuleLogLevel(self, request, context):
    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

  def SetModuleLogLevel(self, request, context):
    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
# Generated legacy (beta API) stub; kept only for grpcio<0.15.0
# transition.  Do not hand-edit -- regenerate from peer/admin.proto.
class BetaAdminStub(object):
  """The Beta API is deprecated for 0.15.0 and later.

  It is recommended to use the GA API (classes and functions in this
  file not marked beta) for all further purposes. This class was generated
  only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
  # NOTE: the generator emits a second bare string literal here; it is a
  # no-op statement, not part of the docstring.
  """Interface exported by the server.
  """

  def GetStatus(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    """Return the serve status.
    """
    raise NotImplementedError()
  # The .future attribute mirrors the GA stub API shape; never populated
  # on this placeholder class.
  GetStatus.future = None

  def StartServer(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    raise NotImplementedError()
  StartServer.future = None

  def StopServer(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    raise NotImplementedError()
  StopServer.future = None

  def GetModuleLogLevel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    raise NotImplementedError()
  GetModuleLogLevel.future = None

  def SetModuleLogLevel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
    raise NotImplementedError()
  SetModuleLogLevel.future = None
# Generated legacy (beta API) server factory.  Do not hand-edit --
# regenerate from peer/admin.proto instead.
def beta_create_Admin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  """The Beta API is deprecated for 0.15.0 and later.

  It is recommended to use the GA API (classes and functions in this
  file not marked beta) for all further purposes. This function was
  generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
  # Per-RPC (de)serializers keyed by (service, method) name.
  request_deserializers = {
    ('protos.Admin', 'GetModuleLogLevel'): LogLevelRequest.FromString,
    ('protos.Admin', 'GetStatus'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    ('protos.Admin', 'SetModuleLogLevel'): LogLevelRequest.FromString,
    ('protos.Admin', 'StartServer'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
    ('protos.Admin', 'StopServer'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
  }
  response_serializers = {
    ('protos.Admin', 'GetModuleLogLevel'): LogLevelResponse.SerializeToString,
    ('protos.Admin', 'GetStatus'): ServerStatus.SerializeToString,
    ('protos.Admin', 'SetModuleLogLevel'): LogLevelResponse.SerializeToString,
    ('protos.Admin', 'StartServer'): ServerStatus.SerializeToString,
    ('protos.Admin', 'StopServer'): ServerStatus.SerializeToString,
  }
  method_implementations = {
    ('protos.Admin', 'GetModuleLogLevel'): face_utilities.unary_unary_inline(servicer.GetModuleLogLevel),
    ('protos.Admin', 'GetStatus'): face_utilities.unary_unary_inline(servicer.GetStatus),
    ('protos.Admin', 'SetModuleLogLevel'): face_utilities.unary_unary_inline(servicer.SetModuleLogLevel),
    ('protos.Admin', 'StartServer'): face_utilities.unary_unary_inline(servicer.StartServer),
    ('protos.Admin', 'StopServer'): face_utilities.unary_unary_inline(servicer.StopServer),
  }
  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
  return beta_implementations.server(method_implementations, options=server_options)
# Generated legacy (beta API) stub factory.  Do not hand-edit --
# regenerate from peer/admin.proto instead.
def beta_create_Admin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  """The Beta API is deprecated for 0.15.0 and later.

  It is recommended to use the GA API (classes and functions in this
  file not marked beta) for all further purposes. This function was
  generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
  # Per-RPC (de)serializers keyed by (service, method) name.
  request_serializers = {
    ('protos.Admin', 'GetModuleLogLevel'): LogLevelRequest.SerializeToString,
    ('protos.Admin', 'GetStatus'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
    ('protos.Admin', 'SetModuleLogLevel'): LogLevelRequest.SerializeToString,
    ('protos.Admin', 'StartServer'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
    ('protos.Admin', 'StopServer'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
  }
  response_deserializers = {
    ('protos.Admin', 'GetModuleLogLevel'): LogLevelResponse.FromString,
    ('protos.Admin', 'GetStatus'): ServerStatus.FromString,
    ('protos.Admin', 'SetModuleLogLevel'): LogLevelResponse.FromString,
    ('protos.Admin', 'StartServer'): ServerStatus.FromString,
    ('protos.Admin', 'StopServer'): ServerStatus.FromString,
  }
  # All five RPCs are simple request/response calls.
  cardinalities = {
    'GetModuleLogLevel': cardinality.Cardinality.UNARY_UNARY,
    'GetStatus': cardinality.Cardinality.UNARY_UNARY,
    'SetModuleLogLevel': cardinality.Cardinality.UNARY_UNARY,
    'StartServer': cardinality.Cardinality.UNARY_UNARY,
    'StopServer': cardinality.Cardinality.UNARY_UNARY,
  }
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'protos.Admin', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
|
fontenele/scrapy | refs/heads/master | tests/test_downloadermiddleware_httpcompression.py | 110 | from io import BytesIO
from unittest import TestCase
from os.path import join, abspath, dirname
from gzip import GzipFile
from scrapy.spiders import Spider
from scrapy.http import Response, Request, HtmlResponse
from scrapy.downloadermiddlewares.httpcompression import HttpCompressionMiddleware
from tests import tests_datadir
from w3lib.encoding import resolve_encoding
# Directory holding the pre-compressed sample pages used as fixtures.
SAMPLEDIR = join(tests_datadir, 'compressed')

# Maps a coding name (as used by _getresponse) to
# (sample file name, Content-Encoding header value).
FORMAT = {
    'gzip': ('html-gzip.bin', 'gzip'),
    'x-gzip': ('html-gzip.bin', 'gzip'),
    'rawdeflate': ('html-rawdeflate.bin', 'deflate'),
    'zlibdeflate': ('html-zlibdeflate.bin', 'deflate'),
}
class HttpCompressionTest(TestCase):
    """Tests for HttpCompressionMiddleware: Accept-Encoding injection on
    requests and transparent decompression of gzip/deflate responses.

    Fixes in this revision: the URL scheme typo ``http;//`` (semicolon
    instead of colon) in two tests, and the test method name typo
    ``zlibdelate`` -> ``zlibdeflate``.
    """

    def setUp(self):
        self.spider = Spider('foo')
        self.mw = HttpCompressionMiddleware()

    def _getresponse(self, coding):
        """Build a Response carrying the sample body compressed with
        *coding*, plus an attached Request advertising gzip/deflate."""
        if coding not in FORMAT:
            raise ValueError()
        samplefile, contentencoding = FORMAT[coding]
        with open(join(SAMPLEDIR, samplefile), 'rb') as sample:
            body = sample.read()
        headers = {
            'Server': 'Yaws/1.49 Yet Another Web Server',
            'Date': 'Sun, 08 Mar 2009 00:41:03 GMT',
            'Content-Length': len(body),
            'Content-Type': 'text/html',
            'Content-Encoding': contentencoding,
        }
        response = Response('http://scrapytest.org/', body=body, headers=headers)
        response.request = Request('http://scrapytest.org', headers={'Accept-Encoding': 'gzip,deflate'})
        return response

    def test_process_request(self):
        # The middleware must advertise supported codings on the request.
        request = Request('http://scrapytest.org')
        assert 'Accept-Encoding' not in request.headers
        self.mw.process_request(request, self.spider)
        self.assertEqual(request.headers.get('Accept-Encoding'), 'gzip,deflate')

    def test_process_response_gzip(self):
        response = self._getresponse('gzip')
        request = response.request
        self.assertEqual(response.headers['Content-Encoding'], 'gzip')
        newresponse = self.mw.process_response(request, response, self.spider)
        assert newresponse is not response
        assert newresponse.body.startswith('<!DOCTYPE')
        assert 'Content-Encoding' not in newresponse.headers

    def test_process_response_rawdeflate(self):
        response = self._getresponse('rawdeflate')
        request = response.request
        self.assertEqual(response.headers['Content-Encoding'], 'deflate')
        newresponse = self.mw.process_response(request, response, self.spider)
        assert newresponse is not response
        assert newresponse.body.startswith('<!DOCTYPE')
        assert 'Content-Encoding' not in newresponse.headers

    def test_process_response_zlibdeflate(self):
        # Renamed from "zlibdelate" (typo).
        response = self._getresponse('zlibdeflate')
        request = response.request
        self.assertEqual(response.headers['Content-Encoding'], 'deflate')
        newresponse = self.mw.process_response(request, response, self.spider)
        assert newresponse is not response
        assert newresponse.body.startswith('<!DOCTYPE')
        assert 'Content-Encoding' not in newresponse.headers

    def test_process_response_plain(self):
        # A response without Content-Encoding must pass through unchanged.
        response = Response('http://scrapytest.org', body='<!DOCTYPE...')
        request = Request('http://scrapytest.org')
        assert not response.headers.get('Content-Encoding')
        newresponse = self.mw.process_response(request, response, self.spider)
        assert newresponse is response
        assert newresponse.body.startswith('<!DOCTYPE')

    def test_multipleencodings(self):
        # Only the outermost (last) coding is decoded; the rest remain.
        response = self._getresponse('gzip')
        response.headers['Content-Encoding'] = ['uuencode', 'gzip']
        request = response.request
        newresponse = self.mw.process_response(request, response, self.spider)
        assert newresponse is not response
        self.assertEqual(newresponse.headers.getlist('Content-Encoding'), ['uuencode'])

    def test_process_response_encoding_inside_body(self):
        # After decompression the charset declared in the HTML body must
        # be honoured.
        headers = {
            'Content-Type': 'text/html',
            'Content-Encoding': 'gzip',
        }
        f = BytesIO()
        plainbody = b"""<html><head><title>Some page</title><meta http-equiv="Content-Type" content="text/html; charset=gb2312">"""
        zf = GzipFile(fileobj=f, mode='wb')
        zf.write(plainbody)
        zf.close()
        # Fixed URL scheme typo ("http;//" -> "http://").
        response = Response("http://www.example.com/", headers=headers, body=f.getvalue())
        request = Request("http://www.example.com/")

        newresponse = self.mw.process_response(request, response, self.spider)
        assert isinstance(newresponse, HtmlResponse)
        self.assertEqual(newresponse.body, plainbody)
        self.assertEqual(newresponse.encoding, resolve_encoding('gb2312'))

    def test_process_response_force_recalculate_encoding(self):
        # Even if the response class was already HtmlResponse, the
        # encoding must be recomputed from the decompressed body.
        headers = {
            'Content-Type': 'text/html',
            'Content-Encoding': 'gzip',
        }
        f = BytesIO()
        plainbody = b"""<html><head><title>Some page</title><meta http-equiv="Content-Type" content="text/html; charset=gb2312">"""
        zf = GzipFile(fileobj=f, mode='wb')
        zf.write(plainbody)
        zf.close()
        # Fixed URL scheme typo ("http;//" -> "http://").
        response = HtmlResponse("http://www.example.com/page.html", headers=headers, body=f.getvalue())
        request = Request("http://www.example.com/")

        newresponse = self.mw.process_response(request, response, self.spider)
        assert isinstance(newresponse, HtmlResponse)
        self.assertEqual(newresponse.body, plainbody)
        self.assertEqual(newresponse.encoding, resolve_encoding('gb2312'))

    def test_process_response_gzipped_contenttype(self):
        # application/gzip payloads are passed through untouched.
        response = self._getresponse('gzip')
        response.headers['Content-Type'] = 'application/gzip'
        request = response.request
        newresponse = self.mw.process_response(request, response, self.spider)
        self.assertIs(newresponse, response)
        self.assertEqual(response.headers['Content-Encoding'], 'gzip')
        self.assertEqual(response.headers['Content-Type'], 'application/gzip')
|
jorpramo/flask | refs/heads/master | setup.py | 141 | """
Flask
-----
Flask is a microframework for Python based on Werkzeug, Jinja 2 and good
intentions. And before you ask: It's BSD licensed!
Flask is Fun
````````````
Save in a hello.py:
.. code:: python
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
app.run()
And Easy to Setup
`````````````````
And run it:
.. code:: bash
$ pip install Flask
$ python hello.py
* Running on http://localhost:5000/
Links
`````
* `website <http://flask.pocoo.org/>`_
* `documentation <http://flask.pocoo.org/docs/>`_
* `development version
<http://github.com/mitsuhiko/flask/zipball/master#egg=Flask-dev>`_
"""
import re
import ast
from setuptools import setup
# Extract __version__ from flask/__init__.py without importing the
# package (importing would require its dependencies to be installed).
_version_re = re.compile(r'__version__\s+=\s+(.*)')

with open('flask/__init__.py', 'rb') as f:
    # literal_eval strips the quotes from the matched assignment value.
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))

setup(
    name='Flask',
    version=version,
    url='http://github.com/mitsuhiko/flask/',
    license='BSD',
    author='Armin Ronacher',
    author_email='armin.ronacher@active-4.com',
    description='A microframework based on Werkzeug, Jinja2 '
                'and good intentions',
    long_description=__doc__,
    packages=['flask', 'flask.ext'],
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=[
        'Werkzeug>=0.7',
        'Jinja2>=2.4',
        'itsdangerous>=0.21',
        'click>=2.0',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    # Installs the "flask" console script pointing at the CLI entry point.
    entry_points='''
        [console_scripts]
        flask=flask.cli:main
    '''
)
|
Xeralux/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/kernel_tests/bijectors/invert_test.py | 2 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import gamma as gamma_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class InvertBijectorTest(test.TestCase):
  """Tests the correctness of the Y = Invert(bij) transformation."""

  def testBijector(self):
    # Invert(fwd) must swap forward <-> inverse and the two
    # log-det-Jacobians for a representative set of wrapped bijectors.
    with self.test_session():
      for fwd in [
          bijectors.Identity(),
          bijectors.Exp(event_ndims=1),
          bijectors.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
          bijectors.Softplus(event_ndims=1),
          bijectors.SoftmaxCentered(),
      ]:
        rev = bijectors.Invert(fwd)
        # The wrapper derives its name from the wrapped bijector.
        self.assertEqual("_".join(["invert", fwd.name]), rev.name)
        x = [[[1., 2.],
              [2., 3.]]]
        self.assertAllClose(fwd.inverse(x).eval(), rev.forward(x).eval())
        self.assertAllClose(fwd.forward(x).eval(), rev.inverse(x).eval())
        self.assertAllClose(
            fwd.forward_log_det_jacobian(x).eval(),
            rev.inverse_log_det_jacobian(x).eval())
        self.assertAllClose(
            fwd.inverse_log_det_jacobian(x).eval(),
            rev.forward_log_det_jacobian(x).eval())

  def testScalarCongruency(self):
    # Statistical sanity check that Invert(Exp) behaves as a scalar
    # bijection on the sampled interval.
    with self.test_session():
      bijector = bijectors.Invert(bijectors.Exp())
      assert_scalar_congruency(
          bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)

  def testShapeGetters(self):
    # Static and dynamic event-shape getters of the inverted bijector
    # must report shapes swapped relative to the wrapped bijector.
    with self.test_session():
      bijector = bijectors.Invert(bijectors.SoftmaxCentered(validate_args=True))
      x = tensor_shape.TensorShape([2])
      y = tensor_shape.TensorShape([1])
      self.assertAllEqual(y, bijector.forward_event_shape(x))
      self.assertAllEqual(
          y.as_list(),
          bijector.forward_event_shape_tensor(x.as_list()).eval())
      self.assertAllEqual(x, bijector.inverse_event_shape(y))
      self.assertAllEqual(
          x.as_list(),
          bijector.inverse_event_shape_tensor(y.as_list()).eval())

  def testDocstringExample(self):
    # Mirrors the usage example from the Invert docstring: an
    # exp-gamma distribution built by inverting Exp.
    with self.test_session():
      exp_gamma_distribution = (
          transformed_distribution_lib.TransformedDistribution(
              distribution=gamma_lib.Gamma(concentration=1., rate=2.),
              bijector=bijectors.Invert(bijectors.Exp())))
      self.assertAllEqual(
          [], array_ops.shape(exp_gamma_distribution.sample()).eval())


if __name__ == "__main__":
  test.main()
|
habibmasuro/kivy | refs/heads/master | kivy/tests/test_issue_883.py | 70 | import unittest
from kivy.uix.label import Label
class LabelEmptyMarkupTestCase(unittest.TestCase):
    """Regression test for issue #883: markup that renders no visible
    text must still produce a (minimal) texture."""

    def test_empty_markup(self):
        # '[b][/b]' parses as markup but leaves nothing to draw; the
        # label should fall back to a 1x1 texture rather than None.
        lbl = Label(text='[b][/b]', markup=True)
        lbl.texture_update()
        self.assertIsNotNone(lbl.texture)
        self.assertEqual(1, lbl.texture.width)
        self.assertEqual(1, lbl.texture.height)
|
sticksnleaves/ghost-blog | refs/heads/master | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/_luabuiltins.py | 275 | # -*- coding: utf-8 -*-
"""
pygments.lexers._luabuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Maps Lua standard-library module name -> list of builtin function names.
# This table is machine-generated by the __main__ block at the bottom of
# this file (scraped from the online Lua reference manual); do not edit
# the entries by hand.
MODULES = {'basic': ['_G',
                     '_VERSION',
                     'assert',
                     'collectgarbage',
                     'dofile',
                     'error',
                     'getfenv',
                     'getmetatable',
                     'ipairs',
                     'load',
                     'loadfile',
                     'loadstring',
                     'next',
                     'pairs',
                     'pcall',
                     'print',
                     'rawequal',
                     'rawget',
                     'rawset',
                     'select',
                     'setfenv',
                     'setmetatable',
                     'tonumber',
                     'tostring',
                     'type',
                     'unpack',
                     'xpcall'],
           'coroutine': ['coroutine.create',
                         'coroutine.resume',
                         'coroutine.running',
                         'coroutine.status',
                         'coroutine.wrap',
                         'coroutine.yield'],
           'debug': ['debug.debug',
                     'debug.getfenv',
                     'debug.gethook',
                     'debug.getinfo',
                     'debug.getlocal',
                     'debug.getmetatable',
                     'debug.getregistry',
                     'debug.getupvalue',
                     'debug.setfenv',
                     'debug.sethook',
                     'debug.setlocal',
                     'debug.setmetatable',
                     'debug.setupvalue',
                     'debug.traceback'],
           'io': ['io.close',
                  'io.flush',
                  'io.input',
                  'io.lines',
                  'io.open',
                  'io.output',
                  'io.popen',
                  'io.read',
                  'io.tmpfile',
                  'io.type',
                  'io.write'],
           'math': ['math.abs',
                    'math.acos',
                    'math.asin',
                    'math.atan2',
                    'math.atan',
                    'math.ceil',
                    'math.cosh',
                    'math.cos',
                    'math.deg',
                    'math.exp',
                    'math.floor',
                    'math.fmod',
                    'math.frexp',
                    'math.huge',
                    'math.ldexp',
                    'math.log10',
                    'math.log',
                    'math.max',
                    'math.min',
                    'math.modf',
                    'math.pi',
                    'math.pow',
                    'math.rad',
                    'math.random',
                    'math.randomseed',
                    'math.sinh',
                    'math.sin',
                    'math.sqrt',
                    'math.tanh',
                    'math.tan'],
           'modules': ['module',
                       'require',
                       'package.cpath',
                       'package.loaded',
                       'package.loadlib',
                       'package.path',
                       'package.preload',
                       'package.seeall'],
           'os': ['os.clock',
                  'os.date',
                  'os.difftime',
                  'os.execute',
                  'os.exit',
                  'os.getenv',
                  'os.remove',
                  'os.rename',
                  'os.setlocale',
                  'os.time',
                  'os.tmpname'],
           'string': ['string.byte',
                      'string.char',
                      'string.dump',
                      'string.find',
                      'string.format',
                      'string.gmatch',
                      'string.gsub',
                      'string.len',
                      'string.lower',
                      'string.match',
                      'string.rep',
                      'string.reverse',
                      'string.sub',
                      'string.upper'],
           'table': ['table.concat',
                     'table.insert',
                     'table.maxn',
                     'table.remove',
                     'table.sort']}
if __name__ == '__main__':
    # Self-regeneration mode (Python 2 era code: print statement,
    # urllib.urlopen, dict.iteritems): scrape the newest Lua reference
    # manual and rewrite the MODULES table in this very file.
    import re
    import urllib
    import pprint

    # you can't generally find out what module a function belongs to if you
    # have only its name. Because of this, here are some callback functions
    # that recognize if a given function belongs to a specific module
    def module_callbacks():
        """Return a dict of module name -> predicate(function_name)."""
        def is_in_coroutine_module(name):
            return name.startswith('coroutine.')

        def is_in_modules_module(name):
            if name in ['require', 'module'] or name.startswith('package'):
                return True
            else:
                return False

        def is_in_string_module(name):
            return name.startswith('string.')

        def is_in_table_module(name):
            return name.startswith('table.')

        def is_in_math_module(name):
            return name.startswith('math')

        def is_in_io_module(name):
            return name.startswith('io.')

        def is_in_os_module(name):
            return name.startswith('os.')

        def is_in_debug_module(name):
            return name.startswith('debug.')

        return {'coroutine': is_in_coroutine_module,
                'modules': is_in_modules_module,
                'string': is_in_string_module,
                'table': is_in_table_module,
                'math': is_in_math_module,
                'io': is_in_io_module,
                'os': is_in_os_module,
                'debug': is_in_debug_module}

    def get_newest_version():
        """Scrape www.lua.org/manual/ for the highest listed Lua version."""
        f = urllib.urlopen('http://www.lua.org/manual/')
        r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
        for line in f:
            m = r.match(line)
            if m is not None:
                return m.groups()[0]

    def get_lua_functions(version):
        """Collect every 'pdf-<name>' anchor from the manual's index page."""
        f = urllib.urlopen('http://www.lua.org/manual/%s/' % version)
        r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
        functions = []
        for line in f:
            m = r.match(line)
            if m is not None:
                functions.append(m.groups()[0])
        return functions

    def get_function_module(name):
        """Map a builtin name to its module via callbacks, dotted prefix,
        or 'basic' as the fallback."""
        for mod, cb in module_callbacks().iteritems():
            if cb(name):
                return mod
        if '.' in name:
            return name.split('.')[0]
        else:
            return 'basic'

    def regenerate(filename, modules):
        """Rewrite *filename* in place, replacing only the text between
        'MODULES = {' and the __main__ guard with the new table."""
        f = open(filename)
        try:
            content = f.read()
        finally:
            f.close()
        # Everything before the MODULES assignment and from the __main__
        # guard onwards is preserved verbatim.
        header = content[:content.find('MODULES = {')]
        footer = content[content.find("if __name__ == '__main__':"):]
        f = open(filename, 'w')
        f.write(header)
        f.write('MODULES = %s\n\n' % pprint.pformat(modules))
        f.write(footer)
        f.close()

    def run():
        version = get_newest_version()
        print '> Downloading function index for Lua %s' % version
        functions = get_lua_functions(version)
        print '> %d functions found:' % len(functions)
        modules = {}
        for full_function_name in functions:
            print '>> %s' % full_function_name
            m = get_function_module(full_function_name)
            modules.setdefault(m, []).append(full_function_name)
        regenerate(__file__, modules)

    run()
|
kellyschrock/ardupilot | refs/heads/master | Tools/autotest/quadplane.py | 3 | #!/usr/bin/env python
# Fly ArduPlane QuadPlane in SITL
from __future__ import print_function
import os
import pexpect
from pymavlink import mavutil
from common import AutoTest
from common import AutoTestTimeoutException
from pysim import util
from pysim import vehicleinfo
import operator
# Directory containing this script; mission/fence files are resolved
# relative to it.
testdir = os.path.dirname(os.path.realpath(__file__))

# SITL home location used for QuadPlane tests (lat, lon, alt, heading).
SITL_START_LOCATION = mavutil.location(-27.274439, 151.290064, 343, 8.7)
MISSION = 'ArduPlane-Missions/Dalby-OBC2016.txt'
FENCE = 'ArduPlane-Missions/Dalby-OBC2016-fence.txt'
WIND = "0,180,0.2"  # speed,direction,variance
class AutoTestQuadPlane(AutoTest):
    """SITL autotest suite for ArduPlane's QuadPlane (VTOL) configuration."""

    def default_frame(self):
        return "quadplane"

    def test_filepath(self):
        return os.path.realpath(__file__)

    def sitl_start_location(self):
        return SITL_START_LOCATION

    def log_name(self):
        return "QuadPlane"

    def apply_defaultfile_parameters(self):
        # plane passes in a defaults_file in place of applying
        # parameters afterwards.
        pass

    def defaults_filepath(self):
        # Resolve the frame's default-parameter file(s) from vehicleinfo
        # into a comma-separated list of absolute paths.
        vinfo = vehicleinfo.VehicleInfo()
        defaults_file = vinfo.options["ArduPlane"]["frames"][self.frame]["default_params_filename"]
        if isinstance(defaults_file, str):
            defaults_file = [defaults_file]
        defaults_list = []
        for d in defaults_file:
            defaults_list.append(os.path.join(testdir, d))
        return ','.join(defaults_list)

    def is_plane(self):
        return True

    def get_stick_arming_channel(self):
        # Plane arms/disarms with the yaw (rudder) stick.
        return int(self.get_parameter("RCMAP_YAW"))

    def get_disarm_delay(self):
        return int(self.get_parameter("LAND_DISARMDELAY"))

    def set_autodisarm_delay(self, delay):
        self.set_parameter("LAND_DISARMDELAY", delay)

    def test_motor_mask(self):
        """Check operation of output_motor_mask"""
        """copter tailsitters will add condition: or (int(self.get_parameter('Q_TAILSIT_MOTMX')) & 1)"""
        # Only meaningful when motor 1 is a tilting motor.
        if not(int(self.get_parameter('Q_TILT_MASK')) & 1):
            self.progress("output_motor_mask not in use")
            return
        self.progress("Testing output_motor_mask")
        self.wait_ready_to_arm()
        """Default channel for Motor1 is 5"""
        self.progress('Assert that SERVO5 is Motor1')
        assert(33 == self.get_parameter('SERVO5_FUNCTION'))
        modes = ('MANUAL', 'FBWA', 'QHOVER')
        for mode in modes:
            # In each mode, raising throttle must spin up Motor1
            # (servo output above its 1100us minimum).
            self.progress("Testing %s mode" % mode)
            self.change_mode(mode)
            self.arm_vehicle()
            self.progress("Raising throttle")
            self.set_rc(3, 1800)
            self.progress("Waiting for Motor1 to start")
            self.wait_servo_channel_value(5, 1100, comparator=operator.gt)
            self.set_rc(3, 1000)
            self.disarm_vehicle()
            self.wait_ready_to_arm()

    def fly_mission(self, filename, fence, height_accuracy=-1):
        """Fly a mission from a file."""
        self.progress("Flying mission %s" % filename)
        self.load_mission(filename)
        self.mavproxy.send('fence load %s\n' % fence)
        self.mavproxy.send('wp list\n')
        self.mavproxy.expect('Requesting [0-9]+ waypoints')
        self.wait_ready_to_arm()
        self.arm_vehicle()
        self.mavproxy.send('mode AUTO\n')
        self.wait_mode('AUTO')
        # First leg: waypoints 1-19, then the vehicle lands and disarms.
        self.wait_waypoint(1, 19, max_dist=60, timeout=1200)
        self.mav.motors_disarmed_wait()
        # wait for blood sample here
        self.mavproxy.send('wp set 20\n')
        self.wait_ready_to_arm()
        self.arm_vehicle()
        # Second leg: waypoints 20-34 back home.
        self.wait_waypoint(20, 34, max_dist=60, timeout=1200)
        self.mav.motors_disarmed_wait()
        self.progress("Mission OK")

    def fly_qautotune(self):
        """Hover to ~35m, run QAUTOTUNE until success, land and save gains."""
        self.change_mode("QHOVER")
        self.wait_ready_to_arm()
        self.arm_vehicle()
        self.set_rc(3, 1800)
        self.wait_altitude(30,
                           40,
                           relative=True,
                           timeout=30)
        self.set_rc(3, 1500)
        self.change_mode("QAUTOTUNE")
        tstart = self.get_sim_time()
        sim_time_expected = 5000
        deadline = tstart + sim_time_expected
        # Watch STATUSTEXT until the autotune reports success or we time out.
        while self.get_sim_time_cached() < deadline:
            now = self.get_sim_time_cached()
            m = self.mav.recv_match(type='STATUSTEXT',
                                    blocking=True,
                                    timeout=1)
            if m is None:
                continue
            self.progress("STATUSTEXT (%u<%u): %s" % (now, deadline, m.text))
            if "AutoTune: Success" in m.text:
                break
        self.progress("AUTOTUNE OK (%u seconds)" % (now - tstart))
        # Descend and keep requesting disarm until the gains-saved message
        # confirms the tune was stored.
        self.set_rc(3, 1200)
        self.wait_altitude(-5, 1, relative=True, timeout=30)
        while self.get_sim_time_cached() < deadline:
            self.mavproxy.send('disarm\n')
            try:
                self.wait_text("AutoTune: Saved gains for Roll Pitch Yaw", timeout=0.5)
            except AutoTestTimeoutException as e:
                continue
            break
        self.mav.motors_disarmed_wait()

    def test_pid_tuning(self):
        self.change_mode("FBWA")  # we don't update PIDs in MANUAL
        super(AutoTestQuadPlane, self).test_pid_tuning()

    def test_parameter_checks(self):
        self.test_parameter_checks_poscontrol("Q_P")

    def default_mode(self):
        return "MANUAL"

    def disabled_tests(self):
        # Tests excluded from the default run, with the reason why.
        return {
            "QAutoTune": "See https://github.com/ArduPilot/ardupilot/issues/10411",
        }

    def tests(self):
        '''return list of all tests'''
        m = os.path.join(testdir, "ArduPlane-Missions/Dalby-OBC2016.txt")
        f = os.path.join(testdir,
                         "ArduPlane-Missions/Dalby-OBC2016-fence.txt")
        ret = super(AutoTestQuadPlane, self).tests()
        ret.extend([
            ("TestMotorMask", "Test output_motor_mask", self.test_motor_mask),

            ("QAutoTune", "Fly QAUTOTUNE mode", self.fly_qautotune),

            ("ParameterChecks",
             "Test Arming Parameter Checks",
             self.test_parameter_checks),

            ("Mission", "Dalby Mission",
             lambda: self.fly_mission(m, f))
        ])
        return ret
|
sunlianqiang/kbengine | refs/heads/master | kbe/src/lib/python/Lib/idlelib/run.py | 80 | import sys
import io
import linecache
import time
import socket
import traceback
import _thread as thread
import threading
import queue
import tkinter
from idlelib import CallTips
from idlelib import AutoComplete
from idlelib import RemoteDebugger
from idlelib import RemoteObjectBrowser
from idlelib import StackViewer
from idlelib import rpc
from idlelib import PyShell
from idlelib import IOBinding
import __main__
LOCALHOST = '127.0.0.1'  # the subprocess only ever connects back to the GUI on loopback
import warnings
def idle_showwarning_subproc(
        message, category, filename, lineno, file=None, line=None):
    """Show Idle-format warning after replacing warnings.showwarning.

    The only difference is the formatter called.
    """
    target = sys.stderr if file is None else file
    formatted = PyShell.idle_formatwarning(
        message, category, filename, lineno, line)
    try:
        target.write(formatted)
    except IOError:
        # The file (probably stderr) is invalid - this warning gets lost.
        pass
# Saved copy of the real warnings.showwarning while capture is active.
_warnings_showwarning = None


def capture_warnings(capture):
    "Replace warning.showwarning with idle_showwarning_subproc, or reverse."
    global _warnings_showwarning
    if not capture:
        # Restore the original hook, if we ever swapped it out.
        if _warnings_showwarning is not None:
            warnings.showwarning = _warnings_showwarning
            _warnings_showwarning = None
        return
    # Install the IDLE hook exactly once.
    if _warnings_showwarning is None:
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = idle_showwarning_subproc
# Capture warnings for the lifetime of the subprocess (turned back off at
# the bottom of this module once imports are done).
capture_warnings(True)
# A bare Tcl interpreter (no Tk root window) is enough to pump the event queue.
tcl = tkinter.Tcl()


def handle_tk_events(tcl=tcl):
    """Process any tk events that are ready to be dispatched if tkinter
    has been imported, a tcl interpreter has been created and tk has been
    loaded."""
    tcl.eval("update")
# Thread shared globals: Establish a queue between a subthread (which handles
# the socket) and the main thread (which runs user code), plus global
# completion, exit and interruptable (the main thread) flags:

exit_now = False       # main loop should exit the subprocess
quitting = False       # escalate the next KeyboardInterrupt into exit
interruptable = False  # True only while user code runs in Executive.runcode()
def main(del_exitfunc=False):
    """Start the Python execution server in a subprocess

    In the Python subprocess, RPCServer is instantiated with handlerclass
    MyHandler, which inherits register/unregister methods from RPCHandler via
    the mix-in class SocketIO.

    When the RPCServer 'server' is instantiated, the TCPServer initialization
    creates an instance of run.MyHandler and calls its handle() method.
    handle() instantiates a run.Executive object, passing it a reference to the
    MyHandler object.  That reference is saved as attribute rpchandler of the
    Executive instance.  The Executive methods have access to the reference and
    can pass it on to entities that they command
    (e.g. RemoteDebugger.Debugger.start_debugger()).  The latter, in turn, can
    call MyHandler(SocketIO) register/unregister methods via the reference to
    register and unregister themselves.
    """
    global exit_now
    global quitting
    global no_exitfunc
    no_exitfunc = del_exitfunc
    #time.sleep(15) # test subprocess not responding
    try:
        # The GUI passes the port it is listening on as the last argv item.
        assert(len(sys.argv) > 1)
        port = int(sys.argv[-1])
    except:
        print("IDLE Subprocess: no IP port passed in sys.argv.",
              file=sys.__stderr__)
        return

    capture_warnings(True)
    sys.argv[:] = [""]
    # The socket lives on a daemon thread; user code runs on this thread.
    sockthread = threading.Thread(target=manage_socket,
                                  name='SockThread',
                                  args=((LOCALHOST, port),))
    sockthread.daemon = True
    sockthread.start()
    while 1:
        try:
            if exit_now:
                try:
                    exit()
                except KeyboardInterrupt:
                    # exiting but got an extra KBI? Try again!
                    continue
            try:
                # Poll for the next request from the GUI; the short timeout
                # lets us keep servicing tk events in between.
                seq, request = rpc.request_queue.get(block=True, timeout=0.05)
            except queue.Empty:
                handle_tk_events()
                continue
            method, args, kwargs = request
            ret = method(*args, **kwargs)
            rpc.response_queue.put((seq, ret))
        except KeyboardInterrupt:
            if quitting:
                exit_now = True
            continue
        except SystemExit:
            capture_warnings(False)
            raise
        except:
            # Anything else: report it over the link if possible, otherwise
            # dump it to the real stderr and give up.
            type, value, tb = sys.exc_info()
            try:
                print_exception()
                rpc.response_queue.put((seq, None))
            except:
                # Link didn't work, print same exception to __stderr__
                traceback.print_exception(type, value, tb, file=sys.__stderr__)
                exit()
            else:
                continue
def manage_socket(address):
    """Create the RPC server on *address* and serve exactly one request.

    Retries the bind/connect up to three times (with increasing delay)
    before reporting failure to the user and asking the main loop to exit.
    """
    for i in range(3):
        time.sleep(i)
        try:
            server = MyRPCServer(address, MyHandler)
            break
        except OSError as err:
            print("IDLE Subprocess: OSError: " + err.args[1] +
                  ", retrying....", file=sys.__stderr__)
            socket_error = err
    else:
        print("IDLE Subprocess: Connection to "
              "IDLE GUI failed, exiting.", file=sys.__stderr__)
        show_socket_error(socket_error, address)
        global exit_now
        exit_now = True
        return
    server.handle_request()  # A single request only
def show_socket_error(err, address):
    """Pop up a Tk message box describing a socket failure on *address*."""
    import tkinter
    import tkinter.messagebox as tkMessageBox
    # A hidden root is needed so the message box has a parent to attach to.
    root = tkinter.Tk()
    root.withdraw()
    if err.args[0] == 61:  # connection refused
        msg = "IDLE's subprocess can't connect to %s:%d.  This may be due "\
              "to your personal firewall configuration.  It is safe to "\
              "allow this internal connection because no data is visible on "\
              "external ports." % address
        tkMessageBox.showerror("IDLE Subprocess Error", msg, parent=root)
    else:
        # NOTE(review): unlike the branch above, this dialog is not given
        # parent=root -- confirm whether that is intentional.
        tkMessageBox.showerror("IDLE Subprocess Error",
                               "Socket Error: %s" % err.args[1])
    root.destroy()
def print_exception():
    """Print the current exception (sys.exc_info) to sys.stderr.

    Follows __cause__/__context__ chains the same way the interpreter
    does, while pruning IDLE/RPC internals from each traceback.
    """
    import linecache
    linecache.checkcache()
    flush_stdout()
    efile = sys.stderr
    typ, val, tb = excinfo = sys.exc_info()
    sys.last_type, sys.last_value, sys.last_traceback = excinfo
    # Guard against cycles in the cause/context chain.
    seen = set()

    def print_exc(typ, exc, tb):
        # Recursively print chained exceptions, oldest first.
        seen.add(exc)
        context = exc.__context__
        cause = exc.__cause__
        if cause is not None and cause not in seen:
            print_exc(type(cause), cause, cause.__traceback__)
            print("\nThe above exception was the direct cause "
                  "of the following exception:\n", file=efile)
        elif (context is not None and
              not exc.__suppress_context__ and
              context not in seen):
            print_exc(type(context), context, context.__traceback__)
            print("\nDuring handling of the above exception, "
                  "another exception occurred:\n", file=efile)
        if tb:
            tbe = traceback.extract_tb(tb)
            print('Traceback (most recent call last):', file=efile)
            # Hide frames belonging to IDLE's own machinery.
            exclude = ("run.py", "rpc.py", "threading.py", "queue.py",
                       "RemoteDebugger.py", "bdb.py")
            cleanup_traceback(tbe, exclude)
            traceback.print_list(tbe, file=efile)
        lines = traceback.format_exception_only(typ, exc)
        for line in lines:
            print(line, end='', file=efile)

    print_exc(typ, val, tb)
print_exc(typ, val, tb)
def cleanup_traceback(tb, exclude):
    "Remove excluded traces from beginning/end of tb; get cached lines"
    orig_tb = tb[:]
    # Strip leading frames whose filename matches any excluded module.
    while tb:
        for rpcfile in exclude:
            if tb[0][0].count(rpcfile):
                break    # found an exclude, break for: and delete tb[0]
        else:
            break    # no excludes, have left RPC code, break while:
        del tb[0]
    # Strip trailing frames the same way.
    while tb:
        for rpcfile in exclude:
            if tb[-1][0].count(rpcfile):
                break
        else:
            break
        del tb[-1]
    if len(tb) == 0:
        # exception was in IDLE internals, don't prune!
        tb[:] = orig_tb[:]
        print("** IDLE Internal Exception: ", file=sys.stderr)
    rpchandler = rpc.objecttable['exec'].rpchandler
    for i in range(len(tb)):
        fn, ln, nm, line = tb[i]
        if nm == '?':
            nm = "-toplevel-"
        if not line and fn.startswith("<pyshell#"):
            # Source typed into the shell lives in the GUI process; fetch
            # the line over the RPC link.
            line = rpchandler.remotecall('linecache', 'getline',
                                         (fn, ln), {})
        tb[i] = fn, ln, nm, line
tb[i] = fn, ln, nm, line
def flush_stdout():
    """XXX How to do this now?"""
    # Intentionally a no-op; kept so callers have a single flush hook.
def exit():
    """Exit subprocess, possibly after first clearing exit functions.

    If config-main.cfg/.def 'General' 'delete-exitfunc' is True, then any
    functions registered with atexit will be removed before exiting.
    (VPython support)
    """
    if no_exitfunc:
        import atexit
        # atexit._clear() is a private CPython API: drops every handler.
        atexit._clear()
    capture_warnings(False)
    sys.exit(0)
class MyRPCServer(rpc.RPCServer):

    def handle_error(self, request, client_address):
        """Override RPCServer method for IDLE

        Interrupt the MainThread and exit server if link is dropped.
        """
        global quitting
        try:
            # Re-raise whatever exception brought us here so we can
            # dispatch on its type.
            raise
        except SystemExit:
            raise
        except EOFError:
            # GUI closed the link: shut down cleanly.
            global exit_now
            exit_now = True
            thread.interrupt_main()
        except:
            # Anything else is an internal server error: dump diagnostics
            # to the real stderr and force the subprocess down.
            erf = sys.__stderr__
            print('\n' + '-'*40, file=erf)
            print('Unhandled server exception!', file=erf)
            print('Thread: %s' % threading.current_thread().name, file=erf)
            print('Client Address: ', client_address, file=erf)
            print('Request: ', repr(request), file=erf)
            traceback.print_exc(file=erf)
            print('\n*** Unrecoverable, server exiting!', file=erf)
            print('-'*40, file=erf)
            quitting = True
            thread.interrupt_main()
class MyHandler(rpc.RPCHandler):

    def handle(self):
        """Override base method"""
        # Wire up the Executive that runs user code and redirect the three
        # standard streams through the RPC console proxy.
        executive = Executive(self)
        self.register("exec", executive)
        self.console = self.get_remote_proxy("console")
        sys.stdin = PyShell.PseudoInputFile(self.console, "stdin",
                                            IOBinding.encoding)
        sys.stdout = PyShell.PseudoOutputFile(self.console, "stdout",
                                              IOBinding.encoding)
        sys.stderr = PyShell.PseudoOutputFile(self.console, "stderr",
                                              IOBinding.encoding)
        sys.displayhook = rpc.displayhook
        # page help() text to shell.
        import pydoc  # import must be done here to capture i/o binding
        pydoc.pager = pydoc.plainpager

        # Keep a reference to stdin so that it won't try to exit IDLE if
        # sys.stdin gets changed from within IDLE's shell. See issue17838.
        self._keep_stdin = sys.stdin

        self.interp = self.get_remote_proxy("interp")
        rpc.RPCHandler.getresponse(self, myseq=None, wait=0.05)

    def exithook(self):
        "override SocketIO method - wait for MainThread to shut us down"
        time.sleep(10)

    def EOFhook(self):
        "Override SocketIO method - terminate wait on callback and exit thread"
        global quitting
        quitting = True
        thread.interrupt_main()

    def decode_interrupthook(self):
        "interrupt awakened thread"
        global quitting
        quitting = True
        thread.interrupt_main()
class Executive(object):
    """Remote-callable facade: executes user code and services the GUI's
    calltip/completion/debugger/stack-viewer requests."""

    def __init__(self, rpchandler):
        self.rpchandler = rpchandler
        # User code executes in __main__'s namespace.
        self.locals = __main__.__dict__
        self.calltip = CallTips.CallTips()
        self.autocomplete = AutoComplete.AutoComplete()

    def runcode(self, code):
        """Execute a compiled code object; report exceptions to the GUI."""
        global interruptable
        try:
            self.usr_exc_info = None
            # Only user code may be interrupted by interrupt_the_server().
            interruptable = True
            try:
                exec(code, self.locals)
            finally:
                interruptable = False
        except SystemExit:
            # Scripts that raise SystemExit should just
            # return to the interactive prompt
            pass
        except:
            self.usr_exc_info = sys.exc_info()
            if quitting:
                exit()
            # even print a user code SystemExit exception, continue
            print_exception()
            jit = self.rpchandler.console.getvar("<<toggle-jit-stack-viewer>>")
            if jit:
                self.rpchandler.interp.open_remote_stack_viewer()
        else:
            flush_stdout()

    def interrupt_the_server(self):
        if interruptable:
            thread.interrupt_main()

    def start_the_debugger(self, gui_adap_oid):
        return RemoteDebugger.start_debugger(self.rpchandler, gui_adap_oid)

    def stop_the_debugger(self, idb_adap_oid):
        "Unregister the Idb Adapter.  Link objects and Idb then subject to GC"
        self.rpchandler.unregister(idb_adap_oid)

    def get_the_calltip(self, name):
        return self.calltip.fetch_tip(name)

    def get_the_completion_list(self, what, mode):
        return self.autocomplete.fetch_completions(what, mode)

    def stackviewer(self, flist_oid=None):
        """Return a remote tree item for the last user exception, or None."""
        if self.usr_exc_info:
            typ, val, tb = self.usr_exc_info
        else:
            return None
        flist = None
        if flist_oid is not None:
            flist = self.rpchandler.get_remote_proxy(flist_oid)
        # Skip IDLE's own frames at the top of the traceback.
        while tb and tb.tb_frame.f_globals["__name__"] in ["rpc", "run"]:
            tb = tb.tb_next
        sys.last_type = typ
        sys.last_value = val
        item = StackViewer.StackTreeItem(flist, tb)
        return RemoteObjectBrowser.remote_object_tree_item(item)
capture_warnings(False)  # Restore the real warnings.showwarning after import; see issue 18081.
|
loveshell/volatility | refs/heads/master | volatility/plugins/overlays/windows/win2003_sp12_x64_syscalls.py | 13 | # Volatility
# Copyright (c) 2008-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: MHL
@license: GNU General Public License 2.0
@contact: michael.ligh@mnin.org
This file provides support for Windows 2003 SP1 and SP2 x64.
"""
syscalls = [
[
'NtMapUserPhysicalPagesScatter', # 0x0
'NtWaitForSingleObject', # 0x1
'NtCallbackReturn', # 0x2
'NtReadFile', # 0x3
'NtDeviceIoControlFile', # 0x4
'NtWriteFile', # 0x5
'NtRemoveIoCompletion', # 0x6
'NtReleaseSemaphore', # 0x7
'NtReplyWaitReceivePort', # 0x8
'NtReplyPort', # 0x9
'NtSetInformationThread', # 0xa
'NtSetEvent', # 0xb
'NtClose', # 0xc
'NtQueryObject', # 0xd
'NtQueryInformationFile', # 0xe
'NtOpenKey', # 0xf
'NtEnumerateValueKey', # 0x10
'NtFindAtom', # 0x11
'NtQueryDefaultLocale', # 0x12
'NtQueryKey', # 0x13
'NtQueryValueKey', # 0x14
'NtAllocateVirtualMemory', # 0x15
'NtQueryInformationProcess', # 0x16
'NtWaitForMultipleObjects32', # 0x17
'NtWriteFileGather', # 0x18
'NtSetInformationProcess', # 0x19
'NtCreateKey', # 0x1a
'NtFreeVirtualMemory', # 0x1b
'NtImpersonateClientOfPort', # 0x1c
'NtReleaseMutant', # 0x1d
'NtQueryInformationToken', # 0x1e
'NtRequestWaitReplyPort', # 0x1f
'NtQueryVirtualMemory', # 0x20
'NtOpenThreadToken', # 0x21
'NtQueryInformationThread', # 0x22
'NtOpenProcess', # 0x23
'NtSetInformationFile', # 0x24
'NtMapViewOfSection', # 0x25
'NtAccessCheckAndAuditAlarm', # 0x26
'NtUnmapViewOfSection', # 0x27
'NtReplyWaitReceivePortEx', # 0x28
'NtTerminateProcess', # 0x29
'NtSetEventBoostPriority', # 0x2a
'NtReadFileScatter', # 0x2b
'NtOpenThreadTokenEx', # 0x2c
'NtOpenProcessTokenEx', # 0x2d
'NtQueryPerformanceCounter', # 0x2e
'NtEnumerateKey', # 0x2f
'NtOpenFile', # 0x30
'NtDelayExecution', # 0x31
'NtQueryDirectoryFile', # 0x32
'NtQuerySystemInformation', # 0x33
'NtOpenSection', # 0x34
'NtQueryTimer', # 0x35
'NtFsControlFile', # 0x36
'NtWriteVirtualMemory', # 0x37
'NtCloseObjectAuditAlarm', # 0x38
'NtDuplicateObject', # 0x39
'NtQueryAttributesFile', # 0x3a
'NtClearEvent', # 0x3b
'NtReadVirtualMemory', # 0x3c
'NtOpenEvent', # 0x3d
'NtAdjustPrivilegesToken', # 0x3e
'NtDuplicateToken', # 0x3f
'NtContinue', # 0x40
'NtQueryDefaultUILanguage', # 0x41
'NtQueueApcThread', # 0x42
'NtYieldExecution', # 0x43
'NtAddAtom', # 0x44
'NtCreateEvent', # 0x45
'NtQueryVolumeInformationFile', # 0x46
'NtCreateSection', # 0x47
'NtFlushBuffersFile', # 0x48
'NtApphelpCacheControl', # 0x49
'NtCreateProcessEx', # 0x4a
'NtCreateThread', # 0x4b
'NtIsProcessInJob', # 0x4c
'NtProtectVirtualMemory', # 0x4d
'NtQuerySection', # 0x4e
'NtResumeThread', # 0x4f
'NtTerminateThread', # 0x50
'NtReadRequestData', # 0x51
'NtCreateFile', # 0x52
'NtQueryEvent', # 0x53
'NtWriteRequestData', # 0x54
'NtOpenDirectoryObject', # 0x55
'NtAccessCheckByTypeAndAuditAlarm', # 0x56
'NtQuerySystemTime', # 0x57
'NtWaitForMultipleObjects', # 0x58
'NtSetInformationObject', # 0x59
'NtCancelIoFile', # 0x5a
'NtTraceEvent', # 0x5b
'NtPowerInformation', # 0x5c
'NtSetValueKey', # 0x5d
'NtCancelTimer', # 0x5e
'NtSetTimer', # 0x5f
'NtAcceptConnectPort', # 0x60
'NtAccessCheck', # 0x61
'NtAccessCheckByType', # 0x62
'NtAccessCheckByTypeResultList', # 0x63
'NtAccessCheckByTypeResultListAndAuditAlarm', # 0x64
'NtAccessCheckByTypeResultListAndAuditAlarmByHandle', # 0x65
'NtAddBootEntry', # 0x66
'NtAddDriverEntry', # 0x67
'NtAdjustGroupsToken', # 0x68
'NtAlertResumeThread', # 0x69
'NtAlertThread', # 0x6a
'NtAllocateLocallyUniqueId', # 0x6b
'NtAllocateUserPhysicalPages', # 0x6c
'NtAllocateUuids', # 0x6d
'NtAreMappedFilesTheSame', # 0x6e
'NtAssignProcessToJobObject', # 0x6f
'NtCancelDeviceWakeupRequest', # 0x70
'NtCompactKeys', # 0x71
'NtCompareTokens', # 0x72
'NtCompleteConnectPort', # 0x73
'NtCompressKey', # 0x74
'NtConnectPort', # 0x75
'NtCreateDebugObject', # 0x76
'NtCreateDirectoryObject', # 0x77
'NtCreateEventPair', # 0x78
'NtCreateIoCompletion', # 0x79
'NtCreateJobObject', # 0x7a
'NtCreateJobSet', # 0x7b
'NtCreateKeyedEvent', # 0x7c
'NtCreateMailslotFile', # 0x7d
'NtCreateMutant', # 0x7e
'NtCreateNamedPipeFile', # 0x7f
'NtCreatePagingFile', # 0x80
'NtCreatePort', # 0x81
'NtCreateProcess', # 0x82
'NtCreateProfile', # 0x83
'NtCreateSemaphore', # 0x84
'NtCreateSymbolicLinkObject', # 0x85
'NtCreateTimer', # 0x86
'NtCreateToken', # 0x87
'NtCreateWaitablePort', # 0x88
'NtDebugActiveProcess', # 0x89
'NtDebugContinue', # 0x8a
'NtDeleteAtom', # 0x8b
'NtDeleteBootEntry', # 0x8c
'NtDeleteDriverEntry', # 0x8d
'NtDeleteFile', # 0x8e
'NtDeleteKey', # 0x8f
'NtDeleteObjectAuditAlarm', # 0x90
'NtDeleteValueKey', # 0x91
'NtDisplayString', # 0x92
'NtEnumerateBootEntries', # 0x93
'NtEnumerateDriverEntries', # 0x94
'NtEnumerateSystemEnvironmentValuesEx', # 0x95
'NtExtendSection', # 0x96
'NtFilterToken', # 0x97
'NtFlushInstructionCache', # 0x98
'NtFlushKey', # 0x99
'NtFlushVirtualMemory', # 0x9a
'NtFlushWriteBuffer', # 0x9b
'NtFreeUserPhysicalPages', # 0x9c
'NtGetContextThread', # 0x9d
'NtGetCurrentProcessorNumber', # 0x9e
'NtGetDevicePowerState', # 0x9f
'NtGetPlugPlayEvent', # 0xa0
'NtGetWriteWatch', # 0xa1
'NtImpersonateAnonymousToken', # 0xa2
'NtImpersonateThread', # 0xa3
'NtInitializeRegistry', # 0xa4
'NtInitiatePowerAction', # 0xa5
'NtIsSystemResumeAutomatic', # 0xa6
'NtListenPort', # 0xa7
'NtLoadDriver', # 0xa8
'NtLoadKey', # 0xa9
'NtLoadKey2', # 0xaa
'NtLoadKeyEx', # 0xab
'NtLockFile', # 0xac
'NtLockProductActivationKeys', # 0xad
'NtLockRegistryKey', # 0xae
'NtLockVirtualMemory', # 0xaf
'NtMakePermanentObject', # 0xb0
'NtMakeTemporaryObject', # 0xb1
'NtMapUserPhysicalPages', # 0xb2
'NtModifyBootEntry', # 0xb3
'NtModifyDriverEntry', # 0xb4
'NtNotifyChangeDirectoryFile', # 0xb5
'NtNotifyChangeKey', # 0xb6
'NtNotifyChangeMultipleKeys', # 0xb7
'NtOpenEventPair', # 0xb8
'NtOpenIoCompletion', # 0xb9
'NtOpenJobObject', # 0xba
'NtOpenKeyedEvent', # 0xbb
'NtOpenMutant', # 0xbc
'NtOpenObjectAuditAlarm', # 0xbd
'NtOpenProcessToken', # 0xbe
'NtOpenSemaphore', # 0xbf
'NtOpenSymbolicLinkObject', # 0xc0
'NtOpenThread', # 0xc1
'NtOpenTimer', # 0xc2
'NtPlugPlayControl', # 0xc3
'NtPrivilegeCheck', # 0xc4
'NtPrivilegeObjectAuditAlarm', # 0xc5
'NtPrivilegedServiceAuditAlarm', # 0xc6
'NtPulseEvent', # 0xc7
'NtQueryBootEntryOrder', # 0xc8
'NtQueryBootOptions', # 0xc9
'NtQueryDebugFilterState', # 0xca
'NtQueryDirectoryObject', # 0xcb
'NtQueryDriverEntryOrder', # 0xcc
'NtQueryEaFile', # 0xcd
'NtQueryFullAttributesFile', # 0xce
'NtQueryInformationAtom', # 0xcf
'NtQueryInformationJobObject', # 0xd0
'NtQueryInformationPort', # 0xd1
'NtQueryInstallUILanguage', # 0xd2
'NtQueryIntervalProfile', # 0xd3
'NtQueryIoCompletion', # 0xd4
'NtQueryMultipleValueKey', # 0xd5
'NtQueryMutant', # 0xd6
'NtQueryOpenSubKeys', # 0xd7
'NtQueryOpenSubKeysEx', # 0xd8
'NtQueryPortInformationProcess', # 0xd9
'NtQueryQuotaInformationFile', # 0xda
'NtQuerySecurityObject', # 0xdb
'NtQuerySemaphore', # 0xdc
'NtQuerySymbolicLinkObject', # 0xdd
'NtQuerySystemEnvironmentValue', # 0xde
'NtQuerySystemEnvironmentValueEx', # 0xdf
'NtQueryTimerResolution', # 0xe0
'NtRaiseException', # 0xe1
'NtRaiseHardError', # 0xe2
'NtRegisterThreadTerminatePort', # 0xe3
'NtReleaseKeyedEvent', # 0xe4
'NtRemoveProcessDebug', # 0xe5
'NtRenameKey', # 0xe6
'NtReplaceKey', # 0xe7
'NtReplyWaitReplyPort', # 0xe8
'NtRequestDeviceWakeup', # 0xe9
'NtRequestPort', # 0xea
'NtRequestWakeupLatency', # 0xeb
'NtResetEvent', # 0xec
'NtResetWriteWatch', # 0xed
'NtRestoreKey', # 0xee
'NtResumeProcess', # 0xef
'NtSaveKey', # 0xf0
'NtSaveKeyEx', # 0xf1
'NtSaveMergedKeys', # 0xf2
'NtSecureConnectPort', # 0xf3
'NtSetBootEntryOrder', # 0xf4
'NtSetBootOptions', # 0xf5
'NtSetContextThread', # 0xf6
'NtSetDebugFilterState', # 0xf7
'NtSetDefaultHardErrorPort', # 0xf8
'NtSetDefaultLocale', # 0xf9
'NtSetDefaultUILanguage', # 0xfa
'NtSetDriverEntryOrder', # 0xfb
'NtSetEaFile', # 0xfc
'NtSetHighEventPair', # 0xfd
'NtSetHighWaitLowEventPair', # 0xfe
'NtSetInformationDebugObject', # 0xff
'NtSetInformationJobObject', # 0x100
'NtSetInformationKey', # 0x101
'NtSetInformationToken', # 0x102
'NtSetIntervalProfile', # 0x103
'NtSetIoCompletion', # 0x104
'NtSetLdtEntries', # 0x105
'NtSetLowEventPair', # 0x106
'NtSetLowWaitHighEventPair', # 0x107
'NtSetQuotaInformationFile', # 0x108
'NtSetSecurityObject', # 0x109
'NtSetSystemEnvironmentValue', # 0x10a
'NtSetSystemEnvironmentValueEx', # 0x10b
'NtSetSystemInformation', # 0x10c
'NtSetSystemPowerState', # 0x10d
'NtSetSystemTime', # 0x10e
'NtSetThreadExecutionState', # 0x10f
'NtSetTimerResolution', # 0x110
'NtSetUuidSeed', # 0x111
'NtSetVolumeInformationFile', # 0x112
'NtShutdownSystem', # 0x113
'NtSignalAndWaitForSingleObject', # 0x114
'NtStartProfile', # 0x115
'NtStopProfile', # 0x116
'NtSuspendProcess', # 0x117
'NtSuspendThread', # 0x118
'NtSystemDebugControl', # 0x119
'NtTerminateJobObject', # 0x11a
'NtTestAlert', # 0x11b
'NtTranslateFilePath', # 0x11c
'NtUnloadDriver', # 0x11d
'NtUnloadKey', # 0x11e
'NtUnloadKey2', # 0x11f
'NtUnloadKeyEx', # 0x120
'NtUnlockFile', # 0x121
'NtUnlockVirtualMemory', # 0x122
'NtVdmControl', # 0x123
'NtWaitForDebugEvent', # 0x124
'NtWaitForKeyedEvent', # 0x125
'NtWaitHighEventPair', # 0x126
'NtWaitLowEventPair', # 0x127
],
[
'NtUserGetThreadState', # 0x0
'NtUserPeekMessage', # 0x1
'NtUserCallOneParam', # 0x2
'NtUserGetKeyState', # 0x3
'NtUserInvalidateRect', # 0x4
'NtUserCallNoParam', # 0x5
'NtUserGetMessage', # 0x6
'NtUserMessageCall', # 0x7
'NtGdiBitBlt', # 0x8
'NtGdiGetCharSet', # 0x9
'NtUserGetDC', # 0xa
'NtGdiSelectBitmap', # 0xb
'NtUserWaitMessage', # 0xc
'NtUserTranslateMessage', # 0xd
'NtUserPostMessage', # 0xe
'NtUserQueryWindow', # 0xf
'NtUserTranslateAccelerator', # 0x10
'NtGdiFlush', # 0x11
'NtUserRedrawWindow', # 0x12
'NtUserWindowFromPoint', # 0x13
'NtUserCallMsgFilter', # 0x14
'NtUserValidateTimerCallback', # 0x15
'NtUserBeginPaint', # 0x16
'NtUserSetTimer', # 0x17
'NtUserEndPaint', # 0x18
'NtUserSetCursor', # 0x19
'NtUserKillTimer', # 0x1a
'NtUserBuildHwndList', # 0x1b
'NtUserSelectPalette', # 0x1c
'NtUserCallNextHookEx', # 0x1d
'NtUserHideCaret', # 0x1e
'NtGdiIntersectClipRect', # 0x1f
'NtUserCallHwndLock', # 0x20
'NtUserGetProcessWindowStation', # 0x21
'NtGdiDeleteObjectApp', # 0x22
'NtUserSetWindowPos', # 0x23
'NtUserShowCaret', # 0x24
'NtUserEndDeferWindowPosEx', # 0x25
'NtUserCallHwndParamLock', # 0x26
'NtUserVkKeyScanEx', # 0x27
'NtGdiSetDIBitsToDeviceInternal', # 0x28
'NtUserCallTwoParam', # 0x29
'NtGdiGetRandomRgn', # 0x2a
'NtUserCopyAcceleratorTable', # 0x2b
'NtUserNotifyWinEvent', # 0x2c
'NtGdiExtSelectClipRgn', # 0x2d
'NtUserIsClipboardFormatAvailable', # 0x2e
'NtUserSetScrollInfo', # 0x2f
'NtGdiStretchBlt', # 0x30
'NtUserCreateCaret', # 0x31
'NtGdiRectVisible', # 0x32
'NtGdiCombineRgn', # 0x33
'NtGdiGetDCObject', # 0x34
'NtUserDispatchMessage', # 0x35
'NtUserRegisterWindowMessage', # 0x36
'NtGdiExtTextOutW', # 0x37
'NtGdiSelectFont', # 0x38
'NtGdiRestoreDC', # 0x39
'NtGdiSaveDC', # 0x3a
'NtUserGetForegroundWindow', # 0x3b
'NtUserShowScrollBar', # 0x3c
'NtUserFindExistingCursorIcon', # 0x3d
'NtGdiGetDCDword', # 0x3e
'NtGdiGetRegionData', # 0x3f
'NtGdiLineTo', # 0x40
'NtUserSystemParametersInfo', # 0x41
'NtGdiGetAppClipBox', # 0x42
'NtUserGetAsyncKeyState', # 0x43
'NtUserGetCPD', # 0x44
'NtUserRemoveProp', # 0x45
'NtGdiDoPalette', # 0x46
'NtGdiPolyPolyDraw', # 0x47
'NtUserSetCapture', # 0x48
'NtUserEnumDisplayMonitors', # 0x49
'NtGdiCreateCompatibleBitmap', # 0x4a
'NtUserSetProp', # 0x4b
'NtGdiGetTextCharsetInfo', # 0x4c
'NtUserSBGetParms', # 0x4d
'NtUserGetIconInfo', # 0x4e
'NtUserExcludeUpdateRgn', # 0x4f
'NtUserSetFocus', # 0x50
'NtGdiExtGetObjectW', # 0x51
'NtUserDeferWindowPos', # 0x52
'NtUserGetUpdateRect', # 0x53
'NtGdiCreateCompatibleDC', # 0x54
'NtUserGetClipboardSequenceNumber', # 0x55
'NtGdiCreatePen', # 0x56
'NtUserShowWindow', # 0x57
'NtUserGetKeyboardLayoutList', # 0x58
'NtGdiPatBlt', # 0x59
'NtUserMapVirtualKeyEx', # 0x5a
'NtUserSetWindowLong', # 0x5b
'NtGdiHfontCreate', # 0x5c
'NtUserMoveWindow', # 0x5d
'NtUserPostThreadMessage', # 0x5e
'NtUserDrawIconEx', # 0x5f
'NtUserGetSystemMenu', # 0x60
'NtGdiDrawStream', # 0x61
'NtUserInternalGetWindowText', # 0x62
'NtUserGetWindowDC', # 0x63
'NtGdiD3dDrawPrimitives2', # 0x64
'NtGdiInvertRgn', # 0x65
'NtGdiGetRgnBox', # 0x66
'NtGdiGetAndSetDCDword', # 0x67
'NtGdiMaskBlt', # 0x68
'NtGdiGetWidthTable', # 0x69
'NtUserScrollDC', # 0x6a
'NtUserGetObjectInformation', # 0x6b
'NtGdiCreateBitmap', # 0x6c
'NtGdiConsoleTextOut', # 0x6d
'NtUserFindWindowEx', # 0x6e
'NtGdiPolyPatBlt', # 0x6f
'NtUserUnhookWindowsHookEx', # 0x70
'NtGdiGetNearestColor', # 0x71
'NtGdiTransformPoints', # 0x72
'NtGdiGetDCPoint', # 0x73
'NtUserCheckImeHotKey', # 0x74
'NtGdiCreateDIBBrush', # 0x75
'NtGdiGetTextMetricsW', # 0x76
'NtUserCreateWindowEx', # 0x77
'NtUserSetParent', # 0x78
'NtUserGetKeyboardState', # 0x79
'NtUserToUnicodeEx', # 0x7a
'NtUserGetControlBrush', # 0x7b
'NtUserGetClassName', # 0x7c
'NtGdiAlphaBlend', # 0x7d
'NtGdiDdBlt', # 0x7e
'NtGdiOffsetRgn', # 0x7f
'NtUserDefSetText', # 0x80
'NtGdiGetTextFaceW', # 0x81
'NtGdiStretchDIBitsInternal', # 0x82
'NtUserSendInput', # 0x83
'NtUserGetThreadDesktop', # 0x84
'NtGdiCreateRectRgn', # 0x85
'NtGdiGetDIBitsInternal', # 0x86
'NtUserGetUpdateRgn', # 0x87
'NtGdiDeleteClientObj', # 0x88
'NtUserGetIconSize', # 0x89
'NtUserFillWindow', # 0x8a
'NtGdiExtCreateRegion', # 0x8b
'NtGdiComputeXformCoefficients', # 0x8c
'NtUserSetWindowsHookEx', # 0x8d
'NtUserNotifyProcessCreate', # 0x8e
'NtGdiUnrealizeObject', # 0x8f
'NtUserGetTitleBarInfo', # 0x90
'NtGdiRectangle', # 0x91
'NtUserSetThreadDesktop', # 0x92
'NtUserGetDCEx', # 0x93
'NtUserGetScrollBarInfo', # 0x94
'NtGdiGetTextExtent', # 0x95
'NtUserSetWindowFNID', # 0x96
'NtGdiSetLayout', # 0x97
'NtUserCalcMenuBar', # 0x98
'NtUserThunkedMenuItemInfo', # 0x99
'NtGdiExcludeClipRect', # 0x9a
'NtGdiCreateDIBSection', # 0x9b
'NtGdiGetDCforBitmap', # 0x9c
'NtUserDestroyCursor', # 0x9d
'NtUserDestroyWindow', # 0x9e
'NtUserCallHwndParam', # 0x9f
'NtGdiCreateDIBitmapInternal', # 0xa0
'NtUserOpenWindowStation', # 0xa1
'NtGdiDdDeleteSurfaceObject', # 0xa2
'NtGdiEnumFontClose', # 0xa3
'NtGdiEnumFontOpen', # 0xa4
'NtGdiEnumFontChunk', # 0xa5
'NtGdiDdCanCreateSurface', # 0xa6
'NtGdiDdCreateSurface', # 0xa7
'NtUserSetCursorIconData', # 0xa8
'NtGdiDdDestroySurface', # 0xa9
'NtUserCloseDesktop', # 0xaa
'NtUserOpenDesktop', # 0xab
'NtUserSetProcessWindowStation', # 0xac
'NtUserGetAtomName', # 0xad
'NtGdiDdResetVisrgn', # 0xae
'NtGdiExtCreatePen', # 0xaf
'NtGdiCreatePaletteInternal', # 0xb0
'NtGdiSetBrushOrg', # 0xb1
'NtUserBuildNameList', # 0xb2
'NtGdiSetPixel', # 0xb3
'NtUserRegisterClassExWOW', # 0xb4
'NtGdiCreatePatternBrushInternal', # 0xb5
'NtUserGetAncestor', # 0xb6
'NtGdiGetOutlineTextMetricsInternalW', # 0xb7
'NtGdiSetBitmapBits', # 0xb8
'NtUserCloseWindowStation', # 0xb9
'NtUserGetDoubleClickTime', # 0xba
'NtUserEnableScrollBar', # 0xbb
'NtGdiCreateSolidBrush', # 0xbc
'NtUserGetClassInfoEx', # 0xbd
'NtGdiCreateClientObj', # 0xbe
'NtUserUnregisterClass', # 0xbf
'NtUserDeleteMenu', # 0xc0
'NtGdiRectInRegion', # 0xc1
'NtUserScrollWindowEx', # 0xc2
'NtGdiGetPixel', # 0xc3
'NtUserSetClassLong', # 0xc4
'NtUserGetMenuBarInfo', # 0xc5
'NtGdiDdCreateSurfaceEx', # 0xc6
'NtGdiDdCreateSurfaceObject', # 0xc7
'NtGdiGetNearestPaletteIndex', # 0xc8
'NtGdiDdLockD3D', # 0xc9
'NtGdiDdUnlockD3D', # 0xca
'NtGdiGetCharWidthW', # 0xcb
'NtUserInvalidateRgn', # 0xcc
'NtUserGetClipboardOwner', # 0xcd
'NtUserSetWindowRgn', # 0xce
'NtUserBitBltSysBmp', # 0xcf
'NtGdiGetCharWidthInfo', # 0xd0
'NtUserValidateRect', # 0xd1
'NtUserCloseClipboard', # 0xd2
'NtUserOpenClipboard', # 0xd3
'NtGdiGetStockObject', # 0xd4
'NtUserSetClipboardData', # 0xd5
'NtUserEnableMenuItem', # 0xd6
'NtUserAlterWindowStyle', # 0xd7
'NtGdiFillRgn', # 0xd8
'NtUserGetWindowPlacement', # 0xd9
'NtGdiModifyWorldTransform', # 0xda
'NtGdiGetFontData', # 0xdb
'NtUserGetOpenClipboardWindow', # 0xdc
'NtUserSetThreadState', # 0xdd
'NtGdiOpenDCW', # 0xde
'NtUserTrackMouseEvent', # 0xdf
'NtGdiGetTransform', # 0xe0
'NtUserDestroyMenu', # 0xe1
'NtGdiGetBitmapBits', # 0xe2
'NtUserConsoleControl', # 0xe3
'NtUserSetActiveWindow', # 0xe4
'NtUserSetInformationThread', # 0xe5
'NtUserSetWindowPlacement', # 0xe6
'NtUserGetControlColor', # 0xe7
'NtGdiSetMetaRgn', # 0xe8
'NtGdiSetMiterLimit', # 0xe9
'NtGdiSetVirtualResolution', # 0xea
'NtGdiGetRasterizerCaps', # 0xeb
'NtUserSetWindowWord', # 0xec
'NtUserGetClipboardFormatName', # 0xed
'NtUserRealInternalGetMessage', # 0xee
'NtUserCreateLocalMemHandle', # 0xef
'NtUserAttachThreadInput', # 0xf0
'NtGdiCreateHalftonePalette', # 0xf1
'NtUserPaintMenuBar', # 0xf2
'NtUserSetKeyboardState', # 0xf3
'NtGdiCombineTransform', # 0xf4
'NtUserCreateAcceleratorTable', # 0xf5
'NtUserGetCursorFrameInfo', # 0xf6
'NtUserGetAltTabInfo', # 0xf7
'NtUserGetCaretBlinkTime', # 0xf8
'NtGdiQueryFontAssocInfo', # 0xf9
'NtUserProcessConnect', # 0xfa
'NtUserEnumDisplayDevices', # 0xfb
'NtUserEmptyClipboard', # 0xfc
'NtUserGetClipboardData', # 0xfd
'NtUserRemoveMenu', # 0xfe
'NtGdiSetBoundsRect', # 0xff
'NtUserSetInformationProcess', # 0x100
'NtGdiGetBitmapDimension', # 0x101
'NtUserConvertMemHandle', # 0x102
'NtUserDestroyAcceleratorTable', # 0x103
'NtUserGetGUIThreadInfo', # 0x104
'NtGdiCloseFigure', # 0x105
'NtUserSetWindowsHookAW', # 0x106
'NtUserSetMenuDefaultItem', # 0x107
'NtUserCheckMenuItem', # 0x108
'NtUserSetWinEventHook', # 0x109
'NtUserUnhookWinEvent', # 0x10a
'NtGdiSetupPublicCFONT', # 0x10b
'NtUserLockWindowUpdate', # 0x10c
'NtUserSetSystemMenu', # 0x10d
'NtUserThunkedMenuInfo', # 0x10e
'NtGdiBeginPath', # 0x10f
'NtGdiEndPath', # 0x110
'NtGdiFillPath', # 0x111
'NtUserCallHwnd', # 0x112
'NtUserDdeInitialize', # 0x113
'NtUserModifyUserStartupInfoFlags', # 0x114
'NtUserCountClipboardFormats', # 0x115
'NtGdiAddFontMemResourceEx', # 0x116
'NtGdiEqualRgn', # 0x117
'NtGdiGetSystemPaletteUse', # 0x118
'NtGdiRemoveFontMemResourceEx', # 0x119
'NtUserEnumDisplaySettings', # 0x11a
'NtUserPaintDesktop', # 0x11b
'NtGdiExtEscape', # 0x11c
'NtGdiSetBitmapDimension', # 0x11d
'NtGdiSetFontEnumeration', # 0x11e
'NtUserChangeClipboardChain', # 0x11f
'NtUserResolveDesktop', # 0x120
'NtUserSetClipboardViewer', # 0x121
'NtUserShowWindowAsync', # 0x122
'NtUserSetConsoleReserveKeys', # 0x123
'NtGdiCreateColorSpace', # 0x124
'NtGdiDeleteColorSpace', # 0x125
'NtUserActivateKeyboardLayout', # 0x126
'NtGdiAbortDoc', # 0x127
'NtGdiAbortPath', # 0x128
'NtGdiAddEmbFontToDC', # 0x129
'NtGdiAddFontResourceW', # 0x12a
'NtGdiAddRemoteFontToDC', # 0x12b
'NtGdiAddRemoteMMInstanceToDC', # 0x12c
'NtGdiAngleArc', # 0x12d
'NtGdiAnyLinkedFonts', # 0x12e
'NtGdiArcInternal', # 0x12f
'NtGdiBRUSHOBJ_DeleteRbrush', # 0x130
'NtGdiBRUSHOBJ_hGetColorTransform', # 0x131
'NtGdiBRUSHOBJ_pvAllocRbrush', # 0x132
'NtGdiBRUSHOBJ_pvGetRbrush', # 0x133
'NtGdiBRUSHOBJ_ulGetBrushColor', # 0x134
'NtGdiCLIPOBJ_bEnum', # 0x135
'NtGdiCLIPOBJ_cEnumStart', # 0x136
'NtGdiCLIPOBJ_ppoGetPath', # 0x137
'NtGdiCancelDC', # 0x138
'NtGdiChangeGhostFont', # 0x139
'NtGdiCheckBitmapBits', # 0x13a
'NtGdiClearBitmapAttributes', # 0x13b
'NtGdiClearBrushAttributes', # 0x13c
'NtGdiColorCorrectPalette', # 0x13d
'NtGdiConvertMetafileRect', # 0x13e
'NtGdiCreateColorTransform', # 0x13f
'NtGdiCreateEllipticRgn', # 0x140
'NtGdiCreateHatchBrushInternal', # 0x141
'NtGdiCreateMetafileDC', # 0x142
'NtGdiCreateRoundRectRgn', # 0x143
'NtGdiCreateServerMetaFile', # 0x144
'NtGdiD3dContextCreate', # 0x145
'NtGdiD3dContextDestroy', # 0x146
'NtGdiD3dContextDestroyAll', # 0x147
'NtGdiD3dValidateTextureStageState', # 0x148
'NtGdiDdAddAttachedSurface', # 0x149
'NtGdiDdAlphaBlt', # 0x14a
'NtGdiDdAttachSurface', # 0x14b
'NtGdiDdBeginMoCompFrame', # 0x14c
'NtGdiDdCanCreateD3DBuffer', # 0x14d
'NtGdiDdColorControl', # 0x14e
'NtGdiDdCreateD3DBuffer', # 0x14f
'NtGdiDdCreateDirectDrawObject', # 0x150
'NtGdiDdCreateMoComp', # 0x151
'NtGdiDdDeleteDirectDrawObject', # 0x152
'NtGdiDdDestroyD3DBuffer', # 0x153
'NtGdiDdDestroyMoComp', # 0x154
'NtGdiDdEndMoCompFrame', # 0x155
'NtGdiDdFlip', # 0x156
'NtGdiDdFlipToGDISurface', # 0x157
'NtGdiDdGetAvailDriverMemory', # 0x158
'NtGdiDdGetBltStatus', # 0x159
'NtGdiDdGetDC', # 0x15a
'NtGdiDdGetDriverInfo', # 0x15b
'NtGdiDdGetDriverState', # 0x15c
'NtGdiDdGetDxHandle', # 0x15d
'NtGdiDdGetFlipStatus', # 0x15e
'NtGdiDdGetInternalMoCompInfo', # 0x15f
'NtGdiDdGetMoCompBuffInfo', # 0x160
'NtGdiDdGetMoCompFormats', # 0x161
'NtGdiDdGetMoCompGuids', # 0x162
'NtGdiDdGetScanLine', # 0x163
'NtGdiDdLock', # 0x164
'NtGdiDdQueryDirectDrawObject', # 0x165
'NtGdiDdQueryMoCompStatus', # 0x166
'NtGdiDdReenableDirectDrawObject', # 0x167
'NtGdiDdReleaseDC', # 0x168
'NtGdiDdRenderMoComp', # 0x169
'NtGdiDdSetColorKey', # 0x16a
'NtGdiDdSetExclusiveMode', # 0x16b
'NtGdiDdSetGammaRamp', # 0x16c
'NtGdiDdSetOverlayPosition', # 0x16d
'NtGdiDdUnattachSurface', # 0x16e
'NtGdiDdUnlock', # 0x16f
'NtGdiDdUpdateOverlay', # 0x170
'NtGdiDdWaitForVerticalBlank', # 0x171
'NtGdiDeleteColorTransform', # 0x172
'NtGdiDescribePixelFormat', # 0x173
'NtGdiDoBanding', # 0x174
'NtGdiDrawEscape', # 0x175
'NtGdiDvpAcquireNotification', # 0x176
'NtGdiDvpCanCreateVideoPort', # 0x177
'NtGdiDvpColorControl', # 0x178
'NtGdiDvpCreateVideoPort', # 0x179
'NtGdiDvpDestroyVideoPort', # 0x17a
'NtGdiDvpFlipVideoPort', # 0x17b
'NtGdiDvpGetVideoPortBandwidth', # 0x17c
'NtGdiDvpGetVideoPortConnectInfo', # 0x17d
'NtGdiDvpGetVideoPortField', # 0x17e
'NtGdiDvpGetVideoPortFlipStatus', # 0x17f
'NtGdiDvpGetVideoPortInputFormats', # 0x180
'NtGdiDvpGetVideoPortLine', # 0x181
'NtGdiDvpGetVideoPortOutputFormats', # 0x182
'NtGdiDvpGetVideoSignalStatus', # 0x183
'NtGdiDvpReleaseNotification', # 0x184
'NtGdiDvpUpdateVideoPort', # 0x185
'NtGdiDvpWaitForVideoPortSync', # 0x186
'NtGdiDxgGenericThunk', # 0x187
'NtGdiEllipse', # 0x188
'NtGdiEnableEudc', # 0x189
'NtGdiEndDoc', # 0x18a
'NtGdiEndPage', # 0x18b
'NtGdiEngAlphaBlend', # 0x18c
'NtGdiEngAssociateSurface', # 0x18d
'NtGdiEngBitBlt', # 0x18e
'NtGdiEngCheckAbort', # 0x18f
'NtGdiEngComputeGlyphSet', # 0x190
'NtGdiEngCopyBits', # 0x191
'NtGdiEngCreateBitmap', # 0x192
'NtGdiEngCreateClip', # 0x193
'NtGdiEngCreateDeviceBitmap', # 0x194
'NtGdiEngCreateDeviceSurface', # 0x195
'NtGdiEngCreatePalette', # 0x196
'NtGdiEngDeleteClip', # 0x197
'NtGdiEngDeletePalette', # 0x198
'NtGdiEngDeletePath', # 0x199
'NtGdiEngDeleteSurface', # 0x19a
'NtGdiEngEraseSurface', # 0x19b
'NtGdiEngFillPath', # 0x19c
'NtGdiEngGradientFill', # 0x19d
'NtGdiEngLineTo', # 0x19e
'NtGdiEngLockSurface', # 0x19f
'NtGdiEngMarkBandingSurface', # 0x1a0
'NtGdiEngPaint', # 0x1a1
'NtGdiEngPlgBlt', # 0x1a2
'NtGdiEngStretchBlt', # 0x1a3
'NtGdiEngStretchBltROP', # 0x1a4
'NtGdiEngStrokeAndFillPath', # 0x1a5
'NtGdiEngStrokePath', # 0x1a6
'NtGdiEngTextOut', # 0x1a7
'NtGdiEngTransparentBlt', # 0x1a8
'NtGdiEngUnlockSurface', # 0x1a9
'NtGdiEnumObjects', # 0x1aa
'NtGdiEudcLoadUnloadLink', # 0x1ab
'NtGdiExtFloodFill', # 0x1ac
'NtGdiFONTOBJ_cGetAllGlyphHandles', # 0x1ad
'NtGdiFONTOBJ_cGetGlyphs', # 0x1ae
'NtGdiFONTOBJ_pQueryGlyphAttrs', # 0x1af
'NtGdiFONTOBJ_pfdg', # 0x1b0
'NtGdiFONTOBJ_pifi', # 0x1b1
'NtGdiFONTOBJ_pvTrueTypeFontFile', # 0x1b2
'NtGdiFONTOBJ_pxoGetXform', # 0x1b3
'NtGdiFONTOBJ_vGetInfo', # 0x1b4
'NtGdiFlattenPath', # 0x1b5
'NtGdiFontIsLinked', # 0x1b6
'NtGdiForceUFIMapping', # 0x1b7
'NtGdiFrameRgn', # 0x1b8
'NtGdiFullscreenControl', # 0x1b9
'NtGdiGetBoundsRect', # 0x1ba
'NtGdiGetCharABCWidthsW', # 0x1bb
'NtGdiGetCharacterPlacementW', # 0x1bc
'NtGdiGetColorAdjustment', # 0x1bd
'NtGdiGetColorSpaceforBitmap', # 0x1be
'NtGdiGetDeviceCaps', # 0x1bf
'NtGdiGetDeviceCapsAll', # 0x1c0
'NtGdiGetDeviceGammaRamp', # 0x1c1
'NtGdiGetDeviceWidth', # 0x1c2
'NtGdiGetDhpdev', # 0x1c3
'NtGdiGetETM', # 0x1c4
'NtGdiGetEmbUFI', # 0x1c5
'NtGdiGetEmbedFonts', # 0x1c6
'NtGdiGetEudcTimeStampEx', # 0x1c7
'NtGdiGetFontResourceInfoInternalW', # 0x1c8
'NtGdiGetFontUnicodeRanges', # 0x1c9
'NtGdiGetGlyphIndicesW', # 0x1ca
'NtGdiGetGlyphIndicesWInternal', # 0x1cb
'NtGdiGetGlyphOutline', # 0x1cc
'NtGdiGetKerningPairs', # 0x1cd
'NtGdiGetLinkedUFIs', # 0x1ce
'NtGdiGetMiterLimit', # 0x1cf
'NtGdiGetMonitorID', # 0x1d0
'NtGdiGetObjectBitmapHandle', # 0x1d1
'NtGdiGetPath', # 0x1d2
'NtGdiGetPerBandInfo', # 0x1d3
'NtGdiGetRealizationInfo', # 0x1d4
'NtGdiGetServerMetaFileBits', # 0x1d5
'NtGdiGetSpoolMessage', # 0x1d6
'NtGdiGetStats', # 0x1d7
'NtGdiGetStringBitmapW', # 0x1d8
'NtGdiGetTextExtentExW', # 0x1d9
'NtGdiGetUFI', # 0x1da
'NtGdiGetUFIPathname', # 0x1db
'NtGdiGradientFill', # 0x1dc
'NtGdiHT_Get8BPPFormatPalette', # 0x1dd
'NtGdiHT_Get8BPPMaskPalette', # 0x1de
'NtGdiIcmBrushInfo', # 0x1df
'NtGdiInit', # 0x1e0
'NtGdiInitSpool', # 0x1e1
'NtGdiMakeFontDir', # 0x1e2
'NtGdiMakeInfoDC', # 0x1e3
'NtGdiMakeObjectUnXferable', # 0x1e4
'NtGdiMakeObjectXferable', # 0x1e5
'NtGdiMirrorWindowOrg', # 0x1e6
'NtGdiMonoBitmap', # 0x1e7
'NtGdiMoveTo', # 0x1e8
'NtGdiOffsetClipRgn', # 0x1e9
'NtGdiPATHOBJ_bEnum', # 0x1ea
'NtGdiPATHOBJ_bEnumClipLines', # 0x1eb
'NtGdiPATHOBJ_vEnumStart', # 0x1ec
'NtGdiPATHOBJ_vEnumStartClipLines', # 0x1ed
'NtGdiPATHOBJ_vGetBounds', # 0x1ee
'NtGdiPathToRegion', # 0x1ef
'NtGdiPlgBlt', # 0x1f0
'NtGdiPolyDraw', # 0x1f1
'NtGdiPolyTextOutW', # 0x1f2
'NtGdiPtInRegion', # 0x1f3
'NtGdiPtVisible', # 0x1f4
'NtGdiQueryFonts', # 0x1f5
'NtGdiRemoveFontResourceW', # 0x1f6
'NtGdiRemoveMergeFont', # 0x1f7
'NtGdiResetDC', # 0x1f8
'NtGdiResizePalette', # 0x1f9
'NtGdiRoundRect', # 0x1fa
'NtGdiSTROBJ_bEnum', # 0x1fb
'NtGdiSTROBJ_bEnumPositionsOnly', # 0x1fc
'NtGdiSTROBJ_bGetAdvanceWidths', # 0x1fd
'NtGdiSTROBJ_dwGetCodePage', # 0x1fe
'NtGdiSTROBJ_vEnumStart', # 0x1ff
'NtGdiScaleViewportExtEx', # 0x200
'NtGdiScaleWindowExtEx', # 0x201
'GreSelectBrush', # 0x202
'NtGdiSelectClipPath', # 0x203
'NtGdiSelectPen', # 0x204
'NtGdiSetBitmapAttributes', # 0x205
'NtGdiSetBrushAttributes', # 0x206
'NtGdiSetColorAdjustment', # 0x207
'NtGdiSetColorSpace', # 0x208
'NtGdiSetDeviceGammaRamp', # 0x209
'NtGdiSetFontXform', # 0x20a
'NtGdiSetIcmMode', # 0x20b
'NtGdiSetLinkedUFIs', # 0x20c
'NtGdiSetMagicColors', # 0x20d
'NtGdiSetPUMPDOBJ', # 0x20e
'NtGdiSetPixelFormat', # 0x20f
'NtGdiSetRectRgn', # 0x210
'NtGdiSetSizeDevice', # 0x211
'NtGdiSetSystemPaletteUse', # 0x212
'NtGdiSetTextJustification', # 0x213
'NtGdiStartDoc', # 0x214
'NtGdiStartPage', # 0x215
'NtGdiStrokeAndFillPath', # 0x216
'NtGdiStrokePath', # 0x217
'NtGdiSwapBuffers', # 0x218
'NtGdiTransparentBlt', # 0x219
'NtGdiUMPDEngFreeUserMem', # 0x21a
'NtGdiUnloadPrinterDriver', # 0x21b
'EngRestoreFloatingPointState', # 0x21c
'NtGdiUpdateColors', # 0x21d
'NtGdiUpdateTransform', # 0x21e
'NtGdiWidenPath', # 0x21f
'NtGdiXFORMOBJ_bApplyXform', # 0x220
'NtGdiXFORMOBJ_iGetXform', # 0x221
'NtGdiXLATEOBJ_cGetPalette', # 0x222
'NtGdiXLATEOBJ_hGetColorTransform', # 0x223
'NtGdiXLATEOBJ_iXlate', # 0x224
'NtUserAssociateInputContext', # 0x225
'NtUserBlockInput', # 0x226
'NtUserBuildHimcList', # 0x227
'NtUserBuildPropList', # 0x228
'NtUserCallHwndOpt', # 0x229
'NtUserChangeDisplaySettings', # 0x22a
'NtUserChildWindowFromPointEx', # 0x22b
'NtUserClipCursor', # 0x22c
'NtUserCreateDesktop', # 0x22d
'NtUserCreateInputContext', # 0x22e
'NtUserCreateWindowStation', # 0x22f
'NtUserCtxDisplayIOCtl', # 0x230
'NtUserDdeGetQualityOfService', # 0x231
'NtUserDdeSetQualityOfService', # 0x232
'NtUserDestroyInputContext', # 0x233
'NtUserDisableThreadIme', # 0x234
'NtUserDragDetect', # 0x235
'NtUserDragObject', # 0x236
'NtUserDrawAnimatedRects', # 0x237
'NtUserDrawCaption', # 0x238
'NtUserDrawCaptionTemp', # 0x239
'NtUserDrawMenuBarTemp', # 0x23a
'NtUserEndMenu', # 0x23b
'NtUserEvent', # 0x23c
'NtUserFlashWindowEx', # 0x23d
'NtUserGetAppImeLevel', # 0x23e
'NtUserGetCaretPos', # 0x23f
'NtUserGetClipCursor', # 0x240
'NtUserGetClipboardViewer', # 0x241
'NtUserGetComboBoxInfo', # 0x242
'NtUserGetCursorInfo', # 0x243
'NtUserGetGuiResources', # 0x244
'NtUserGetImeHotKey', # 0x245
'NtUserGetImeInfoEx', # 0x246
'NtUserGetInternalWindowPos', # 0x247
'NtUserGetKeyNameText', # 0x248
'NtUserGetKeyboardLayoutName', # 0x249
'NtUserGetLayeredWindowAttributes', # 0x24a
'NtUserGetListBoxInfo', # 0x24b
'NtUserGetMenuIndex', # 0x24c
'NtUserGetMenuItemRect', # 0x24d
'NtUserGetMouseMovePointsEx', # 0x24e
'NtUserGetPriorityClipboardFormat', # 0x24f
'NtUserGetRawInputBuffer', # 0x250
'NtUserGetRawInputData', # 0x251
'NtUserGetRawInputDeviceInfo', # 0x252
'NtUserGetRawInputDeviceList', # 0x253
'NtUserGetRegisteredRawInputDevices', # 0x254
'NtUserGetWOWClass', # 0x255
'NtUserHardErrorControl', # 0x256
'NtUserHiliteMenuItem', # 0x257
'NtUserImpersonateDdeClientWindow', # 0x258
'NtUserInitTask', # 0x259
'NtUserInitialize', # 0x25a
'NtUserInitializeClientPfnArrays', # 0x25b
'NtUserLoadKeyboardLayoutEx', # 0x25c
'NtUserLockWindowStation', # 0x25d
'NtUserLockWorkStation', # 0x25e
'NtUserMNDragLeave', # 0x25f
'NtUserMNDragOver', # 0x260
'NtUserMenuItemFromPoint', # 0x261
'NtUserMinMaximize', # 0x262
'NtUserNotifyIMEStatus', # 0x263
'NtUserOpenInputDesktop', # 0x264
'NtUserPrintWindow', # 0x265
'NtUserQueryInformationThread', # 0x266
'NtUserQueryInputContext', # 0x267
'NtUserQuerySendMessage', # 0x268
'NtUserRealChildWindowFromPoint', # 0x269
'NtUserRealWaitMessageEx', # 0x26a
'NtUserRegisterHotKey', # 0x26b
'NtUserRegisterRawInputDevices', # 0x26c
'NtUserRegisterTasklist', # 0x26d
'NtUserRegisterUserApiHook', # 0x26e
'NtUserRemoteConnect', # 0x26f
'NtUserRemoteRedrawRectangle', # 0x270
'NtUserRemoteRedrawScreen', # 0x271
'NtUserRemoteStopScreenUpdates', # 0x272
'NtUserResolveDesktopForWOW', # 0x273
'NtUserSetAppImeLevel', # 0x274
'NtUserSetClassWord', # 0x275
'NtUserSetCursorContents', # 0x276
'NtUserSetImeHotKey', # 0x277
'NtUserSetImeInfoEx', # 0x278
'NtUserSetImeOwnerWindow', # 0x279
'NtUserSetInternalWindowPos', # 0x27a
'NtUserSetLayeredWindowAttributes', # 0x27b
'NtUserSetLogonNotifyWindow', # 0x27c
'NtUserSetMenu', # 0x27d
'NtUserSetMenuContextHelpId', # 0x27e
'NtUserSetMenuFlagRtoL', # 0x27f
'NtUserSetObjectInformation', # 0x280
'NtUserSetShellWindowEx', # 0x281
'NtUserSetSysColors', # 0x282
'NtUserSetSystemCursor', # 0x283
'NtUserSetSystemTimer', # 0x284
'NtUserSetThreadLayoutHandles', # 0x285
'NtUserSetWindowStationUser', # 0x286
'NtUserSoundSentry', # 0x287
'NtUserSwitchDesktop', # 0x288
'NtUserTestForInteractiveUser', # 0x289
'NtUserTrackPopupMenuEx', # 0x28a
'NtUserUnloadKeyboardLayout', # 0x28b
'NtUserUnlockWindowStation', # 0x28c
'NtUserUnregisterHotKey', # 0x28d
'NtUserUnregisterUserApiHook', # 0x28e
'NtUserUpdateInputContext', # 0x28f
'NtUserUpdateInstance', # 0x290
'NtUserUpdateLayeredWindow', # 0x291
'NtUserUpdatePerUserSystemParameters', # 0x292
'NtUserUserHandleGrantAccess', # 0x293
'NtUserValidateHandleSecure', # 0x294
'NtUserWaitForInputIdle', # 0x295
'NtUserWaitForMsgAndEvent', # 0x296
'NtUserSetClassLongPtr', # 0x297
'NtUserSetWindowLongPtr', # 0x298
'NtUserWin32PoolAllocationStats', # 0x299
'NtUserYieldTask', # 0x29a
],
]
|
Andrew-McNab-UK/DIRAC | refs/heads/integration | Interfaces/scripts/dirac-wms-job-logging-info.py | 6 | #!/usr/bin/env python
########################################################################
# $HeadURL$
# File :   dirac-wms-job-logging-info
# Author : Stuart Paterson
########################################################################
"""
Retrieve history of transitions for a DIRAC job
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC Job ID' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments
dirac = Dirac()
exitCode = 0
errorList = []
for job in parseArguments( args ):
result = dirac.getJobLoggingInfo( job, printOutput = True )
if not result['OK']:
errorList.append( ( job, result['Message'] ) )
exitCode = 2
for error in errorList:
print "ERROR %s: %s" % error
DIRAC.exit( exitCode )
|
ddnbgroup/ansible | refs/heads/devel | contrib/inventory/cobbler.py | 27 | #!/usr/bin/env python
"""
Cobbler external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is an example of sourcing that data from Cobbler
(http://cobbler.github.com). With cobbler each --mgmt-class in cobbler
will correspond to a group in Ansible, and --ks-meta variables will be
passed down for use in templates or even in argument lines.
NOTE: The cobbler system names will not be used. Make sure a
cobbler --dns-name is set for each cobbler system. If a system
appears with two DNS names we do not add it twice because we don't want
ansible talking to it twice. The first one found will be used. If no
--dns-name is set the system will NOT be visible to ansible. We do
not add cobbler system names because there is no requirement in cobbler
that those correspond to addresses.
See http://ansible.github.com/api.html for more info
Tested with Cobbler 2.0.11.
Changelog:
- 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in
higher performance at ansible startup. Groups are determined by owner rather than
default mgmt_classes. DNS name determined from hostname. cobbler values are written
to a 'cobbler' fact namespace
- 2013-09-01 pgehres: Refactored implementation to make use of caching and to
limit the number of connections to external cobbler server for performance.
Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0
"""
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import argparse
import ConfigParser
import os
import re
from time import time
import xmlrpclib
try:
import json
except ImportError:
import simplejson as json
# NOTE -- this file assumes Ansible is being accessed FROM the cobbler
# server, so it does not attempt to login with a username and password.
# this will be addressed in a future version of this script.
orderby_keyname = 'owners' # alternatively 'mgmt_classes'
class CobblerInventory(object):
def __init__(self):
""" Main execution path """
self.conn = None
self.inventory = dict() # A list of groups and the hosts in that group
self.cache = dict() # Details about hosts in the inventory
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.update_cache()
elif not self.is_cache_valid():
self.update_cache()
else:
self.load_inventory_from_cache()
self.load_cache_from_cache()
data_to_print = ""
# Data to print
if self.args.host:
data_to_print += self.get_host_info()
else:
self.inventory['_meta'] = { 'hostvars': {} }
for hostname in self.cache:
self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname] }
data_to_print += self.json_format_dict(self.inventory, True)
print data_to_print
def _connect(self):
if not self.conn:
self.conn = xmlrpclib.Server(self.cobbler_host, allow_none=True)
def is_cache_valid(self):
""" Determines if the cache files have expired, or if it is still valid """
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_inventory):
return True
return False
def read_settings(self):
""" Reads the settings from the cobbler.ini file """
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini')
self.cobbler_host = config.get('cobbler', 'host')
# Cache related
cache_path = config.get('cobbler', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
self.cache_max_age = config.getint('cobbler', 'cache_max_age')
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)')
self.args = parser.parse_args()
def update_cache(self):
""" Make calls to cobbler and save the output in a cache """
self._connect()
self.groups = dict()
self.hosts = dict()
data = self.conn.get_systems()
for host in data:
# Get the FQDN for the host and add it to the right groups
dns_name = host['hostname'] #None
ksmeta = None
interfaces = host['interfaces']
if dns_name is None:
continue
status = host['status']
profile = host['profile']
classes = host[orderby_keyname]
if status not in self.inventory:
self.inventory[status] = []
self.inventory[status].append(dns_name)
if profile not in self.inventory:
self.inventory[profile] = []
self.inventory[profile].append(dns_name)
for cls in classes:
if cls not in self.inventory:
self.inventory[cls] = []
self.inventory[cls].append(dns_name)
# Since we already have all of the data for the host, update the host details as well
# The old way was ksmeta only -- provide backwards compatibility
self.cache[dns_name] = host
if "ks_meta" in host:
for key, value in host["ks_meta"].iteritems():
self.cache[dns_name][key] = value
self.write_to_cache(self.cache, self.cache_path_cache)
self.write_to_cache(self.inventory, self.cache_path_inventory)
def get_host_info(self):
""" Get variables about a specific host """
if not self.cache or len(self.cache) == 0:
# Need to load index from cache
self.load_cache_from_cache()
if not self.args.host in self.cache:
# try updating the cache
self.update_cache()
if not self.args.host in self.cache:
# host might not exist anymore
return self.json_format_dict({}, True)
return self.json_format_dict(self.cache[self.args.host], True)
def push(self, my_dict, key, element):
""" Pushed an element onto an array that may not have been defined in the dict """
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def load_inventory_from_cache(self):
""" Reads the index from the cache file sets self.index """
cache = open(self.cache_path_inventory, 'r')
json_inventory = cache.read()
self.inventory = json.loads(json_inventory)
def load_cache_from_cache(self):
""" Reads the cache from the cache file sets self.cache """
cache = open(self.cache_path_cache, 'r')
json_cache = cache.read()
self.cache = json.loads(json_cache)
def write_to_cache(self, data, filename):
""" Writes data in JSON format to a file """
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
""" Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """
return re.sub("[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
    """Serialize *data* to a JSON string; sorted + indented when *pretty*."""
    if not pretty:
        return json.dumps(data)
    return json.dumps(data, sort_keys=True, indent=2)
# Script entry point: instantiating the inventory class drives argument
# parsing and prints the requested JSON to stdout.
CobblerInventory()
|
sanjoydesk/FrameworkBenchmarks | refs/heads/master | frameworks/Python/flask/gunicorn_conf.py | 214 | import multiprocessing
import os
import sys
# True when running under PyPy (only PyPy defines sys.pypy_version_info).
_is_pypy = hasattr(sys, 'pypy_version_info')
# True when running inside a Travis CI build.
_is_travis = os.environ.get('TRAVIS') == 'true'

# Default worker count: three workers per CPU core.
workers = multiprocessing.cpu_count() * 3
if _is_travis:
    # Travis containers report many cores but grant limited resources.
    workers = 2

bind = "0.0.0.0:8080"
keepalive = 120
errorlog = '-'
pidfile = 'gunicorn.pid'

if _is_pypy:
    # meinheld's C extension does not suit PyPy; use the tornado worker.
    worker_class = "tornado"
else:
    worker_class = "meinheld.gmeinheld.MeinheldWorker"
def post_fork(server, worker):
    """Gunicorn post-fork hook: disable meinheld's access log.

    (Until https://github.com/mopemope/meinheld/pull/42 is released)
    """
    import meinheld.server
    meinheld.server.set_access_logger(None)
|
shwinpiocess/redismon | refs/heads/master | redis/connection.py | 7 | from __future__ import with_statement
from distutils.version import StrictVersion
from itertools import chain
from select import select
import os
import socket
import sys
import threading
import warnings
try:
import ssl
ssl_available = True
except ImportError:
ssl_available = False
from redis._compat import (b, xrange, imap, byte_to_chr, unicode, bytes, long,
BytesIO, nativestr, basestring, iteritems,
LifoQueue, Empty, Full, urlparse, parse_qs,
unquote)
from redis.exceptions import (
RedisError,
ConnectionError,
TimeoutError,
BusyLoadingError,
ResponseError,
InvalidResponse,
AuthenticationError,
NoScriptError,
ExecAbortError,
ReadOnlyError
)
from redis.utils import HIREDIS_AVAILABLE
# Feature-detect the installed hiredis version so the parser can adapt to
# older releases at import time.
if HIREDIS_AVAILABLE:
    import hiredis

    hiredis_version = StrictVersion(hiredis.__version__)
    # >= 0.1.3: reply errors may be produced via a callable.
    HIREDIS_SUPPORTS_CALLABLE_ERRORS = \
        hiredis_version >= StrictVersion('0.1.3')
    # >= 0.1.4: Reader.feed accepts a (buffer, offset, length) triple.
    HIREDIS_SUPPORTS_BYTE_BUFFER = \
        hiredis_version >= StrictVersion('0.1.4')

    if not HIREDIS_SUPPORTS_BYTE_BUFFER:
        msg = ("redis-py works best with hiredis >= 0.1.4. You're running "
               "hiredis %s. Please consider upgrading." % hiredis.__version__)
        warnings.warn(msg)

    HIREDIS_USE_BYTE_BUFFER = True
    # only use byte buffer if hiredis supports it and the Python version
    # is >= 2.7
    if not HIREDIS_SUPPORTS_BYTE_BUFFER or (
            sys.version_info[0] == 2 and sys.version_info[1] < 7):
        HIREDIS_USE_BYTE_BUFFER = False

# RESP protocol framing bytes.
SYM_STAR = b('*')
SYM_DOLLAR = b('$')
SYM_CRLF = b('\r\n')
SYM_EMPTY = b('')

SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
class Token(object):
    """
    Wrapper for literal strings in Redis commands (command names and any
    hard-coded arguments) so the encoder knows to send them verbatim
    instead of applying encoding rules to them.
    """

    def __init__(self, value):
        # Unwrap nested Tokens so Token(Token(x)).value == x.
        self.value = value.value if isinstance(value, Token) else value

    def __repr__(self):
        return self.value

    def __str__(self):
        return self.value
class BaseParser(object):
    """Common error-translation logic shared by the response parsers."""

    # Maps an error code to an exception class, or to a nested dict keyed
    # by the remainder of the error message for finer-grained mapping.
    EXCEPTION_CLASSES = {
        'ERR': {
            'max number of clients reached': ConnectionError
        },
        'EXECABORT': ExecAbortError,
        'LOADING': BusyLoadingError,
        'NOSCRIPT': NoScriptError,
        'READONLY': ReadOnlyError,
    }

    def parse_error(self, response):
        "Parse an error response"
        error_code, _, detail = response.partition(' ')
        exception_class = self.EXCEPTION_CLASSES.get(error_code)
        if exception_class is None:
            return ResponseError(response)
        if isinstance(exception_class, dict):
            exception_class = exception_class.get(detail, ResponseError)
        return exception_class(detail)
class SocketBuffer(object):
    """Buffers bytes received from a socket and serves length- and
    line-delimited reads on top of the buffered data.

    ``bytes_written`` counts bytes appended from the socket into the
    internal BytesIO; ``bytes_read`` counts bytes already consumed by
    callers. Their difference is the amount of unread buffered data.
    """

    def __init__(self, socket, socket_read_size):
        self._sock = socket
        self.socket_read_size = socket_read_size
        self._buffer = BytesIO()
        # number of bytes written to the buffer from the socket
        self.bytes_written = 0
        # number of bytes read from the buffer
        self.bytes_read = 0

    @property
    def length(self):
        # Unread bytes currently held in the buffer.
        return self.bytes_written - self.bytes_read

    def _read_from_socket(self, length=None):
        """Pull data from the socket into the buffer.

        When *length* is given, keep reading until at least that many new
        bytes have arrived; otherwise perform a single recv().
        """
        socket_read_size = self.socket_read_size
        buf = self._buffer
        buf.seek(self.bytes_written)
        marker = 0

        try:
            while True:
                data = self._sock.recv(socket_read_size)
                # an empty string indicates the server shutdown the socket
                if isinstance(data, bytes) and len(data) == 0:
                    raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
                buf.write(data)
                data_length = len(data)
                self.bytes_written += data_length
                marker += data_length

                if length is not None and length > marker:
                    continue
                break
        except socket.timeout:
            raise TimeoutError("Timeout reading from socket")
        except socket.error:
            e = sys.exc_info()[1]
            raise ConnectionError("Error while reading from socket: %s" %
                                  (e.args,))

    def read(self, length):
        """Return *length* payload bytes, consuming the trailing CRLF."""
        length = length + 2  # make sure to read the \r\n terminator
        # make sure we've read enough data from the socket
        if length > self.length:
            self._read_from_socket(length - self.length)

        self._buffer.seek(self.bytes_read)
        data = self._buffer.read(length)
        self.bytes_read += len(data)

        # purge the buffer when we've consumed it all so it doesn't
        # grow forever
        if self.bytes_read == self.bytes_written:
            self.purge()

        return data[:-2]

    def readline(self):
        """Return one CRLF-terminated line, without the CRLF."""
        buf = self._buffer
        buf.seek(self.bytes_read)
        data = buf.readline()
        while not data.endswith(SYM_CRLF):
            # there's more data in the socket that we need
            self._read_from_socket()
            buf.seek(self.bytes_read)
            data = buf.readline()

        self.bytes_read += len(data)

        # purge the buffer when we've consumed it all so it doesn't
        # grow forever
        if self.bytes_read == self.bytes_written:
            self.purge()

        return data[:-2]

    def purge(self):
        """Empty the buffer and reset both byte counters."""
        self._buffer.seek(0)
        self._buffer.truncate()
        self.bytes_written = 0
        self.bytes_read = 0

    def close(self):
        """Release the buffer and drop the socket reference."""
        try:
            self.purge()
            self._buffer.close()
        except:
            # issue #633 suggests the purge/close somehow raised a
            # BadFileDescriptor error. Perhaps the client ran out of
            # memory or something else? It's probably OK to ignore
            # any error being raised from purge/close since we're
            # removing the reference to the instance below.
            pass
        self._buffer = None
        self._sock = None
class PythonParser(BaseParser):
    "Plain Python parsing class"
    # Encoding used to decode bulk replies; None means return raw bytes.
    encoding = None

    def __init__(self, socket_read_size):
        self.socket_read_size = socket_read_size
        self._sock = None
        self._buffer = None

    def __del__(self):
        # Best-effort cleanup; never raise from a destructor.
        try:
            self.on_disconnect()
        except Exception:
            pass

    def on_connect(self, connection):
        "Called when the socket connects"
        self._sock = connection._sock
        self._buffer = SocketBuffer(self._sock, self.socket_read_size)
        # Only remember the encoding if responses should be decoded.
        if connection.decode_responses:
            self.encoding = connection.encoding

    def on_disconnect(self):
        "Called when the socket disconnects"
        if self._sock is not None:
            self._sock.close()
            self._sock = None
        if self._buffer is not None:
            self._buffer.close()
            self._buffer = None
        self.encoding = None

    def can_read(self):
        # True when unread data is already buffered.
        return self._buffer and bool(self._buffer.length)

    def read_response(self):
        """Read and decode one RESP reply from the buffered socket.

        ResponseError instances (other than ConnectionError) are
        *returned* rather than raised so pipeline execution can decide
        how to surface them.
        """
        response = self._buffer.readline()
        if not response:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        # First byte is the RESP type marker; the rest is the payload.
        byte, response = byte_to_chr(response[0]), response[1:]

        if byte not in ('-', '+', ':', '$', '*'):
            raise InvalidResponse("Protocol Error: %s, %s" %
                                  (str(byte), str(response)))

        # server returned an error
        if byte == '-':
            response = nativestr(response)
            error = self.parse_error(response)
            # if the error is a ConnectionError, raise immediately so the user
            # is notified
            if isinstance(error, ConnectionError):
                raise error
            # otherwise, we're dealing with a ResponseError that might belong
            # inside a pipeline response. the connection's read_response()
            # and/or the pipeline's execute() will raise this error if
            # necessary, so just return the exception instance here.
            return error
        # single value
        elif byte == '+':
            pass
        # int value
        elif byte == ':':
            response = long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                # RESP nil bulk string
                return None
            response = self._buffer.read(length)
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                # RESP nil array
                return None
            # Recursively parse each element of the array.
            response = [self.read_response() for i in xrange(length)]
        if isinstance(response, bytes) and self.encoding:
            response = response.decode(self.encoding)
        return response
class HiredisParser(BaseParser):
    "Parser class for connections using Hiredis"

    def __init__(self, socket_read_size):
        if not HIREDIS_AVAILABLE:
            raise RedisError("Hiredis is not installed")
        self.socket_read_size = socket_read_size

        if HIREDIS_USE_BYTE_BUFFER:
            # Reusable receive buffer for recv_into() (hiredis >= 0.1.4).
            self._buffer = bytearray(socket_read_size)

    def __del__(self):
        # Best-effort cleanup; never raise from a destructor.
        try:
            self.on_disconnect()
        except Exception:
            pass

    def on_connect(self, connection):
        """Bind to the connection's socket and build a hiredis Reader."""
        self._sock = connection._sock
        kwargs = {
            'protocolError': InvalidResponse,
            'replyError': self.parse_error,
        }

        # hiredis < 0.1.3 doesn't support functions that create exceptions
        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
            kwargs['replyError'] = ResponseError

        if connection.decode_responses:
            kwargs['encoding'] = connection.encoding
        self._reader = hiredis.Reader(**kwargs)
        # Holds a reply pre-fetched by can_read(); False means "none".
        self._next_response = False

    def on_disconnect(self):
        """Drop the socket, reader and any cached reply."""
        self._sock = None
        self._reader = None
        self._next_response = False

    def can_read(self):
        if not self._reader:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        # gets() may return a fully-buffered reply without touching the
        # socket; cache it so read_response() can hand it back.
        if self._next_response is False:
            self._next_response = self._reader.gets()
        return self._next_response is not False

    def read_response(self):
        """Return the next reply, reading from the socket as needed."""
        if not self._reader:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        # _next_response might be cached from a can_read() call
        if self._next_response is not False:
            response = self._next_response
            self._next_response = False
            return response

        response = self._reader.gets()
        socket_read_size = self.socket_read_size
        while response is False:
            try:
                if HIREDIS_USE_BYTE_BUFFER:
                    bufflen = self._sock.recv_into(self._buffer)
                    if bufflen == 0:
                        raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
                else:
                    buffer = self._sock.recv(socket_read_size)
                    # an empty string indicates the server shutdown the socket
                    if not isinstance(buffer, bytes) or len(buffer) == 0:
                        raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
            except socket.timeout:
                raise TimeoutError("Timeout reading from socket")
            except socket.error:
                e = sys.exc_info()[1]
                raise ConnectionError("Error while reading from socket: %s" %
                                      (e.args,))
            if HIREDIS_USE_BYTE_BUFFER:
                self._reader.feed(self._buffer, 0, bufflen)
            else:
                self._reader.feed(buffer)
            response = self._reader.gets()
        # if an older version of hiredis is installed, we need to attempt
        # to convert ResponseErrors to their appropriate types.
        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
            if isinstance(response, ResponseError):
                response = self.parse_error(response.args[0])
            elif isinstance(response, list) and response and \
                    isinstance(response[0], ResponseError):
                response[0] = self.parse_error(response[0].args[0])
        # if the response is a ConnectionError or the response is a list and
        # the first item is a ConnectionError, raise it as something bad
        # happened
        if isinstance(response, ConnectionError):
            raise response
        elif isinstance(response, list) and response and \
                isinstance(response[0], ConnectionError):
            raise response[0]
        return response
# Prefer the C-accelerated hiredis parser when the extension is
# installed; otherwise fall back to the pure-Python parser.
if HIREDIS_AVAILABLE:
    DefaultParser = HiredisParser
else:
    DefaultParser = PythonParser
class Connection(object):
    "Manages TCP communication to and from a Redis server"
    description_format = "Connection<host=%(host)s,port=%(port)s,db=%(db)s>"

    def __init__(self, host='localhost', port=6379, db=0, password=None,
                 socket_timeout=None, socket_connect_timeout=None,
                 socket_keepalive=False, socket_keepalive_options=None,
                 retry_on_timeout=False, encoding='utf-8',
                 encoding_errors='strict', decode_responses=False,
                 parser_class=DefaultParser, socket_read_size=65536):
        """Create a (not yet connected) TCP connection object.

        The creating pid is recorded so connection pools can detect and
        discard connections inherited across a fork.
        """
        self.pid = os.getpid()
        self.host = host
        self.port = int(port)
        self.db = db
        self.password = password
        self.socket_timeout = socket_timeout
        # Fall back to the regular socket timeout when no dedicated
        # connect timeout was supplied.
        self.socket_connect_timeout = socket_connect_timeout or socket_timeout
        self.socket_keepalive = socket_keepalive
        self.socket_keepalive_options = socket_keepalive_options or {}
        self.retry_on_timeout = retry_on_timeout
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        self.decode_responses = decode_responses
        self._sock = None
        self._parser = parser_class(socket_read_size=socket_read_size)
        self._description_args = {
            'host': self.host,
            'port': self.port,
            'db': self.db,
        }
        self._connect_callbacks = []

    def __repr__(self):
        return self.description_format % self._description_args

    def __del__(self):
        # Best-effort cleanup; never raise from a destructor.
        try:
            self.disconnect()
        except Exception:
            pass

    def register_connect_callback(self, callback):
        # Callbacks run with this connection after each successful
        # (re)connect; used for pubsub channel/pattern resubscription.
        self._connect_callbacks.append(callback)

    def clear_connect_callbacks(self):
        self._connect_callbacks = []

    def connect(self):
        "Connects to the Redis server if not already connected"
        if self._sock:
            return
        try:
            sock = self._connect()
        except socket.error:
            e = sys.exc_info()[1]
            raise ConnectionError(self._error_message(e))

        self._sock = sock
        try:
            self.on_connect()
        except RedisError:
            # clean up after any error in on_connect
            self.disconnect()
            raise

        # run any user callbacks. right now the only internal callback
        # is for pubsub channel/pattern resubscription
        for callback in self._connect_callbacks:
            callback(self)

    def _connect(self):
        "Create a TCP socket connection"
        # we want to mimic what socket.create_connection does to support
        # ipv4/ipv6, but we want to set options prior to calling
        # socket.connect()
        err = None
        for res in socket.getaddrinfo(self.host, self.port, 0,
                                      socket.SOCK_STREAM):
            family, socktype, proto, canonname, socket_address = res
            sock = None
            try:
                sock = socket.socket(family, socktype, proto)
                # TCP_NODELAY
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

                # TCP_KEEPALIVE
                if self.socket_keepalive:
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                    for k, v in iteritems(self.socket_keepalive_options):
                        sock.setsockopt(socket.SOL_TCP, k, v)

                # set the socket_connect_timeout before we connect
                sock.settimeout(self.socket_connect_timeout)

                # connect
                sock.connect(socket_address)

                # set the socket_timeout now that we're connected
                sock.settimeout(self.socket_timeout)
                return sock

            except socket.error as _:
                err = _
                if sock is not None:
                    sock.close()

        if err is not None:
            raise err
        raise socket.error("socket.getaddrinfo returned an empty list")

    def _error_message(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to %s:%s. %s." % \
                (self.host, self.port, exception.args[0])
        else:
            return "Error %s connecting to %s:%s. %s." % \
                (exception.args[0], self.host, self.port, exception.args[1])

    def on_connect(self):
        "Initialize the connection, authenticate and select a database"
        self._parser.on_connect(self)

        # if a password is specified, authenticate
        if self.password:
            self.send_command('AUTH', self.password)
            if nativestr(self.read_response()) != 'OK':
                raise AuthenticationError('Invalid Password')

        # if a database is specified, switch to it
        if self.db:
            self.send_command('SELECT', self.db)
            if nativestr(self.read_response()) != 'OK':
                raise ConnectionError('Invalid Database')

    def disconnect(self):
        "Disconnects from the Redis server"
        self._parser.on_disconnect()
        if self._sock is None:
            return
        try:
            self._sock.shutdown(socket.SHUT_RDWR)
            self._sock.close()
        except socket.error:
            # Socket may already be closed/reset; nothing more to do.
            pass
        self._sock = None

    def send_packed_command(self, command):
        "Send an already packed command to the Redis server"
        if not self._sock:
            self.connect()
        try:
            if isinstance(command, str):
                command = [command]
            for item in command:
                self._sock.sendall(item)
        except socket.timeout:
            self.disconnect()
            raise TimeoutError("Timeout writing to socket")
        except socket.error:
            e = sys.exc_info()[1]
            self.disconnect()
            if len(e.args) == 1:
                errno, errmsg = 'UNKNOWN', e.args[0]
            else:
                errno = e.args[0]
                errmsg = e.args[1]
            raise ConnectionError("Error %s while writing to socket. %s." %
                                  (errno, errmsg))
        except:
            # Any other failure leaves the socket in an unknown state;
            # drop the connection before propagating.
            self.disconnect()
            raise

    def send_command(self, *args):
        "Pack and send a command to the Redis server"
        self.send_packed_command(self.pack_command(*args))

    def can_read(self, timeout=0):
        "Poll the socket to see if there's data that can be read."
        sock = self._sock
        if not sock:
            self.connect()
            sock = self._sock
        # Data may already be buffered in the parser, or waiting on the
        # socket itself.
        return self._parser.can_read() or \
            bool(select([sock], [], [], timeout)[0])

    def read_response(self):
        "Read the response from a previously sent command"
        try:
            response = self._parser.read_response()
        except:
            self.disconnect()
            raise
        if isinstance(response, ResponseError):
            raise response
        return response

    def encode(self, value):
        "Return a bytestring representation of the value"
        if isinstance(value, Token):
            return b(value.value)
        elif isinstance(value, bytes):
            return value
        elif isinstance(value, (int, long)):
            value = b(str(value))
        elif isinstance(value, float):
            value = b(repr(value))
        elif not isinstance(value, basestring):
            value = unicode(value)
        if isinstance(value, unicode):
            value = value.encode(self.encoding, self.encoding_errors)
        return value

    def pack_command(self, *args):
        "Pack a series of arguments into the Redis protocol"
        output = []
        # the client might have included 1 or more literal arguments in
        # the command name, e.g., 'CONFIG GET'. The Redis server expects these
        # arguments to be sent separately, so split the first argument
        # manually. All of these arguments get wrapped in the Token class
        # to prevent them from being encoded.
        command = args[0]
        if ' ' in command:
            args = tuple([Token(s) for s in command.split(' ')]) + args[1:]
        else:
            args = (Token(command),) + args[1:]

        buff = SYM_EMPTY.join(
            (SYM_STAR, b(str(len(args))), SYM_CRLF))

        for arg in imap(self.encode, args):
            # to avoid large string mallocs, chunk the command into the
            # output list if we're sending large values
            if len(buff) > 6000 or len(arg) > 6000:
                buff = SYM_EMPTY.join(
                    (buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF))
                output.append(buff)
                output.append(arg)
                buff = SYM_CRLF
            else:
                buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))),
                                       SYM_CRLF, arg, SYM_CRLF))
        output.append(buff)
        return output

    def pack_commands(self, commands):
        "Pack multiple commands into the Redis protocol"
        output = []
        pieces = []
        buffer_length = 0

        for cmd in commands:
            for chunk in self.pack_command(*cmd):
                pieces.append(chunk)
                buffer_length += len(chunk)

            # Flush accumulated pieces once the batch grows large to keep
            # individual sendall() payloads bounded.
            if buffer_length > 6000:
                output.append(SYM_EMPTY.join(pieces))
                buffer_length = 0
                pieces = []

        if pieces:
            output.append(SYM_EMPTY.join(pieces))
        return output
class SSLConnection(Connection):
    """TCP connection to a Redis server wrapped in SSL/TLS."""

    description_format = "SSLConnection<host=%(host)s,port=%(port)s,db=%(db)s>"

    def __init__(self, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None,
                 ssl_ca_certs=None, **kwargs):
        if not ssl_available:
            raise RedisError("Python wasn't built with SSL support")

        super(SSLConnection, self).__init__(**kwargs)

        self.keyfile = ssl_keyfile
        self.certfile = ssl_certfile
        self.cert_reqs = self._resolve_cert_reqs(ssl_cert_reqs)
        self.ca_certs = ssl_ca_certs

    @staticmethod
    def _resolve_cert_reqs(ssl_cert_reqs):
        # Translate None or a string flag ('none'/'optional'/'required')
        # into the corresponding ssl.CERT_* constant.
        if ssl_cert_reqs is None:
            return ssl.CERT_NONE
        if isinstance(ssl_cert_reqs, basestring):
            name_to_flag = {
                'none': ssl.CERT_NONE,
                'optional': ssl.CERT_OPTIONAL,
                'required': ssl.CERT_REQUIRED,
            }
            try:
                return name_to_flag[ssl_cert_reqs]
            except KeyError:
                raise RedisError(
                    "Invalid SSL Certificate Requirements Flag: %s" %
                    ssl_cert_reqs)
        return ssl_cert_reqs

    def _connect(self):
        "Wrap the socket with SSL support"
        plain_sock = super(SSLConnection, self)._connect()
        return ssl.wrap_socket(plain_sock,
                               cert_reqs=self.cert_reqs,
                               keyfile=self.keyfile,
                               certfile=self.certfile,
                               ca_certs=self.ca_certs)
class UnixDomainSocketConnection(Connection):
    """Manages communication with a Redis server over a Unix domain socket."""

    description_format = "UnixDomainSocketConnection<path=%(path)s,db=%(db)s>"

    def __init__(self, path='', db=0, password=None,
                 socket_timeout=None, encoding='utf-8',
                 encoding_errors='strict', decode_responses=False,
                 retry_on_timeout=False,
                 parser_class=DefaultParser, socket_read_size=65536):
        # Record the creating pid so pools can detect forked connections.
        self.pid = os.getpid()
        self.path = path
        self.db = db
        self.password = password
        self.socket_timeout = socket_timeout
        self.retry_on_timeout = retry_on_timeout
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        self.decode_responses = decode_responses
        self._sock = None
        self._parser = parser_class(socket_read_size=socket_read_size)
        self._description_args = {'path': self.path, 'db': self.db}
        self._connect_callbacks = []

    def _connect(self):
        "Create a Unix domain socket connection"
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.socket_timeout)
        sock.connect(self.path)
        return sock

    def _error_message(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to unix socket: %s. %s." % \
                (self.path, exception.args[0])
        return "Error %s connecting to unix socket: %s. %s." % \
            (exception.args[0], self.path, exception.args[1])
class ConnectionPool(object):
    "Generic connection pool"

    @classmethod
    def from_url(cls, url, db=None, decode_components=False, **kwargs):
        """
        Return a connection pool configured from the given URL.

        For example::

            redis://[:password]@localhost:6379/0
            rediss://[:password]@localhost:6379/0
            unix://[:password]@/path/to/socket.sock?db=0

        Three URL schemes are supported:
            redis:// creates a normal TCP socket connection
            rediss:// creates a SSL wrapped TCP socket connection
            unix:// creates a Unix Domain Socket connection

        There are several ways to specify a database number. The parse function
        will return the first specified option:
            1. A ``db`` querystring option, e.g. redis://localhost?db=0
            2. If using the redis:// scheme, the path argument of the url, e.g.
               redis://localhost/0
            3. The ``db`` argument to this function.

        If none of these options are specified, db=0 is used.

        The ``decode_components`` argument allows this function to work with
        percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
        escapes will be replaced by their single-character equivalents after
        the URL has been parsed. This only applies to the ``hostname``,
        ``path``, and ``password`` components.

        Any additional querystring arguments and keyword arguments will be
        passed along to the ConnectionPool class's initializer. In the case
        of conflicting arguments, querystring arguments always win.
        """
        url_string = url
        url = urlparse(url)
        qs = ''

        # in python2.6, custom URL schemes don't recognize querystring values
        # they're left as part of the url.path.
        if '?' in url.path and not url.query:
            # chop the querystring including the ? off the end of the url
            # and reparse it.
            qs = url.path.split('?', 1)[1]
            url = urlparse(url_string[:-(len(qs) + 1)])
        else:
            qs = url.query

        url_options = {}

        # Flatten single-valued querystring lists into scalars.
        for name, value in iteritems(parse_qs(qs)):
            if value and len(value) > 0:
                url_options[name] = value[0]

        if decode_components:
            password = unquote(url.password) if url.password else None
            path = unquote(url.path) if url.path else None
            hostname = unquote(url.hostname) if url.hostname else None
        else:
            password = url.password
            path = url.path
            hostname = url.hostname

        # We only support redis:// and unix:// schemes.
        if url.scheme == 'unix':
            url_options.update({
                'password': password,
                'path': path,
                'connection_class': UnixDomainSocketConnection,
            })

        else:
            url_options.update({
                'host': hostname,
                'port': int(url.port or 6379),
                'password': password,
            })

            # If there's a path argument, use it as the db argument if a
            # querystring value wasn't specified
            if 'db' not in url_options and path:
                try:
                    url_options['db'] = int(path.replace('/', ''))
                except (AttributeError, ValueError):
                    pass

            if url.scheme == 'rediss':
                url_options['connection_class'] = SSLConnection

        # last shot at the db value
        url_options['db'] = int(url_options.get('db', db or 0))

        # update the arguments from the URL values
        kwargs.update(url_options)

        # backwards compatability
        if 'charset' in kwargs:
            warnings.warn(DeprecationWarning(
                '"charset" is deprecated. Use "encoding" instead'))
            kwargs['encoding'] = kwargs.pop('charset')
        if 'errors' in kwargs:
            warnings.warn(DeprecationWarning(
                '"errors" is deprecated. Use "encoding_errors" instead'))
            kwargs['encoding_errors'] = kwargs.pop('errors')

        return cls(**kwargs)

    def __init__(self, connection_class=Connection, max_connections=None,
                 **connection_kwargs):
        """
        Create a connection pool. If max_connections is set, then this
        object raises redis.ConnectionError when the pool's limit is reached.

        By default, TCP connections are created unless connection_class is
        specified. Use redis.UnixDomainSocketConnection for unix sockets.

        Any additional keyword arguments are passed to the constructor of
        connection_class.
        """
        # Effectively "unlimited" when no cap was given.
        max_connections = max_connections or 2 ** 31
        if not isinstance(max_connections, (int, long)) or max_connections < 0:
            raise ValueError('"max_connections" must be a positive integer')

        self.connection_class = connection_class
        self.connection_kwargs = connection_kwargs
        self.max_connections = max_connections

        self.reset()

    def __repr__(self):
        return "%s<%s>" % (
            type(self).__name__,
            self.connection_class.description_format % self.connection_kwargs,
        )

    def reset(self):
        """Reset all pool state; also used after a detected fork."""
        self.pid = os.getpid()
        self._created_connections = 0
        self._available_connections = []
        self._in_use_connections = set()
        self._check_lock = threading.Lock()

    def _checkpid(self):
        # If the process forked since the pool was created, discard all
        # inherited connections and start fresh in the child.
        if self.pid != os.getpid():
            with self._check_lock:
                if self.pid == os.getpid():
                    # another thread already did the work while we waited
                    # on the lock.
                    return
                self.disconnect()
                self.reset()

    def get_connection(self, command_name, *keys, **options):
        "Get a connection from the pool"
        self._checkpid()
        try:
            connection = self._available_connections.pop()
        except IndexError:
            connection = self.make_connection()
        self._in_use_connections.add(connection)
        return connection

    def make_connection(self):
        "Create a new connection"
        if self._created_connections >= self.max_connections:
            raise ConnectionError("Too many connections")
        self._created_connections += 1
        return self.connection_class(**self.connection_kwargs)

    def release(self, connection):
        "Releases the connection back to the pool"
        self._checkpid()
        if connection.pid != self.pid:
            # Connection belongs to a parent process; don't pool it here.
            return
        self._in_use_connections.remove(connection)
        self._available_connections.append(connection)

    def disconnect(self):
        "Disconnects all connections in the pool"
        all_conns = chain(self._available_connections,
                          self._in_use_connections)
        for connection in all_conns:
            connection.disconnect()
class BlockingConnectionPool(ConnectionPool):
    """
    Thread-safe blocking connection pool::

        >>> from redis.client import Redis
        >>> client = Redis(connection_pool=BlockingConnectionPool())

    It performs the same function as the default
    ``:py:class: ~redis.connection.ConnectionPool`` implementation, in that,
    it maintains a pool of reusable connections that can be shared by
    multiple redis clients (safely across threads if required).

    The difference is that, in the event that a client tries to get a
    connection from the pool when all of connections are in use, rather than
    raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default
    ``:py:class: ~redis.connection.ConnectionPool`` implementation does), it
    makes the client wait ("blocks") for a specified number of seconds until
    a connection becomes available.

    Use ``max_connections`` to increase / decrease the pool size::

        >>> pool = BlockingConnectionPool(max_connections=10)

    Use ``timeout`` to tell it either how many seconds to wait for a
    connection to become available, or to block forever:

        # Block forever.
        >>> pool = BlockingConnectionPool(timeout=None)

        # Raise a ``ConnectionError`` after five seconds if a connection is
        # not available.
        >>> pool = BlockingConnectionPool(timeout=5)
    """
    def __init__(self, max_connections=50, timeout=20,
                 connection_class=Connection, queue_class=LifoQueue,
                 **connection_kwargs):

        self.queue_class = queue_class
        self.timeout = timeout
        super(BlockingConnectionPool, self).__init__(
            connection_class=connection_class,
            max_connections=max_connections,
            **connection_kwargs)

    def reset(self):
        """Rebuild all pool state; also used after a detected fork."""
        self.pid = os.getpid()
        self._check_lock = threading.Lock()

        # Create and fill up a thread safe queue with ``None`` values.
        self.pool = self.queue_class(self.max_connections)
        while True:
            try:
                self.pool.put_nowait(None)
            except Full:
                break

        # Keep a list of actual connection instances so that we can
        # disconnect them later.
        self._connections = []

    def make_connection(self):
        "Make a fresh connection."
        connection = self.connection_class(**self.connection_kwargs)
        self._connections.append(connection)
        return connection

    def get_connection(self, command_name, *keys, **options):
        """
        Get a connection, blocking for ``self.timeout`` until a connection
        is available from the pool.

        If the connection returned is ``None`` then creates a new connection.
        Because we use a last-in first-out queue, the existing connections
        (having been returned to the pool after the initial ``None`` values
        were added) will be returned before ``None`` values. This means we only
        create new connections when we need to, i.e.: the actual number of
        connections will only increase in response to demand.
        """
        # Make sure we haven't changed process.
        self._checkpid()

        # Try and get a connection from the pool. If one isn't available within
        # self.timeout then raise a ``ConnectionError``.
        connection = None
        try:
            connection = self.pool.get(block=True, timeout=self.timeout)
        except Empty:
            # Note that this is not caught by the redis client and will be
            # raised unless handled by application code.
            raise ConnectionError("No connection available.")

        # If the ``connection`` is actually ``None`` then that's a cue to make
        # a new connection to add to the pool.
        if connection is None:
            connection = self.make_connection()

        return connection

    def release(self, connection):
        "Releases the connection back to the pool."
        # Make sure we haven't changed process.
        self._checkpid()
        if connection.pid != self.pid:
            # Connection belongs to a parent process; don't pool it here.
            return

        # Put the connection back into the pool.
        try:
            self.pool.put_nowait(connection)
        except Full:
            # perhaps the pool has been reset() after a fork? regardless,
            # we don't want this connection
            pass

    def disconnect(self):
        "Disconnects all connections in the pool."
        for connection in self._connections:
            connection.disconnect()
|
gpmidi/fragforce.org | refs/heads/dev | ffdonations/utils.py | 3 | # from .tasks import *
from django.conf import settings
from django.db.models import Sum
from memoize import memoize
from ffsfdc.models import *
from .models import *
# @memoize(timeout=120)
# def el_num_donations():
# baseq = DonationModel.objects.filter(DonationModel.tracked_q())
# return baseq.count()
#
#
# @memoize(timeout=120)
# def el_donation_stats():
# baseq = DonationModel.objects.filter(DonationModel.tracked_q())
# return baseq.aggregate(
# sumDonations=Sum('amount'),
# avgDonation=Avg('amount'),
# minDonation=Min('amount'),
# maxDonation=Max('amount'),
# )
@memoize(timeout=120)
def event_name_maker(year=None):
    """Return the Extra Life event name for *year* (default: current year).

    Fix: the previous default, ``year=timezone.now().year``, was evaluated
    once at import time, so a long-running process would keep using the
    old year after a year rollover. Resolve the default at call time.
    """
    if year is None:
        year = timezone.now().year
    return 'Extra Life %d' % year
@memoize(timeout=120)
def el_teams(year=None):
    """Return the set of Extra Life team IDs tracked for *year*.

    Fix: the previous default, ``year=timezone.now().year``, was evaluated
    once at import time and would go stale across year boundaries; the
    default is now resolved at call time.
    """
    from ffdonations.tasks.teams import update_teams
    if year is None:
        year = timezone.now().year
    yr = event_name_maker(year=year)
    ret = set([])
    for sa in SiteAccount.objects.filter(el_id__isnull=False).only('el_id').all():
        try:
            tm = TeamModel.objects.get(id=sa.el_id)
            if tm.event.name == yr:
                ret.add(tm.id)
        except TeamModel.DoesNotExist:
            # Team not cached locally yet; queue a background refresh.
            update_teams.delay([sa.el_id, ])
    return ret
@memoize(timeout=120)
def el_contact(year=None):
    """Return a list of tracked Extra Life participant IDs for *year*.

    Fix: the previous default, ``year=timezone.now().year``, was evaluated
    once at import time and would go stale across year boundaries; the
    default is now resolved at call time.
    """
    from ffdonations.tasks.participants import update_participants
    if year is None:
        year = timezone.now().year
    yr = event_name_maker(year=year)
    ret = []
    for sa in Contact.objects.filter(extra_life_id__isnull=False).only('extra_life_id').all():
        try:
            tm = ParticipantModel.objects.get(id=sa.extra_life_id)
            if tm.event.name == yr:
                ret.append(tm.id)
        except ParticipantModel.DoesNotExist:
            # Participant not cached locally yet; queue a background refresh.
            update_participants.delay([sa.extra_life_id, ])
    return ret
@memoize(timeout=120)
def el_num_donations(year=None):
    """Return donation-count totals for *year* (default: current year).

    Fix: resolve the year default at call time instead of import time.
    """
    if year is None:
        year = timezone.now().year
    teams = TeamModel.objects.filter(id__in=el_teams(year=year))
    # Sum() over an empty queryset yields None; coerce to 0.
    tsum = teams.aggregate(ttl=Sum('numDonations')).get('ttl') or 0
    # Participants whose team is already tracked are excluded —
    # presumably to avoid double counting; verify against team totals.
    psum = ParticipantModel.objects.filter(id__in=el_contact(year=year), tracked=True) \
        .filter(~Q(team__in=teams)) \
        .aggregate(ttl=Sum('numDonations')).get('ttl') or 0
    return dict(
        countDonations=float(tsum + psum),
        countTeamDonations=float(tsum),
        countParticipantDonations=float(psum),
    )
@memoize(timeout=120)
def el_donation_stats(year=None):
    """Return donation dollar totals for *year* (default: current year).

    Fix: resolve the year default at call time instead of import time.
    """
    if year is None:
        year = timezone.now().year
    teams = TeamModel.objects.filter(id__in=el_teams(year=year))
    # Sum() over an empty queryset yields None; coerce to 0.
    tsum = teams.aggregate(ttl=Sum('sumDonations')).get('ttl') or 0
    # Participants whose team is already tracked are excluded —
    # presumably to avoid double counting; verify against team totals.
    psum = ParticipantModel.objects.filter(id__in=el_contact(year=year), tracked=True) \
        .filter(~Q(team__in=teams)) \
        .aggregate(ttl=Sum('sumDonations')).get('ttl') or 0
    return dict(
        sumDonations=float(tsum + psum),
        sumteamDonations=float(tsum),
        sumparticipantDonations=float(psum),
    )
@memoize(timeout=120)
def childsplay_donation_stats():
    """Return Child's Play (Tiltify) fundraising totals for active campaigns.

    Aggregates over campaigns whose start/end window contains "now" and
    whose team slug is listed in ``settings.TILTIFY_TEAMS``.
    """
    # Evaluate "now" once so both window bounds use the same instant.
    now = timezone.now()
    raised = CampaignTiltifyModel.objects.filter(
        startsAt__lte=now,
        endsAt__gte=now,
        team__in=TeamTiltifyModel.objects.filter(slug__in=settings.TILTIFY_TEAMS)
    ).aggregate(
        total=Sum('totalAmountRaised'),
        supporting=Sum('supportingAmountRaised'),
        direct=Sum('amountRaised'),
    )
    return dict(
        totalAmountRaised=float(raised.get('total', 0) or 0),
        supportingAmountRaised=float(raised.get('supporting', 0) or 0),
        # BUG FIX: the direct-raised sum is aliased 'direct' in the
        # aggregate above; the old code looked up the nonexistent key
        # 'amount' and therefore always reported amountRaised == 0.0.
        amountRaised=float(raised.get('direct', 0) or 0),
    )
|
sxjscience/tvm | refs/heads/master | python/tvm/contrib/tf_op/module.py | 5 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module container of TensorFlow TVMDSO op"""
import tensorflow as tf
from tensorflow.python.framework import load_library
from tensorflow.python import platform
class OpModule:
    """Container for a TVM DSO op library exported for TensorFlow.

    Wraps an exported TVM op implementation library so the functions it
    contains can be invoked from the TensorFlow side.
    """

    def __init__(self, lib_path):
        # Path to the exported TVM shared library.
        self.lib_path = lib_path

    def func(self, name, output_dtype=None, output_shape=None):
        """Get a TVM op wrapped as a TensorFlow tensor-to-tensor function.

        Parameters
        ----------
        name: str
            Function name inside the exported library.
        output_dtype: str or TensorFlow datatype, optional
            Output datatype; float32 when omitted.
        output_shape: list of integer / tf scalar tensors, or tf shape tensor
            Output shape; defaults to the first input's shape.

        Returns
        -------
        TensorFunc
            Object acting as a TensorFlow tensor-to-tensor function.
        """
        return TensorFunc(self.lib_path, name, output_dtype, output_shape)

    def __getitem__(self, func_name):
        # ``module["name"]`` is shorthand for ``module.func("name")``.
        return self.func(func_name)
class TensorFunc:
    """Function object that acts as TensorFlow tensor to tensor function.

    Wraps one function from an exported TVM DSO library; calling the
    instance invokes the underlying ``tvm_dso_op`` TensorFlow custom op.
    """
    def __init__(self, lib_path, func_name, output_dtype, output_shape):
        self.lib_path = lib_path
        self.func_name = func_name
        self.output_dtype = output_dtype
        # const(0) indicates "no dynamic shape supplied" to the custom op.
        self.dynamic_output_shape = tf.constant(0, tf.int64)
        self.static_output_shape = None
        self.has_static_output_shape = False  # extra flag is required
        if self._is_static_shape(output_shape):
            # Fully known at graph-build time: pass through as-is.
            self.static_output_shape = output_shape
            self.has_static_output_shape = True
        elif output_shape is not None:
            # Only known at run time: normalize into an int64 shape tensor.
            self.dynamic_output_shape = self._pack_shape_tensor(output_shape)
        self.module = self._load_platform_specific_library("libtvm_dso_op")
        self.tvm_dso_op = self.module.tvm_dso_op
    def apply(self, *params):
        """Invoke the wrapped TVM op on *params*; returns its output tensor."""
        return self.tvm_dso_op(
            params,
            dynamic_output_shape=self.dynamic_output_shape,
            static_output_shape=self.static_output_shape,
            has_static_output_shape=self.has_static_output_shape,
            lib_path=self.lib_path,
            func_name=self.func_name,
            output_dtype=self.output_dtype,
        )
    def __call__(self, *params):
        # Make instances directly callable, same as apply().
        return self.apply(*params)
    def _load_platform_specific_library(self, lib_name):
        """Load the tvm_dso_op TF plugin using the platform's shared-lib suffix."""
        system = platform.system()
        if system == "Darwin":
            lib_file_name = lib_name + ".dylib"
        elif system == "Windows":
            lib_file_name = lib_name + ".dll"
        else:
            lib_file_name = lib_name + ".so"
        return load_library.load_op_library(lib_file_name)
    def _is_static_shape(self, shape):
        """Return True iff *shape* is a plain list of non-negative Python ints.

        Raises on a negative dimension; any non-list (including None or a
        tf.Tensor) simply yields False.
        """
        if shape is None or not isinstance(shape, list):
            return False
        for dim_value in shape:
            if not isinstance(dim_value, int):
                return False
            if dim_value < 0:
                raise Exception("Negative dimension is illegal: %d" % dim_value)
        return True
    def _pack_shape_tensor(self, shape):
        """Normalize *shape* (tf shape tensor, or list of ints/scalar tensors)
        into a single int64 tensor; raises TypeError on anything else."""
        if isinstance(shape, tf.Tensor):
            if shape.dtype == tf.int32:
                # The custom op expects int64 dimensions.
                shape = tf.cast(shape, tf.int64)
        elif isinstance(shape, list):
            shape_dims = []
            for dim_value in shape:
                if isinstance(dim_value, int):
                    shape_dims.append(tf.constant(dim_value, tf.int64))
                elif isinstance(dim_value, tf.Tensor) and dim_value.shape.rank == 0:
                    if dim_value.dtype == tf.int32:
                        dim_value = tf.cast(dim_value, tf.int64)
                    shape_dims.append(dim_value)
                else:
                    raise TypeError("Input shape dimension is neither scalar tensor nor int")
            shape = tf.stack(shape_dims)
        else:
            raise TypeError("Input shape is neither tensor nor list")
        return shape
|
erdincay/youtube-dl | refs/heads/master | youtube_dl/extractor/ringtv.py | 124 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class RingTVIE(InfoExtractor):
    """Extractor for Ring TV news articles and video pages."""
    _VALID_URL = r'http://(?:www\.)?ringtv\.craveonline\.com/(?P<type>news|videos/video)/(?P<id>[^/?#]+)'
    _TEST = {
        "url": "http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30",
        "md5": "d25945f5df41cdca2d2587165ac28720",
        "info_dict": {
            'id': '857645',
            'ext': 'mp4',
            "title": 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
            "description": 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
        }
    }

    def _real_extract(self, url):
        url_match = re.match(self._VALID_URL, url)
        # The slug starts with a numeric id followed by hyphenated words.
        video_id = url_match.group('id').split('-')[0]
        webpage = self._download_webpage(url, video_id)
        if url_match.group('type') == 'news':
            # News pages embed the player in an iframe whose URL carries
            # the real numeric video id; pull it out of the page source.
            video_id = self._search_regex(
                r'''(?x)<iframe[^>]+src="http://cms\.springboardplatform\.com/
                        embed_iframe/[0-9]+/video/([0-9]+)/''',
                webpage, 'real video ID')
        description = self._html_search_regex(
            r'addthis:description="([^"]+)"',
            webpage, 'description', fatal=False)
        title = self._og_search_title(webpage)
        return {
            'id': video_id,
            'url': "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4" % video_id,
            'title': title,
            'thumbnail': "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg" % video_id,
            'description': description,
        }
|
OCA/purchase-workflow | refs/heads/12.0 | purchase_request/models/purchase_order.py | 1 | # Copyright 2018-2019 Eficent Business and IT Consulting Services S.L.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl-3.0).
from odoo import _, api, exceptions, fields, models
class PurchaseOrder(models.Model):
    """Extend purchase.order to notify originating purchase requests on
    confirmation and to block confirming lines whose request is done."""
    _inherit = "purchase.order"
    @api.multi
    def _purchase_request_confirm_message_content(self, request,
                                                  request_dict):
        """Build the HTML message posted on *request* when this PO confirms.

        ``request_dict`` maps request-line ids to dicts with keys ``name``,
        ``product_qty``, ``product_uom`` and ``date_planned`` (built by
        ``_purchase_request_confirm_message``).
        """
        self.ensure_one()
        if not request_dict:
            request_dict = {}
        title = _('Order confirmation %s for your Request %s') % (
            self.name, request.name)
        message = '<h3>%s</h3><ul>' % title
        message += _('The following requested items from Purchase Request %s '
                     'have now been confirmed in Purchase Order %s:') % (
            request.name, self.name)
        for line in request_dict.values():
            message += _(
                '<li><b>%s</b>: Ordered quantity %s %s, Planned date %s</li>'
            ) % (line['name'],
                 line['product_qty'],
                 line['product_uom'],
                 line['date_planned'],
                 )
        message += '</ul>'
        return message
    @api.multi
    def _purchase_request_confirm_message(self):
        """Post a confirmation note on every purchase request linked to
        this order's lines (grouped per request)."""
        request_obj = self.env['purchase.request']
        for po in self:
            # requests_dict: {request_id: {request_line_id: line data}}
            requests_dict = {}
            for line in po.order_line:
                for request_line in line.sudo().purchase_request_lines:
                    request_id = request_line.request_id.id
                    if request_id not in requests_dict:
                        requests_dict[request_id] = {}
                    date_planned = "%s" % line.date_planned
                    data = {
                        'name': request_line.name,
                        'product_qty': line.product_qty,
                        'product_uom': line.product_uom.name,
                        'date_planned': date_planned,
                    }
                    requests_dict[request_id][request_line.id] = data
            for request_id in requests_dict:
                request = request_obj.sudo().browse(request_id)
                message = po._purchase_request_confirm_message_content(
                    request, requests_dict[request_id])
                request.message_post(body=message, subtype='mail.mt_comment')
        return True
    @api.multi
    def _purchase_request_line_check(self):
        """Raise if any line is tied to an already-completed purchase request."""
        for po in self:
            for line in po.order_line:
                for request_line in line.purchase_request_lines:
                    if request_line.sudo().purchase_state == 'done':
                        raise exceptions.UserError(
                            _('Purchase Request %s has already '
                              'been completed') % request_line.request_id.name)
        return True
    @api.multi
    def button_confirm(self):
        """Check linked requests, confirm the order, then notify requests."""
        self._purchase_request_line_check()
        res = super(PurchaseOrder, self).button_confirm()
        self._purchase_request_confirm_message()
        return res
    @api.multi
    def unlink(self):
        """Delete the orders and clean up request allocations that pointed
        at this order's lines (collected before deletion, removed after)."""
        alloc_to_unlink = self.env['purchase.request.allocation']
        for rec in self:
            for alloc in rec.order_line.mapped(
                    'purchase_request_lines').mapped(
                    'purchase_request_allocation_ids').filtered(
                    lambda alloc: alloc.purchase_line_id.order_id.id == rec.id
            ):
                alloc_to_unlink += alloc
        res = super().unlink()
        alloc_to_unlink.unlink()
        return res
class PurchaseOrderLine(models.Model):
    """Extend purchase.order.line with links back to purchase request
    lines/allocations and service-receipt allocation handling."""
    _inherit = "purchase.order.line"
    # m2m to the request lines this PO line fulfils (set by the wizard flow).
    purchase_request_lines = fields.Many2many(
        'purchase.request.line',
        'purchase_request_purchase_order_line_rel',
        'purchase_order_line_id',
        'purchase_request_line_id',
        'Purchase Request Lines', readonly=True, copy=False)
    # Allocations tracking how much of each request line this PO line covers.
    purchase_request_allocation_ids = fields.One2many(
        comodel_name='purchase.request.allocation',
        inverse_name='purchase_line_id',
        string='Purchase Request Allocation',
        copy=False,)
    @api.multi
    def action_openRequestLineTreeView(self):
        """Open a tree/form view of the request lines linked to these PO lines.

        :return dict: dictionary value for created view
        """
        request_line_ids = []
        for line in self:
            request_line_ids += line.purchase_request_lines.ids
        domain = [('id', 'in', request_line_ids)]
        return {'name': _('Purchase Request Lines'),
                'type': 'ir.actions.act_window',
                'res_model': 'purchase.request.line',
                'view_type': 'form',
                'view_mode': 'tree,form',
                'domain': domain}
    @api.multi
    def _prepare_stock_moves(self, picking):
        """Attach this line's request allocations to the generated move values."""
        self.ensure_one()
        val = super(PurchaseOrderLine, self)._prepare_stock_moves(picking)
        all_list = []
        # NOTE(review): ``all_list`` accumulates across the loop, so later
        # move dicts also carry allocations appended for earlier ones --
        # presumably harmless since every value shares this purchase line,
        # but confirm before relying on per-move allocation sets.
        for v in val:
            all_ids = self.env['purchase.request.allocation'].search(
                [('purchase_line_id', '=', v['purchase_line_id'])]
            )
            for all_id in all_ids:
                # (4, id) = "link existing record" command in Odoo's x2many API.
                all_list.append((4, all_id.id))
            v['purchase_request_allocation_ids'] = all_list
        return val
    @api.multi
    def update_service_allocations(self, prev_qty_received):
        """Distribute newly received *service* quantity over allocations.

        Services produce no stock moves, so the received delta
        (``qty_received - prev_qty_received``) is spread over the open
        allocations in order, posting a note on each affected request.
        """
        for rec in self:
            allocation = self.env['purchase.request.allocation'].search(
                [('purchase_line_id', '=', rec.id),
                 ('purchase_line_id.product_id.type', '=', 'service')]
            )
            if not allocation:
                return
            # Quantity received since the previous write on this line.
            qty_left = rec.qty_received - prev_qty_received
            for alloc in allocation:
                allocated_product_qty = alloc.allocated_product_qty
                if not qty_left:
                    alloc.purchase_request_line_id._compute_qty()
                    break
                if alloc.open_product_qty <= qty_left:
                    # This allocation can be fully satisfied.
                    allocated_product_qty += alloc.open_product_qty
                    qty_left -= alloc.open_product_qty
                    alloc._notify_allocation(alloc.open_product_qty)
                else:
                    # Partially satisfy and exhaust the remaining quantity.
                    allocated_product_qty += qty_left
                    alloc._notify_allocation(qty_left)
                    qty_left = 0
                alloc.write({'allocated_product_qty': allocated_product_qty})
                message_data = self._prepare_request_message_data(
                    alloc,
                    alloc.purchase_request_line_id,
                    allocated_product_qty)
                message = \
                    self._purchase_request_confirm_done_message_content(
                        message_data)
                alloc.purchase_request_line_id.request_id.message_post(
                    body=message, subtype='mail.mt_comment')
                alloc.purchase_request_line_id._compute_qty()
        return True
    @api.model
    def _purchase_request_confirm_done_message_content(self, message_data):
        """Build the HTML note posted on a request when services are received."""
        title = _('Service confirmation for Request %s') % (
            message_data['request_name'])
        message = '<h3>%s</h3>' % title
        message += _('The following requested services from Purchase'
                     ' Request %s requested by %s '
                     'have now been received:') % (
            message_data['request_name'], message_data['requestor'])
        message += '<ul>'
        message += _(
            '<li><b>%s</b>: Received quantity %s %s</li>'
        ) % (message_data['product_name'],
             message_data['product_qty'],
             message_data['product_uom'],
             )
        message += '</ul>'
        return message
    def _prepare_request_message_data(
            self, alloc, request_line, allocated_qty):
        """Collect the values interpolated into the service-received note."""
        return {
            'request_name': request_line.request_id.name,
            'product_name': request_line.product_id.name_get()[0][1],
            'product_qty': allocated_qty,
            'product_uom': alloc.product_uom_id.name,
            'requestor': request_line.request_id.requested_by.partner_id.name,
        }
    @api.multi
    def write(self, vals):
        # As services do not generate stock move this tweak is required
        # to allocate them: snapshot qty_received before the write, then
        # spread the delta over allocations afterwards.
        prev_qty_received = {}
        if vals.get('qty_received', False):
            service_lines = self.filtered(
                lambda l: l.product_id.type == 'service')
            for line in service_lines:
                prev_qty_received[line.id] = line.qty_received
        res = super(PurchaseOrderLine, self).write(vals)
        if prev_qty_received:
            for line in service_lines:
                line.update_service_allocations(prev_qty_received[line.id])
        return res
|
gm-stack/mcproxy | refs/heads/master | mcproxy/log.py | 2 | import logging.config, yaml
logging.config.dictConfig({
    'version': 1,
    'formatters': {
        # Terse format for interactive console output.
        'brief': {
            'format': '[%(levelname)s] %(name)s: %(message)s',
        },
        # Timestamped format for the rotating log file.
        'precise': {
            'format': '%(asctime)s [%(levelname)-8s] %(name)-15s %(message)s',
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'brief',
            'level': 'INFO',
            'stream': 'ext://sys.stdout'
        },
        'file': {
            'class': 'logging.handlers.RotatingFileHandler',
            'formatter': 'precise',
            'filename': 'mcproxy.log',
            'level': 'INFO',
            # NOTE(review): 4098 bytes is a very small rotation threshold;
            # presumably 4 KB (4096) or larger was intended -- confirm.
            'maxBytes': 4098,
        },
    },
    'loggers': {
        # 'root' here is a named logger, not the dictConfig 'root' key;
        # SERVER/PROTOCOL propagate up to it.
        'root': {
            'handlers': ['file', 'console']
        },
        'SERVER': {
            'propagate': True
        },
        'PROTOCOL': {
            'propagate': True
        },
    },
})
class ConsoleHandler(logging.StreamHandler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a console.
    """

    def emit(self, record):
        """
        Emit a record.

        The record is formatted (using the configured formatter, if any)
        and written to the stream with a trailing newline.  Failures are
        reported via ``handleError`` per the stdlib handler convention.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            fs = "%s\n"
            # BUG FIX: the original referenced ``stream.write`` without
            # calling it, so nothing was ever written to the stream.
            stream.write(fs % msg)
            self.flush()
        except Exception:
            # Replace the original bare ``except: pass`` (which silently
            # hid every failure) with the stdlib error-reporting hook.
            self.handleError(record)
iulian787/spack | refs/heads/develop | var/spack/repos/builtin/packages/xwininfo/package.py | 5 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xwininfo(AutotoolsPackage, XorgPackage):
    """xwininfo prints information about windows on an X server. Various
    information is displayed depending on which options are selected."""
    homepage = "http://cgit.freedesktop.org/xorg/app/xwininfo"
    xorg_mirror_path = "app/xwininfo-1.1.3.tar.gz"
    version('1.1.3', sha256='784f8b9c9ddab24ce4faa65fde6430a8d7cf3c0564573582452cc99c599bd941')
    # Link-time dependencies on the X client libraries.
    depends_on('libxcb@1.6:')
    depends_on('libx11')
    # Build-only dependencies: X protocol headers, pkg-config, xorg macros.
    depends_on('xproto@7.0.17:', type='build')
    depends_on('pkgconfig', type='build')
    depends_on('util-macros', type='build')
amrdraz/kodr | refs/heads/master | app/brython/www/src/Lib/encodings/ptcp154.py | 219 | """ Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: both directions are single charmap table lookups.
    # (File is generated by gencodec.py; keep code identical to its output.)
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so incremental encoding keeps no state;
    # [0] drops the consumed-length part of charmap_encode's return.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is stateless, so incremental decoding keeps no state;
    # [0] drops the consumed-length part of charmap_decode's return.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits everything: charmap codecs need no stream-specific handling.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits everything: charmap codecs need no stream-specific handling.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo the codecs registry uses for 'ptcp154'."""
    return codecs.CodecInfo(
        name='ptcp154',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE (DEL)
'\u0496' # 0x80 -> CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
'\u0492' # 0x81 -> CYRILLIC CAPITAL LETTER GHE WITH STROKE
'\u04ee' # 0x82 -> CYRILLIC CAPITAL LETTER U WITH MACRON
'\u0493' # 0x83 -> CYRILLIC SMALL LETTER GHE WITH STROKE
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u04b6' # 0x86 -> CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
'\u04ae' # 0x87 -> CYRILLIC CAPITAL LETTER STRAIGHT U
'\u04b2' # 0x88 -> CYRILLIC CAPITAL LETTER HA WITH DESCENDER
'\u04af' # 0x89 -> CYRILLIC SMALL LETTER STRAIGHT U
'\u04a0' # 0x8A -> CYRILLIC CAPITAL LETTER BASHKIR KA
'\u04e2' # 0x8B -> CYRILLIC CAPITAL LETTER I WITH MACRON
'\u04a2' # 0x8C -> CYRILLIC CAPITAL LETTER EN WITH DESCENDER
'\u049a' # 0x8D -> CYRILLIC CAPITAL LETTER KA WITH DESCENDER
'\u04ba' # 0x8E -> CYRILLIC CAPITAL LETTER SHHA
'\u04b8' # 0x8F -> CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
'\u0497' # 0x90 -> CYRILLIC SMALL LETTER ZHE WITH DESCENDER
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u04b3' # 0x98 -> CYRILLIC SMALL LETTER HA WITH DESCENDER
'\u04b7' # 0x99 -> CYRILLIC SMALL LETTER CHE WITH DESCENDER
'\u04a1' # 0x9A -> CYRILLIC SMALL LETTER BASHKIR KA
'\u04e3' # 0x9B -> CYRILLIC SMALL LETTER I WITH MACRON
'\u04a3' # 0x9C -> CYRILLIC SMALL LETTER EN WITH DESCENDER
'\u049b' # 0x9D -> CYRILLIC SMALL LETTER KA WITH DESCENDER
'\u04bb' # 0x9E -> CYRILLIC SMALL LETTER SHHA
'\u04b9' # 0x9F -> CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U (Byelorussian)
'\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U (Byelorussian)
'\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
'\u04e8' # 0xA4 -> CYRILLIC CAPITAL LETTER BARRED O
'\u0498' # 0xA5 -> CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
'\u04b0' # 0xA6 -> CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
'\xa7' # 0xA7 -> SECTION SIGN
'\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u04d8' # 0xAA -> CYRILLIC CAPITAL LETTER SCHWA
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\u04ef' # 0xAD -> CYRILLIC SMALL LETTER U WITH MACRON
'\xae' # 0xAE -> REGISTERED SIGN
'\u049c' # 0xAF -> CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
'\xb0' # 0xB0 -> DEGREE SIGN
'\u04b1' # 0xB1 -> CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE
'\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0499' # 0xB4 -> CYRILLIC SMALL LETTER ZE WITH DESCENDER
'\u04e9' # 0xB5 -> CYRILLIC SMALL LETTER BARRED O
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
'\u2116' # 0xB9 -> NUMERO SIGN
'\u04d9' # 0xBA -> CYRILLIC SMALL LETTER SCHWA
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
'\u04aa' # 0xBD -> CYRILLIC CAPITAL LETTER ES WITH DESCENDER
'\u04ab' # 0xBE -> CYRILLIC SMALL LETTER ES WITH DESCENDER
'\u049d' # 0xBF -> CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE
'\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
'\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
'\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
'\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
'\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
'\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
'\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
'\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
'\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
'\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
'\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
'\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
'\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
'\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
'\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
'\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
'\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
'\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
'\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
selecsosi/django-cms | refs/heads/develop | cms/tests/menu.py | 8 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import copy
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, Permission, Group
from django.contrib.sites.models import Site
from django.template import Template, TemplateSyntaxError
from django.utils.translation import activate
from menus.base import NavigationNode
from menus.menu_pool import menu_pool, _build_nodes_inner_for_one_menu
from menus.models import CacheKey
from menus.utils import mark_descendants, find_selected, cut_levels
from cms.api import create_page
from cms.menu import CMSMenu, get_visible_pages
from cms.models import Page
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.test_utils.fixtures.menus import (MenusFixture, SubMenusFixture,
SoftrootFixture, ExtendedMenusFixture)
from cms.test_utils.testcases import SettingsOverrideTestCase
from cms.test_utils.util.context_managers import (SettingsOverride,
LanguageOverride)
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.test_utils.util.mock import AttributeObject
from cms.utils import get_cms_setting
from cms.utils.compat.dj import get_user_model, user_related_name
from cms.utils.i18n import force_language
class BaseMenuTest(SettingsOverrideTestCase):
    """Base fixture for menu tests: restricts the global menu pool to just
    CMSMenu for the duration of each test and restores it afterwards."""
    def _get_nodes(self, path='/'):
        # Build a small hand-made navigation tree (1 -> 2 -> {3, 4}, and a
        # root-level 5) and run it through the menu modifiers for *path*.
        node1 = NavigationNode('1', '/1/', 1)
        node2 = NavigationNode('2', '/2/', 2, 1)
        node3 = NavigationNode('3', '/3/', 3, 2)
        node4 = NavigationNode('4', '/4/', 4, 2)
        node5 = NavigationNode('5', '/5/', 5)
        nodes = [node1, node2, node3, node4, node5]
        tree = _build_nodes_inner_for_one_menu([n for n in nodes], "test")
        request = self.get_request(path)
        menu_pool.apply_modifiers(tree, request)
        return tree, nodes
    def setUp(self):
        super(BaseMenuTest, self).setUp()
        if not menu_pool.discovered:
            menu_pool.discover_menus()
        # Swap the registry down to CMSMenu only, and clear any cached
        # menu data for this site so tests see a clean state.
        self.old_menu = menu_pool.menus
        menu_pool.menus = {'CMSMenu': self.old_menu['CMSMenu']}
        menu_pool.clear(settings.SITE_ID)
        activate("en")
    def tearDown(self):
        # Restore the full menu registry for subsequent test classes.
        menu_pool.menus = self.old_menu
        super(BaseMenuTest, self).tearDown()
    def get_page(self, num):
        # Fixture pages are titled P1..Pn; fetch the published version.
        return Page.objects.public().get(title_set__title='P%s' % num)
class ExtendedFixturesMenuTests(ExtendedMenusFixture, BaseMenuTest):
    """
    Tree from fixture:
        + P1
        | + P2
        |   + P3
        | + P9
        |   + P10
        |      + P11
        + P4
        | + P5
        + P6 (not in menu)
          + P7
          + P8
    """
    def get_page(self, num):
        # Published fixture page titled P<num>.
        return Page.objects.public().get(title_set__title='P%s' % num)
    def get_level(self, num):
        # All published pages at tree depth *num*.
        return Page.objects.public().filter(level=num)
    def get_all_pages(self):
        return Page.objects.public()
    def test_menu_failfast_on_invalid_usage(self):
        # Passing a non-template argument where a template name is expected
        # must raise at render time when template debugging is on.
        context = self.get_context()
        context['child'] = self.get_page(1)
        # test standard show_menu
        with SettingsOverride(DEBUG=True, TEMPLATE_DEBUG=True):
            tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 'menu/menu.html' child %}")
            self.assertRaises(TemplateSyntaxError, tpl.render, context)
    def test_show_submenu_nephews(self):
        context = self.get_context(path=self.get_page(2).get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 1 1 %}")
        tpl.render(context)
        nodes = context["children"]
        # P2 is the selected node
        self.assertTrue(nodes[0].selected)
        # Should include P10 but not P11
        self.assertEqual(len(nodes[1].children), 1)
        self.assertFalse(nodes[1].children[0].children)
        # Without the nephew limit, the deeper level is included too.
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 1 %}")
        tpl.render(context)
        nodes = context["children"]
        # should now include both P10 and P11
        self.assertEqual(len(nodes[1].children), 1)
        self.assertEqual(len(nodes[1].children[0].children), 1)
    def test_show_submenu_template_root_level_none_no_nephew_limit(self):
        context = self.get_context(path=self.get_page(1).get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 None 100 %}")
        tpl.render(context)
        nodes = context["children"]
        # default nephew limit, P2 and P9 in the nodes list
        self.assertEqual(len(nodes), 2)
class FixturesMenuTests(MenusFixture, BaseMenuTest):
    """
    Menu template-tag tests run against the page tree loaded by MenusFixture.

    Tree from fixture:
        + P1
        | + P2
        |   + P3
        + P4
        | + P5
        + P6 (not in menu)
          + P7
          + P8
    """
    def get_page(self, num):
        # Look up a public page by its fixture title ("P1" .. "P8").
        return Page.objects.public().get(title_set__title='P%s' % num)
    def get_level(self, num):
        # All public pages at the given tree depth (mptt "level").
        return Page.objects.public().filter(level=num)
    def get_all_pages(self):
        # Every public page, regardless of navigation visibility.
        return Page.objects.public()
    def test_menu_failfast_on_invalid_usage(self):
        """A page object where a template name is expected must raise."""
        context = self.get_context()
        context['child'] = self.get_page(1)
        # test standard show_menu
        with SettingsOverride(DEBUG=True, TEMPLATE_DEBUG=True):
            tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 'menu/menu.html' child %}")
            self.assertRaises(TemplateSyntaxError, tpl.render, context)
    def test_basic_cms_menu(self):
        """CMSMenu yields exactly one node per public page."""
        self.assertEqual(len(menu_pool.menus), 1)
        with force_language("en"):
            response = self.client.get(self.get_pages_root())  # path = '/'
        self.assertEqual(response.status_code, 200)
        request = self.get_request()
        # test the cms menu class
        menu = CMSMenu()
        nodes = menu.get_nodes(request)
        self.assertEqual(len(nodes), len(self.get_all_pages()))
    def test_show_menu(self):
        """Default show_menu marks selected/sibling/descendant flags correctly."""
        context = self.get_context()
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_menu %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0].selected, True)
        self.assertEqual(nodes[0].sibling, False)
        self.assertEqual(nodes[0].descendant, False)
        self.assertEqual(nodes[0].children[0].descendant, True)
        self.assertEqual(nodes[0].children[0].children[0].descendant, True)
        self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
        self.assertEqual(nodes[1].get_absolute_url(), self.get_page(4).get_absolute_url())
        self.assertEqual(nodes[1].sibling, True)
        self.assertEqual(nodes[1].selected, False)
    def test_show_menu_num_queries(self):
        """Rendering the menu stays within the expected query budget."""
        context = self.get_context()
        # test standard show_menu
        with self.assertNumQueries(FuzzyInt(5, 7)):
            """
            The queries should be:
                get all pages
                get all page permissions
                get all titles
                get the menu cache key
                create a savepoint (in django>=1.6)
                set the menu cache key
                release the savepoint (in django>=1.6)
            """
            tpl = Template("{% load menu_tags %}{% show_menu %}")
            tpl.render(context)
    def test_show_menu_cache_key_leak(self):
        """Repeated renders reuse the single CacheKey row instead of adding more."""
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu %}")
        self.assertEqual(CacheKey.objects.count(), 0)
        tpl.render(context)
        self.assertEqual(CacheKey.objects.count(), 1)
        tpl.render(context)
        self.assertEqual(CacheKey.objects.count(), 1)
    def test_menu_keys_duplicate_truncates(self):
        """
        When two objects with the same characteristics are present in the
        database, get_or_create truncates the database table to "invalidate"
        the cache, before retrying. This can happen after migrations, and since
        it's only cache, we don't want any propagation of errors.
        """
        CacheKey.objects.create(language="fr", site=1, key="a")
        CacheKey.objects.create(language="fr", site=1, key="a")
        CacheKey.objects.get_or_create(language="fr", site=1, key="a")
        self.assertEqual(CacheKey.objects.count(), 1)
    def test_only_active_tree(self):
        """With in_active=0 only the branch of the current page is expanded."""
        context = self.get_context()
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes[1].children), 0)
        self.assertEqual(len(nodes[0].children), 1)
        self.assertEqual(len(nodes[0].children[0].children), 1)
        # Switch the current page to P4: the expanded branch moves with it.
        context = self.get_context(path=self.get_page(4).get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes[1].children), 1)
        self.assertEqual(len(nodes[0].children), 0)
    def test_only_one_active_level(self):
        """extra_active=1 expands only one level below the active branch."""
        context = self.get_context()
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 1 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes[1].children), 0)
        self.assertEqual(len(nodes[0].children), 1)
        self.assertEqual(len(nodes[0].children[0].children), 0)
    def test_only_level_zero(self):
        """from_level=to_level=0 renders root pages without any children."""
        context = self.get_context()
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_menu 0 0 0 0 %}")
        tpl.render(context)
        nodes = context['children']
        for node in nodes:
            self.assertEqual(len(node.children), 0)
    def test_only_level_one(self):
        """from_level=to_level=1 shows exactly the level-1 pages, childless."""
        context = self.get_context()
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_menu 1 1 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), len(self.get_level(1)))
        for node in nodes:
            self.assertEqual(len(node.children), 0)
    def test_only_level_one_active(self):
        """Level 1 with extra_inactive=0 shows only the active descendant."""
        context = self.get_context()
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_menu 1 1 0 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].descendant, True)
        self.assertEqual(len(nodes[0].children), 0)
    def test_level_zero_and_one(self):
        """Levels 0-1: each root node carries exactly one child."""
        context = self.get_context()
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_menu 0 1 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
        for node in nodes:
            self.assertEqual(len(node.children), 1)
    def test_show_submenu(self):
        """show_sub_menu renders the subtree of the current page's branch."""
        context = self.get_context()
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(nodes[0].descendant, True)
        self.assertEqual(len(nodes), 1)
        self.assertEqual(len(nodes[0].children), 1)
        # Depth limited to 1: the child level disappears.
        tpl = Template("{% load menu_tags %}{% show_sub_menu 1 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(len(nodes[0].children), 0)
        # root_level=1 pins the submenu root one level down, viewed from P3.
        context = self.get_context(path=self.get_page(3).get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 1 %}")
        tpl.render(context)
        nodes = context["children"]
        # P3 is the selected node
        self.assertFalse(nodes[0].selected)
        self.assertTrue(nodes[0].children[0].selected)
        # top level node should be P2
        self.assertEqual(nodes[0].get_absolute_url(), self.get_page(2).get_absolute_url())
        # should include P3 as well
        self.assertEqual(len(nodes[0].children), 1)
        # root_level=0 keeps the real root (P1) visible, viewed from P2.
        context = self.get_context(path=self.get_page(2).get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 0 %}")
        tpl.render(context)
        nodes = context["children"]
        # P1 should be in the nav
        self.assertEqual(nodes[0].get_absolute_url(), self.get_page(1).get_absolute_url())
        # P2 is selected
        self.assertTrue(nodes[0].children[0].selected)
    def test_show_submenu_template_root_level_none(self):
        """root_level=None with nephew limit 1 shows a single nephew branch."""
        context = self.get_context(path=self.get_page(1).get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_sub_menu 100 None 1 %}")
        tpl.render(context)
        nodes = context["children"]
        # First node is P2 (P1 children) thus not selected
        self.assertFalse(nodes[0].selected)
        # nephew limit of 1, so only P2 is the nodes list
        self.assertEqual(len(nodes), 1)
        # P3 is a child of P2, but not in nodes list
        self.assertTrue(nodes[0].children)
    def test_show_breadcrumb(self):
        """Breadcrumbs follow ancestry; hidden ancestors are still included."""
        context = self.get_context(path=self.get_page(3).get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 3)
        # start_level=1 drops the home node from the trail.
        tpl = Template("{% load menu_tags %}{% show_breadcrumb 1 %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 2)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 1)
        tpl = Template("{% load menu_tags %}{% show_breadcrumb 1 %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 0)
        # Removing P1 from navigation must not remove it from the breadcrumb.
        page1 = self.get_page(1)
        page1.in_navigation = False
        page1.save()
        page2 = self.get_page(2)
        context = self.get_context(path=page2.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
        self.assertEqual(isinstance(nodes[0], NavigationNode), True)
        self.assertEqual(nodes[1].get_absolute_url(), page2.get_absolute_url())
    def test_language_chooser(self):
        """Non-public languages are hidden; custom templates are honoured."""
        # test simple language chooser with default args
        lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
        lang_settings[1][0]['public'] = False
        with SettingsOverride(CMS_LANGUAGES=lang_settings):
            context = self.get_context(path=self.get_page(3).get_absolute_url())
            tpl = Template("{% load menu_tags %}{% language_chooser %}")
            tpl.render(context)
            self.assertEqual(len(context['languages']), 3)
            # try a different template and some different args
            tpl = Template("{% load menu_tags %}{% language_chooser 'menu/test_language_chooser.html' %}")
            tpl.render(context)
            self.assertEqual(context['template'], 'menu/test_language_chooser.html')
            tpl = Template("{% load menu_tags %}{% language_chooser 'short' 'menu/test_language_chooser.html' %}")
            tpl.render(context)
            self.assertEqual(context['template'], 'menu/test_language_chooser.html')
            for lang in context['languages']:
                self.assertEqual(*lang)
    def test_page_language_url(self):
        """page_language_url for the current language echoes the current path."""
        path = self.get_page(3).get_absolute_url()
        context = self.get_context(path=path)
        tpl = Template("{%% load menu_tags %%}{%% page_language_url '%s' %%}" % 'en')
        url = tpl.render(context)
        self.assertEqual(url, "%s" % path)
    def test_show_menu_below_id(self):
        """show_menu_below_id anchors the menu under a reverse_id page."""
        page2 = self.get_page(2)
        page2.reverse_id = "hello"
        page2.save()
        page2 = self.reload(page2)
        self.assertEqual(page2.reverse_id, "hello")
        page5 = self.get_page(5)
        context = self.get_context(path=page5.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'hello' %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        page3_url = self.get_page(3).get_absolute_url()
        self.assertEqual(nodes[0].get_absolute_url(), page3_url)
        # Hiding the anchor page from navigation must not change the result.
        page2.in_navigation = False
        page2.save()
        context = self.get_context(path=page5.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'hello' %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].get_absolute_url(), page3_url)
    def test_unpublished(self):
        """Unpublished titles remove the page (and its subtree) from the menu."""
        page2 = self.get_page(2)
        page2.title_set.update(published=False)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(len(nodes[0].children), 0)
    def test_home_not_in_menu(self):
        """Hidden root pages are skipped; their children are promoted to roots."""
        page1 = self.get_page(1)
        page1.in_navigation = False
        page1.save()
        page4 = self.get_page(4)
        page4.in_navigation = False
        page4.save()
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].get_absolute_url(), self.get_page(2).get_absolute_url())
        self.assertEqual(nodes[0].children[0].get_absolute_url(), self.get_page(3).get_absolute_url())
        # Re-showing P4 (after clearing the menu cache) restores two roots.
        page4 = self.get_page(4)
        page4.in_navigation = True
        page4.save()
        menu_pool.clear(settings.SITE_ID)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
    def test_show_submenu_from_non_menu_page(self):
        """
        Here's the structure bit we're interested in:
        + P6 (not in menu)
          + P7
          + P8
        When we render P6, there should be a menu entry for P7 and P8 if the
        tag parameters are "1 XXX XXX XXX"
        """
        page6 = self.get_page(6)
        context = self.get_context(page6.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 1 100 0 1 %}")
        tpl.render(context)
        nodes = context['children']
        number_of_p6_children = len(page6.children.filter(in_navigation=True))
        self.assertEqual(len(nodes), number_of_p6_children)
        page7 = self.get_page(7)
        context = self.get_context(page7.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 1 100 0 1 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), number_of_p6_children)
        tpl = Template("{% load menu_tags %}{% show_menu 2 100 0 1 %}")
        tpl.render(context)
        nodes = context['children']
        number_of_p7_children = len(page7.children.filter(in_navigation=True))
        self.assertEqual(len(nodes), number_of_p7_children)
    def test_show_breadcrumb_invisible(self):
        """only_visible toggles whether hidden pages appear in the breadcrumb."""
        # Must use the drafts to find the parent when calling create_page
        parent = Page.objects.drafts().get(title_set__title='P3')
        invisible_page = create_page("invisible", "nav_playground.html", "en",
                                     parent=parent, published=True, in_navigation=False)
        context = self.get_context(path=invisible_page.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 3)
        tpl = Template("{% load menu_tags %}{% show_breadcrumb 0 'menu/breadcrumb.html' 1 %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 3)
        # only_visible=0: the invisible page itself joins the trail.
        tpl = Template("{% load menu_tags %}{% show_breadcrumb 0 'menu/breadcrumb.html' 0 %}")
        tpl.render(context)
        nodes = context['ancestors']
        self.assertEqual(len(nodes), 4)
class MenuTests(BaseMenuTest):
    """Unit tests for the menu tree-building helpers and menu utils."""
    def test_build_nodes_inner_for_worst_case_menu(self):
        '''
        Tests the worst case scenario: every node's parent appears *after*
        it in the input list, so each link-up has to be deferred.
            node5
             node4
              node3
               node2
                node1
        '''
        # NavigationNode(title, url, id, parent_id)
        node1 = NavigationNode('Test1', '/test1/', 1, 2)
        node2 = NavigationNode('Test2', '/test2/', 2, 3)
        node3 = NavigationNode('Test3', '/test3/', 3, 4)
        node4 = NavigationNode('Test4', '/test4/', 4, 5)
        node5 = NavigationNode('Test5', '/test5/', 5, None)
        menu_class_name = 'Test'
        nodes = [node1, node2, node3, node4, node5, ]
        len_nodes = len(nodes)
        final_list = _build_nodes_inner_for_one_menu(nodes, menu_class_name)
        self.assertEqual(len(final_list), len_nodes)
        # Every parent/child link must be resolved despite the ordering.
        self.assertEqual(node1.parent, node2)
        self.assertEqual(node2.parent, node3)
        self.assertEqual(node3.parent, node4)
        self.assertEqual(node4.parent, node5)
        self.assertEqual(node5.parent, None)
        self.assertEqual(node1.children, [])
        self.assertEqual(node2.children, [node1])
        self.assertEqual(node3.children, [node2])
        self.assertEqual(node4.children, [node3])
        self.assertEqual(node5.children, [node4])
    def test_build_nodes_inner_for_circular_menu(self):
        '''
        TODO:
        To properly handle this test we need to have a circular dependency
        detection system.
        Go nuts implementing it :)
        '''
        pass
    def test_build_nodes_inner_for_broken_menu(self):
        '''
        Tests a broken menu tree (non-existent parent): nodes whose parent
        id (12) cannot be resolved are dropped, along with their subtrees.
            node5
             node4
              node3
            <non-existent>
             node2
              node1
        '''
        node1 = NavigationNode('Test1', '/test1/', 1, 2)
        node2 = NavigationNode('Test2', '/test2/', 2, 12)
        node3 = NavigationNode('Test3', '/test3/', 3, 4)
        node4 = NavigationNode('Test4', '/test4/', 4, 5)
        node5 = NavigationNode('Test5', '/test5/', 5, None)
        menu_class_name = 'Test'
        nodes = [node1, node2, node3, node4, node5, ]
        final_list = _build_nodes_inner_for_one_menu(nodes, menu_class_name)
        # node2 (orphan) and node1 (its child) are excluded.
        self.assertEqual(len(final_list), 3)
        self.assertFalse(node1 in final_list)
        self.assertFalse(node2 in final_list)
        self.assertEqual(node1.parent, None)
        self.assertEqual(node2.parent, None)
        self.assertEqual(node3.parent, node4)
        self.assertEqual(node4.parent, node5)
        self.assertEqual(node5.parent, None)
        self.assertEqual(node1.children, [])
        self.assertEqual(node2.children, [])
        self.assertEqual(node3.children, [])
        self.assertEqual(node4.children, [node3])
        self.assertEqual(node5.children, [node4])
    def test_utils_mark_descendants(self):
        """mark_descendants flags every node below the given roots."""
        tree_nodes, flat_nodes = self._get_nodes()
        mark_descendants(tree_nodes)
        for node in flat_nodes:
            self.assertTrue(node.descendant, node)
    def test_utils_find_selected(self):
        """find_selected returns the selected node, or None for no input."""
        tree_nodes, flat_nodes = self._get_nodes()
        node = flat_nodes[0]
        selected = find_selected(tree_nodes)
        self.assertEqual(selected, node)
        selected = find_selected([])
        self.assertEqual(selected, None)
    def test_utils_cut_levels(self):
        """cut_levels flattens the tree to the nodes of the requested depth."""
        tree_nodes, flat_nodes = self._get_nodes()
        self.assertEqual(cut_levels(tree_nodes, 1), [flat_nodes[1]])
    def test_empty_menu(self):
        """show_menu with no pages renders an empty node list, not an error."""
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 0)
class AdvancedSoftrootTests(SoftrootFixture, SettingsOverrideTestCase):
    """
    Tree in fixture (as taken from issue 662):
        top
            root
                aaa
                    111
                        ccc
                            ddd
                    222
                bbb
                    333
                    444
    In the fixture, all pages are "in_navigation", "published" and
    NOT-"soft_root".
    What is a soft root?
        If a page is a soft root, it becomes the root page in the menu if
        we are currently on or under that page.
        If we are above that page, the children of this page are not shown.
    """
    settings_overrides = {
        'CMS_PERMISSION': False
    }
    def tearDown(self):
        # Fixture pages must not leak between test methods.
        Page.objects.all().delete()
    def get_page(self, name):
        # Look up a public page by its slug from the fixture.
        return Page.objects.public().get(title_set__slug=name)
    def assertTreeQuality(self, a, b, *attrs):
        """
        Checks that the node-lists a and b are the same for attrs.
        This is recursive over the tree — note the recursive call compares
        children by default attrs only (it does not pass *attrs down).
        """
        msg = '%r != %r with %r, %r' % (len(a), len(b), a, b)
        self.assertEqual(len(a), len(b), msg)
        for n1, n2 in zip(a, b):
            for attr in attrs:
                a1 = getattr(n1, attr)
                a2 = getattr(n2, attr)
                msg = '%r != %r with %r, %r (%s)' % (a1, a2, n1, n2, attr)
                self.assertEqual(a1, a2, msg)
            self.assertTreeQuality(n1.children, n2.children)
    def test_top_not_in_nav(self):
        """
        top: not in navigation
        tag: show_menu 0 100 0 100
        context shared: current page is aaa
        context 1: root is NOT a softroot
        context 2: root IS a softroot
        expected result: the two node-trees should be equal
        """
        top = self.get_page('top')
        top.in_navigation = False
        top.save()
        aaa = self.get_page('aaa')
        # root is NOT a soft root
        context = self.get_context(aaa.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        hard_root = context['children']
        # root IS a soft root
        root = self.get_page('root')
        root.soft_root = True
        root.save()
        aaa = self.get_page('aaa')
        context = self.get_context(aaa.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        soft_root = context['children']
        # assert the two trees are equal in terms of 'level' and 'title'
        self.assertTreeQuality(hard_root, soft_root, 'level', 'title')
    def test_top_in_nav(self):
        """
        top: in navigation
        tag: show_menu 0 100 0 100
        context shared: current page is aaa
        context 1: root is NOT a softroot
        context 2: root IS a softroot
        expected result 1:
            0:top
               1:root
                  2:aaa
                     3:111
                        4:ccc
                           5:ddd
                     3:222
                  2:bbb
        expected result 2:
            0:root
               1:aaa
                  2:111
                     3:ccc
                        4:ddd
                  2:222
               1:bbb
        """
        aaa = self.get_page('aaa')
        # root is NOT a soft root
        context = self.get_context(aaa.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        hard_root = context['children']
        # Expected full tree when no soft root intervenes.
        mock_tree = [
            AttributeObject(title='top', level=0, children=[
                AttributeObject(title='root', level=1, children=[
                    AttributeObject(title='aaa', level=2, children=[
                        AttributeObject(title='111', level=3, children=[
                            AttributeObject(title='ccc', level=4, children=[
                                AttributeObject(title='ddd', level=5, children=[])
                            ])
                        ]),
                        AttributeObject(title='222', level=3, children=[])
                    ]),
                    AttributeObject(title='bbb', level=2, children=[])
                ])
            ])
        ]
        self.assertTreeQuality(hard_root, mock_tree)
        # root IS a soft root
        root = self.get_page('root')
        root.soft_root = True
        root.save()
        aaa = self.get_page('aaa')
        context = self.get_context(aaa.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 0 100 %}")
        tpl.render(context)
        soft_root = context['children']
        # With the soft root active, 'root' becomes the menu's level-0 node.
        mock_tree = [
            AttributeObject(title='root', level=0, children=[
                AttributeObject(title='aaa', level=1, children=[
                    AttributeObject(title='111', level=2, children=[
                        AttributeObject(title='ccc', level=3, children=[
                            AttributeObject(title='ddd', level=4, children=[])
                        ])
                    ]),
                    AttributeObject(title='222', level=2, children=[])
                ]),
                AttributeObject(title='bbb', level=1, children=[])
            ])
        ]
        self.assertTreeQuality(soft_root, mock_tree, 'title', 'level')
class ShowSubMenuCheck(SubMenusFixture, BaseMenuTest):
    """
    Tree from fixture:
        + P1
        | + P2
        |   + P3
        + P4
        | + P5
        + P6
          + P7 (not in menu)
          + P8
    """
    def test_show_submenu(self):
        """P7 is excluded from navigation, so P6's submenu contains only P8."""
        page = self.get_page(6)
        subpage = self.get_page(8)
        context = self.get_context(page.get_absolute_url())
        # test standard show_menu
        tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].id, subpage.pk)
    def test_show_submenu_num_queries(self):
        """Rendering the submenu stays within the expected query budget."""
        page = self.get_page(6)
        context = self.get_context(page.get_absolute_url())
        # test standard show_menu
        with self.assertNumQueries(FuzzyInt(5, 7)):
            """
            The queries should be:
                get all pages
                get all page permissions
                get all titles
                get the menu cache key
                create a savepoint (in django>=1.6)
                set the menu cache key
                release the savepoint (in django>=1.6)
            """
            tpl = Template("{% load menu_tags %}{% show_sub_menu %}")
            tpl.render(context)
class ShowMenuBelowIdTests(BaseMenuTest):
    """Regression tests for show_menu_below_id with hidden siblings (issue 521)."""
    def test_not_in_navigation(self):
        """
        Test for issue 521
        Build the following tree:
            A
            |-B
              |-C
              \-D (not in nav)
        """
        a = create_page('A', 'nav_playground.html', 'en', published=True,
                        in_navigation=True, reverse_id='a')
        b = create_page('B', 'nav_playground.html', 'en', parent=a,
                        published=True, in_navigation=True)
        c = create_page('C', 'nav_playground.html', 'en', parent=b,
                        published=True, in_navigation=True)
        create_page('D', 'nav_playground.html', 'en', parent=self.reload(b),
                    published=True, in_navigation=False)
        context = self.get_context(a.get_absolute_url())
        tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        # Only B is visible below A; D must be filtered out of B's children.
        self.assertEqual(len(nodes), 1, nodes)
        node = nodes[0]
        self.assertEqual(node.id, b.publisher_public.id)
        children = node.children
        self.assertEqual(len(children), 1, repr(children))
        child = children[0]
        self.assertEqual(child.id, c.publisher_public.id)
    def test_not_in_navigation_num_queries(self):
        """
        Test for issue 521
        Build the following tree:
            A
            |-B
              |-C
              \-D (not in nav)
        """
        a = create_page('A', 'nav_playground.html', 'en', published=True,
                        in_navigation=True, reverse_id='a')
        b = create_page('B', 'nav_playground.html', 'en', parent=a,
                        published=True, in_navigation=True)
        create_page('C', 'nav_playground.html', 'en', parent=b,
                    published=True, in_navigation=True)
        create_page('D', 'nav_playground.html', 'en', parent=self.reload(b),
                    published=True, in_navigation=False)
        with LanguageOverride('en'):
            context = self.get_context(a.get_absolute_url())
            with self.assertNumQueries(FuzzyInt(5, 7)):
                """
                The queries should be:
                    get all pages
                    get all page permissions
                    get all titles
                    get the menu cache key
                    create a savepoint (in django>=1.6)
                    set the menu cache key
                    release the savepoint (in django>=1.6)
                """
                # Actually seems to run:
                tpl = Template("{% load menu_tags %}{% show_menu_below_id 'a' 0 100 100 100 %}")
                tpl.render(context)
class ViewPermissionMenuTests(SettingsOverrideTestCase):
    """Tests for get_visible_pages under various permission settings,
    including the number of queries each scenario is allowed to issue."""
    settings_overrides = {
        'CMS_PERMISSION': True,
        'CMS_PUBLIC_FOR': 'all',
    }
    def get_request(self, user=None):
        # Build a minimal stand-in for an HttpRequest. NOTE(review): this
        # returns the *class* produced by type(), not an instance; attribute
        # access (request.user etc.) works either way, which is all the
        # tests need.
        attrs = {
            'user': user or AnonymousUser(),
            'REQUEST': {},
            'session': {},
        }
        return type('Request', (object,), attrs)
    def test_public_for_all_staff(self):
        """Staff users see public-for-all pages."""
        request = self.get_request()
        request.user.is_staff = True
        # Bare in-memory Page — never saved; only pk/level/tree_id are read.
        page = Page()
        page.pk = 1
        page.level = 0
        page.tree_id = 1
        pages = [page]
        result = get_visible_pages(request, pages)
        self.assertEqual(result, [1])
    def test_public_for_all_staff_assert_num_queries(self):
        """The staff/public-for-all path issues exactly one query."""
        request = self.get_request()
        request.user.is_staff = True
        page = Page()
        page.pk = 1
        page.level = 0
        page.tree_id = 1
        pages = [page]
        with self.assertNumQueries(1):
            """
            The queries are:
            PagePermission count query
            """
            get_visible_pages(request, pages)
    def test_public_for_all(self):
        """An authenticated non-staff user sees public-for-all pages."""
        user = get_user_model().objects.create_user('user', 'user@domain.com', 'user')
        request = self.get_request(user)
        page = Page()
        page.pk = 1
        page.level = 0
        page.tree_id = 1
        pages = [page]
        result = get_visible_pages(request, pages)
        self.assertEqual(result, [1])
    def test_public_for_all_num_queries(self):
        """Authenticated public-for-all lookup costs two queries."""
        user = get_user_model().objects.create_user('user', 'user@domain.com', 'user')
        request = self.get_request(user)
        # Unsaved in-memory Site is enough for get_visible_pages.
        site = Site()
        site.pk = 1
        page = Page()
        page.pk = 1
        page.level = 0
        page.tree_id = 1
        pages = [page]
        with self.assertNumQueries(2):
            """
            The queries are:
            PagePermission query for affected pages
            GlobalpagePermission query for user
            """
            get_visible_pages(request, pages, site)
    def test_unauthed(self):
        """Anonymous users see public-for-all pages."""
        request = self.get_request()
        page = Page()
        page.pk = 1
        page.level = 0
        page.tree_id = 1
        pages = [page]
        result = get_visible_pages(request, pages)
        self.assertEqual(result, [1])
    def test_unauthed_num_queries(self):
        """The anonymous path issues a single query."""
        request = self.get_request()
        site = Site()
        site.pk = 1
        page = Page()
        page.pk = 1
        page.level = 0
        page.tree_id = 1
        pages = [page]
        with self.assertNumQueries(1):
            """
            The query is:
            PagePermission query for affected pages
            global is not executed because it's lazy
            """
            get_visible_pages(request, pages, site)
    def test_authed_basic_perm(self):
        """With CMS_PUBLIC_FOR='staff', the view_page permission grants access."""
        with SettingsOverride(CMS_PUBLIC_FOR='staff'):
            User = get_user_model()
            user = User()
            user.username = "test"
            user.is_staff = True
            user.save()
            user.user_permissions.add(Permission.objects.get(codename='view_page'))
            request = self.get_request(user)
            page = Page()
            page.pk = 1
            page.level = 0
            page.tree_id = 1
            pages = [page]
            result = get_visible_pages(request, pages)
            self.assertEqual(result, [1])
    def test_authed_basic_perm_num_queries(self):
        """The basic-permission path costs two queries."""
        site = Site()
        site.pk = 1
        with SettingsOverride(CMS_PUBLIC_FOR='staff'):
            User = get_user_model()
            user = User()
            user.username = "test"
            user.is_staff = True
            user.save()
            user.user_permissions.add(Permission.objects.get(codename='view_page'))
            request = self.get_request(user)
            page = Page()
            page.pk = 1
            page.level = 0
            page.tree_id = 1
            pages = [page]
            with self.assertNumQueries(2):
                """
                The queries are:
                PagePermission count query
                GlobalpagePermission count query
                """
                get_visible_pages(request, pages, site)
    def test_authed_no_access(self):
        """Without any permission, an authenticated user sees nothing."""
        with SettingsOverride(CMS_PUBLIC_FOR='staff'):
            user = get_user_model().objects.create_user('user', 'user@domain.com', 'user')
            request = self.get_request(user)
            page = Page()
            page.pk = 1
            page.level = 0
            page.tree_id = 1
            pages = [page]
            result = get_visible_pages(request, pages)
            self.assertEqual(result, [])
    def test_authed_no_access_num_queries(self):
        """The no-access path still only costs two queries."""
        site = Site()
        site.pk = 1
        with SettingsOverride(CMS_PUBLIC_FOR='staff'):
            user = get_user_model().objects.create_user('user', 'user@domain.com', 'user')
            request = self.get_request(user)
            page = Page()
            page.pk = 1
            page.level = 0
            page.tree_id = 1
            pages = [page]
            with self.assertNumQueries(2):
                """
                The queries are:
                View Permission Calculation Query
                globalpagepermissino calculation
                """
                get_visible_pages(request, pages, site)
    def test_unauthed_no_access(self):
        """Anonymous users see nothing when pages are staff-only."""
        with SettingsOverride(CMS_PUBLIC_FOR='staff'):
            request = self.get_request()
            page = Page()
            page.pk = 1
            page.level = 0
            page.tree_id = 1
            pages = [page]
            result = get_visible_pages(request, pages)
            self.assertEqual(result, [])
    def test_unauthed_no_access_num_queries(self):
        """The anonymous no-access path costs one query."""
        site = Site()
        site.pk = 1
        request = self.get_request()
        page = Page()
        page.pk = 1
        page.level = 0
        page.tree_id = 1
        pages = [page]
        with self.assertNumQueries(1):
            get_visible_pages(request, pages, site)
    def test_page_permissions(self):
        """A per-page PagePermission with can_view grants visibility."""
        with SettingsOverride(CMS_PUBLIC_FOR='staff'):
            user = get_user_model().objects.create_user('user', 'user@domain.com', 'user')
            request = self.get_request(user)
            page = create_page('A', 'nav_playground.html', 'en')
            PagePermission.objects.create(can_view=True, user=user, page=page)
            pages = [page]
            result = get_visible_pages(request, pages)
            self.assertEqual(result, [page.pk])
    def test_page_permissions_num_queries(self):
        """The per-page permission path costs three queries."""
        with SettingsOverride(CMS_PUBLIC_FOR='staff'):
            user = get_user_model().objects.create_user('user', 'user@domain.com', 'user')
            request = self.get_request(user)
            page = create_page('A', 'nav_playground.html', 'en')
            PagePermission.objects.create(can_view=True, user=user, page=page)
            pages = [page]
            # NOTE(review): the budget is 3 but the list below names only two
            # queries — the third is presumably the user's groups lookup;
            # confirm against get_visible_pages.
            with self.assertNumQueries(3):
                """
                The queries are:
                PagePermission query for affected pages
                GlobalpagePermission query for user
                """
                get_visible_pages(request, pages)
    def test_page_permissions_view_groups(self):
        """A group-level PagePermission with can_view grants visibility."""
        with SettingsOverride(CMS_PUBLIC_FOR='staff'):
            user = get_user_model().objects.create_user('user', 'user@domain.com', 'user')
            group = Group.objects.create(name='testgroup')
            # user_related_name abstracts over custom user models.
            user_set = getattr(group, user_related_name)
            user_set.add(user)
            request = self.get_request(user)
            page = create_page('A', 'nav_playground.html', 'en')
            PagePermission.objects.create(can_view=True, group=group, page=page)
            pages = [page]
            result = get_visible_pages(request, pages)
            self.assertEqual(result, [page.pk])
    def test_page_permissions_view_groups_num_queries(self):
        """The group-permission path costs four queries."""
        with SettingsOverride(CMS_PUBLIC_FOR='staff'):
            user = get_user_model().objects.create_user('user', 'user@domain.com', 'user')
            group = Group.objects.create(name='testgroup')
            user_set = getattr(group, user_related_name)
            user_set.add(user)
            request = self.get_request(user)
            page = create_page('A', 'nav_playground.html', 'en')
            PagePermission.objects.create(can_view=True, group=group, page=page)
            pages = [page]
            with self.assertNumQueries(4):
                """
                The queries are:
                PagePermission query for affected pages
                GlobalpagePermission query for user
                Group query via PagePermission
                """
                get_visible_pages(request, pages)
    def test_global_permission(self):
        """A GlobalPagePermission with can_view grants access to any page."""
        with SettingsOverride(CMS_PUBLIC_FOR='staff'):
            user = get_user_model().objects.create_user('user', 'user@domain.com', 'user')
            GlobalPagePermission.objects.create(can_view=True, user=user)
            request = self.get_request(user)
            page = Page()
            page.pk = 1
            page.level = 0
            page.tree_id = 1
            pages = [page]
            result = get_visible_pages(request, pages)
            self.assertEqual(result, [1])
    def test_global_permission_num_queries(self):
        """The global-permission path costs two queries."""
        site = Site()
        site.pk = 1
        user = get_user_model().objects.create_user('user', 'user@domain.com', 'user')
        GlobalPagePermission.objects.create(can_view=True, user=user)
        request = self.get_request(user)
        # NOTE(review): `site` is built twice here; the second assignment
        # shadows the first — harmless, but probably a copy/paste leftover.
        site = Site()
        site.pk = 1
        page = Page()
        page.pk = 1
        page.level = 0
        page.tree_id = 1
        pages = [page]
        with self.assertNumQueries(2):
            """
            The queries are:
            PagePermission query for affected pages
            GlobalpagePermission query for user
            """
            get_visible_pages(request, pages, site)
class SoftrootTests(SettingsOverrideTestCase):
"""
Ask evildmp/superdmp if you don't understand softroots!
Softroot description from the docs:
A soft root is a page that acts as the root for a menu navigation tree.
Typically, this will be a page that is the root of a significant new
section on your site.
When the soft root feature is enabled, the navigation menu for any page
will start at the nearest soft root, rather than at the real root of
the site’s page hierarchy.
This feature is useful when your site has deep page hierarchies (and
therefore multiple levels in its navigation trees). In such a case, you
usually don’t want to present site visitors with deep menus of nested
items.
For example, you’re on the page “Introduction to Bleeding”, so the menu
might look like this:
School of Medicine
Medical Education
Departments
Department of Lorem Ipsum
Department of Donec Imperdiet
Department of Cras Eros
Department of Mediaeval Surgery
Theory
Cures
Bleeding
Introduction to Bleeding <this is the current page>
Bleeding - the scientific evidence
Cleaning up the mess
Cupping
Leaches
Maggots
Techniques
Instruments
Department of Curabitur a Purus
Department of Sed Accumsan
Department of Etiam
Research
Administration
Contact us
Impressum
which is frankly overwhelming.
By making “Department of Mediaeval Surgery” a soft root, the menu
becomes much more manageable:
Department of Mediaeval Surgery
Theory
Cures
Bleeding
Introduction to Bleeding <current page>
Bleeding - the scientific evidence
Cleaning up the mess
Cupping
Leaches
Maggots
Techniques
Instruments
"""
settings_overrides = {
'CMS_PERMISSION': False
}
def test_basic_home(self):
"""
Given the tree:
|- Home
| |- Projects (SOFTROOT)
| | |- django CMS
| | |- django Shop
| |- People
Expected menu when on "Home" (0 100 100 100):
|- Home
| |- Projects (SOFTROOT)
| | |- django CMS
| | |- django Shop
| |- People
"""
stdkwargs = {
'template': 'nav_playground.html',
'language': 'en',
'published': True,
'in_navigation': True,
}
home = create_page("Home", **stdkwargs)
projects = create_page("Projects", parent=home, soft_root=True, **stdkwargs)
djangocms = create_page("django CMS", parent=projects, **stdkwargs)
djangoshop = create_page("django Shop", parent=projects, **stdkwargs)
people = create_page("People", parent=home, **stdkwargs)
# On Home
context = self.get_context(home.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
# check everything
self.assertEqual(len(nodes), 1)
homenode = nodes[0]
self.assertEqual(homenode.id, home.publisher_public.pk)
self.assertEqual(len(homenode.children), 2)
projectsnode, peoplenode = homenode.children
self.assertEqual(projectsnode.id, projects.publisher_public.pk)
self.assertEqual(peoplenode.id, people.publisher_public.pk)
self.assertEqual(len(projectsnode.children), 2)
cmsnode, shopnode = projectsnode.children
self.assertEqual(cmsnode.id, djangocms.publisher_public.pk)
self.assertEqual(shopnode.id, djangoshop.publisher_public.pk)
self.assertEqual(len(cmsnode.children), 0)
self.assertEqual(len(shopnode.children), 0)
self.assertEqual(len(peoplenode.children), 0)
def test_basic_projects(self):
"""
Given the tree:
|- Home
| |- Projects (SOFTROOT)
| | |- django CMS
| | |- django Shop
| |- People
Expected menu when on "Projects" (0 100 100 100):
|- Projects (SOFTROOT)
| |- django CMS
| |- django Shop
"""
stdkwargs = {
'template': 'nav_playground.html',
'language': 'en',
'published': True,
'in_navigation': True,
}
home = create_page("Home", **stdkwargs)
projects = create_page("Projects", parent=home, soft_root=True, **stdkwargs)
djangocms = create_page("django CMS", parent=projects, **stdkwargs)
djangoshop = create_page("django Shop", parent=projects, **stdkwargs)
create_page("People", parent=home, **stdkwargs)
# On Projects
context = self.get_context(projects.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
# check everything
self.assertEqual(len(nodes), 1)
projectsnode = nodes[0]
self.assertEqual(projectsnode.id, projects.publisher_public.pk)
self.assertEqual(len(projectsnode.children), 2)
cmsnode, shopnode = projectsnode.children
self.assertEqual(cmsnode.id, djangocms.publisher_public.pk)
self.assertEqual(shopnode.id, djangoshop.publisher_public.pk)
self.assertEqual(len(cmsnode.children), 0)
self.assertEqual(len(shopnode.children), 0)
def test_basic_djangocms(self):
"""
Given the tree:
|- Home
| |- Projects (SOFTROOT)
| | |- django CMS
| | |- django Shop
| |- People
Expected menu when on "django CMS" (0 100 100 100):
|- Projects (SOFTROOT)
| |- django CMS
| |- django Shop
"""
stdkwargs = {
'template': 'nav_playground.html',
'language': 'en',
'published': True,
'in_navigation': True,
}
home = create_page("Home", **stdkwargs)
projects = create_page("Projects", parent=home, soft_root=True, **stdkwargs)
djangocms = create_page("django CMS", parent=projects, **stdkwargs)
djangoshop = create_page("django Shop", parent=projects, **stdkwargs)
create_page("People", parent=home, **stdkwargs)
# On django CMS
context = self.get_context(djangocms.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
# check everything
self.assertEqual(len(nodes), 1)
projectsnode = nodes[0]
self.assertEqual(projectsnode.id, projects.publisher_public.pk)
self.assertEqual(len(projectsnode.children), 2)
cmsnode, shopnode = projectsnode.children
self.assertEqual(cmsnode.id, djangocms.publisher_public.pk)
self.assertEqual(shopnode.id, djangoshop.publisher_public.pk)
self.assertEqual(len(cmsnode.children), 0)
self.assertEqual(len(shopnode.children), 0)
def test_basic_people(self):
"""
Given the tree:
|- Home
| |- Projects (SOFTROOT)
| | |- django CMS
| | |- django Shop
| |- People
Expected menu when on "People" (0 100 100 100):
|- Home
| |- Projects (SOFTROOT)
| | |- django CMS
| | |- django Shop
| |- People
"""
stdkwargs = {
'template': 'nav_playground.html',
'language': 'en',
'published': True,
'in_navigation': True,
}
home = create_page("Home", **stdkwargs)
projects = create_page("Projects", parent=home, soft_root=True, **stdkwargs)
djangocms = create_page("django CMS", parent=projects, **stdkwargs)
djangoshop = create_page("django Shop", parent=projects, **stdkwargs)
people = create_page("People", parent=home, **stdkwargs)
# On People
context = self.get_context(home.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
tpl.render(context)
nodes = context['children']
# check everything
self.assertEqual(len(nodes), 1)
homenode = nodes[0]
self.assertEqual(homenode.id, home.publisher_public.pk)
self.assertEqual(len(homenode.children), 2)
projectsnode, peoplenode = homenode.children
self.assertEqual(projectsnode.id, projects.publisher_public.pk)
self.assertEqual(peoplenode.id, people.publisher_public.pk)
self.assertEqual(len(projectsnode.children), 2)
cmsnode, shopnode = projectsnode.children
self.assertEqual(cmsnode.id, djangocms.publisher_public.pk)
self.assertEqual(shopnode.id, djangoshop.publisher_public.pk)
self.assertEqual(len(cmsnode.children), 0)
self.assertEqual(len(shopnode.children), 0)
self.assertEqual(len(peoplenode.children), 0)
|
RobertABT/heightmap | refs/heads/master | build/matplotlib/examples/pylab_examples/fill_between_demo.py | 6 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Two waveforms used throughout the demo.
x = np.arange(0.0, 2, 0.01)
y1 = np.sin(2*np.pi*x)
y2 = 1.2*np.sin(4*np.pi*x)
# Figure 1: the three basic fill_between variants (against 0, against a
# constant, and between two curves).
fig, (ax1, ax2, ax3) = plt.subplots(3,1, sharex=True)
ax1.fill_between(x, 0, y1)
ax1.set_ylabel('between y1 and 0')
ax2.fill_between(x, y1, 1)
ax2.set_ylabel('between y1 and 1')
ax3.fill_between(x, y1, y2)
ax3.set_ylabel('between y1 and y2')
ax3.set_xlabel('x')
# Figure 2: fill between y1 and y2 only where a logical condition is met.
# Note this is different than calling
#     fill_between(x[where], y1[where], y2[where])
# because of edge effects over multiple contiguous regions.
fig, (ax, ax1) = plt.subplots(2, 1, sharex=True)
ax.plot(x, y1, x, y2, color='black')
ax.fill_between(x, y1, y2, where=y2>=y1, facecolor='green', interpolate=True)
ax.fill_between(x, y1, y2, where=y2<=y1, facecolor='red', interpolate=True)
ax.set_title('fill between where')
# Test support for masked arrays: masked regions are simply not filled.
y2 = np.ma.masked_greater(y2, 1.0)
ax1.plot(x, y1, x, y2, color='black')
ax1.fill_between(x, y1, y2, where=y2>=y1, facecolor='green', interpolate=True)
ax1.fill_between(x, y1, y2, where=y2<=y1, facecolor='red', interpolate=True)
ax1.set_title('Now regions with y2>1 are masked')
# This example illustrates a problem; because of the data gridding, there
# are undesired unfilled triangles at the crossover points.  A brute-force
# solution would be to interpolate all arrays to a very fine grid before
# plotting.
# Figure 3: use transforms to create axes-spanning fills where a condition
# is satisfied.
fig, ax = plt.subplots()
y = np.sin(4*np.pi*x)
ax.plot(x, y, color='black')
# Use the data coordinates for the x-axis and the axes coordinates for the
# y-axis, so the filled bands always span the full height of the axes.
import matplotlib.transforms as mtransforms
trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
theta = 0.9
ax.axhline(theta, color='green', lw=2, alpha=0.5)
ax.axhline(-theta, color='red', lw=2, alpha=0.5)
ax.fill_between(x, 0, 1, where=y>theta, facecolor='green', alpha=0.5, transform=trans)
ax.fill_between(x, 0, 1, where=y<-theta, facecolor='red', alpha=0.5, transform=trans)
plt.show()
|
mendersoftware/integration | refs/heads/master | testutils/api/tenantadm.py | 1 | # Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Host:port of the tenantadm service inside the test composition.
HOST = "mender-tenantadm:8080"
# API base paths for the internal and management APIs.
URL_INTERNAL = "/api/internal/v1/tenantadm"
URL_MGMT = "/api/management/v1/tenantadm"
# Endpoint templates; '{tid}' is substituted with a tenant id by callers.
URL_INTERNAL_SUSPEND = "/tenants/{tid}/status"
URL_INTERNAL_TENANTS = "/tenants"
URL_INTERNAL_TENANT = "/tenants/{tid}"
URL_MGMT_TENANTS = "/tenants"
URL_MGMT_THIS_TENANT = "/user/tenant"
# All addon names known to these test helpers (see make_addons()).
ALL_ADDONS = ["troubleshoot", "configure"]
def req_status(status):
    """Build the request body for a tenant status-change call."""
    payload = dict(status=status)
    return payload
def make_addons(addons=()):
    """Build a tenant 'addons' list, enabling the addons named in *addons*.

    Every addon from ALL_ADDONS is always present in the result; membership
    in *addons* only decides the 'enabled' flag.  The default is an empty
    tuple instead of the previous mutable [] to avoid the shared
    mutable-default-argument pitfall; any iterable of names still works.
    """
    return [{"name": name, "enabled": name in addons} for name in ALL_ADDONS]
|
Hardslog/android_kernel_asus_ze551kl | refs/heads/android-5.0 | scripts/build-all.py | 704 | #! /usr/bin/env python
# Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
from collections import namedtuple
import glob
from optparse import OptionParser
import os
import re
import shutil
import subprocess
import sys
import threading
import Queue
version = 'build-all.py, version 1.99'
# All per-target build output goes under this directory (relative to the
# kernel tree).
build_dir = '../all-kernels'
# Default make targets; replaced by --oldconfig / --make-target options.
make_command = ["vmlinux", "modules", "dtbs"]
# Parsed command-line options; populated by main().
all_options = {}
# Optional 64-bit cross-compiler prefix, used for arm64 defconfigs.
compile64 = os.environ.get('CROSS_COMPILE64')
def error(msg):
    """Write *msg* to stderr, prefixed with 'error: '."""
    text = "error: %s\n" % msg
    sys.stderr.write(text)
def fail(msg):
    """Report *msg* as an error and terminate with exit status 1."""
    error(msg)
    raise SystemExit(1)
# A 32-bit cross compiler is mandatory for every target; bail out early.
if not os.environ.get('CROSS_COMPILE'):
    fail("CROSS_COMPILE must be set in the environment")
def check_kernel():
    """Sanity-check that the working directory looks like an MSM kernel tree."""
    looks_like_kernel = (os.path.isfile('MAINTAINERS') and
                         os.path.isfile('arch/arm/mach-msm/Kconfig'))
    if not looks_like_kernel:
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed."""
    # BUGFIX: 'errno' was referenced below but never imported anywhere in
    # this file, so the EEXIST check raised NameError instead of working.
    import errno
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # A concurrent run may have created the directory between the
            # isdir() check and makedirs(); that is fine.  Anything else
            # (permissions, bad path, ...) is a real failure.
            if exc.errno != errno.EEXIST:
                raise
def build_threads():
    """Determine the number of build threads requested by the user."""
    # -l/--load-average takes precedence over -j/--jobs; default to 1.
    return all_options.load_average or all_options.jobs or 1
# NOTE(review): appears unused in this file -- possibly vestigial.
failed_targets = []
# status: the failed target's short name, or None on success.
# messages: accumulated, prefixed log lines produced by the sequence.
BuildResult = namedtuple('BuildResult', ['status', 'messages'])
class BuildSequence(namedtuple('BuildSequence', ['log_name', 'short_name', 'steps'])):
    """One target's ordered build steps plus the log file they write to.

    Used as a context manager: the per-target log file is open for the
    duration of run().
    """

    def set_width(self, width):
        # Column width used to align the short names in prefixed log lines.
        self.width = width

    def __enter__(self):
        self.log = open(self.log_name, 'w')

    def __exit__(self, type, value, traceback):
        self.log.close()

    def run(self):
        """Run every step in order, stopping at the first failing one.

        Afterwards self.status is a BuildResult whose .status field holds
        the target's short name on failure, or None on success.
        """
        self.status = None
        messages = ["Building: " + self.short_name]

        def emit(line):
            # Prefix each line with the aligned target name, record it for
            # the caller, and append it to the log file.
            text = "[%-*s] %s" % (self.width, self.short_name, line)
            messages.append(text)
            self.log.write(text)
            self.log.write('\n')

        for step in self.steps:
            if step.run(emit):
                self.status = BuildResult(self.short_name, messages)
                break
        if not self.status:
            self.status = BuildResult(None, messages)
class BuildTracker:
    """Manages all of the steps necessary to perform a build.  The
    build consists of one or more sequences of steps.  The different
    sequences can be processed independently, while the steps within a
    sequence must be done in order."""
    def __init__(self):
        # All registered BuildSequence objects.
        self.sequence = []
        # Serializes console output from the worker threads.
        self.lock = threading.Lock()
    def add_sequence(self, log_name, short_name, steps):
        """Register one target's build steps under the given names."""
        self.sequence.append(BuildSequence(log_name, short_name, steps))
    def longest_name(self):
        """Return the length of the longest short name, for log alignment."""
        longest = 0
        for seq in self.sequence:
            longest = max(longest, len(seq.short_name))
        return longest
    def __repr__(self):
        return "BuildTracker(%s)" % self.sequence
    def run_child(self, seq):
        """Worker-thread body: run one sequence under the concurrency limit."""
        seq.set_width(self.longest)
        # Take a token; only 'nthreads' tokens exist, which bounds the
        # number of sequences running at once.
        tok = self.build_tokens.get()
        with self.lock:
            print "Building:", seq.short_name
        with seq:
            seq.run()
        self.results.put(seq.status)
        self.build_tokens.put(tok)
    def run(self):
        """Run all registered sequences concurrently; abort on failures."""
        self.longest = self.longest_name()
        self.results = Queue.Queue()
        children = []
        errors = []
        # Token bucket used as a counting semaphore for the thread limit.
        self.build_tokens = Queue.Queue()
        nthreads = build_threads()
        print "Building with", nthreads, "threads"
        for i in range(nthreads):
            self.build_tokens.put(True)
        for seq in self.sequence:
            child = threading.Thread(target=self.run_child, args=[seq])
            children.append(child)
            child.start()
        # Collect one result per started sequence.
        for child in children:
            stats = self.results.get()
            if all_options.verbose:
                with self.lock:
                    for line in stats.messages:
                        print line
                    sys.stdout.flush()
            if stats.status:
                # stats.status is the failed target's short name.
                errors.append(stats.status)
        for child in children:
            child.join()
        if errors:
            fail("\n ".join(["Failed targets:"] + errors))
class PrintStep:
    """Build step that just emits a fixed message."""

    def __init__(self, message):
        self.message = message

    def run(self, emit):
        """Forward the stored message through *emit*; never fails."""
        emit(self.message)
class MkdirStep:
    """Build step that creates a single directory."""

    def __init__(self, direc):
        self.direc = direc

    def run(self, emit):
        """Log the action through *emit*, then create the directory."""
        emit("mkdir %s" % self.direc)
        os.mkdir(self.direc)
class RmtreeStep:
    """Build step that recursively removes a directory tree."""

    def __init__(self, direc):
        self.direc = direc

    def run(self, emit):
        """Log the action, then delete the tree; a missing path is ignored."""
        emit("rmtree %s" % self.direc)
        shutil.rmtree(self.direc, ignore_errors=True)
class CopyfileStep:
    """Build step that copies one file's contents to another path."""

    def __init__(self, src, dest):
        self.src = src
        self.dest = dest

    def run(self, emit):
        """Log the action, then copy src over dest (contents only)."""
        emit("cp %s %s" % (self.src, self.dest))
        shutil.copyfile(self.src, self.dest)
class ExecStep:
    """Build step that runs an external command and streams its output.

    stdout and stderr of the child are merged and forwarded line by line
    to the step's output callback.  run() returns a truthy
    ('error', exitcode) tuple on a non-zero exit, or None on success.
    """
    def __init__(self, cmd, **kwargs):
        self.cmd = cmd
        # Extra keyword arguments are forwarded verbatim to subprocess.Popen
        # (e.g. env=... for per-target environments).
        self.kwargs = kwargs
    def run(self, outp):
        outp("exec: %s" % (" ".join(self.cmd),))
        with open('/dev/null', 'r') as devnull:
            proc = subprocess.Popen(self.cmd, stdin=devnull,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    **self.kwargs)
            stdout = proc.stdout
            # Forward the child's output one line at a time so it can be
            # logged with the per-target prefix as it is produced.
            while True:
                line = stdout.readline()
                if not line:
                    break
                line = line.rstrip('\n')
                outp(line)
            result = proc.wait()
            if result != 0:
                return ('error', result)
            else:
                return None
class Builder():
    """Knows how to build and stage a single defconfig target."""
    def __init__(self, name, defconfig):
        self.name = name
        self.defconfig = defconfig
        # The bare defconfig file name, used as the make target.
        self.confname = self.defconfig.split('/')[-1]
        # Determine if this is a 64-bit target based on the location
        # of the defconfig.
        self.make_env = os.environ.copy()
        if "/arm64/" in defconfig:
            if compile64:
                self.make_env['CROSS_COMPILE'] = compile64
            else:
                fail("Attempting to build 64-bit, without setting CROSS_COMPILE64")
            self.make_env['ARCH'] = 'arm64'
        else:
            self.make_env['ARCH'] = 'arm'
        # Keep generated .config files reproducible between runs.
        self.make_env['KCONFIG_NOTIMESTAMP'] = 'true'
        self.log_name = "%s/log-%s.log" % (build_dir, self.name)
    def build(self):
        """Return the ordered list of steps that perform this target's build."""
        steps = []
        dest_dir = os.path.join(build_dir, self.name)
        log_name = "%s/log-%s.log" % (build_dir, self.name)
        steps.append(PrintStep('Building %s in %s log %s' %
            (self.name, dest_dir, log_name)))
        if not os.path.isdir(dest_dir):
            steps.append(MkdirStep(dest_dir))
        defconfig = self.defconfig
        dotconfig = '%s/.config' % dest_dir  # NOTE(review): appears unused
        savedefconfig = '%s/defconfig' % dest_dir
        staging_dir = 'install_staging'
        modi_dir = '%s' % staging_dir
        hdri_dir = '%s/usr' % staging_dir
        steps.append(RmtreeStep(os.path.join(dest_dir, staging_dir)))
        # Configure the tree from the defconfig first.
        steps.append(ExecStep(['make', 'O=%s' % dest_dir,
            self.confname], env=self.make_env))
        if not all_options.updateconfigs:
            # Build targets can be dependent upon the completion of
            # previous build targets, so build them one at a time.
            cmd_line = ['make',
                'INSTALL_HDR_PATH=%s' % hdri_dir,
                'INSTALL_MOD_PATH=%s' % modi_dir,
                'O=%s' % dest_dir]
            build_targets = []
            for c in make_command:
                # Entries that look like flags (-j4, --foo) go on the
                # command line; everything else is a make target.
                if re.match(r'^-{1,2}\w', c):
                    cmd_line.append(c)
                else:
                    build_targets.append(c)
            for t in build_targets:
                steps.append(ExecStep(cmd_line + [t], env=self.make_env))
        # Copy the defconfig back.
        if all_options.configs or all_options.updateconfigs:
            steps.append(ExecStep(['make', 'O=%s' % dest_dir,
                'savedefconfig'], env=self.make_env))
            steps.append(CopyfileStep(savedefconfig, defconfig))
        return steps
def update_config(file, str):
    """Append the config option line *str* to the defconfig *file*.

    NOTE(review): the parameter names shadow the builtins 'file' and
    'str'; kept as-is in this documentation-only pass.
    """
    print 'Updating %s with \'%s\'\n' % (file, str)
    with open(file, 'a') as defconfig:
        defconfig.write(str + '\n')
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree.

    Returns one Builder per matching defconfig; 64-bit configs are only
    considered when CROSS_COMPILE64 is set in the environment.
    """
    # NOTE: these are shell glob patterns (fed to glob.glob), not regexes.
    arm_globs = (
        '[fm]sm[0-9]*_defconfig',
        'apq*_defconfig',
        'qsd*_defconfig',
        'mdm*_defconfig',
        'mpq*_defconfig',
    )
    arm64_globs = (
        'msm*_defconfig',
    )
    builders = []
    for pattern in arm_globs:
        for config_path in glob.glob('arch/arm/configs/' + pattern):
            target = os.path.basename(config_path)[:-10]  # strip '_defconfig'
            builders.append(Builder(target, config_path))
    if 'CROSS_COMPILE64' in os.environ:
        for pattern in arm64_globs:
            for config_path in glob.glob('arch/arm64/configs/' + pattern):
                target = os.path.basename(config_path)[:-10] + "-64"
                builders.append(Builder(target, config_path))
    return builders
def build_many(targets):
    """Build every Builder in *targets*, honoring the -j/-l/--updateconfigs options."""
    print "Building %d target(s)" % len(targets)
    # If we are requesting multiple builds, divide down the job number
    # to construct the make_command, giving it a floor of 2, so there
    # is still some parallelism.
    if all_options.jobs and all_options.jobs > 1:
        j = max(all_options.jobs / len(targets), 2)
        make_command.append("-j" + str(j))
    tracker = BuildTracker()
    for target in targets:
        if all_options.updateconfigs:
            # Append the requested option to each defconfig before building.
            update_config(target.defconfig, all_options.updateconfigs)
        steps = target.build()
        tracker.add_sequence(target.log_name, target.name, steps)
    tracker.run()
def main():
    """Command-line entry point: parse options and build the chosen targets."""
    global make_command
    check_kernel()
    check_build()
    configs = scan_configs()
    usage = ("""
    %prog [options] all -- Build all targets
    %prog [options] target target ... -- List specific targets
    %prog [options] perf -- Build all perf targets
    %prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
                 "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
                 ' '.join(make_command))
    (options, args) = parser.parse_args()
    # Publish the parsed options for the rest of the module.
    global all_options
    all_options = options
    if options.list:
        print "Available targets:"
        for target in configs:
            print " %s" % target.name
        sys.exit(0)
    # --oldconfig / --make-target replace the default make targets.
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target
    # Select the targets to build from the positional arguments.
    if args == ['all']:
        build_many(configs)
    elif args == ['perf']:
        targets = []
        for t in configs:
            if "perf" in t.name:
                targets.append(t)
        build_many(targets)
    elif args == ['noperf']:
        targets = []
        for t in configs:
            if "perf" not in t.name:
                targets.append(t)
        build_many(targets)
    elif len(args) > 0:
        all_configs = {}
        for t in configs:
            all_configs[t.name] = t
        targets = []
        for t in args:
            if t not in all_configs:
                parser.error("Target '%s' not one of %s" % (t, all_configs.keys()))
            targets.append(all_configs[t])
        build_many(targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
    main()
|
insomnia-lab/calibre | refs/heads/master | src/calibre/ebooks/oeb/display/webview.py | 7 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
from calibre import guess_type
class EntityDeclarationProcessor(object): # {{{
    """Collect simple <!ENTITY ...> declarations and expand their references.

    Two-token declarations are parsed from *html* and every '&name;'
    occurrence is replaced by the declared value; the expanded markup is
    left in self.processed_html.
    """
    def __init__(self, html):
        self.declared_entities = {}
        for match in re.finditer(r'<!\s*ENTITY\s+([^>]+)>', html):
            tokens = match.group(1).split()
            if len(tokens) > 1:
                # First token is the entity name, second its replacement
                # text (quotes stripped).
                self.declared_entities[tokens[0].strip()] = tokens[1].strip().replace('"', '')
        self.processed_html = html
        for key, val in self.declared_entities.iteritems():
            self.processed_html = self.processed_html.replace('&%s;'%key, val)
# }}}
def self_closing_sub(match):
    """re.sub callback: expand a self-closing tag into an open/close pair.

    <br/> is left untouched; any other '<tag attrs/>' becomes
    '<tag attrs></tag>'.
    """
    name, attrs = match.group(1), match.group(2)
    if name.strip().lower() == 'br':
        return match.group()
    return '<{0}{1}></{0}>'.format(name, attrs)
def load_html(path, view, codec='utf-8', mime_type=None,
        pre_load_callback=lambda x:None, path_is_html=False,
        force_as_html=False):
    """Load an HTML document into the QWebView *view*.

    :param path: path to an HTML file, or the markup itself when
        path_is_html is True
    :param codec: encoding used to decode the file (decode errors replaced)
    :param mime_type: MIME type to load as; guessed from the path when None
    :param pre_load_callback: called with the QUrl about to be loaded
    :param force_as_html: load via setHtml() even if the markup appears to
        contain a namespaced <svg> element
    :return: False if the engine reported a parser error, True otherwise
    """
    from PyQt4.Qt import QUrl, QByteArray
    if mime_type is None:
        mime_type = guess_type(path)[0]
        if not mime_type:
            mime_type = 'text/html'
    if path_is_html:
        html = path
    else:
        with open(path, 'rb') as f:
            html = f.read().decode(codec, 'replace')
    html = EntityDeclarationProcessor(html).processed_html
    # Normalize self-closing tags (except <br/>) into explicit open/close
    # pairs, which the HTML renderer handles more reliably.
    self_closing_pat = re.compile(r'<\s*([:A-Za-z0-9-]+)([^>]*)/\s*>')
    html = self_closing_pat.sub(self_closing_sub, html)
    loading_url = QUrl.fromLocalFile(path)
    pre_load_callback(loading_url)
    if force_as_html or re.search(r'<[a-zA-Z0-9-]+:svg', html) is None:
        view.setHtml(html, loading_url)
    else:
        # The markup references a namespaced <svg> element: feed the raw
        # bytes so the XML/SVG code path is used instead of plain HTML.
        view.setContent(QByteArray(html.encode(codec)), mime_type,
                loading_url)
        mf = view.page().mainFrame()
        elem = mf.findFirstElement('parsererror')
        if not elem.isNull():
            return False
    return True
|
dialounke/pylayers | refs/heads/master | pylayers/__init__.py | 3 | """
pylayers
=========
This file is adapted from scikit-learn package
"""
import sys
__version__ = '0.12-git'
# pylayers' setup.py defines __PYLAYERS_SETUP__ before importing the
# package, so a partially-built tree can be imported during the build
# without pulling in the heavy runtime dependencies.
try:
    __PYLAYERS_SETUP__
except NameError:
    __PYLAYERS_SETUP__ = False
if __PYLAYERS_SETUP__:
    sys.stderr.write('Partial import of pylayers during the build process.\n')
else:
    try:
        from numpy.testing import nosetester
        class _NoseTester(nosetester.NoseTester):
            """ Subclass numpy's NoseTester to add doctests by default
            """
            def test(self, label='fast', verbose=1, extra_argv=['--exe'],
                     doctests=True, coverage=False):
                """Run the full test suite
                Examples
                --------
                This will run the test suite and stop at the first failing
                example
                >>> from pylayers import test
                >>> test(extra_argv=['--exe', '-sx']) #doctest: +SKIP
                """
                return super(_NoseTester, self).test(label=label, verbose=verbose,
                                                     extra_argv=extra_argv,
                                                     doctests=doctests, coverage=coverage)
        try:
            test = _NoseTester(raise_warnings="release").test
        except TypeError:
            # Older versions of numpy do not have a raise_warnings argument
            test = _NoseTester().test
        del nosetester
    except:
        # NOTE(review): bare 'except' deliberately swallows any failure so
        # that pylayers still imports when numpy.testing is unavailable;
        # consider narrowing this to ImportError.
        pass
__all__ = ['gis', 'signal', 'antprop', 'simul','util']
|
hhsprings/cython | refs/heads/master | Cython/Compiler/CythonScope.py | 27 | from __future__ import absolute_import
from .Symtab import ModuleScope
from .PyrexTypes import *
from .UtilityCode import CythonUtilityCode
from .Errors import error
from .Scanning import StringSourceDescriptor
from . import MemoryView
class CythonScope(ModuleScope):
    """Scope for the built-in 'cython' module (cython.*, cython.view.*).

    Most entries are populated lazily by load_cythonscope() the first time
    a lookup misses, to keep simple compilations cheap.
    """
    is_cython_builtin = 1
    # Set once load_cythonscope() has run; guards against re-initialization.
    _cythonscope_initialized = False
    def __init__(self, context):
        ModuleScope.__init__(self, u'cython', None, None)
        self.pxd_file_loaded = True
        self.populate_cython_scope()
        # The Main.Context object
        self.context = context
        # Register the built-in fused types (cython.integral etc.).
        for fused_type in (cy_integral_type, cy_floating_type, cy_numeric_type):
            entry = self.declare_typedef(fused_type.name,
                                         fused_type,
                                         None,
                                         cname='<error>')
            entry.in_cinclude = True
    def lookup_type(self, name):
        # This function should go away when types are all first-level objects.
        type = parse_basic_type(name)
        if type:
            return type
        return super(CythonScope, self).lookup_type(name)
    def lookup(self, name):
        entry = super(CythonScope, self).lookup(name)
        # Lazily populate the scope on the first failed lookup.
        if entry is None and not self._cythonscope_initialized:
            self.load_cythonscope()
            entry = super(CythonScope, self).lookup(name)
        return entry
    def find_module(self, module_name, pos):
        error("cython.%s is not available" % module_name, pos)
    def find_submodule(self, module_name):
        entry = self.entries.get(module_name, None)
        if not entry:
            self.load_cythonscope()
            entry = self.entries.get(module_name, None)
        if entry and entry.as_module:
            return entry.as_module
        else:
            # TODO: fix find_submodule control flow so that we're not
            # expected to create a submodule here (to protect CythonScope's
            # possible immutability). Hack ourselves out of the situation
            # for now.
            raise error((StringSourceDescriptor(u"cython", u""), 0, 0),
                        "cython.%s is not available" % module_name)
    def lookup_qualified_name(self, qname):
        # ExprNode.as_cython_attribute generates qnames and we untangle it here...
        name_path = qname.split(u'.')
        scope = self
        # Walk down the dotted path, one sub-scope at a time.
        while len(name_path) > 1:
            scope = scope.lookup_here(name_path[0]).as_module
            del name_path[0]
            if scope is None:
                return None
        else:
            return scope.lookup_here(name_path[0])
    def populate_cython_scope(self):
        # These are used to optimize isinstance in FinalOptimizePhase
        type_object = self.declare_typedef(
            'PyTypeObject',
            base_type = c_void_type,
            pos = None,
            cname = 'PyTypeObject')
        type_object.is_void = True
        type_object_type = type_object.type
        self.declare_cfunction(
            'PyObject_TypeCheck',
            CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None),
                                    CFuncTypeArg("t", c_ptr_type(type_object_type), None)]),
            pos = None,
            defining = 1,
            cname = 'PyObject_TypeCheck')
    def load_cythonscope(self):
        """
        Creates some entries for testing purposes and entries for
        cython.array() and for cython.view.*.
        """
        if self._cythonscope_initialized:
            return
        self._cythonscope_initialized = True
        cython_testscope_utility_code.declare_in_scope(
            self, cython_scope=self)
        cython_test_extclass_utility_code.declare_in_scope(
            self, cython_scope=self)
        #
        # The view sub-scope
        #
        self.viewscope = viewscope = ModuleScope(u'view', self, None)
        self.declare_module('view', viewscope, None).as_module = viewscope
        viewscope.is_cython_builtin = True
        viewscope.pxd_file_loaded = True
        cythonview_testscope_utility_code.declare_in_scope(
            viewscope, cython_scope=self)
        view_utility_scope = MemoryView.view_utility_code.declare_in_scope(
            self.viewscope, cython_scope=self,
            whitelist=MemoryView.view_utility_whitelist)
        # self.entries["array"] = view_utility_scope.entries.pop("array")
# self.entries["array"] = view_utility_scope.entries.pop("array")
def create_cython_scope(context):
    """Return a fresh CythonScope bound to *context*.

    A singleton could probably work here, but it is not yet clear whether
    any code mutates the scope (which would make reuse across different
    contexts unsafe), so a new scope is created each time.
    """
    scope = CythonScope(context)
    return scope
# Load test utilities for the cython scope
def load_testscope_utility(cy_util_name, **kwargs):
    """Load the named test utility from TestCythonScope.pyx."""
    source_file = "TestCythonScope.pyx"
    return CythonUtilityCode.load(cy_util_name, source_file, **kwargs)
undecorated_methods_protos = UtilityCode(proto=u"""
/* These methods are undecorated and have therefore no prototype */
static PyObject *__pyx_TestClass_cdef_method(
struct __pyx_TestClass_obj *self, int value);
static PyObject *__pyx_TestClass_cpdef_method(
struct __pyx_TestClass_obj *self, int value, int skip_dispatch);
static PyObject *__pyx_TestClass_def_method(
PyObject *self, PyObject *value);
""")
cython_testscope_utility_code = load_testscope_utility("TestScope")
test_cython_utility_dep = load_testscope_utility("TestDep")
cython_test_extclass_utility_code = \
load_testscope_utility("TestClass", name="TestClass",
requires=[undecorated_methods_protos,
test_cython_utility_dep])
cythonview_testscope_utility_code = load_testscope_utility("View.TestScope")
|
likelyzhao/mxnet | refs/heads/dev-faster-rcnn | example/ssd/evaluate/evaluate_net.py | 10 | from __future__ import print_function
import os
import sys
import importlib
import mxnet as mx
from dataset.iterator import DetRecordIter
from config.config import cfg
from evaluate.eval_metric import MApMetric, VOC07MApMetric
import logging
def evaluate_net(net, path_imgrec, num_classes, mean_pixels, data_shape,
                 model_prefix, epoch, ctx=mx.cpu(), batch_size=1,
                 path_imglist="", nms_thresh=0.45, force_nms=False,
                 ovp_thresh=0.5, use_difficult=False, class_names=None,
                 voc07_metric=False):
    """
    Evaluate network given validation record file
    Parameters:
    ----------
    net : str or None
        Network name or use None to load from json without modifying
    path_imgrec : str
        path to the record validation file
    path_imglist : str
        path to the list file to replace labels in record file, optional
    num_classes : int
        number of classes, not including background
    mean_pixels : tuple
        (mean_r, mean_g, mean_b)
    data_shape : tuple or int
        (3, height, width) or height/width
    model_prefix : str
        model prefix of saved checkpoint
    epoch : int
        load model epoch
    ctx : mx.ctx
        mx.gpu() or mx.cpu()
    batch_size : int
        validation batch size
    nms_thresh : float
        non-maximum suppression threshold
    force_nms : boolean
        whether suppress different class objects
    ovp_thresh : float
        AP overlap threshold for true/false postives
    use_difficult : boolean
        whether to use difficult objects in evaluation if applicable
    class_names : comma separated str
        class names in string, must correspond to num_classes if set
    voc07_metric : boolean
        whether to use 11-point evluation as in VOC07 competition
    """
    # set up logger
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # args: accept a bare edge size and expand it to (3, h, w)
    if isinstance(data_shape, int):
        data_shape = (3, data_shape, data_shape)
    assert len(data_shape) == 3 and data_shape[0] == 3
    # checkpoints are saved per input resolution
    model_prefix += '_' + str(data_shape[1])
    # iterator
    eval_iter = DetRecordIter(path_imgrec, batch_size, data_shape,
                              path_imglist=path_imglist, **cfg.valid)
    # model params
    load_net, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
    # network: reuse the checkpoint symbol, or rebuild it from its module
    if net is None:
        net = load_net
    else:
        sys.path.append(os.path.join(cfg.ROOT_DIR, 'symbol'))
        net = importlib.import_module("symbol_" + net) \
            .get_symbol(num_classes, nms_thresh, force_nms)
    # make sure a 'label' input exists so the iterator's labels bind
    # (idiom fix: was "if not 'label' in ...")
    if 'label' not in net.list_arguments():
        label = mx.sym.Variable(name='label')
        net = mx.sym.Group([net, label])
    # init module; all parameters are fixed since this is evaluation only
    mod = mx.mod.Module(net, label_names=('label',), logger=logger, context=ctx,
                        fixed_param_names=net.list_arguments())
    mod.bind(data_shapes=eval_iter.provide_data, label_shapes=eval_iter.provide_label)
    mod.set_params(args, auxs, allow_missing=False, force_init=True)
    # run evaluation
    if voc07_metric:
        metric = VOC07MApMetric(ovp_thresh, use_difficult, class_names)
    else:
        metric = MApMetric(ovp_thresh, use_difficult, class_names)
    results = mod.score(eval_iter, metric, num_batch=None)
    for k, v in results:
        print("{}: {}".format(k, v))
|
mark-ignacio/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/path_unittest.py | 124 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import sys
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.system.platforminfo import PlatformInfo
from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system import path
class AbspathTest(unittest.TestCase):
    """Tests for path.abspath_to_uri() (and cygpath) across host platforms."""
    def platforminfo(self):
        # Real platform info, for tests that must run on the actual host.
        return SystemHost().platform
    def test_abspath_to_uri_cygwin(self):
        # NOTE(review): the early return silently *passes* on non-cygwin
        # hosts; self.skipTest() would report these as skipped instead.
        if sys.platform != 'cygwin':
            return
        self.assertEqual(path.abspath_to_uri(self.platforminfo(), '/cygdrive/c/foo/bar.html'),
                         'file:///C:/foo/bar.html')
    def test_abspath_to_uri_unixy(self):
        self.assertEqual(path.abspath_to_uri(MockPlatformInfo(), "/foo/bar.html"),
                         'file:///foo/bar.html')
    def test_abspath_to_uri_win(self):
        # Windows drive letters are lowered into the file: URI unchanged.
        if sys.platform != 'win32':
            return
        self.assertEqual(path.abspath_to_uri(self.platforminfo(), 'c:\\foo\\bar.html'),
                         'file:///c:/foo/bar.html')
    def test_abspath_to_uri_escaping_unixy(self):
        # Spaces, '%' and '?' must be percent-encoded.
        self.assertEqual(path.abspath_to_uri(MockPlatformInfo(), '/foo/bar + baz%?.html'),
                         'file:///foo/bar%20+%20baz%25%3F.html')
    # Note that you can't have '?' in a filename on windows.
    def test_abspath_to_uri_escaping_cygwin(self):
        if sys.platform != 'cygwin':
            return
        self.assertEqual(path.abspath_to_uri(self.platforminfo(), '/cygdrive/c/foo/bar + baz%.html'),
                         'file:///C:/foo/bar%20+%20baz%25.html')
    def test_stop_cygpath_subprocess(self):
        if sys.platform != 'cygwin':
            return
        # Call cygpath to ensure the subprocess is running.
        path.cygpath("/cygdrive/c/foo.txt")
        self.assertTrue(path._CygPath._singleton.is_running())
        # Stop it.
        path._CygPath.stop_cygpath_subprocess()
        # Ensure that it is stopped.
        self.assertFalse(path._CygPath._singleton.is_running())
|
kyubifire/softlayer-python | refs/heads/master | tests/functional_tests.py | 9 | """
SoftLayer.tests.functional_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import os
import SoftLayer
from SoftLayer import testing
from SoftLayer import transports
class FunctionalTest(testing.TestCase):
    """Base class for tests that talk to the live SoftLayer API.

    Credentials are read from the environment; a test is skipped when they
    are not configured.
    """
    def _get_creds(self):
        """Return endpoint/username/api_key pulled from the environment.

        Calls ``self.skipTest`` (which raises ``unittest.SkipTest``) when
        ``SL_USERNAME`` or ``SL_API_KEY`` is missing.
        """
        for key in 'SL_USERNAME SL_API_KEY'.split():
            if key not in os.environ:
                # skipTest() itself raises SkipTest, so the former
                # ``raise self.skipTest(...)`` re-raise was dead code.
                self.skipTest('SL_USERNAME and SL_API_KEY environmental '
                              'variables not set')
        return {
            'endpoint': (os.environ.get('SL_API_ENDPOINT') or
                         SoftLayer.API_PUBLIC_ENDPOINT),
            'username': os.environ['SL_USERNAME'],
            'api_key': os.environ['SL_API_KEY']
        }
class UnauthedUser(FunctionalTest):
    """Live-API tests that must fail without valid credentials."""
    def test_failed_auth(self):
        # Bogus credentials should be rejected by the portal login call.
        client = SoftLayer.Client(
            username='doesnotexist', api_key='issurelywrong', timeout=20)
        self.assertRaises(
            SoftLayer.SoftLayerAPIError,
            client['SoftLayer_User_Customer'].getPortalLoginToken)
    def test_no_hostname(self):
        # A transport pointed at an unresolvable host must raise
        # TransportError with faultCode 0.
        try:
            request = transports.Request()
            request.service = 'SoftLayer_Account'
            request.method = 'getObject'
            request.id = 1234
            # This test will fail if 'notvalidsoftlayer.com' becomes a thing
            transport = transports.XmlRpcTransport(
                endpoint_url='http://notvalidsoftlayer.com',
            )
            transport(request)
        except SoftLayer.TransportError as ex:
            self.assertEqual(ex.faultCode, 0)
        else:
            self.fail('Transport Error Exception Not Raised')
class AuthedUser(FunctionalTest):
    """Live-API tests that require valid credentials (skipped otherwise)."""
    def test_service_does_not_exist(self):
        # Requesting an unknown service must produce the XML-RPC
        # "method not found" fault (-32601).
        creds = self._get_creds()
        client = SoftLayer.Client(
            username=creds['username'],
            api_key=creds['api_key'],
            endpoint_url=creds['endpoint'],
            timeout=20)
        try:
            client["SoftLayer_DOESNOTEXIST"].getObject()
        except SoftLayer.SoftLayerAPIError as e:
            self.assertEqual(e.faultCode, '-32601')
            self.assertEqual(e.faultString, 'Service does not exist')
            self.assertEqual(e.reason, 'Service does not exist')
        else:
            self.fail('No Exception Raised')
    def test_get_users(self):
        # The authenticated username must appear in the account's user list.
        creds = self._get_creds()
        client = SoftLayer.Client(
            username=creds['username'],
            api_key=creds['api_key'],
            endpoint_url=creds['endpoint'],
            timeout=20)
        found = False
        results = client["Account"].getUsers()
        for user in results:
            if user.get('username') == creds['username']:
                found = True
        self.assertTrue(found)
    def test_result_types(self):
        # Sanity-check that the transport decodes API values into native
        # Python types (list of dicts with int/str fields).
        creds = self._get_creds()
        client = SoftLayer.Client(
            username=creds['username'],
            api_key=creds['api_key'],
            endpoint_url=creds['endpoint'],
            timeout=20)
        result = client['SoftLayer_User_Security_Question'].getAllObjects()
        self.assertIsInstance(result, list)
        self.assertIsInstance(result[0], dict)
        self.assertIsInstance(result[0]['viewable'], int)
        self.assertIsInstance(result[0]['question'], str)
        self.assertIsInstance(result[0]['id'], int)
        self.assertIsInstance(result[0]['displayOrder'], int)
|
mudbungie/NetExplorer | refs/heads/master | env/lib/python3.4/site-packages/omnijson/packages/simplejson/ordered_dict.py | 1039 | """Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
    all
except NameError:
    # Python < 2.5 fallback: True iff every element of seq is truthy.
    def all(seq):
        for elem in seq:
            if not elem:
                return False
        return True
class OrderedDict(dict, DictMixin):
    """A dict that remembers insertion order (pre-2.7 backport).

    Order is tracked in a circular doubly linked list of ``[key, prev,
    next]`` cells: ``self.__map`` maps each key to its cell and
    ``self.__end`` is the sentinel cell that closes the ring.
    """
    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Only initialize the linked list once; __init__ may be called
            # again (e.g. via copy()) on an already-set-up instance.
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)
    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)
    def __setitem__(self, key, value):
        # New keys are appended at the tail of the ring (just before the
        # sentinel); existing keys keep their position.
        if key not in self:
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)
    def __delitem__(self, key):
        # Unlink the cell from the ring in O(1).
        dict.__delitem__(self, key)
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev
    def __iter__(self):
        # Walk the ring forward from the sentinel.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
    def __reversed__(self):
        # Walk the ring backward from the sentinel.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]
    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO if *last* is true."""
        if not self:
            raise KeyError('dictionary is empty')
        # Modified from original to support Python 2.4, see
        # http://code.google.com/p/simplejson/issues/detail?id=53
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value
    def __reduce__(self):
        # The ring contains self-referential lists that pickle poorly, so
        # temporarily strip __map/__end, snapshot the rest, then restore.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def keys(self):
        return list(self)
    # Derive the remaining mapping API from the primitives above.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems
    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    def copy(self):
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        # Two OrderedDicts are equal only if their pairs match in order;
        # against a plain dict, fall back to order-insensitive equality.
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
return not self == other
|
hvy/chainer | refs/heads/master | tests/chainer_tests/functions_tests/pooling_tests/test_roi_max_pooling_2d.py | 4 | import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
def _pair(x):
    """Return ``x`` unchanged when it is iterable, else the pair ``(x, x)``."""
    is_iterable = isinstance(x, chainer.utils.collections_abc.Iterable)
    return x if is_iterable else (x, x)
@testing.parameterize(*testing.product({
    'dtype': [numpy.float32, numpy.float64],
    'outsize': [
        5, 7, (5, 7),
        (numpy.int32(5), numpy.int32(7))],
    'spatial_scale': [
        0.6, 1.0, 2, numpy.float32(0.6), numpy.int32(2)],
}))
class TestROIMaxPooling2D(unittest.TestCase):
    """Forward/backward tests for F.roi_max_pooling_2d over dtype,
    output size and spatial-scale combinations (see parameterize above).
    """
    def setUp(self):
        # Fixture: a (3, 3, 12, 8) feature map, four ROI boxes with their
        # batch indices, and a matching upstream gradient.
        N = 3
        n_channels = 3
        self.x = pooling_nd_helper.shuffled_linspace(
            (N, n_channels, 12, 8), self.dtype)
        # ROIs as (y_min, x_min, y_max, x_max) — TODO confirm against the
        # roi_max_pooling_2d docs; roi_indices maps each ROI to a batch item.
        self.rois = numpy.array([
            [1, 1, 7, 7],
            [2, 6, 12, 8],
            [1, 3, 11, 6],
            [3, 3, 4, 4]
        ], dtype=self.dtype)
        self.roi_indices = numpy.array([0, 2, 1, 0], dtype=numpy.int32)
        n_rois = self.rois.shape[0]
        outsize = _pair(self.outsize)
        self.gy = numpy.random.uniform(
            -1, 1, (n_rois, n_channels,
                    outsize[0], outsize[1])).astype(self.dtype)
        # Looser numerical tolerances for half precision.
        if self.dtype == numpy.float16:
            self.check_backward_options = {
                'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
        else:
            self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
    def check_forward(self, x_data, roi_data, roi_index_data):
        # Forward only checks dtype and output shape, not exact values.
        x = chainer.Variable(x_data)
        rois = chainer.Variable(roi_data)
        roi_indices = chainer.Variable(roi_index_data)
        y = functions.roi_max_pooling_2d(
            x, rois, roi_indices, outsize=self.outsize,
            spatial_scale=self.spatial_scale)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)
        self.assertEqual(self.gy.shape, y_data.shape)
    def test_forward_cpu(self):
        self.check_forward(self.x, self.rois, self.roi_indices)
    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
            cuda.to_gpu(self.roi_indices))
    @attr.gpu
    def test_forward_cpu_gpu_equal(self):
        # CPU and GPU kernels must agree on identical inputs.
        # cpu
        x_cpu = chainer.Variable(self.x)
        rois_cpu = chainer.Variable(self.rois)
        roi_indices_cpu = chainer.Variable(self.roi_indices)
        y_cpu = functions.roi_max_pooling_2d(
            x_cpu, rois_cpu, roi_indices_cpu, outsize=self.outsize,
            spatial_scale=self.spatial_scale)
        # gpu
        x_gpu = chainer.Variable(cuda.to_gpu(self.x))
        rois_gpu = chainer.Variable(cuda.to_gpu(self.rois))
        roi_indices_gpu = chainer.Variable(cuda.to_gpu(self.roi_indices))
        y_gpu = functions.roi_max_pooling_2d(
            x_gpu, rois_gpu, roi_indices_gpu, outsize=self.outsize,
            spatial_scale=self.spatial_scale)
        testing.assert_allclose(y_cpu.data, cuda.to_cpu(y_gpu.data))
    def check_backward(self, x_data, roi_data, roi_index_data, y_grad):
        # Gradients are checked only w.r.t. x (no_grads skips rois/indices).
        def f(x, rois, roi_indices):
            y = functions.roi_max_pooling_2d(
                x, rois, roi_indices, outsize=self.outsize,
                spatial_scale=self.spatial_scale)
            xp = cuda.get_array_module(y)
            # replace -inf with zero for gradient_check
            y = functions.where(
                xp.isinf(y.array), xp.zeros(y.shape, dtype=y.dtype), y)
            return y
        gradient_check.check_backward(
            f, (x_data, roi_data, roi_index_data), y_grad,
            no_grads=[False, True, True], **self.check_backward_options)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.rois, self.roi_indices, self.gy)
    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
            cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))
|
Liyier/learning_log | refs/heads/master | env/Lib/site-packages/django/contrib/gis/forms/fields.py | 504 | from __future__ import unicode_literals
from django import forms
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.utils.translation import ugettext_lazy as _
from .widgets import OpenLayersWidget
class GeometryField(forms.Field):
    """
    This is the basic form field for a Geometry. Any textual input that is
    accepted by GEOSGeometry is accepted by this form. By default,
    this includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON.
    """
    widget = OpenLayersWidget
    geom_type = 'GEOMETRY'  # 'GEOMETRY' accepts any geometry type.
    default_error_messages = {
        'required': _('No geometry value provided.'),
        'invalid_geom': _('Invalid geometry value.'),
        'invalid_geom_type': _('Invalid geometry type.'),
        'transform_error': _('An error occurred when transforming the geometry '
                             'to the SRID of the geometry form field.'),
    }
    def __init__(self, **kwargs):
        # Pop out attributes from the database field, or use sensible
        # defaults (e.g., allow None).
        self.srid = kwargs.pop('srid', None)
        self.geom_type = kwargs.pop('geom_type', self.geom_type)
        super(GeometryField, self).__init__(**kwargs)
        # Expose the geometry type to the map widget.
        self.widget.attrs['geom_type'] = self.geom_type
    def to_python(self, value):
        """
        Transforms the value to a Geometry object.
        """
        if value in self.empty_values:
            return None
        if not isinstance(value, GEOSGeometry):
            try:
                value = GEOSGeometry(value)
            except (GEOSException, ValueError, TypeError):
                raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom')
        # Try to set the srid: prefer the widget's map SRID, then the
        # field's own srid, leaving it unset otherwise.
        if not value.srid:
            try:
                value.srid = self.widget.map_srid
            except AttributeError:
                if self.srid:
                    value.srid = self.srid
        return value
    def clean(self, value):
        """
        Validates that the input value can be converted to a Geometry
        object (which is returned). A ValidationError is raised if
        the value cannot be instantiated as a Geometry.
        """
        geom = super(GeometryField, self).clean(value)
        if geom is None:
            return geom
        # Ensuring that the geometry is of the correct type (indicated
        # using the OGC string label).
        if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY':
            raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type')
        # Transforming the geometry if the SRID was set.
        if self.srid and self.srid != -1 and self.srid != geom.srid:
            try:
                geom.transform(self.srid)
            except GEOSException:
                raise forms.ValidationError(
                    self.error_messages['transform_error'], code='transform_error')
        return geom
    def has_changed(self, initial, data):
        """ Compare geographic value of data with its initial value. """
        try:
            data = self.to_python(data)
            initial = self.to_python(initial)
        except forms.ValidationError:
            return True
        # Only do a geographic comparison if both values are available
        if initial and data:
            data.transform(initial.srid)
            # If the initial value was not added by the browser, the geometry
            # provided may be slightly different, the first time it is saved.
            # The comparison is done with a very low tolerance.
            return not initial.equals_exact(data, tolerance=0.000001)
        else:
            # Check for change of state of existence
            return bool(initial) != bool(data)
# Concrete per-geometry-type fields: each restricts clean() to a single
# OGC geometry type via the geom_type class attribute.
class GeometryCollectionField(GeometryField):
    geom_type = 'GEOMETRYCOLLECTION'
class PointField(GeometryField):
    geom_type = 'POINT'
class MultiPointField(GeometryField):
    geom_type = 'MULTIPOINT'
class LineStringField(GeometryField):
    geom_type = 'LINESTRING'
class MultiLineStringField(GeometryField):
    geom_type = 'MULTILINESTRING'
class PolygonField(GeometryField):
    geom_type = 'POLYGON'
class MultiPolygonField(GeometryField):
    geom_type = 'MULTIPOLYGON'
|
JShadowMan/package | refs/heads/master | python/gui/tcl_tkinter/centering_window.py | 2 | #!/usr/bin/env python3
from tkinter import *
class CenteringWindow(Frame):
    """Main frame that centers its toplevel window on the screen."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.__init_window_ui()
    def __init_window_ui(self):
        """Configure title/layout and center the window at 1366x768."""
        self.pack(fill=BOTH, expand=1)
        self.parent.title('Centering Window')
        self.__init_window_position(1366, 768)
    def __init_window_position(self, width=None, height=None):
        """Size the window and place it centered on the screen.

        ``width``/``height`` default to the full screen size.
        """
        screen_width = self.parent.winfo_screenwidth()
        screen_height = self.parent.winfo_screenheight()
        if width is None:
            width = screen_width
        if height is None:
            height = screen_height
        # Center the window; fixed misspelled local ('x_coorindate').
        x_coordinate = int((screen_width - width) / 2)
        y_coordinate = int((screen_height - height) / 2)
        self.parent.geometry(
            '{}x{}+{}+{}'.format(width, height, x_coordinate, y_coordinate))
if __name__ == '__main__':
    # Build the Tk root, attach the centering frame, and enter the event loop.
    root = Tk()
    main_frame = CenteringWindow(root)
    root.mainloop()
|
kustodian/ansible | refs/heads/devel | lib/ansible/plugins/action/eos.py | 8 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils.network.eos.eos import eos_provider_spec
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
    """Action plugin for Arista EOS modules.

    Bridges legacy ``connection: local`` + provider dicts onto the modern
    persistent connection plugins (network_cli/httpapi), or passes an eAPI
    provider straight through to the module.
    """
    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect
        module_name = self._task.action.split('.')[-1]
        self._config_module = True if module_name == 'eos_config' else False
        persistent_connection = self._play_context.connection.split('.')[-1]
        warnings = []
        if persistent_connection in ('network_cli', 'httpapi'):
            # Modern connections manage credentials themselves; drop any
            # leftover provider/transport args with a warning.
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning('provider is unnecessary when using %s and will be ignored' % self._play_context.connection)
                del self._task.args['provider']
            if self._task.args.get('transport'):
                display.warning('transport is unnecessary when using %s and will be ignored' % self._play_context.connection)
                del self._task.args['transport']
        elif self._play_context.connection == 'local':
            # Deprecated path: synthesize a persistent connection from the
            # provider dict (CLI over SSH) or hand off to eAPI.
            provider = load_provider(eos_provider_spec, self._task.args)
            transport = provider['transport'] or 'cli'
            display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
            if transport == 'cli':
                # Clone the play context and overlay provider credentials.
                pc = copy.deepcopy(self._play_context)
                pc.connection = 'ansible.netcommon.network_cli'
                pc.network_os = 'arista.eos.eos'
                pc.remote_addr = provider['host'] or self._play_context.remote_addr
                pc.port = int(provider['port'] or self._play_context.port or 22)
                pc.remote_user = provider['username'] or self._play_context.connection_user
                pc.password = provider['password'] or self._play_context.password
                pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
                pc.become = provider['authorize'] or False
                if pc.become:
                    pc.become_method = 'enable'
                pc.become_pass = provider['auth_pass']
                connection = self._shared_loader_obj.connection_loader.get('ansible.netcommon.persistent', pc, sys.stdin,
                                                                           task_uuid=self._task._uuid)
                # TODO: Remove below code after ansible minimal is cut out
                if connection is None:
                    # Fall back to the non-collection plugin names.
                    pc.connection = 'network_cli'
                    pc.network_os = 'eos'
                    connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid)
                display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
                command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout')
                connection.set_options(direct={'persistent_command_timeout': command_timeout})
                # Start the persistent connection and hand its socket to the
                # module via ansible_socket.
                socket_path = connection.run()
                display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
                if not socket_path:
                    return {'failed': True,
                            'msg': 'unable to open shell. Please see: ' +
                                   'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
                task_vars['ansible_socket'] = socket_path
                warnings.append(['connection local support for this module is deprecated and will be removed in version 2.14,'
                                 ' use connection %s' % pc.connection])
            else:
                self._task.args['provider'] = ActionModule.eapi_implementation(provider, self._play_context)
                warnings.append(['connection local support for this module is deprecated and will be removed in version 2.14,'
                                 ' use connection either httpapi or ansible.netcommon.httpapi (whichever is applicable)'])
        else:
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
        result = super(ActionModule, self).run(task_vars=task_vars)
        if warnings:
            if 'warnings' in result:
                result['warnings'].extend(warnings)
            else:
                result['warnings'] = warnings
        return result
    @staticmethod
    def eapi_implementation(provider, play_context):
        """Fill eAPI provider defaults from the play context and return it."""
        provider['transport'] = 'eapi'
        if provider.get('host') is None:
            provider['host'] = play_context.remote_addr
        if provider.get('port') is None:
            # 443 for HTTPS eAPI, 80 for plain HTTP.
            default_port = 443 if provider['use_ssl'] else 80
            provider['port'] = int(play_context.port or default_port)
        if provider.get('timeout') is None:
            provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT
        if provider.get('username') is None:
            provider['username'] = play_context.connection_user
        if provider.get('password') is None:
            provider['password'] = play_context.password
        if provider.get('authorize') is None:
            provider['authorize'] = False
        return provider
|
yannickcr/CouchPotatoServer | refs/heads/develop | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/engadget.py | 18 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .fivemin import FiveMinIE
from ..utils import (
url_basename,
)
class EngadgetIE(InfoExtractor):
    """Extractor for Engadget videos, which are hosted on 5min.

    Direct /video/5min/<id> URLs resolve to a single FiveMin result;
    article URLs are scanned for embedded 5min players and returned as a
    playlist.
    """
    _VALID_URL = r'''(?x)https?://www.engadget.com/
        (?:video/5min/(?P<id>\d+)|
            [\d/]+/.*?)
        '''
    _TEST = {
        'url': 'http://www.engadget.com/video/5min/518153925/',
        'md5': 'c6820d4828a5064447a4d9fc73f312c9',
        'info_dict': {
            'id': '518153925',
            'ext': 'mp4',
            'title': 'Samsung Galaxy Tab Pro 8.4 Review',
        },
        'add_ie': ['FiveMin'],
    }
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        if video_id is not None:
            # Direct 5min video URL: delegate to the FiveMin extractor.
            return FiveMinIE._build_result(video_id)
        else:
            # Article page: collect every embedded 5min player's playList id.
            title = url_basename(url)
            webpage = self._download_webpage(url, title)
            ids = re.findall(r'<iframe[^>]+?playList=(\d+)', webpage)
            return {
                '_type': 'playlist',
                'title': title,
                'entries': [FiveMinIE._build_result(id) for id in ids]
            }
|
fxfitz/ansible | refs/heads/devel | lib/ansible/modules/cloud/ovirt/ovirt_vms_facts.py | 73 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vms_facts
short_description: Retrieve facts about one or more oVirt/RHV virtual machines
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV virtual machines."
notes:
- "This module creates a new top-level C(ovirt_vms) fact, which
contains a list of virtual machines."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search VM X from cluster Y use following pattern:
name=X and cluster=Y"
all_content:
description:
- "If I(true) all the attributes of the virtual machines should be
included in the response."
case_sensitive:
description:
- "If I(true) performed search will take case into account."
max:
description:
- "The maximum number of results to return."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all VMs which names start with C(centos) and
# belong to cluster C(west):
- ovirt_vms_facts:
pattern: name=centos* and cluster=west
- debug:
var: ovirt_vms
'''
# Fixed user-facing typo in the returned-facts documentation
# ("attribues" -> "attributes").
RETURN = '''
ovirt_vms:
    description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
        all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
    returned: On success.
    type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
    """Entry point: query oVirt/RHV for VMs and expose them as facts.

    Builds the module argument spec, lists VMs matching the search
    parameters, converts each SDK object to a plain dict and exits with
    the ``ovirt_vms`` fact.
    """
    argument_spec = ovirt_facts_full_argument_spec(
        pattern=dict(default='', required=False),
        all_content=dict(default=False, type='bool'),
        case_sensitive=dict(default=True, type='bool'),
        max=dict(default=None, type='int'),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)
    try:
        # Authenticate and fetch matching VMs from the engine.
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        vms_service = connection.system_service().vms_service()
        vms = vms_service.list(
            search=module.params['pattern'],
            all_content=module.params['all_content'],
            case_sensitive=module.params['case_sensitive'],
            max=module.params['max'],
        )
        # Serialize SDK structs into plain dicts for the fact payload.
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_vms=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in vms
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out sessions we created ourselves (no pre-existing token).
        connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
    main()
|
andrew12/python-coinroll | refs/heads/master | fibonacci.py | 1 | import coinroll
from util import color
import argparse
from decimal import Decimal
# Command-line options: credentials plus betting parameters. 'base' is the
# stake unit, 'max' caps a single bet, 'target' is the balance at which to
# stop, and 'sequence' seeds the Fibonacci staking progression.
parser = argparse.ArgumentParser()
parser.add_argument('user')
parser.add_argument('password')
parser.add_argument('-b', '--base', type=Decimal, default=None)
parser.add_argument('-x', '--max', type=Decimal, default=Decimal('Infinity'))
parser.add_argument('-t', '--target', type=Decimal, default=Decimal('Infinity'))
parser.add_argument('-s', '--sequence', type=int, nargs='*', default=[1, 1])
args = parser.parse_args()
# Fixed bet threshold passed to the site; presumably a roll under this
# number wins — TODO confirm against the coinroll API.
lessthan = 32440
bot = coinroll.Coinroll(args.user, args.password)
r = bot.stats()
game = coinroll.gameinfo(lessthan)
base = args.base or game.minbet
seq = args.sequence
this = r.balance  # starting balance, used to report net profit per roll
index = 0  # current position in the Fibonacci staking sequence
print('balance | amount | lucky < less | diff | profit')
try:
    # Fibonacci staking loop: move two steps back in the sequence after a
    # win, one step forward (extending the sequence on demand) after a loss.
    while True:
        amount = seq[index] * base
        # Stop when out of funds, when the target balance is reached, or
        # when the next stake would exceed the per-bet cap.
        if r.balance < amount or r.balance >= args.target or args.max < amount:
            break
        r = bot.bet(lessthan, amount)
        print('{:.8f} | {:.8f} | {} < {:5d} | {} | {} | {}'.format(r.balance,
            r.amount, color(r.diff, r.lucky, '5d'), r.lessthan, color(r.diff),
            color(r.profit), color(r.balance - this)))
        if r.win:
            index -= 2
            if index < 0:
                index = 0
        else:
            index += 1
            if len(seq) <= index:
                seq.append(seq[-2] + seq[-1])
except KeyboardInterrupt:
    # Ctrl-C cleanly stops the betting loop.
    pass
|
Nikea/VisTrails | refs/heads/master | vistrails/packages/tabledata/common.py | 2 | try:
import numpy
except ImportError: # pragma: no cover
numpy = None
from vistrails.core.modules.basic_modules import List, ListType
from vistrails.core.modules.config import ModuleSettings
from vistrails.core.modules.output_modules import OutputModule, FileMode
from vistrails.core.modules.vistrails_module import Module, ModuleError, \
Converter
class InternalModuleError(Exception):
    """Track ModuleError in subclasses."""
    def raise_module_error(self, module_obj):
        # Re-raise this error as a VisTrails ModuleError attached to the
        # given module. NOTE: relies on Python 2's Exception.message.
        raise ModuleError(module_obj, self.message)
class TableObject(object):
    """In-memory column-oriented table backing the Table module."""
    columns = None  # the number of columns in the table
    rows = None  # the number of rows in the table
    names = None  # the names of the columns
    name = None  # a name for the table (useful for joins, etc.)
    def __init__(self, columns, nb_rows, names):
        # `columns` is a list of column value-lists; all are assumed to
        # have `nb_rows` entries.
        self.columns = len(columns)
        self.rows = nb_rows
        self.names = names
        self._columns = columns
    def get_column(self, i, numeric=False): # pragma: no cover
        """Gets a column from the table as a list or numpy array.
        If numeric=False (the default), the data is returned 'as-is'. It might
        either be bytes (=str), unicode or number (int, long, float).
        If numeric=True, the data is returned as a numpy array if numpy is
        available, or as a list of floats.
        """
        if numeric and numpy is not None:
            return numpy.array(self._columns[i], dtype=numpy.float32)
        else:
            return self._columns[i]
    def get_column_by_name(self, name, numeric=False):
        """Gets a column from its name.
        This convenience methods looks up the right column index if names are
        available and calls get_column().
        You shouldn't need to override this method, get_column() should be
        sufficient.
        Raises KeyError if the name is unknown.
        """
        try:
            col = self.names.index(name)
        except ValueError:
            raise KeyError(name)
        else:
            return self.get_column(col, numeric)
    @classmethod
    def from_dicts(cls, dicts, keys=None):
        """Builds a table from an iterable of row dicts.
        When `keys` is omitted, the first row's keys define the columns and
        every later row must contain all of them.
        """
        iterator = iter(dicts)
        try:
            first = next(iterator)
        except StopIteration:
            # Empty input: only valid when the column set was given.
            if keys is None:
                raise ValueError("No entry in sequence")
            return cls([[]] * len(keys), 0, list(keys))
        if keys is None:
            keys = first.keys()
        columns = [[first[key]] for key in keys]
        count = 1
        for dct in iterator:
            for i, key in enumerate(keys):
                try:
                    v = dct[key]
                except KeyError:
                    raise ValueError("Entry %d has no key %r" % (count, key))
                else:
                    columns[i].append(v)
            count += 1
        return cls(columns, count, keys)
class Table(Module):
    """Module wrapper around a TableObject; names unnamed tables on output."""
    _input_ports = [('name', '(org.vistrails.vistrails.basic:String)')]
    _output_ports = [('value', 'Table')]

    def set_output(self, port_name, value):
        # When emitting the table itself (not in list mode), give it the
        # user-supplied name if it does not already have one.
        is_table_output = (port_name == 'value' and value is not None and
                           self.list_depth == 0)
        if is_table_output and value.name is None:
            value.name = self.force_get_input('name', None)
        Module.set_output(self, port_name, value)
def choose_column(nb_columns, column_names=None, name=None, index=None):
    """Selects a column in a table either by name or index.

    If both are specified, the function will make sure that they represent
    the same column. Raises ValueError on any mismatch or invalid input.
    """
    if name is None and index is None:
        raise ValueError("No column name nor index specified")
    if name is None:
        # Index-only lookup: just bounds-check it.
        if not (0 <= index < nb_columns):
            raise ValueError("No column %d, table only has %d columns" % (
                index, nb_columns))
        return index
    # Name-based lookup (optionally cross-checked against `index`).
    if isinstance(name, unicode):
        name = name.encode('utf-8')
    if column_names is None:
        raise ValueError("Unable to get column by name: table doesn't "
                         "have column names")
    try:
        name_index = column_names.index(name)
    except ValueError:
        # Retry with surrounding whitespace stripped before giving up.
        try:
            name_index = column_names.index(name.strip())
        except ValueError:
            raise ValueError("Column name was not found: %r" % name)
    if index is not None and name_index != index:
        raise ValueError("Both a column name and index were "
                         "specified, and they don't agree")
    return name_index
def choose_columns(nb_columns, column_names=None, names=None, indexes=None):
    """Selects a list of columns from a table.

    If both the names and indexes lists are specified, the function will
    make sure that they represent the same list of columns.
    Columns may appear more than once.
    """
    if names is None and indexes is None:
        raise ValueError("No column names nor indexes specified")
    if names is None:
        # Index-only selection: validate each index against the table width.
        for pos in indexes:
            if not (0 <= pos < nb_columns):
                raise ValueError("No column %d, table only has %d columns" % (
                    pos, nb_columns))
        return indexes
    if column_names is None:
        raise ValueError("Unable to get column by names: table "
                         "doesn't have column names")
    def resolve(name):
        # Look one name up, retrying with surrounding whitespace stripped.
        if isinstance(name, unicode):
            name = name.encode('utf-8')
        try:
            return column_names.index(name)
        except ValueError:
            try:
                return column_names.index(name.strip())
            except ValueError:
                raise ValueError("Column name was not found: %r" % name)
    result = [resolve(name) for name in names]
    if indexes is not None and result != indexes:
        raise ValueError("Both column names and indexes were "
                         "specified, and they don't agree")
    return result
class ExtractColumn(Module):
    """Gets a single column from a table, as a list.
    Specifying one of 'column_name' or 'column_index' is sufficient; if you
    provide both, the module will check that the column has the expected name.
    """
    _input_ports = [
        ('table', Table),
        ('column_name', '(org.vistrails.vistrails.basic:String)',
         {'optional': True}),
        ('column_index', '(org.vistrails.vistrails.basic:Integer)',
         {'optional': True}),
        ('numeric', '(org.vistrails.vistrails.basic:Boolean)',
         {'optional': True, 'defaults': "['False']"})]
    _output_ports = [
        ('value', '(org.vistrails.vistrails.basic:List)')]
    def compute(self):
        # Resolve the requested column, then emit its values (optionally
        # coerced to numeric form by get_column()).
        table = self.get_input('table')
        try:
            column_idx = choose_column(
                table.columns,
                column_names=table.names,
                name=self.force_get_input('column_name', None),
                index=self.force_get_input('column_index', None))
        except ValueError, e:
            # Surface selection problems as module errors in the pipeline.
            raise ModuleError(self, e.message)
        self.set_output('value', table.get_column(
            column_idx,
            self.get_input('numeric', allow_default=True)))
class BuildTable(Module):
    """Builds a table by putting together columns from multiple sources.
    Input can be a mix of lists, which will be used as single columns, and
    whole tables, whose column names will be mangled.
    All inputs must agree on the number of rows.
    """
    _settings = ModuleSettings(configure_widget=
            'vistrails.packages.tabledata.widgets:BuildTableWidget')
    _output_ports = [('value', Table)]
    def __init__(self):
        Module.__init__(self)
        # Port order is set from the module's configured input port specs
        # in transfer_attrs(); it determines column order in the result.
        self.input_ports_order = []
    def transfer_attrs(self, module):
        Module.transfer_attrs(self, module)
        self.input_ports_order = [p.name for p in module.input_port_specs]
    def compute(self):
        items = None
        if self.input_ports_order: # pragma: no branch
            items = [(p, self.get_input(p))
                     for p in self.input_ports_order]
        if not items:
            raise ModuleError(self, "No inputs were provided")
        nb_rows = None  # row count established by the first input seen
        cols = []
        names = []
        for portname, item in items:
            if isinstance(item, TableObject):
                # Whole table: splice in all of its columns, checking that
                # its row count matches the inputs seen so far.
                if nb_rows is not None:
                    if item.rows != nb_rows:
                        raise ModuleError(
                                self,
                                "Different row counts: %d != %d" % (
                                item.rows, nb_rows))
                else:
                    nb_rows = item.rows
                cols.extend(item.get_column(c)
                            for c in xrange(item.columns))
                if item.names is not None:
                    names.extend(item.names)
                else:
                    # Unnamed table: synthesize "<port> col <i>" names.
                    names.extend("%s col %d" % (portname, i)
                                 for i in xrange(len(cols) - len(names)))
            else:
                # Plain list: becomes a single column named after its port.
                if nb_rows is not None:
                    if len(item) != nb_rows:
                        raise ModuleError(
                                self,
                                "Different row counts: %d != %d" % (
                                len(item), nb_rows))
                else:
                    nb_rows = len(item)
                cols.append(item)
                names.append(portname)
        self.set_output('value', TableObject(cols, nb_rows, names))
class SingleColumnTable(Converter):
    """Automatic Converter module from List to Table.

    Wraps the incoming sequence as a one-column table whose single column
    is named 'converted_list'.
    """
    _input_ports = [('in_value', List)]
    _output_ports = [('out_value', Table)]

    def compute(self):
        values = self.get_input('in_value')
        # Materialize non-list sequences so the table owns a real list.
        if not isinstance(values, ListType):
            values = list(values)
        table = TableObject(
            [values],             # columns (just the one)
            len(values),          # nb_rows
            ['converted_list'])   # names
        self.set_output('out_value', table)
class TableToFileMode(FileMode):
    """Output mode that renders a table as a standalone UTF-8 HTML page."""
    formats = ['html']

    def write_html(self, table):
        """Return the HTML rendering of *table* as a unicode string.

        Column headers come from ``table.names`` when available, otherwise
        synthetic ``col N`` labels are generated.  Cell values are decoded
        from UTF-8 bytes (with replacement on errors) or stringified.
        """
        document = ['<!DOCTYPE html>\n'
                    '<html>\n <head>\n'
                    ' <meta http-equiv="Content-type" content="text/html; '
                    'charset=utf-8" />\n'
                    ' <title>Exported table</title>\n'
                    ' <style type="text/css">\n'
                    'table { border-collapse: collapse; }\n'
                    'td, th { border: 1px solid black; }\n'
                    ' </style>\n'
                    ' </head>\n <body>\n <table>\n']
        if table.names is not None:
            names = table.names
        else:
            names = ['col %d' % n for n in xrange(table.columns)]
        document.append('<tr>\n')
        document.extend(' <th>%s</th>\n' % name for name in names)
        document.append('</tr>\n')
        # Fetch each column once; cells are then addressed as [col][row].
        columns = [table.get_column(col) for col in xrange(table.columns)]
        for row in xrange(table.rows):
            document.append('<tr>\n')
            for col in xrange(table.columns):
                elem = columns[col][row]
                if isinstance(elem, bytes):
                    elem = elem.decode('utf-8', 'replace')
                elif not isinstance(elem, unicode):
                    elem = unicode(elem)
                document.append(' <td>%s</td>\n' % elem)
            document.append('</tr>\n')
        document.append(' </table>\n </body>\n</html>\n')
        return ''.join(document)

    def compute_output(self, output_module, configuration=None):
        """Write the HTML rendering of the module's 'value' input to a file."""
        value = output_module.get_input("value")
        filename = self.get_filename(configuration, suffix='.html')
        with open(filename, 'wb') as fp:
            # BUG FIX: write_html() returns unicode (cells are explicitly
            # decoded above), but the file is opened in binary mode, so
            # writing the unicode object directly raised UnicodeEncodeError
            # for any non-ASCII cell.  Encode as UTF-8 explicitly, matching
            # the charset declared in the document's <meta> tag.
            fp.write(self.write_html(value).encode('utf-8'))
class TableOutput(OutputModule):
    """Output module for tables; currently supports file (HTML) output."""
    _settings = ModuleSettings(configure_widget="vistrails.gui.modules.output_configuration:OutputModuleConfigurationWidget")
    _input_ports = [('value', 'Table')]
    _output_modes = [TableToFileMode]
# Modules registered by this package; Table is abstract (never instantiated
# directly) and SingleColumnTable is hidden from the module palette.
_modules = [(Table, {'abstract': True}), ExtractColumn, BuildTable,
            (SingleColumnTable, {'hide_descriptor': True}),
            TableOutput]
|
mspark93/VTK | refs/heads/master | ThirdParty/Twisted/twisted/python/test/test_util.py | 23 | # -*- test-case-name: twisted.python.test.test_util
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.util}.
"""
from __future__ import division, absolute_import
import os.path, sys
import shutil, errno, warnings
try:
import pwd, grp
except ImportError:
pwd = grp = None
from twisted.trial import unittest
from twisted.trial.util import suppress as SUPPRESS
from twisted.python.compat import _PY3
from twisted.python import util
from twisted.python.reflect import fullyQualifiedName
from twisted.internet import reactor
from twisted.internet.interfaces import IReactorProcess
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.defer import Deferred
from twisted.internet.error import ProcessDone
if _PY3:
MockOS = None
else:
from twisted.test.test_process import MockOS
class UtilTestCase(unittest.TestCase):
    """
    Tests for assorted helpers in L{twisted.python.util}.
    """
    def testUniq(self):
        # uniquify() keeps the first occurrence of each element, preserving
        # the original order.
        l = ["a", 1, "ab", "a", 3, 4, 1, 2, 2, 4, 6]
        self.assertEqual(util.uniquify(l), ["a", 1, "ab", 3, 4, 2, 6])
    def testRaises(self):
        # util.raises() reports whether calling the function raises the
        # given exception type; unrelated exceptions must propagate.
        self.failUnless(util.raises(ZeroDivisionError, divmod, 1, 0))
        self.failIf(util.raises(ZeroDivisionError, divmod, 0, 1))
        try:
            util.raises(TypeError, divmod, 1, 0)
        except ZeroDivisionError:
            pass
        else:
            raise unittest.FailTest("util.raises didn't raise when it should have")
    def test_uidFromNumericString(self):
        """
        When L{uidFromString} is called with a base-ten string representation
        of an integer, it returns the integer.
        """
        self.assertEqual(util.uidFromString("100"), 100)
    def test_uidFromUsernameString(self):
        """
        When L{uidFromString} is called with a username, it returns the UID
        of the user with that name.
        """
        pwent = pwd.getpwuid(os.getuid())
        self.assertEqual(util.uidFromString(pwent.pw_name), pwent.pw_uid)
    if pwd is None:
        test_uidFromUsernameString.skip = (
            "Username/UID conversion requires the pwd module.")
    def test_gidFromNumericString(self):
        """
        When L{gidFromString} is called with a base-ten string representation
        of an integer, it returns the integer.
        """
        self.assertEqual(util.gidFromString("100"), 100)
    def test_gidFromGroupnameString(self):
        """
        When L{gidFromString} is called with a group name, it returns the GID
        of the group with that name.
        """
        grent = grp.getgrgid(os.getgid())
        self.assertEqual(util.gidFromString(grent.gr_name), grent.gr_gid)
    if grp is None:
        test_gidFromGroupnameString.skip = (
            "Group Name/GID conversion requires the grp module.")
class NameToLabelTests(unittest.TestCase):
    """
    Tests for L{nameToLabel}.
    """
    def test_nameToLabel(self):
        """
        Test the various kinds of inputs L{nameToLabel} supports.
        """
        # (input, expected label): camelCase identifiers are split on the
        # capitals and the first letter is upper-cased.
        nameData = [
            ('f', 'F'),
            ('fo', 'Fo'),
            ('foo', 'Foo'),
            ('fooBar', 'Foo Bar'),
            ('fooBarBaz', 'Foo Bar Baz'),
            ]
        for inp, out in nameData:
            got = util.nameToLabel(inp)
            self.assertEqual(
                got, out,
                "nameToLabel(%r) == %r != %r" % (inp, got, out))
class UntilConcludesTests(unittest.TestCase):
    """
    Tests for L{untilConcludes}, an C{EINTR} helper.
    """
    def test_uninterruptably(self):
        """
        L{untilConcludes} calls the function passed to it until the function
        does not raise either L{OSError} or L{IOError} with C{errno} of
        C{EINTR}. It otherwise completes with the same result as the function
        passed to it.
        """
        def f(a, b):
            # Pop the next scripted outcome from the end of self.exceptions;
            # None means succeed, a class means raise it with EINTR.
            self.calls += 1
            exc = self.exceptions.pop()
            if exc is not None:
                raise exc(errno.EINTR, "Interrupted system call!")
            return a + b
        # No failure scripted: exactly one call.
        self.exceptions = [None]
        self.calls = 0
        self.assertEqual(util.untilConcludes(f, 1, 2), 3)
        self.assertEqual(self.calls, 1)
        # Two EINTR failures (popped from the end) are retried, then the
        # trailing None lets the third attempt succeed.
        self.exceptions = [None, OSError, IOError]
        self.calls = 0
        self.assertEqual(util.untilConcludes(f, 2, 3), 5)
        self.assertEqual(self.calls, 3)
class SwitchUIDTest(unittest.TestCase):
"""
Tests for L{util.switchUID}.
"""
if getattr(os, "getuid", None) is None:
skip = "getuid/setuid not available"
def setUp(self):
self.mockos = MockOS()
self.patch(util, "os", self.mockos)
self.patch(util, "initgroups", self.initgroups)
self.initgroupsCalls = []
def initgroups(self, uid, gid):
"""
Save L{util.initgroups} calls in C{self.initgroupsCalls}.
"""
self.initgroupsCalls.append((uid, gid))
def test_uid(self):
"""
L{util.switchUID} calls L{util.initgroups} and then C{os.setuid} with
the given uid.
"""
util.switchUID(12000, None)
self.assertEqual(self.initgroupsCalls, [(12000, None)])
self.assertEqual(self.mockos.actions, [("setuid", 12000)])
def test_euid(self):
"""
L{util.switchUID} calls L{util.initgroups} and then C{os.seteuid} with
the given uid if the C{euid} parameter is set to C{True}.
"""
util.switchUID(12000, None, True)
self.assertEqual(self.initgroupsCalls, [(12000, None)])
self.assertEqual(self.mockos.seteuidCalls, [12000])
def test_currentUID(self):
"""
If the current uid is the same as the uid passed to L{util.switchUID},
then initgroups does not get called, but a warning is issued.
"""
uid = self.mockos.getuid()
util.switchUID(uid, None)
self.assertEqual(self.initgroupsCalls, [])
self.assertEqual(self.mockos.actions, [])
warnings = self.flushWarnings([util.switchUID])
self.assertEqual(len(warnings), 1)
self.assertIn('tried to drop privileges and setuid %i' % uid,
warnings[0]['message'])
self.assertIn('but uid is already %i' % uid, warnings[0]['message'])
def test_currentEUID(self):
"""
If the current euid is the same as the euid passed to L{util.switchUID},
then initgroups does not get called, but a warning is issued.
"""
euid = self.mockos.geteuid()
util.switchUID(euid, None, True)
self.assertEqual(self.initgroupsCalls, [])
self.assertEqual(self.mockos.seteuidCalls, [])
warnings = self.flushWarnings([util.switchUID])
self.assertEqual(len(warnings), 1)
self.assertIn('tried to drop privileges and seteuid %i' % euid,
warnings[0]['message'])
self.assertIn('but euid is already %i' % euid, warnings[0]['message'])
class TestMergeFunctionMetadata(unittest.TestCase):
"""
Tests for L{mergeFunctionMetadata}.
"""
def test_mergedFunctionBehavesLikeMergeTarget(self):
"""
After merging C{foo}'s data into C{bar}, the returned function behaves
as if it is C{bar}.
"""
foo_object = object()
bar_object = object()
def foo():
return foo_object
def bar(x, y, ab, c=10, *d, **e):
(a, b) = ab
return bar_object
baz = util.mergeFunctionMetadata(foo, bar)
self.assertIdentical(baz(1, 2, (3, 4), quux=10), bar_object)
def test_moduleIsMerged(self):
"""
Merging C{foo} into C{bar} returns a function with C{foo}'s
C{__module__}.
"""
def foo():
pass
def bar():
pass
bar.__module__ = 'somewhere.else'
baz = util.mergeFunctionMetadata(foo, bar)
self.assertEqual(baz.__module__, foo.__module__)
def test_docstringIsMerged(self):
"""
Merging C{foo} into C{bar} returns a function with C{foo}'s docstring.
"""
def foo():
"""
This is foo.
"""
def bar():
"""
This is bar.
"""
baz = util.mergeFunctionMetadata(foo, bar)
self.assertEqual(baz.__doc__, foo.__doc__)
def test_nameIsMerged(self):
"""
Merging C{foo} into C{bar} returns a function with C{foo}'s name.
"""
def foo():
pass
def bar():
pass
baz = util.mergeFunctionMetadata(foo, bar)
self.assertEqual(baz.__name__, foo.__name__)
def test_instanceDictionaryIsMerged(self):
"""
Merging C{foo} into C{bar} returns a function with C{bar}'s
dictionary, updated by C{foo}'s.
"""
def foo():
pass
foo.a = 1
foo.b = 2
def bar():
pass
bar.b = 3
bar.c = 4
baz = util.mergeFunctionMetadata(foo, bar)
self.assertEqual(foo.a, baz.a)
self.assertEqual(foo.b, baz.b)
self.assertEqual(bar.c, baz.c)
class OrderedDictTest(unittest.TestCase):
    """
    Tests for L{util.OrderedDict}, a dict that remembers insertion order.
    """
    def testOrderedDict(self):
        d = util.OrderedDict()
        d['a'] = 'b'
        d['b'] = 'a'
        d[3] = 12
        d[1234] = 4321
        # repr(), values(), keys() and iteritems() all follow insertion
        # order, and deletion preserves the order of the remaining keys.
        self.assertEqual(repr(d), "{'a': 'b', 'b': 'a', 3: 12, 1234: 4321}")
        self.assertEqual(d.values(), ['b', 'a', 12, 4321])
        del d[3]
        self.assertEqual(repr(d), "{'a': 'b', 'b': 'a', 1234: 4321}")
        self.assertEqual(d, {'a': 'b', 'b': 'a', 1234:4321})
        self.assertEqual(d.keys(), ['a', 'b', 1234])
        self.assertEqual(list(d.iteritems()),
                         [('a', 'b'), ('b','a'), (1234, 4321)])
        # popitem() removes and returns the most recently added pair.
        item = d.popitem()
        self.assertEqual(item, (1234, 4321))
    def testInitialization(self):
        d = util.OrderedDict({'monkey': 'ook',
                              'apple': 'red'})
        self.failUnless(d._order)
        # A sequence of pairs fixes the initial ordering exactly.
        d = util.OrderedDict(((1,1),(3,3),(2,2),(0,0)))
        self.assertEqual(repr(d), "{1: 1, 3: 3, 2: 2, 0: 0}")
class InsensitiveDictTest(unittest.TestCase):
"""
Tests for L{util.InsensitiveDict}.
"""
def test_preserve(self):
"""
L{util.InsensitiveDict} preserves the case of keys if constructed with
C{preserve=True}.
"""
dct = util.InsensitiveDict({'Foo':'bar', 1:2, 'fnz':{1:2}}, preserve=1)
self.assertEqual(dct['fnz'], {1:2})
self.assertEqual(dct['foo'], 'bar')
self.assertEqual(dct.copy(), dct)
self.assertEqual(dct['foo'], dct.get('Foo'))
self.assertIn(1, dct)
self.assertIn('foo', dct)
# Make eval() work, urrrrgh:
InsensitiveDict = util.InsensitiveDict
self.assertEqual(eval(repr(dct)), dct)
keys=['Foo', 'fnz', 1]
for x in keys:
self.assertIn(x, dct.keys())
self.assertIn((x, dct[x]), dct.items())
self.assertEqual(len(keys), len(dct))
del dct[1]
del dct['foo']
self.assertEqual(dct.keys(), ['fnz'])
def test_noPreserve(self):
"""
L{util.InsensitiveDict} does not preserves the case of keys if
constructed with C{preserve=False}.
"""
dct = util.InsensitiveDict({'Foo':'bar', 1:2, 'fnz':{1:2}}, preserve=0)
keys=['foo', 'fnz', 1]
for x in keys:
self.assertIn(x, dct.keys())
self.assertIn((x, dct[x]), dct.items())
self.assertEqual(len(keys), len(dct))
del dct[1]
del dct['foo']
self.assertEqual(dct.keys(), ['fnz'])
def test_unicode(self):
"""
Unicode keys are case insensitive.
"""
d = util.InsensitiveDict(preserve=False)
d[u"Foo"] = 1
self.assertEqual(d[u"FOO"], 1)
self.assertEqual(d.keys(), [u"foo"])
def test_bytes(self):
"""
Bytes keys are case insensitive.
"""
d = util.InsensitiveDict(preserve=False)
d[b"Foo"] = 1
self.assertEqual(d[b"FOO"], 1)
self.assertEqual(d.keys(), [b"foo"])
class PasswordTestingProcessProtocol(ProcessProtocol):
    """
    Write the string C{"secret\n"} to a subprocess and then collect all of
    its output and fire a Deferred with it when the process ends.

    The C{finished} Deferred is attached by the caller (see
    L{GetPasswordTest.test_stdin}) before the process is spawned; it fires
    with a C{(reason, output)} tuple.
    """
    def connectionMade(self):
        # Accumulates (fd, bytes) pairs received from the child.
        self.output = []
        self.transport.write('secret\n')
    def childDataReceived(self, fd, output):
        self.output.append((fd, output))
    def processEnded(self, reason):
        self.finished.callback((reason, self.output))
class GetPasswordTest(unittest.TestCase):
    """
    Tests for L{util.getPassword}, run against a real child process.
    """
    if not IReactorProcess.providedBy(reactor):
        skip = "Process support required to test getPassword"
    def test_stdin(self):
        """
        Making sure getPassword accepts a password from standard input by
        running a child process which uses getPassword to read in a string
        which it then writes it out again. Write a string to the child
        process and then read one and make sure it is the right string.
        """
        p = PasswordTestingProcessProtocol()
        p.finished = Deferred()
        # Spawn a child Python that echoes whatever getPassword() read.
        reactor.spawnProcess(
            p,
            sys.executable,
            [sys.executable,
             '-c',
             ('import sys\n'
              'from twisted.python.util import getPassword\n'
              'sys.stdout.write(getPassword())\n'
              'sys.stdout.flush()\n')],
            # Propagate sys.path so the child can import twisted.
            env={'PYTHONPATH': os.pathsep.join(sys.path)})
        def processFinished(result):
            (reason, output) = result
            reason.trap(ProcessDone)
            # fd 1 is the child's stdout.
            self.assertIn((1, 'secret'), output)
        return p.finished.addCallback(processFinished)
class SearchUpwardsTest(unittest.TestCase):
    """
    Tests for L{util.searchupwards}.
    """
    def testSearchupwards(self):
        # Build a tree in which only the top-level 'searchupwards' directory
        # contains foo.txt together with both bar/ and bam/ subdirectories.
        os.makedirs('searchupwards/a/b/c')
        file('searchupwards/foo.txt', 'w').close()  # py2-only file() builtin
        file('searchupwards/a/foo.txt', 'w').close()
        file('searchupwards/a/b/c/foo.txt', 'w').close()
        os.mkdir('searchupwards/bar')
        os.mkdir('searchupwards/bam')
        os.mkdir('searchupwards/a/bar')
        os.mkdir('searchupwards/a/b/bam')
        actual=util.searchupwards('searchupwards/a/b/c',
                                  files=['foo.txt'],
                                  dirs=['bar', 'bam'])
        expected=os.path.abspath('searchupwards') + os.sep
        self.assertEqual(actual, expected)
        # Once the tree is gone the search must fail, which is reported as
        # a None return.
        shutil.rmtree('searchupwards')
        actual=util.searchupwards('searchupwards/a/b/c',
                                  files=['foo.txt'],
                                  dirs=['bar', 'bam'])
        expected=None
        self.assertEqual(actual, expected)
class IntervalDifferentialTestCase(unittest.TestCase):
def testDefault(self):
d = iter(util.IntervalDifferential([], 10))
for i in range(100):
self.assertEqual(d.next(), (10, None))
def testSingle(self):
d = iter(util.IntervalDifferential([5], 10))
for i in range(100):
self.assertEqual(d.next(), (5, 0))
def testPair(self):
d = iter(util.IntervalDifferential([5, 7], 10))
for i in range(100):
self.assertEqual(d.next(), (5, 0))
self.assertEqual(d.next(), (2, 1))
self.assertEqual(d.next(), (3, 0))
self.assertEqual(d.next(), (4, 1))
self.assertEqual(d.next(), (1, 0))
self.assertEqual(d.next(), (5, 0))
self.assertEqual(d.next(), (1, 1))
self.assertEqual(d.next(), (4, 0))
self.assertEqual(d.next(), (3, 1))
self.assertEqual(d.next(), (2, 0))
self.assertEqual(d.next(), (5, 0))
self.assertEqual(d.next(), (0, 1))
def testTriple(self):
d = iter(util.IntervalDifferential([2, 4, 5], 10))
for i in range(100):
self.assertEqual(d.next(), (2, 0))
self.assertEqual(d.next(), (2, 0))
self.assertEqual(d.next(), (0, 1))
self.assertEqual(d.next(), (1, 2))
self.assertEqual(d.next(), (1, 0))
self.assertEqual(d.next(), (2, 0))
self.assertEqual(d.next(), (0, 1))
self.assertEqual(d.next(), (2, 0))
self.assertEqual(d.next(), (0, 2))
self.assertEqual(d.next(), (2, 0))
self.assertEqual(d.next(), (0, 1))
self.assertEqual(d.next(), (2, 0))
self.assertEqual(d.next(), (1, 2))
self.assertEqual(d.next(), (1, 0))
self.assertEqual(d.next(), (0, 1))
self.assertEqual(d.next(), (2, 0))
self.assertEqual(d.next(), (2, 0))
self.assertEqual(d.next(), (0, 1))
self.assertEqual(d.next(), (0, 2))
def testInsert(self):
d = iter(util.IntervalDifferential([], 10))
self.assertEqual(d.next(), (10, None))
d.addInterval(3)
self.assertEqual(d.next(), (3, 0))
self.assertEqual(d.next(), (3, 0))
d.addInterval(6)
self.assertEqual(d.next(), (3, 0))
self.assertEqual(d.next(), (3, 0))
self.assertEqual(d.next(), (0, 1))
self.assertEqual(d.next(), (3, 0))
self.assertEqual(d.next(), (3, 0))
self.assertEqual(d.next(), (0, 1))
def testRemove(self):
d = iter(util.IntervalDifferential([3, 5], 10))
self.assertEqual(d.next(), (3, 0))
self.assertEqual(d.next(), (2, 1))
self.assertEqual(d.next(), (1, 0))
d.removeInterval(3)
self.assertEqual(d.next(), (4, 0))
self.assertEqual(d.next(), (5, 0))
d.removeInterval(5)
self.assertEqual(d.next(), (10, None))
self.assertRaises(ValueError, d.removeInterval, 10)
class Record(util.FancyEqMixin):
    """
    Trivial user of L{FancyEqMixin} used by tests.
    """
    # Equality considers only these two attributes.
    compareAttributes = ('a', 'b')
    def __init__(self, a, b):
        self.a = a
        self.b = b
class DifferentRecord(util.FancyEqMixin):
    """
    Trivial user of L{FancyEqMixin} which is not related to L{Record}.

    It has the same C{compareAttributes}, so it exercises the rule that
    unrelated classes never compare equal even with matching attributes.
    """
    compareAttributes = ('a', 'b')
    def __init__(self, a, b):
        self.a = a
        self.b = b
class DerivedRecord(Record):
    """
    A class with an inheritance relationship to L{Record}.

    It inherits L{Record}'s C{compareAttributes} and constructor unchanged.
    """
class EqualToEverything(object):
    """
    Instances of this class compare equal to any object whatsoever.
    """
    def __eq__(self, other):
        # Equality holds unconditionally, whatever the other operand is.
        return True

    def __ne__(self, other):
        # Inequality is the fixed negation of __eq__.
        return not self.__eq__(other)
class EqualToNothing(object):
    """
    Instances of this class never compare equal to anything, themselves
    included.
    """
    def __eq__(self, other):
        # Equality never holds, whatever the other operand is.
        return False

    def __ne__(self, other):
        # Inequality is the fixed negation of __eq__.
        return not self.__eq__(other)
class EqualityTests(unittest.TestCase):
"""
Tests for L{FancyEqMixin}.
"""
def test_identity(self):
"""
Instances of a class which mixes in L{FancyEqMixin} but which
defines no comparison attributes compare by identity.
"""
class Empty(util.FancyEqMixin):
pass
self.assertFalse(Empty() == Empty())
self.assertTrue(Empty() != Empty())
empty = Empty()
self.assertTrue(empty == empty)
self.assertFalse(empty != empty)
def test_equality(self):
"""
Instances of a class which mixes in L{FancyEqMixin} should compare
equal if all of their attributes compare equal. They should not
compare equal if any of their attributes do not compare equal.
"""
self.assertTrue(Record(1, 2) == Record(1, 2))
self.assertFalse(Record(1, 2) == Record(1, 3))
self.assertFalse(Record(1, 2) == Record(2, 2))
self.assertFalse(Record(1, 2) == Record(3, 4))
def test_unequality(self):
"""
Unequality between instances of a particular L{record} should be
defined as the negation of equality.
"""
self.assertFalse(Record(1, 2) != Record(1, 2))
self.assertTrue(Record(1, 2) != Record(1, 3))
self.assertTrue(Record(1, 2) != Record(2, 2))
self.assertTrue(Record(1, 2) != Record(3, 4))
def test_differentClassesEquality(self):
"""
Instances of different classes which mix in L{FancyEqMixin} should not
compare equal.
"""
self.assertFalse(Record(1, 2) == DifferentRecord(1, 2))
def test_differentClassesInequality(self):
"""
Instances of different classes which mix in L{FancyEqMixin} should
compare unequal.
"""
self.assertTrue(Record(1, 2) != DifferentRecord(1, 2))
def test_inheritedClassesEquality(self):
"""
An instance of a class which derives from a class which mixes in
L{FancyEqMixin} should compare equal to an instance of the base class
if and only if all of their attributes compare equal.
"""
self.assertTrue(Record(1, 2) == DerivedRecord(1, 2))
self.assertFalse(Record(1, 2) == DerivedRecord(1, 3))
self.assertFalse(Record(1, 2) == DerivedRecord(2, 2))
self.assertFalse(Record(1, 2) == DerivedRecord(3, 4))
def test_inheritedClassesInequality(self):
"""
An instance of a class which derives from a class which mixes in
L{FancyEqMixin} should compare unequal to an instance of the base
class if any of their attributes compare unequal.
"""
self.assertFalse(Record(1, 2) != DerivedRecord(1, 2))
self.assertTrue(Record(1, 2) != DerivedRecord(1, 3))
self.assertTrue(Record(1, 2) != DerivedRecord(2, 2))
self.assertTrue(Record(1, 2) != DerivedRecord(3, 4))
def test_rightHandArgumentImplementsEquality(self):
"""
The right-hand argument to the equality operator is given a chance
to determine the result of the operation if it is of a type
unrelated to the L{FancyEqMixin}-based instance on the left-hand
side.
"""
self.assertTrue(Record(1, 2) == EqualToEverything())
self.assertFalse(Record(1, 2) == EqualToNothing())
def test_rightHandArgumentImplementsUnequality(self):
"""
The right-hand argument to the non-equality operator is given a
chance to determine the result of the operation if it is of a type
unrelated to the L{FancyEqMixin}-based instance on the left-hand
side.
"""
self.assertFalse(Record(1, 2) != EqualToEverything())
self.assertTrue(Record(1, 2) != EqualToNothing())
class RunAsEffectiveUserTests(unittest.TestCase):
"""
Test for the L{util.runAsEffectiveUser} function.
"""
if getattr(os, "geteuid", None) is None:
skip = "geteuid/seteuid not available"
def setUp(self):
self.mockos = MockOS()
self.patch(os, "geteuid", self.mockos.geteuid)
self.patch(os, "getegid", self.mockos.getegid)
self.patch(os, "seteuid", self.mockos.seteuid)
self.patch(os, "setegid", self.mockos.setegid)
def _securedFunction(self, startUID, startGID, wantUID, wantGID):
"""
Check if wanted UID/GID matched start or saved ones.
"""
self.assertTrue(wantUID == startUID or
wantUID == self.mockos.seteuidCalls[-1])
self.assertTrue(wantGID == startGID or
wantGID == self.mockos.setegidCalls[-1])
def test_forwardResult(self):
"""
L{util.runAsEffectiveUser} forwards the result obtained by calling the
given function
"""
result = util.runAsEffectiveUser(0, 0, lambda: 1)
self.assertEqual(result, 1)
def test_takeParameters(self):
"""
L{util.runAsEffectiveUser} pass the given parameters to the given
function.
"""
result = util.runAsEffectiveUser(0, 0, lambda x: 2*x, 3)
self.assertEqual(result, 6)
def test_takesKeyworkArguments(self):
"""
L{util.runAsEffectiveUser} pass the keyword parameters to the given
function.
"""
result = util.runAsEffectiveUser(0, 0, lambda x, y=1, z=1: x*y*z, 2, z=3)
self.assertEqual(result, 6)
def _testUIDGIDSwitch(self, startUID, startGID, wantUID, wantGID,
expectedUIDSwitches, expectedGIDSwitches):
"""
Helper method checking the calls to C{os.seteuid} and C{os.setegid}
made by L{util.runAsEffectiveUser}, when switching from startUID to
wantUID and from startGID to wantGID.
"""
self.mockos.euid = startUID
self.mockos.egid = startGID
util.runAsEffectiveUser(
wantUID, wantGID,
self._securedFunction, startUID, startGID, wantUID, wantGID)
self.assertEqual(self.mockos.seteuidCalls, expectedUIDSwitches)
self.assertEqual(self.mockos.setegidCalls, expectedGIDSwitches)
self.mockos.seteuidCalls = []
self.mockos.setegidCalls = []
def test_root(self):
"""
Check UID/GID switches when current effective UID is root.
"""
self._testUIDGIDSwitch(0, 0, 0, 0, [], [])
self._testUIDGIDSwitch(0, 0, 1, 0, [1, 0], [])
self._testUIDGIDSwitch(0, 0, 0, 1, [], [1, 0])
self._testUIDGIDSwitch(0, 0, 1, 1, [1, 0], [1, 0])
def test_UID(self):
"""
Check UID/GID switches when current effective UID is non-root.
"""
self._testUIDGIDSwitch(1, 0, 0, 0, [0, 1], [])
self._testUIDGIDSwitch(1, 0, 1, 0, [], [])
self._testUIDGIDSwitch(1, 0, 1, 1, [0, 1, 0, 1], [1, 0])
self._testUIDGIDSwitch(1, 0, 2, 1, [0, 2, 0, 1], [1, 0])
def test_GID(self):
"""
Check UID/GID switches when current effective GID is non-root.
"""
self._testUIDGIDSwitch(0, 1, 0, 0, [], [0, 1])
self._testUIDGIDSwitch(0, 1, 0, 1, [], [])
self._testUIDGIDSwitch(0, 1, 1, 1, [1, 0], [])
self._testUIDGIDSwitch(0, 1, 1, 2, [1, 0], [2, 1])
def test_UIDGID(self):
"""
Check UID/GID switches when current effective UID/GID is non-root.
"""
self._testUIDGIDSwitch(1, 1, 0, 0, [0, 1], [0, 1])
self._testUIDGIDSwitch(1, 1, 0, 1, [0, 1], [])
self._testUIDGIDSwitch(1, 1, 1, 0, [0, 1, 0, 1], [0, 1])
self._testUIDGIDSwitch(1, 1, 1, 1, [], [])
self._testUIDGIDSwitch(1, 1, 2, 1, [0, 2, 0, 1], [])
self._testUIDGIDSwitch(1, 1, 1, 2, [0, 1, 0, 1], [2, 1])
self._testUIDGIDSwitch(1, 1, 2, 2, [0, 2, 0, 1], [2, 1])
def _getDeprecationSuppression(f):
    """
    Returns a tuple of arguments needed to suppress deprecation warnings from
    a specified function.

    @param f: function to suppress deprecation warnings for
    @type f: L{callable}

    @return: tuple to add to C{suppress} attribute
    """
    # The message pattern matches the standard Twisted deprecation text,
    # which begins with the fully-qualified name of the deprecated callable.
    return SUPPRESS(
        category=DeprecationWarning,
        message='%s was deprecated' % (fullyQualifiedName(f),))
class InitGroupsTests(unittest.TestCase):
"""
Tests for L{util.initgroups}.
"""
if pwd is None:
skip = "pwd not available"
def setUp(self):
self.addCleanup(setattr, util, "_c_initgroups", util._c_initgroups)
self.addCleanup(setattr, util, "setgroups", util.setgroups)
def test_initgroupsForceC(self):
"""
If we fake the presence of the C extension, it's called instead of the
Python implementation.
"""
calls = []
util._c_initgroups = lambda x, y: calls.append((x, y))
setgroupsCalls = []
util.setgroups = calls.append
util.initgroups(os.getuid(), 4)
self.assertEqual(calls, [(pwd.getpwuid(os.getuid())[0], 4)])
self.assertFalse(setgroupsCalls)
def test_initgroupsForcePython(self):
"""
If we fake the absence of the C extension, the Python implementation is
called instead, calling C{os.setgroups}.
"""
util._c_initgroups = None
calls = []
util.setgroups = calls.append
util.initgroups(os.getuid(), os.getgid())
# Something should be in the calls, we don't really care what
self.assertTrue(calls)
def test_initgroupsInC(self):
"""
If the C extension is present, it's called instead of the Python
version. We check that by making sure C{os.setgroups} is not called.
"""
calls = []
util.setgroups = calls.append
try:
util.initgroups(os.getuid(), os.getgid())
except OSError:
pass
self.assertFalse(calls)
if util._c_initgroups is None:
test_initgroupsInC.skip = "C initgroups not available"
class DeprecationTests(unittest.TestCase):
"""
Tests for deprecations in C{twisted.python.util}.
"""
def test_getPluginDirs(self):
"""
L{util.getPluginDirs} is deprecated.
"""
util.getPluginDirs()
warnings = self.flushWarnings(offendingFunctions=[
self.test_getPluginDirs])
self.assertEqual(
warnings[0]['message'],
"twisted.python.util.getPluginDirs is deprecated since Twisted "
"12.2.")
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(len(warnings), 1)
def test_addPluginDir(self):
"""
L{util.addPluginDir} is deprecated.
"""
util.addPluginDir()
warnings = self.flushWarnings(offendingFunctions=[
self.test_addPluginDir])
self.assertEqual(
warnings[0]['message'],
"twisted.python.util.addPluginDir is deprecated since Twisted "
"12.2.")
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(len(warnings), 1)
test_addPluginDir.suppress = [
SUPPRESS(category=DeprecationWarning,
message="twisted.python.util.getPluginDirs is deprecated")
]
class SuppressedWarningsTests(unittest.TestCase):
"""
Tests for L{util.runWithWarningsSuppressed}.
"""
runWithWarningsSuppressed = staticmethod(util.runWithWarningsSuppressed)
def test_runWithWarningsSuppressedFiltered(self):
"""
Warnings from the function called by C{runWithWarningsSuppressed} are
suppressed if they match the passed in filter.
"""
filters = [(("ignore", ".*foo.*"), {}),
(("ignore", ".*bar.*"), {})]
self.runWithWarningsSuppressed(filters, warnings.warn, "ignore foo")
self.runWithWarningsSuppressed(filters, warnings.warn, "ignore bar")
self.assertEqual([], self.flushWarnings())
def test_runWithWarningsSuppressedUnfiltered(self):
"""
Warnings from the function called by C{runWithWarningsSuppressed} are
not suppressed if they do not match the passed in filter.
"""
filters = [(("ignore", ".*foo.*"), {}),
(("ignore", ".*bar.*"), {})]
self.runWithWarningsSuppressed(filters, warnings.warn, "don't ignore")
self.assertEqual(
["don't ignore"], [w['message'] for w in self.flushWarnings()])
def test_passThrough(self):
"""
C{runWithWarningsSuppressed} returns the result of the function it
called.
"""
self.assertEqual(self.runWithWarningsSuppressed([], lambda: 4), 4)
def test_noSideEffects(self):
"""
Once C{runWithWarningsSuppressed} has returned, it no longer
suppresses warnings.
"""
filters = [(("ignore", ".*foo.*"), {}),
(("ignore", ".*bar.*"), {})]
self.runWithWarningsSuppressed(filters, lambda: None)
warnings.warn("ignore foo")
self.assertEqual(
["ignore foo"], [w['message'] for w in self.flushWarnings()])
class FancyStrMixinTests(unittest.TestCase):
"""
Tests for L{util.FancyStrMixin}.
"""
def test_sequenceOfStrings(self):
"""
If C{showAttributes} is set to a sequence of strings, C{__str__}
renders using those by looking them up as attributes on the object.
"""
class Foo(util.FancyStrMixin):
showAttributes = ("first", "second")
first = 1
second = "hello"
self.assertEqual(str(Foo()), "<Foo first=1 second='hello'>")
def test_formatter(self):
"""
If C{showAttributes} has an item that is a 2-tuple, C{__str__} renders
the first item in the tuple as a key and the result of calling the
second item with the value of the attribute named by the first item as
the value.
"""
class Foo(util.FancyStrMixin):
showAttributes = (
"first",
("second", lambda value: repr(value[::-1])))
first = "hello"
second = "world"
self.assertEqual("<Foo first='hello' second='dlrow'>", str(Foo()))
def test_override(self):
"""
If C{showAttributes} has an item that is a 3-tuple, C{__str__} renders
the second item in the tuple as a key, and the contents of the
attribute named in the first item are rendered as the value. The value
is formatted using the third item in the tuple.
"""
class Foo(util.FancyStrMixin):
showAttributes = ("first", ("second", "2nd", "%.1f"))
first = 1
second = 2.111
self.assertEqual(str(Foo()), "<Foo first=1 2nd=2.1>")
def test_fancybasename(self):
"""
If C{fancybasename} is present, C{__str__} uses it instead of the class name.
"""
class Foo(util.FancyStrMixin):
fancybasename = "Bar"
self.assertEqual(str(Foo()), "<Bar>")
def test_repr(self):
"""
C{__repr__} outputs the same content as C{__str__}.
"""
class Foo(util.FancyStrMixin):
showAttributes = ("first", "second")
first = 1
second = "hello"
obj = Foo()
self.assertEqual(str(obj), repr(obj))
if _PY3:
    # These test cases exercise behavior not yet ported to Python 3
    # (e.g. they depend on MockOS, which is set to None under _PY3 at the
    # top of this file); delete them so trial does not attempt to run
    # them on Python 3.
    del (SwitchUIDTest, SearchUpwardsTest, RunAsEffectiveUserTests,
         OrderedDictTest, IntervalDifferentialTestCase, UtilTestCase,
         TestMergeFunctionMetadata, DeprecationTests, InitGroupsTests,
         GetPasswordTest,
         )
|
OpringaoDoTurno/airflow | refs/heads/master | airflow/operators/sqlite_operator.py | 16 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class SqliteOperator(BaseOperator):
    """
    Executes sql code in a specific Sqlite database

    :param sqlite_conn_id: reference to a specific sqlite database
    :type sqlite_conn_id: string
    :param sql: the sql code to be executed
    :type sql: string or string pointing to a template file. File must have
        a '.sql' extensions.
    :param parameters: parameters to substitute into the SQL statement
    :type parameters: iterable or None
    """
    # 'sql' is a templated field; '.sql' files are rendered as templates.
    template_fields = ('sql',)
    template_ext = ('.sql',)
    ui_color = '#cdaaed'
    @apply_defaults
    def __init__(
            self, sql, sqlite_conn_id='sqlite_default', parameters=None,
            *args, **kwargs):
        super(SqliteOperator, self).__init__(*args, **kwargs)
        self.sqlite_conn_id = sqlite_conn_id
        self.sql = sql
        # Normalize None to a fresh list (avoids a shared mutable default).
        self.parameters = parameters or []
    def execute(self, context):
        """Run the (already rendered) SQL against the configured connection."""
        self.log.info('Executing: %s', self.sql)
        hook = SqliteHook(sqlite_conn_id=self.sqlite_conn_id)
        hook.run(self.sql, parameters=self.parameters)
|
maximeh/spiderwave | refs/heads/master | scrappers/scrape_nz.py | 1 | #! /usr/bin/python
# -*- coding: utf-8 -*-
from BeautifulSoup import BeautifulSoup
import db_wrapper
import scrape_mike as ssm
# Open the station database before scraping so results can be persisted.
db_wrapper.connect()
try:
    url = "http://www.nzradioguide.co.nz/"
    soup = ssm.get_page(url)
    # The station listing lives in the table with id "thetable3".
    data = soup.find("table", {"id": "thetable3"})
    ssm.scrape(
        data,
        name_td_id=0, location_td_id=1, stream_td_id=3, categ_td_id=4,
        country="New Zealand")
finally:
    # Always release the database connection, even when fetching or
    # scraping raises — previously a failure leaked the open connection.
    db_wrapper.disconnect()
|
gamingrobot/SpockBot | refs/heads/master | spockbot/plugins/core/settings.py | 5 | from spockbot.plugins import default_plugins
from spockbot.plugins.base import get_settings, pl_announce
class PloaderFetch(object):
    """Carrier for the resolved plugin list and their per-plugin settings."""

    def __init__(self, plugins, plugin_settings):
        self.plugins = plugins
        self.plugin_settings = plugin_settings

    def get_plugins(self):
        """Return the list of plugins to load."""
        return self.plugins

    def get_plugin_settings(self, plugin):
        """Return the settings mapping for *plugin*, or an empty dict."""
        settings_for_plugin = self.plugin_settings.get(plugin, {})
        return settings_for_plugin
@pl_announce('PloaderFetch')
class SettingsPlugin(object):
    """Resolves runtime settings and publishes them as a PloaderFetch."""

    def __init__(self, ploader, kwargs):
        settings = get_settings(kwargs.get('settings', {}), kwargs)
        plugin_list = settings.get('plugins', default_plugins)
        # Each entry pairs a settings key (entry[0]) with a plugin (entry[1]).
        plugins = [entry[1] for entry in plugin_list]
        plugin_settings = {entry[1]: settings.get(entry[0], {})
                           for entry in plugin_list}
        ploader.provides('PloaderFetch',
                         PloaderFetch(plugins, plugin_settings))
|
AustinRP/Site-Recommender | refs/heads/master | crawler/crawler/pipelines.py | 1 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from datasketch import MinHashLSHForest, MinHash
import os.path
import pickle
class MinHashPipeline(object):
    """
    Scrapy item pipeline that builds a per-domain MinHash from each item's
    trigram set and persists all signatures in an LSH forest so that
    similar domains can be queried across crawls.
    """

    # Path of the pickled MinHashLSHForest that survives between crawls.
    pickle_filename = '../lsh_forest_data'
    # Number of permutation functions to use in each MinHash.
    num_perm = 128

    def __init__(self):
        # Keep mutable state per-instance; the previous class-level
        # ``minhashes = dict()`` was shared by every pipeline instance.
        self.lsh_forest = None
        self.minhashes = dict()

    def open_spider(self, spider):
        # Load forest from pickle if the file exists.
        # We do this at the beginning to ensure that time is not
        # wasted in the event that a problem occurs.
        print('----- Loading Pickle File -----')
        if os.path.isfile(self.pickle_filename):
            # Context manager closes the handle (previously it leaked).
            with open(self.pickle_filename, 'rb') as pickle_file:
                self.lsh_forest = pickle.load(pickle_file)
        else:
            self.lsh_forest = MinHashLSHForest(num_perm=self.num_perm)

    def process_item(self, item, spider):
        domain = item['domain']
        print('domain: {}'.format(domain))
        # Create a MinHash the first time a domain is seen, then fold
        # every trigram of this item into it.
        if domain not in self.minhashes:
            self.minhashes[domain] = MinHash(num_perm=self.num_perm)
        for trigram in item['trigram_set']:
            self.minhashes[domain].update(trigram.encode('utf-8'))
        return item

    def close_spider(self, spider):
        print('----- Saving to Pickle File -----')
        # Populate the forest from the accumulated MinHashes.
        # NOTE(review): MinHashLSHForest normally needs .index() before it
        # can be queried — confirm whoever loads this pickle calls it.
        for domain, mh in self.minhashes.items():
            self.lsh_forest.add(domain, mh)
        # Save forest to pickle; context manager guarantees flush/close.
        with open(self.pickle_filename, 'wb') as pickle_file:
            pickle.dump(self.lsh_forest, pickle_file)
        print('----- {} Saved -----'.format(self.pickle_filename))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.