Columns (value lengths): repo_name 5–100 chars, ref 12–67, path 4–244, copies 1–8, content 0–1.05M (may be empty/null).

| repo_name | ref | path | copies | content |
|---|---|---|---|---|
eugena/django
|
refs/heads/master
|
tests/test_client_regress/__init__.py
|
12133432
| |
yfried/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/nxos/__init__.py
|
12133432
| |
MrStonedOne/-tg-station
|
refs/heads/master
|
tools/minibot/nudge.py
|
157
|
#!/usr/bin/env python3
import sys
import pickle
import socket

def pack():
    ip = sys.argv[1]
    try:
        data = sys.argv[2:]
    except:
        data = "NO DATA SPECIFIED"
    nudge(pickle.dumps({"ip": ip, "data": data}))

def nudge(data):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("localhost", 45678))
    s.send(data)
    s.close()

if __name__ == "__main__" and len(sys.argv) > 1:
    pack()
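For context, a minimal sketch of the listener this script assumes on localhost:45678; only the port and the "ip"/"data" payload keys come from the script above, everything else is illustrative.

import pickle
import socket

def listen_once():
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(("localhost", 45678))           # same port nudge() connects to
    srv.listen(1)
    conn, _ = srv.accept()
    payload = pickle.loads(conn.recv(4096))  # pickle is only safe from trusted local senders
    print(payload["ip"], payload["data"])
    conn.close()
    srv.close()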
|
EmanueleCannizzaro/scons
|
refs/heads/master
|
test/builderrors.py
|
1
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/builderrors.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.subdir('one', 'two', 'three')
test.write('build.py', r"""
import sys
exitval = int(sys.argv[1])
if exitval == 0:
    contents = open(sys.argv[3], 'rb').read()
    file = open(sys.argv[2], 'wb')
    file.write(contents)
    file.close()
sys.exit(exitval)
""")
test.write(['one', 'SConstruct'], """
B0 = Builder(action = r'%(_python_)s ../build.py 0 $TARGET $SOURCES')
B1 = Builder(action = r'%(_python_)s ../build.py 1 $TARGET $SOURCES')
env = Environment(BUILDERS = { 'B0' : B0, 'B1' : B1 })
env.B1(target = 'f1.out', source = 'f1.in')
env.B0(target = 'f2.out', source = 'f2.in')
env.B0(target = 'f3.out', source = 'f3.in')
""" % locals())
test.write(['one', 'f1.in'], "one/f1.in\n")
test.write(['one', 'f2.in'], "one/f2.in\n")
test.write(['one', 'f3.in'], "one/f3.in\n")
test.run(chdir = 'one', arguments = "f1.out f2.out f3.out",
stderr = "scons: *** [f1.out] Error 1\n", status = 2)
test.fail_test(os.path.exists(test.workpath('f1.out')))
test.fail_test(os.path.exists(test.workpath('f2.out')))
test.fail_test(os.path.exists(test.workpath('f3.out')))
test.write(['two', 'SConstruct'], """
B0 = Builder(action = r'%(_python_)s ../build.py 0 $TARGET $SOURCES')
B1 = Builder(action = r'%(_python_)s ../build.py 1 $TARGET $SOURCES')
env = Environment(BUILDERS = { 'B0': B0, 'B1' : B1 })
env.B0(target = 'f1.out', source = 'f1.in')
env.B1(target = 'f2.out', source = 'f2.in')
env.B0(target = 'f3.out', source = 'f3.in')
""" % locals())
test.write(['two', 'f1.in'], "two/f1.in\n")
test.write(['two', 'f2.in'], "two/f2.in\n")
test.write(['two', 'f3.in'], "two/f3.in\n")
test.run(chdir = 'two', arguments = "f1.out f2.out f3.out",
stderr = "scons: *** [f2.out] Error 1\n", status = 2)
test.fail_test(test.read(['two', 'f1.out']) != "two/f1.in\n")
test.fail_test(os.path.exists(test.workpath('f2.out')))
test.fail_test(os.path.exists(test.workpath('f3.out')))
test.write(['three', 'SConstruct'], """
B0 = Builder(action = r'%(_python_)s ../build.py 0 $TARGET $SOURCES')
B1 = Builder(action = r'%(_python_)s ../build.py 1 $TARGET $SOURCES')
env = Environment(BUILDERS = { 'B0' : B0, 'B1': B1 })
env.B0(target = 'f1.out', source = 'f1.in')
env.B0(target = 'f2.out', source = 'f2.in')
env.B1(target = 'f3.out', source = 'f3.in')
""" % locals())
test.write(['three', 'f1.in'], "three/f1.in\n")
test.write(['three', 'f2.in'], "three/f2.in\n")
test.write(['three', 'f3.in'], "three/f3.in\n")
test.run(chdir = 'three', arguments = "f1.out f2.out f3.out",
stderr = "scons: *** [f3.out] Error 1\n", status = 2)
test.fail_test(test.read(['three', 'f1.out']) != "three/f1.in\n")
test.fail_test(test.read(['three', 'f2.out']) != "three/f2.in\n")
test.fail_test(os.path.exists(test.workpath('f3.out')))
test.write('SConstruct', """
env=Environment()
env['ENV']['PATH'] = ''
env.Command(target='foo.out', source=[], action='not_a_program')
""")
test.run(status=2, stderr=None)
test.must_not_contain_any_line(test.stderr(), ['Exception', 'Traceback'])
# Test ETOOLONG (arg list too long). This is not in exitvalmap,
# but that shouldn't cause a scons traceback.
long_cmd = 'xyz ' + "foobarxyz" * 100000
test.write('SConstruct', """
env=Environment()
env.Command(target='longcmd.out', source=[], action='echo %s')
"""%long_cmd)
test.run(status=2, stderr=None)
test.must_not_contain_any_line(test.stderr(), ['Exception', 'Traceback'])
#TODO: This was originally commented out because of a problem with 1.5.2,
# but it doesn't work on later Pythons, either.
#expected = [
# "too long", # posix
# "nvalid argument", # win32
#]
#test.must_contain_any_line(test.stderr(), expected)
# Test bad shell ('./one' is a dir, so it can't be used as a shell).
# This will also give an exit status not in exitvalmap,
# with error "Permission denied" or "No such file or directory".
test.write('SConstruct', """
env=Environment()
env['SHELL'] = 'one'
env.Command(target='badshell.out', source=[], action='foo')
""")
test.run(status=2, stderr=None)
test.must_not_contain_any_line(test.stderr(), ['Exception', 'Traceback'])
expect = [
'No such file',
'Permission denied',
'permission denied',
]
test.must_contain_any_line(test.stderr(), expect)
# Test command with exit status -1.
# Should not give traceback.
test.write('SConstruct', """
import os
env = Environment(ENV = os.environ)
env.Command('dummy.txt', None, ['python -c "import sys; sys.exit(-1)"'])
""")
test.run(status=2, stderr=None)
test.must_not_contain_any_line(test.stderr(), ['Exception', 'Traceback'])
# Test SConscript with errors and an atexit function.
# Should not give traceback; the task error should get converted
# to a BuildError.
test.write('SConstruct', """
import atexit
env = Environment()
env2 = env.Clone()
env.Install("target", "dir1/myFile")
env2.Install("target", "dir2/myFile")
def print_build_failures():
    from SCons.Script import GetBuildFailures
    for bf in GetBuildFailures():
        print bf.action
atexit.register(print_build_failures)
""")
test.run(status=2, stderr=None)
test.must_not_contain_any_line(test.stderr(), ['Exception', 'Traceback'])
# Bug #1053: Alias is called "all", but default is the File "all"
test.write('SConstruct', """
env = Environment()
env.Default("all")
env.Alias("all", env.Install("dir", "file.txt"))
""")
test.run(status=2, match=TestSCons.match_re, stderr="""\
scons: \*\*\* Do not know how to make File target `all' \(.*all\). Stop.
""")
# No tests failed; OK.
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
pigshell/nhnick
|
refs/heads/vnc-websocket
|
src/qt/qtwebkit/Tools/QueueStatusServer/handlers/showresults.py
|
146
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
from model.queuestatus import QueueStatus
class ShowResults(webapp.RequestHandler):
    def get(self, status_id):
        status = QueueStatus.get_by_id(int(status_id))
        if not status:
            self.error(404)
            return
        self.response.headers["Content-Type"] = "text/plain; charset=utf-8"
        self.response.out.write(status.results_file)
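A hedged sketch of how such a handler is typically wired into an App Engine webapp URL map; the route pattern and main() layout below are assumptions for illustration, not this app's actual configuration.

from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app

# Hypothetical route: the captured group becomes the status_id argument to get().
application = webapp.WSGIApplication([(r'/results/(\d+)', ShowResults)], debug=True)

def main():
    run_wsgi_app(application)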
|
cherrygirl/micronaet7
|
refs/heads/master
|
Script/Prestiti/aziende.py
|
1
|
#!/usr/bin/env python
# -*- coding: windows-1251 -*-
from pyExcelerator import *
import os
# Functions:
def print_line(sigla_azienda, anno, mese, file_out, row):
    file_out.write("%6s%6s%6s%6s%26s%25s%13.2f%13.2f%13.2f%13.2f%13.2f\n" % (
        str(int(float(sigla_azienda))).ljust(6),
        str(int(float(anno))).ljust(6),
        str(int(float(mese))).ljust(6),
        row[0].replace("0", "").ljust(6),
        row[1].ljust(26),
        row[2].ljust(25),
        float(row[3]),
        float(row[4]),
        float(row[5]),
        float(row[6]),
        float(row[7]),
    ))
    return

path = '.'
listing = os.listdir(path)
file_out = open("prestiti.txt", "w")
row = []
print "Files read:"
for file_xls in [infile for infile in listing if infile[-3:].lower() == "xls"]:
    sigla_azienda, descrizione, anno = "", "", ""
    old_row = 0
    for sheet_name, values in parse_xls(file_xls, 'cp1251'):
        for row_idx, col_idx in sorted(values.keys()):
            #if col_idx==0:
            #    import pdb; pdb.set_trace()
            v = values[row_idx, col_idx]
            if isinstance(v, unicode):
                v = v.encode('cp866', 'backslashreplace')
            else:
                v = str(v)
            # Read the parameters that set the default data for this XLS file:
            if row_idx == 3 and col_idx == 1: sigla_azienda = v  # company code
            if row_idx == 3 and col_idx == 2: descrizione = v  # company description
            if row_idx == 4 and col_idx == 1: anno = v  # year
            if row_idx == 5 and col_idx == 1: mese = v  # month
            if row_idx >= 13:  # data rows start at row 13
                if row_idx != old_row:
                    # complete row of 8 fields with a value in the first amount column?
                    if len(row) == 8 and row[3] != '0.0':
                        print_line(sigla_azienda, anno, mese, file_out, row)
                    old_row = row_idx
                    row = []
                row.append(v)
    print file_xls, "-", sigla_azienda.split(".")[0], "-", descrizione
file_out.close()
|
unbreakab1e/jenkins-job-builder-addons
|
refs/heads/master
|
tests/views/test_views.py
|
2
|
""" Test to make sure that the view match the fixtures"""
import os
from testscenarios.testcase import TestWithScenarios
from testtools import TestCase
from jenkins_jobs_addons import views
from tests.base import get_scenarios, BaseTestCase
class TestCaseModulePublishers(TestWithScenarios, TestCase, BaseTestCase):
    fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures')
    scenarios = get_scenarios(fixtures_path)
    klass = views.Views
|
ajdawson/iris
|
refs/heads/master
|
lib/iris/tests/integration/test_ff.py
|
10
|
# (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Integration tests for loading LBC fieldsfiles."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import shutil
import numpy as np
import iris
import iris.experimental.um as um
from iris.tests import mock
@tests.skip_data
class TestLBC(tests.IrisTest):
    def setUp(self):
        # Load multiple cubes from a test file.
        file_path = tests.get_data_path(('FF', 'lbc', 'small_lbc'))
        self.all_cubes = iris.load(file_path)
        # Select the second cube for detailed checks (the first is orography).
        self.test_cube = self.all_cubes[1]

    def test_various_cubes_shapes(self):
        # Check a few aspects of the loaded cubes.
        cubes = self.all_cubes
        self.assertEqual(len(cubes), 10)
        self.assertEqual(cubes[0].shape, (16, 16))
        self.assertEqual(cubes[1].shape, (2, 4, 16, 16))
        self.assertEqual(cubes[3].shape, (2, 5, 16, 16))

    def test_cube_coords(self):
        # Check coordinates of one cube.
        cube = self.test_cube
        self.assertEqual(len(cube.coords()), 8)
        for name, shape in [
                ('forecast_reference_time', (1,)),
                ('time', (2,)),
                ('forecast_period', (2,)),
                ('model_level_number', (4,)),
                ('level_height', (1,)),
                ('sigma', (1,)),
                ('grid_latitude', (16,)),
                ('grid_longitude', (16,))]:
            coords = cube.coords(name)
            self.assertEqual(len(coords), 1,
                             'expected one {!r} coord, found {}'.format(
                                 name, len(coords)))
            coord, = coords
            self.assertEqual(coord.shape, shape,
                             'coord {!r} shape is {} instead of {!r}.'.format(
                                 name, coord.shape, shape))

    def test_cube_data(self):
        # Check just a few points of the data.
        cube = self.test_cube
        self.assertArrayAllClose(
            cube.data[:, ::2, 6, 13],
            np.array([[4.218922, 10.074577],
                      [4.626897, 6.520156]]),
            atol=1.0e-6)

    def test_cube_mask(self):
        # Check the data mask : should be just the centre 6x2 section.
        cube = self.test_cube
        mask = np.zeros((2, 4, 16, 16), dtype=bool)
        mask[:, :, 7:9, 5:11] = True
        self.assertArrayEqual(cube.data.mask, mask)


class TestFFGrid(tests.IrisTest):
    @tests.skip_data
    def test_unhandled_grid_type(self):
        self.filename = tests.get_data_path(('FF', 'n48_multi_field'))
        with self.temp_filename() as temp_path:
            shutil.copyfile(self.filename, temp_path)
            ffv = um.FieldsFileVariant(temp_path,
                                       mode=um.FieldsFileVariant.UPDATE_MODE)
            ffv.fields[3].lbuser4 = 60
            ffv.close()
            with mock.patch('warnings.warn') as warn_fn:
                iris.load(temp_path)
            self.assertIn("Assuming the data is on a P grid.",
                          warn_fn.call_args[0][0])


if __name__ == '__main__':
    tests.main()
|
jnewland/home-assistant
|
refs/heads/ci
|
homeassistant/components/islamic_prayer_times/__init__.py
|
15
|
"""The islamic_prayer_times component."""
|
bfrohs/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_attr.py
|
106
|
import WebIDL
def WebIDLTest(parser, harness):
testData = [("::TestAttr%s::b", "b", "Byte%s", False),
("::TestAttr%s::rb", "rb", "Byte%s", True),
("::TestAttr%s::o", "o", "Octet%s", False),
("::TestAttr%s::ro", "ro", "Octet%s", True),
("::TestAttr%s::s", "s", "Short%s", False),
("::TestAttr%s::rs", "rs", "Short%s", True),
("::TestAttr%s::us", "us", "UnsignedShort%s", False),
("::TestAttr%s::rus", "rus", "UnsignedShort%s", True),
("::TestAttr%s::l", "l", "Long%s", False),
("::TestAttr%s::rl", "rl", "Long%s", True),
("::TestAttr%s::ul", "ul", "UnsignedLong%s", False),
("::TestAttr%s::rul", "rul", "UnsignedLong%s", True),
("::TestAttr%s::ll", "ll", "LongLong%s", False),
("::TestAttr%s::rll", "rll", "LongLong%s", True),
("::TestAttr%s::ull", "ull", "UnsignedLongLong%s", False),
("::TestAttr%s::rull", "rull", "UnsignedLongLong%s", True),
("::TestAttr%s::str", "str", "String%s", False),
("::TestAttr%s::rstr", "rstr", "String%s", True),
("::TestAttr%s::obj", "obj", "Object%s", False),
("::TestAttr%s::robj", "robj", "Object%s", True),
("::TestAttr%s::object", "object", "Object%s", False),
("::TestAttr%s::f", "f", "Float%s", False),
("::TestAttr%s::rf", "rf", "Float%s", True)]
parser.parse("""
interface TestAttr {
attribute byte b;
readonly attribute byte rb;
attribute octet o;
readonly attribute octet ro;
attribute short s;
readonly attribute short rs;
attribute unsigned short us;
readonly attribute unsigned short rus;
attribute long l;
readonly attribute long rl;
attribute unsigned long ul;
readonly attribute unsigned long rul;
attribute long long ll;
readonly attribute long long rll;
attribute unsigned long long ull;
readonly attribute unsigned long long rull;
attribute DOMString str;
readonly attribute DOMString rstr;
attribute object obj;
readonly attribute object robj;
attribute object _object;
attribute float f;
readonly attribute float rf;
};
interface TestAttrNullable {
attribute byte? b;
readonly attribute byte? rb;
attribute octet? o;
readonly attribute octet? ro;
attribute short? s;
readonly attribute short? rs;
attribute unsigned short? us;
readonly attribute unsigned short? rus;
attribute long? l;
readonly attribute long? rl;
attribute unsigned long? ul;
readonly attribute unsigned long? rul;
attribute long long? ll;
readonly attribute long long? rll;
attribute unsigned long long? ull;
readonly attribute unsigned long long? rull;
attribute DOMString? str;
readonly attribute DOMString? rstr;
attribute object? obj;
readonly attribute object? robj;
attribute object? _object;
attribute float? f;
readonly attribute float? rf;
};
interface TestAttrArray {
attribute byte[] b;
readonly attribute byte[] rb;
attribute octet[] o;
readonly attribute octet[] ro;
attribute short[] s;
readonly attribute short[] rs;
attribute unsigned short[] us;
readonly attribute unsigned short[] rus;
attribute long[] l;
readonly attribute long[] rl;
attribute unsigned long[] ul;
readonly attribute unsigned long[] rul;
attribute long long[] ll;
readonly attribute long long[] rll;
attribute unsigned long long[] ull;
readonly attribute unsigned long long[] rull;
attribute DOMString[] str;
readonly attribute DOMString[] rstr;
attribute object[] obj;
readonly attribute object[] robj;
attribute object[] _object;
attribute float[] f;
readonly attribute float[] rf;
};
interface TestAttrNullableArray {
attribute byte[]? b;
readonly attribute byte[]? rb;
attribute octet[]? o;
readonly attribute octet[]? ro;
attribute short[]? s;
readonly attribute short[]? rs;
attribute unsigned short[]? us;
readonly attribute unsigned short[]? rus;
attribute long[]? l;
readonly attribute long[]? rl;
attribute unsigned long[]? ul;
readonly attribute unsigned long[]? rul;
attribute long long[]? ll;
readonly attribute long long[]? rll;
attribute unsigned long long[]? ull;
readonly attribute unsigned long long[]? rull;
attribute DOMString[]? str;
readonly attribute DOMString[]? rstr;
attribute object[]? obj;
readonly attribute object[]? robj;
attribute object[]? _object;
attribute float[]? f;
readonly attribute float[]? rf;
};
interface TestAttrArrayOfNullableTypes {
attribute byte?[] b;
readonly attribute byte?[] rb;
attribute octet?[] o;
readonly attribute octet?[] ro;
attribute short?[] s;
readonly attribute short?[] rs;
attribute unsigned short?[] us;
readonly attribute unsigned short?[] rus;
attribute long?[] l;
readonly attribute long?[] rl;
attribute unsigned long?[] ul;
readonly attribute unsigned long?[] rul;
attribute long long?[] ll;
readonly attribute long long?[] rll;
attribute unsigned long long?[] ull;
readonly attribute unsigned long long?[] rull;
attribute DOMString?[] str;
readonly attribute DOMString?[] rstr;
attribute object?[] obj;
readonly attribute object?[] robj;
attribute object?[] _object;
attribute float?[] f;
readonly attribute float?[] rf;
};
interface TestAttrNullableArrayOfNullableTypes {
attribute byte?[]? b;
readonly attribute byte?[]? rb;
attribute octet?[]? o;
readonly attribute octet?[]? ro;
attribute short?[]? s;
readonly attribute short?[]? rs;
attribute unsigned short?[]? us;
readonly attribute unsigned short?[]? rus;
attribute long?[]? l;
readonly attribute long?[]? rl;
attribute unsigned long?[]? ul;
readonly attribute unsigned long?[]? rul;
attribute long long?[]? ll;
readonly attribute long long?[]? rll;
attribute unsigned long long?[]? ull;
readonly attribute unsigned long long?[]? rull;
attribute DOMString?[]? str;
readonly attribute DOMString?[]? rstr;
attribute object?[]? obj;
readonly attribute object?[]? robj;
attribute object?[]? _object;
attribute float?[]? f;
readonly attribute float?[]? rf;
};
""")
results = parser.finish()
def checkAttr(attr, QName, name, type, readonly):
harness.ok(isinstance(attr, WebIDL.IDLAttribute),
"Should be an IDLAttribute")
harness.ok(attr.isAttr(), "Attr is an Attr")
harness.ok(not attr.isMethod(), "Attr is not an method")
harness.ok(not attr.isConst(), "Attr is not a const")
harness.check(attr.identifier.QName(), QName, "Attr has the right QName")
harness.check(attr.identifier.name, name, "Attr has the right name")
harness.check(str(attr.type), type, "Attr has the right type")
harness.check(attr.readonly, readonly, "Attr's readonly state is correct")
harness.ok(True, "TestAttr interface parsed without error.")
harness.check(len(results), 6, "Should be six productions.")
iface = results[0]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttr", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttr", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "", name, type % "", readonly)
iface = results[1]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrNullable", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrNullable", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "Nullable", name, type % "OrNull", readonly)
iface = results[2]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrArray", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrArray", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "Array", name, type % "Array", readonly)
iface = results[3]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrNullableArray", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrNullableArray", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "NullableArray", name, type % "ArrayOrNull", readonly)
iface = results[4]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrArrayOfNullableTypes", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrArrayOfNullableTypes", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "ArrayOfNullableTypes", name, type % "OrNullArray", readonly)
iface = results[5]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrNullableArrayOfNullableTypes", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrNullableArrayOfNullableTypes", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "NullableArrayOfNullableTypes", name, type % "OrNullArrayOrNull", readonly)
parser = parser.reset()
threw = False
try:
parser.parse("""
interface A {
[SetterInfallible] readonly attribute boolean foo;
};
""")
results = parser.finish()
except Exception, x:
threw = True
harness.ok(threw, "Should not allow [SetterInfallible] on readonly attributes")
|
dpausp/arguments
|
refs/heads/master
|
tests/concepts/proposition_type/__init__.py
|
12133432
| |
spapanik/pyrencode
|
refs/heads/main
|
src/pyrencode/settings/__init__.py
|
12133432
| |
gxx/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/modeltests/m2m_and_m2o/__init__.py
|
12133432
| |
t0in4/django
|
refs/heads/master
|
tests/base/__init__.py
|
12133432
| |
yoer/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/fixtures_regress/models.py
|
49
|
from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Animal(models.Model):
name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
count = models.IntegerField()
weight = models.FloatField()
# use a non-default name for the default manager
specimens = models.Manager()
def __str__(self):
return self.name
class Plant(models.Model):
name = models.CharField(max_length=150)
class Meta:
# For testing when upper case letter in app name; regression for #4057
db_table = "Fixtures_regress_plant"
@python_2_unicode_compatible
class Stuff(models.Model):
name = models.CharField(max_length=20, null=True)
owner = models.ForeignKey(User, null=True)
def __str__(self):
return six.text_type(self.name) + ' is owned by ' + six.text_type(self.owner)
class Absolute(models.Model):
name = models.CharField(max_length=40)
class Parent(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ('id',)
class Child(Parent):
data = models.CharField(max_length=10)
# Models to regression test #7572
class Channel(models.Model):
name = models.CharField(max_length=255)
class Article(models.Model):
title = models.CharField(max_length=255)
channels = models.ManyToManyField(Channel)
class Meta:
ordering = ('id',)
# Subclass of a model with a ManyToManyField for test_ticket_20820
class SpecialArticle(Article):
pass
# Models to regression test #11428
@python_2_unicode_compatible
class Widget(models.Model):
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class WidgetProxy(Widget):
class Meta:
proxy = True
# Check for forward references in FKs and M2Ms with natural keys
class TestManager(models.Manager):
def get_by_natural_key(self, key):
return self.get(name=key)
@python_2_unicode_compatible
class Store(models.Model):
objects = TestManager()
name = models.CharField(max_length=255)
main = models.ForeignKey('self', null=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
@python_2_unicode_compatible
class Person(models.Model):
objects = TestManager()
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
# Person doesn't actually have a dependency on store, but we need to define
# one to test the behavior of the dependency resolution algorithm.
def natural_key(self):
return (self.name,)
natural_key.dependencies = ['fixtures_regress.store']
@python_2_unicode_compatible
class Book(models.Model):
name = models.CharField(max_length=255)
author = models.ForeignKey(Person)
stores = models.ManyToManyField(Store)
class Meta:
ordering = ('name',)
def __str__(self):
return '%s by %s (available at %s)' % (
self.name,
self.author.name,
', '.join(s.name for s in self.stores.all())
)
class NKManager(models.Manager):
def get_by_natural_key(self, data):
return self.get(data=data)
@python_2_unicode_compatible
class NKChild(Parent):
data = models.CharField(max_length=10, unique=True)
objects = NKManager()
def natural_key(self):
return self.data
def __str__(self):
return 'NKChild %s:%s' % (self.name, self.data)
@python_2_unicode_compatible
class RefToNKChild(models.Model):
text = models.CharField(max_length=10)
nk_fk = models.ForeignKey(NKChild, related_name='ref_fks')
nk_m2m = models.ManyToManyField(NKChild, related_name='ref_m2ms')
def __str__(self):
return '%s: Reference to %s [%s]' % (
self.text,
self.nk_fk,
', '.join(str(o) for o in self.nk_m2m.all())
)
# Some models with pathological circular dependencies
class Circle1(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle2']
class Circle2(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle1']
class Circle3(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle3']
class Circle4(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle5']
class Circle5(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle6']
class Circle6(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle4']
class ExternalDependency(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.book']
# Model for regression test of #11101
class Thingy(models.Model):
name = models.CharField(max_length=255)
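As a quick illustration of the natural-key plumbing defined above (the store name is invented for the example, and the use_natural_keys flag is the one this Django 1.6 tree supports):

from django.core import serializers

store = Store.objects.create(name="Main Street")            # hypothetical fixture data
assert Store.objects.get_by_natural_key("Main Street") == store
# Serializing with natural keys emits natural_key() output instead of primary keys.
payload = serializers.serialize("json", [store], use_natural_keys=True)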
|
zlsun/XX-Net
|
refs/heads/master
|
code/default/python27/1.0/lib/noarch/dnslib/proxy.py
|
7
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import binascii,socket,struct
from dnslib import DNSRecord
from dnslib.server import DNSServer,DNSHandler,BaseResolver,DNSLogger
class ProxyResolver(BaseResolver):
"""
Proxy resolver - passes all requests to upstream DNS server and
returns response
Note that the request/response will each be decoded/re-encoded
twice:
a) Request packet received by DNSHandler and parsed into DNSRecord
b) DNSRecord passed to ProxyResolver, serialised back into packet
and sent to upstream DNS server
c) Upstream DNS server returns response packet which is parsed into
DNSRecord
d) ProxyResolver returns DNSRecord to DNSHandler which re-serialises
this into packet and returns to client
In practice this is actually fairly useful for testing but for a
'real' transparent proxy option the DNSHandler logic needs to be
modified (see PassthroughDNSHandler)
"""
def __init__(self,address,port):
self.address = address
self.port = port
def resolve(self,request,handler):
if handler.protocol == 'udp':
proxy_r = request.send(self.address,self.port)
else:
proxy_r = request.send(self.address,self.port,tcp=True)
reply = DNSRecord.parse(proxy_r)
return reply
class PassthroughDNSHandler(DNSHandler):
"""
Modify DNSHandler logic (get_reply method) to send directly to
upstream DNS server rather than decoding/encoding the packet and
passing to Resolver (The request/response packets are still
parsed and logged but this is not inline)
"""
def get_reply(self,data):
host,port = self.server.resolver.address,self.server.resolver.port
request = DNSRecord.parse(data)
self.log_request(request)
if self.protocol == 'tcp':
data = struct.pack("!H",len(data)) + data
response = send_tcp(data,host,port)
response = response[2:]
else:
response = send_udp(data,host,port)
reply = DNSRecord.parse(response)
self.log_reply(reply)
return response
def send_tcp(data,host,port):
"""
Helper function to send/receive DNS TCP request
(in/out packets will have prepended TCP length header)
"""
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.connect((host,port))
sock.sendall(data)
response = sock.recv(8192)
length = struct.unpack("!H",bytes(response[:2]))[0]
while len(response) - 2 < length:
response += sock.recv(8192)
sock.close()
return response
def send_udp(data,host,port):
"""
Helper function to send/receive DNS UDP request
"""
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
sock.sendto(data,(host,port))
response,server = sock.recvfrom(8192)
sock.close()
return response
if __name__ == '__main__':
import argparse,sys,time
p = argparse.ArgumentParser(description="DNS Proxy")
p.add_argument("--port","-p",type=int,default=53,
metavar="<port>",
help="Local proxy port (default:53)")
p.add_argument("--address","-a",default="",
metavar="<address>",
help="Local proxy listen address (default:all)")
p.add_argument("--upstream","-u",default="8.8.8.8:53",
metavar="<dns server:port>",
help="Upstream DNS server:port (default:8.8.8.8:53)")
p.add_argument("--tcp",action='store_true',default=False,
help="TCP proxy (default: UDP only)")
p.add_argument("--passthrough",action='store_true',default=False,
help="Dont decode/re-encode request/response (default: off)")
p.add_argument("--log",default="request,reply,truncated,error",
help="Log hooks to enable (default: +request,+reply,+truncated,+error,-recv,-send,-data)")
p.add_argument("--log-prefix",action='store_true',default=False,
help="Log prefix (timestamp/handler/resolver) (default: False)")
args = p.parse_args()
args.dns,_,args.dns_port = args.upstream.partition(':')
args.dns_port = int(args.dns_port or 53)
print("Starting Proxy Resolver (%s:%d -> %s:%d) [%s]" % (
args.address or "*",args.port,
args.dns,args.dns_port,
"UDP/TCP" if args.tcp else "UDP"))
resolver = ProxyResolver(args.dns,args.dns_port)
handler = PassthroughDNSHandler if args.passthrough else DNSHandler
logger = DNSLogger(args.log,args.log_prefix)
udp_server = DNSServer(resolver,
port=args.port,
address=args.address,
logger=logger,
handler=handler)
udp_server.start_thread()
if args.tcp:
tcp_server = DNSServer(resolver,
port=args.port,
address=args.address,
tcp=True,
logger=logger,
handler=handler)
tcp_server.start_thread()
while udp_server.isAlive():
time.sleep(1)
|
rahul003/mxnet
|
refs/heads/master
|
example/rcnn/symnet/symbol_vgg.py
|
11
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from . import proposal_target
def get_vgg_feature(data):
# group 1
conv1_1 = mx.symbol.Convolution(
data=data, kernel=(3, 3), pad=(1, 1), num_filter=64, workspace=2048, name="conv1_1")
relu1_1 = mx.symbol.Activation(data=conv1_1, act_type="relu", name="relu1_1")
conv1_2 = mx.symbol.Convolution(
data=relu1_1, kernel=(3, 3), pad=(1, 1), num_filter=64, workspace=2048, name="conv1_2")
relu1_2 = mx.symbol.Activation(data=conv1_2, act_type="relu", name="relu1_2")
pool1 = mx.symbol.Pooling(
data=relu1_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool1")
# group 2
conv2_1 = mx.symbol.Convolution(
data=pool1, kernel=(3, 3), pad=(1, 1), num_filter=128, workspace=2048, name="conv2_1")
relu2_1 = mx.symbol.Activation(data=conv2_1, act_type="relu", name="relu2_1")
conv2_2 = mx.symbol.Convolution(
data=relu2_1, kernel=(3, 3), pad=(1, 1), num_filter=128, workspace=2048, name="conv2_2")
relu2_2 = mx.symbol.Activation(data=conv2_2, act_type="relu", name="relu2_2")
pool2 = mx.symbol.Pooling(
data=relu2_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool2")
# group 3
conv3_1 = mx.symbol.Convolution(
data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name="conv3_1")
relu3_1 = mx.symbol.Activation(data=conv3_1, act_type="relu", name="relu3_1")
conv3_2 = mx.symbol.Convolution(
data=relu3_1, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name="conv3_2")
relu3_2 = mx.symbol.Activation(data=conv3_2, act_type="relu", name="relu3_2")
conv3_3 = mx.symbol.Convolution(
data=relu3_2, kernel=(3, 3), pad=(1, 1), num_filter=256, workspace=2048, name="conv3_3")
relu3_3 = mx.symbol.Activation(data=conv3_3, act_type="relu", name="relu3_3")
pool3 = mx.symbol.Pooling(
data=relu3_3, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool3")
# group 4
conv4_1 = mx.symbol.Convolution(
data=pool3, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv4_1")
relu4_1 = mx.symbol.Activation(data=conv4_1, act_type="relu", name="relu4_1")
conv4_2 = mx.symbol.Convolution(
data=relu4_1, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv4_2")
relu4_2 = mx.symbol.Activation(data=conv4_2, act_type="relu", name="relu4_2")
conv4_3 = mx.symbol.Convolution(
data=relu4_2, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv4_3")
relu4_3 = mx.symbol.Activation(data=conv4_3, act_type="relu", name="relu4_3")
pool4 = mx.symbol.Pooling(
data=relu4_3, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool4")
# group 5
conv5_1 = mx.symbol.Convolution(
data=pool4, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv5_1")
relu5_1 = mx.symbol.Activation(data=conv5_1, act_type="relu", name="relu5_1")
conv5_2 = mx.symbol.Convolution(
data=relu5_1, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv5_2")
relu5_2 = mx.symbol.Activation(data=conv5_2, act_type="relu", name="relu5_2")
conv5_3 = mx.symbol.Convolution(
data=relu5_2, kernel=(3, 3), pad=(1, 1), num_filter=512, workspace=2048, name="conv5_3")
relu5_3 = mx.symbol.Activation(data=conv5_3, act_type="relu", name="relu5_3")
return relu5_3
def get_vgg_top_feature(data):
# group 6
flatten = mx.symbol.Flatten(data=data, name="flatten")
fc6 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
relu6 = mx.symbol.Activation(data=fc6, act_type="relu", name="relu6")
drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
fc7 = mx.symbol.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
relu7 = mx.symbol.Activation(data=fc7, act_type="relu", name="relu7")
drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
return drop7
def get_vgg_train(anchor_scales, anchor_ratios, rpn_feature_stride,
rpn_pre_topk, rpn_post_topk, rpn_nms_thresh, rpn_min_size, rpn_batch_rois,
num_classes, rcnn_feature_stride, rcnn_pooled_size, rcnn_batch_size,
rcnn_batch_rois, rcnn_fg_fraction, rcnn_fg_overlap, rcnn_bbox_stds):
num_anchors = len(anchor_scales) * len(anchor_ratios)
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
gt_boxes = mx.symbol.Variable(name="gt_boxes")
rpn_label = mx.symbol.Variable(name='label')
rpn_bbox_target = mx.symbol.Variable(name='bbox_target')
rpn_bbox_weight = mx.symbol.Variable(name='bbox_weight')
# shared convolutional layers
conv_feat = get_vgg_feature(data)
# RPN layers
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
# rpn classification
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob")
rpn_cls_act = mx.symbol.softmax(
data=rpn_cls_score_reshape, axis=1, name="rpn_cls_act")
rpn_cls_act_reshape = mx.symbol.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
# rpn bbox regression
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
rpn_bbox_loss_ = rpn_bbox_weight * mx.symbol.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / rpn_batch_rois)
# rpn proposal
rois = mx.symbol.contrib.MultiProposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=rpn_feature_stride, scales=anchor_scales, ratios=anchor_ratios,
rpn_pre_nms_top_n=rpn_pre_topk, rpn_post_nms_top_n=rpn_post_topk,
threshold=rpn_nms_thresh, rpn_min_size=rpn_min_size)
# rcnn roi proposal target
group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes, op_type='proposal_target',
num_classes=num_classes, batch_images=rcnn_batch_size,
batch_rois=rcnn_batch_rois, fg_fraction=rcnn_fg_fraction,
fg_overlap=rcnn_fg_overlap, box_stds=rcnn_bbox_stds)
rois = group[0]
label = group[1]
bbox_target = group[2]
bbox_weight = group[3]
# rcnn roi pool
roi_pool = mx.symbol.ROIPooling(
name='roi_pool', data=conv_feat, rois=rois, pooled_size=rcnn_pooled_size, spatial_scale=1.0 / rcnn_feature_stride)
# rcnn top feature
top_feat = get_vgg_top_feature(roi_pool)
# rcnn classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=top_feat, num_hidden=num_classes)
cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
# rcnn bbox regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=top_feat, num_hidden=num_classes * 4)
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / rcnn_batch_rois)
# reshape output
label = mx.symbol.Reshape(data=label, shape=(rcnn_batch_size, -1), name='label_reshape')
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(rcnn_batch_size, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.symbol.Reshape(data=bbox_loss, shape=(rcnn_batch_size, -1, 4 * num_classes), name='bbox_loss_reshape')
# group output
group = mx.symbol.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.symbol.BlockGrad(label)])
return group
def get_vgg_test(anchor_scales, anchor_ratios, rpn_feature_stride,
rpn_pre_topk, rpn_post_topk, rpn_nms_thresh, rpn_min_size,
num_classes, rcnn_feature_stride, rcnn_pooled_size, rcnn_batch_size):
num_anchors = len(anchor_scales) * len(anchor_ratios)
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
# shared convolutional layers
conv_feat = get_vgg_feature(data)
# rpn feature
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
# rpn classification
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_act = mx.symbol.softmax(
data=rpn_cls_score_reshape, axis=1, name="rpn_cls_act")
rpn_cls_act_reshape = mx.symbol.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
# rpn bbox regression
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# rpn proposal
rois = mx.symbol.contrib.MultiProposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=rpn_feature_stride, scales=anchor_scales, ratios=anchor_ratios,
rpn_pre_nms_top_n=rpn_pre_topk, rpn_post_nms_top_n=rpn_post_topk,
threshold=rpn_nms_thresh, rpn_min_size=rpn_min_size)
# rcnn roi pool
roi_pool = mx.symbol.ROIPooling(
name='roi_pool', data=conv_feat, rois=rois, pooled_size=rcnn_pooled_size, spatial_scale=1.0 / rcnn_feature_stride)
# rcnn top feature
top_feat = get_vgg_top_feature(roi_pool)
# rcnn classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=top_feat, num_hidden=num_classes)
cls_prob = mx.symbol.softmax(name='cls_prob', data=cls_score)
# rcnn bbox regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=top_feat, num_hidden=num_classes * 4)
# reshape output
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(rcnn_batch_size, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(rcnn_batch_size, -1, 4 * num_classes), name='bbox_pred_reshape')
# group output
group = mx.symbol.Group([rois, cls_prob, bbox_pred])
return group
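A minimal sketch of building the inference symbol defined above; the hyperparameter values are common Faster R-CNN defaults assumed for illustration, not values read from this repository's config.

import mxnet as mx

sym = get_vgg_test(anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2),
                   rpn_feature_stride=16, rpn_pre_topk=6000, rpn_post_topk=300,
                   rpn_nms_thresh=0.7, rpn_min_size=16,
                   num_classes=21, rcnn_feature_stride=16,
                   rcnn_pooled_size=(7, 7), rcnn_batch_size=1)
print(sym.list_outputs())   # rois, cls_prob and bbox_pred as grouped above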
|
sanghinitin/golismero
|
refs/heads/master
|
tools/sqlmap/thirdparty/ansistrm/ansistrm.py
|
8
|
#
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. Licensed under the new BSD license.
#
import logging
import os
import re
from lib.core.convert import stdoutencode
class ColorizingStreamHandler(logging.StreamHandler):
# color names to indices
color_map = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
}
# levels to (background, foreground, bold/intense)
if os.name == 'nt':
level_map = {
logging.DEBUG: (None, 'blue', False),
logging.INFO: (None, 'green', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', False),
logging.CRITICAL: ('red', 'white', False)
}
else:
level_map = {
logging.DEBUG: (None, 'blue', False),
logging.INFO: (None, 'green', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', False),
logging.CRITICAL: ('red', 'white', False)
}
csi = '\x1b['
reset = '\x1b[0m'
disable_coloring = False
@property
def is_tty(self):
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty() and not self.disable_coloring
def emit(self, record):
try:
message = stdoutencode(self.format(record))
stream = self.stream
if not self.is_tty:
if message and message[0] == "\r":
message = message[1:]
stream.write(message)
else:
self.output_colorized(message)
stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
if os.name != 'nt':
def output_colorized(self, message):
self.stream.write(message)
else:
ansi_esc = re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m')
nt_color_map = {
0: 0x00, # black
1: 0x04, # red
2: 0x02, # green
3: 0x06, # yellow
4: 0x01, # blue
5: 0x05, # magenta
6: 0x03, # cyan
7: 0x07, # white
}
def output_colorized(self, message):
import ctypes
parts = self.ansi_esc.split(message)
write = self.stream.write
h = None
fd = getattr(self.stream, 'fileno', None)
if fd is not None:
fd = fd()
if fd in (1, 2): # stdout or stderr
h = ctypes.windll.kernel32.GetStdHandle(-10 - fd)
while parts:
text = parts.pop(0)
if text:
write(text)
if parts:
params = parts.pop(0)
if h is not None:
params = [int(p) for p in params.split(';')]
color = 0
for p in params:
if 40 <= p <= 47:
color |= self.nt_color_map[p - 40] << 4
elif 30 <= p <= 37:
color |= self.nt_color_map[p - 30]
elif p == 1:
color |= 0x08 # foreground intensity on
elif p == 0: # reset to default color
color = 0x07
else:
pass # error condition ignored
ctypes.windll.kernel32.SetConsoleTextAttribute(h, color)
def colorize(self, message, record):
if record.levelno in self.level_map and self.is_tty:
bg, fg, bold = self.level_map[record.levelno]
params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params and message:
if message.lstrip() != message:
prefix = re.search(r"\s+", message).group(0)
message = message[len(prefix):]
else:
prefix = ""
message = "%s%s" % (prefix, ''.join((self.csi, ';'.join(params),
'm', message, self.reset)))
return message
def format(self, record):
message = logging.StreamHandler.format(self, record)
return self.colorize(message, record)
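A small usage sketch, attaching the handler above to a standard logger; the logger name and format string are arbitrary examples.

import logging
import sys

logger = logging.getLogger("demo")                      # hypothetical logger name
handler = ColorizingStreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("[%(levelname)s] %(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.warning("rendered in yellow when stdout is a tty")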
|
linuxmint/synaptic
|
refs/heads/master
|
swig/apt-inst.py
|
4
|
#!/usr/bin/env python
import synaptic_common
import sys

class TextProgress(synaptic_common.SwigOpProgress):
    def UpdateStatus(self, p):
        print "\r%.2f " % (p),
    def Done(self):
        print "\nDone"
    def Update(self):
        print "?",

class TextAcquireProgress(synaptic_common.SwigAcquireStatus):
    def UpdatePulse(self, FetchedBytes, CurrentCPS, CurrentItems):
        print "Pulse: fetchedB %i, cps %s, currentItems %i" % (FetchedBytes, CurrentCPS, CurrentItems)
    def Start(self):
        print "TextAcquireProgress.Start"
    def Stop(self):
        print "TextAcquireProgress.Stop"
    def Fetched(self, size, resume):
        print "Fetched: %i %i" % (size, resume)

class TextInstallProgress(synaptic_common.SwigInstallProgress):
    def startUpdate(self):
        print "startUpdate"
    def finishUpdate(self):
        print "finishUpdate"

if len(sys.argv) < 2:
    print "need argument"
    sys.exit(1)

# FIXME: wrap this somewhere
_error = synaptic_common._GetErrorObj()
synaptic_common.RInitSystem()
lister = synaptic_common.RPackageLister()
t = TextProgress()
lister.setProgressMeter(t)
if not lister.openCache(False):
    print "error opening cache file"
    _error.DumpErrors()
    sys.exit(1)
pkg = lister.getPackage(sys.argv[1])
if pkg == None:
    print "Can't find pkg %s" % sys.argv[1]
    sys.exit(1)
pkg.setReInstall(True)
pkg.setInstall()
#aProgress = synaptic_common.SwigAcquireStatus()
#iProgress = synaptic_common.SwigInstallProgress()
aProgress = TextAcquireProgress()
iProgress = TextInstallProgress()
lister.commitChanges(aProgress, iProgress)
|
stevepiercy/readthedocs.org
|
refs/heads/master
|
readthedocs/oauth/admin.py
|
6
|
from django.contrib import admin
from .models import RemoteRepository, RemoteOrganization
class RemoteRepositoryAdmin(admin.ModelAdmin):
    raw_id_fields = ('users',)

class RemoteOrganizationAdmin(admin.ModelAdmin):
    raw_id_fields = ('users',)
admin.site.register(RemoteRepository, RemoteRepositoryAdmin)
admin.site.register(RemoteOrganization, RemoteOrganizationAdmin)
|
up9cloud/line-api-server
|
refs/heads/master
|
lib/py/src/protocol/TMultiplexedProtocol.py
|
146
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from thrift.Thrift import TMessageType
from thrift.protocol import TProtocolDecorator
SEPARATOR = ":"
class TMultiplexedProtocol(TProtocolDecorator.TProtocolDecorator):
    def __init__(self, protocol, serviceName):
        TProtocolDecorator.TProtocolDecorator.__init__(self, protocol)
        self.serviceName = serviceName

    def writeMessageBegin(self, name, type, seqid):
        if (type == TMessageType.CALL or
                type == TMessageType.ONEWAY):
            self.protocol.writeMessageBegin(
                self.serviceName + SEPARATOR + name,
                type,
                seqid
            )
        else:
            self.protocol.writeMessageBegin(name, type, seqid)
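For reference, the usual client-side pattern for this class: wrap an ordinary protocol and give each service its own prefix. The host, port, and service/client names below are placeholders.

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol

transport = TTransport.TBufferedTransport(TSocket.TSocket("localhost", 9090))
protocol = TBinaryProtocol.TBinaryProtocol(transport)
calc = TMultiplexedProtocol(protocol, "Calculator")   # hypothetical service name
# client = Calculator.Client(calc)                    # generated client, not shown here
transport.open()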
|
RaphaelNajera/Sunlight_Sensor
|
refs/heads/master
|
firmware/Adafruit_Python_PureIO/build/lib.linux-armv7l-2.7/Adafruit_PureIO/smbus.py
|
6
|
# Copyright (c) 2016 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ctypes import *
from fcntl import ioctl
import struct
# I2C C API constants (from linux kernel headers)
I2C_M_TEN = 0x0010 # this is a ten bit chip address
I2C_M_RD = 0x0001 # read data, from slave to master
I2C_M_STOP = 0x8000 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_NOSTART = 0x4000 # if I2C_FUNC_NOSTART
I2C_M_REV_DIR_ADDR = 0x2000 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_IGNORE_NAK = 0x1000 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_NO_RD_ACK = 0x0800 # if I2C_FUNC_PROTOCOL_MANGLING
I2C_M_RECV_LEN = 0x0400 # length will be first received byte
I2C_SLAVE = 0x0703 # Use this slave address
I2C_SLAVE_FORCE = 0x0706 # Use this slave address, even if
# it is already in use by a driver!
I2C_TENBIT = 0x0704 # 0 for 7 bit addrs, != 0 for 10 bit
I2C_FUNCS = 0x0705 # Get the adapter functionality mask
I2C_RDWR = 0x0707 # Combined R/W transfer (one STOP only)
I2C_PEC = 0x0708 # != 0 to use PEC with SMBus
I2C_SMBUS = 0x0720 # SMBus transfer
# ctypes versions of I2C structs defined by kernel.
class i2c_msg(Structure):
_fields_ = [
('addr', c_uint16),
('flags', c_uint16),
('len', c_uint16),
('buf', POINTER(c_uint8))
]
class i2c_rdwr_ioctl_data(Structure):
_fields_ = [
('msgs', POINTER(i2c_msg)),
('nmsgs', c_uint32)
]
def make_i2c_rdwr_data(messages):
"""Utility function to create and return an i2c_rdwr_ioctl_data structure
populated with a list of specified I2C messages. The messages parameter
should be a list of tuples which represent the individual I2C messages to
send in this transaction. Tuples should contain 4 elements: address value,
flags value, buffer length, ctypes c_uint8 pointer to buffer.
"""
# Create message array and populate with provided data.
msg_data_type = i2c_msg*len(messages)
msg_data = msg_data_type()
for i, m in enumerate(messages):
msg_data[i].addr = m[0] & 0x7F
msg_data[i].flags = m[1]
msg_data[i].len = m[2]
msg_data[i].buf = m[3]
# Now build the data structure.
data = i2c_rdwr_ioctl_data()
data.msgs = msg_data
data.nmsgs = len(messages)
return data
# Create an interface that mimics the Python SMBus API.
class SMBus(object):
"""I2C interface that mimics the Python SMBus API but is implemented with
pure Python calls to ioctl and direct /dev/i2c device access.
"""
def __init__(self, bus=None):
"""Create a new smbus instance. Bus is an optional parameter that
specifies the I2C bus number to use, for example 1 would use device
/dev/i2c-1. If bus is not specified then the open function should be
called to open the bus.
"""
self._device = None
if bus is not None:
self.open(bus)
def __del__(self):
"""Clean up any resources used by the SMBus instance."""
self.close()
def __enter__(self):
"""Context manager enter function."""
# Just return this object so it can be used in a with statement, like
# with SMBus(1) as bus:
# # do stuff!
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Context manager exit function, ensures resources are cleaned up."""
self.close()
return False # Don't suppress exceptions.
def open(self, bus):
"""Open the smbus interface on the specified bus."""
# Close the device if it's already open.
if self._device is not None:
self.close()
# Try to open the file for the specified bus. Must turn off buffering
# or else Python 3 fails (see: https://bugs.python.org/issue20074)
self._device = open('/dev/i2c-{0}'.format(bus), 'r+b', buffering=0)
# TODO: Catch IOError and throw a better error message that describes
# what's wrong (i.e. I2C may not be enabled or the bus doesn't exist).
def close(self):
"""Close the smbus connection. You cannot make any other function
calls on the bus unless open is called!"""
if self._device is not None:
self._device.close()
self._device = None
def _select_device(self, addr):
"""Set the address of the device to communicate with on the I2C bus."""
ioctl(self._device.fileno(), I2C_SLAVE, addr & 0x7F)
def read_byte(self, addr):
"""Read a single byte from the specified device."""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
self._select_device(addr)
return ord(self._device.read(1))
def read_byte_data(self, addr, cmd):
"""Read a single byte from the specified cmd register of the device."""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Build ctypes values to marshall between ioctl and Python.
reg = c_uint8(cmd)
result = c_uint8()
# Build ioctl request.
request = make_i2c_rdwr_data([
(addr, 0, 1, pointer(reg)), # Write cmd register.
(addr, I2C_M_RD, 1, pointer(result)) # Read 1 byte as result.
])
# Make ioctl call and return result data.
ioctl(self._device.fileno(), I2C_RDWR, request)
return result.value
def read_word_data(self, addr, cmd):
"""Read a word (2 bytes) from the specified cmd register of the device.
Note that this will interpret data using the endianness of the processor
running Python (typically little endian)!
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Build ctypes values to marshall between ioctl and Python.
reg = c_uint8(cmd)
result = c_uint16()
# Build ioctl request.
request = make_i2c_rdwr_data([
(addr, 0, 1, pointer(reg)), # Write cmd register.
(addr, I2C_M_RD, 2, cast(pointer(result), POINTER(c_uint8))) # Read word (2 bytes).
])
# Make ioctl call and return result data.
ioctl(self._device.fileno(), I2C_RDWR, request)
return result.value
def read_block_data(self, addr, cmd):
"""Perform a block read from the specified cmd register of the device.
        The amount of data read is determined by the first byte sent back by
the device. Data is returned as a bytearray.
"""
# TODO: Unfortunately this will require calling the low level I2C
# access ioctl to trigger a proper read_block_data. The amount of data
# returned isn't known until the device starts responding so an I2C_RDWR
# ioctl won't work.
raise NotImplementedError()
def read_i2c_block_data(self, addr, cmd, length=32):
"""Perform a read from the specified cmd register of device. Length number
of bytes (default of 32) will be read and returned as a bytearray.
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Build ctypes values to marshall between ioctl and Python.
reg = c_uint8(cmd)
result = create_string_buffer(length)
# Build ioctl request.
request = make_i2c_rdwr_data([
(addr, 0, 1, pointer(reg)), # Write cmd register.
(addr, I2C_M_RD, length, cast(result, POINTER(c_uint8))) # Read data.
])
# Make ioctl call and return result data.
ioctl(self._device.fileno(), I2C_RDWR, request)
return bytearray(result.raw) # Use .raw instead of .value which will stop at a null byte!
def write_quick(self, addr):
"""Write a single byte to the specified device."""
# What a strange function, from the python-smbus source this appears to
# just write a single byte that initiates a write to the specified device
# address (but writes no data!). The functionality is duplicated below
# but the actual use case for this is unknown.
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Build ioctl request.
request = make_i2c_rdwr_data([
(addr, 0, 0, None), # Write with no data.
])
# Make ioctl call and return result data.
ioctl(self._device.fileno(), I2C_RDWR, request)
def write_byte(self, addr, val):
"""Write a single byte to the specified device."""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
self._select_device(addr)
data = bytearray(1)
data[0] = val & 0xFF
self._device.write(data)
def write_byte_data(self, addr, cmd, val):
"""Write a byte of data to the specified cmd register of the device.
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Construct a string of data to send with the command register and byte value.
data = bytearray(2)
data[0] = cmd & 0xFF
data[1] = val & 0xFF
# Send the data to the device.
self._select_device(addr)
self._device.write(data)
def write_word_data(self, addr, cmd, val):
"""Write a word (2 bytes) of data to the specified cmd register of the
device. Note that this will write the data in the endianness of the
processor running Python (typically little endian)!
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Construct a string of data to send with the command register and word value.
data = struct.pack('=BH', cmd & 0xFF, val & 0xFFFF)
# Send the data to the device.
self._select_device(addr)
self._device.write(data)
def write_block_data(self, addr, cmd, vals):
"""Write a block of data to the specified cmd register of the device.
The amount of data to write should be the first byte inside the vals
string/bytearray and that count of bytes of data to write should follow
it.
"""
# Just use the I2C block data write to write the provided values and
# their length as the first byte.
data = bytearray(len(vals)+1)
data[0] = len(vals) & 0xFF
data[1:] = vals[0:]
self.write_i2c_block_data(addr, cmd, data)
def write_i2c_block_data(self, addr, cmd, vals):
"""Write a buffer of data to the specified cmd register of the device.
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Construct a string of data to send, including room for the command register.
data = bytearray(len(vals)+1)
data[0] = cmd & 0xFF # Command register at the start.
data[1:] = vals[0:] # Copy in the block data (ugly but necessary to ensure
# the entire write happens in one transaction).
# Send the data to the device.
self._select_device(addr)
self._device.write(data)
def process_call(self, addr, cmd, val):
"""Perform a smbus process call by writing a word (2 byte) value to
the specified register of the device, and then reading a word of response
data (which is returned).
"""
assert self._device is not None, 'Bus must be opened before operations are made against it!'
# Build ctypes values to marshall between ioctl and Python.
data = create_string_buffer(struct.pack('=BH', cmd, val))
result = c_uint16()
# Build ioctl request.
request = make_i2c_rdwr_data([
(addr, 0, 3, cast(pointer(data), POINTER(c_uint8))), # Write data.
(addr, I2C_M_RD, 2, cast(pointer(result), POINTER(c_uint8))) # Read word (2 bytes).
])
# Make ioctl call and return result data.
ioctl(self._device.fileno(), I2C_RDWR, request)
# Note the python-smbus code appears to have a rather serious bug and
# does not return the result value! This is fixed below by returning it.
return result.value
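# A minimal usage sketch; the bus number (1), device address (0x48) and the
# register offsets below are placeholder assumptions for illustration only.
if __name__ == '__main__':
    with SMBus(1) as bus:
        # Read one byte from register 0x00 of the (hypothetical) device at 0x48.
        print(bus.read_byte_data(0x48, 0x00))
        # Write 0xFF to register 0x01 of the same device.
        bus.write_byte_data(0x48, 0x01, 0xFF)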
|
cpacia/OpenBazaar-Server
|
refs/heads/master
|
keys/credentials.py
|
6
|
import base64
import random
from config import USERNAME, PASSWORD
from hashlib import sha256
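# Credential precedence implemented below: config values (USERNAME/PASSWORD)
# win when both are set, previously stored credentials are kept when the config
# is incomplete, and a random username/password pair is generated and stored
# when neither source provides one.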
def get_credentials(database):
settings = database.settings
creds = settings.get_credentials()
if creds == (USERNAME, PASSWORD):
return creds
elif creds is not None and (USERNAME is None or PASSWORD is None):
return creds
elif creds is not None and USERNAME is not None and PASSWORD is not None:
settings.set_credentials(USERNAME, PASSWORD)
return (USERNAME, PASSWORD)
elif creds is None and (USERNAME is None or PASSWORD is None):
username = base64.b64encode(sha256(str(random.getrandbits(255))).digest())[:20]
password = base64.b64encode(sha256(str(random.getrandbits(255))).digest())[:20]
settings.set_credentials(username, password)
return (username, password)
elif creds is None and (USERNAME is not None and PASSWORD is not None):
settings.set_credentials(USERNAME, PASSWORD)
return (USERNAME, PASSWORD)
|
icio/github3.py
|
refs/heads/develop
|
tests/test_users.py
|
9
|
import github3
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from tests.utils import (BaseCase, load)
from datetime import datetime
class TestKey(BaseCase):
def __init__(self, methodName='runTest'):
super(TestKey, self).__init__(methodName)
self.key = github3.users.Key(load('key'))
self.api = "https://api.github.com/user/keys/10"
def setUp(self):
super(TestKey, self).setUp()
self.key = github3.users.Key(self.key.as_dict(), self.g)
def test_equality(self):
k = github3.users.Key(self.key.as_dict())
assert self.key == k
k._uniq += "cruft"
assert self.key != k
def test_str(self):
assert str(self.key) == self.key.key
assert repr(self.key).startswith('<User Key')
def test_delete(self):
self.response('', 204)
self.delete(self.api)
self.assertRaises(github3.GitHubError, self.key.delete)
self.not_called()
self.login()
assert self.key.delete()
self.mock_assertions()
def test_update(self):
self.response('key', 200)
self.patch(self.api)
self.conf = {
'data': {
'key': 'fakekey',
'title': 'New title',
}
}
self.assertRaises(github3.GitHubError, self.key.update, None, None)
self.login()
assert self.key.update(None, None) is False
self.not_called()
assert self.key.update(**self.conf['data'])
self.mock_assertions()
class TestPlan(BaseCase):
def __init__(self, methodName='runTest'):
super(TestPlan, self).__init__(methodName)
self.plan = github3.users.Plan({
'name': 'free',
'space': 400,
'collaborators': 10,
'private_repos': 20,
})
def test_str(self):
assert str(self.plan) == self.plan.name
assert repr(self.plan) == '<Plan [free]>'
assert self.plan.is_free()
class TestUser(BaseCase):
def __init__(self, methodName='runTest'):
super(TestUser, self).__init__(methodName)
self.user = github3.users.User(load('user'))
self.api = "https://api.github.com/users/sigmavirus24"
def setUp(self):
super(TestUser, self).setUp()
self.user = github3.users.User(self.user.as_dict(), self.g)
if hasattr(self.user.name, 'decode'):
self.user.name = self.user.name.decode('utf-8')
def test_refresh(self):
"""This sort of tests all instances of refresh for good measure."""
self.response('', 304)
self.get(self.api)
self.user.last_modified = last_modified = datetime.now().strftime(
'%a, %d %b %Y %H:%M:%S GMT'
)
self.user.etag = etag = '644b5b0155e6404a9cc4bd9d8b1ae730'
expected_headers = {
'If-Modified-Since': last_modified,
}
self.user.refresh(True)
self.request.assert_called_with('GET', self.api,
headers=expected_headers,
allow_redirects=True)
self.user.last_modified = None
expected_headers = {
'If-None-Match': etag
}
self.user.refresh(True)
self.request.assert_called_with('GET', self.api,
headers=expected_headers,
allow_redirects=True)
self.response('user', 200)
self.user.refresh()
self.mock_assertions()
def test_str(self):
assert str(self.user) == 'sigmavirus24'
assert repr(self.user) == '<User [sigmavirus24:Ian Cordasco]>'
def test_add_email_address(self):
self.assertRaises(github3.GitHubError, self.user.add_email_address,
'foo')
self.not_called()
self.login()
with patch.object(github3.users.User, 'add_email_addresses') as p:
self.user.add_email_address('foo')
p.assert_called_once_with(['foo'])
def test_add_email_addresses(self):
self.response('emails', 201, _iter=True)
self.post(self.github_url + 'user/emails')
self.conf = {
'data': '["foo@bar.com"]',
}
self.assertRaises(github3.GitHubError, self.user.add_email_addresses,
[])
self.not_called()
self.login()
self.user.add_email_addresses(['foo@bar.com'])
self.mock_assertions()
def test_delete_email_address(self):
self.assertRaises(github3.GitHubError, self.user.delete_email_address,
'foo')
self.not_called()
self.login()
with patch.object(github3.users.User, 'delete_email_addresses') as p:
self.user.delete_email_address('foo')
p.assert_called_once_with(['foo'])
def test_delete_email_addresses(self):
self.response('', 204)
self.delete(self.github_url + 'user/emails')
self.conf = {
'data': '["foo@bar.com"]'
}
self.assertRaises(github3.GitHubError,
self.user.delete_email_addresses,
[])
self.not_called()
self.login()
assert self.user.delete_email_addresses(['foo@bar.com'])
self.mock_assertions()
def test_is_assignee_on(self):
self.response('', 404)
self.get(self.github_url + 'repos/abc/def/assignees/sigmavirus24')
assert self.user.is_assignee_on('abc', 'def') is False
self.mock_assertions()
def test_is_following(self):
self.response('', 204)
self.get(self.api + '/following/kennethreitz')
assert self.user.is_following('kennethreitz')
self.mock_assertions()
def test_equality(self):
u = github3.users.User(load('user'))
assert self.user == u
u._uniq += 1
assert self.user != u
|
dfalt974/SickRage
|
refs/heads/master
|
lib/chardet/jpcntx.py
|
289
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# This is the hiragana 2-char sequence table; the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis(object):
NUM_OF_CATEGORY = 6
DONT_KNOW = -1
ENOUGH_REL_THRESHOLD = 100
MAX_REL_THRESHOLD = 1000
MINIMUM_DATA_THRESHOLD = 4
def __init__(self):
self._total_rel = None
self._rel_sample = None
self._need_to_skip_char_num = None
self._last_char_order = None
self._done = None
self.reset()
def reset(self):
self._total_rel = 0 # total sequence received
# category counters, each integer counts sequence in its category
self._rel_sample = [0] * self.NUM_OF_CATEGORY
# if last byte in current buffer is not the last byte of a character,
# we need to know how many bytes to skip in next buffer
self._need_to_skip_char_num = 0
self._last_char_order = -1 # The order of previous char
# If this flag is set to True, detection is done and conclusion has
# been made
self._done = False
def feed(self, byte_str, num_bytes):
if self._done:
return
        # The buffer we get is byte-oriented, and a character may span more
        # than one buffer. If the last one or two bytes of the previous buffer
        # did not form a complete character, we record how many bytes are
        # needed to complete it and skip them here. We could record those
        # bytes and analyse the character once it is complete, but a single
        # character makes little difference, so simply skipping it keeps the
        # logic simple and improves performance.
i = self._need_to_skip_char_num
while i < num_bytes:
order, char_len = self.get_order(byte_str[i:i + 2])
i += char_len
if i > num_bytes:
self._need_to_skip_char_num = i - num_bytes
self._last_char_order = -1
else:
if (order != -1) and (self._last_char_order != -1):
self._total_rel += 1
if self._total_rel > self.MAX_REL_THRESHOLD:
self._done = True
break
self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1
self._last_char_order = order
def got_enough_data(self):
return self._total_rel > self.ENOUGH_REL_THRESHOLD
def get_confidence(self):
# This is just one way to calculate confidence. It works well for me.
if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
return (self._total_rel - self._rel_sample[0]) / self._total_rel
else:
return self.DONT_KNOW
def get_order(self, byte_str):
return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
def __init__(self):
super(SJISContextAnalysis, self).__init__()
self._charset_name = "SHIFT_JIS"
@property
def charset_name(self):
return self._charset_name
def get_order(self, byte_str):
if not byte_str:
return -1, 1
# find out current char's byte length
first_char = byte_str[0]
if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC):
char_len = 2
if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
self._charset_name = "CP932"
else:
char_len = 1
# return its order if it is hiragana
if len(byte_str) > 1:
second_char = byte_str[1]
if (first_char == 202) and (0x9F <= second_char <= 0xF1):
return second_char - 0x9F, char_len
return -1, char_len
class EUCJPContextAnalysis(JapaneseContextAnalysis):
def get_order(self, byte_str):
if not byte_str:
return -1, 1
# find out current char's byte length
first_char = byte_str[0]
if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):
char_len = 2
elif first_char == 0x8F:
char_len = 3
else:
char_len = 1
# return its order if it is hiragana
if len(byte_str) > 1:
second_char = byte_str[1]
if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):
return second_char - 0xA1, char_len
return -1, char_len
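# A minimal usage sketch (the byte string is an arbitrary placeholder, not part
# of the original module): feed SHIFT_JIS data in chunks, then query the state.
# get_confidence() returns DONT_KNOW (-1) until enough hiragana pairs are seen.
if __name__ == '__main__':
    analyser = SJISContextAnalysis()
    sample = b'\x82\xa0\x82\xa2\x82\xa4'  # placeholder SHIFT_JIS bytes
    analyser.feed(sample, len(sample))
    print(analyser.charset_name, analyser.got_enough_data(), analyser.get_confidence())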
|
npuichigo/ttsflow
|
refs/heads/master
|
third_party/tensorflow/tensorflow/contrib/keras/python/keras/layers/normalization_test.py
|
19
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.contrib.keras.python.keras import testing_utils
from tensorflow.python.platform import test
class NoiseLayersTest(test.TestCase):
  def test_basic_batchnorm(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'momentum': 0.9,
'epsilon': 0.1,
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
  def test_batchnorm_weights(self):
with self.test_session():
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 2)
layer = keras.layers.BatchNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
  def test_batchnorm_regularization(self):
with self.test_session():
layer = keras.layers.BatchNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
layer = keras.layers.BatchNormalization(
gamma_constraint='l1', beta_constraint='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.constraints), 2)
def test_batchnorm_correctness(self):
with self.test_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
def test_batchnorm_convnet(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=1, input_shape=(3, 4, 4), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
def test_shared_batchnorm(self):
"""Test that a BN layer can be shared across different data streams.
"""
with self.test_session():
# Test single layer reuse
bn = keras.layers.BatchNormalization()
x1 = keras.layers.Input(shape=(10,))
_ = bn(x1)
x2 = keras.layers.Input(shape=(10,))
y2 = bn(x2)
x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
model = keras.models.Model(x2, y2)
model.compile('sgd', 'mse')
model.train_on_batch(x, x)
assert len(model.updates) == 2
# Test model-level reuse
x3 = keras.layers.Input(shape=(10,))
y3 = model(x3)
new_model = keras.models.Model(x3, y3)
assert len(model.updates) == 2
new_model.compile('sgd', 'mse')
new_model.train_on_batch(x, x)
if __name__ == '__main__':
test.main()
|
LukeHoersten/ansible-modules-core
|
refs/heads/devel
|
cloud/docker/__init__.py
|
12133432
| |
openstack/poppy
|
refs/heads/master
|
tests/unit/storage/mockdb/__init__.py
|
12133432
| |
KieranWynn/pyquaternion
|
refs/heads/master
|
pyquaternion/test/__init__.py
|
12133432
| |
DARKPOP/external_chromium_org
|
refs/heads/dark-5.1
|
chrome/test/ispy/server/image_handler.py
|
101
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Request handler to display an image from Google Cloud Storage."""
import json
import os
import sys
import webapp2
from common import cloud_bucket
from common import constants
import gs_bucket
class ImageHandler(webapp2.RequestHandler):
"""A request handler to avoid the Same-Origin problem in the debug view."""
def get(self):
"""Handles get requests to the ImageHandler.
GET Parameters:
file_path: A path to an image resource in Google Cloud Storage.
"""
file_path = self.request.get('file_path')
if not file_path:
self.error(404)
return
bucket = gs_bucket.GoogleCloudStorageBucket(constants.BUCKET)
try:
image = bucket.DownloadFile(file_path)
except cloud_bucket.FileNotFoundError:
self.error(404)
else:
self.response.headers['Content-Type'] = 'image/png'
self.response.out.write(image)
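# A minimal wiring sketch (the route path and the module-level name are
# assumptions for illustration; the real application registers its handlers
# elsewhere):
#
#   app = webapp2.WSGIApplication([('/image', ImageHandler)], debug=True)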
|
RAPD/RAPD
|
refs/heads/master
|
src/old_agents/subcontractors/xdsme/new/xdsme-0.4.9/bin/Linux_i586/2pck.py
|
6
|
#!/usr/bin/env python
"""
28/01/02 pierre.legrand@crchul.ulaval.ca
"""
usage = """
>>> Usage : 2pck.py FORMAT image1 [image2 ...]\n
FORMAT = TIFF (marccd)
"""
import string,sys,os
def find_template(image_name):
dirname = os.path.split(os.getcwd())[1]
if image_name[-3:] == ".gz" or image_name[-3:] == ".GZ":
image_name = image_name[:-3]
if image_name[-2:] == ".Z" or image_name[-2:] == ".z":
image_name = image_name[:-2]
    templ = string.join(image_name.split(".")[:-1], ".")
ext = str(image_name.split(".")[-1])
n = 0
    while templ[-1*n-1].isdigit():
        n += 1
return templ[:-1*n], n, ext
#print os.path.split(os.getcwd())
if len(sys.argv) >= 3:
format = sys.argv[1]
else:
print usage
sys.exit()
nx = ny = 2048
templ, n, ext = find_template(sys.argv[2])
templ_in = templ + n*"?" + "." + ext
templ_out = templ + n*"?" + ".pck"
print ">> input: %s" % templ_in
print ">> output: %s" % templ_out
i1, i2 = 1e10, -1
for img in sys.argv[2:]:
num = img[img.index(templ)+len(templ):img.rindex(ext)-1]
num = string.atoi(num)
i1, i2 = min(i1,num), max(i2,num)
print ">> first image: %7d\n>> last image: %7d" % (i1, i2)
script = """
NAME_TEMPLATE_OF_DATA_FRAMES= %s DIRECT %s
DATA_RANGE= %d %d
NX= %d NY= %d
NAME_TEMPLATE_OF_OUTPUT_FRAMES= %s
""" % (templ_in, format,i1,i2,nx,ny,templ_out)
open("2PCK.INP","wb").write(script)
#os.system("2pck")
if os.path.exists("dataframe"):
os.remove("dataframe")
|
2ndQuadrant/ansible
|
refs/heads/master
|
lib/ansible/plugins/doc_fragments/constructed.py
|
53
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
strict:
description:
- If C(yes) make invalid entries a fatal error, otherwise skip and continue.
- Since it is possible to use facts in the expressions they might not always be available
and we ignore those errors by default.
type: bool
default: no
compose:
description: Create vars from jinja2 expressions.
type: dict
default: {}
groups:
description: Add hosts to group based on Jinja2 conditionals.
type: dict
default: {}
keyed_groups:
description: Add hosts to group based on the values of a variable.
type: list
default: []
'''
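# An illustrative inventory configuration that consumes these options (the
# plugin name and all values below are placeholders, not part of this
# fragment):
#
#   plugin: constructed
#   strict: no
#   compose:
#     ansible_port: 22
#   groups:
#     webservers: inventory_hostname.startswith('web')
#   keyed_groups:
#     - prefix: arch
#       key: ansible_architecture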
|
matthiask/feincms2-content
|
refs/heads/master
|
tests/testapp/models.py
|
2
|
from django.db import models
from django.urls import reverse
from content_editor.models import Region, Template, create_plugin_base
class AbstractRichText(models.Model):
text = models.TextField(blank=True)
class Meta:
abstract = True
verbose_name = "rich text"
class Article(models.Model):
title = models.CharField(max_length=200)
regions = [
Region(key="main", title="main region"),
Region(key="sidebar", title="sidebar region"),
]
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("article_detail", kwargs={"pk": self.pk})
ArticlePlugin = create_plugin_base(Article)
class RichText(AbstractRichText, ArticlePlugin):
pass
class Download(ArticlePlugin):
file = models.TextField() # FileField, but charfield is easier to test.
class Meta:
verbose_name = "download"
verbose_name_plural = "downloads"
class Thing(models.Model):
"""Added as inline to article admin to check whether non-ContentEditor
inlines still work"""
article = models.ForeignKey(Article, on_delete=models.CASCADE)
class Page(models.Model):
title = models.CharField(max_length=200)
parent = models.ForeignKey(
"self", related_name="children", blank=True, null=True, on_delete=models.CASCADE
)
template = Template(
key="test",
regions=[
Region(key="main", title="main region"),
Region(key="sidebar", title="sidebar region", inherited=True),
],
)
class Meta:
verbose_name = "page"
verbose_name_plural = "pages"
def get_absolute_url(self):
return reverse("page_detail", kwargs={"pk": self.pk})
@property
def regions(self):
return self.template.regions
PagePlugin = create_plugin_base(Page)
class PageText(AbstractRichText, PagePlugin):
pass
|
amitt001/Analytics-App
|
refs/heads/master
|
API/rate/reviews_sentiment_write.py
|
1
|
"""
READ FILE: ALL THE CHANGES MUST BE DONE TO THIS FILE.
THIS FILE PICKLES THE CLASSIFIERS AND FEATURESETS
RUN 'reviews_sentiment_read.py' TO CHECK ACCURACY
FROM THE ALREADY PICKLED FILES.
Play Store app reviews sentiment analysis using
the NLTK module of Python.
Tagging reviews as positive and negative (and neutral)
"""
import re
import pickle
import random
from collections import OrderedDict
from statistics import mode
from unidecode import unidecode
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.linear_model import LogisticRegression,SGDClassifier
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
import nltk
from nltk.corpus import stopwords
from nltk.classify import ClassifierI
from nltk.tokenize import word_tokenize
from nltk.classify.scikitlearn import SklearnClassifier
class VoteClassifier(ClassifierI):
def __init__(self, *classifiers):
self._classifiers = classifiers
self.votes = []
def classify(self, features):
        # Classify the features with each of the classifiers and return the
        # mode (majority vote) of their results.
self.votes = [c.classify(features) for c in self._classifiers]
return mode(self.votes)
def confidence(self, features):
choice_votes = self.votes.count(mode(self.votes))
conf = choice_votes / float(len(self.votes))
return conf
def find_features(document):
"""
    This function takes a document string as input.
    For each word in the most_frequent words list (word_features) it
    checks whether that word appears in the document:
        feature_dict[word] = True   if the word is in the document
        feature_dict[word] = False  otherwise
    """
words = set(word_tokenize(document))
return dict((w,True if w in words else False) for w in word_features)
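# Illustrative behaviour (assuming word_features contains 'good' and 'boring'):
#   find_features("a good app") -> {'good': True, 'boring': False, ...}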
#documents = [(list(movie_reviews.words(fileids)), category) for category in movie_reviews.categories() for fileids in movie_reviews.fileids(category)]
short_pos = unidecode(open('positive10k.txt', 'r').read())
short_neg = unidecode(open('negative10k.txt', 'r').read())
documents = [ (r, 'pos') for r in short_pos.split('\n')]
documents += [ (r, 'neg') for r in short_neg.split('\n')]
stpwrd = dict((sw,True) for sw in stopwords.words('english')+['film', 'movie'] if sw not in ['not','below'])
all_words = [w.lower() for w in word_tokenize(short_pos) + word_tokenize(short_neg) if len(w) > 1 and not stpwrd.get(w)]
with open("pickle/documents.pickle","wb") as doc:
pickle.dump(documents, doc)
all_words = nltk.FreqDist(all_words)
all_words = OrderedDict(sorted(all_words.items(), key=lambda x:x[1], reverse=True))
word_features = list(all_words.keys())[0:5000]
with open("pickle/word_features5k.pickle","wb") as save_word_features:
pickle.dump(word_features, save_word_features)
featuresets = [(find_features(rev), category) for (rev, category) in documents]
random.shuffle(featuresets)
train_set = featuresets[:8000]
test_set = featuresets[8000:]
####DELETE Variables to Free up some space####
del short_neg
del short_pos
del stpwrd
del all_words
del word_features
del documents
del featuresets
#################
## CLASSIFIERS ##
#################
classifier = nltk.NaiveBayesClassifier.train(train_set)
with open('pickle/naive_bayes.pickle', 'wb') as saviour:
pickle.dump(classifier, saviour)
print("Naive bayes Algo accuracy", (nltk.classify.accuracy(classifier, test_set))*100)
classifier.show_most_informative_features(30)
MNB_classifier = SklearnClassifier(MultinomialNB())
MNB_classifier.train(train_set)
with open('pickle/mnb_classifier.pickle', 'wb') as saviour:
pickle.dump(MNB_classifier, saviour)
print("MNB_classifier accuracy percent:", (nltk.classify.accuracy(MNB_classifier, test_set))*100)
BernoulliNB_classifier = SklearnClassifier(BernoulliNB())
BernoulliNB_classifier.train(train_set)
with open('pickle/bernoullinb_classifier.pickle', 'wb') as saviour:
pickle.dump(BernoulliNB_classifier, saviour)
print("BernoulliNB_classifier accuracy percent:", (nltk.classify.accuracy(BernoulliNB_classifier, test_set))*100)
LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
LogisticRegression_classifier.train(train_set)
with open('pickle/logisticregression_classifier.pickle', 'wb') as saviour:
pickle.dump(LogisticRegression_classifier, saviour)
print("LogisticRegression_classifier accuracy percent:", (nltk.classify.accuracy(LogisticRegression_classifier, test_set))*100)
SGDClassifier_classifier = SklearnClassifier(SGDClassifier())
SGDClassifier_classifier.train(train_set)
with open('pickle/sgdcclassifier_classifier.pickle', 'wb') as saviour:
pickle.dump(SGDClassifier_classifier, saviour)
print("SGDClassifier_classifier accuracy percent:", (nltk.classify.accuracy(SGDClassifier_classifier, test_set))*100)
LinearSVC_classifier = SklearnClassifier(LinearSVC())
LinearSVC_classifier.train(train_set)
with open('pickle/linearsvc_classifier.pickle', 'wb') as saviour:
pickle.dump(LinearSVC_classifier, saviour)
print("LinearSVC_classifier accuracy percent:", (nltk.classify.accuracy(LinearSVC_classifier, test_set))*100)
NuSVC_classifier = SklearnClassifier(NuSVC())
NuSVC_classifier.train(train_set)
with open('pickle/nusvc_classifier.pickle', 'wb') as saviour:
pickle.dump(NuSVC_classifier, saviour)
print("NuSVC_classifier accuracy percent:", (nltk.classify.accuracy(NuSVC_classifier, test_set))*100)
voted_classifier = VoteClassifier(classifier,
NuSVC_classifier,
LinearSVC_classifier,
SGDClassifier_classifier,
MNB_classifier,
BernoulliNB_classifier,
LogisticRegression_classifier)
print("#"*30)
print("Voted_classifier accuracy percent:", (nltk.classify.accuracy(voted_classifier, test_set))*100)
print("#"*30)
print("Classification:", voted_classifier.classify(test_set[0][0]), "Confidence %:",voted_classifier.confidence(test_set[0][0])*100)
print("Classification:", voted_classifier.classify(test_set[1][0]), "Confidence %:",voted_classifier.confidence(test_set[1][0])*100)
print("Classification:", voted_classifier.classify(test_set[2][0]), "Confidence %:",voted_classifier.confidence(test_set[2][0])*100)
print("Classification:", voted_classifier.classify(test_set[3][0]), "Confidence %:",voted_classifier.confidence(test_set[3][0])*100)
print("Classification:", voted_classifier.classify(test_set[4][0]), "Confidence %:",voted_classifier.confidence(test_set[4][0])*100)
print("Classification:", voted_classifier.classify(test_set[5][0]), "Confidence %:",voted_classifier.confidence(test_set[5][0])*100)
|
TangHao1987/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/utils/dictconfig.py
|
335
|
# This is a copy of the Python logging.config.dictconfig module,
# reproduced with permission. It is provided here for backwards
# compatibility for Python versions prior to 2.7.
#
# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import logging.handlers
import re
import sys
import types
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
#
# This function is defined in logging only in recent versions of Python
#
try:
from logging import _checkLevel
except ImportError:
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in logging._levelNames:
raise ValueError('Unknown level: %r' % level)
rv = logging._levelNames[level]
else:
raise TypeError('Level not an integer or a '
'valid string: %r' % level)
return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = __import__
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, basestring): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
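# Illustrative conversions performed by convert() above (the config keys are
# placeholders): 'ext://sys.stderr' resolves to the sys.stderr object via
# ext_convert(), while 'cfg://handlers.console.level' is looked up as
# config['handlers']['console']['level'] via cfg_convert().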
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
# incremental handler config only if handler name
# ties in to logging._handlers (Python 2.7)
if sys.version_info[:2] == (2, 7):
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(_checkLevel(level))
except StandardError, e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except StandardError, e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except StandardError, e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except StandardError, e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except StandardError, e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except StandardError, e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = root.manager.loggerDict.keys()
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name)
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
i = i + 1 # look at the entry after name
while (i < num_existing) and\
(existing[i][:pflen] == prefixed):
child_loggers.append(existing[i])
i = i + 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except StandardError, e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
elif disable_existing:
logger.disabled = True
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except StandardError, e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError, te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
result = logging.Formatter(fmt, dfmt)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except StandardError, e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except StandardError, e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
c = self.resolve(c)
factory = c
else:
klass = self.resolve(config.pop('class'))
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
config['target'] = self.config['handlers'][config['target']]
except StandardError, e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError, te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(_checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except StandardError, e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(_checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
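# Illustrative usage sketch (not part of the original module): a minimal config
# dictionary accepted by dictConfig() above. 'ext://sys.stdout' exercises the
# ext:// converter (resolved via resolve()), while a value such as
# 'cfg://handlers.console' would exercise the cfg:// converter, which looks up
# a path inside this same dictionary. All names below are arbitrary examples.
#
# LOGGING = {
#     'version': 1,
#     'formatters': {
#         'brief': {'format': '%(levelname)s:%(name)s:%(message)s'},
#     },
#     'handlers': {
#         'console': {
#             'class': 'logging.StreamHandler',
#             'formatter': 'brief',
#             'level': 'INFO',
#             'stream': 'ext://sys.stdout',
#         },
#     },
#     'root': {'handlers': ['console'], 'level': 'DEBUG'},
# }
# dictConfig(LOGGING)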
|
k11a/snmptablemixer
|
refs/heads/master
|
pysnmp/smi/mibs/instances/__SNMP-MPD-MIB.py
|
2
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pysnmp.sf.net/license.html
#
MibScalarInstance, = mibBuilder.importSymbols('SNMPv2-SMI', 'MibScalarInstance')
(snmpUnknownSecurityModels,
snmpInvalidMsgs,
snmpUnknownPDUHandlers) = mibBuilder.importSymbols(
'SNMP-MPD-MIB',
'snmpUnknownSecurityModels',
'snmpInvalidMsgs',
'snmpUnknownPDUHandlers',
)
__snmpUnknownSecurityModels = MibScalarInstance(snmpUnknownSecurityModels.name, (0,),
snmpUnknownSecurityModels.syntax.clone(0))
__snmpInvalidMsgs = MibScalarInstance(snmpInvalidMsgs.name, (0,), snmpInvalidMsgs.syntax.clone(0))
__snmpUnknownPDUHandlers = MibScalarInstance(snmpUnknownPDUHandlers.name, (0,), snmpUnknownPDUHandlers.syntax.clone(0))
mibBuilder.exportSymbols(
'__SNMP-MPD-MIB',
snmpUnknownSecurityModels=__snmpUnknownSecurityModels,
snmpInvalidMsgs=__snmpInvalidMsgs,
snmpUnknownPDUHandlers=__snmpUnknownPDUHandlers
)
|
axbaretto/beam
|
refs/heads/master
|
sdks/python/.tox/lint/lib/python2.7/site-packages/google/protobuf/unittest_import_pb2.py
|
35
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/unittest_import.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import unittest_import_public_pb2 as google_dot_protobuf_dot_unittest__import__public__pb2
from google.protobuf.unittest_import_public_pb2 import *
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/unittest_import.proto',
package='protobuf_unittest_import',
syntax='proto2',
serialized_pb=_b('\n%google/protobuf/unittest_import.proto\x12\x18protobuf_unittest_import\x1a,google/protobuf/unittest_import_public.proto\"\x1a\n\rImportMessage\x12\t\n\x01\x64\x18\x01 \x01(\x05*<\n\nImportEnum\x12\x0e\n\nIMPORT_FOO\x10\x07\x12\x0e\n\nIMPORT_BAR\x10\x08\x12\x0e\n\nIMPORT_BAZ\x10\t*1\n\x10ImportEnumForMap\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x46OO\x10\x01\x12\x07\n\x03\x42\x41R\x10\x02\x42\x1f\n\x18\x63om.google.protobuf.testH\x01\xf8\x01\x01P\x00')
,
dependencies=[google_dot_protobuf_dot_unittest__import__public__pb2.DESCRIPTOR,],
public_dependencies=[google_dot_protobuf_dot_unittest__import__public__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_IMPORTENUM = _descriptor.EnumDescriptor(
name='ImportEnum',
full_name='protobuf_unittest_import.ImportEnum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IMPORT_FOO', index=0, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMPORT_BAR', index=1, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMPORT_BAZ', index=2, number=9,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=141,
serialized_end=201,
)
_sym_db.RegisterEnumDescriptor(_IMPORTENUM)
ImportEnum = enum_type_wrapper.EnumTypeWrapper(_IMPORTENUM)
_IMPORTENUMFORMAP = _descriptor.EnumDescriptor(
name='ImportEnumForMap',
full_name='protobuf_unittest_import.ImportEnumForMap',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FOO', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BAR', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=203,
serialized_end=252,
)
_sym_db.RegisterEnumDescriptor(_IMPORTENUMFORMAP)
ImportEnumForMap = enum_type_wrapper.EnumTypeWrapper(_IMPORTENUMFORMAP)
IMPORT_FOO = 7
IMPORT_BAR = 8
IMPORT_BAZ = 9
UNKNOWN = 0
FOO = 1
BAR = 2
_IMPORTMESSAGE = _descriptor.Descriptor(
name='ImportMessage',
full_name='protobuf_unittest_import.ImportMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='d', full_name='protobuf_unittest_import.ImportMessage.d', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=139,
)
DESCRIPTOR.message_types_by_name['ImportMessage'] = _IMPORTMESSAGE
DESCRIPTOR.enum_types_by_name['ImportEnum'] = _IMPORTENUM
DESCRIPTOR.enum_types_by_name['ImportEnumForMap'] = _IMPORTENUMFORMAP
ImportMessage = _reflection.GeneratedProtocolMessageType('ImportMessage', (_message.Message,), dict(
DESCRIPTOR = _IMPORTMESSAGE,
__module__ = 'google.protobuf.unittest_import_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest_import.ImportMessage)
))
_sym_db.RegisterMessage(ImportMessage)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.google.protobuf.testH\001\370\001\001'))
# @@protoc_insertion_point(module_scope)
|
ProfessionalIT/professionalit-webiste
|
refs/heads/master
|
sdk/google_appengine/lib/django-1.4/django/contrib/databrowse/sites.py
|
84
|
from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class DatabrowsePlugin(object):
def urls(self, plugin_name, easy_instance_field):
"""
Given an EasyInstanceField object, returns a list of URLs for this
plugin's views of this object. These URLs should be absolute.
Returns None if the EasyInstanceField object doesn't get a
list of plugin-specific URLs.
"""
return None
def model_index_html(self, request, model, site):
"""
Returns a snippet of HTML to include on the model index page.
"""
return ''
def model_view(self, request, model_databrowse, url):
"""
Handles main URL routing for a plugin's model-specific pages.
"""
raise NotImplementedError
class ModelDatabrowse(object):
plugins = {}
def __init__(self, model, site):
self.model = model
self.site = site
def root(self, request, url):
"""
Handles main URL routing for the databrowse app.
`url` is the remainder of the URL -- e.g. 'objects/3'.
"""
# Delegate to the appropriate method, based on the URL.
if url is None:
return self.main_view(request)
try:
plugin_name, rest_of_url = url.split('/', 1)
except ValueError: # need more than 1 value to unpack
plugin_name, rest_of_url = url, None
try:
plugin = self.plugins[plugin_name]
except KeyError:
raise http.Http404('A plugin with the requested name does not exist.')
return plugin.model_view(request, self, rest_of_url)
def main_view(self, request):
easy_model = EasyModel(self.site, self.model)
html_snippets = mark_safe(u'\n'.join([p.model_index_html(request, self.model, self.site) for p in self.plugins.values()]))
return render_to_response('databrowse/model_detail.html', {
'model': easy_model,
'root_url': self.site.root_url,
'plugin_html': html_snippets,
})
class DatabrowseSite(object):
def __init__(self):
self.registry = {} # model_class -> databrowse_class
self.root_url = None
def register(self, *model_list, **options):
"""
Registers the given model(s) with the given databrowse site.
The model(s) should be Model classes, not instances.
If a databrowse class isn't given, it will use DefaultModelDatabrowse
(the default databrowse options).
If a model is already registered, this will raise AlreadyRegistered.
"""
databrowse_class = options.pop('databrowse_class', DefaultModelDatabrowse)
for model in model_list:
if model in self.registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
self.registry[model] = databrowse_class
def unregister(self, *model_list):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
for model in model_list:
if model not in self.registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self.registry[model]
def root(self, request, url):
"""
Handles main URL routing for the databrowse app.
`url` is the remainder of the URL -- e.g. 'comments/comment/'.
"""
self.root_url = request.path[:len(request.path) - len(url)]
url = url.rstrip('/') # Trim trailing slash, if it exists.
if url == '':
return self.index(request)
elif '/' in url:
return self.model_page(request, *url.split('/', 2))
raise http.Http404('The requested databrowse page does not exist.')
def index(self, request):
m_list = [EasyModel(self, m) for m in self.registry.keys()]
return render_to_response('databrowse/homepage.html', {'model_list': m_list, 'root_url': self.root_url})
def model_page(self, request, app_label, model_name, rest_of_url=None):
"""
Handles the model-specific functionality of the databrowse site, delegating
to the appropriate ModelDatabrowse class.
"""
model = models.get_model(app_label, model_name)
if model is None:
raise http.Http404("App %r, model %r, not found." % (app_label, model_name))
try:
databrowse_class = self.registry[model]
except KeyError:
raise http.Http404("This model exists but has not been registered with databrowse.")
return databrowse_class(model, self).root(request, rest_of_url)
site = DatabrowseSite()
from django.contrib.databrowse.plugins.calendars import CalendarPlugin
from django.contrib.databrowse.plugins.objects import ObjectDetailPlugin
from django.contrib.databrowse.plugins.fieldchoices import FieldChoicePlugin
class DefaultModelDatabrowse(ModelDatabrowse):
plugins = {'objects': ObjectDetailPlugin(), 'calendars': CalendarPlugin(), 'fields': FieldChoicePlugin()}
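# Illustrative usage sketch (not part of the original module): registering a
# model with the module-level `site` defined above and wiring it into a
# URLconf. `MyModel` is a placeholder model name.
#
#   from django.contrib import databrowse
#   databrowse.site.register(MyModel)
#   # in urls.py:
#   #   (r'^databrowse/(.*)', databrowse.site.root),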
|
RudoCris/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/admin/networks/subnets/__init__.py
|
12133432
| |
crcresearch/osf.io
|
refs/heads/develop
|
admin/nodes/__init__.py
|
12133432
| |
gadsbyfly/PyBioMed
|
refs/heads/master
|
PyBioMed/test/__init__.py
|
12133432
| |
leeon/annotated-django
|
refs/heads/note
|
django/conf/locale/zh_TW/__init__.py
|
12133432
| |
Johnetordoff/osf.io
|
refs/heads/develop
|
addons/owncloud/views.py
|
9
|
"""Views for the node settings page."""
# -*- coding: utf-8 -*-
from rest_framework import status as http_status
from django.core.exceptions import ValidationError
from furl import furl
import requests
from flask import request
from framework.auth.decorators import must_be_logged_in
from addons.base import generic_views
from osf.models import ExternalAccount
from website.project.decorators import (
must_have_addon)
import owncloud
from addons.owncloud.models import OwnCloudProvider
from addons.owncloud.serializer import OwnCloudSerializer
from addons.owncloud import settings
SHORT_NAME = 'owncloud'
FULL_NAME = 'OwnCloud'
owncloud_account_list = generic_views.account_list(
SHORT_NAME,
OwnCloudSerializer
)
owncloud_import_auth = generic_views.import_auth(
SHORT_NAME,
OwnCloudSerializer
)
owncloud_deauthorize_node = generic_views.deauthorize_node(
SHORT_NAME
)
## Config ##
@must_be_logged_in
def owncloud_add_user_account(auth, **kwargs):
"""
Verifies new external account credentials and adds to user's list
This view expects `host`, `username` and `password` fields in the JSON
body of the request.
"""
# Ensure that ownCloud uses https
host_url = request.json.get('host')
host = furl()
host.host = host_url.rstrip('/').replace('https://', '').replace('http://', '')
host.scheme = 'https'
username = request.json.get('username')
password = request.json.get('password')
try:
oc = owncloud.Client(host.url, verify_certs=settings.USE_SSL)
oc.login(username, password)
oc.logout()
except requests.exceptions.ConnectionError:
return {
'message': 'Invalid ownCloud server.'
}, http_status.HTTP_400_BAD_REQUEST
except owncloud.owncloud.HTTPResponseError:
return {
'message': 'ownCloud Login failed.'
}, http_status.HTTP_401_UNAUTHORIZED
provider = OwnCloudProvider(account=None, host=host.url,
username=username, password=password)
try:
provider.account.save()
except ValidationError:
# ... or get the old one
provider.account = ExternalAccount.objects.get(
provider=provider.short_name,
provider_id='{}:{}'.format(host.url, username).lower()
)
if provider.account.oauth_key != password:
provider.account.oauth_key = password
provider.account.save()
user = auth.user
if not user.external_accounts.filter(id=provider.account.id).exists():
user.external_accounts.add(provider.account)
user.get_or_add_addon('owncloud', auth=auth)
user.save()
return {}
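# Illustrative example (not part of the original module): the JSON body the
# view above expects; the host, username and password values are placeholders.
#
#   {"host": "https://owncloud.example.com",
#    "username": "alice",
#    "password": "correct horse battery staple"}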
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
def owncloud_folder_list(node_addon, user_addon, **kwargs):
""" Returns all the subsequent folders under the folder id passed.
Not easily generalizable due to `path` kwarg.
"""
path = request.args.get('path')
return node_addon.get_folders(path=path)
def _set_folder(node_addon, folder, auth):
node_addon.set_folder(folder['path'], auth=auth)
node_addon.save()
owncloud_set_config = generic_views.set_config(
SHORT_NAME,
FULL_NAME,
OwnCloudSerializer,
_set_folder
)
owncloud_get_config = generic_views.get_config(
SHORT_NAME,
OwnCloudSerializer
)
|
rspavel/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/mirdeep2/package.py
|
5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
from os import chmod
import glob
class Mirdeep2(Package):
"""miRDeep2 is a completely overhauled tool which discovers microRNA genes
by analyzing sequenced RNAs."""
homepage = "https://www.mdc-berlin.de/8551903/en/"
url = "https://www.mdc-berlin.de/system/files/migrated_files/fiona/mirdeep2_0_0_8.zip"
version('0.0.8', sha256='eeb24ee1b0e76b49ee2f542da3ee7d46d163c80b152709e963492b79f4fca1b8')
depends_on('perl', type=('build', 'run'))
depends_on('perl-pdf-api2', type=('build', 'run'))
depends_on('bowtie')
depends_on('viennarna')
depends_on('squid')
depends_on('randfold')
def url_for_version(self, version):
url = 'https://www.mdc-berlin.de/system/files/migrated_files/fiona/mirdeep2_{0}.zip'
return url.format(version.underscored)
def patch(self):
with working_dir('src'):
files = glob.iglob("*.pl")
for file in files:
chmod(file, 0o755)
change = FileFilter(file)
change.filter('usr/bin/perl', 'usr/bin/env perl')
change.filter('perl -W', 'perl')
change.filter("../Rfam_for_miRDeep.fa",
"Rfam_for_miRDeep.fa")
change.filter("../install_successful",
"install_successful")
def install(self, spec, prefix):
install_tree('src', prefix.bin)
install('Rfam_for_miRDeep.fa', prefix.bin)
        # miRDeep looks for the install_successful file to check if you used
        # their automated install script, which we'll just be kind of hacking
        # around
touch(prefix.bin.install_successful)
|
manikishan/fosswebsite
|
refs/heads/master
|
projects/urls.py
|
8
|
# created by Navaneeth s, navisk13@gmail.com
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from projects.views import *
urlpatterns = [
url(r'^$', ProjectListView.as_view(), name='project'),
url(r'^(?P<pk>[0-9]+)/$', ProjectDetailView.as_view(), name='project_detail'),
url(r'^create/$', login_required(ProjectCreateView.as_view()), name='project_create'),
url(r'^(?P<pk>[0-9]+)/add-language/$', login_required(LanguageCreateView.as_view()), name='language_create'),
url(r'^(?P<pk>[0-9]+)/add-image/$', login_required(ProjectScreenShotCreateView.as_view()), name='image_create'),
url(r'^(?P<pk>[0-9]+)/gallery/$', ProjectScreenShotListView.as_view(), name='image_list'),
url(r'^(?P<pk>[0-9]+)/update$', login_required(ProjectUpdateView.as_view()), name='project_update'),
url(r'^(?P<pk>[0-9]+)/delete/$', login_required(ProjectDeleteView.as_view()), name='project_delete'),
url(r'^project-member/(?P<pk>[0-9]+)/delete/$', login_required(ProjectMemberDeleteView.as_view()), name='project_member_delete'),
url(r'^language/(?P<pk>[0-9]+)/delete/$', login_required(LanguageDeleteView.as_view()), name='language_delete'),
url(r'^image/(?P<pk>[0-9]+)/delete/$', login_required(ProjectScreenShotDeleteView.as_view()), name='image_delete'),
]
|
QLGu/django-oscar
|
refs/heads/master
|
tests/unit/partner/availability_mixin_tests.py
|
63
|
from decimal import Decimal as D
from django.test import TestCase
import mock
from oscar.apps.partner import strategy, availability
class TestStockRequiredMixin(TestCase):
def setUp(self):
self.mixin = strategy.StockRequired()
self.product = mock.Mock()
self.stockrecord = mock.Mock()
self.stockrecord.price_excl_tax = D('12.00')
def test_returns_unavailable_without_stockrecord(self):
policy = self.mixin.availability_policy(
self.product, None)
self.assertIsInstance(policy, availability.Unavailable)
def test_returns_available_when_product_class_doesnt_track_stock(self):
product_class = mock.Mock(track_stock=False)
self.product.get_product_class = mock.Mock(return_value=product_class)
policy = self.mixin.availability_policy(
self.product, self.stockrecord)
self.assertIsInstance(policy, availability.Available)
def test_returns_stockrequired_when_product_class_does_track_stock(self):
product_class = mock.Mock(track_stock=True)
self.product.get_product_class = mock.Mock(return_value=product_class)
policy = self.mixin.availability_policy(
self.product, self.stockrecord)
self.assertIsInstance(policy, availability.StockRequired)
|
denisenkom/django
|
refs/heads/master
|
django/contrib/auth/tests/test_forms.py
|
5
|
from __future__ import unicode_literals
import os
import re
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.forms import (UserCreationForm, AuthenticationForm,
PasswordChangeForm, SetPasswordForm, UserChangeForm, PasswordResetForm,
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget)
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core import mail
from django.forms.fields import Field, CharField
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserCreationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(form.error_messages['duplicate_username'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(form.fields['username'].error_messages['invalid'])])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [force_text(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
def test_success(self):
# The success case.
data = {
'username': 'jsmith@example.com',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(repr(u), '<User: jsmith@example.com>')
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AuthenticationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})])
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows him to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError(_("This user is disallowed."))
raise forms.ValidationError(_("Sorry, nobody's allowed in."))
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
UserModel = get_user_model()
username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SetPasswordFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class PasswordChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors,
[force_text(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields),
['old_password', 'new_password1', 'new_password2'])
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
self.assertEqual(form['username'].errors,
[force_text(form.fields['username'].error_messages['invalid'])])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
        # UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super(MyUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
    def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class PasswordResetFormTest(TestCase):
fixtures = ['authtestdata.json']
def create_dummy_user(self):
"""creates a user and returns a tuple
(user_object, username, email)
"""
username = 'jsmith'
email = 'jsmith@example.com'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
    def test_nonexistent_email(self):
        # Test nonexistent email address. This should not fail because it would
        # expose information about registered users.
data = {'email': 'foo@bar.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
@override_settings(
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
)
def test_cleaned_data(self):
# Regression test
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
@override_settings(
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
)
def test_custom_email_subject(self):
data = {'email': 'testclient@example.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_bug_5605(self):
# bug #5605, preserve the case of the user name (before the @ in the
# email address) when creating a user.
user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
self.assertEqual(user.email, 'tesT@example.com')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
        # Tests that an inactive user cannot receive a password reset email.
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', 'test@example.com', 'test')
data = {"email": "test@example.com"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
@override_settings(
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
@override_settings(
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
)
def test_save_html_email_template_name(self):
"""
        Test the PasswordResetForm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(re.match(r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$', message.get_payload(1).get_payload()))
class ReadOnlyPasswordHashTest(TestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field._has_changed('aaa', 'bbb'))
|
ronakkhunt/kuma
|
refs/heads/master
|
vendor/packages/pygments/lexers/web.py
|
77
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.web
~~~~~~~~~~~~~~~~~~~
Just export previously exported lexers.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.html import HtmlLexer, DtdLexer, XmlLexer, XsltLexer, \
HamlLexer, ScamlLexer, JadeLexer
from pygments.lexers.css import CssLexer, SassLexer, ScssLexer
from pygments.lexers.javascript import JavascriptLexer, LiveScriptLexer, \
DartLexer, TypeScriptLexer, LassoLexer, ObjectiveJLexer, CoffeeScriptLexer
from pygments.lexers.actionscript import ActionScriptLexer, \
ActionScript3Lexer, MxmlLexer
from pygments.lexers.php import PhpLexer
from pygments.lexers.webmisc import DuelLexer, XQueryLexer, SlimLexer, QmlLexer
from pygments.lexers.data import JsonLexer
JSONLexer = JsonLexer # for backwards compatibility with Pygments 1.5
__all__ = []
|
Andr3iC/courtlistener
|
refs/heads/master
|
cleaning_scripts/make_slugs_for_citations_that_lack_them_243.py
|
5
|
import os
import sys
execfile('/etc/courtlistener')
sys.path.append(INSTALL_ROOT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from alert.lib.string_utils import trunc
from alert.search.models import Citation
from django.utils.text import slugify
from optparse import OptionParser
def fixer(simulate=False, verbose=False):
"""If a Citation lacks a slug, we make one for it."""
citations = Citation.objects.filter(slug=None)
for citation in citations:
if verbose:
print "Fixing %s" % citation
citation.slug = trunc(slugify(citation.case_name), 50)
if not simulate:
citation.save()
def main():
usage = "usage: %prog [--verbose] [---simulate]"
parser = OptionParser(usage)
parser.add_option('-v', '--verbose', action="store_true", dest='verbose',
default=False, help="Display log during execution")
parser.add_option('-s', '--simulate', action="store_true",
dest='simulate', default=False, help=("Simulate the corrections "
"without actually making them."))
(options, args) = parser.parse_args()
verbose = options.verbose
simulate = options.simulate
if simulate:
print "*******************************************"
print "* SIMULATE MODE - NO CHANGES WILL BE MADE *"
print "*******************************************"
return fixer(simulate, verbose)
if __name__ == '__main__':
main()
|
mbohlool/client-python
|
refs/heads/master
|
kubernetes/client/models/v1_replication_controller_spec.py
|
1
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ReplicationControllerSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'min_ready_seconds': 'int',
'replicas': 'int',
'selector': 'dict(str, str)',
'template': 'V1PodTemplateSpec'
}
attribute_map = {
'min_ready_seconds': 'minReadySeconds',
'replicas': 'replicas',
'selector': 'selector',
'template': 'template'
}
def __init__(self, min_ready_seconds=None, replicas=None, selector=None, template=None):
"""
V1ReplicationControllerSpec - a model defined in Swagger
"""
self._min_ready_seconds = None
self._replicas = None
self._selector = None
self._template = None
self.discriminator = None
if min_ready_seconds is not None:
self.min_ready_seconds = min_ready_seconds
if replicas is not None:
self.replicas = replicas
if selector is not None:
self.selector = selector
if template is not None:
self.template = template
@property
def min_ready_seconds(self):
"""
Gets the min_ready_seconds of this V1ReplicationControllerSpec.
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)
:return: The min_ready_seconds of this V1ReplicationControllerSpec.
:rtype: int
"""
return self._min_ready_seconds
@min_ready_seconds.setter
def min_ready_seconds(self, min_ready_seconds):
"""
Sets the min_ready_seconds of this V1ReplicationControllerSpec.
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)
:param min_ready_seconds: The min_ready_seconds of this V1ReplicationControllerSpec.
:type: int
"""
self._min_ready_seconds = min_ready_seconds
@property
def replicas(self):
"""
Gets the replicas of this V1ReplicationControllerSpec.
Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
:return: The replicas of this V1ReplicationControllerSpec.
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""
Sets the replicas of this V1ReplicationControllerSpec.
Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
:param replicas: The replicas of this V1ReplicationControllerSpec.
:type: int
"""
self._replicas = replicas
@property
def selector(self):
"""
Gets the selector of this V1ReplicationControllerSpec.
Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
:return: The selector of this V1ReplicationControllerSpec.
:rtype: dict(str, str)
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this V1ReplicationControllerSpec.
Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
:param selector: The selector of this V1ReplicationControllerSpec.
:type: dict(str, str)
"""
self._selector = selector
@property
def template(self):
"""
Gets the template of this V1ReplicationControllerSpec.
Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
:return: The template of this V1ReplicationControllerSpec.
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""
Sets the template of this V1ReplicationControllerSpec.
Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
:param template: The template of this V1ReplicationControllerSpec.
:type: V1PodTemplateSpec
"""
self._template = template
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ReplicationControllerSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
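# Illustrative usage sketch (not part of the generated client): constructing a
# spec and serializing it. The values are placeholders and the pod template is
# omitted for brevity.
#
#   spec = V1ReplicationControllerSpec(replicas=3, selector={'app': 'web'})
#   print(spec.to_dict())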
|
izonder/intellij-community
|
refs/heads/master
|
python/testData/resolve/multiFile/importPackageIntoSelfInit/mygame/display.py
|
249
|
def foo(): pass
|
slisson/intellij-community
|
refs/heads/master
|
python/helpers/pydev/_pydev_imps/_pydev_time.py
|
53
|
from time import *
try:
from gevent import monkey
saved = monkey.saved['time']
for key, val in saved.items():
globals()[key] = val
except:
pass
|
nwchandler/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/f5/bigip_facts.py
|
72
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2013, Matt Hite <mhite@hotmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_facts
short_description: Collect facts from F5 BIG-IP devices
description:
- Collect facts from F5 BIG-IP devices via iControl SOAP API
version_added: "1.6"
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- Requires BIG-IP software version >= 11.4
- F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
- Best run as a local_action in your playbook
- Tested with manager and above account privilege level
- C(provision) facts were added in 2.2
requirements:
- bigsuds
options:
session:
description:
- BIG-IP session support; may be useful to avoid concurrency
issues in certain circumstances.
required: false
default: true
choices: []
aliases: []
include:
description:
- Fact category or list of categories to collect
required: true
default: null
choices:
- address_class
- certificate
- client_ssl_profile
- device
- device_group
- interface
- key
- node
- pool
- provision
- rule
- self_ip
- software
- system_info
- traffic_group
- trunk
- virtual_address
- virtual_server
- vlan
aliases: []
filter:
description:
- Shell-style glob matching string used to filter fact keys. Not
applicable for software, provision, and system_info fact categories.
required: false
default: null
choices: []
aliases: []
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Collect BIG-IP facts
bigip_facts:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
include: "interface,vlan"
delegate_to: localhost
'''
try:
from suds import MethodNotFound, WebFault
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
import fnmatch
import re
import traceback
class F5(object):
"""F5 iControl class.
F5 BIG-IP iControl API class.
Attributes:
api: iControl API instance.
"""
def __init__(self, host, user, password, session=False, validate_certs=True, port=443):
self.api = bigip_api(host, user, password, validate_certs, port)
if session:
self.start_session()
def start_session(self):
self.api = self.api.with_session_id()
def get_api(self):
return self.api
def set_recursive_query_state(self, state):
self.api.System.Session.set_recursive_query_state(state)
def get_recursive_query_state(self):
return self.api.System.Session.get_recursive_query_state()
def enable_recursive_query_state(self):
self.set_recursive_query_state('STATE_ENABLED')
def disable_recursive_query_state(self):
self.set_recursive_query_state('STATE_DISABLED')
def set_active_folder(self, folder):
self.api.System.Session.set_active_folder(folder=folder)
def get_active_folder(self):
return self.api.System.Session.get_active_folder()
class Interfaces(object):
"""Interfaces class.
F5 BIG-IP interfaces class.
Attributes:
api: iControl API instance.
interfaces: A list of BIG-IP interface names.
"""
def __init__(self, api, regex=None):
self.api = api
self.interfaces = api.Networking.Interfaces.get_list()
if regex:
re_filter = re.compile(regex)
self.interfaces = filter(re_filter.search, self.interfaces)
def get_list(self):
return self.interfaces
def get_active_media(self):
return self.api.Networking.Interfaces.get_active_media(self.interfaces)
def get_actual_flow_control(self):
return self.api.Networking.Interfaces.get_actual_flow_control(self.interfaces)
def get_bundle_state(self):
return self.api.Networking.Interfaces.get_bundle_state(self.interfaces)
def get_description(self):
return self.api.Networking.Interfaces.get_description(self.interfaces)
def get_dual_media_state(self):
return self.api.Networking.Interfaces.get_dual_media_state(self.interfaces)
def get_enabled_state(self):
return self.api.Networking.Interfaces.get_enabled_state(self.interfaces)
def get_if_index(self):
return self.api.Networking.Interfaces.get_if_index(self.interfaces)
def get_learning_mode(self):
return self.api.Networking.Interfaces.get_learning_mode(self.interfaces)
def get_lldp_admin_status(self):
return self.api.Networking.Interfaces.get_lldp_admin_status(self.interfaces)
def get_lldp_tlvmap(self):
return self.api.Networking.Interfaces.get_lldp_tlvmap(self.interfaces)
def get_mac_address(self):
return self.api.Networking.Interfaces.get_mac_address(self.interfaces)
def get_media(self):
return self.api.Networking.Interfaces.get_media(self.interfaces)
def get_media_option(self):
return self.api.Networking.Interfaces.get_media_option(self.interfaces)
def get_media_option_sfp(self):
return self.api.Networking.Interfaces.get_media_option_sfp(self.interfaces)
def get_media_sfp(self):
return self.api.Networking.Interfaces.get_media_sfp(self.interfaces)
def get_media_speed(self):
return self.api.Networking.Interfaces.get_media_speed(self.interfaces)
def get_media_status(self):
return self.api.Networking.Interfaces.get_media_status(self.interfaces)
def get_mtu(self):
return self.api.Networking.Interfaces.get_mtu(self.interfaces)
def get_phy_master_slave_mode(self):
return self.api.Networking.Interfaces.get_phy_master_slave_mode(self.interfaces)
def get_prefer_sfp_state(self):
return self.api.Networking.Interfaces.get_prefer_sfp_state(self.interfaces)
def get_flow_control(self):
return self.api.Networking.Interfaces.get_requested_flow_control(self.interfaces)
def get_sflow_poll_interval(self):
return self.api.Networking.Interfaces.get_sflow_poll_interval(self.interfaces)
def get_sflow_poll_interval_global(self):
return self.api.Networking.Interfaces.get_sflow_poll_interval_global(self.interfaces)
def get_sfp_media_state(self):
return self.api.Networking.Interfaces.get_sfp_media_state(self.interfaces)
def get_stp_active_edge_port_state(self):
return self.api.Networking.Interfaces.get_stp_active_edge_port_state(self.interfaces)
def get_stp_enabled_state(self):
return self.api.Networking.Interfaces.get_stp_enabled_state(self.interfaces)
def get_stp_link_type(self):
return self.api.Networking.Interfaces.get_stp_link_type(self.interfaces)
def get_stp_protocol_detection_reset_state(self):
return self.api.Networking.Interfaces.get_stp_protocol_detection_reset_state(self.interfaces)
class SelfIPs(object):
"""Self IPs class.
F5 BIG-IP Self IPs class.
Attributes:
api: iControl API instance.
self_ips: List of self IPs.
"""
def __init__(self, api, regex=None):
self.api = api
self.self_ips = api.Networking.SelfIPV2.get_list()
if regex:
re_filter = re.compile(regex)
self.self_ips = filter(re_filter.search, self.self_ips)
def get_list(self):
return self.self_ips
def get_address(self):
return self.api.Networking.SelfIPV2.get_address(self.self_ips)
def get_allow_access_list(self):
return self.api.Networking.SelfIPV2.get_allow_access_list(self.self_ips)
def get_description(self):
return self.api.Networking.SelfIPV2.get_description(self.self_ips)
def get_enforced_firewall_policy(self):
return self.api.Networking.SelfIPV2.get_enforced_firewall_policy(self.self_ips)
def get_floating_state(self):
return self.api.Networking.SelfIPV2.get_floating_state(self.self_ips)
def get_fw_rule(self):
return self.api.Networking.SelfIPV2.get_fw_rule(self.self_ips)
def get_netmask(self):
return self.api.Networking.SelfIPV2.get_netmask(self.self_ips)
def get_staged_firewall_policy(self):
return self.api.Networking.SelfIPV2.get_staged_firewall_policy(self.self_ips)
def get_traffic_group(self):
return self.api.Networking.SelfIPV2.get_traffic_group(self.self_ips)
def get_vlan(self):
return self.api.Networking.SelfIPV2.get_vlan(self.self_ips)
def get_is_traffic_group_inherited(self):
return self.api.Networking.SelfIPV2.is_traffic_group_inherited(self.self_ips)
class Trunks(object):
"""Trunks class.
F5 BIG-IP trunks class.
Attributes:
api: iControl API instance.
trunks: List of trunks.
"""
def __init__(self, api, regex=None):
self.api = api
self.trunks = api.Networking.Trunk.get_list()
if regex:
re_filter = re.compile(regex)
self.trunks = filter(re_filter.search, self.trunks)
def get_list(self):
return self.trunks
def get_active_lacp_state(self):
return self.api.Networking.Trunk.get_active_lacp_state(self.trunks)
def get_configured_member_count(self):
return self.api.Networking.Trunk.get_configured_member_count(self.trunks)
def get_description(self):
return self.api.Networking.Trunk.get_description(self.trunks)
def get_distribution_hash_option(self):
return self.api.Networking.Trunk.get_distribution_hash_option(self.trunks)
def get_interface(self):
return self.api.Networking.Trunk.get_interface(self.trunks)
def get_lacp_enabled_state(self):
return self.api.Networking.Trunk.get_lacp_enabled_state(self.trunks)
def get_lacp_timeout_option(self):
return self.api.Networking.Trunk.get_lacp_timeout_option(self.trunks)
def get_link_selection_policy(self):
return self.api.Networking.Trunk.get_link_selection_policy(self.trunks)
def get_media_speed(self):
return self.api.Networking.Trunk.get_media_speed(self.trunks)
def get_media_status(self):
return self.api.Networking.Trunk.get_media_status(self.trunks)
def get_operational_member_count(self):
return self.api.Networking.Trunk.get_operational_member_count(self.trunks)
def get_stp_enabled_state(self):
return self.api.Networking.Trunk.get_stp_enabled_state(self.trunks)
def get_stp_protocol_detection_reset_state(self):
return self.api.Networking.Trunk.get_stp_protocol_detection_reset_state(self.trunks)
class Vlans(object):
"""Vlans class.
F5 BIG-IP Vlans class.
Attributes:
api: iControl API instance.
vlans: List of VLANs.
"""
def __init__(self, api, regex=None):
self.api = api
self.vlans = api.Networking.VLAN.get_list()
if regex:
re_filter = re.compile(regex)
self.vlans = filter(re_filter.search, self.vlans)
def get_list(self):
return self.vlans
def get_auto_lasthop(self):
return self.api.Networking.VLAN.get_auto_lasthop(self.vlans)
def get_cmp_hash_algorithm(self):
return self.api.Networking.VLAN.get_cmp_hash_algorithm(self.vlans)
def get_description(self):
return self.api.Networking.VLAN.get_description(self.vlans)
def get_dynamic_forwarding(self):
return self.api.Networking.VLAN.get_dynamic_forwarding(self.vlans)
def get_failsafe_action(self):
return self.api.Networking.VLAN.get_failsafe_action(self.vlans)
def get_failsafe_state(self):
return self.api.Networking.VLAN.get_failsafe_state(self.vlans)
def get_failsafe_timeout(self):
return self.api.Networking.VLAN.get_failsafe_timeout(self.vlans)
def get_if_index(self):
return self.api.Networking.VLAN.get_if_index(self.vlans)
def get_learning_mode(self):
return self.api.Networking.VLAN.get_learning_mode(self.vlans)
def get_mac_masquerade_address(self):
return self.api.Networking.VLAN.get_mac_masquerade_address(self.vlans)
def get_member(self):
return self.api.Networking.VLAN.get_member(self.vlans)
def get_mtu(self):
return self.api.Networking.VLAN.get_mtu(self.vlans)
def get_sflow_poll_interval(self):
return self.api.Networking.VLAN.get_sflow_poll_interval(self.vlans)
def get_sflow_poll_interval_global(self):
return self.api.Networking.VLAN.get_sflow_poll_interval_global(self.vlans)
def get_sflow_sampling_rate(self):
return self.api.Networking.VLAN.get_sflow_sampling_rate(self.vlans)
def get_sflow_sampling_rate_global(self):
return self.api.Networking.VLAN.get_sflow_sampling_rate_global(self.vlans)
def get_source_check_state(self):
return self.api.Networking.VLAN.get_source_check_state(self.vlans)
def get_true_mac_address(self):
return self.api.Networking.VLAN.get_true_mac_address(self.vlans)
def get_vlan_id(self):
return self.api.Networking.VLAN.get_vlan_id(self.vlans)
class Software(object):
"""Software class.
F5 BIG-IP software class.
Attributes:
api: iControl API instance.
"""
def __init__(self, api):
self.api = api
def get_all_software_status(self):
return self.api.System.SoftwareManagement.get_all_software_status()
class VirtualServers(object):
"""Virtual servers class.
F5 BIG-IP virtual servers class.
Attributes:
api: iControl API instance.
virtual_servers: List of virtual servers.
"""
def __init__(self, api, regex=None):
self.api = api
self.virtual_servers = api.LocalLB.VirtualServer.get_list()
if regex:
re_filter = re.compile(regex)
self.virtual_servers = filter(re_filter.search, self.virtual_servers)
def get_list(self):
return self.virtual_servers
def get_actual_hardware_acceleration(self):
return self.api.LocalLB.VirtualServer.get_actual_hardware_acceleration(self.virtual_servers)
def get_authentication_profile(self):
return self.api.LocalLB.VirtualServer.get_authentication_profile(self.virtual_servers)
def get_auto_lasthop(self):
return self.api.LocalLB.VirtualServer.get_auto_lasthop(self.virtual_servers)
def get_bw_controller_policy(self):
return self.api.LocalLB.VirtualServer.get_bw_controller_policy(self.virtual_servers)
def get_clone_pool(self):
return self.api.LocalLB.VirtualServer.get_clone_pool(self.virtual_servers)
def get_cmp_enable_mode(self):
return self.api.LocalLB.VirtualServer.get_cmp_enable_mode(self.virtual_servers)
def get_connection_limit(self):
return self.api.LocalLB.VirtualServer.get_connection_limit(self.virtual_servers)
def get_connection_mirror_state(self):
return self.api.LocalLB.VirtualServer.get_connection_mirror_state(self.virtual_servers)
def get_default_pool_name(self):
return self.api.LocalLB.VirtualServer.get_default_pool_name(self.virtual_servers)
def get_description(self):
return self.api.LocalLB.VirtualServer.get_description(self.virtual_servers)
def get_destination(self):
return self.api.LocalLB.VirtualServer.get_destination_v2(self.virtual_servers)
def get_enabled_state(self):
return self.api.LocalLB.VirtualServer.get_enabled_state(self.virtual_servers)
def get_enforced_firewall_policy(self):
return self.api.LocalLB.VirtualServer.get_enforced_firewall_policy(self.virtual_servers)
def get_fallback_persistence_profile(self):
return self.api.LocalLB.VirtualServer.get_fallback_persistence_profile(self.virtual_servers)
def get_fw_rule(self):
return self.api.LocalLB.VirtualServer.get_fw_rule(self.virtual_servers)
def get_gtm_score(self):
return self.api.LocalLB.VirtualServer.get_gtm_score(self.virtual_servers)
def get_last_hop_pool(self):
return self.api.LocalLB.VirtualServer.get_last_hop_pool(self.virtual_servers)
def get_nat64_state(self):
return self.api.LocalLB.VirtualServer.get_nat64_state(self.virtual_servers)
def get_object_status(self):
return self.api.LocalLB.VirtualServer.get_object_status(self.virtual_servers)
def get_persistence_profile(self):
return self.api.LocalLB.VirtualServer.get_persistence_profile(self.virtual_servers)
def get_profile(self):
return self.api.LocalLB.VirtualServer.get_profile(self.virtual_servers)
def get_protocol(self):
return self.api.LocalLB.VirtualServer.get_protocol(self.virtual_servers)
def get_rate_class(self):
return self.api.LocalLB.VirtualServer.get_rate_class(self.virtual_servers)
def get_rate_limit(self):
return self.api.LocalLB.VirtualServer.get_rate_limit(self.virtual_servers)
def get_rate_limit_destination_mask(self):
return self.api.LocalLB.VirtualServer.get_rate_limit_destination_mask(self.virtual_servers)
def get_rate_limit_mode(self):
return self.api.LocalLB.VirtualServer.get_rate_limit_mode(self.virtual_servers)
def get_rate_limit_source_mask(self):
return self.api.LocalLB.VirtualServer.get_rate_limit_source_mask(self.virtual_servers)
def get_related_rule(self):
return self.api.LocalLB.VirtualServer.get_related_rule(self.virtual_servers)
def get_rule(self):
return self.api.LocalLB.VirtualServer.get_rule(self.virtual_servers)
def get_security_log_profile(self):
return self.api.LocalLB.VirtualServer.get_security_log_profile(self.virtual_servers)
def get_snat_pool(self):
return self.api.LocalLB.VirtualServer.get_snat_pool(self.virtual_servers)
def get_snat_type(self):
return self.api.LocalLB.VirtualServer.get_snat_type(self.virtual_servers)
def get_source_address(self):
return self.api.LocalLB.VirtualServer.get_source_address(self.virtual_servers)
def get_source_address_translation_lsn_pool(self):
return self.api.LocalLB.VirtualServer.get_source_address_translation_lsn_pool(self.virtual_servers)
def get_source_address_translation_snat_pool(self):
return self.api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(self.virtual_servers)
def get_source_address_translation_type(self):
return self.api.LocalLB.VirtualServer.get_source_address_translation_type(self.virtual_servers)
def get_source_port_behavior(self):
return self.api.LocalLB.VirtualServer.get_source_port_behavior(self.virtual_servers)
def get_staged_firewall_policy(self):
return self.api.LocalLB.VirtualServer.get_staged_firewall_policy(self.virtual_servers)
def get_translate_address_state(self):
return self.api.LocalLB.VirtualServer.get_translate_address_state(self.virtual_servers)
def get_translate_port_state(self):
return self.api.LocalLB.VirtualServer.get_translate_port_state(self.virtual_servers)
def get_type(self):
return self.api.LocalLB.VirtualServer.get_type(self.virtual_servers)
def get_vlan(self):
return self.api.LocalLB.VirtualServer.get_vlan(self.virtual_servers)
def get_wildmask(self):
return self.api.LocalLB.VirtualServer.get_wildmask(self.virtual_servers)
class Pools(object):
"""Pools class.
F5 BIG-IP pools class.
Attributes:
api: iControl API instance.
pool_names: List of pool names.
"""
def __init__(self, api, regex=None):
self.api = api
self.pool_names = api.LocalLB.Pool.get_list()
if regex:
re_filter = re.compile(regex)
self.pool_names = filter(re_filter.search, self.pool_names)
def get_list(self):
return self.pool_names
def get_action_on_service_down(self):
return self.api.LocalLB.Pool.get_action_on_service_down(self.pool_names)
def get_active_member_count(self):
return self.api.LocalLB.Pool.get_active_member_count(self.pool_names)
def get_aggregate_dynamic_ratio(self):
return self.api.LocalLB.Pool.get_aggregate_dynamic_ratio(self.pool_names)
def get_allow_nat_state(self):
return self.api.LocalLB.Pool.get_allow_nat_state(self.pool_names)
def get_allow_snat_state(self):
return self.api.LocalLB.Pool.get_allow_snat_state(self.pool_names)
def get_client_ip_tos(self):
return self.api.LocalLB.Pool.get_client_ip_tos(self.pool_names)
def get_client_link_qos(self):
return self.api.LocalLB.Pool.get_client_link_qos(self.pool_names)
def get_description(self):
return self.api.LocalLB.Pool.get_description(self.pool_names)
def get_gateway_failsafe_device(self):
return self.api.LocalLB.Pool.get_gateway_failsafe_device(self.pool_names)
def get_ignore_persisted_weight_state(self):
return self.api.LocalLB.Pool.get_ignore_persisted_weight_state(self.pool_names)
def get_lb_method(self):
return self.api.LocalLB.Pool.get_lb_method(self.pool_names)
def get_member(self):
return self.api.LocalLB.Pool.get_member_v2(self.pool_names)
def get_minimum_active_member(self):
return self.api.LocalLB.Pool.get_minimum_active_member(self.pool_names)
def get_minimum_up_member(self):
return self.api.LocalLB.Pool.get_minimum_up_member(self.pool_names)
def get_minimum_up_member_action(self):
return self.api.LocalLB.Pool.get_minimum_up_member_action(self.pool_names)
def get_minimum_up_member_enabled_state(self):
return self.api.LocalLB.Pool.get_minimum_up_member_enabled_state(self.pool_names)
def get_monitor_association(self):
return self.api.LocalLB.Pool.get_monitor_association(self.pool_names)
def get_monitor_instance(self):
return self.api.LocalLB.Pool.get_monitor_instance(self.pool_names)
def get_object_status(self):
return self.api.LocalLB.Pool.get_object_status(self.pool_names)
def get_profile(self):
return self.api.LocalLB.Pool.get_profile(self.pool_names)
def get_queue_depth_limit(self):
return self.api.LocalLB.Pool.get_queue_depth_limit(self.pool_names)
def get_queue_on_connection_limit_state(self):
return self.api.LocalLB.Pool.get_queue_on_connection_limit_state(self.pool_names)
def get_queue_time_limit(self):
return self.api.LocalLB.Pool.get_queue_time_limit(self.pool_names)
def get_reselect_tries(self):
return self.api.LocalLB.Pool.get_reselect_tries(self.pool_names)
def get_server_ip_tos(self):
return self.api.LocalLB.Pool.get_server_ip_tos(self.pool_names)
def get_server_link_qos(self):
return self.api.LocalLB.Pool.get_server_link_qos(self.pool_names)
def get_simple_timeout(self):
return self.api.LocalLB.Pool.get_simple_timeout(self.pool_names)
def get_slow_ramp_time(self):
return self.api.LocalLB.Pool.get_slow_ramp_time(self.pool_names)
class Devices(object):
"""Devices class.
F5 BIG-IP devices class.
Attributes:
api: iControl API instance.
devices: List of devices.
"""
def __init__(self, api, regex=None):
self.api = api
self.devices = api.Management.Device.get_list()
if regex:
re_filter = re.compile(regex)
self.devices = filter(re_filter.search, self.devices)
def get_list(self):
return self.devices
def get_active_modules(self):
return self.api.Management.Device.get_active_modules(self.devices)
def get_base_mac_address(self):
return self.api.Management.Device.get_base_mac_address(self.devices)
def get_blade_addresses(self):
return self.api.Management.Device.get_blade_addresses(self.devices)
def get_build(self):
return self.api.Management.Device.get_build(self.devices)
def get_chassis_id(self):
return self.api.Management.Device.get_chassis_id(self.devices)
def get_chassis_type(self):
return self.api.Management.Device.get_chassis_type(self.devices)
def get_comment(self):
return self.api.Management.Device.get_comment(self.devices)
def get_configsync_address(self):
return self.api.Management.Device.get_configsync_address(self.devices)
def get_contact(self):
return self.api.Management.Device.get_contact(self.devices)
def get_description(self):
return self.api.Management.Device.get_description(self.devices)
def get_edition(self):
return self.api.Management.Device.get_edition(self.devices)
def get_failover_state(self):
return self.api.Management.Device.get_failover_state(self.devices)
def get_local_device(self):
return self.api.Management.Device.get_local_device()
def get_hostname(self):
return self.api.Management.Device.get_hostname(self.devices)
def get_inactive_modules(self):
return self.api.Management.Device.get_inactive_modules(self.devices)
def get_location(self):
return self.api.Management.Device.get_location(self.devices)
def get_management_address(self):
return self.api.Management.Device.get_management_address(self.devices)
def get_marketing_name(self):
return self.api.Management.Device.get_marketing_name(self.devices)
def get_multicast_address(self):
return self.api.Management.Device.get_multicast_address(self.devices)
def get_optional_modules(self):
return self.api.Management.Device.get_optional_modules(self.devices)
def get_platform_id(self):
return self.api.Management.Device.get_platform_id(self.devices)
def get_primary_mirror_address(self):
return self.api.Management.Device.get_primary_mirror_address(self.devices)
def get_product(self):
return self.api.Management.Device.get_product(self.devices)
def get_secondary_mirror_address(self):
return self.api.Management.Device.get_secondary_mirror_address(self.devices)
def get_software_version(self):
return self.api.Management.Device.get_software_version(self.devices)
def get_timelimited_modules(self):
return self.api.Management.Device.get_timelimited_modules(self.devices)
def get_timezone(self):
return self.api.Management.Device.get_timezone(self.devices)
def get_unicast_addresses(self):
return self.api.Management.Device.get_unicast_addresses(self.devices)
class DeviceGroups(object):
"""Device groups class.
F5 BIG-IP device groups class.
Attributes:
api: iControl API instance.
device_groups: List of device groups.
"""
def __init__(self, api, regex=None):
self.api = api
self.device_groups = api.Management.DeviceGroup.get_list()
if regex:
re_filter = re.compile(regex)
self.device_groups = filter(re_filter.search, self.device_groups)
def get_list(self):
return self.device_groups
def get_all_preferred_active(self):
return self.api.Management.DeviceGroup.get_all_preferred_active(self.device_groups)
def get_autosync_enabled_state(self):
return self.api.Management.DeviceGroup.get_autosync_enabled_state(self.device_groups)
def get_description(self):
return self.api.Management.DeviceGroup.get_description(self.device_groups)
def get_device(self):
return self.api.Management.DeviceGroup.get_device(self.device_groups)
def get_full_load_on_sync_state(self):
return self.api.Management.DeviceGroup.get_full_load_on_sync_state(self.device_groups)
def get_incremental_config_sync_size_maximum(self):
return self.api.Management.DeviceGroup.get_incremental_config_sync_size_maximum(self.device_groups)
def get_network_failover_enabled_state(self):
return self.api.Management.DeviceGroup.get_network_failover_enabled_state(self.device_groups)
def get_sync_status(self):
return self.api.Management.DeviceGroup.get_sync_status(self.device_groups)
def get_type(self):
return self.api.Management.DeviceGroup.get_type(self.device_groups)
class TrafficGroups(object):
"""Traffic groups class.
F5 BIG-IP traffic groups class.
Attributes:
api: iControl API instance.
traffic_groups: List of traffic groups.
"""
def __init__(self, api, regex=None):
self.api = api
self.traffic_groups = api.Management.TrafficGroup.get_list()
if regex:
re_filter = re.compile(regex)
self.traffic_groups = filter(re_filter.search, self.traffic_groups)
def get_list(self):
return self.traffic_groups
def get_auto_failback_enabled_state(self):
return self.api.Management.TrafficGroup.get_auto_failback_enabled_state(self.traffic_groups)
def get_auto_failback_time(self):
return self.api.Management.TrafficGroup.get_auto_failback_time(self.traffic_groups)
def get_default_device(self):
return self.api.Management.TrafficGroup.get_default_device(self.traffic_groups)
def get_description(self):
return self.api.Management.TrafficGroup.get_description(self.traffic_groups)
def get_ha_load_factor(self):
return self.api.Management.TrafficGroup.get_ha_load_factor(self.traffic_groups)
def get_ha_order(self):
return self.api.Management.TrafficGroup.get_ha_order(self.traffic_groups)
def get_is_floating(self):
return self.api.Management.TrafficGroup.get_is_floating(self.traffic_groups)
def get_mac_masquerade_address(self):
return self.api.Management.TrafficGroup.get_mac_masquerade_address(self.traffic_groups)
def get_unit_id(self):
return self.api.Management.TrafficGroup.get_unit_id(self.traffic_groups)
class Rules(object):
"""Rules class.
F5 BIG-IP iRules class.
Attributes:
api: iControl API instance.
rules: List of iRules.
"""
def __init__(self, api, regex=None):
self.api = api
self.rules = api.LocalLB.Rule.get_list()
if regex:
re_filter = re.compile(regex)
            self.rules = filter(re_filter.search, self.rules)
def get_list(self):
return self.rules
def get_description(self):
return self.api.LocalLB.Rule.get_description(rule_names=self.rules)
def get_ignore_vertification(self):
return self.api.LocalLB.Rule.get_ignore_vertification(rule_names=self.rules)
def get_verification_status(self):
return self.api.LocalLB.Rule.get_verification_status_v2(rule_names=self.rules)
def get_definition(self):
return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)]
class Nodes(object):
"""Nodes class.
F5 BIG-IP nodes class.
Attributes:
api: iControl API instance.
nodes: List of nodes.
"""
def __init__(self, api, regex=None):
self.api = api
self.nodes = api.LocalLB.NodeAddressV2.get_list()
if regex:
re_filter = re.compile(regex)
self.nodes = filter(re_filter.search, self.nodes)
def get_list(self):
return self.nodes
def get_address(self):
return self.api.LocalLB.NodeAddressV2.get_address(nodes=self.nodes)
def get_connection_limit(self):
return self.api.LocalLB.NodeAddressV2.get_connection_limit(nodes=self.nodes)
def get_description(self):
return self.api.LocalLB.NodeAddressV2.get_description(nodes=self.nodes)
def get_dynamic_ratio(self):
return self.api.LocalLB.NodeAddressV2.get_dynamic_ratio_v2(nodes=self.nodes)
def get_monitor_instance(self):
return self.api.LocalLB.NodeAddressV2.get_monitor_instance(nodes=self.nodes)
def get_monitor_rule(self):
return self.api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=self.nodes)
def get_monitor_status(self):
return self.api.LocalLB.NodeAddressV2.get_monitor_status(nodes=self.nodes)
def get_object_status(self):
return self.api.LocalLB.NodeAddressV2.get_object_status(nodes=self.nodes)
def get_rate_limit(self):
return self.api.LocalLB.NodeAddressV2.get_rate_limit(nodes=self.nodes)
def get_ratio(self):
return self.api.LocalLB.NodeAddressV2.get_ratio(nodes=self.nodes)
def get_session_status(self):
return self.api.LocalLB.NodeAddressV2.get_session_status(nodes=self.nodes)
class VirtualAddresses(object):
"""Virtual addresses class.
F5 BIG-IP virtual addresses class.
Attributes:
api: iControl API instance.
virtual_addresses: List of virtual addresses.
"""
def __init__(self, api, regex=None):
self.api = api
self.virtual_addresses = api.LocalLB.VirtualAddressV2.get_list()
if regex:
re_filter = re.compile(regex)
self.virtual_addresses = filter(re_filter.search, self.virtual_addresses)
def get_list(self):
return self.virtual_addresses
def get_address(self):
return self.api.LocalLB.VirtualAddressV2.get_address(self.virtual_addresses)
def get_arp_state(self):
return self.api.LocalLB.VirtualAddressV2.get_arp_state(self.virtual_addresses)
def get_auto_delete_state(self):
return self.api.LocalLB.VirtualAddressV2.get_auto_delete_state(self.virtual_addresses)
def get_connection_limit(self):
return self.api.LocalLB.VirtualAddressV2.get_connection_limit(self.virtual_addresses)
def get_description(self):
return self.api.LocalLB.VirtualAddressV2.get_description(self.virtual_addresses)
def get_enabled_state(self):
return self.api.LocalLB.VirtualAddressV2.get_enabled_state(self.virtual_addresses)
def get_icmp_echo_state(self):
return self.api.LocalLB.VirtualAddressV2.get_icmp_echo_state(self.virtual_addresses)
def get_is_floating_state(self):
return self.api.LocalLB.VirtualAddressV2.get_is_floating_state(self.virtual_addresses)
def get_netmask(self):
return self.api.LocalLB.VirtualAddressV2.get_netmask(self.virtual_addresses)
def get_object_status(self):
return self.api.LocalLB.VirtualAddressV2.get_object_status(self.virtual_addresses)
def get_route_advertisement_state(self):
return self.api.LocalLB.VirtualAddressV2.get_route_advertisement_state(self.virtual_addresses)
def get_traffic_group(self):
return self.api.LocalLB.VirtualAddressV2.get_traffic_group(self.virtual_addresses)
class AddressClasses(object):
"""Address group/class class.
F5 BIG-IP address group/class class.
Attributes:
api: iControl API instance.
address_classes: List of address classes.
"""
def __init__(self, api, regex=None):
self.api = api
self.address_classes = api.LocalLB.Class.get_address_class_list()
if regex:
re_filter = re.compile(regex)
self.address_classes = filter(re_filter.search, self.address_classes)
def get_list(self):
return self.address_classes
def get_address_class(self):
key = self.api.LocalLB.Class.get_address_class(self.address_classes)
value = self.api.LocalLB.Class.get_address_class_member_data_value(key)
result = list(map(zip, [x['members'] for x in key], value))
return result
def get_description(self):
return self.api.LocalLB.Class.get_description(self.address_classes)
class Certificates(object):
"""Certificates class.
F5 BIG-IP certificates class.
Attributes:
api: iControl API instance.
certificates: List of certificate identifiers.
certificate_list: List of certificate information structures.
"""
def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"):
self.api = api
self.certificate_list = api.Management.KeyCertificate.get_certificate_list(mode=mode)
self.certificates = [x['certificate']['cert_info']['id'] for x in self.certificate_list]
if regex:
re_filter = re.compile(regex)
self.certificates = filter(re_filter.search, self.certificates)
self.certificate_list = [x for x in self.certificate_list if x['certificate']['cert_info']['id'] in self.certificates]
def get_list(self):
return self.certificates
def get_certificate_list(self):
return self.certificate_list
class Keys(object):
"""Keys class.
F5 BIG-IP keys class.
Attributes:
api: iControl API instance.
keys: List of key identifiers.
key_list: List of key information structures.
"""
def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"):
self.api = api
self.key_list = api.Management.KeyCertificate.get_key_list(mode=mode)
self.keys = [x['key_info']['id'] for x in self.key_list]
if regex:
re_filter = re.compile(regex)
self.keys = filter(re_filter.search, self.keys)
self.key_list = [x for x in self.key_list if x['key_info']['id'] in self.keys]
def get_list(self):
return self.keys
def get_key_list(self):
return self.key_list
class ProfileClientSSL(object):
"""Client SSL profiles class.
F5 BIG-IP client SSL profiles class.
Attributes:
api: iControl API instance.
profiles: List of client SSL profiles.
"""
def __init__(self, api, regex=None):
self.api = api
self.profiles = api.LocalLB.ProfileClientSSL.get_list()
if regex:
re_filter = re.compile(regex)
self.profiles = filter(re_filter.search, self.profiles)
def get_list(self):
return self.profiles
def get_alert_timeout(self):
return self.api.LocalLB.ProfileClientSSL.get_alert_timeout(self.profiles)
def get_allow_nonssl_state(self):
return self.api.LocalLB.ProfileClientSSL.get_allow_nonssl_state(self.profiles)
def get_authenticate_depth(self):
return self.api.LocalLB.ProfileClientSSL.get_authenticate_depth(self.profiles)
def get_authenticate_once_state(self):
return self.api.LocalLB.ProfileClientSSL.get_authenticate_once_state(self.profiles)
def get_ca_file(self):
return self.api.LocalLB.ProfileClientSSL.get_ca_file_v2(self.profiles)
def get_cache_size(self):
return self.api.LocalLB.ProfileClientSSL.get_cache_size(self.profiles)
def get_cache_timeout(self):
return self.api.LocalLB.ProfileClientSSL.get_cache_timeout(self.profiles)
def get_certificate_file(self):
return self.api.LocalLB.ProfileClientSSL.get_certificate_file_v2(self.profiles)
def get_chain_file(self):
return self.api.LocalLB.ProfileClientSSL.get_chain_file_v2(self.profiles)
def get_cipher_list(self):
return self.api.LocalLB.ProfileClientSSL.get_cipher_list(self.profiles)
def get_client_certificate_ca_file(self):
return self.api.LocalLB.ProfileClientSSL.get_client_certificate_ca_file_v2(self.profiles)
def get_crl_file(self):
return self.api.LocalLB.ProfileClientSSL.get_crl_file_v2(self.profiles)
def get_default_profile(self):
return self.api.LocalLB.ProfileClientSSL.get_default_profile(self.profiles)
def get_description(self):
return self.api.LocalLB.ProfileClientSSL.get_description(self.profiles)
def get_forward_proxy_ca_certificate_file(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_certificate_file(self.profiles)
def get_forward_proxy_ca_key_file(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_key_file(self.profiles)
def get_forward_proxy_ca_passphrase(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_passphrase(self.profiles)
def get_forward_proxy_certificate_extension_include(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_extension_include(self.profiles)
def get_forward_proxy_certificate_lifespan(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_lifespan(self.profiles)
def get_forward_proxy_enabled_state(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_enabled_state(self.profiles)
def get_forward_proxy_lookup_by_ipaddr_port_state(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_lookup_by_ipaddr_port_state(self.profiles)
def get_handshake_timeout(self):
return self.api.LocalLB.ProfileClientSSL.get_handshake_timeout(self.profiles)
def get_key_file(self):
return self.api.LocalLB.ProfileClientSSL.get_key_file_v2(self.profiles)
def get_modssl_emulation_state(self):
return self.api.LocalLB.ProfileClientSSL.get_modssl_emulation_state(self.profiles)
def get_passphrase(self):
return self.api.LocalLB.ProfileClientSSL.get_passphrase(self.profiles)
def get_peer_certification_mode(self):
return self.api.LocalLB.ProfileClientSSL.get_peer_certification_mode(self.profiles)
def get_profile_mode(self):
return self.api.LocalLB.ProfileClientSSL.get_profile_mode(self.profiles)
def get_renegotiation_maximum_record_delay(self):
return self.api.LocalLB.ProfileClientSSL.get_renegotiation_maximum_record_delay(self.profiles)
def get_renegotiation_period(self):
return self.api.LocalLB.ProfileClientSSL.get_renegotiation_period(self.profiles)
def get_renegotiation_state(self):
return self.api.LocalLB.ProfileClientSSL.get_renegotiation_state(self.profiles)
def get_renegotiation_throughput(self):
return self.api.LocalLB.ProfileClientSSL.get_renegotiation_throughput(self.profiles)
def get_retain_certificate_state(self):
return self.api.LocalLB.ProfileClientSSL.get_retain_certificate_state(self.profiles)
def get_secure_renegotiation_mode(self):
return self.api.LocalLB.ProfileClientSSL.get_secure_renegotiation_mode(self.profiles)
def get_server_name(self):
return self.api.LocalLB.ProfileClientSSL.get_server_name(self.profiles)
def get_session_ticket_state(self):
return self.api.LocalLB.ProfileClientSSL.get_session_ticket_state(self.profiles)
def get_sni_default_state(self):
return self.api.LocalLB.ProfileClientSSL.get_sni_default_state(self.profiles)
def get_sni_require_state(self):
return self.api.LocalLB.ProfileClientSSL.get_sni_require_state(self.profiles)
def get_ssl_option(self):
return self.api.LocalLB.ProfileClientSSL.get_ssl_option(self.profiles)
def get_strict_resume_state(self):
return self.api.LocalLB.ProfileClientSSL.get_strict_resume_state(self.profiles)
def get_unclean_shutdown_state(self):
return self.api.LocalLB.ProfileClientSSL.get_unclean_shutdown_state(self.profiles)
def get_is_base_profile(self):
return self.api.LocalLB.ProfileClientSSL.is_base_profile(self.profiles)
def get_is_system_profile(self):
return self.api.LocalLB.ProfileClientSSL.is_system_profile(self.profiles)
class SystemInfo(object):
"""System information class.
F5 BIG-IP system information class.
Attributes:
api: iControl API instance.
"""
def __init__(self, api):
self.api = api
def get_base_mac_address(self):
return self.api.System.SystemInfo.get_base_mac_address()
def get_blade_temperature(self):
return self.api.System.SystemInfo.get_blade_temperature()
def get_chassis_slot_information(self):
return self.api.System.SystemInfo.get_chassis_slot_information()
def get_globally_unique_identifier(self):
return self.api.System.SystemInfo.get_globally_unique_identifier()
def get_group_id(self):
return self.api.System.SystemInfo.get_group_id()
def get_hardware_information(self):
return self.api.System.SystemInfo.get_hardware_information()
def get_marketing_name(self):
return self.api.System.SystemInfo.get_marketing_name()
def get_product_information(self):
return self.api.System.SystemInfo.get_product_information()
def get_pva_version(self):
return self.api.System.SystemInfo.get_pva_version()
def get_system_id(self):
return self.api.System.SystemInfo.get_system_id()
def get_system_information(self):
return self.api.System.SystemInfo.get_system_information()
def get_time(self):
return self.api.System.SystemInfo.get_time()
def get_time_zone(self):
return self.api.System.SystemInfo.get_time_zone()
def get_uptime(self):
return self.api.System.SystemInfo.get_uptime()
class ProvisionInfo(object):
"""Provision information class.
F5 BIG-IP provision information class.
Attributes:
api: iControl API instance.
"""
def __init__(self, api):
self.api = api
    def get_list(self):
        result = []
        modules = self.api.Management.Provision.get_list()
        for item in modules:
            item = item.lower().replace('tmos_module_', '')
            result.append(item)
        return result
    def get_provisioned_list(self):
        result = []
        provisioned = self.api.Management.Provision.get_provisioned_list()
        for item in provisioned:
            item = item.lower().replace('tmos_module_', '')
            result.append(item)
        return result
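# generate_dict() collates per-object facts: for every requested field it calls
# the wrapper's get_<field>() accessor (skipping calls the connected BIG-IP does
# not support) and returns {object_name: {field: value, ...}, ...}.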
def generate_dict(api_obj, fields):
result_dict = {}
lists = []
supported_fields = []
if api_obj.get_list():
for field in fields:
try:
api_response = getattr(api_obj, "get_" + field)()
except (MethodNotFound, WebFault):
pass
else:
lists.append(api_response)
supported_fields.append(field)
for i, j in enumerate(api_obj.get_list()):
temp = {}
temp.update([(item[0], item[1][i]) for item in zip(supported_fields, lists)])
result_dict[j] = temp
return result_dict
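# generate_simple_dict() is the flat variant used for singleton categories such
# as system_info and provision: it returns {field: value} with no per-object key.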
def generate_simple_dict(api_obj, fields):
result_dict = {}
for field in fields:
try:
api_response = getattr(api_obj, "get_" + field)()
except (MethodNotFound, WebFault):
pass
else:
result_dict[field] = api_response
return result_dict
def generate_interface_dict(f5, regex):
interfaces = Interfaces(f5.get_api(), regex)
fields = ['active_media', 'actual_flow_control', 'bundle_state',
'description', 'dual_media_state', 'enabled_state', 'if_index',
'learning_mode', 'lldp_admin_status', 'lldp_tlvmap',
'mac_address', 'media', 'media_option', 'media_option_sfp',
'media_sfp', 'media_speed', 'media_status', 'mtu',
'phy_master_slave_mode', 'prefer_sfp_state', 'flow_control',
'sflow_poll_interval', 'sflow_poll_interval_global',
'sfp_media_state', 'stp_active_edge_port_state',
'stp_enabled_state', 'stp_link_type',
'stp_protocol_detection_reset_state']
return generate_dict(interfaces, fields)
def generate_self_ip_dict(f5, regex):
self_ips = SelfIPs(f5.get_api(), regex)
fields = ['address', 'allow_access_list', 'description',
'enforced_firewall_policy', 'floating_state', 'fw_rule',
'netmask', 'staged_firewall_policy', 'traffic_group',
'vlan', 'is_traffic_group_inherited']
return generate_dict(self_ips, fields)
def generate_trunk_dict(f5, regex):
trunks = Trunks(f5.get_api(), regex)
fields = ['active_lacp_state', 'configured_member_count', 'description',
'distribution_hash_option', 'interface', 'lacp_enabled_state',
'lacp_timeout_option', 'link_selection_policy', 'media_speed',
'media_status', 'operational_member_count', 'stp_enabled_state',
'stp_protocol_detection_reset_state']
return generate_dict(trunks, fields)
def generate_vlan_dict(f5, regex):
vlans = Vlans(f5.get_api(), regex)
fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description',
'dynamic_forwarding', 'failsafe_action', 'failsafe_state',
'failsafe_timeout', 'if_index', 'learning_mode',
'mac_masquerade_address', 'member', 'mtu',
'sflow_poll_interval', 'sflow_poll_interval_global',
'sflow_sampling_rate', 'sflow_sampling_rate_global',
'source_check_state', 'true_mac_address', 'vlan_id']
return generate_dict(vlans, fields)
def generate_vs_dict(f5, regex):
virtual_servers = VirtualServers(f5.get_api(), regex)
fields = ['actual_hardware_acceleration', 'authentication_profile',
'auto_lasthop', 'bw_controller_policy', 'clone_pool',
'cmp_enable_mode', 'connection_limit', 'connection_mirror_state',
'default_pool_name', 'description', 'destination',
'enabled_state', 'enforced_firewall_policy',
'fallback_persistence_profile', 'fw_rule', 'gtm_score',
'last_hop_pool', 'nat64_state', 'object_status',
'persistence_profile', 'profile', 'protocol',
'rate_class', 'rate_limit', 'rate_limit_destination_mask',
'rate_limit_mode', 'rate_limit_source_mask', 'related_rule',
'rule', 'security_log_profile', 'snat_pool', 'snat_type',
'source_address', 'source_address_translation_lsn_pool',
'source_address_translation_snat_pool',
'source_address_translation_type', 'source_port_behavior',
'staged_firewall_policy', 'translate_address_state',
'translate_port_state', 'type', 'vlan', 'wildmask']
return generate_dict(virtual_servers, fields)
def generate_pool_dict(f5, regex):
pools = Pools(f5.get_api(), regex)
fields = ['action_on_service_down', 'active_member_count',
'aggregate_dynamic_ratio', 'allow_nat_state',
'allow_snat_state', 'client_ip_tos', 'client_link_qos',
'description', 'gateway_failsafe_device',
'ignore_persisted_weight_state', 'lb_method', 'member',
'minimum_active_member', 'minimum_up_member',
'minimum_up_member_action', 'minimum_up_member_enabled_state',
'monitor_association', 'monitor_instance', 'object_status',
'profile', 'queue_depth_limit',
'queue_on_connection_limit_state', 'queue_time_limit',
'reselect_tries', 'server_ip_tos', 'server_link_qos',
'simple_timeout', 'slow_ramp_time']
return generate_dict(pools, fields)
def generate_device_dict(f5, regex):
devices = Devices(f5.get_api(), regex)
fields = ['active_modules', 'base_mac_address', 'blade_addresses',
'build', 'chassis_id', 'chassis_type', 'comment',
'configsync_address', 'contact', 'description', 'edition',
'failover_state', 'hostname', 'inactive_modules', 'location',
'management_address', 'marketing_name', 'multicast_address',
'optional_modules', 'platform_id', 'primary_mirror_address',
'product', 'secondary_mirror_address', 'software_version',
'timelimited_modules', 'timezone', 'unicast_addresses']
return generate_dict(devices, fields)
def generate_device_group_dict(f5, regex):
device_groups = DeviceGroups(f5.get_api(), regex)
fields = ['all_preferred_active', 'autosync_enabled_state', 'description',
'device', 'full_load_on_sync_state',
'incremental_config_sync_size_maximum',
'network_failover_enabled_state', 'sync_status', 'type']
return generate_dict(device_groups, fields)
def generate_traffic_group_dict(f5, regex):
traffic_groups = TrafficGroups(f5.get_api(), regex)
fields = ['auto_failback_enabled_state', 'auto_failback_time',
'default_device', 'description', 'ha_load_factor',
'ha_order', 'is_floating', 'mac_masquerade_address',
'unit_id']
return generate_dict(traffic_groups, fields)
def generate_rule_dict(f5, regex):
rules = Rules(f5.get_api(), regex)
fields = ['definition', 'description', 'ignore_vertification',
'verification_status']
return generate_dict(rules, fields)
def generate_node_dict(f5, regex):
nodes = Nodes(f5.get_api(), regex)
fields = ['address', 'connection_limit', 'description', 'dynamic_ratio',
'monitor_instance', 'monitor_rule', 'monitor_status',
'object_status', 'rate_limit', 'ratio', 'session_status']
return generate_dict(nodes, fields)
def generate_virtual_address_dict(f5, regex):
virtual_addresses = VirtualAddresses(f5.get_api(), regex)
fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit',
'description', 'enabled_state', 'icmp_echo_state',
'is_floating_state', 'netmask', 'object_status',
'route_advertisement_state', 'traffic_group']
return generate_dict(virtual_addresses, fields)
def generate_address_class_dict(f5, regex):
address_classes = AddressClasses(f5.get_api(), regex)
fields = ['address_class', 'description']
return generate_dict(address_classes, fields)
def generate_certificate_dict(f5, regex):
certificates = Certificates(f5.get_api(), regex)
return dict(zip(certificates.get_list(), certificates.get_certificate_list()))
def generate_key_dict(f5, regex):
keys = Keys(f5.get_api(), regex)
return dict(zip(keys.get_list(), keys.get_key_list()))
def generate_client_ssl_profile_dict(f5, regex):
profiles = ProfileClientSSL(f5.get_api(), regex)
fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth',
'authenticate_once_state', 'ca_file', 'cache_size',
'cache_timeout', 'certificate_file', 'chain_file',
'cipher_list', 'client_certificate_ca_file', 'crl_file',
'default_profile', 'description',
'forward_proxy_ca_certificate_file', 'forward_proxy_ca_key_file',
'forward_proxy_ca_passphrase',
'forward_proxy_certificate_extension_include',
'forward_proxy_certificate_lifespan',
'forward_proxy_enabled_state',
'forward_proxy_lookup_by_ipaddr_port_state', 'handshake_timeout',
'key_file', 'modssl_emulation_state', 'passphrase',
'peer_certification_mode', 'profile_mode',
'renegotiation_maximum_record_delay', 'renegotiation_period',
'renegotiation_state', 'renegotiation_throughput',
'retain_certificate_state', 'secure_renegotiation_mode',
'server_name', 'session_ticket_state', 'sni_default_state',
'sni_require_state', 'ssl_option', 'strict_resume_state',
'unclean_shutdown_state', 'is_base_profile', 'is_system_profile']
return generate_dict(profiles, fields)
def generate_system_info_dict(f5):
system_info = SystemInfo(f5.get_api())
fields = ['base_mac_address',
'blade_temperature', 'chassis_slot_information',
'globally_unique_identifier', 'group_id',
'hardware_information',
'marketing_name',
'product_information', 'pva_version', 'system_id',
'system_information', 'time',
'time_zone', 'uptime']
return generate_simple_dict(system_info, fields)
def generate_software_list(f5):
software = Software(f5.get_api())
software_list = software.get_all_software_status()
return software_list
def generate_provision_dict(f5):
provisioned = ProvisionInfo(f5.get_api())
fields = ['list', 'provisioned_list']
return generate_simple_dict(provisioned, fields)
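# main() validates the requested fact categories, opens an iControl connection
# (optionally with a dedicated session), collects each category into
# ansible_facts, and restores the saved folder/query state before exiting.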
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
session=dict(type='bool', default=False),
include=dict(type='list', required=True),
filter=dict(type='str', required=False),
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec
)
if not bigsuds_found:
module.fail_json(msg="the python suds and bigsuds modules are required")
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
validate_certs = module.params['validate_certs']
session = module.params['session']
fact_filter = module.params['filter']
if validate_certs:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(
msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task'
)
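    # fnmatch.translate() turns the shell-style glob from 'filter' into a
    # regular expression; each collector class applies it with re.search().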
if fact_filter:
regex = fnmatch.translate(fact_filter)
else:
regex = None
include = [x.lower() for x in module.params['include']]
valid_includes = ('address_class', 'certificate', 'client_ssl_profile',
'device', 'device_group', 'interface', 'key', 'node',
'pool', 'provision', 'rule', 'self_ip', 'software',
'system_info', 'traffic_group', 'trunk',
'virtual_address', 'virtual_server', 'vlan')
include_test = map(lambda x: x in valid_includes, include)
if not all(include_test):
module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include)))
try:
facts = {}
if len(include) > 0:
f5 = F5(server, user, password, session, validate_certs, server_port)
saved_active_folder = f5.get_active_folder()
saved_recursive_query_state = f5.get_recursive_query_state()
if saved_active_folder != "/":
f5.set_active_folder("/")
if saved_recursive_query_state != "STATE_ENABLED":
f5.enable_recursive_query_state()
if 'interface' in include:
facts['interface'] = generate_interface_dict(f5, regex)
if 'self_ip' in include:
facts['self_ip'] = generate_self_ip_dict(f5, regex)
if 'trunk' in include:
facts['trunk'] = generate_trunk_dict(f5, regex)
if 'vlan' in include:
facts['vlan'] = generate_vlan_dict(f5, regex)
if 'virtual_server' in include:
facts['virtual_server'] = generate_vs_dict(f5, regex)
if 'pool' in include:
facts['pool'] = generate_pool_dict(f5, regex)
if 'provision' in include:
facts['provision'] = generate_provision_dict(f5)
if 'device' in include:
facts['device'] = generate_device_dict(f5, regex)
if 'device_group' in include:
facts['device_group'] = generate_device_group_dict(f5, regex)
if 'traffic_group' in include:
facts['traffic_group'] = generate_traffic_group_dict(f5, regex)
if 'rule' in include:
facts['rule'] = generate_rule_dict(f5, regex)
if 'node' in include:
facts['node'] = generate_node_dict(f5, regex)
if 'virtual_address' in include:
facts['virtual_address'] = generate_virtual_address_dict(f5, regex)
if 'address_class' in include:
facts['address_class'] = generate_address_class_dict(f5, regex)
if 'software' in include:
facts['software'] = generate_software_list(f5)
if 'certificate' in include:
facts['certificate'] = generate_certificate_dict(f5, regex)
if 'key' in include:
facts['key'] = generate_key_dict(f5, regex)
if 'client_ssl_profile' in include:
facts['client_ssl_profile'] = generate_client_ssl_profile_dict(f5, regex)
if 'system_info' in include:
facts['system_info'] = generate_system_info_dict(f5)
# restore saved state
if saved_active_folder and saved_active_folder != "/":
f5.set_active_folder(saved_active_folder)
if saved_recursive_query_state and \
saved_recursive_query_state != "STATE_ENABLED":
f5.set_recursive_query_state(saved_recursive_query_state)
result = {'ansible_facts': facts}
except Exception as e:
module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc()))
module.exit_json(**result)
# include magic from lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
|
codeforamerica/skillcamp
|
refs/heads/master
|
ENV/lib/python2.7/site-packages/werkzeug/testsuite/wsgi.py
|
99
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.wsgi
~~~~~~~~~~~~~~~~~~~~~~~
Tests the WSGI utilities.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from os import path
from contextlib import closing
from werkzeug.testsuite import WerkzeugTestCase, get_temporary_directory
from werkzeug.wrappers import BaseResponse
from werkzeug.exceptions import BadRequest, ClientDisconnected
from werkzeug.test import Client, create_environ, run_wsgi_app
from werkzeug import wsgi
from werkzeug._compat import StringIO, BytesIO, NativeStringIO, to_native
class WSGIUtilsTestCase(WerkzeugTestCase):
def test_shareddatamiddleware_get_file_loader(self):
app = wsgi.SharedDataMiddleware(None, {})
assert callable(app.get_file_loader('foo'))
def test_shared_data_middleware(self):
def null_application(environ, start_response):
start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
yield b'NOT FOUND'
test_dir = get_temporary_directory()
with open(path.join(test_dir, to_native(u'äöü', 'utf-8')), 'w') as test_file:
test_file.write(u'FOUND')
app = wsgi.SharedDataMiddleware(null_application, {
'/': path.join(path.dirname(__file__), 'res'),
'/sources': path.join(path.dirname(__file__), 'res'),
'/pkg': ('werkzeug.debug', 'shared'),
'/foo': test_dir
})
for p in '/test.txt', '/sources/test.txt', '/foo/äöü':
app_iter, status, headers = run_wsgi_app(app, create_environ(p))
self.assert_equal(status, '200 OK')
with closing(app_iter) as app_iter:
data = b''.join(app_iter).strip()
self.assert_equal(data, b'FOUND')
app_iter, status, headers = run_wsgi_app(
app, create_environ('/pkg/debugger.js'))
with closing(app_iter) as app_iter:
contents = b''.join(app_iter)
self.assert_in(b'$(function() {', contents)
app_iter, status, headers = run_wsgi_app(
app, create_environ('/missing'))
self.assert_equal(status, '404 NOT FOUND')
self.assert_equal(b''.join(app_iter).strip(), b'NOT FOUND')
def test_get_host(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
self.assert_equal(wsgi.get_host(env), 'example.org')
self.assert_equal(
wsgi.get_host(create_environ('/', 'http://example.org')),
'example.org')
def test_get_host_multiple_forwarded(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.com, example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
self.assert_equal(wsgi.get_host(env), 'example.com')
self.assert_equal(
wsgi.get_host(create_environ('/', 'http://example.com')),
'example.com')
def test_get_host_validation(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
self.assert_equal(wsgi.get_host(env, trusted_hosts=['.example.org']),
'example.org')
self.assert_raises(BadRequest, wsgi.get_host, env,
trusted_hosts=['example.com'])
def test_responder(self):
def foo(environ, start_response):
return BaseResponse(b'Test')
client = Client(wsgi.responder(foo), BaseResponse)
response = client.get('/')
self.assert_equal(response.status_code, 200)
self.assert_equal(response.data, b'Test')
def test_pop_path_info(self):
original_env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b///c'}
# regular path info popping
def assert_tuple(script_name, path_info):
self.assert_equal(env.get('SCRIPT_NAME'), script_name)
self.assert_equal(env.get('PATH_INFO'), path_info)
env = original_env.copy()
pop = lambda: wsgi.pop_path_info(env)
assert_tuple('/foo', '/a/b///c')
self.assert_equal(pop(), 'a')
assert_tuple('/foo/a', '/b///c')
self.assert_equal(pop(), 'b')
assert_tuple('/foo/a/b', '///c')
self.assert_equal(pop(), 'c')
assert_tuple('/foo/a/b///c', '')
self.assert_is_none(pop())
def test_peek_path_info(self):
env = {
'SCRIPT_NAME': '/foo',
'PATH_INFO': '/aaa/b///c'
}
self.assert_equal(wsgi.peek_path_info(env), 'aaa')
self.assert_equal(wsgi.peek_path_info(env), 'aaa')
self.assert_equal(wsgi.peek_path_info(env, charset=None), b'aaa')
self.assert_equal(wsgi.peek_path_info(env, charset=None), b'aaa')
def test_path_info_and_script_name_fetching(self):
env = create_environ(u'/\N{SNOWMAN}', u'http://example.com/\N{COMET}/')
self.assert_equal(wsgi.get_path_info(env), u'/\N{SNOWMAN}')
self.assert_equal(wsgi.get_path_info(env, charset=None), u'/\N{SNOWMAN}'.encode('utf-8'))
self.assert_equal(wsgi.get_script_name(env), u'/\N{COMET}')
self.assert_equal(wsgi.get_script_name(env, charset=None), u'/\N{COMET}'.encode('utf-8'))
def test_query_string_fetching(self):
env = create_environ(u'/?\N{SNOWMAN}=\N{COMET}')
qs = wsgi.get_query_string(env)
self.assert_strict_equal(qs, '%E2%98%83=%E2%98%84')
def test_limited_stream(self):
class RaisingLimitedStream(wsgi.LimitedStream):
def on_exhausted(self):
raise BadRequest('input stream exhausted')
io = BytesIO(b'123456')
stream = RaisingLimitedStream(io, 3)
self.assert_strict_equal(stream.read(), b'123')
self.assert_raises(BadRequest, stream.read)
io = BytesIO(b'123456')
stream = RaisingLimitedStream(io, 3)
self.assert_strict_equal(stream.tell(), 0)
self.assert_strict_equal(stream.read(1), b'1')
self.assert_strict_equal(stream.tell(), 1)
self.assert_strict_equal(stream.read(1), b'2')
self.assert_strict_equal(stream.tell(), 2)
self.assert_strict_equal(stream.read(1), b'3')
self.assert_strict_equal(stream.tell(), 3)
self.assert_raises(BadRequest, stream.read)
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readline(), b'123456\n')
self.assert_strict_equal(stream.readline(), b'ab')
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readlines(), [b'123456\n', b'ab'])
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readlines(2), [b'12'])
self.assert_strict_equal(stream.readlines(2), [b'34'])
self.assert_strict_equal(stream.readlines(), [b'56\n', b'ab'])
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readline(100), b'123456\n')
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readlines(100), [b'123456\n', b'ab'])
io = BytesIO(b'123456')
stream = wsgi.LimitedStream(io, 3)
self.assert_strict_equal(stream.read(1), b'1')
self.assert_strict_equal(stream.read(1), b'2')
self.assert_strict_equal(stream.read(), b'3')
self.assert_strict_equal(stream.read(), b'')
io = BytesIO(b'123456')
stream = wsgi.LimitedStream(io, 3)
self.assert_strict_equal(stream.read(-1), b'123')
io = BytesIO(b'123456')
stream = wsgi.LimitedStream(io, 0)
self.assert_strict_equal(stream.read(-1), b'')
io = StringIO(u'123456')
stream = wsgi.LimitedStream(io, 0)
self.assert_strict_equal(stream.read(-1), u'')
io = StringIO(u'123\n456\n')
stream = wsgi.LimitedStream(io, 8)
self.assert_strict_equal(list(stream), [u'123\n', u'456\n'])
def test_limited_stream_disconnection(self):
io = BytesIO(b'A bit of content')
# disconnect detection on out of bytes
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
# disconnect detection because file close
io = BytesIO(b'x' * 255)
io.close()
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
def test_path_info_extraction(self):
x = wsgi.extract_path_info('http://example.com/app', '/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app',
'https://example.com/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app')
self.assert_equal(x, u'/')
x = wsgi.extract_path_info(u'http://☃.net/', u'/fööbär')
self.assert_equal(x, u'/fööbär')
x = wsgi.extract_path_info(u'http://☃.net/x', u'http://☃.net/x/fööbär')
self.assert_equal(x, u'/fööbär')
env = create_environ(u'/fööbär', u'http://☃.net/x/')
x = wsgi.extract_path_info(env, u'http://☃.net/x/fööbär')
self.assert_equal(x, u'/fööbär')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/a/hello')
self.assert_is_none(x)
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello',
collapse_http_schemes=False)
self.assert_is_none(x)
def test_get_host_fallback(self):
self.assert_equal(wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '80'
}), 'foobar.example.com')
self.assert_equal(wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '81'
}), 'foobar.example.com:81')
def test_get_current_url_unicode(self):
env = create_environ()
env['QUERY_STRING'] = 'foo=bar&baz=blah&meh=\xcf'
rv = wsgi.get_current_url(env)
self.assert_strict_equal(rv,
u'http://localhost/?foo=bar&baz=blah&meh=\ufffd')
def test_multi_part_line_breaks(self):
data = 'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
test_stream = NativeStringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=16))
self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
data = 'abc\r\nThis line is broken by the buffer length.' \
'\r\nFoo bar baz'
test_stream = NativeStringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=24))
self.assert_equal(lines, ['abc\r\n', 'This line is broken by the '
'buffer length.\r\n', 'Foo bar baz'])
def test_multi_part_line_breaks_bytes(self):
data = b'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
test_stream = BytesIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=16))
self.assert_equal(lines, [b'abcdef\r\n', b'ghijkl\r\n',
b'mnopqrstuvwxyz\r\n', b'ABCDEFGHIJK'])
data = b'abc\r\nThis line is broken by the buffer length.' \
b'\r\nFoo bar baz'
test_stream = BytesIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=24))
self.assert_equal(lines, [b'abc\r\n', b'This line is broken by the '
b'buffer length.\r\n', b'Foo bar baz'])
def test_multi_part_line_breaks_problematic(self):
data = 'abc\rdef\r\nghi'
for x in range(1, 10):
test_stream = NativeStringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=4))
self.assert_equal(lines, ['abc\r', 'def\r\n', 'ghi'])
def test_iter_functions_support_iterators(self):
data = ['abcdef\r\nghi', 'jkl\r\nmnopqrstuvwxyz\r', '\nABCDEFGHIJK']
lines = list(wsgi.make_line_iter(data))
self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
def test_make_chunk_iter(self):
data = [u'abcdefXghi', u'jklXmnopqrstuvwxyzX', u'ABCDEFGHIJK']
rv = list(wsgi.make_chunk_iter(data, 'X'))
self.assert_equal(rv, [u'abcdef', u'ghijkl', u'mnopqrstuvwxyz',
u'ABCDEFGHIJK'])
data = u'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
test_stream = StringIO(data)
rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
buffer_size=4))
self.assert_equal(rv, [u'abcdef', u'ghijkl', u'mnopqrstuvwxyz',
u'ABCDEFGHIJK'])
def test_make_chunk_iter_bytes(self):
data = [b'abcdefXghi', b'jklXmnopqrstuvwxyzX', b'ABCDEFGHIJK']
rv = list(wsgi.make_chunk_iter(data, 'X'))
self.assert_equal(rv, [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz',
b'ABCDEFGHIJK'])
data = b'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
test_stream = BytesIO(data)
rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
buffer_size=4))
self.assert_equal(rv, [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz',
b'ABCDEFGHIJK'])
def test_lines_longer_buffer_size(self):
data = '1234567890\n1234567890\n'
for bufsize in range(1, 15):
lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
                                             buffer_size=bufsize))
self.assert_equal(lines, ['1234567890\n', '1234567890\n'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WSGIUtilsTestCase))
return suite
|
heeraj123/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/tests/regressiontests/comment_tests/urls_default.py
|
133
|
from django.conf.urls import patterns, include
urlpatterns = patterns('',
(r'^', include('django.contrib.comments.urls')),
# Provide the auth system login and logout views
(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
(r'^accounts/logout/$', 'django.contrib.auth.views.logout'),
)
|
fuzeman/trakt.py
|
refs/heads/master
|
tests/sync/ratings/test_shows.py
|
1
|
# flake8: noqa: F403, F405
from tests.core import mock
from trakt import Trakt
from datetime import datetime
from dateutil.tz import tzutc
from hamcrest import *
from httmock import HTTMock
def test_ratings():
with HTTMock(mock.fixtures, mock.unknown):
collection = {}
with Trakt.configuration.auth('mock', 'mock'):
Trakt['sync/ratings'].shows(store=collection)
Trakt['sync/ratings'].seasons(store=collection)
Trakt['sync/ratings'].episodes(store=collection)
# Ensure collection is valid
assert_that(collection, not_none())
assert_that(collection, has_length(6))
# Chuck (2007)
assert_that(collection[('tvdb', '80348')], has_properties({
'pk': ('tvdb', '80348'),
'title': 'Chuck',
'year': 2007,
'rating': has_properties({
'value': 10,
'votes': None,
'timestamp': datetime(2014, 10, 19, 23, 2, 23, tzinfo=tzutc())
}),
# Seasons
'seasons': all_of(
has_length(1),
has_entry(1, has_properties({
'pk': 1,
# Rating
'rating': has_properties({
'value': 10,
'votes': None,
'timestamp': datetime(2015, 3, 11, 23, 29, 35, tzinfo=tzutc())
}),
# Episodes
'episodes': all_of(
has_length(1),
has_entry(1, has_properties({
'pk': (1, 1),
# Rating
'rating': has_properties({
'value': 10,
'votes': None,
'timestamp': datetime(2014, 10, 19, 23, 2, 24, tzinfo=tzutc())
}),
# Keys
'keys': [
(1, 1),
('tvdb', '332179'),
('tmdb', '63425'),
('tvrage', '579282'),
('trakt', '74041')
]
}))
),
# Keys
'keys': [
1,
('tvdb', '27985'),
('tmdb', '3650'),
('trakt', '3993')
]
})),
),
# Keys
'keys': [
('tvdb', '80348'),
('tmdb', '1404'),
('imdb', 'tt0934814'),
('tvrage', '15614'),
('slug', 'chuck'),
('trakt', '1395')
]
}))
|
avocado-framework/avocado-vt
|
refs/heads/master
|
virttest/qemu_storage.py
|
1
|
"""
Classes and functions to handle block/disk images for KVM.
This exports:
- two functions for getting the image/blkdebug filename
- a class for image operations and basic parameters
"""
import collections
import json
import logging
import os
import re
import six
import string
from avocado.core import exceptions
from avocado.utils import process
from virttest import utils_misc
from virttest import virt_vm
from virttest import storage
from virttest import nvme
from virttest import data_dir
from virttest import error_context
def filename_to_file_opts(filename):
"""Convert filename into file opts, used by both qemu-img and qemu-kvm"""
file_opts = {}
if not filename:
file_opts = {}
elif filename.startswith('iscsi:'):
filename_pattern = re.compile(
r'iscsi://((?P<user>.+?):(?P<password>.+?)@)?(?P<portal>.+)/(?P<target>.+?)/(?P<lun>\d+)')
matches = filename_pattern.match(filename)
if matches:
if (matches.group('portal') is not None
and matches.group('target') is not None
and matches.group('lun') is not None):
# required options for iscsi
file_opts = {'driver': 'iscsi',
'transport': 'tcp',
'portal': matches.group('portal'),
'target': matches.group('target'),
'lun': int(matches.group('lun'))}
if matches.group('user') is not None:
# optional option
file_opts['user'] = matches.group('user')
elif filename.startswith('rbd:'):
filename_pattern = re.compile(
r'rbd:(?P<pool>.+?)/(?P<namespace>.+?(?=/))?/?(?P<image>[^:]+)'
r'(:conf=(?P<conf>.+))?'
)
matches = filename_pattern.match(filename)
if matches:
if (matches.group('pool') is not None
and matches.group('image') is not None):
# required options for rbd
file_opts = {'driver': 'rbd',
'pool': matches.group('pool'),
'image': matches.group('image')}
if matches.group('conf') is not None:
# optional option
file_opts['conf'] = matches.group('conf')
if matches.group('namespace') is not None:
# optional option
file_opts['namespace'] = matches.group('namespace')
elif filename.startswith('gluster'):
filename_pattern = re.compile(
r'gluster\+?(?P<type>.+)?://((?P<host>[^/]+?)(:(?P<port>\d+))?)?/'
r'(?P<volume>.+?)/(?P<path>[^,?]+)'
r'(\?socket=(?P<socket>[^,]+))?'
)
matches = filename_pattern.match(filename)
if matches:
servers = []
transport = 'inet' if not matches.group('type') or matches.group('type') == 'tcp' else matches.group('type')
if matches.group('host'):
# 'IPv4/hostname' or '[IPv6 address]'
host = matches.group('host').strip('[]')
# port should be set for both qemu-img and qemu-kvm
port = matches.group('port') if matches.group('port') else '0'
servers.append({'type': transport,
'host': host,
'port': port})
elif matches.group('socket'):
servers.append({'type': transport,
'path': matches.group('socket')})
if matches.group('volume') and matches.group('path') and servers:
# required options for gluster
file_opts = {'driver': 'gluster',
'volume': matches.group('volume'),
'path': matches.group('path')}
file_opts.update({'server.{i}.{k}'.format(i=i, k=k): v
for i, server in enumerate(servers)
for k, v in six.iteritems(server)})
elif re.match(r'nbd(\+\w+)?://', filename):
filename_pattern = re.compile(
r'nbd(\+(?:.+))?://((?P<host>[^/:?]+)(:(?P<port>\d+))?)?'
r'(/(?P<export>[^?]+))?'
r'(\?socket=(?P<socket>.+))?'
)
matches = filename_pattern.match(filename)
if matches:
server = {}
host = matches.group('host')
sock = matches.group('socket')
if host:
                # 10809 is the default port for tcp connection
port = matches.group('port') if matches.group(
'port') else '10809'
server = {'server.type': 'inet',
'server.host': host,
'server.port': port}
elif sock:
server = {'server.type': 'unix', 'server.path': sock}
if server:
# server is required
file_opts = {'driver': 'nbd'}
file_opts.update(server)
if matches.group('export'):
file_opts['export'] = matches.group('export')
elif filename.startswith('nvme:'):
addr, namespace = nvme.parse_uri(filename)
file_opts = {'driver': 'nvme', 'device': addr, 'namespace': int(namespace)}
elif filename.startswith('ssh:'):
filename_pattern = re.compile(
r'ssh://((?P<user>.+)@)?(?P<host>[^/:?]+)(:(?P<port>\d+))?'
r'(?P<path>/[^?]+)'
r'(\?host_key_check=(?P<host_key_check>.+))?'
)
matches = filename_pattern.match(filename)
if matches:
matches = matches.groupdict()
if matches['host'] is not None and matches['path'] is not None:
# required ssh options
file_opts = {
'driver': 'ssh',
'server.host': matches['host'],
'server.port': matches['port'] if matches['port'] else 22,
'path': matches['path']
}
if matches['user'] is not None:
file_opts['user'] = matches['user']
# options in qemu-kvm are different from uri
if matches['host_key_check'] is not None:
if matches['host_key_check'] == 'no':
file_opts['host-key-check.mode'] = 'none'
elif matches['host_key_check'] == 'yes':
file_opts['host-key-check.mode'] = 'known_hosts'
else:
m = re.match(r'(?P<type>md5|sha1):(?P<hash>.+)',
matches['host_key_check']).groupdict()
file_opts.update({
'host-key-check.mode': 'hash',
'host-key-check.type': m['type'],
'host-key-check.hash': m['hash']
})
elif re.match(r'(http|https|ftp|ftps)://', filename):
filename_pattern = re.compile(
r'(?P<protocol>.+?)://((?P<user>.+?)(:(?P<password>.+?))?@)?'
r'(?P<server>.+?)/(?P<path>.+)')
matches = filename_pattern.match(filename)
if matches:
matches = matches.groupdict()
if all((matches['protocol'], matches['server'], matches['path'])):
# required libcurl options, note server can be hostname:port
file_opts = {
'driver': matches['protocol'],
'url': '{protocol}://{server}/{path}'.format(
protocol=matches['protocol'],
server=matches['server'],
path=matches['path']
)
}
if matches['user'] is not None:
file_opts['username'] = matches['user']
    # FIXME: Judge the host device by whether the string starts with "/dev/".
elif filename.startswith('/dev/'):
file_opts = {'driver': 'host_device', 'filename': filename}
else:
file_opts = {'driver': 'file', 'filename': filename}
if not file_opts:
raise ValueError("Wrong filename %s" % filename)
return file_opts
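# A minimal illustration (not part of the original module; the paths and URIs
# below are made-up examples) of what filename_to_file_opts() returns:
#
#   filename_to_file_opts('/tmp/disk.qcow2')
#     -> {'driver': 'file', 'filename': '/tmp/disk.qcow2'}
#   filename_to_file_opts('/dev/sdb')
#     -> {'driver': 'host_device', 'filename': '/dev/sdb'}
#   filename_to_file_opts('nbd://127.0.0.1:10809/exp')
#     -> {'driver': 'nbd', 'server.type': 'inet', 'server.host': '127.0.0.1',
#         'server.port': '10809', 'export': 'exp'}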
def _get_image_meta(image, params, root_dir):
"""Retrieve image meta dict."""
meta = collections.OrderedDict()
meta["file"] = collections.OrderedDict()
filename = storage.get_image_filename(params, root_dir)
meta_file = filename_to_file_opts(filename)
meta["file"].update(meta_file)
image_format = params.get("image_format", "qcow2")
meta["driver"] = image_format
secret = storage.ImageSecret.image_secret_define_by_params(image, params)
if image_format == "luks":
meta["key-secret"] = secret.aid
image_encryption = params.get("image_encryption", "off")
if image_format == "qcow2" and image_encryption == "luks":
meta["encrypt.key-secret"] = secret.aid
auth_info = storage.StorageAuth.auth_info_define_by_params(image, params)
if auth_info is not None:
if auth_info.storage_type == 'ceph':
if auth_info.data:
# qemu-img needs secret object only for ceph access
meta['file']['password-secret'] = auth_info.aid
elif auth_info.storage_type == 'iscsi-direct':
if auth_info.data:
# '-b json' demands password
# note that image creation doesn't support secret object
meta['file']['password'] = auth_info.data
if auth_info.iscsi_initiator:
meta['file']['initiator-name'] = auth_info.iscsi_initiator
elif auth_info.storage_type == 'glusterfs-direct':
if auth_info.debug:
meta['file']['debug'] = int(auth_info.debug)
if auth_info.logfile:
meta['file']['logfile'] = auth_info.logfile
peers = []
for peer in auth_info.peers:
if 'path' in peer:
# access storage with unix domain socket
peers.append({'type': 'unix', 'path': peer['path']})
else:
# access storage with hostname/ip + port
peers.append({'host': peer['host'],
'type': peer.get('type', 'inet'),
'port': '%s' % peer.get('port', '0')})
meta['file'].update({'server.{i}.{k}'.format(i=i + 1, k=k): v
for i, server in enumerate(peers)
for k, v in six.iteritems(server)})
elif auth_info.storage_type == 'nbd':
# qemu-img, as a client, accesses nbd storage
if auth_info.tls_creds:
meta['file']['tls-creds'] = auth_info.aid
if auth_info.reconnect_delay:
meta['file']['reconnect-delay'] = auth_info.reconnect_delay
elif auth_info.storage_type == 'curl':
mapping = {
'password-secret': (auth_info.data, auth_info.aid),
'sslverify': (auth_info.sslverify, auth_info.sslverify),
'cookie-secret': (auth_info.cookie,
auth_info.cookie.aid
if auth_info.cookie else ''),
'readahead': (auth_info.readahead, auth_info.readahead),
'timeout': (auth_info.timeout, auth_info.timeout)
}
meta['file'].update({
k: v[1] for k, v in six.iteritems(mapping) if v[0]
})
return meta
def get_image_json(image, params, root_dir):
"""Generate image json representation."""
return "json:%s" % json.dumps(_get_image_meta(image, params, root_dir))
def get_image_opts(image, params, root_dir):
"""Generate image-opts."""
def _dict_to_dot(dct):
"""Convert dictionary to dot representation."""
flat = []
prefix = []
stack = [six.iteritems(dct)]
while stack:
it = stack[-1]
try:
key, value = next(it)
except StopIteration:
if prefix:
prefix.pop()
stack.pop()
continue
if isinstance(value, collections.Mapping):
prefix.append(key)
stack.append(six.iteritems(value))
else:
flat.append((".".join(prefix + [key]), value))
return flat
meta = _get_image_meta(image, params, root_dir)
return ",".join(["%s=%s" % (attr, value) for
attr, value in _dict_to_dot(meta)])
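# A minimal illustration (assumption, not taken from the original module): for
# a plain, unencrypted local qcow2 file the two representations built above
# look roughly like
#
#   get_image_json(...) -> 'json:{"file": {"driver": "file",
#                                 "filename": "/tmp/disk.qcow2"},
#                                 "driver": "qcow2"}'
#   get_image_opts(...) -> 'file.driver=file,file.filename=/tmp/disk.qcow2,driver=qcow2'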
def get_image_repr(image, params, root_dir, representation=None):
"""Get image representation."""
mapping = {"filename": lambda i, p, r: storage.get_image_filename(p, r),
"json": get_image_json,
"opts": get_image_opts}
func = mapping.get(representation, None)
if func is None:
image_secret = storage.ImageSecret.image_secret_define_by_params(
image, params)
access_needed = False
auth_info = storage.StorageAuth.auth_info_define_by_params(image,
params)
if auth_info is not None:
if auth_info.storage_type == 'ceph':
# only ceph access needs secret object
if auth_info.data:
access_needed = True
elif auth_info.storage_type == 'iscsi-direct':
# url with u/p is used to access iscsi image,
# besides u/p, iscsi access may need initiator
if auth_info.iscsi_initiator:
access_needed = True
elif auth_info.storage_type == 'glusterfs-direct':
# debug, logfile and other servers represent in json
if auth_info.debug or auth_info.logfile or auth_info.peers:
access_needed = True
elif auth_info.storage_type == 'nbd':
# tls-creds, reconnect_delay represent in json
access_needed = True
elif auth_info.storage_type == 'curl':
# u/p can be included in url, while the others should be
# represented in json
if any((auth_info.sslverify, auth_info.cookie,
auth_info.readahead, auth_info.timeout)):
access_needed = True
func = mapping["json"] if image_secret or access_needed else mapping["filename"]
return func(image, params, root_dir)
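# Note (sketch of the selection logic above): when no representation is
# requested explicitly, get_image_repr() falls back to the json form only if
# the image defines a key secret or its storage access (ceph, iscsi-direct,
# glusterfs-direct, nbd, curl) requires extra options; otherwise the plain
# filename is returned.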
class _ParameterAssembler(string.Formatter):
"""
Command line parameter assembler.
    This will automatically prepend the corresponding command line flag when a
    value is passed to the format string.
"""
    sentinel = object()
def __init__(self, cmd_params=None):
string.Formatter.__init__(self)
self.cmd_params = cmd_params or {}
def format(self, format_string, *args, **kwargs):
"""Remove redundant whitespaces and return format string."""
ret = string.Formatter.format(self, format_string, *args, **kwargs)
return re.sub(" +", " ", ret)
def get_value(self, key, args, kwargs):
try:
val = string.Formatter.get_value(self, key, args, kwargs)
except KeyError:
if key in self.cmd_params:
val = None
else:
raise
        return (self.cmd_params.get(key, self.sentinel), val)
def convert_field(self, value, conversion):
"""
Do conversion on the resulting object.
supported conversions:
'b': keep the parameter only if bool(value) is True.
'v': keep both the parameter and its corresponding value,
the default mode.
"""
        if value[0] is self.sentinel:
return string.Formatter.convert_field(self, value[1], conversion)
if conversion is None:
conversion = "v"
if conversion == "v":
return "" if value[1] is None else " ".join(value)
if conversion == "b":
return value[0] if bool(value[1]) else ""
raise ValueError("Unknown conversion specifier {}".format(conversion))
class QemuImg(storage.QemuImg):
"""KVM class for handling operations of disk/block images."""
qemu_img_parameters = {
"image_format": "-f",
"backing_file": "-b",
"backing_format": "-F",
"unsafe": "-u",
"options": "-o",
"secret_object": "",
"tls_creds_object": "",
"image_opts": "",
"check_repair": "-r",
"output_format": "--output",
"force_share": "-U",
"resize_preallocation": "--preallocation",
"resize_shrink": "--shrink",
"convert_compressed": "-c",
"cache_mode": "-t",
"source_cache_mode": "-T",
"target_image_format": "-O",
"convert_sparse_size": "-S",
"rate_limit": "-r",
"convert_target_is_zero": "--target-is-zero",
"convert_backing_file": "-B",
"commit_drop": "-d",
"compare_strict_mode": "-s",
"compare_second_image_format": "-F"
}
create_cmd = ("create {secret_object} {tls_creds_object} {image_format} "
"{backing_file} {backing_format} {unsafe!b} {options} "
"{image_filename} {image_size}")
check_cmd = ("check {secret_object} {tls_creds_object} "
"{image_opts} {image_format} "
"{output_format} {check_repair} {force_share!b} "
"{image_filename}")
convert_cmd = ("convert {secret_object} {tls_creds_object} "
"{convert_compressed!b} {skip_target_image_creation} "
"{image_format} {cache_mode} {source_cache_mode} "
"{target_image_format} {options} {convert_sparse_size} "
"{rate_limit} {convert_target_is_zero!b} "
"{convert_backing_file} "
"{image_filename} {target_image_filename} "
"{target_image_opts}")
commit_cmd = ("commit {secret_object} {image_format} {cache_mode} "
"{backing_file} {commit_drop!b} {image_filename} "
"{rate_limit}")
resize_cmd = ("resize {secret_object} {image_opts} {resize_shrink!b} "
"{resize_preallocation} {image_filename} {image_size}")
rebase_cmd = ("rebase {secret_object} {image_format} {cache_mode} "
"{source_cache_mode} {unsafe!b} {backing_file} "
"{backing_format} {image_filename}")
dd_cmd = ("dd {secret_object} {tls_creds_object} {image_format} "
"{target_image_format} {block_size} {count} {skip} "
"if={image_filename} of={target_image_filename}")
compare_cmd = ("compare {secret_object} {tls_creds_object} {image_format} "
"{compare_second_image_format} {source_cache_mode} "
"{compare_strict_mode!b} {force_share!b} "
"{image_filename} {compare_second_image_filename}")
def __init__(self, params, root_dir, tag):
"""
Init the default value for image object.
:param params: Dictionary containing the test parameters.
:param root_dir: Base directory for relative filenames.
:param tag: Image tag defined in parameter images
"""
storage.QemuImg.__init__(self, params, root_dir, tag)
self.image_cmd = utils_misc.get_qemu_img_binary(params)
q_result = process.run(self.image_cmd + ' -h', ignore_status=True,
shell=True, verbose=False)
self.help_text = q_result.stdout_text
self.cap_force_share = '-U' in self.help_text
self._cmd_formatter = _ParameterAssembler(self.qemu_img_parameters)
def _parse_options(self, params):
"""Build options used for qemu-img amend, create, convert, measure."""
options_mapping = {
"preallocated": (None, "preallocation", ("qcow2", "raw", "luks")),
"image_cluster_size": (None, "cluster_size", ("qcow2",)),
"lazy_refcounts": (None, "lazy_refcounts", ("qcow2",)),
"qcow2_compatible": (None, "compat", ("qcow2",))
}
image_format = params.get("image_format", "qcow2")
options = []
for key, (default, opt_key, support_fmt) in options_mapping.items():
if image_format in support_fmt:
value = params.get(key, default)
if value is not None:
options.append("%s=%s" % (opt_key, value))
if self.encryption_config.key_secret:
opts = list(self.encryption_config)
opts.remove("base_key_secrets")
if image_format == "luks":
opts.remove("format")
for opt_key in opts:
opt_val = getattr(self.encryption_config, opt_key)
if opt_val:
if image_format == "qcow2":
opt_key = "encrypt.%s" % opt_key
options.append("%s=%s" % (opt_key.replace("_", "-"),
str(opt_val)))
if self.data_file:
options.extend(
("data_file=%s" % self.data_file.image_filename,
"data_file_raw=%s" % params.get("image_data_file_raw", "off")))
for access_secret, secret_type in self._image_access_secret:
if secret_type == 'password':
options.append("password-secret=%s" % access_secret.aid)
elif secret_type == 'key':
options.append("key-secret=%s" % access_secret.aid)
elif secret_type == 'cookie':
options.append("cookie-secret=%s" % access_secret.aid)
image_extra_params = params.get("image_extra_params")
if image_extra_params:
options.append(image_extra_params.strip(','))
if params.get("has_backing_file") == "yes":
backing_param = params.object_params("backing_file")
backing_file = storage.get_image_filename(backing_param,
self.root_dir)
options.append("backing_file=%s" % backing_file)
backing_fmt = backing_param.get("image_format")
options.append("backing_fmt=%s" % backing_fmt)
return options
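    # A rough sketch (assumption): for a qcow2 image with preallocation and
    # cluster size configured, _parse_options() returns something like
    # ['preallocation=metadata', 'cluster_size=64k']; create() then joins the
    # list with ',' and hands it to qemu-img via the '-o' option.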
def _need_auth_info(self, image=None):
"""
Check if a specified image's auth info is required.
qemu-img's 'json:{}' instead of 'filename(uri)' should
be used when auth info is required.
The auth info includes sensitive data like password as
well as other info like iscsi initiator.
:param image: image name
:return: True or False
"""
needed = False
if self.image_access is not None:
tag = image if image else self.tag
if tag == self.tag:
needed = self.image_access.image_auth is not None
else:
needed = tag in self.image_access.image_backing_auth
return needed
@property
def _image_access_tls_creds(self):
tls_creds = None
creds = self.image_access.image_auth if self.image_access else None
if creds is not None:
if creds.storage_type == 'nbd':
if creds.tls_creds:
tls_creds = creds
return tls_creds
@property
def _backing_access_tls_creds(self):
tls_creds_list = []
creds_list = self.image_access.image_backing_auth.values() if self.image_access else []
for creds in creds_list:
if creds.storage_type == 'nbd':
if creds.tls_creds:
tls_creds_list.append(creds)
return tls_creds_list
@property
def _image_access_secret(self):
"""
Get the access secret object and its type of the image itself,
the type can be 'key' or 'password' or 'cookie'
:return: a list of tuple(StorageAuth object, secret type) or []
:note: an image can have more than one secret objects, e.g.
access secret object and cookie secret object for libcurl
"""
secrets = []
auth = self.image_access.image_auth if self.image_access else None
if auth is not None:
if auth.storage_type == 'ceph':
# ceph image access requires secret object by
# qemu-img and only 'password-secret' is supported
if auth.data:
secrets.append((auth, 'password'))
elif auth.storage_type == 'curl':
# a libcurl image can have more than one secret object
if auth.data:
secrets.append((auth, 'password'))
if auth.cookie:
secrets.append((auth.cookie, 'cookie'))
return secrets
@property
def _backing_access_secrets(self):
"""
Get the backing images' access secret objects and types,
the type can be 'key' or 'password'
:return: a list of (StorageAuth object, secret type) or []
"""
secrets = []
info = self.image_access.image_backing_auth.values() if self.image_access else []
for auth in info:
if auth.storage_type == 'ceph':
# ceph image access requires secret object by
# qemu-img and only 'password-secret' is supported
if auth.data:
secrets.append((auth, 'password'))
elif auth.storage_type == 'curl':
if auth.data:
secrets.append((auth, 'password'))
if auth.cookie:
secrets.append((auth.cookie, 'cookie'))
return secrets
@property
def _secret_objects(self):
"""All secret objects str needed for command line."""
secret_objects = self.encryption_config.image_key_secrets
secret_obj_str = "--object secret,id={s.aid},data={s.data}"
return [secret_obj_str.format(s=s) for s in secret_objects]
@property
def _image_access_tls_creds_object(self):
"""Get the tls-creds object str of the image itself."""
tls_obj_str = '--object tls-creds-x509,id={s.aid},endpoint=client,dir={s.tls_creds}'
creds = self._image_access_tls_creds
return tls_obj_str.format(s=creds) if creds else ''
@property
def _backing_access_tls_creds_objects(self):
"""Get all tls-creds object str of the backing images."""
tls_creds = []
tls_obj_str = '--object tls-creds-x509,id={s.aid},endpoint=client,dir={s.tls_creds}'
for creds in self._backing_access_tls_creds:
tls_creds.append(tls_obj_str.format(s=creds))
return tls_creds
@property
def _image_access_secret_object(self):
"""Get the secret object str of the image itself."""
secrets = []
for access_secret, secret_type in self._image_access_secret:
secret_obj_str = ''
if secret_type == 'password':
secret_obj_str = '--object secret,id={s.aid},format={s.data_format},file={s.filename}'
elif secret_type == 'key' or secret_type == 'cookie':
secret_obj_str = '--object secret,id={s.aid},format={s.data_format},data={s.data}'
secrets.append(secret_obj_str.format(s=access_secret))
return secrets
@property
def _backing_access_secret_objects(self):
"""Get all secret object str of the backing images."""
secrets = []
for access_secret, secret_type in self._backing_access_secrets:
secret_obj_str = ''
if secret_type == 'password':
secret_obj_str = "--object secret,id={s.aid},format={s.data_format},file={s.filename}"
elif secret_type == 'key' or secret_type == 'cookie':
secret_obj_str = "--object secret,id={s.aid},format={s.data_format},data={s.data}"
secrets.append(secret_obj_str.format(s=access_secret))
return secrets
@error_context.context_aware
def create(self, params, ignore_errors=False):
"""
Create an image using qemu_img or dd.
:param params: Dictionary containing the test parameters.
:param ignore_errors: Whether to ignore errors on the image creation
cmd.
:note: params should contain:
image_name
name of the image file, without extension
image_format
format of the image (qcow2, raw etc)
image_cluster_size (optional)
cluster size for the image
image_size
requested size of the image (a string qemu-img can
understand, such as '10G')
create_with_dd
use dd to create the image (raw format only)
base_image(optional)
the base image name when create snapshot
base_format(optional)
the format of base image
encrypted(optional)
if the image is encrypted, allowed values: on and off.
Default is "off"
preallocated(optional)
if preallocation when create image, allowed values: off,
metadata. Default is "off"
:return: tuple (path to the image created, process.CmdResult object
containing the result of the creation command).
"""
if params.get(
"create_with_dd") == "yes" and self.image_format == "raw":
            # maps size suffix K,M,G,T => (count multiplier, block size in KiB)
human = {'K': (1, 1),
'M': (1, 1024),
'G': (1024, 1024),
'T': (1024, 1048576),
}
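            # e.g. image_size '10G' -> count = 10 * 1024, bs = 1024K (10 GiB)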
if self.size[-1] in human:
block_size = human[self.size[-1]][1]
size = int(self.size[:-1]) * human[self.size[-1]][0]
qemu_img_cmd = ("dd if=/dev/zero of=%s count=%s bs=%sK"
% (self.image_filename, size, block_size))
else:
cmd_dict = {}
cmd_dict["image_format"] = self.image_format
if self.base_tag:
# if base image has secret, use json representation
base_key_secrets = self.encryption_config.base_key_secrets
if (self.base_tag in [s.image_id for s in base_key_secrets]
or self._need_auth_info(self.base_tag)):
base_params = params.object_params(self.base_tag)
cmd_dict["backing_file"] = "'%s'" % \
get_image_json(self.base_tag, base_params,
self.root_dir)
else:
cmd_dict["backing_file"] = self.base_image_filename
if self.base_format:
cmd_dict["backing_format"] = self.base_format
# secret objects of the backing images
secret_objects = self._backing_access_secret_objects
# secret object of the image itself
if self._image_access_secret_object:
secret_objects.extend(self._image_access_secret_object)
image_secret_objects = self._secret_objects
if image_secret_objects:
secret_objects.extend(image_secret_objects)
if secret_objects:
cmd_dict["secret_object"] = " ".join(secret_objects)
# tls creds objects of the backing images of the source
tls_creds_objects = self._backing_access_tls_creds_objects
# tls creds object of the source image itself
if self._image_access_tls_creds_object:
tls_creds_objects.append(self._image_access_tls_creds_object)
if tls_creds_objects:
cmd_dict["tls_creds_object"] = " ".join(tls_creds_objects)
cmd_dict["image_filename"] = self.image_filename
cmd_dict["image_size"] = self.size
options = self._parse_options(params)
if options:
cmd_dict["options"] = ",".join(options)
qemu_img_cmd = self.image_cmd + " " + \
self._cmd_formatter.format(self.create_cmd, **cmd_dict)
if (params.get("image_backend", "filesystem") == "filesystem"):
image_dirname = os.path.dirname(self.image_filename)
if image_dirname and not os.path.isdir(image_dirname):
e_msg = ("Parent directory of the image file %s does "
"not exist" % self.image_filename)
logging.error(e_msg)
logging.error("This usually means a serious setup exceptions.")
logging.error("Please verify if your data dir contains the "
"expected directory structure")
logging.error("Backing data dir: %s",
data_dir.get_backing_data_dir())
logging.error("Directory structure:")
for root, _, _ in os.walk(data_dir.get_backing_data_dir()):
logging.error(root)
logging.warning("We'll try to proceed by creating the dir. "
"Other errors may ensue")
os.makedirs(image_dirname)
msg = "Create image by command: %s" % qemu_img_cmd
error_context.context(msg, logging.info)
cmd_result = process.run(
qemu_img_cmd, shell=True, verbose=False, ignore_status=True)
if cmd_result.exit_status != 0 and not ignore_errors:
raise exceptions.TestError("Failed to create image %s\n%s" %
(self.image_filename, cmd_result))
if self.encryption_config.key_secret:
self.encryption_config.key_secret.save_to_file()
cmd_result.stdout = cmd_result.stdout_text
cmd_result.stderr = cmd_result.stderr_text
return self.image_filename, cmd_result
def convert(self, params, root_dir, cache_mode=None,
source_cache_mode=None, skip_target_creation=False):
"""
Convert image
:param params: dictionary containing the test parameters
:param root_dir: dir for save the convert image
:param cache_mode: The cache mode used to write the output disk image.
Valid options are: ``none``, ``writeback``
(default), ``writethrough``, ``directsync`` and
``unsafe``.
:param source_cache_mode: the cache mode used with source image file
:param skip_target_creation: qemu-img skips the creation of the target
                                     volume if True (-n), i.e. the target image
                                     must be created before the conversion
:note: params should contain:
convert_target
the convert target image tag
compressed
indicates that target image must be compressed
sparse_size
                indicates the number of consecutive bytes that must contain
                only zeros to create a sparse image during conversion
rate_limit
indicate rate limit for the convert process,
the unit is bytes per second
convert_target_is_zero
indicate that an existing target device will return
zeros for all reads
convert_backing_file
                indicates the backing file to set on the target image
"""
convert_target = params["convert_target"]
convert_params = params.object_params(convert_target)
convert_image = QemuImg(convert_params, root_dir, convert_target)
convert_compressed = convert_params.get("convert_compressed")
sparse_size = convert_params.get("sparse_size")
rate_limit = convert_params.get("rate_limit")
convert_target_is_zero = convert_params.get_boolean(
"convert_target_is_zero")
convert_backing_file = convert_params.get("convert_backing_file")
cmd_dict = {
"convert_compressed": convert_compressed == "yes",
"convert_sparse_size": sparse_size,
"rate_limit": rate_limit,
"image_filename": self.image_filename,
"image_format": self.image_format,
"target_image_format": convert_image.image_format,
"target_image_filename": convert_image.image_filename,
"cache_mode": cache_mode,
"source_cache_mode": source_cache_mode,
"skip_target_image_creation": "-n" if skip_target_creation else "",
"convert_target_is_zero": convert_target_is_zero,
"convert_backing_file": convert_backing_file,
"target_image_opts": ""
}
options = convert_image._parse_options(convert_params)
if options:
cmd_dict["options"] = ",".join(options)
if skip_target_creation:
# -o has no effect when skipping image creation
# This will become an error in future QEMU versions
if options:
cmd_dict.pop("options")
cmd_dict.pop("target_image_format")
cmd_dict["target_image_filename"] = ""
cmd_dict["target_image_opts"] = ("--target-image-opts '%s'"
% get_image_opts(
convert_image.tag,
convert_image.params,
convert_image.root_dir))
if (self.encryption_config.key_secret
or self._need_auth_info(self.tag)):
cmd_dict["image_filename"] = "'%s'" % get_image_json(
self.tag, self.params, self.root_dir)
cmd_dict.pop("image_format")
# source images secrets(luks)
secret_objects = self._secret_objects
# secret objects of the backing images of the source
if self._backing_access_secret_objects:
secret_objects.extend(self._backing_access_secret_objects)
# secret object of the source image itself
if self._image_access_secret_object:
secret_objects.extend(self._image_access_secret_object)
# target image access secret object
# target image to be converted never has backing images
if convert_image._image_access_secret_object:
secret_objects.extend(convert_image._image_access_secret_object)
# target image secret(luks)
if convert_image.encryption_config.key_secret:
secret_objects.extend(convert_image._secret_objects)
if secret_objects:
cmd_dict["secret_object"] = " ".join(secret_objects)
# tls creds objects of the backing images of the source
tls_creds_objects = self._backing_access_tls_creds_objects
# tls creds object of the source image itself
if self._image_access_tls_creds_object:
tls_creds_objects.append(self._image_access_tls_creds_object)
# tls creds object of the target image
if convert_image._image_access_tls_creds_object:
tls_creds_objects.append(
convert_image._image_access_tls_creds_object)
if tls_creds_objects:
cmd_dict["tls_creds_object"] = " ".join(tls_creds_objects)
convert_cmd = self.image_cmd + " " + \
self._cmd_formatter.format(self.convert_cmd, **cmd_dict)
logging.info("Convert image %s from %s to %s", self.image_filename,
self.image_format, convert_image.image_format)
process.run(convert_cmd)
if convert_image.encryption_config.key_secret:
convert_image.encryption_config.key_secret.save_to_file()
return convert_target
def rebase(self, params, cache_mode=None, source_cache_mode=None):
"""
Rebase image.
:param params: dictionary containing the test parameters
:param cache_mode: the cache mode used to write the output disk image,
the valid options are: 'none', 'writeback' (default),
'writethrough', 'directsync' and 'unsafe'.
"""
self.check_option("base_image_filename")
self.check_option("base_format")
rebase_mode = params.get("rebase_mode")
cmd_dict = {"image_format": self.image_format,
"image_filename": self.image_filename,
"cache_mode": cache_mode,
"source_cache_mode": source_cache_mode,
"unsafe": rebase_mode == "unsafe"}
secret_objects = self._secret_objects
if secret_objects:
cmd_dict["secret_object"] = " ".join(secret_objects)
if self.encryption_config.key_secret:
cmd_dict["image_filename"] = "'%s'" % get_image_json(
self.tag, self.params, self.root_dir)
cmd_dict.pop("image_format")
if self.base_tag:
if self.base_tag == "null":
cmd_dict["backing_file"] = "''"
else:
base_params = self.params.object_params(self.base_tag)
base_image = QemuImg(base_params, self.root_dir, self.base_tag)
self.base_image_filename = base_image.image_filename
self.base_format = base_image.image_format
if base_image.encryption_config.key_secret:
cmd_dict["backing_file"] = "'%s'" % get_image_json(
base_image.tag, base_image.params, base_image.root_dir)
else:
cmd_dict["backing_file"] = base_image.image_filename
cmd_dict["backing_format"] = base_image.image_format
else:
raise exceptions.TestError("Can not find the image parameters need"
" for rebase.")
logging.info("Rebase snapshot %s to %s..." % (self.image_filename,
self.base_image_filename))
rebase_cmd = self.image_cmd + " " + \
self._cmd_formatter.format(self.rebase_cmd, **cmd_dict)
process.run(rebase_cmd)
return self.base_tag
def commit(self, params={}, cache_mode=None, base=None, drop=False):
"""
        Commit image to its base file
:param cache_mode: the cache mode used to write the output disk image,
the valid options are: 'none', 'writeback' (default),
'writethrough', 'directsync' and 'unsafe'.
:param base: the backing file into which the changes will be committed
:param drop: drop image after commit
"""
rate_limit = self.params.get("rate_limit")
cmd_dict = {"image_format": self.image_format,
"image_filename": self.image_filename,
"cache_mode": cache_mode,
"commit_drop": drop,
"rate_limit": rate_limit}
secret_objects = self._secret_objects
if secret_objects:
cmd_dict["secret_object"] = " ".join(secret_objects)
if base:
base_params = self.params.object_params(base)
base_image = QemuImg(base_params, self.root_dir, base)
if base_image.encryption_config.key_secret:
cmd_dict["backing_file"] = "'%s'" % get_image_json(
base, base_params, self.root_dir)
else:
cmd_dict["backing_file"] = base_image.image_filename
if self.encryption_config.key_secret:
cmd_dict["image_filename"] = "'%s'" % get_image_json(
self.tag, self.params, self.root_dir)
cmd_dict.pop("image_format")
commit_cmd = self.image_cmd + " " + \
self._cmd_formatter.format(self.commit_cmd, **cmd_dict)
logging.info("Commit image %s" % self.image_filename)
process.run(commit_cmd)
return self.image_filename
def snapshot_create(self):
"""
Create a snapshot image.
:note: params should contain:
snapshot_image_name -- the name of snapshot image file
"""
cmd = self.image_cmd
if self.snapshot_tag:
cmd += " snapshot -c %s" % self.snapshot_image_filename
else:
raise exceptions.TestError("Can not find the snapshot image"
" parameters")
cmd += " %s" % self.image_filename
process.run(cmd)
return self.snapshot_tag
def snapshot_del(self, blkdebug_cfg=""):
"""
Delete a snapshot image.
:param blkdebug_cfg: The configure file of blkdebug
:note: params should contain:
snapshot_image_name -- the name of snapshot image file
"""
cmd = self.image_cmd
if self.snapshot_tag:
cmd += " snapshot -d %s" % self.snapshot_image_filename
else:
raise exceptions.TestError("Can not find the snapshot image"
" parameters")
if blkdebug_cfg:
cmd += " blkdebug:%s:%s" % (blkdebug_cfg, self.image_filename)
else:
cmd += " %s" % self.image_filename
process.run(cmd)
def snapshot_list(self):
"""
List all snapshots in the given image
"""
cmd = self.image_cmd
cmd += " snapshot -l %s" % self.image_filename
return process.run(cmd).stdout_text
def snapshot_apply(self):
"""
Apply a snapshot image.
:note: params should contain:
snapshot_image_name -- the name of snapshot image file
"""
cmd = self.image_cmd
if self.snapshot_tag:
cmd += " snapshot -a %s %s" % (self.snapshot_image_filename,
self.image_filename)
else:
raise exceptions.TestError("Can not find the snapshot image"
" parameters")
process.run(cmd)
def remove(self):
"""
Remove an image file.
"""
logging.debug("Removing image file %s", self.image_filename)
storage.file_remove(self.params, self.image_filename)
if self.data_file:
logging.debug("Removing external data file of image %s",
self.data_file.image_filename)
storage.file_remove(self.data_file.params,
self.data_file.image_filename)
secret_files = []
if self.encryption_config.key_secret:
secret_files.append(self.encryption_config.key_secret.filename)
if self.image_access:
secrets = []
# image secret
if self.image_access.image_auth:
secrets.append(self.image_access.image_auth)
# backing secrets
secrets.extend(self.image_access.image_backing_auth.values())
for auth in secrets:
if auth.data:
secret_files.append(auth.filename)
for f in secret_files:
if os.path.exists(f):
os.unlink(f)
def info(self, force_share=False, output="human"):
"""
Run qemu-img info command on image file and return its output.
:param output: string of output format(`human`, `json`)
"""
logging.debug("Run qemu-img info command on %s", self.image_filename)
backing_chain = self.params.get("backing_chain")
force_share &= self.cap_force_share
cmd = self.image_cmd
cmd += " info"
if self._image_access_secret_object:
# secret object of the image itself
cmd += " %s" % " ".join(self._image_access_secret_object)
if self._image_access_tls_creds_object:
# tls creds object of the image itself
cmd += " %s" % self._image_access_tls_creds_object
if backing_chain == "yes":
if self._backing_access_secret_objects:
# secret objects of the backing images
cmd += " %s" % " ".join(self._backing_access_secret_objects)
if self._backing_access_tls_creds_objects:
# tls creds objects of the backing images
cmd += " %s" % " ".join(self._backing_access_tls_creds_objects)
if "--backing-chain" in self.help_text:
cmd += " --backing-chain"
else:
logging.warn("'--backing-chain' option is not supported")
if force_share:
cmd += " -U"
image_filename = self.image_filename
if self._need_auth_info(self.tag):
# use json repr when access info is required
image_filename = "'%s'" % get_image_json(self.tag, self.params,
self.root_dir)
if os.path.exists(image_filename) or self.is_remote_image():
cmd += " %s --output=%s" % (image_filename, output)
output = process.run(cmd, verbose=True).stdout_text
else:
logging.debug("Image file %s not found", image_filename)
output = None
return output
def get_format(self):
"""
        Get the image file format.
"""
image_info = self.info()
if image_info:
            image_format = re.findall(r"file format: (\w+)", image_info)[0]
else:
image_format = None
return image_format
def support_cmd(self, cmd):
"""
Verifies whether qemu-img supports command cmd.
:param cmd: Command string.
"""
supports_cmd = True
if cmd not in self.help_text:
logging.error("%s does not support command '%s'", self.image_cmd,
cmd)
supports_cmd = False
return supports_cmd
def compare_images(self, image1, image2, strict_mode=False,
verbose=True, force_share=False):
"""
Compare 2 images using the appropriate tools for each virt backend.
:param image1: image path of first image
:param image2: image path of second image
:param strict_mode: Boolean value, True for strict mode,
False for default mode.
:param verbose: Record output in debug file or not
:return: process.CmdResult object containing the result of the command
"""
compare_images = self.support_cmd("compare")
force_share &= self.cap_force_share
if not compare_images:
logging.warn("sub-command compare not supported by qemu-img")
return None
else:
logging.info("Comparing images %s and %s", image1, image2)
compare_cmd = "%s compare" % self.image_cmd
if force_share:
compare_cmd += " -U"
if strict_mode:
compare_cmd += " -s"
compare_cmd += " %s %s" % (image1, image2)
cmd_result = process.run(compare_cmd, ignore_status=True,
shell=True)
if verbose:
logging.debug("Output from command: %s",
cmd_result.stdout_text)
if cmd_result.exit_status == 0:
logging.info("Compared images are equal")
elif cmd_result.exit_status == 1:
raise exceptions.TestFail("Compared images differ")
else:
raise exceptions.TestError("Error in image comparison")
return cmd_result
def compare_to(self, target_image, source_cache_mode=None,
strict_mode=False, force_share=False, verbose=True):
"""
Compare to target image.
:param target_image: target image object
:param source_cache_mode: source cache used to open source image
:param strict_mode: compare fails on sector allocation or image size
:param force_share: open image in shared mode
:return: compare result [process.CmdResult]
"""
if not self.support_cmd("compare"):
logging.warn("qemu-img subcommand compare not supported")
return
force_share &= self.cap_force_share
logging.info("compare image %s to image %s",
self.image_filename, target_image.image_filename)
cmd_dict = {
"image_format": self.image_format,
"compare_second_image_format": target_image.image_format,
"source_cache_mode": source_cache_mode,
"compare_strict_mode": strict_mode,
"force_share": force_share,
"image_filename": self.image_filename,
"compare_second_image_filename": target_image.image_filename,
}
secret_objects = self._secret_objects + target_image._secret_objects
# source image's backing access secret objects
if self._backing_access_secret_objects:
secret_objects.extend(self._backing_access_secret_objects)
# source image access secret object
if self._image_access_secret_object:
secret_objects.extend(self._image_access_secret_object)
# target image's backing access secret objects
if target_image._backing_access_secret_objects:
secret_objects.extend(target_image._backing_access_secret_objects)
# target image access secret object
if target_image._image_access_secret_object:
secret_objects.extend(target_image._image_access_secret_object)
# if compared images are in the same snapshot chain,
# needs to remove duplicated secrets
secret_objects = list(set(secret_objects))
cmd_dict["secret_object"] = " ".join(secret_objects)
# tls creds objects of the backing images of the source
tls_creds_objects = self._backing_access_tls_creds_objects
# tls creds object of the source image
if self._image_access_tls_creds_object:
tls_creds_objects.append(self._image_access_tls_creds_object)
# tls creds objects of the backing images of the target
if target_image._backing_access_tls_creds_objects:
tls_creds_objects.extend(
target_image._backing_access_tls_creds_objects)
# tls creds object of the target image
if target_image._image_access_tls_creds_object:
tls_creds_objects.append(
target_image._image_access_tls_creds_object)
tls_creds_objects = list(set(tls_creds_objects))
cmd_dict["tls_creds_object"] = " ".join(tls_creds_objects)
if (self.encryption_config.key_secret
or self._need_auth_info(self.tag)):
cmd_dict["image_filename"] = "'%s'" % \
get_image_json(self.tag, self.params, self.root_dir)
if (target_image.encryption_config.key_secret
or target_image._need_auth_info(target_image.tag)):
cmd_dict["compare_second_image_filename"] = "'%s'" % \
get_image_json(target_image.tag, target_image.params,
target_image.root_dir)
compare_cmd = self.image_cmd + " " + \
self._cmd_formatter.format(self.compare_cmd, **cmd_dict)
result = process.run(compare_cmd, ignore_status=True, shell=True)
if verbose:
logging.debug("compare output:\n%s", result.stdout_text)
return result
def check(self, params, root_dir, force_share=False, output=None):
"""
Check an image using the appropriate tools for each virt backend.
:param params: Dictionary containing the test parameters.
:param root_dir: Base directory for relative filenames.
:param output: The format of the output(json, human).
:note: params should contain:
image_name -- the name of the image file, without extension
image_format -- the format of the image (qcow2, raw etc)
:return: The output of check result if the image exists, or None.
"""
image_filename = self.image_filename
logging.debug("Checking image file %s", image_filename)
force_share &= self.cap_force_share
cmd_dict = {"image_filename": image_filename,
"force_share": force_share,
"output_format": output}
if (self.encryption_config.key_secret
or self._need_auth_info(self.tag)):
cmd_dict["image_filename"] = "'%s'" % get_image_json(
self.tag, params, root_dir)
# access secret objects of the backing images
secret_objects = self._backing_access_secret_objects
# access secret object of the image itself
if self._image_access_secret_object:
secret_objects.extend(self._image_access_secret_object)
# image(e.g. luks image) secret objects
image_secret_objects = self._secret_objects
if image_secret_objects:
secret_objects.extend(image_secret_objects)
if secret_objects:
cmd_dict["secret_object"] = " ".join(secret_objects)
# tls creds objects of the backing images
tls_creds_objects = self._backing_access_tls_creds_objects
# tls creds object of the image itself
if self._image_access_tls_creds_object:
tls_creds_objects.append(self._image_access_tls_creds_object)
if tls_creds_objects:
cmd_dict["tls_creds_object"] = " ".join(tls_creds_objects)
check_cmd = self.image_cmd + " " + self._cmd_formatter.format(
self.check_cmd, **cmd_dict)
cmd_result = process.run(check_cmd, ignore_status=True,
shell=True, verbose=False)
return cmd_result
def check_image(self, params, root_dir, force_share=False):
"""
Check an image using the appropriate tools for each virt backend.
:param params: Dictionary containing the test parameters.
:param root_dir: Base directory for relative filenames.
:note: params should contain:
image_name -- the name of the image file, without extension
image_format -- the format of the image (qcow2, raw etc)
:raise VMImageCheckError: In case qemu-img check fails on the image.
"""
image_filename = self.image_filename
logging.debug("Checking image file %s", image_filename)
image_is_checkable = self.image_format in ['qcow2', 'qed']
force_share &= self.cap_force_share
if (storage.file_exists(params, image_filename) or
self.is_remote_image()) and image_is_checkable:
try:
# FIXME: do we really need it?
self.info(force_share)
except process.CmdError:
logging.error("Error getting info from image %s",
image_filename)
cmd_result = self.check(params, root_dir, force_share)
# Error check, large chances of a non-fatal problem.
# There are chances that bad data was skipped though
if cmd_result.exit_status == 1:
stdout = cmd_result.stdout_text
for e_line in stdout.splitlines():
logging.error("[stdout] %s", e_line)
stderr = cmd_result.stderr_text
for e_line in stderr.splitlines():
logging.error("[stderr] %s", e_line)
chk = params.get("backup_image_on_check_error", "no")
if chk == "yes":
self.backup_image(params, root_dir, "backup", False)
raise exceptions.TestWarn(
"qemu-img check not completed because of internal "
"errors. Some bad data in the image may have gone "
"unnoticed (%s)" % image_filename)
# Exit status 2 is data corruption for sure,
# so fail the test
elif cmd_result.exit_status == 2:
stdout = cmd_result.stdout_text
for e_line in stdout.splitlines():
logging.error("[stdout] %s", e_line)
stderr = cmd_result.stderr_text
for e_line in stderr.splitlines():
logging.error("[stderr] %s", e_line)
chk = params.get("backup_image_on_check_error", "no")
if chk == "yes":
self.backup_image(params, root_dir, "backup", False)
raise virt_vm.VMImageCheckError(image_filename)
# Leaked clusters, they are known to be harmless to data
# integrity
elif cmd_result.exit_status == 3:
raise exceptions.TestWarn("Leaked clusters were noticed"
" during image check. No data "
"integrity problem was found "
"though. (%s)" % image_filename)
else:
if not storage.file_exists(params, image_filename):
logging.debug("Image file %s not found, skipping check",
image_filename)
elif not image_is_checkable:
logging.debug(
"Image format %s is not checkable, skipping check",
self.image_format)
def amend(self, params, cache_mode=None, ignore_status=False):
"""
Amend the image format specific options for the image
:param params: dictionary containing the test parameters
:param cache_mode: the cache mode used to write the output disk image,
the valid options are: 'none', 'writeback'
(default), 'writethrough', 'directsync' and
'unsafe'.
:param ignore_status: Whether to raise an exception when command
                              returns != 0 (False), or not (True).
:note: params may contain amend options:
amend_size
virtual disk size of the image (a string qemu-img can
understand, such as '10G')
amend_compat
compatibility level (0.10 or 1.1)
amend_backing_file
file name of a base image
amend_backing_fmt
image format of the base image
amend_encryption
encrypt the image, allowed values: on and off.
Default is "off"
amend_cluster_size
cluster size for the image
amend_preallocation
preallocation mode when create image, allowed values: off,
metadata. Default is "off"
amend_lazy_refcounts
postpone refcount updates, allowed values: on and off.
Default is "off"
amend_refcount_bits
width of a reference count entry in bits
amend_keyslot
keyslot for the password, allowed values: between 0 and 7
amend_state
the state for the keyslot,
allowed values: active and inactive
amend_new-secret
the new secret object for the password,
used for adding a new password
amend_old-secret
the old secret object for the password,
used for erasing an existing password
amend_extra_params
additional options, used for extending amend
:return: process.CmdResult object containing the result of the
command
"""
cmd_list = [self.image_cmd, 'amend']
secret_objects = self._secret_objects
if secret_objects:
# add a secret object, use for adding and erasing password
sec_id = params["amend_secret_id"]
sec_data = params["amend_secret_data"]
secret_obj_str = "--object secret,id=%s,data=%s" % (sec_id, sec_data)
secret_objects.append(secret_obj_str)
cmd_list.append(" ".join(secret_objects))
options = []
for key, val in six.iteritems(params):
if key.startswith('amend_') and \
key not in ["amend_secret_id", "amend_secret_data"]:
options.append("%s=%s" % (key[6:], val))
if cache_mode:
cmd_list.append("-t %s" % cache_mode)
if options:
cmd_list.append("-o %s" %
",".join(options).replace("extra_params=", ""))
if self.encryption_config.key_secret:
cmd_list.append("'%s'" % get_image_json(self.tag,
self.params, self.root_dir))
else:
cmd_list.append("-f %s %s" % (self.image_format, self.image_filename))
logging.info("Amend image %s" % self.image_filename)
cmd_result = process.run(" ".join(cmd_list), ignore_status=ignore_status)
return cmd_result
def resize(self, size, shrink=False, preallocation=None):
"""
Qemu image resize wrapper.
        :param size: string of size representation (e.g. +1G, -1k, 1T)
:param shrink: boolean
:param preallocation: preallocation mode
:return: process.CmdResult object containing the result of the
command
"""
cmd_dict = {
"resize_shrink": shrink,
"resize_preallocation": preallocation,
"image_filename": self.image_filename,
"image_size": size,
}
if self.encryption_config.key_secret:
cmd_dict["image_filename"] = "'%s'" % get_image_json(
self.tag, self.params, self.root_dir)
secret_objects = self._secret_objects
if secret_objects:
cmd_dict["secret_object"] = " ".join(secret_objects)
resize_cmd = self.image_cmd + " " + \
self._cmd_formatter.format(self.resize_cmd, **cmd_dict)
cmd_result = process.run(resize_cmd, ignore_status=True)
return cmd_result
def map(self, output="human"):
"""
Qemu image map wrapper.
:param output: string, the map command output format(`human`, `json`)
:return: process.CmdResult object containing the result of the
command
"""
cmd_list = [self.image_cmd, "map",
("--output=%s" % output), self.image_filename]
cmd_result = process.run(" ".join(cmd_list), ignore_status=True)
return cmd_result
def measure(self, target_fmt, size=None, output="human"):
"""
Qemu image measure wrapper.
:param target_fmt: string, the target image format
:param size: string, the benchmark size of a target_fmt, if `None` it
will measure the image object itself with target_fmt
:param output: string, the measure command output format
(`human`, `json`)
:return: process.CmdResult object containing the result of the
command
"""
cmd_list = [self.image_cmd, "measure", ("--output=%s" % output),
("-O %s" % target_fmt)]
if target_fmt == "luks":
target_image = self.params.get("image_measure_target", "tgt")
target_image_secret = self.params.get("image_secret_%s" %
target_image, "measure")
target_image_params = self.params.object_params(target_image)
target_image_params["image_format"] = "luks"
target_image_params["image_secret"] = target_image_secret
target_image_object = QemuImg(
target_image_params, self.root_dir, target_image)
cmd_list.append(target_image_object._secret_objects[-1])
cmd_list.append('-o key-secret=%s' %
target_image_object.encryption_config.key_secret.aid)
if size:
cmd_list.append(("--size %s" % size))
else:
if self.encryption_config.key_secret:
cmd_list.append(self._secret_objects[-1])
image_json_str = get_image_json(self.tag,
self.params,
self.root_dir)
cmd_list.append("'%s'" % image_json_str)
else:
cmd_list.extend([("-f %s" % self.image_format),
self.image_filename])
cmd_result = process.run(" ".join(cmd_list), ignore_status=True)
return cmd_result
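    # Usage sketch (hypothetical "img"): img.measure("qcow2", size="10G",
    # output="json") reports the space a 10G qcow2 image would require, while
    # img.measure("raw") measures this image object itself converted to raw.
    # The luks branch above additionally prepares a secret object for the
    # target image named by the "image_measure_target" parameter.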
def dd(self, output, bs=None, count=None, skip=None):
"""
        Qemu image dd wrapper; clones the image like the dd command.
        Use convert to convert an image from one format to another.
:param output: of=output
:param bs: bs=bs, the block size in bytes
:param count: count=count, count of blocks copied
:param skip: skip=skip, count of blocks skipped
:return: process.CmdResult object containing the result of the
command
"""
cmd_dict = {
"image_filename": self.image_filename,
"target_image_filename": output,
"image_format": self.image_format,
"target_image_format": self.image_format
}
cmd_dict['block_size'] = 'bs=%d' % bs if bs is not None else ''
cmd_dict['count'] = 'count=%d' % count if count is not None else ''
cmd_dict['skip'] = 'skip=%d' % skip if skip is not None else ''
        # TODO: use raw copy (-f raw -O raw) and ignore image secret and format
        # because we cannot set a secret for the output
        raw_copy = bool(self.encryption_config.key_secret)
if raw_copy:
cmd_dict['image_format'] = cmd_dict['target_image_format'] = 'raw'
# use 'json:{}' instead when accessing storage with auth
meta = _get_image_meta(self.tag,
self.params,
self.root_dir) if self._need_auth_info(self.tag) else None
if meta is not None:
if raw_copy:
# drop image secret from meta
for key in ['encrypt.key-secret', 'key-secret']:
if key in meta: # pylint: disable=E1135
meta.pop(key)
meta['driver'] = cmd_dict.pop("image_format") # pylint: disable=E1137
cmd_dict["image_filename"] = "'json:%s'" % json.dumps(meta)
# access secret objects of the backing images
secret_objects = self._backing_access_secret_objects
# access secret object of the image itself
if self._image_access_secret_object:
secret_objects.extend(self._image_access_secret_object)
if secret_objects:
cmd_dict["secret_object"] = " ".join(secret_objects)
# tls creds objects of the backing images
tls_creds_objects = self._backing_access_tls_creds_objects
# tls creds object of the image itself
if self._image_access_tls_creds_object:
tls_creds_objects.append(self._image_access_tls_creds_object)
if tls_creds_objects:
cmd_dict["tls_creds_object"] = " ".join(tls_creds_objects)
dd_cmd = self.image_cmd + " " + \
self._cmd_formatter.format(self.dd_cmd, **cmd_dict)
return process.run(dd_cmd, ignore_status=True)
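    # Usage sketch (hypothetical "img"): img.dd("/tmp/clone.img", bs=1024 * 1024)
    # clones the image with a 1M block size; for encrypted images the copy is
    # forced to raw (-f raw -O raw) since no secret can be set for the output.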
    def copy_data_remote(self, src, dst):
        """Clone the image data to dst with qemu-img dd, using a 1M block size."""
        bs = 1024 * 1024  # 1M block size for a faster copy
        self.dd(dst, bs)
class Iscsidev(storage.Iscsidev):
"""
    Class for handling iscsi devices for VM
"""
def __init__(self, params, root_dir, tag):
"""
Init the default value for image object.
:param params: Dictionary containing the test parameters.
:param root_dir: Base directory for relative filenames.
:param tag: Image tag defined in parameter images
"""
super(Iscsidev, self).__init__(params, root_dir, tag)
def setup(self):
"""
        Access the iscsi target and return the local raw device name.
"""
if self.iscsidevice.logged_in():
logging.warn("Session already present. Don't need to"
" login again")
else:
self.iscsidevice.login()
if utils_misc.wait_for(self.iscsidevice.get_device_name,
self.iscsi_init_timeout):
device_name = self.iscsidevice.get_device_name()
else:
raise exceptions.TestError("Can not get iscsi device name in host"
" in %ss" % self.iscsi_init_timeout)
if self.device_id:
device_name += self.device_id
return device_name
def cleanup(self):
"""
Logout the iscsi target and clean up the config and image.
"""
if self.exec_cleanup:
self.iscsidevice.cleanup()
if self.emulated_file_remove:
logging.debug("Removing file %s", self.emulated_image)
if os.path.exists(self.emulated_image):
os.unlink(self.emulated_image)
else:
logging.debug("File %s not found", self.emulated_image)
class LVMdev(storage.LVMdev):
"""
    Class for handling lvm devices for VM
"""
def __init__(self, params, root_dir, tag):
"""
Init the default value for image object.
:param params: Dictionary containing the test parameters.
:param root_dir: Base directory for relative filenames.
:param tag: Image tag defined in parameter images
"""
super(LVMdev, self).__init__(params, root_dir, tag)
def setup(self):
"""
        Get the logical volume path.
"""
return self.lvmdevice.setup()
def cleanup(self):
"""
        Clean up unused volumes.
"""
return self.lvmdevice.cleanup()
|
pilou-/ansible-modules-core
|
refs/heads/devel
|
cloud/docker/__init__.py
|
12133432
| |
kyuridenamida/atcoder-tools
|
refs/heads/master
|
atcodertools/config/__init__.py
|
12133432
| |
kartikshah1/Test
|
refs/heads/master
|
assignments/templatetags/__init__.py
|
12133432
| |
sugartom/tensorflow-alien
|
refs/heads/master
|
tensorflow/contrib/rnn/python/kernel_tests/fused_rnn_cell_test.py
|
34
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.rnn.python.ops.fused_rnn_cell."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class FusedRnnCellTest(test.TestCase):
def testBasicRNNFusedWrapper(self):
"""This test checks that using a wrapper for BasicRNN works as expected."""
with self.test_session() as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890212)
cell = core_rnn_cell_impl.BasicRNNCell(10)
batch_size = 5
input_size = 20
timelen = 15
inputs = constant_op.constant(
np.random.randn(timelen, batch_size, input_size))
with variable_scope.variable_scope("basic", initializer=initializer):
unpacked_inputs = array_ops.unstack(inputs)
outputs, state = core_rnn.static_rnn(
cell, unpacked_inputs, dtype=dtypes.float64)
packed_outputs = array_ops.stack(outputs)
basic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("basic/")
]
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_state = sess.run([packed_outputs, state])
basic_grads = sess.run(gradients_impl.gradients(packed_outputs, inputs))
basic_wgrads = sess.run(
gradients_impl.gradients(packed_outputs, basic_vars))
with variable_scope.variable_scope(
"fused_static", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
core_rnn_cell_impl.BasicRNNCell(10))
outputs, state = fused_cell(inputs, dtype=dtypes.float64)
fused_static_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused_static/")
]
sess.run([variables.global_variables_initializer()])
fused_static_outputs, fused_static_state = sess.run([outputs, state])
fused_static_grads = sess.run(gradients_impl.gradients(outputs, inputs))
fused_static_wgrads = sess.run(
gradients_impl.gradients(outputs, fused_static_vars))
self.assertAllClose(basic_outputs, fused_static_outputs)
self.assertAllClose(basic_state, fused_static_state)
self.assertAllClose(basic_grads, fused_static_grads)
for basic, fused in zip(basic_wgrads, fused_static_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
with variable_scope.variable_scope(
"fused_dynamic", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
core_rnn_cell_impl.BasicRNNCell(10), use_dynamic_rnn=True)
outputs, state = fused_cell(inputs, dtype=dtypes.float64)
fused_dynamic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused_dynamic/")
]
sess.run([variables.global_variables_initializer()])
fused_dynamic_outputs, fused_dynamic_state = sess.run([outputs, state])
fused_dynamic_grads = sess.run(
gradients_impl.gradients(outputs, inputs))
fused_dynamic_wgrads = sess.run(
gradients_impl.gradients(outputs, fused_dynamic_vars))
self.assertAllClose(basic_outputs, fused_dynamic_outputs)
self.assertAllClose(basic_state, fused_dynamic_state)
self.assertAllClose(basic_grads, fused_dynamic_grads)
for basic, fused in zip(basic_wgrads, fused_dynamic_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
def testTimeReversedFusedRNN(self):
with self.test_session() as sess:
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890213)
fw_cell = core_rnn_cell_impl.BasicRNNCell(10)
bw_cell = core_rnn_cell_impl.BasicRNNCell(10)
batch_size = 5
input_size = 20
timelen = 15
inputs = constant_op.constant(
np.random.randn(timelen, batch_size, input_size))
# test bi-directional rnn
with variable_scope.variable_scope("basic", initializer=initializer):
unpacked_inputs = array_ops.unstack(inputs)
outputs, fw_state, bw_state = core_rnn.static_bidirectional_rnn(
fw_cell, bw_cell, unpacked_inputs, dtype=dtypes.float64)
packed_outputs = array_ops.stack(outputs)
basic_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("basic/")
]
sess.run([variables.global_variables_initializer()])
basic_outputs, basic_fw_state, basic_bw_state = sess.run(
[packed_outputs, fw_state, bw_state])
basic_grads = sess.run(gradients_impl.gradients(packed_outputs, inputs))
basic_wgrads = sess.run(
gradients_impl.gradients(packed_outputs, basic_vars))
with variable_scope.variable_scope("fused", initializer=initializer):
fused_cell = fused_rnn_cell.FusedRNNCellAdaptor(
core_rnn_cell_impl.BasicRNNCell(10))
fused_bw_cell = fused_rnn_cell.TimeReversedFusedRNN(
fused_rnn_cell.FusedRNNCellAdaptor(
core_rnn_cell_impl.BasicRNNCell(10)))
fw_outputs, fw_state = fused_cell(
inputs, dtype=dtypes.float64, scope="fw")
bw_outputs, bw_state = fused_bw_cell(
inputs, dtype=dtypes.float64, scope="bw")
outputs = array_ops.concat([fw_outputs, bw_outputs], 2)
fused_vars = [
v for v in variables.trainable_variables()
if v.name.startswith("fused/")
]
sess.run([variables.global_variables_initializer()])
fused_outputs, fused_fw_state, fused_bw_state = sess.run(
[outputs, fw_state, bw_state])
fused_grads = sess.run(gradients_impl.gradients(outputs, inputs))
fused_wgrads = sess.run(gradients_impl.gradients(outputs, fused_vars))
self.assertAllClose(basic_outputs, fused_outputs)
self.assertAllClose(basic_fw_state, fused_fw_state)
self.assertAllClose(basic_bw_state, fused_bw_state)
self.assertAllClose(basic_grads, fused_grads)
for basic, fused in zip(basic_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
if __name__ == "__main__":
test.main()
|
pepetreshere/odoo
|
refs/heads/patch-2
|
addons/sale_coupon/tests/test_program_with_code_operations.py
|
3
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.sale_coupon.tests.common import TestSaleCouponCommon
from odoo.exceptions import UserError
class TestProgramWithCodeOperations(TestSaleCouponCommon):
    # Test the basic operation (apply_coupon) on a coupon program on which we should
    # apply the reward when the code is correct, or remove the reward automatically when
    # the reward is not valid anymore.
def test_program_usability(self):
# After clicking "Generate coupons", there is no domain so it shows "Match all records".
# But when you click, domain is false (default field value; empty string) so it won't generate anything.
# This is even more weird because if you add something in the domain and then delete it,
# you visually come back to the initial state except the domain became '[]' instead of ''.
# In this case, it will generate the coupon for every partner.
# Thus, we should ensure that if you leave the domain untouched, it generates a coupon for each partner
# as hinted on the screen ('Match all records (X records)')
self.env['coupon.generate.wizard'].with_context(active_id=self.code_promotion_program.id).create({
'generation_type': 'nbr_customer',
}).generate_coupon()
self.assertEqual(len(self.code_promotion_program.coupon_ids), len(self.env['res.partner'].search([])), "It should have generated a coupon for every partner")
def test_program_basic_operation_coupon_code(self):
# Test case: Generate a coupon for my customer, and add a reward then remove it automatically
self.code_promotion_program.reward_type = 'discount'
self.env['coupon.generate.wizard'].with_context(active_id=self.code_promotion_program.id).create({
'generation_type': 'nbr_customer',
'partners_domain': "[('id', 'in', [%s])]" % (self.steve.id),
}).generate_coupon()
coupon = self.code_promotion_program.coupon_ids
# Test the valid code on a wrong sales order
wrong_partner_order = self.env['sale.order'].create({
'partner_id': self.env['res.partner'].create({'name': 'My Partner'}).id,
})
with self.assertRaises(UserError):
self.env['sale.coupon.apply.code'].with_context(active_id=wrong_partner_order.id).create({
'coupon_code': coupon.code
}).process_coupon()
# Test now on a valid sales order
order = self.empty_order
order.write({'order_line': [
(0, False, {
'product_id': self.product_A.id,
'name': '1 Product A',
'product_uom': self.uom_unit.id,
'product_uom_qty': 1.0,
})
]})
self.env['sale.coupon.apply.code'].with_context(active_id=order.id).create({
'coupon_code': coupon.code
}).process_coupon()
order.recompute_coupon_lines()
self.assertEqual(len(order.order_line.ids), 2)
self.assertEqual(coupon.state, 'used')
# Remove the product A from the sale order
order.write({'order_line': [(2, order.order_line[0].id, False)]})
order.recompute_coupon_lines()
self.assertEqual(len(order.order_line.ids), 0)
self.assertEqual(coupon.state, 'new')
def test_program_coupon_double_consuming(self):
# Test case:
# - Generate a coupon
# - add to a sale order A, cancel the sale order
# - add to a sale order B, confirm the order
# - go back to A, reset to draft and confirm
self.code_promotion_program.reward_type = 'discount'
self.env['coupon.generate.wizard'].with_context(active_id=self.code_promotion_program.id).create({
'generation_type': 'nbr_coupon',
'nbr_coupons': 1,
}).generate_coupon()
coupon = self.code_promotion_program.coupon_ids
sale_order_a = self.empty_order.copy()
sale_order_b = self.empty_order.copy()
sale_order_a.write({'order_line': [
(0, False, {
'product_id': self.product_A.id,
'name': '1 Product A',
'product_uom': self.uom_unit.id,
'product_uom_qty': 1.0,
})
]})
self.env['sale.coupon.apply.code'].with_context(active_id=sale_order_a.id).create({
'coupon_code': coupon.code
}).process_coupon()
sale_order_a.recompute_coupon_lines()
self.assertEqual(len(sale_order_a.order_line.ids), 2)
self.assertEqual(coupon.state, 'used')
self.assertEqual(coupon.sales_order_id, sale_order_a)
sale_order_a.action_cancel()
sale_order_b.write({'order_line': [
(0, False, {
'product_id': self.product_A.id,
'name': '1 Product A',
'product_uom': self.uom_unit.id,
'product_uom_qty': 1.0,
})
]})
self.env['sale.coupon.apply.code'].with_context(active_id=sale_order_b.id).create({
'coupon_code': coupon.code
}).process_coupon()
sale_order_b.recompute_coupon_lines()
self.assertEqual(len(sale_order_b.order_line.ids), 2)
self.assertEqual(coupon.state, 'used')
self.assertEqual(coupon.sales_order_id, sale_order_b)
sale_order_b.action_confirm()
sale_order_a.action_draft()
sale_order_a.action_confirm()
# reward line removed automatically
self.assertEqual(len(sale_order_a.order_line.ids), 1)
def test_coupon_code_with_pricelist(self):
# Test case: Generate a coupon (10% discount) and apply it on an order with a specific pricelist (10% discount)
self.env['coupon.generate.wizard'].with_context(active_id=self.code_promotion_program_with_discount.id).create({
'generation_type': 'nbr_coupon',
'nbr_coupons': 1,
}).generate_coupon()
coupon = self.code_promotion_program_with_discount.coupon_ids
first_pricelist = self.env['product.pricelist'].create({
'name': 'First pricelist',
'discount_policy': 'with_discount',
'item_ids': [(0, 0, {
'compute_price': 'percentage',
'base': 'list_price',
'percent_price': 10,
'applied_on': '3_global',
'name': 'First discount'
})]
})
order = self.empty_order
order.pricelist_id = first_pricelist
order.write({'order_line': [
(0, False, {
'product_id': self.product_C.id,
'name': '1 Product C',
'product_uom': self.uom_unit.id,
'product_uom_qty': 1.0,
})
]})
self.env['sale.coupon.apply.code'].with_context(active_id=order.id).create({
'coupon_code': coupon.code
}).process_coupon()
order.recompute_coupon_lines()
self.assertEqual(len(order.order_line.ids), 2)
self.assertEqual(coupon.state, 'used')
self.assertEqual(order.amount_total, 81, "SO total should be 81: (10% of 100 with pricelist) + 10% of 90 with coupon code")
def test_on_next_order_reward_promotion_program(self):
# The flow:
        # 1. Create a program `A` that gives a free `Product B` on next order if you buy a `Product A`
# This program should be code_needed with code `free_B_on_next_order`
# 2. Create a program `B` that gives 10% discount on next order automatically
        # 3. Create a SO with a `third product` and recompute coupons; you SHOULD get a coupon (from program `B`) for your next order that will discount 10%
# 4. Try to apply `A`, it should error since we did not buy any product A.
# 5. Add a product A to the cart and try to apply `A` again, this time it should work
# 6. Verify you have 2 generated coupons and validate the SO (so the 2 generated coupons will be valid)
# 7. Create a new SO (with the same partner) and try to apply coupon generated by `A`. it SHOULD error since we don't have any `Product B` in the cart
# 8. Add a Product B in the cart
# 9. Try to apply once again coupon generated by `A`, it should give you the free product B
# 10. Try to apply coupon generated by `B`, it should give you 10% discount.
# => SO will then be 0$ until we recompute the order lines
# 1.
self.immediate_promotion_program.write({
'promo_applicability': 'on_next_order',
'promo_code_usage': 'code_needed',
'promo_code': 'free_B_on_next_order',
})
# 2.
self.p1 = self.env['coupon.program'].create({
'name': 'Code for 10% on next order',
'discount_type': 'percentage',
'discount_percentage': 10.0,
'program_type': 'promotion_program',
'promo_code_usage': 'no_code_needed',
'promo_applicability': 'on_next_order',
})
# 3.
order = self.empty_order.copy()
self.third_product = self.env['product.product'].create({
            'name': 'Third Product',
'list_price': 5,
'sale_ok': True
})
order.write({'order_line': [
(0, False, {
'product_id': self.third_product.id,
'name': '1 Third Product',
'product_uom': self.uom_unit.id,
'product_uom_qty': 1.0,
})
]})
order.recompute_coupon_lines()
        self.assertEqual(len(self.p1.coupon_ids.ids), 1, "You should get a coupon for your next order that will offer a 10% discount")
# 4.
with self.assertRaises(UserError):
self.env['sale.coupon.apply.code'].with_context(active_id=order.id).create({
'coupon_code': 'free_B_on_next_order'
}).process_coupon()
# 5.
order.write({'order_line': [
(0, False, {
'product_id': self.product_A.id,
'name': '1 Product A',
'product_uom': self.uom_unit.id,
'product_uom_qty': 1.0,
})
]})
self.env['sale.coupon.apply.code'].with_context(active_id=order.id).create({
'coupon_code': 'free_B_on_next_order'
}).process_coupon()
# 6.
self.assertEqual(len(order.generated_coupon_ids), 2, "You should get a second coupon for your next order that will offer a free Product B")
order.action_confirm()
# 7.
order_bis = self.empty_order
with self.assertRaises(UserError):
self.env['sale.coupon.apply.code'].with_context(active_id=order_bis.id).create({
'coupon_code': order.generated_coupon_ids[1].code
}).process_coupon()
# 8.
order_bis.write({'order_line': [
(0, False, {
'product_id': self.product_B.id,
'name': '1 Product B',
'product_uom': self.uom_unit.id,
'product_uom_qty': 1.0,
})
]})
# 9.
self.env['sale.coupon.apply.code'].with_context(active_id=order_bis.id).create({
'coupon_code': order.generated_coupon_ids[1].code
}).process_coupon()
self.assertEqual(len(order_bis.order_line), 2, "You should get a free Product B")
# 10.
self.env['sale.coupon.apply.code'].with_context(active_id=order_bis.id).create({
'coupon_code': order.generated_coupon_ids[0].code
}).process_coupon()
self.assertEqual(len(order_bis.order_line), 3, "You should get a 10% discount line")
self.assertEqual(order_bis.amount_total, 0, "SO total should be null: (Paid product - Free product = 0) + 10% of nothing")
def test_on_next_order_reward_promotion_program_with_requirements(self):
self.immediate_promotion_program.write({
'promo_applicability': 'on_next_order',
'promo_code_usage': 'code_needed',
'promo_code': 'free_B_on_next_order',
'rule_minimum_amount': 700,
'rule_minimum_amount_tax_inclusion': 'tax_excluded'
})
order = self.empty_order.copy()
self.product_A.lst_price = 700
order.write({'order_line': [
(0, False, {
'product_id': self.product_A.id,
'name': '1 Product A',
'product_uom': self.uom_unit.id,
'product_uom_qty': 1.0,
})
]})
self.env['sale.coupon.apply.code'].with_context(active_id=order.id).create({
'coupon_code': 'free_B_on_next_order'
}).process_coupon()
        self.assertEqual(len(self.immediate_promotion_program.coupon_ids.ids), 1, "You should get a coupon for your next order that will offer a free product B")
order_bis = self.empty_order
order_bis.write({'order_line': [
(0, False, {
'product_id': self.product_B.id,
'name': '1 Product B',
'product_uom': self.uom_unit.id,
'product_uom_qty': 1.0,
})
]})
with self.assertRaises(UserError):
            # It should error since we did not validate the previous SO, so the coupon is `reserved` but not `new`
self.env['sale.coupon.apply.code'].with_context(active_id=order_bis.id).create({
'coupon_code': order.generated_coupon_ids[0].code
}).process_coupon()
order.action_confirm()
        # It should not error even if the SO does not meet the requirements ($700 and 1 product A), since those requirements were only used to generate the coupon that we are now applying
self.env['sale.coupon.apply.code'].with_context(active_id=order_bis.id).create({
'coupon_code': order.generated_coupon_ids[0].code
}).process_coupon()
self.assertEqual(len(order_bis.order_line), 2, "You should get 1 regular product_B and 1 free product_B")
order_bis.recompute_coupon_lines()
self.assertEqual(len(order_bis.order_line), 2, "Free product from a coupon generated from a promotion program on next order should not dissapear")
|
geminy/aidear
|
refs/heads/master
|
snippets/python/PycharmProject/stack.py
|
1
|
#!/usr/bin/env python
stack = []
def pushit():
stack.append(raw_input('Enter new string: ').strip())
def popit():
if len(stack) == 0:
print 'Cannot pop from an empty stack!'
else:
        print 'Removed [', stack.pop(), ']'
def viewstack():
print stack # calls str() internally
CMDs = {'u': pushit, 'o':popit, 'v': viewstack}
def showmenu():
pr = '''
p(U)sh
p(O)p
(V)iew
(Q)uit
Enter choice: '''
while True:
while True:
try:
choice = raw_input(pr).strip()[0].lower()
except (EOFError, KeyboardInterrupt, IndexError):
choice = 'q'
print '\nYou picked: [%s]' % (choice)
if choice not in 'uovq':
print 'Invalid option, try again'
else:
break
if choice == 'q':
break
        CMDs[choice]() # dispatch to the chosen stack operation
if '__main__' == __name__:
showmenu()
|
ustayready/CredSniper
|
refs/heads/master
|
modules/example/example.py
|
1
|
from __future__ import print_function
from flask import redirect, request, jsonify, Markup
from os import system
from core import functions
from core.base_module import *
import uuid
import mechanicalsoup
import bs4
import re, sys, time, random
import time
import json
class ExampleModule(BaseModule):
def __init__(self, enable_2fa=False):
super().__init__(self)
self.set_name('example')
self.add_route('main', '/')
self.add_route('twofactor', '/twofactor')
self.add_route('redirect', '/redirect')
self.enable_two_factor(enable_2fa)
def main(self):
next_url = '/twofactor'
template = self.env.get_template('login.html')
return template.render(
next_url=next_url,
hostname=request.host,
)
def twofactor(self):
self.user = request.values.get('username')
self.password = request.values.get('password')
next_url = '/redirect'
functions.cache_creds(self.name, self.user, self.password)
template = self.env.get_template('twofactor.html')
return template.render(
hostname=request.host,
next_url=next_url,
username=self.user,
password=self.password,
)
def redirect(self):
self.user = request.values.get('username')
self.password = request.values.get('password')
self.two_factor_token = request.values.get('two_factor_token')
self.two_factor_type = request.values.get('two_factor_type')
city, region, zip_code = '','',''
try:
geoip_url = 'https://freegeoip.net/json/{}'.format(
request.remote_addr
)
geo_browser = mechanicalsoup.StatefulBrowser()
geo_response = geo_browser.open(geoip_url)
geo = json.loads(geo_response.text)
city = geo['city']
region = geo['region_name']
zip_code = geo['zip_code']
except Exception as ex:
pass
functions.store_creds(
self.name,
self.user,
self.password,
self.two_factor_token,
self.two_factor_type,
request.remote_addr,
city,
region,
zip_code
)
return redirect(self.final_url, code=302)
def load(enable_2fa=False):
return ExampleModule(enable_2fa)
|
pombreda/pyamg
|
refs/heads/master
|
Examples/VisualizingAggregation/demo2.py
|
1
|
# 3D example of viewing aggregates from SA using VTK
from pyamg.aggregation import standard_aggregation
from pyamg.vis import vis_coarse, vtk_writer
from pyamg.gallery import load_example
# retrieve the problem
data = load_example('unit_cube')
A = data['A'].tocsr()
V = data['vertices']
E2V = data['elements']
# perform smoothed aggregation
Agg = standard_aggregation(A)
# create the vtk file of aggregates
vis_coarse.vis_aggregate_groups(Verts=V, E2V=E2V, Agg=Agg, \
mesh_type='tet', output='vtk', \
fname='output_aggs.vtu')
# create the vtk file for a mesh
vtk_writer.write_basic_mesh(Verts=V, E2V=E2V, \
mesh_type='tet', \
fname='output_mesh.vtu')
# to use Paraview:
# start Paraview: Paraview --data=output_mesh.vtu
# apply
# under display in the object inspector:
# select wireframe representation
# select a better solid color
# selecting surface with edges and low opacity also helps
# open file: output_aggs.vtu
# under display in the object inspector:
# select surface with edges representation
# select a better solid color
# increase line width and point size to see these aggs (if present)
# reduce the opacity, sometimes helps
|
jarvys/django-1.7-jdb
|
refs/heads/master
|
tests/sites_framework/models.py
|
160
|
from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class AbstractArticle(models.Model):
title = models.CharField(max_length=50)
objects = models.Manager()
on_site = CurrentSiteManager()
class Meta:
abstract = True
def __str__(self):
return self.title
class SyndicatedArticle(AbstractArticle):
sites = models.ManyToManyField(Site)
class ExclusiveArticle(AbstractArticle):
site = models.ForeignKey(Site)
class CustomArticle(AbstractArticle):
places_this_article_should_appear = models.ForeignKey(Site)
objects = models.Manager()
on_site = CurrentSiteManager("places_this_article_should_appear")
|
tjsavage/full_nonrel_starter
|
refs/heads/master
|
django/contrib/comments/views/utils.py
|
192
|
"""
A few bits of helper functions for comment views.
"""
import urllib
import textwrap
from django.http import HttpResponseRedirect
from django.core import urlresolvers
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import comments
def next_redirect(data, default, default_view, **get_kwargs):
"""
Handle the "where should I go next?" part of comment views.
The next value could be a kwarg to the function (``default``), or a
``?next=...`` GET arg, or the URL of a given view (``default_view``). See
the view modules for examples.
Returns an ``HttpResponseRedirect``.
"""
next = data.get("next", default)
if next is None:
next = urlresolvers.reverse(default_view)
if get_kwargs:
if '#' in next:
tmp = next.rsplit('#', 1)
next = tmp[0]
anchor = '#' + tmp[1]
else:
anchor = ''
joiner = ('?' in next) and '&' or '?'
next += joiner + urllib.urlencode(get_kwargs) + anchor
return HttpResponseRedirect(next)
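# Usage sketch (illustrative names only): a posting view might call
#   next_redirect(request.POST, None, 'comments-comment-done', c=comment.pk)
# which falls back to reversing 'comments-comment-done' when no "next" value is
# given and appends ?c=<pk>, re-attaching any #fragment after the query string.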
def confirmation_view(template, doc="Display a confirmation view."):
"""
Confirmation view generator for the "comment was
posted/flagged/deleted/approved" views.
"""
def confirmed(request):
comment = None
if 'c' in request.GET:
try:
comment = comments.get_model().objects.get(pk=request.GET['c'])
except (ObjectDoesNotExist, ValueError):
pass
return render_to_response(template,
{'comment': comment},
context_instance=RequestContext(request)
)
confirmed.__doc__ = textwrap.dedent("""\
%s
        Templates: ``%s``
Context:
comment
The posted comment
""" % (doc, template)
)
return confirmed
|
GenericSnake/interactive-tutorials
|
refs/heads/master
|
markdown/extensions/wikilinks.py
|
123
|
#!/usr/bin/env python
'''
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+
Basic usage:
>>> import markdown
>>> text = "Some text with a [[WikiLink]]."
>>> html = markdown.markdown(text, ['wikilinks'])
>>> html
u'<p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>'
Whitespace behavior:
>>> markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
u'<p><a class="wikilink" href="/foo_bar_baz/">foo bar_baz</a></p>'
>>> markdown.markdown('foo [[ ]] bar', ['wikilinks'])
u'<p>foo bar</p>'
To define custom settings the simple way:
>>> markdown.markdown(text,
... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
... )
u'<p>Some text with a <a class="foo" href="/wiki/WikiLink.html">WikiLink</a>.</p>'
Custom settings the complex way:
>>> md = markdown.Markdown(
... extensions = ['wikilinks'],
... extension_configs = {'wikilinks': [
... ('base_url', 'http://example.com/'),
... ('end_url', '.html'),
... ('html_class', '') ]},
... safe_mode = True)
>>> md.convert(text)
u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
>>> text = """wiki_base_url: http://example.com/
... wiki_end_url: .html
... wiki_html_class:
...
... Some text with a [[WikiLink]]."""
>>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
>>> md.convert(text)
u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
MetaData should not carry over to next document:
>>> md.convert("No [[MetaData]] here.")
u'<p>No <a class="wikilink" href="/MetaData/">MetaData</a> here.</p>'
Define a custom URL builder:
>>> def my_url_builder(label, base, end):
... return '/bar/'
>>> md = markdown.Markdown(extensions=['wikilinks'],
... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
>>> md.convert('[[foo]]')
u'<p><a class="wikilink" href="/bar/">foo</a></p>'
From the command line:
python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
By [Waylan Limberg](http://achinghead.com/).
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
'''
import markdown
import re
def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
return '%s%s%s'% (base, clean_label, end)
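# For example, build_url('foo bar', '/', '/') collapses the whitespace to an
# underscore and returns '/foo_bar/'.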
class WikiLinkExtension(markdown.Extension):
def __init__(self, configs):
# set extension defaults
self.config = {
        'base_url' : ['/', 'String to append to beginning of URL.'],
'end_url' : ['/', 'String to append to end of URL.'],
'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
'build_url' : [build_url, 'Callable formats URL from label.'],
}
# Override defaults with user settings
for key, value in configs :
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
self.md = md
# append to end of inline patterns
WIKILINK_RE = r'\[\[([A-Za-z0-9_ -]+)\]\]'
wikilinkPattern = WikiLinks(WIKILINK_RE, self.config)
wikilinkPattern.md = md
md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong")
class WikiLinks(markdown.inlinepatterns.Pattern):
def __init__(self, pattern, config):
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.config = config
def handleMatch(self, m):
if m.group(2).strip():
base_url, end_url, html_class = self._getMeta()
label = m.group(2).strip()
url = self.config['build_url'][0](label, base_url, end_url)
a = markdown.etree.Element('a')
a.text = label
a.set('href', url)
if html_class:
a.set('class', html_class)
else:
a = ''
return a
def _getMeta(self):
""" Return meta data or config data. """
base_url = self.config['base_url'][0]
end_url = self.config['end_url'][0]
html_class = self.config['html_class'][0]
if hasattr(self.md, 'Meta'):
if self.md.Meta.has_key('wiki_base_url'):
base_url = self.md.Meta['wiki_base_url'][0]
if self.md.Meta.has_key('wiki_end_url'):
end_url = self.md.Meta['wiki_end_url'][0]
if self.md.Meta.has_key('wiki_html_class'):
html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class
def makeExtension(configs=None) :
return WikiLinkExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Alwnikrotikz/pyglet
|
refs/heads/master
|
contrib/scene2d/examples/keen_intro.py
|
29
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from pyglet.window import Window
from pyglet import clock
from scene2d.textsprite import *
from pyglet import font
from pyglet.gl import *  # provides glClear, GL_COLOR_BUFFER_BIT and glLoadIdentity used below
width, height = 640, 480
window = Window(width=width, height=height)
arial = font.load('Arial', 500, bold=True)
commander = TextSprite(arial, 'COMMANDER', color=(1, 1, 1, 0.5))
keen = TextSprite(arial, 'KEEN', color=(1, 1, 1, 0.5))
print dir(keen)
commander.x = width
keen.x = -keen.width
commander.dx = -(commander.width + width) / 10
keen.dx = (keen.width + width) / 10
clock.set_fps_limit(30)
while not window.has_exit:
window.dispatch_events()
time = clock.tick()
glClear(GL_COLOR_BUFFER_BIT)
for text in (commander, keen):
glLoadIdentity()
text.x += text.dx * time
text.draw()
window.flip()
|
ajtowns/bitcoin
|
refs/heads/master
|
test/functional/mempool_package_onemore.py
|
10
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descendant package tracking carve-out allowing one final transaction in
an otherwise-full package as long as it has only one parent and is <= 10k in
size.
"""
from decimal import Decimal
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
chain_transaction,
)
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-maxorphantx=1000"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Mine some blocks and have them mature.
self.nodes[0].generate(COINBASE_MATURITY + 1)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
fee = Decimal("0.0002")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
for _ in range(4):
(txid, sent_value) = chain_transaction(self.nodes[0], [txid], [vout], value, fee, 2)
vout = 0
value = sent_value
chain.append([txid, value])
for _ in range(MAX_ANCESTORS - 4):
(txid, sent_value) = chain_transaction(self.nodes[0], [txid], [0], value, fee, 1)
value = sent_value
chain.append([txid, value])
(second_chain, second_chain_value) = chain_transaction(self.nodes[0], [utxo[1]['txid']], [utxo[1]['vout']], utxo[1]['amount'], fee, 1)
# Check mempool has MAX_ANCESTORS + 1 transactions in it
assert_equal(len(self.nodes[0].getrawmempool(True)), MAX_ANCESTORS + 1)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many unconfirmed ancestors [limit: 25]", chain_transaction, self.nodes[0], [txid], [0], value, fee, 1)
# ...even if it chains on from some point in the middle of the chain.
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", chain_transaction, self.nodes[0], [chain[2][0]], [1], chain[2][1], fee, 1)
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", chain_transaction, self.nodes[0], [chain[1][0]], [1], chain[1][1], fee, 1)
# ...even if it chains on to two parent transactions with one in the chain.
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", chain_transaction, self.nodes[0], [chain[0][0], second_chain], [1, 0], chain[0][1] + second_chain_value, fee, 1)
        # ...especially if it's > 40k weight
assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", chain_transaction, self.nodes[0], [chain[0][0]], [1], chain[0][1], fee, 350)
# But not if it chains directly off the first transaction
(replacable_txid, replacable_orig_value) = chain_transaction(self.nodes[0], [chain[0][0]], [1], chain[0][1], fee, 1)
# and the second chain should work just fine
chain_transaction(self.nodes[0], [second_chain], [0], second_chain_value, fee, 1)
# Make sure we can RBF the chain which used our carve-out rule
second_tx_outputs = {self.nodes[0].getrawtransaction(replacable_txid, True)["vout"][0]['scriptPubKey']['address']: replacable_orig_value - (Decimal(1) / Decimal(100))}
second_tx = self.nodes[0].createrawtransaction([{'txid': chain[0][0], 'vout': 1}], second_tx_outputs)
signed_second_tx = self.nodes[0].signrawtransactionwithwallet(second_tx)
self.nodes[0].sendrawtransaction(signed_second_tx['hex'])
# Finally, check that we added two transactions
assert_equal(len(self.nodes[0].getrawmempool(True)), MAX_ANCESTORS + 3)
if __name__ == '__main__':
MempoolPackagesTest().main()
|
John-Boik/Principled-Societies-Project
|
refs/heads/master
|
leddaApp/static/brython/src/Lib/test/test_urllibnet.py
|
23
|
#!/usr/bin/env python3
import unittest
from test import support
import contextlib
import socket
import urllib.request
import sys
import os
import email.message
import time
class URLTimeoutTest(unittest.TestCase):
# XXX this test doesn't seem to test anything useful.
TIMEOUT = 30.0
def setUp(self):
socket.setdefaulttimeout(self.TIMEOUT)
def tearDown(self):
socket.setdefaulttimeout(None)
def testURLread(self):
with support.transient_internet("www.python.org"):
f = urllib.request.urlopen("http://www.python.org/")
x = f.read()
class urlopenNetworkTests(unittest.TestCase):
"""Tests urllib.reqest.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
setUp is not used for always constructing a connection to
    http://www.python.org/ since there are a few tests that don't use that address
and making a connection is expensive enough to warrant minimizing unneeded
connections.
"""
@contextlib.contextmanager
def urlopen(self, *args, **kwargs):
resource = args[0]
with support.transient_internet(resource):
r = urllib.request.urlopen(*args, **kwargs)
try:
yield r
finally:
r.close()
def test_basic(self):
# Simple test expected to pass.
with self.urlopen("http://www.python.org/") as open_url:
for attr in ("read", "readline", "readlines", "fileno", "close",
"info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
self.assertTrue(open_url.read(), "calling 'read' failed")
def test_readlines(self):
# Test both readline and readlines.
with self.urlopen("http://www.python.org/") as open_url:
self.assertIsInstance(open_url.readline(), bytes,
"readline did not return a string")
self.assertIsInstance(open_url.readlines(), list,
"readlines did not return a list")
def test_info(self):
# Test 'info'.
with self.urlopen("http://www.python.org/") as open_url:
info_obj = open_url.info()
self.assertIsInstance(info_obj, email.message.Message,
"object returned by 'info' is not an "
"instance of email.message.Message")
self.assertEqual(info_obj.get_content_subtype(), "html")
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
URL = "http://www.python.org/"
with self.urlopen(URL) as open_url:
gotten_url = open_url.geturl()
self.assertEqual(gotten_url, URL)
def test_getcode(self):
# test getcode() with the fancy opener to get 404 error codes
URL = "http://www.python.org/XXXinvalidXXX"
with support.transient_internet(URL):
open_url = urllib.request.FancyURLopener().open(URL)
try:
code = open_url.getcode()
finally:
open_url.close()
self.assertEqual(code, 404)
def test_fileno(self):
if sys.platform in ('win32',):
# On Windows, socket handles are not file descriptors; this
# test can't pass on Windows.
return
# Make sure fd returned by fileno is valid.
with self.urlopen("http://www.python.org/", timeout=None) as open_url:
fd = open_url.fileno()
with os.fdopen(fd, encoding='utf-8') as f:
self.assertTrue(f.read(), "reading from file created using fd "
"returned by fileno failed")
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
bogus_domain = "sadflkjsasf.i.nvali.d"
try:
socket.gethostbyname(bogus_domain)
except OSError:
# socket.gaierror is too narrow, since getaddrinfo() may also
# fail with EAI_SYSTEM and ETIMEDOUT (seen on Ubuntu 13.04),
# i.e. Python's TimeoutError.
pass
else:
# This happens with some overzealous DNS providers such as OpenDNS
self.skipTest("%r should not resolve for test to work" % bogus_domain)
self.assertRaises(IOError,
# SF patch 809915: In Sep 2003, VeriSign started
# highjacking invalid .com and .net addresses to
# boost traffic to their own site. This test
# started failing then. One hopes the .invalid
# domain will be spared to serve its defined
# purpose.
# urllib.urlopen, "http://www.sadflkjsasadf.com/")
urllib.request.urlopen,
"http://sadflkjsasf.i.nvali.d/")
class urlretrieveNetworkTests(unittest.TestCase):
"""Tests urllib.request.urlretrieve using the network."""
@contextlib.contextmanager
def urlretrieve(self, *args, **kwargs):
resource = args[0]
with support.transient_internet(resource):
file_location, info = urllib.request.urlretrieve(*args, **kwargs)
try:
yield file_location, info
finally:
support.unlink(file_location)
def test_basic(self):
# Test basic functionality.
with self.urlretrieve("http://www.python.org/") as (file_location, info):
self.assertTrue(os.path.exists(file_location), "file location returned by"
" urlretrieve is not a valid path")
with open(file_location, encoding='utf-8') as f:
self.assertTrue(f.read(), "reading from the file location returned"
" by urlretrieve failed")
def test_specified_path(self):
# Make sure that specifying the location of the file to write to works.
with self.urlretrieve("http://www.python.org/",
support.TESTFN) as (file_location, info):
self.assertEqual(file_location, support.TESTFN)
self.assertTrue(os.path.exists(file_location))
with open(file_location, encoding='utf-8') as f:
self.assertTrue(f.read(), "reading from temporary file failed")
def test_header(self):
# Make sure header returned as 2nd value from urlretrieve is good.
with self.urlretrieve("http://www.python.org/") as (file_location, info):
self.assertIsInstance(info, email.message.Message,
"info is not an instance of email.message.Message")
logo = "http://www.python.org/community/logos/python-logo-master-v3-TM.png"
def test_data_header(self):
with self.urlretrieve(self.logo) as (file_location, fileheaders):
datevalue = fileheaders.get('Date')
dateformat = '%a, %d %b %Y %H:%M:%S GMT'
try:
time.strptime(datevalue, dateformat)
except ValueError:
self.fail('Date value not in %r format', dateformat)
def test_reporthook(self):
records = []
def recording_reporthook(blocks, block_size, total_size):
records.append((blocks, block_size, total_size))
with self.urlretrieve(self.logo, reporthook=recording_reporthook) as (
file_location, fileheaders):
expected_size = int(fileheaders['Content-Length'])
records_repr = repr(records) # For use in error messages.
self.assertGreater(len(records), 1, msg="There should always be two "
"calls; the first one before the transfer starts.")
self.assertEqual(records[0][0], 0)
self.assertGreater(records[0][1], 0,
msg="block size can't be 0 in %s" % records_repr)
self.assertEqual(records[0][2], expected_size)
self.assertEqual(records[-1][2], expected_size)
block_sizes = {block_size for _, block_size, _ in records}
self.assertEqual({records[0][1]}, block_sizes,
msg="block sizes in %s must be equal" % records_repr)
self.assertGreaterEqual(records[-1][0]*records[0][1], expected_size,
msg="number of blocks * block size must be"
" >= total size in %s" % records_repr)
def test_main():
support.requires('network')
support.run_unittest(URLTimeoutTest,
urlopenNetworkTests,
urlretrieveNetworkTests)
if __name__ == "__main__":
test_main()
|
android-ia/platform_external_chromium_org_tools_gyp
|
refs/heads/master
|
PRESUBMIT.py
|
32
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
    # No exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
def GetPreferredTrySlaves():
return ['gyp-win32', 'gyp-win64', 'gyp-linux', 'gyp-mac', 'gyp-android']
|
edelooff/newWeb
|
refs/heads/master
|
newweb/pagemaker/admin.py
|
1
|
#!/usr/bin/python
"""newWeb PageMaker Mixins for admin purposes."""
# Standard modules
import datetime
import decimal
import inspect
import os
# Package modules
from .. import model
from .. import templateparser
NOT_ALLOWED_METHODS = dir({}) + ['key', 'identifier']
FIELDTYPES = {'datetime': datetime.datetime,
'decimal': decimal.Decimal}
class AdminMixin(object):
"""Provides an admin interface based on the available models"""
def _Admin(self, url):
self.parser.RegisterFunction('classname', lambda cls: type(cls).__name__)
if not self.ADMIN_MODEL:
return 'Setup ADMIN_MODEL first'
indextemplate = templateparser.FileTemplate(
os.path.join(os.path.dirname(__file__), 'admin', 'index.html'))
urlparts = (url or '').split('/')
table = None
method = 'List'
methods = None
results = None
columns = None
basepath = self.__BasePath()
resultshtml = []
columns = None
edithtml = None
message = None
docs = None
if len(urlparts) > 2:
if urlparts[1] == 'table':
table = urlparts[2]
methods = self.__AdminTablesMethods(table)
docs = self.__GetClassDocs(table)
if len(urlparts) > 3:
method = urlparts[3]
if method == 'edit':
edithtml = self.__EditRecord(table, urlparts[4])
elif method == 'delete':
key = self.post.getfirst('key')
if self.__DeleteRecord(table, key):
message = '%s with key %s deleted.' %(table, key)
else:
message = 'Could not find %s with key %s.' %(table, key)
elif method == 'save':
message = self.__SaveRecord(table, self.post.getfirst('key'))
else:
(columns, results) = self.__AdminTablesMethodsResults(urlparts[2],
method)
resulttemplate = templateparser.FileTemplate(
os.path.join(os.path.dirname(__file__), 'admin', 'record.html'))
for result in results:
resultshtml.append(resulttemplate.Parse(result=result['result'],
key=result['key'],
table=table,
basepath=basepath,
fieldtypes=FIELDTYPES))
elif urlparts[1] == 'method':
table = urlparts[2]
methods = self.__AdminTablesMethods(table)
docs = self.__GetDocs(table, urlparts[3])
return indextemplate.Parse(basepath=basepath,
tables=self.__AdminTables(),
table=table,
columns=columns,
method=method,
methods=methods,
results=resultshtml,
edit=edithtml,
message=message,
docs=docs)
def __GetDocs(self, table, method):
if self.__CheckTable(table):
table = getattr(self.ADMIN_MODEL, table)
methodobj = getattr(table, method)
if methodobj.__doc__:
return inspect.cleandoc(methodobj.__doc__)
try:
while table:
table = table.__bases__[0]
methodobj = getattr(table, method)
if methodobj.__doc__:
return inspect.cleandoc(methodobj.__doc__)
except AttributeError:
pass
    return 'No documentation available'
def __GetClassDocs(self, table):
if self.__CheckTable(table):
table = getattr(self.ADMIN_MODEL, table)
if table.__doc__:
return inspect.cleandoc(table.__doc__)
try:
while table:
table = table.__bases__[0]
if table.__doc__:
return inspect.cleandoc(table.__doc__)
except AttributeError:
pass
    return 'No documentation available'
def __EditRecord(self, table, key):
self.parser.RegisterFunction('classname', lambda cls: type(cls).__name__)
edittemplate = templateparser.FileTemplate(
os.path.join(os.path.dirname(__file__), 'admin', 'edit.html'))
fields = self.__EditRecordFields(table, key)
if not fields:
return 'Could not load record with %s' % key
return edittemplate.Parse(table=table,
key=key,
basepath=self.__BasePath(),
fields=fields,
fieldtypes=FIELDTYPES)
def __SaveRecord(self, table, key):
if self.__CheckTable(table):
table = getattr(self.ADMIN_MODEL, table)
try:
obj = table.FromPrimary(self.connection, key)
except model.NotExistError:
return 'Could not load record with %s' % key
for item in obj.keys():
if (isinstance(obj[item], int) or
isinstance(obj[item], long)):
obj[item] = int(self.post.getfirst(item, 0))
elif (isinstance(obj[item], float) or
isinstance(obj[item], decimal.Decimal)):
obj[item] = float(self.post.getfirst(item, 0))
elif isinstance(obj[item], basestring):
obj[item] = self.post.getfirst(item, '')
elif isinstance(obj[item], datetime.datetime):
obj[item] = self.post.getfirst(item, '')
else:
obj[item] = int(self.post.getfirst(item, 0))
try:
obj.Save()
except Exception, error:
return error
return 'Changes saved'
return 'Invalid table'
def __DeleteRecord(self, table, key):
if self.__CheckTable(table):
table = getattr(self.ADMIN_MODEL, table)
try:
obj = table.FromPrimary(self.connection, key)
obj.Delete()
return True
except model.NotExistError:
return False
return False
def __BasePath(self):
return self.req.path.split('/')[1]
def __EditRecordFields(self, table, key):
if self.__CheckTable(table):
table = getattr(self.ADMIN_MODEL, table)
try:
return table.FromPrimary(self.connection, key)
except model.NotExistError:
return False
return False
def __CheckTable(self, table):
"""Verfies the given name is that of a model.BaseRecord subclass."""
tableclass = getattr(self.ADMIN_MODEL, table)
return type(tableclass) == type and issubclass(tableclass, model.Record)
def __AdminTables(self):
tables = []
for table in dir(self.ADMIN_MODEL):
if self.__CheckTable(table):
tables.append(table)
return tables
def __AdminTablesMethods(self, table):
if self.__CheckTable(table):
table = getattr(self.ADMIN_MODEL, table)
methods = []
for method in dir(table):
if (not method.startswith('_')
and method not in NOT_ALLOWED_METHODS
and callable(getattr(table, method))):
methods.append(method)
return methods
return False
def __AdminTablesMethodsResults(self, tablename, methodname='List'):
if self.__CheckTable(tablename):
table = getattr(self.ADMIN_MODEL, tablename)
method = getattr(table, methodname)
results = method(self.connection)
resultslist = []
for result in results:
resultslist.append({'result': result.values(),
'key': result.key})
if resultslist:
return result.keys(), resultslist
return (), ()
|
pekeler/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/test/test_pywintypes.py
|
17
|
import unittest
import pywintypes
import time
class TestCase(unittest.TestCase):
def testPyTimeFormat(self):
struct_current = time.localtime()
pytime_current = pywintypes.Time(struct_current)
# try and test all the standard parts of the format
format_string = "%a %A %b %B %c %d %H %I %j %m %M %p %S %U %w %W %x %X %y %Y %Z"
self.assertEquals(pytime_current.Format(format_string), time.strftime(format_string, struct_current))
def testPyTimePrint(self):
# This used to crash with an invalid, or too early time.
# We don't really want to check that it does cause a ValueError
        # (as hopefully this won't be true forever). So either working, or
# ValueError is OK.
t = pywintypes.Time(-2)
try:
t.Format()
except ValueError:
return
def testGUID(self):
s = "{00020400-0000-0000-C000-000000000046}"
iid = pywintypes.IID(s)
iid2 = pywintypes.IID(buffer(iid), True)
self.assertEquals(iid, iid2)
self.assertRaises(ValueError, pywintypes.IID, '00', True) # too short
self.assertRaises(TypeError, pywintypes.IID, 0, True) # no buffer
if __name__ == '__main__':
unittest.main()
|
40023256/W17test
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/signal.py
|
743
|
"""This module provides mechanisms to use signal handlers in Python.
Functions:
alarm() -- cause SIGALRM after a specified time [Unix only]
setitimer() -- cause a signal (described below) after a specified
float time and the timer may restart then [Unix only]
getitimer() -- get current value of timer [Unix only]
signal() -- set the action for a given signal
getsignal() -- get the signal action for a given signal
pause() -- wait until a signal arrives [Unix only]
default_int_handler() -- default SIGINT handler
signal constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
itimer constants:
ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon
expiration
ITIMER_VIRTUAL -- decrements only when the process is executing,
and delivers SIGVTALRM upon expiration
ITIMER_PROF -- decrements both when the process is executing and
when the system is executing on behalf of the process.
Coupled with ITIMER_VIRTUAL, this timer is usually
used to profile the time spent by the application
in user and kernel space. SIGPROF is delivered upon
expiration.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame."""
CTRL_BREAK_EVENT=1
CTRL_C_EVENT=0
NSIG=23
SIGABRT=22
SIGBREAK=21
SIGFPE=8
SIGILL=4
SIGINT=2
SIGSEGV=11
SIGTERM=15
SIG_DFL=0
SIG_IGN=1
def signal(signalnum, handler) :
pass
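# Hedged usage sketch (added): how a handler is typically registered with a
# CPython-style signal module. In this browser stub signal() is a no-op, so the
# helper below only illustrates the calling convention described in the docstring.
def _example_register_sigint_handler():
    def handler(signalnum, stack_frame):
        # a handler receives the signal number and the interrupted stack frame
        print('caught signal', signalnum)
    signal(SIGINT, handler)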
|
dxwu/BinderFilter
|
refs/heads/master
|
resources/android-toolchain-16/lib/python2.7/test/pyclbr_input.py
|
324
|
"""Test cases for test_pyclbr.py"""
def f(): pass
class Other(object):
@classmethod
def foo(c): pass
def om(self): pass
class B (object):
def bm(self): pass
class C (B):
foo = Other().foo
om = Other.om
d = 10
# XXX: This causes test_pyclbr.py to fail, but only because the
# introspection-based is_method() code in the test can't
# distinguish between this and a genuine method function like m().
# The pyclbr.py module gets this right as it parses the text.
#
#f = f
def m(self): pass
@staticmethod
def sm(self): pass
@classmethod
def cm(self): pass
|
topxiaoke/myedx
|
refs/heads/master
|
common/test/acceptance/pages/xblock/__init__.py
|
12133432
| |
redreamality/semanticizer
|
refs/heads/master
|
semanticizer/processors/__init__.py
|
12133432
| |
ABaldwinHunter/django-clone-classic
|
refs/heads/master
|
django/urls/exceptions.py
|
133
|
from __future__ import unicode_literals
from django.http import Http404
class Resolver404(Http404):
pass
class NoReverseMatch(Exception):
pass
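# Hedged usage sketch (added, not part of the original module): Resolver404 is the
# exception django.urls.resolve() raises when no URL pattern matches; since it
# subclasses Http404, callers may catch either one.
def _example_safe_resolve(path):
    from django.urls import resolve
    try:
        return resolve(path)
    except Resolver404:
        return None  # fall back when the path cannot be resolved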
|
mlaitinen/odoo
|
refs/heads/8.0
|
addons/account_voucher/report/account_voucher_sales_receipt.py
|
326
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class sale_receipt_report(osv.osv):
_name = "sale.receipt.report"
_description = "Sales Receipt Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.date('Date', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Without Tax', readonly=True),
'price_total_tax': fields.float('Total With Tax', readonly=True),
'nbr':fields.integer('# of Voucher Lines', readonly=True),
'type': fields.selection([
('sale','Sale'),
('purchase','Purchase'),
('payment','Payment'),
('receipt','Receipt'),
],'Type', readonly=True),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('posted','Posted'),
('cancel','Cancelled')
], 'Voucher Status', readonly=True),
'pay_now':fields.selection([
('pay_now','Pay Directly'),
('pay_later','Pay Later or Group Funds'),
],'Payment', readonly=True),
'date_due': fields.date('Due Date', readonly=True),
'account_id': fields.many2one('account.account', 'Account',readonly=True),
'delay_to_pay': fields.float('Avg. Delay To Pay', readonly=True, group_operator="avg"),
'due_delay': fields.float('Avg. Due Delay', readonly=True, group_operator="avg")
}
_order = 'date desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'sale_receipt_report')
cr.execute("""
create or replace view sale_receipt_report as (
select min(avl.id) as id,
av.date as date,
av.partner_id as partner_id,
aj.currency as currency_id,
av.journal_id as journal_id,
rp.user_id as user_id,
av.company_id as company_id,
count(avl.*) as nbr,
av.type as type,
av.state,
av.pay_now,
av.date_due as date_due,
av.account_id as account_id,
sum(av.amount-av.tax_amount)/(select count(l.id) from account_voucher_line as l
left join account_voucher as a ON (a.id=l.voucher_id)
where a.id=av.id) as price_total,
sum(av.amount)/(select count(l.id) from account_voucher_line as l
left join account_voucher as a ON (a.id=l.voucher_id)
where a.id=av.id) as price_total_tax,
sum((select extract(epoch from avg(date_trunc('day',aml.date_created)-date_trunc('day',l.create_date)))/(24*60*60)::decimal(16,2)
from account_move_line as aml
left join account_voucher as a ON (a.move_id=aml.move_id)
left join account_voucher_line as l ON (a.id=l.voucher_id)
where a.id=av.id)) as delay_to_pay,
sum((select extract(epoch from avg(date_trunc('day',a.date_due)-date_trunc('day',a.date)))/(24*60*60)::decimal(16,2)
from account_move_line as aml
left join account_voucher as a ON (a.move_id=aml.move_id)
left join account_voucher_line as l ON (a.id=l.voucher_id)
where a.id=av.id)) as due_delay
from account_voucher_line as avl
left join account_voucher as av on (av.id=avl.voucher_id)
left join res_partner as rp ON (rp.id=av.partner_id)
left join account_journal as aj ON (aj.id=av.journal_id)
where av.type='sale' and aj.type in ('sale','sale_refund')
group by
av.date,
av.id,
av.partner_id,
aj.currency,
av.journal_id,
rp.user_id,
av.company_id,
av.type,
av.state,
av.date_due,
av.account_id,
av.tax_amount,
av.amount,
av.tax_amount,
av.pay_now
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
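# Hedged usage sketch (added): the SQL view above is exposed as a regular read-only
# model, so it can be queried through the old-API ORM like any other osv model.
# The lines below are illustrative only and assume the usual (cr, uid, context)
# calling convention of OpenERP/Odoo 8.0:
#
#     report_obj = self.pool.get('sale.receipt.report')
#     ids = report_obj.search(cr, uid, [('state', '=', 'posted')], context=context)
#     rows = report_obj.read(cr, uid, ids, ['date', 'price_total', 'nbr'], context=context)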
|
gogobook/Spirit
|
refs/heads/master
|
spirit/settings_tests.py
|
7
|
# -*- coding: utf-8 -*-
"""
Django settings for running the tests of spirit app
"""
from __future__ import unicode_literals
import os
from spirit.settings import *
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 'TEST'
INSTALLED_APPS += [
'spirit.core.tests',
]
ROOT_URLCONF = 'spirit.urls'
USE_TZ = True
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media_test')
STATIC_ROOT = os.path.join(BASE_DIR, 'static_test')
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', ]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db_test.sqlite3'),
}
}
CACHES.update({
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
})
# speedup tests requiring login
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Keep templates in memory
del TEMPLATES[0]['APP_DIRS']
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
]
TEMPLATES[0]['OPTIONS']['debug'] = True
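# Hedged usage note (added): these settings are meant to be handed to the test runner
# rather than imported by application code; assuming this file lives at
# spirit/settings_tests.py on the import path, one plausible invocation is:
#
#     django-admin test spirit --settings=spirit.settings_tests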
|
nvoron23/statsmodels
|
refs/heads/master
|
statsmodels/sandbox/stats/multicomp.py
|
26
|
'''
from pystatsmodels mailinglist 20100524
Notes:
- unfinished, unverified, but most parts seem to work in MonteCarlo
- one example taken from lecture notes looks ok
- needs cases with non-monotonic inequality for test to see difference between
one-step, step-up and step-down procedures
- FDR doesn't really look better than Bonferroni in the MC examples that I tried
update:
- now tested against R, stats and multtest,
I have all of their methods for p-value correction
- getting Hommel was impossible until I found reference for pvalue correction
- now, since I have p-values correction, some of the original tests (rej/norej)
implementation is not really needed anymore. I think I keep it for reference.
Test procedure for Hommel in development session log
- I haven't updated other functions and classes in here.
- multtest has some good helper function according to docs
- still need to update references, the real papers
- fdr with estimated true hypothesis still missing
- multiple comparison procedures incomplete or missing
- I will get multiple comparison for now only for independent case, which might
be conservative in correlated case (?).
some References:
Gibbons, Jean Dickinson and Chakraborti Subhabrata, 2003, Nonparametric Statistical
Inference, Fourth Edition, Marcel Dekker
p.363: 10.4 THE KRUSKAL-WALLIS ONE-WAY ANOVA TEST AND MULTIPLE COMPARISONS
p.367: multiple comparison for kruskal formula used in multicomp.kruskal
Sheskin, David J., 2004, Handbook of Parametric and Nonparametric Statistical
Procedures, 3rd ed., Chapman&Hall/CRC
Test 21: The Single-Factor Between-Subjects Analysis of Variance
Test 22: The Kruskal-Wallis One-Way Analysis of Variance by Ranks Test
Zwillinger, Daniel and Stephen Kokoska, 2000, CRC standard probability and
statistics tables and formulae, Chapman&Hall/CRC
14.9 WILCOXON RANKSUM (MANN WHITNEY) TEST
S. Paul Wright, Adjusted P-Values for Simultaneous Inference, Biometrics
Vol. 48, No. 4 (Dec., 1992), pp. 1005-1013, International Biometric Society
Stable URL: http://www.jstor.org/stable/2532694
(p-value correction for Hommel in appendix)
for multicomparison
new book "multiple comparison in R"
Hsu is a good reference but I don't have it.
Author: Josef Pktd and example from H Raja and rewrite from Vincent Davis
TODO
----
* handle exception if empty, shows up only sometimes when running this
- DONE I think
Traceback (most recent call last):
File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\multicomp.py", line 711, in <module>
print('sh', multipletests(tpval, alpha=0.05, method='sh')
File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\multicomp.py", line 241, in multipletests
rejectmax = np.max(np.nonzero(reject))
File "C:\Programs\Python25\lib\site-packages\numpy\core\fromnumeric.py", line 1765, in amax
return _wrapit(a, 'max', axis, out)
File "C:\Programs\Python25\lib\site-packages\numpy\core\fromnumeric.py", line 37, in _wrapit
result = getattr(asarray(obj),method)(*args, **kwds)
ValueError: zero-size array to ufunc.reduce without identity
* name of function multipletests, rename to something like pvalue_correction?
'''
#import xlrd
#import xlwt
from __future__ import print_function
from statsmodels.compat.python import lzip, range, lrange, zip
import scipy.stats
import numpy
import numpy as np
import math
import copy
from scipy import stats
from statsmodels.iolib.table import SimpleTable
from numpy.testing import assert_almost_equal, assert_equal
#temporary circular import
from statsmodels.stats.multitest import multipletests, _ecdf as ecdf, fdrcorrection as fdrcorrection0, fdrcorrection_twostage
from statsmodels.graphics import utils
qcrit = '''
2 3 4 5 6 7 8 9 10
5 3.64 5.70 4.60 6.98 5.22 7.80 5.67 8.42 6.03 8.91 6.33 9.32 6.58 9.67 6.80 9.97 6.99 10.24
6 3.46 5.24 4.34 6.33 4.90 7.03 5.30 7.56 5.63 7.97 5.90 8.32 6.12 8.61 6.32 8.87 6.49 9.10
7 3.34 4.95 4.16 5.92 4.68 6.54 5.06 7.01 5.36 7.37 5.61 7.68 5.82 7.94 6.00 8.17 6.16 8.37
8 3.26 4.75 4.04 5.64 4.53 6.20 4.89 6.62 5.17 6.96 5.40 7.24 5.60 7.47 5.77 7.68 5.92 7.86
9 3.20 4.60 3.95 5.43 4.41 5.96 4.76 6.35 5.02 6.66 5.24 6.91 5.43 7.13 5.59 7.33 5.74 7.49
10 3.15 4.48 3.88 5.27 4.33 5.77 4.65 6.14 4.91 6.43 5.12 6.67 5.30 6.87 5.46 7.05 5.60 7.21
11 3.11 4.39 3.82 5.15 4.26 5.62 4.57 5.97 4.82 6.25 5.03 6.48 5.20 6.67 5.35 6.84 5.49 6.99
12 3.08 4.32 3.77 5.05 4.20 5.50 4.51 5.84 4.75 6.10 4.95 6.32 5.12 6.51 5.27 6.67 5.39 6.81
13 3.06 4.26 3.73 4.96 4.15 5.40 4.45 5.73 4.69 5.98 4.88 6.19 5.05 6.37 5.19 6.53 5.32 6.67
14 3.03 4.21 3.70 4.89 4.11 5.32 4.41 5.63 4.64 5.88 4.83 6.08 4.99 6.26 5.13 6.41 5.25 6.54
15 3.01 4.17 3.67 4.84 4.08 5.25 4.37 5.56 4.59 5.80 4.78 5.99 4.94 6.16 5.08 6.31 5.20 6.44
16 3.00 4.13 3.65 4.79 4.05 5.19 4.33 5.49 4.56 5.72 4.74 5.92 4.90 6.08 5.03 6.22 5.15 6.35
17 2.98 4.10 3.63 4.74 4.02 5.14 4.30 5.43 4.52 5.66 4.70 5.85 4.86 6.01 4.99 6.15 5.11 6.27
18 2.97 4.07 3.61 4.70 4.00 5.09 4.28 5.38 4.49 5.60 4.67 5.79 4.82 5.94 4.96 6.08 5.07 6.20
19 2.96 4.05 3.59 4.67 3.98 5.05 4.25 5.33 4.47 5.55 4.65 5.73 4.79 5.89 4.92 6.02 5.04 6.14
20 2.95 4.02 3.58 4.64 3.96 5.02 4.23 5.29 4.45 5.51 4.62 5.69 4.77 5.84 4.90 5.97 5.01 6.09
24 2.92 3.96 3.53 4.55 3.90 4.91 4.17 5.17 4.37 5.37 4.54 5.54 4.68 5.69 4.81 5.81 4.92 5.92
30 2.89 3.89 3.49 4.45 3.85 4.80 4.10 5.05 4.30 5.24 4.46 5.40 4.60 5.54 4.72 5.65 4.82 5.76
40 2.86 3.82 3.44 4.37 3.79 4.70 4.04 4.93 4.23 5.11 4.39 5.26 4.52 5.39 4.63 5.50 4.73 5.60
60 2.83 3.76 3.40 4.28 3.74 4.59 3.98 4.82 4.16 4.99 4.31 5.13 4.44 5.25 4.55 5.36 4.65 5.45
120 2.80 3.70 3.36 4.20 3.68 4.50 3.92 4.71 4.10 4.87 4.24 5.01 4.36 5.12 4.47 5.21 4.56 5.30
infinity 2.77 3.64 3.31 4.12 3.63 4.40 3.86 4.60 4.03 4.76 4.17 4.88 4.29 4.99 4.39 5.08 4.47 5.16
'''
res = [line.split() for line in qcrit.replace('infinity','9999').split('\n')]
c=np.array(res[2:-1]).astype(float)
#c[c==9999] = np.inf
ccols = np.arange(2,11)
crows = c[:,0]
cv005 = c[:, 1::2]
cv001 = c[:, 2::2]
from scipy import interpolate
def get_tukeyQcrit(k, df, alpha=0.05):
'''
return critical values for Tukey's HSD (Q)
Parameters
----------
k : int in {2, ..., 10}
number of tests
df : int
degrees of freedom of error term
alpha : {0.05, 0.01}
type 1 error, 1-confidence level
not enough error checking for limitations
'''
if alpha == 0.05:
intp = interpolate.interp1d(crows, cv005[:,k-2])
elif alpha == 0.01:
intp = interpolate.interp1d(crows, cv001[:,k-2])
else:
raise ValueError('only implemented for alpha equal to 0.01 and 0.05')
return intp(df)
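# Hedged usage sketch (added): get_tukeyQcrit interpolates the cv005/cv001 columns of
# the table above over its degrees-of-freedom rows. The helper below is illustrative;
# for 3 groups and 20 error df at alpha=0.05 the df=20 row lists 3.58, and because 20
# is a tabulated node the interpolation should return that value (up to float repr).
def _example_tukey_qcrit():
    return get_tukeyQcrit(3, 20, alpha=0.05)  # expected to be approximately 3.58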
def get_tukeyQcrit2(k, df, alpha=0.05):
'''
return critical values for Tukey's HSD (Q)
Parameters
----------
k : int in {2, ..., 10}
number of tests
df : int
degrees of freedom of error term
alpha : {0.05, 0.01}
type 1 error, 1-confidence level
not enough error checking for limitations
'''
from statsmodels.stats.libqsturng import qsturng
return qsturng(1-alpha, k, df)
def Tukeythreegene(first,second,third):
#Performing the Tukey HSD post-hoc test for three genes
## qwb = xlrd.open_workbook('F:/Lab/bioinformatics/qcrittable.xls')
## #opening the workbook containing the q crit table
## qwb.sheet_names()
## qcrittable = qwb.sheet_by_name(u'Sheet1')
firstmean = numpy.mean(first) #means of the three arrays
secondmean = numpy.mean(second)
thirdmean = numpy.mean(third)
firststd = numpy.std(first) #standard deviations of the three arrays
secondstd = numpy.std(second)
thirdstd = numpy.std(third)
firsts2 = math.pow(firststd,2) #standard deviation squared of the three arrays
seconds2 = math.pow(secondstd,2)
thirds2 = math.pow(thirdstd,2)
mserrornum = firsts2*2+seconds2*2+thirds2*2 #numerator for mean square error
mserrorden = (len(first)+len(second)+len(third))-3 #denominator for mean square error
mserror = mserrornum/mserrorden #mean square error
standarderror = math.sqrt(mserror/len(first))
#standard error, which is square root of mserror and the number of samples in a group
dftotal = len(first)+len(second)+len(third)-1 #various degrees of freedom
dfgroups = 2
dferror = dftotal-dfgroups
qcrit = 0.5 # fix arbitrary#qcrittable.cell(dftotal, 3).value
qcrit = get_tukeyQcrit(3, dftotal, alpha=0.05)
#getting the q critical value, for degrees of freedom total and 3 groups
qtest3to1 = (math.fabs(thirdmean-firstmean))/standarderror
#calculating q test statistic values
qtest3to2 = (math.fabs(thirdmean-secondmean))/standarderror
qtest2to1 = (math.fabs(secondmean-firstmean))/standarderror
conclusion = []
## print(qcrit
print(qtest3to1)
print(qtest3to2)
print(qtest2to1)
if(qtest3to1>qcrit): #testing all q test statistic values to q critical values
conclusion.append('3to1null')
else:
conclusion.append('3to1alt')
if(qtest3to2>qcrit):
conclusion.append('3to2null')
else:
conclusion.append('3to2alt')
if(qtest2to1>qcrit):
conclusion.append('2to1null')
else:
conclusion.append('2to1alt')
return conclusion
#rewrite by Vincent
def Tukeythreegene2(genes): #Performing the Tukey HSD post-hoc test for three genes
"""gend is a list, ie [first, second, third]"""
# qwb = xlrd.open_workbook('F:/Lab/bioinformatics/qcrittable.xls')
#opening the workbook containing the q crit table
# qwb.sheet_names()
# qcrittable = qwb.sheet_by_name(u'Sheet1')
means = []
stds = []
for gene in genes:
means.append(numpy.mean(gene))
stds.append(numpy.std(gene))
#firstmean = numpy.mean(first) #means of the three arrays
#secondmean = numpy.mean(second)
#thirdmean = numpy.mean(third)
#firststd = numpy.std(first) #standard deviations of the three arrays
#secondstd = numpy.std(second)
#thirdstd = numpy.std(third)
stds2 = []
for std in stds:
stds2.append(math.pow(std,2))
#firsts2 = math.pow(firststd,2) #standard deviation squared of the three arrays
#seconds2 = math.pow(secondstd,2)
#thirds2 = math.pow(thirdstd,2)
#mserrornum = firsts2*2+seconds2*2+thirds2*2 #numerator for mean square error
mserrornum = sum(stds2)*2
mserrorden = (len(genes[0])+len(genes[1])+len(genes[2]))-3 #denominator for mean square error
mserror = mserrornum/mserrorden #mean square error
def catstack(args):
x = np.hstack(args)
labels = np.hstack([k*np.ones(len(arr)) for k,arr in enumerate(args)])
return x, labels
def maxzero(x):
'''find all up zero crossings and return the index of the highest
Not used anymore
>>> np.random.seed(12345)
>>> x = np.random.randn(8)
>>> x
array([-0.20470766, 0.47894334, -0.51943872, -0.5557303 , 1.96578057,
1.39340583, 0.09290788, 0.28174615])
>>> maxzero(x)
(4, array([1, 4]))
no up-zero-crossing at end
>>> np.random.seed(0)
>>> x = np.random.randn(8)
>>> x
array([ 1.76405235, 0.40015721, 0.97873798, 2.2408932 , 1.86755799,
-0.97727788, 0.95008842, -0.15135721])
>>> maxzero(x)
(None, array([6]))
'''
x = np.asarray(x)
cond1 = x[:-1] < 0
cond2 = x[1:] > 0
#allzeros = np.nonzero(np.sign(x[:-1])*np.sign(x[1:]) <= 0)[0] + 1
allzeros = np.nonzero((cond1 & cond2) | (x[1:]==0))[0] + 1
if x[-1] >=0:
maxz = max(allzeros)
else:
maxz = None
return maxz, allzeros
def maxzerodown(x):
'''find all down zero crossings and return the index of the highest
Not used anymore
>>> np.random.seed(12345)
>>> x = np.random.randn(8)
>>> x
array([-0.20470766, 0.47894334, -0.51943872, -0.5557303 , 1.96578057,
1.39340583, 0.09290788, 0.28174615])
>>> maxzerodown(x)
(None, array([2]))
no up-zero-crossing at end
>>> np.random.seed(0)
>>> x = np.random.randn(8)
>>> x
array([ 1.76405235, 0.40015721, 0.97873798, 2.2408932 , 1.86755799,
-0.97727788, 0.95008842, -0.15135721])
>>> maxzerodown(x)
(7, array([5, 7]))
'''
x = np.asarray(x)
cond1 = x[:-1] > 0
cond2 = x[1:] < 0
#allzeros = np.nonzero(np.sign(x[:-1])*np.sign(x[1:]) <= 0)[0] + 1
allzeros = np.nonzero((cond1 & cond2) | (x[1:]==0))[0] + 1
if x[-1] <=0:
maxz = max(allzeros)
else:
maxz = None
return maxz, allzeros
def rejectionline(n, alpha=0.5):
'''reference line for rejection in multiple tests
Not used anymore
from: section 3.2, page 60
'''
t = np.arange(n)/float(n)
frej = t/( t * (1-alpha) + alpha)
return frej
#I don't remember what I changed or why 2 versions,
#this follows german diss ??? with rline
#this might be useful if the null hypothesis is not "all effects are zero"
#rename to _bak and working again on fdrcorrection0
def fdrcorrection_bak(pvals, alpha=0.05, method='indep'):
'''Rejection decisions from a false discovery rate correction of p-values
Old version, to be deleted
missing: methods that estimate fraction of true hypotheses
'''
pvals = np.asarray(pvals)
pvals_sortind = np.argsort(pvals)
pvals_sorted = pvals[pvals_sortind]
pecdf = ecdf(pvals_sorted)
if method in ['i', 'indep', 'p', 'poscorr']:
rline = pvals_sorted / alpha
elif method in ['n', 'negcorr']:
cm = np.sum(1./np.arange(1, len(pvals)))
rline = pvals_sorted / alpha * cm
elif method in ['g', 'onegcorr']: #what's this ? german diss
rline = pvals_sorted / (pvals_sorted*(1-alpha) + alpha)
elif method in ['oth', 'o2negcorr']: # other invalid, cut-paste
cm = np.sum(np.arange(len(pvals)))
rline = pvals_sorted / alpha /cm
else:
raise ValueError('method not available')
reject = pecdf >= rline
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
else:
rejectmax = 0
reject[:rejectmax] = True
return reject[pvals_sortind.argsort()]
def mcfdr(nrepl=100, nobs=50, ntests=10, ntrue=6, mu=0.5, alpha=0.05, rho=0.):
'''MonteCarlo to test fdrcorrection
'''
nfalse = ntests - ntrue
locs = np.array([0.]*ntrue + [mu]*(ntests - ntrue))
results = []
for i in range(nrepl):
#rvs = locs + stats.norm.rvs(size=(nobs, ntests))
rvs = locs + randmvn(rho, size=(nobs, ntests))
tt, tpval = stats.ttest_1samp(rvs, 0)
res = fdrcorrection_bak(np.abs(tpval), alpha=alpha, method='i')
res0 = fdrcorrection0(np.abs(tpval), alpha=alpha)
#res and res0 give the same results
results.append([np.sum(res[:ntrue]), np.sum(res[ntrue:])] +
[np.sum(res0[:ntrue]), np.sum(res0[ntrue:])] +
res.tolist() +
np.sort(tpval).tolist() +
[np.sum(tpval[:ntrue]<alpha),
np.sum(tpval[ntrue:]<alpha)] +
[np.sum(tpval[:ntrue]<alpha/ntests),
np.sum(tpval[ntrue:]<alpha/ntests)])
return np.array(results)
def randmvn(rho, size=(1, 2), standardize=False):
'''create random draws from equi-correlated multivariate normal distribution
Parameters
----------
rho : float
correlation coefficient
size : tuple of int
size is interpreted as (nobs, nvars), where each row is an observation
Returns
-------
rvs : ndarray, (nobs, nvars)
where each row is an independent random draw of nvars-dimensional correlated rvs
'''
nobs, nvars = size
if 0 < rho and rho < 1:
rvs = np.random.randn(nobs, nvars+1)
rvs2 = rvs[:,:-1] * np.sqrt((1-rho)) + rvs[:,-1:] * np.sqrt(rho)
elif rho ==0:
rvs2 = np.random.randn(nobs, nvars)
elif rho < 0:
if rho < -1./(nvars-1):
raise ValueError('rho has to be larger than -1./(nvars-1)')
elif rho == -1./(nvars-1):
rho = -1./(nvars-1+1e-10) #barely positive definite
#use Cholesky
A = rho*np.ones((nvars,nvars))+(1-rho)*np.eye(nvars)
rvs2 = np.dot(np.random.randn(nobs, nvars), np.linalg.cholesky(A).T)
if standardize:
rvs2 = stats.zscore(rvs2)
return rvs2
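# Hedged usage sketch (added): draw equi-correlated normal variates and check that the
# off-diagonal sample correlations come out near the requested rho (illustrative only).
def _example_randmvn(rho=0.5, nobs=1000, nvars=4):
    rvs = randmvn(rho, size=(nobs, nvars))
    # off-diagonal entries of the sample correlation matrix should be close to rho
    return np.corrcoef(rvs, rowvar=False)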
#============================
#
# Part 2: Multiple comparisons and independent samples tests
#
#============================
def tiecorrect(xranks):
'''
should be equivalent of scipy.stats.tiecorrect
'''
#casting to int rounds down, but not relevant for this case
rankbincount = np.bincount(np.asarray(xranks,dtype=int))
nties = rankbincount[rankbincount > 1]
ntot = float(len(xranks));
tiecorrection = 1 - (nties**3 - nties).sum()/(ntot**3 - ntot)
return tiecorrection
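# Hedged usage sketch (added): with ranks (1, 2.5, 2.5, 4) there is one tie group of
# size 2, so the correction is 1 - (2**3 - 2) / (4**3 - 4) = 0.9, matching
# scipy.stats.tiecorrect for the same input.
def _example_tiecorrect():
    return tiecorrect([1, 2.5, 2.5, 4])  # expected to be 0.9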
class GroupsStats(object):
'''
statistics by groups (another version)
groupstats as a class with lazy evaluation (not yet - decorators are still
missing)
written this time as equivalent of scipy.stats.rankdata
gs = GroupsStats(X, useranks=True)
assert_almost_equal(gs.groupmeanfilter, stats.rankdata(X[:,0]), 15)
TODO: incomplete doc strings
'''
def __init__(self, x, useranks=False, uni=None, intlab=None):
'''descriptive statistics by groups
Parameters
----------
x : array, 2d
first column data, second column group labels
useranks : boolean
if true, then use ranks as data corresponding to the
scipy.stats.rankdata definition (start at 1, ties get mean)
uni, intlab : arrays (optional)
to avoid call to unique, these can be given as inputs
'''
self.x = np.asarray(x)
if intlab is None:
uni, intlab = np.unique(x[:,1], return_inverse=True)
elif uni is None:
uni = np.unique(x[:,1])
self.useranks = useranks
self.uni = uni
self.intlab = intlab
self.groupnobs = groupnobs = np.bincount(intlab)
#temporary until separated and made all lazy
self.runbasic(useranks=useranks)
def runbasic_old(self, useranks=False):
#check: refactoring screwed up case useranks=True
#groupxsum = np.bincount(intlab, weights=X[:,0])
#groupxmean = groupxsum * 1.0 / groupnobs
x = self.x
if useranks:
self.xx = x[:,1].argsort().argsort() + 1 #rankraw
else:
self.xx = x[:,0]
self.groupsum = groupranksum = np.bincount(self.intlab, weights=self.xx)
#print('groupranksum', groupranksum, groupranksum.shape, self.groupnobs.shape
# start at 1 for stats.rankdata :
self.groupmean = grouprankmean = groupranksum * 1.0 / self.groupnobs # + 1
self.groupmeanfilter = grouprankmean[self.intlab]
#return grouprankmean[intlab]
def runbasic(self, useranks=False):
#check: refactoring screwed up case useranks=True
#groupxsum = np.bincount(intlab, weights=X[:,0])
#groupxmean = groupxsum * 1.0 / groupnobs
x = self.x
if useranks:
xuni, xintlab = np.unique(x[:,0], return_inverse=True)
ranksraw = x[:,0].argsort().argsort() + 1 #rankraw
self.xx = GroupsStats(np.column_stack([ranksraw, xintlab]),
useranks=False).groupmeanfilter
else:
self.xx = x[:,0]
self.groupsum = groupranksum = np.bincount(self.intlab, weights=self.xx)
#print('groupranksum', groupranksum, groupranksum.shape, self.groupnobs.shape
# start at 1 for stats.rankdata :
self.groupmean = grouprankmean = groupranksum * 1.0 / self.groupnobs # + 1
self.groupmeanfilter = grouprankmean[self.intlab]
#return grouprankmean[intlab]
def groupdemean(self):
return self.xx - self.groupmeanfilter
def groupsswithin(self):
xtmp = self.groupdemean()
return np.bincount(self.intlab, weights=xtmp**2)
def groupvarwithin(self):
return self.groupsswithin()/(self.groupnobs-1) #.sum()
class TukeyHSDResults(object):
"""Results from Tukey HSD test, with additional plot methods
Can also compute and plot additional post-hoc evaluations using this
results class.
Attributes
----------
reject : array of boolean, True if we reject Null for group pair
meandiffs : pairwise mean differences
confint : confidence interval for pairwise mean differences
std_pairs : standard deviation of pairwise mean differences
q_crit : critical value of studentized range statistic at given alpha
halfwidths : half widths of simultaneous confidence interval
Notes
-----
halfwidths is only available after call to `plot_simultaneous`.
Other attributes contain information about the data from the
MultiComparison instance: data, df_total, groups, groupsunique, variance.
"""
def __init__(self, mc_object, results_table, q_crit, reject=None,
meandiffs=None, std_pairs=None, confint=None, df_total=None,
reject2=None, variance=None):
self._multicomp = mc_object
self._results_table = results_table
self.q_crit = q_crit
self.reject = reject
self.meandiffs = meandiffs
self.std_pairs = std_pairs
self.confint = confint
self.df_total = df_total
self.reject2 = reject2
self.variance = variance
# Taken out of _multicomp for ease of access for unknowledgeable users
self.data = self._multicomp.data
self.groups =self._multicomp.groups
self.groupsunique = self._multicomp.groupsunique
def __str__(self):
return str(self._results_table)
def summary(self):
'''Summary table that can be printed
'''
return self._results_table
def _simultaneous_ci(self):
"""Compute simultaneous confidence intervals for comparison of means.
"""
self.halfwidths = simultaneous_ci(self.q_crit, self.variance,
self._multicomp.groupstats.groupnobs,
self._multicomp.pairindices)
def plot_simultaneous(self, comparison_name=None, ax=None, figsize=(10,6),
xlabel=None, ylabel=None):
"""Plot a universal confidence interval of each group mean
Visualize significant differences in a plot with one confidence
interval per group instead of all pairwise confidence intervals.
Parameters
----------
comparison_name : string, optional
if provided, plot_intervals will color code all groups that are
significantly different from the comparison_name red, and will
color code insignificant groups gray. Otherwise, all intervals will
just be plotted in black.
ax : matplotlib axis, optional
An axis handle on which to attach the plot.
figsize : tuple, optional
tuple for the size of the figure generated
xlabel : string, optional
Name to be displayed on x axis
ylabel : string, optional
Name to be displayed on y axis
Returns
-------
fig : Matplotlib Figure object
handle to figure object containing interval plots
Notes
-----
Multiple comparison tests are nice, but lack a good way to be
visualized. If you have, say, 6 groups, showing a graph of the means
between each group will require 15 confidence intervals.
Instead, we can visualize inter-group differences with a single
interval for each group mean. Hochberg et al. [1] first proposed this
idea and used Tukey's Q critical value to compute the interval widths.
Unlike plotting the differences in the means and their respective
confidence intervals, any two pairs can be compared for significance
by looking for overlap.
References
----------
.. [1] Hochberg, Y., and A. C. Tamhane. Multiple Comparison Procedures.
Hoboken, NJ: John Wiley & Sons, 1987.
Examples
--------
>>> from statsmodels.examples.try_tukey_hsd import cylinders, cyl_labels
>>> from statsmodels.stats.multicomp import MultiComparison
>>> cardata = MultiComparison(cylinders, cyl_labels)
>>> results = cardata.tukeyhsd()
>>> results.plot_simultaneous()
<matplotlib.figure.Figure at 0x...>
This example shows an example plot comparing significant differences
in group means. Significant differences at the alpha=0.05 level can be
identified by intervals that do not overlap (i.e. USA vs Japan,
USA vs Germany).
>>> results.plot_simultaneous(comparison_name="USA")
<matplotlib.figure.Figure at 0x...>
Optionally provide one of the group names to color code the plot to
highlight group means different from comparison_name.
"""
fig, ax1 = utils.create_mpl_ax(ax)
if figsize is not None:
fig.set_size_inches(figsize)
if getattr(self, 'halfwidths', None) is None:
self._simultaneous_ci()
means = self._multicomp.groupstats.groupmean
sigidx = []
nsigidx = []
minrange = [means[i] - self.halfwidths[i] for i in range(len(means))]
maxrange = [means[i] + self.halfwidths[i] for i in range(len(means))]
if comparison_name is None:
ax1.errorbar(means, lrange(len(means)), xerr=self.halfwidths,
marker='o', linestyle='None', color='k', ecolor='k')
else:
if comparison_name not in self.groupsunique:
raise ValueError('comparison_name not found in group names.')
midx = np.where(self.groupsunique==comparison_name)[0]
for i in range(len(means)):
if self.groupsunique[i] == comparison_name:
continue
if (min(maxrange[i], maxrange[midx]) -
max(minrange[i], minrange[midx]) < 0):
sigidx.append(i)
else:
nsigidx.append(i)
#Plot the master comparison
ax1.errorbar(means[midx], midx, xerr=self.halfwidths[midx],
marker='o', linestyle='None', color='b', ecolor='b')
ax1.plot([minrange[midx]]*2, [-1, self._multicomp.ngroups],
linestyle='--', color='0.7')
ax1.plot([maxrange[midx]]*2, [-1, self._multicomp.ngroups],
linestyle='--', color='0.7')
#Plot those that are significantly different
if len(sigidx) > 0:
ax1.errorbar(means[sigidx], sigidx,
xerr=self.halfwidths[sigidx], marker='o',
linestyle='None', color='r', ecolor='r')
#Plot those that are not significantly different
if len(nsigidx) > 0:
ax1.errorbar(means[nsigidx], nsigidx,
xerr=self.halfwidths[nsigidx], marker='o',
linestyle='None', color='0.5', ecolor='0.5')
ax1.set_title('Multiple Comparisons Between All Pairs (Tukey)')
r = np.max(maxrange) - np.min(minrange)
ax1.set_ylim([-1, self._multicomp.ngroups])
ax1.set_xlim([np.min(minrange) - r / 10., np.max(maxrange) + r / 10.])
ax1.set_yticklabels(np.insert(self.groupsunique.astype(str), 0, ''))
ax1.set_yticks(np.arange(-1, len(means)+1))
ax1.set_xlabel(xlabel if xlabel is not None else '')
ax1.set_ylabel(ylabel if ylabel is not None else '')
return fig
class MultiComparison(object):
'''Tests for multiple comparisons
Parameters
----------
data : array
independent data samples
groups : array
group labels corresponding to each data point
group_order : list of strings, optional
the desired order for the group mean results to be reported in. If
not specified, results are reported in increasing order.
If group_order does not contain all labels that are in groups, then
only those observations are kept that have a label in group_order.
'''
def __init__(self, data, groups, group_order=None):
if len(data) != len(groups):
raise ValueError('data has %d elements and groups has %d' % (len(data), len(groups)))
self.data = np.asarray(data)
self.groups = groups = np.asarray(groups)
# Allow for user-provided sorting of groups
if group_order is None:
self.groupsunique, self.groupintlab = np.unique(groups,
return_inverse=True)
else:
#check if group_order has any names not in groups
for grp in group_order:
if grp not in groups:
raise ValueError(
"group_order value '%s' not found in groups"%grp)
self.groupsunique = np.array(group_order)
self.groupintlab = np.empty(len(data), int)
self.groupintlab.fill(-999) # instead of a nan
count = 0
for name in self.groupsunique:
idx = np.where(self.groups == name)[0]
count += len(idx)
self.groupintlab[idx] = np.where(self.groupsunique == name)[0]
if count != data.shape[0]:
#raise ValueError('group_order does not contain all groups')
# warn and keep only observations with label in group_order
import warnings
warnings.warn('group_order does not contain all groups:' +
' dropping observations')
mask_keep = self.groupintlab != -999
self.groupintlab = self.groupintlab[mask_keep]
self.data = self.data[mask_keep]
self.groups = self.groups[mask_keep]
if len(self.groupsunique) < 2:
raise ValueError('2 or more groups required for multiple comparisons')
self.datali = [data[self.groups == k] for k in self.groupsunique]
self.pairindices = np.triu_indices(len(self.groupsunique), 1) #tuple
self.nobs = self.data.shape[0]
self.ngroups = len(self.groupsunique)
def getranks(self):
'''convert data to rankdata and attach
This creates rankdata as it is used for non-parametric tests, where
in the case of ties the average rank is assigned.
'''
#bug: the next should use self.groupintlab instead of self.groups
#update: looks fixed
#self.ranks = GroupsStats(np.column_stack([self.data, self.groups]),
self.ranks = GroupsStats(np.column_stack([self.data, self.groupintlab]),
useranks=True)
self.rankdata = self.ranks.groupmeanfilter
def kruskal(self, pairs=None, multimethod='T'):
'''
pairwise comparison for kruskal-wallis test
This is just a reimplementation of scipy.stats.kruskal and does
not yet use a multiple comparison correction.
'''
self.getranks()
tot = self.nobs
meanranks = self.ranks.groupmean
groupnobs = self.ranks.groupnobs
# simultaneous/separate treatment of multiple tests
f=(tot * (tot + 1.) / 12.) / stats.tiecorrect(self.rankdata) #(xranks)
print('MultiComparison.kruskal')
for i,j in zip(*self.pairindices):
#pdiff = np.abs(mrs[i] - mrs[j])
pdiff = np.abs(meanranks[i] - meanranks[j])
se = np.sqrt(f * np.sum(1. / groupnobs[[i,j]] )) #np.array([8,8]))) #Fixme groupnobs[[i,j]] ))
Q = pdiff / se
# TODO: print statements, fix
print(i,j, pdiff, se, pdiff / se, pdiff / se > 2.6310)
print(stats.norm.sf(Q) * 2)
return stats.norm.sf(Q) * 2
def allpairtest(self, testfunc, alpha=0.05, method='bonf', pvalidx=1):
'''run a pairwise test on all pairs with multiple test correction
The statistical test given in testfunc is calculated for all pairs
and the p-values are adjusted by methods in multipletests. The p-value
correction is generic and based only on the p-values, and does not
take any special structure of the hypotheses into account.
Parameters
----------
testfunc : function
A test function for two (independent) samples. It is assumed that
the return value on position pvalidx is the p-value.
alpha : float
familywise error rate
method : string
This specifies the method for the p-value correction. Any method
of multipletests is possible.
pvalidx : int (default: 1)
position of the p-value in the return of testfunc
Returns
-------
sumtab : SimpleTable instance
summary table for printing
errors: TODO: check if this is still wrong, I think it's fixed.
results from multipletests are in different order
pval_corrected can be larger than 1 ???
'''
res = []
for i,j in zip(*self.pairindices):
res.append(testfunc(self.datali[i], self.datali[j]))
res = np.array(res)
reject, pvals_corrected, alphacSidak, alphacBonf = \
multipletests(res[:, pvalidx], alpha=0.05, method=method)
#print(np.column_stack([res[:,0],res[:,1], reject, pvals_corrected])
i1, i2 = self.pairindices
if pvals_corrected is None:
resarr = np.array(lzip(self.groupsunique[i1], self.groupsunique[i2],
np.round(res[:,0],4),
np.round(res[:,1],4),
reject),
dtype=[('group1', object),
('group2', object),
('stat',float),
('pval',float),
('reject', np.bool8)])
else:
resarr = np.array(lzip(self.groupsunique[i1], self.groupsunique[i2],
np.round(res[:,0],4),
np.round(res[:,1],4),
np.round(pvals_corrected,4),
reject),
dtype=[('group1', object),
('group2', object),
('stat',float),
('pval',float),
('pval_corr',float),
('reject', np.bool8)])
from statsmodels.iolib.table import SimpleTable
results_table = SimpleTable(resarr, headers=resarr.dtype.names)
results_table.title = (
'Test Multiple Comparison %s \n%s%4.2f method=%s'
% (testfunc.__name__, 'FWER=', alpha, method) +
'\nalphacSidak=%4.2f, alphacBonf=%5.3f'
% (alphacSidak, alphacBonf))
return results_table, (res, reject, pvals_corrected,
alphacSidak, alphacBonf), resarr
def tukeyhsd(self, alpha=0.05):
"""Tukey's range test to compare means of all pairs of groups
Parameters
----------
alpha : float, optional
Value of FWER at which to calculate HSD.
Returns
-------
results : TukeyHSDResults instance
A results class containing relevant data and some post-hoc
calculations
"""
self.groupstats = GroupsStats(
np.column_stack([self.data, self.groupintlab]),
useranks=False)
gmeans = self.groupstats.groupmean
gnobs = self.groupstats.groupnobs #var_ = self.groupstats.groupvarwithin() #possibly an error in varcorrection in this case
var_ = np.var(self.groupstats.groupdemean(), ddof=len(gmeans))
#res contains: 0:(idx1, idx2), 1:reject, 2:meandiffs, 3: std_pairs, 4:confint, 5:q_crit,
#6:df_total, 7:reject2
res = tukeyhsd(gmeans, gnobs, var_, df=None, alpha=alpha, q_crit=None)
resarr = np.array(lzip(self.groupsunique[res[0][0]], self.groupsunique[res[0][1]],
np.round(res[2],4),
np.round(res[4][:, 0],4),
np.round(res[4][:, 1],4),
res[1]),
dtype=[('group1', object),
('group2', object),
('meandiff',float),
('lower',float),
('upper',float),
('reject', np.bool8)])
results_table = SimpleTable(resarr, headers=resarr.dtype.names)
results_table.title = 'Multiple Comparison of Means - Tukey HSD, ' + \
'FWER=%4.2f' % alpha
return TukeyHSDResults(self, results_table, res[5], res[1], res[2],
res[3], res[4], res[6], res[7], var_)
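# Hedged usage sketch (added): a minimal end-to-end run of the class above on made-up
# data; labels are strings and the Tukey HSD summary lists one row per group pair.
def _example_multicomparison():
    data = np.r_[np.random.randn(20), np.random.randn(20) + 1, np.random.randn(20) + 2]
    groups = np.r_[['a'] * 20, ['b'] * 20, ['c'] * 20]
    mc = MultiComparison(data, groups)
    return mc.tukeyhsd(alpha=0.05).summary()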
def rankdata(x):
'''rankdata, equivalent to scipy.stats.rankdata
just a different implementation, I have not yet compared speed
'''
uni, intlab = np.unique(x[:,0], return_inverse=True)
groupnobs = np.bincount(intlab)
groupxsum = np.bincount(intlab, weights=x[:,0])
groupxmean = groupxsum * 1.0 / groupnobs
rankraw = x[:,0].argsort().argsort()
groupranksum = np.bincount(intlab, weights=rankraw)
# start at 1 for stats.rankdata :
grouprankmean = groupranksum * 1.0 / groupnobs + 1
return grouprankmean[intlab]
#new
def compare_ordered(vals, alpha):
'''simple ordered sequential comparison of means
vals : array_like
means or rankmeans for independent groups
incomplete, no return, not used yet
'''
vals = np.asarray(vals)
alphaf = alpha # Notation ?
sortind = np.argsort(vals)
pvals = vals[sortind]
sortrevind = sortind.argsort()
ntests = len(vals)
#alphacSidak = 1 - np.power((1. - alphaf), 1./ntests)
#alphacBonf = alphaf / float(ntests)
v1, v2 = np.triu_indices(ntests, 1)
#v1,v2 have wrong sequence
for i in range(4):
for j in range(4,i, -1):
print(i,j)
def varcorrection_unbalanced(nobs_all, srange=False):
'''correction factor for variance with unequal sample sizes
this is just a harmonic mean
Parameters
----------
nobs_all : array_like
The number of observations for each sample
srange : bool
if true, then the correction is divided by the number of samples
for the variance of the studentized range statistic
Returns
-------
correction : float
Correction factor for variance.
Notes
-----
variance correction factor is
1/k * sum_i 1/n_i
where k is the number of samples and summation is over i=0,...,k-1.
If all n_i are the same, then the correction factor is 1.
This needs to be multiplied by the joint variance estimate, means square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
'''
nobs_all = np.asarray(nobs_all)
if not srange:
return (1./nobs_all).sum()
else:
return (1./nobs_all).sum()/len(nobs_all)
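# Hedged usage sketch (added): for samples of size 2, 4 and 8 the plain correction is
# 1/2 + 1/4 + 1/8 = 0.875; with srange=True it is additionally divided by the number
# of samples, giving 0.875 / 3.
def _example_varcorrection_unbalanced():
    return (varcorrection_unbalanced([2, 4, 8]),
            varcorrection_unbalanced([2, 4, 8], srange=True))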
def varcorrection_pairs_unbalanced(nobs_all, srange=False):
'''correction factor for variance with unequal sample sizes for all pairs
this is just a harmonic mean
Parameters
----------
nobs_all : array_like
The number of observations for each sample
srange : bool
if true, then the correction is divided by 2 for the variance of
the studentized range statistic
Returns
-------
correction : array
Correction factor for variance.
Notes
-----
variance correction factor is
1/k * sum_i 1/n_i
where k is the number of samples and summation is over i=0,...,k-1.
If all n_i are the same, then the correction factor is 1.
This needs to be multiplied by the joint variance estimate, means square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
For the studentized range statistic, the resulting factor has to be
divided by 2.
'''
#TODO: test and replace with broadcasting
n1, n2 = np.meshgrid(nobs_all, nobs_all)
if not srange:
return (1./n1 + 1./n2)
else:
return (1./n1 + 1./n2) / 2.
def varcorrection_unequal(var_all, nobs_all, df_all):
'''return joint variance from samples with unequal variances and unequal
sample sizes
something is wrong
Parameters
----------
var_all : array_like
The variance for each sample
nobs_all : array_like
The number of observations for each sample
df_all : array_like
degrees of freedom for each sample
Returns
-------
varjoint : float
joint variance.
dfjoint : float
joint Satterthwait's degrees of freedom
Notes
-----
(copy, paste not correct)
variance is
1/k * sum_i 1/n_i
where k is the number of samples and summation is over i=0,...,k-1.
If all n_i are the same, then the correction factor is 1/n.
This needs to be multiplied by the joint variance estimate, means square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
This is for variance of mean difference not of studentized range.
'''
var_all = np.asarray(var_all)
var_over_n = var_all *1./ nobs_all #avoid integer division
varjoint = var_over_n.sum()
dfjoint = varjoint**2 / (var_over_n**2 * df_all).sum()
return varjoint, dfjoint
def varcorrection_pairs_unequal(var_all, nobs_all, df_all):
'''return joint variance from samples with unequal variances and unequal
sample sizes for all pairs
something is wrong
Parameters
----------
var_all : array_like
The variance for each sample
nobs_all : array_like
The number of observations for each sample
df_all : array_like
degrees of freedom for each sample
Returns
-------
varjoint : array
joint variance.
dfjoint : array
joint Satterthwait's degrees of freedom
Notes
-----
(copy, paste not correct)
variance is
1/k * sum_i 1/n_i
where k is the number of samples and summation is over i=0,...,k-1.
If all n_i are the same, then the correction factor is 1.
This needs to be multiplied by the joint variance estimate, means square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
TODO: something looks wrong with dfjoint, is formula from SPSS
'''
#TODO: test and replace with broadcasting
v1, v2 = np.meshgrid(var_all, var_all)
n1, n2 = np.meshgrid(nobs_all, nobs_all)
df1, df2 = np.meshgrid(df_all, df_all)
varjoint = v1/n1 + v2/n2
dfjoint = varjoint**2 / (df1 * (v1/n1)**2 + df2 * (v2/n2)**2)
return varjoint, dfjoint
def tukeyhsd(mean_all, nobs_all, var_all, df=None, alpha=0.05, q_crit=None):
'''simultaneous Tukey HSD
check: instead of sorting, I use absolute value of pairwise differences
in means. That's irrelevant for the test, but maybe reporting actual
differences would be better.
CHANGED: meandiffs are with sign, studentized range uses abs
q_crit added for testing
TODO: error in variance calculation when nobs_all is scalar, missing 1/n
'''
mean_all = np.asarray(mean_all)
#check if or when other ones need to be arrays
n_means = len(mean_all)
if df is None:
df = nobs_all - 1
if np.size(df) == 1: # assumes balanced samples with df = n - 1, n_i = n
df_total = n_means * df
df = np.ones(n_means) * df
else:
df_total = np.sum(df)
if (np.size(nobs_all) == 1) and (np.size(var_all) == 1):
#balanced sample sizes and homogenous variance
var_pairs = 1. * var_all / nobs_all * np.ones((n_means, n_means))
elif np.size(var_all) == 1:
#unequal sample sizes and homogenous variance
var_pairs = var_all * varcorrection_pairs_unbalanced(nobs_all,
srange=True)
elif np.size(var_all) > 1:
var_pairs, df_sum = varcorrection_pairs_unequal(nobs_all, var_all, df)
var_pairs /= 2.
#check division by two for studentized range
else:
raise ValueError('not supposed to be here')
#meandiffs_ = mean_all[:,None] - mean_all
meandiffs_ = mean_all - mean_all[:,None] #reverse sign, check with R example
std_pairs_ = np.sqrt(var_pairs)
#select all pairs from upper triangle of matrix
idx1, idx2 = np.triu_indices(n_means, 1)
meandiffs = meandiffs_[idx1, idx2]
std_pairs = std_pairs_[idx1, idx2]
st_range = np.abs(meandiffs) / std_pairs #studentized range statistic
df_total_ = max(df_total, 5) #TODO: smallest df in table
if q_crit is None:
q_crit = get_tukeyQcrit2(n_means, df_total, alpha=alpha)
reject = st_range > q_crit
crit_int = std_pairs * q_crit
reject2 = np.abs(meandiffs) > crit_int
confint = np.column_stack((meandiffs - crit_int, meandiffs + crit_int))
return (idx1, idx2), reject, meandiffs, std_pairs, confint, q_crit, \
df_total, reject2
def simultaneous_ci(q_crit, var, groupnobs, pairindices=None):
"""Compute simultaneous confidence intervals for comparison of means.
q_crit value is generated from tukey hsd test. Variance is considered
across all groups. Returned halfwidths can be thought of as uncertainty
intervals around each group mean. They allow for simultaneous
comparison of pairwise significance among any pairs (by checking for
overlap)
Parameters
----------
q_crit : float
The Q critical value studentized range statistic from Tukey's HSD
var : float
The group variance
groupnobs : array-like object
Number of observations contained in each group.
pairindices : tuple of lists, optional
Indices corresponding to the upper triangle of matrix. Computed
here if not supplied
Returns
-------
halfwidths : ndarray
Half the width of each confidence interval for each group given in
groupnobs
See Also
--------
MultiComparison : statistics class providing significance tests
tukeyhsd : among other things, computes q_crit value
References
----------
.. [1] Hochberg, Y., and A. C. Tamhane. Multiple Comparison Procedures.
Hoboken, NJ: John Wiley & Sons, 1987.)
"""
# Set initial variables
ng = len(groupnobs)
if pairindices is None:
pairindices = np.triu_indices(ng, 1)
# Compute dij for all pairwise comparisons ala hochberg p. 95
gvar = var / groupnobs
d12 = np.sqrt(gvar[pairindices[0]] + gvar[pairindices[1]])
# Create the full d matrix given all known dij vals
d = np.zeros((ng, ng))
d[pairindices] = d12
d = d + d.conj().T
# Compute the two global sums from hochberg eq 3.32
sum1 = np.sum(d12)
sum2 = np.sum(d, axis=0)
if (ng > 2):
w = ((ng-1.) * sum2 - sum1) / ((ng - 1.) * (ng - 2.))
else:
w = sum1 * np.ones(2) / 2.
return (q_crit / np.sqrt(2))*w
def distance_st_range(mean_all, nobs_all, var_all, df=None, triu=False):
'''pairwise distance matrix, outsourced from tukeyhsd
CHANGED: meandiffs are with sign, studentized range uses abs
q_crit added for testing
TODO: error in variance calculation when nobs_all is scalar, missing 1/n
'''
mean_all = np.asarray(mean_all)
#check if or when other ones need to be arrays
n_means = len(mean_all)
if df is None:
df = nobs_all - 1
if np.size(df) == 1: # assumes balanced samples with df = n - 1, n_i = n
df_total = n_means * df
else:
df_total = np.sum(df)
if (np.size(nobs_all) == 1) and (np.size(var_all) == 1):
#balanced sample sizes and homogenous variance
var_pairs = 1. * var_all / nobs_all * np.ones((n_means, n_means))
elif np.size(var_all) == 1:
#unequal sample sizes and homogenous variance
var_pairs = var_all * varcorrection_pairs_unbalanced(nobs_all,
srange=True)
elif np.size(var_all) > 1:
var_pairs, df_sum = varcorrection_pairs_unequal(nobs_all, var_all, df)
var_pairs /= 2.
#check division by two for studentized range
else:
raise ValueError('not supposed to be here')
#meandiffs_ = mean_all[:,None] - mean_all
meandiffs = mean_all - mean_all[:,None] #reverse sign, check with R example
std_pairs = np.sqrt(var_pairs)
idx1, idx2 = np.triu_indices(n_means, 1)
if triu:
#select all pairs from upper triangle of matrix
meandiffs = meandiffs[idx1, idx2]
std_pairs = std_pairs[idx1, idx2]
st_range = np.abs(meandiffs) / std_pairs #studentized range statistic
return st_range, meandiffs, std_pairs, (idx1,idx2) #return square arrays
def contrast_allpairs(nm):
'''contrast or restriction matrix for all pairs of nm variables
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm*(nm-1)/2, nm)
contrast matrix for all pairwise comparisons
'''
contr = []
for i in range(nm):
for j in range(i+1, nm):
contr_row = np.zeros(nm)
contr_row[i] = 1
contr_row[j] = -1
contr.append(contr_row)
return np.array(contr)
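# Hedged usage sketch (added): for three variables the all-pairs contrast matrix has
# one row per pair, with +1/-1 in the two compared positions:
#
#     contrast_allpairs(3)
#     array([[ 1., -1.,  0.],
#            [ 1.,  0., -1.],
#            [ 0.,  1., -1.]])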
def contrast_all_one(nm):
'''contrast or restriction matrix for all against first comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against first comparisons
'''
contr = np.column_stack((np.ones(nm-1), -np.eye(nm-1)))
return contr
def contrast_diff_mean(nm):
'''contrast or restriction matrix for all against mean comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against mean comparisons
'''
return np.eye(nm) - np.ones((nm,nm))/nm
def tukey_pvalues(std_range, nm, df):
#corrected but very slow with warnings about integration
from statsmodels.sandbox.distributions.multivariate import mvstdtprob
#nm = len(std_range)
contr = contrast_allpairs(nm)
corr = np.dot(contr, contr.T)/2.
tstat = std_range / np.sqrt(2) * np.ones(corr.shape[0]) #need len of all pairs
return multicontrast_pvalues(tstat, corr, df=df)
def test_tukey_pvalues():
#testcase with 3 is not good because all pairs has also 3*(3-1)/2=3 elements
res = tukey_pvalues(3.649, 3, 16) #3.649*np.ones(3), 16)
assert_almost_equal(0.05, res[0], 3)
assert_almost_equal(0.05*np.ones(3), res[1], 3)
def multicontrast_pvalues(tstat, tcorr, df=None, dist='t', alternative='two-sided'):
'''pvalues for simultaneous tests
'''
from statsmodels.sandbox.distributions.multivariate import mvstdtprob
if (df is None) and (dist == 't'):
raise ValueError('df has to be specified for the t-distribution')
tstat = np.asarray(tstat)
ntests = len(tstat)
cc = np.abs(tstat)
pval_global = 1 - mvstdtprob(-cc,cc, tcorr, df)
pvals = []
for ti in cc:
limits = ti*np.ones(ntests)
pvals.append(1 - mvstdtprob(-limits, limits, tcorr, df))
return pval_global, np.asarray(pvals)
class StepDown(object):
'''a class for step down methods
This is currently a simple tree subset descent, similar to homogeneous_subsets,
but checks all leave-one-out subsets instead of assuming an ordered set.
Comment in SAS manual:
SAS only uses interval subsets of the sorted list, which is sufficient for range
tests (maybe also equal variance and balanced sample sizes are required).
For F-test based critical distances, the restriction to intervals is not sufficient.
This version uses a single critical value of the studentized range distribution
for all comparisons, and is therefore a step-down version of Tukey HSD.
The class is written so it can be subclassed, where the get_distance_matrix and
get_crit are overwritten to obtain other step-down procedures such as REGW.
iter_subsets can be overwritten, to get a recursion as in the many to one comparison
with a control such as in Dunnett's test.
A one-sided right tail test is not covered because the direction of the inequality
is hard coded in check_set. Also Peritz's check of partitions is not possible, but
I have not seen it mentioned in any more recent references.
I have only partially read the step-down procedure for closed tests by Westfall.
One change to make it more flexible, is to separate out the decision on a subset,
also because the F-based tests, FREGW in SPSS, take information from all elements of
a set and not just pairwise comparisons. I haven't looked at the details of
the F-based tests such as Scheffé yet. It looks like running an F-test on equality
of means in each subset. This would also outsource how pairwise conditions are
combined, any larger or max. This would also imply that the distance matrix cannot
be calculated in advance for tests like the F-based ones.
'''
def __init__(self, vals, nobs_all, var_all, df=None):
self.vals = vals
self.n_vals = len(vals)
self.nobs_all = nobs_all
self.var_all = var_all
self.df = df
# the following has been moved to run
#self.cache_result = {}
#self.crit = self.getcrit(0.5) #decide where to set alpha, moved to run
#self.accepted = [] #store accepted sets, not unique
def get_crit(self, alpha):
#currently tukey Q, add others
q_crit = get_tukeyQcrit(self.n_vals, self.df, alpha=alpha)
return q_crit * np.ones(self.n_vals)
def get_distance_matrix(self):
'''studentized range statistic'''
#make into property, decorate
dres = distance_st_range(self.vals, self.nobs_all, self.var_all, df=self.df)
self.distance_matrix = dres[0]
def iter_subsets(self, indices):
for ii in range(len(indices)):
idxsub = copy.copy(indices)
idxsub.pop(ii)
yield idxsub
def check_set(self, indices):
'''check whether pairwise distances of indices satisfy condition
'''
indtup = tuple(indices)
if indtup in self.cache_result:
return self.cache_result[indtup]
else:
set_distance_matrix = self.distance_matrix[np.asarray(indices)[:,None], indices]
n_elements = len(indices)
if np.any(set_distance_matrix > self.crit[n_elements-1]):
res = True
else:
res = False
self.cache_result[indtup] = res
return res
def stepdown(self, indices):
print(indices)
if self.check_set(indices): # larger than critical distance
if (len(indices) > 2): # step down into subsets if more than 2 elements
for subs in self.iter_subsets(indices):
self.stepdown(subs)
else:
self.rejected.append(tuple(indices))
else:
self.accepted.append(tuple(indices))
return indices
def run(self, alpha):
'''main function to run the test,
could be done in __call__ instead
this could have all the initialization code
'''
self.cache_result = {}
self.crit = self.get_crit(alpha) #decide where to set alpha, moved to run
self.accepted = [] #store accepted sets, not unique
self.rejected = []
self.get_distance_matrix()
self.stepdown(lrange(self.n_vals))
return list(set(self.accepted)), list(set(self.rejected))
def homogeneous_subsets(vals, dcrit):
'''recursively check all pairs of vals for minimum distance
step down method as in Newman-Keuls and Ryan procedures. This is not a
closed procedure since not all partitions are checked.
Parameters
----------
vals : array_like
values that are pairwise compared
dcrit : array_like or float
critical distance for rejecting, either float, or 2-dimensional array
with distances on the upper triangle.
Returns
-------
rejs : list of pairs
list of pair-indices with (strictly) larger than critical difference
nrejs : list of pairs
list of pair-indices with smaller than critical difference
lli : list of tuples
list of subsets with smaller than critical difference
res : tree
result of all comparisons (for checking)
this follows description in SPSS notes on Post-Hoc Tests
Because of the recursive structure, some comparisons are made several
times, but only unique pairs or sets are returned.
Examples
--------
>>> m = [0, 2, 2.5, 3, 6, 8, 9, 9.5,10 ]
>>> rej, nrej, ssli, res = homogeneous_subsets(m, 2)
>>> set_partition(ssli)
([(5, 6, 7, 8), (1, 2, 3), (4,)], [0])
>>> [np.array(m)[list(pp)] for pp in set_partition(ssli)[0]]
[array([ 8. , 9. , 9.5, 10. ]), array([ 2. , 2.5, 3. ]), array([ 6.])]
'''
nvals = len(vals)
indices_ = lrange(nvals)
rejected = []
subsetsli = []
if np.size(dcrit) == 1:
dcrit = dcrit*np.ones((nvals, nvals)) #example numbers for experimenting
def subsets(vals, indices_):
'''recursive function for constructing homogeneous subset
registers rejected and subsetli in outer scope
'''
i, j = (indices_[0], indices_[-1])
if vals[-1] - vals[0] > dcrit[i,j]:
rejected.append((indices_[0], indices_[-1]))
return [subsets(vals[:-1], indices_[:-1]),
subsets(vals[1:], indices_[1:]),
(indices_[0], indices_[-1])]
else:
subsetsli.append(tuple(indices_))
return indices_
res = subsets(vals, indices_)
all_pairs = [(i,j) for i in range(nvals) for j in range(nvals-1,i,-1)]
rejs = set(rejected)
not_rejected = list(set(all_pairs) - rejs)
return list(rejs), not_rejected, list(set(subsetsli)), res
def set_partition(ssli):
'''extract a partition from a list of tuples
this should be correctly called select largest disjoint sets.
    Begun and Gabriel (1981) don't seem to be bothered by sets of accepted
    hypotheses with shared elements,
e.g. maximal_accepted_sets = { {1,2,3}, {2,3,4} }
This creates a set partition from a list of sets given as tuples.
It tries to find the partition with the largest sets. That is, sets are
included after being sorted by length.
If the list doesn't include the singletons, then it will be only a
partial partition. Missing items are singletons (I think).
Examples
--------
>>> li
[(5, 6, 7, 8), (1, 2, 3), (4, 5), (0, 1)]
>>> set_partition(li)
([(5, 6, 7, 8), (1, 2, 3)], [0, 4])
'''
part = []
for s in sorted(list(set(ssli)), key=len)[::-1]:
#print(s,
s_ = set(s).copy()
if not any(set(s_).intersection(set(t)) for t in part):
#print('inside:', s
part.append(s)
#else: print(part
missing = list(set(i for ll in ssli for i in ll)
- set(i for ll in part for i in ll))
return part, missing
def set_remove_subs(ssli):
'''remove sets that are subsets of another set from a list of tuples
Parameters
----------
ssli : list of tuples
each tuple is considered as a set
Returns
-------
part : list of tuples
        new list with subset tuples removed, sorted by set-length of the tuples.
        The list contains the original tuples; duplicate elements within a tuple
        are not removed.
Examples
--------
>>> set_remove_subs([(0, 1), (1, 2), (1, 2, 3), (0,)])
[(1, 2, 3), (0, 1)]
>>> set_remove_subs([(0, 1), (1, 2), (1,1, 1, 2, 3), (0,)])
[(1, 1, 1, 2, 3), (0, 1)]
'''
#TODO: maybe convert all tuples to sets immediately, but I don't need the extra efficiency
part = []
for s in sorted(list(set(ssli)), key=lambda x: len(set(x)))[::-1]:
#print(s,
#s_ = set(s).copy()
if not any(set(s).issubset(set(t)) for t in part):
#print('inside:', s
part.append(s)
#else: print(part
## missing = list(set(i for ll in ssli for i in ll)
## - set(i for ll in part for i in ll))
return part
if __name__ == '__main__':
examples = ['tukey', 'tukeycrit', 'fdr', 'fdrmc', 'bonf', 'randmvn',
'multicompdev', 'None']#[-1]
if 'tukey' in examples:
#Example Tukey
x = np.array([[0,0,1]]).T + np.random.randn(3, 20)
print(Tukeythreegene(*x))
#Example FDR
#------------
if ('fdr' in examples) or ('bonf' in examples):
x1 = [1,1,1,0,-1,-1,-1,0,1,1,-1,1]
print(lzip(np.arange(len(x1)), x1))
print(maxzero(x1))
#[(0, 1), (1, 1), (2, 1), (3, 0), (4, -1), (5, -1), (6, -1), (7, 0), (8, 1), (9, 1), (10, -1), (11, 1)]
#(11, array([ 3, 7, 11]))
print(maxzerodown(-np.array(x1)))
locs = np.linspace(0,1,10)
locs = np.array([0.]*6 + [0.75]*4)
rvs = locs + stats.norm.rvs(size=(20,10))
tt, tpval = stats.ttest_1samp(rvs, 0)
tpval_sortind = np.argsort(tpval)
tpval_sorted = tpval[tpval_sortind]
reject = tpval_sorted < ecdf(tpval_sorted)*0.05
reject2 = max(np.nonzero(reject))
print(reject)
res = np.array(lzip(np.round(rvs.mean(0),4),np.round(tpval,4),
reject[tpval_sortind.argsort()]),
dtype=[('mean',float),
('pval',float),
('reject', np.bool8)])
#from statsmodels.iolib import SimpleTable
print(SimpleTable(res, headers=res.dtype.names))
print(fdrcorrection_bak(tpval, alpha=0.05))
print(reject)
print('\nrandom example')
print('bonf', multipletests(tpval, alpha=0.05, method='bonf'))
print('sidak', multipletests(tpval, alpha=0.05, method='sidak'))
print('hs', multipletests(tpval, alpha=0.05, method='hs'))
print('sh', multipletests(tpval, alpha=0.05, method='sh'))
pvals = np.array('0.0020 0.0045 0.0060 0.0080 0.0085 0.0090 0.0175 0.0250 '
'0.1055 0.5350'.split(), float)
print('\nexample from lecturnotes')
for meth in ['bonf', 'sidak', 'hs', 'sh']:
print(meth)
print(multipletests(pvals, alpha=0.05, method=meth))
if 'fdrmc' in examples:
mcres = mcfdr(nobs=100, nrepl=1000, ntests=30, ntrue=30, mu=0.1, alpha=0.05, rho=0.3)
mcmeans = np.array(mcres).mean(0)
print(mcmeans)
print(mcmeans[0]/6., 1-mcmeans[1]/4.)
print(mcmeans[:4], mcmeans[-4:])
if 'randmvn' in examples:
rvsmvn = randmvn(0.8, (5000,5))
print(np.corrcoef(rvsmvn, rowvar=0))
print(rvsmvn.var(0))
if 'tukeycrit' in examples:
print(get_tukeyQcrit(8, 8, alpha=0.05), 5.60)
print(get_tukeyQcrit(8, 8, alpha=0.01), 7.47)
if 'multicompdev' in examples:
#development of kruskal-wallis multiple-comparison
#example from matlab file exchange
X = np.array([[7.68, 1], [7.69, 1], [7.70, 1], [7.70, 1], [7.72, 1],
[7.73, 1], [7.73, 1], [7.76, 1], [7.71, 2], [7.73, 2],
[7.74, 2], [7.74, 2], [7.78, 2], [7.78, 2], [7.80, 2],
[7.81, 2], [7.74, 3], [7.75, 3], [7.77, 3], [7.78, 3],
[7.80, 3], [7.81, 3], [7.84, 3], [7.71, 4], [7.71, 4],
[7.74, 4], [7.79, 4], [7.81, 4], [7.85, 4], [7.87, 4],
[7.91, 4]])
xli = [X[X[:,1]==k,0] for k in range(1,5)]
xranks = stats.rankdata(X[:,0])
xranksli = [xranks[X[:,1]==k] for k in range(1,5)]
xnobs = np.array([len(x) for x in xli])
meanranks = [item.mean() for item in xranksli]
sumranks = [item.sum() for item in xranksli]
# equivalent function
#from scipy import special
#-np.sqrt(2.)*special.erfcinv(2-0.5) == stats.norm.isf(0.25)
stats.norm.sf(0.67448975019608171)
stats.norm.isf(0.25)
mrs = np.sort(meanranks)
v1, v2 = np.triu_indices(4,1)
print('\nsorted rank differences')
print(mrs[v2] - mrs[v1])
diffidx = np.argsort(mrs[v2] - mrs[v1])[::-1]
mrs[v2[diffidx]] - mrs[v1[diffidx]]
print('\nkruskal for all pairs')
for i,j in zip(v2[diffidx], v1[diffidx]):
print(i,j, stats.kruskal(xli[i], xli[j]))
mwu, mwupval = stats.mannwhitneyu(xli[i], xli[j], use_continuity=False)
print(mwu, mwupval*2, mwupval*2<0.05/6., mwupval*2<0.1/6.)
uni, intlab = np.unique(X[:,0], return_inverse=True)
groupnobs = np.bincount(intlab)
groupxsum = np.bincount(intlab, weights=X[:,0])
groupxmean = groupxsum * 1.0 / groupnobs
rankraw = X[:,0].argsort().argsort()
groupranksum = np.bincount(intlab, weights=rankraw)
# start at 1 for stats.rankdata :
grouprankmean = groupranksum * 1.0 / groupnobs + 1
assert_almost_equal(grouprankmean[intlab], stats.rankdata(X[:,0]), 15)
gs = GroupsStats(X, useranks=True)
print('\ngroupmeanfilter and grouprankmeans')
print(gs.groupmeanfilter)
print(grouprankmean[intlab])
#the following has changed
#assert_almost_equal(gs.groupmeanfilter, stats.rankdata(X[:,0]), 15)
xuni, xintlab = np.unique(X[:,0], return_inverse=True)
gs2 = GroupsStats(np.column_stack([X[:,0], xintlab]), useranks=True)
#assert_almost_equal(gs2.groupmeanfilter, stats.rankdata(X[:,0]), 15)
rankbincount = np.bincount(xranks.astype(int))
nties = rankbincount[rankbincount > 1]
        ntot = float(len(xranks))
tiecorrection = 1 - (nties**3 - nties).sum()/(ntot**3 - ntot)
assert_almost_equal(tiecorrection, stats.tiecorrect(xranks),15)
print('\ntiecorrection for data and ranks')
print(tiecorrection)
print(tiecorrect(xranks))
tot = X.shape[0]
t=500 #168
f=(tot*(tot+1.)/12.)-(t/(6.*(tot-1.)))
f=(tot*(tot+1.)/12.)/stats.tiecorrect(xranks)
print('\npairs of mean rank differences')
for i,j in zip(v2[diffidx], v1[diffidx]):
#pdiff = np.abs(mrs[i] - mrs[j])
pdiff = np.abs(meanranks[i] - meanranks[j])
se = np.sqrt(f * np.sum(1./xnobs[[i,j]] )) #np.array([8,8]))) #Fixme groupnobs[[i,j]] ))
print(i,j, pdiff, se, pdiff/se, pdiff/se>2.6310)
multicomp = MultiComparison(*X.T)
multicomp.kruskal()
gsr = GroupsStats(X, useranks=True)
print('\nexamples for kruskal multicomparison')
for i in range(10):
x1, x2 = (np.random.randn(30,2) + np.array([0, 0.5])).T
skw = stats.kruskal(x1, x2)
mc2=MultiComparison(np.r_[x1, x2], np.r_[np.zeros(len(x1)), np.ones(len(x2))])
newskw = mc2.kruskal()
print(skw, np.sqrt(skw[0]), skw[1]-newskw, (newskw/skw[1]-1)*100)
tablett, restt, arrtt = multicomp.allpairtest(stats.ttest_ind)
tablemw, resmw, arrmw = multicomp.allpairtest(stats.mannwhitneyu)
print('')
print(tablett)
print('')
print(tablemw)
tablemwhs, resmw, arrmw = multicomp.allpairtest(stats.mannwhitneyu, method='hs')
print('')
print(tablemwhs)
if 'last' in examples:
xli = (np.random.randn(60,4) + np.array([0, 0, 0.5, 0.5])).T
#Xrvs = np.array(catstack(xli))
xrvs, xrvsgr = catstack(xli)
multicompr = MultiComparison(xrvs, xrvsgr)
tablett, restt, arrtt = multicompr.allpairtest(stats.ttest_ind)
print(tablett)
xli=[[8,10,9,10,9],[7,8,5,8,5],[4,8,7,5,7]]
x,l = catstack(xli)
gs4 = GroupsStats(np.column_stack([x,l]))
print(gs4.groupvarwithin())
#test_tukeyhsd() #moved to test_multi.py
gmeans = np.array([ 7.71375, 7.76125, 7.78428571, 7.79875])
gnobs = np.array([8, 8, 7, 8])
sd = StepDown(gmeans, gnobs, 0.001, [27])
#example from BKY
pvals = [0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344, 0.0459,
0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000 ]
#same number of rejection as in BKY paper:
#single step-up:4, two-stage:8, iterated two-step:9
#also alpha_star is the same as theirs for TST
print(fdrcorrection0(pvals, alpha=0.05, method='indep'))
print(fdrcorrection_twostage(pvals, alpha=0.05, iter=False))
res_tst = fdrcorrection_twostage(pvals, alpha=0.05, iter=False)
assert_almost_equal([0.047619, 0.0649], res_tst[-1][:2],3) #alpha_star for stage 2
assert_equal(8, res_tst[0].sum())
print(fdrcorrection_twostage(pvals, alpha=0.05, iter=True))
print('fdr_gbs', multipletests(pvals, alpha=0.05, method='fdr_gbs'))
#multicontrast_pvalues(tstat, tcorr, df)
test_tukey_pvalues()
tukey_pvalues(3.649, 3, 16)
|
paninetworks/neutron
|
refs/heads/master
|
neutron/tests/unit/extensions/test_quotasv2.py
|
7
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_config import cfg
import testtools
from webob import exc
import webtest
from neutron.api import extensions
from neutron.api.v2 import router
from neutron.common import config
from neutron.common import constants
from neutron.common import exceptions
from neutron import context
from neutron.db import quota_db
from neutron import quota
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import testlib_api
TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
_get_path = test_base._get_path
class QuotaExtensionTestCase(testlib_api.WebTestCase):
def setUp(self):
super(QuotaExtensionTestCase, self).setUp()
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(tools.AttributeMapMemento())
# Create the default configurations
self.config_parse()
# Update the plugin and extensions path
self.setup_coreplugin(TARGET_PLUGIN)
cfg.CONF.set_override(
'quota_items',
['network', 'subnet', 'port', 'extra1'],
group='QUOTAS')
quota.QUOTAS = quota.QuotaEngine()
quota.register_resources_from_config()
self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
self.plugin = self._plugin_patcher.start()
self.plugin.return_value.supported_extension_aliases = ['quotas']
# QUOTAS will register the items in conf when starting
# extra1 here is added later, so have to do it manually
quota.QUOTAS.register_resource_by_name('extra1')
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
app = config.load_paste_app('extensions_test_app')
ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self.api = webtest.TestApp(ext_middleware)
# Initialize the router for the core API in order to ensure core quota
# resources are registered
router.APIRouter()
def tearDown(self):
self.api = None
self.plugin = None
super(QuotaExtensionTestCase, self).tearDown()
def _test_quota_default_values(self, expected_values):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
quota = self.deserialize(res)
for resource, expected_value in expected_values.items():
self.assertEqual(expected_value,
quota['quota'][resource])
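    # For orientation, a hedged sketch of the response body these tests deserialize;
    # the resource names and values shown are the defaults exercised by the
    # subclasses below:
    #   {'quota': {'network': 10, 'subnet': 10, 'port': 50, 'extra1': -1}}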
class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
fmt = 'json'
def setUp(self):
cfg.CONF.set_override(
'quota_driver',
'neutron.db.quota_db.DbQuotaDriver',
group='QUOTAS')
super(QuotaExtensionDbTestCase, self).setUp()
def test_quotas_loaded_right(self):
res = self.api.get(_get_path('quotas', fmt=self.fmt))
quota = self.deserialize(res)
self.assertEqual([], quota['quotas'])
self.assertEqual(200, res.status_int)
def test_quotas_default_values(self):
self._test_quota_default_values(
{'network': 10,
'subnet': 10,
'port': 50,
'extra1': -1})
def test_quotas_negative_default_value(self):
cfg.CONF.set_override(
'quota_port', -666, group='QUOTAS')
cfg.CONF.set_override(
'quota_network', -10, group='QUOTAS')
cfg.CONF.set_override(
'quota_subnet', -50, group='QUOTAS')
self._test_quota_default_values(
{'network': -1,
'subnet': -1,
'port': -1,
'extra1': -1})
def test_show_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_show_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_show_quotas_with_owner_tenant(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(10, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_list_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual([], quota['quotas'])
def test_list_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_with_non_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 'abc'}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_negative_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': -2}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_out_of_range_integer_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': constants.DB_INTEGER_MAX_VALUE + 1}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_update_quotas_to_unlimited(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': -1}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_exceeding_current_limit(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 120}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_with_non_support_resource_returns_400(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'abc': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_update_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_update_attributes(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'extra1': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['extra1'])
def test_delete_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(204, res.status_int)
def test_delete_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_quotas_loaded_bad_returns_404(self):
try:
res = self.api.get(_get_path('quotas'), expect_errors=True)
self.assertEqual(404, res.status_int)
except Exception:
pass
def test_quotas_limit_check(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 5}}
res = self.api.put(_get_path('quotas', id=tenant_id,
fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=4)
def test_quotas_limit_check_with_invalid_quota_value(self):
tenant_id = 'tenant_id1'
with testtools.ExpectedException(exceptions.InvalidQuotaValue):
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=-2)
def test_quotas_limit_check_with_not_registered_resource_fails(self):
tenant_id = 'tenant_id1'
self.assertRaises(exceptions.QuotaResourceUnknown,
quota.QUOTAS.limit_check,
context.get_admin_context(),
tenant_id,
foobar=1)
def test_quotas_get_tenant_from_request_context(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(quota['tenant']['tenant_id'], tenant_id)
def test_quotas_get_tenant_from_empty_request_context_returns_400(self):
env = {'neutron.context': context.Context('', '',
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(400, res.status_int)
class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
fmt = 'json'
def setUp(self):
cfg.CONF.set_override(
'quota_driver',
'neutron.quota.ConfDriver',
group='QUOTAS')
super(QuotaExtensionCfgTestCase, self).setUp()
def test_quotas_default_values(self):
self._test_quota_default_values(
{'network': 10,
'subnet': 10,
'port': 50,
'extra1': -1})
def test_quotas_negative_default_value(self):
cfg.CONF.set_override(
'quota_port', -666, group='QUOTAS')
self._test_quota_default_values(
{'network': 10,
'subnet': 10,
'port': -1,
'extra1': -1})
def test_show_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
def test_show_quotas_without_admin_forbidden(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=False)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_forbidden(self):
tenant_id = 'tenant_id1'
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas),
expect_errors=True)
self.assertEqual(403, res.status_int)
def test_delete_quotas_forbidden(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
class TestDbQuotaDriver(base.BaseTestCase):
"""Test for neutron.db.quota_db.DbQuotaDriver."""
def test_get_tenant_quotas_arg(self):
"""Call neutron.db.quota_db.DbQuotaDriver._get_quotas."""
driver = quota_db.DbQuotaDriver()
ctx = context.Context('', 'bar')
foo_quotas = {'network': 5}
default_quotas = {'network': 10}
target_tenant = 'foo'
with mock.patch.object(quota_db.DbQuotaDriver,
'get_tenant_quotas',
return_value=foo_quotas) as get_tenant_quotas:
quotas = driver._get_quotas(ctx,
target_tenant,
default_quotas)
self.assertEqual(quotas, foo_quotas)
get_tenant_quotas.assert_called_once_with(ctx,
default_quotas,
target_tenant)
class TestQuotaDriverLoad(base.BaseTestCase):
def setUp(self):
super(TestQuotaDriverLoad, self).setUp()
# Make sure QuotaEngine is reinitialized in each test.
quota.QUOTAS._driver = None
def _test_quota_driver(self, cfg_driver, loaded_driver,
with_quota_db_module=True):
cfg.CONF.set_override('quota_driver', cfg_driver, group='QUOTAS')
with mock.patch.dict(sys.modules, {}):
if (not with_quota_db_module and
'neutron.db.quota_db' in sys.modules):
del sys.modules['neutron.db.quota_db']
driver = quota.QUOTAS.get_driver()
self.assertEqual(loaded_driver, driver.__class__.__name__)
def test_quota_db_driver_with_quotas_table(self):
self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver',
'DbQuotaDriver', True)
def test_quota_db_driver_fallback_conf_driver(self):
self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver',
'ConfDriver', False)
def test_quota_conf_driver(self):
self._test_quota_driver('neutron.quota.ConfDriver',
'ConfDriver', True)
|
endlessm/chromium-browser
|
refs/heads/master
|
third_party/llvm/lldb/test/API/functionalities/plugins/python_os_plugin/operating_system2.py
|
8
|
#!/usr/bin/python
import lldb
import struct
class OperatingSystemPlugIn(object):
"""Class that provides data for an instance of a LLDB 'OperatingSystemPython' plug-in class"""
def __init__(self, process):
        '''Initialization needs a valid lldb.SBProcess object.
This plug-in will get created after a live process is valid and has stopped for the
first time.'''
self.process = None
self.registers = None
self.threads = None
if isinstance(process, lldb.SBProcess) and process.IsValid():
self.process = process
            self.threads = None # Will be a list of dictionaries, one per thread
def get_target(self):
# NOTE: Don't use "lldb.target" when trying to get your target as the "lldb.target"
# tracks the current target in the LLDB command interpreter which isn't the
# correct thing to use for this plug-in.
return self.process.target
def create_thread(self, tid, context):
if tid == 0x444444444:
thread_info = {
'tid': tid,
'name': 'four',
'queue': 'queue4',
'state': 'stopped',
'stop_reason': 'none'}
self.threads.append(thread_info)
return thread_info
return None
def get_thread_info(self):
if not self.threads:
# The sample dictionary below shows the values that can be returned for a thread
# tid => thread ID (mandatory)
# name => thread name (optional key/value pair)
# queue => thread dispatch queue name (optional key/value pair)
            # state => thread state (mandatory, set to 'stopped' for now)
# stop_reason => thread stop reason. (mandatory, usually set to 'none')
# Possible values include:
# 'breakpoint' if the thread is stopped at a breakpoint
# 'none' thread is just stopped because the process is stopped
# 'trace' the thread just single stepped
# The usual value for this while threads are in memory is 'none'
# register_data_addr => the address of the register data in memory (optional key/value pair)
# Specifying this key/value pair for a thread will avoid a call to get_register_data()
# and can be used when your registers are in a thread context structure that is contiguous
# in memory. Don't specify this if your register layout in memory doesn't match the layout
# described by the dictionary returned from a call to the
# get_register_info() method.
self.threads = [
{'tid': 0x111111111, 'core': 0}
]
return self.threads
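    # For reference, a hedged sketch of a thread entry using the optional keys
    # documented above; all values are illustrative, not taken from a real target:
    #   {'tid': 0x222222222, 'name': 'two', 'queue': 'queue2',
    #    'state': 'stopped', 'stop_reason': 'breakpoint',
    #    'register_data_addr': 0x100000000}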
def get_register_info(self):
if self.registers is None:
self.registers = dict()
self.registers['sets'] = ['GPR']
self.registers['registers'] = [
{'name': 'rax', 'bitsize': 64, 'offset': 0, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 0, 'dwarf': 0},
{'name': 'rbx', 'bitsize': 64, 'offset': 8, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 3, 'dwarf': 3},
{'name': 'rcx', 'bitsize': 64, 'offset': 16, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 2, 'dwarf': 2, 'generic': 'arg4', 'alt-name': 'arg4', },
{'name': 'rdx', 'bitsize': 64, 'offset': 24, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 1, 'dwarf': 1, 'generic': 'arg3', 'alt-name': 'arg3', },
{'name': 'rdi', 'bitsize': 64, 'offset': 32, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 5, 'dwarf': 5, 'generic': 'arg1', 'alt-name': 'arg1', },
{'name': 'rsi', 'bitsize': 64, 'offset': 40, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 4, 'dwarf': 4, 'generic': 'arg2', 'alt-name': 'arg2', },
{'name': 'rbp', 'bitsize': 64, 'offset': 48, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 6, 'dwarf': 6, 'generic': 'fp', 'alt-name': 'fp', },
{'name': 'rsp', 'bitsize': 64, 'offset': 56, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 7, 'dwarf': 7, 'generic': 'sp', 'alt-name': 'sp', },
{'name': 'r8', 'bitsize': 64, 'offset': 64, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 8, 'dwarf': 8, 'generic': 'arg5', 'alt-name': 'arg5', },
{'name': 'r9', 'bitsize': 64, 'offset': 72, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 9, 'dwarf': 9, 'generic': 'arg6', 'alt-name': 'arg6', },
{'name': 'r10', 'bitsize': 64, 'offset': 80, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 10, 'dwarf': 10},
{'name': 'r11', 'bitsize': 64, 'offset': 88, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 11, 'dwarf': 11},
{'name': 'r12', 'bitsize': 64, 'offset': 96, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 12, 'dwarf': 12},
{'name': 'r13', 'bitsize': 64, 'offset': 104, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 13, 'dwarf': 13},
{'name': 'r14', 'bitsize': 64, 'offset': 112, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 14, 'dwarf': 14},
{'name': 'r15', 'bitsize': 64, 'offset': 120, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 15, 'dwarf': 15},
{'name': 'rip', 'bitsize': 64, 'offset': 128, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'gcc': 16, 'dwarf': 16, 'generic': 'pc', 'alt-name': 'pc'},
{'name': 'rflags', 'bitsize': 64, 'offset': 136, 'encoding': 'uint', 'format': 'hex', 'set': 0, 'generic': 'flags', 'alt-name': 'flags'},
{'name': 'cs', 'bitsize': 64, 'offset': 144, 'encoding': 'uint', 'format': 'hex', 'set': 0},
{'name': 'fs', 'bitsize': 64, 'offset': 152, 'encoding': 'uint', 'format': 'hex', 'set': 0},
{'name': 'gs', 'bitsize': 64, 'offset': 160, 'encoding': 'uint', 'format': 'hex', 'set': 0},
]
return self.registers
def get_register_data(self, tid):
return struct.pack(
'21Q',
tid + 1,
tid + 2,
tid + 3,
tid + 4,
tid + 5,
tid + 6,
tid + 7,
tid + 8,
tid + 9,
tid + 10,
tid + 11,
tid + 12,
tid + 13,
tid + 14,
tid + 15,
tid + 16,
tid + 17,
tid + 18,
tid + 19,
tid + 20,
tid + 21)
|
stonegithubs/odoo
|
refs/heads/8.0
|
addons/purchase_analytic_plans/__init__.py
|
441
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Sales
#----------------------------------------------------------
import purchase_analytic_plans
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
EnvGen/toolbox
|
refs/heads/master
|
scripts/convenience/construct_ena_sequencing_runs_table.py
|
1
|
#!/usr/bin/env python
"""construct_ena_sequencing_runs_table.py
Based on a folder with uploaded files and a template, construct a table ready to be submitted.
This script is not general but is tailored to the NGI/Uppmax scenario.
Ideally the user is expected to copy this script and edit it to suit their needs.
"""
import argparse
import sys
import os
import glob
from os.path import join as opj
import pandas as pd
from collections import defaultdict
import gzip
# Need to fetch file name and link it to sample id
# Fetch the md5sum already calculated
def main(args):
md5sum_df = pd.read_table(args.md5_summary, sep=',', header=None, names=['file_name', 'md5sum'], index_col=0)
insert_sizes = pd.read_table(args.insert_size, index_col=0)
info_d = {}
for sample_dir in args.sample_dirs:
for R1_run_file in glob.glob(opj(sample_dir, "*", "*_R1*.fastq.gz")):
R1_file_name=os.path.basename(R1_run_file)
sample_name="_".join(R1_file_name.split('_')[0:2])
run_id="_".join(R1_file_name.split('_')[0:4])
run_info = {}
run_info['sample_accession'] = sample_name
run_info['library_name'] = run_id
is_series = insert_sizes.ix[sample_name]['Avg. FS']
try:
run_info['insert_size'] = int(round(is_series))
except TypeError:
run_info['insert_size'] = int(round(insert_sizes[insert_sizes['Lib QC'] == 'PASSED'].ix[sample_name]['Avg. FS']))
run_info['forward_file_name'] = R1_file_name
run_info['forward_file_md5'] = md5sum_df.loc[R1_file_name]['md5sum']
R2_file_name = R1_file_name.replace("R1", "R2")
run_info['reverse_file_name'] = R2_file_name
run_info['reverse_file_md5'] = md5sum_df.loc[R2_file_name]['md5sum']
run_info['library_source'] = 'METAGENOMIC'
run_info['library_selection'] = 'RANDOM'
run_info['library_strategy'] = 'WGS'
run_info['library_construction_protocol'] = 'Rubicon Thruplex'
run_info['instrument_model'] = 'Illumina HiSeq 2500'
run_info['file_type'] = 'fastq'
run_info['library_layout'] = 'PAIRED'
info_d[run_id] = run_info
    all_columns_sorted = ['sample_accession', 'library_name', 'library_source', 'insert_size', \
            'library_selection', 'library_strategy', 'library_construction_protocol', 'instrument_model', \
            'file_type', 'library_layout', \
            'forward_file_name', 'forward_file_md5', 'reverse_file_name', 'reverse_file_md5']
df = pd.DataFrame.from_dict(info_d, orient='index')
df[all_columns_sorted].to_csv(sys.stdout, index=False, sep='\t', header=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("md5_summary", help="Table (csv) with md5sum values for all files")
parser.add_argument("insert_size", help="Table with insert size per sample")
parser.add_argument("sample_dirs", nargs='*', help="Directories where read files are located in subdirs")
args = parser.parse_args()
main(args)
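    # A hedged example invocation (file names and paths below are illustrative only);
    # the resulting tab-separated table is written to stdout:
    #   ./construct_ena_sequencing_runs_table.py md5sums.csv insert_sizes.tsv \
    #       /path/to/delivery/sample_dir_* > ena_sequencing_runs.tsv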
|
incuna/authentic
|
refs/heads/master
|
authentic2/idp/saml/idff12_endpoints.py
|
1
|
import datetime
import logging
import urllib
import lasso
from django.contrib.auth.views import redirect_to_login
from django.conf.urls.defaults import patterns
from django.http import HttpResponse, HttpResponseForbidden, \
HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.conf import settings
from authentic2.saml.models import LibertyArtifact
from authentic2.saml.common import get_idff12_metadata, create_idff12_server, \
load_provider, load_federation, load_session, save_federation, \
save_session, return_idff12_response, get_idff12_request_message, \
get_soap_message, return_saml_soap_response
from authentic2.utils import cache_and_validate
def fill_assertion(request, saml_request, assertion, provider_id):
'''Stuff an assertion with information extracted from the user record
and from the session, and eventually from transactions linked to the
request, i.e. a login event or a consent event.'''
# Use assertion ID as session index
assertion.authenticationStatement.sessionIndex = assertion.assertionId
# TODO: add attributes from user account
# TODO: determine and add attributes from the session, for anonymous
    # users (pseudonymous federation, openid without accounts)
# TODO: add information from the login event, of the session or linked
# to the request id
# TODO: use information from the consent event to specialize release of
# attributes (user only authorized to give its email for email)
def build_assertion(request, login):
'''After a successfully validated authentication request, build an
authentication assertion'''
now = datetime.datetime.utcnow()
# 1 minute ago
notBefore = now-datetime.timedelta(0,60)
# 1 minute in the future
notOnOrAfter = now+datetime.timedelta(0,60)
# TODO: find authn method from login event or from session
login.buildAssertion(lasso.LIB_AUTHN_CONTEXT_CLASS_REF_PREVIOUS_SESSION,
now.isoformat()+'Z',
'unused', # reauthenticateOnOrAfter is only for ID-FF 1.2
notBefore.isoformat()+'Z',
notOnOrAfter.isoformat()+'Z')
assertion = login.assertion
fill_assertion(request, login.request, assertion, login.remoteProviderId)
@cache_and_validate(settings.LOCAL_METADATA_CACHE_TIMEOUT)
def metadata(request):
'''Return ID-FFv1.2 metadata for our IdP'''
return HttpResponse(get_idff12_metadata(request, reverse(metadata)),
mimetype = 'text/xml')
def save_artifact(request, login):
LibertyArtifact(artifact = login.assertionArtifact,
django_session_key = request.session.session_key,
provider_id = login.remoteProviderId).save()
# TODO: handle cancellation, by retrieving a login event and looking for
# cancelled flag
# TODO: handle force_authn by redirecting to the login page with a parameter
# linking the login event with this request id and next=current_path
@csrf_exempt
def sso(request):
"""Endpoint for AuthnRequests asynchronously sent, i.e. POST or Redirect"""
# 1. Process the request, separate POST and GET treatment
message = get_idff12_request_message(request)
if not message:
return HttpResponseForbidden('Invalid SAML 1.1 AuthnRequest: "%s"' % message)
server = create_idff12_server(request, reverse(metadata))
login = lasso.Login(server)
while True:
try:
logging.debug('ID-FFv1.2: processing sso request %r' % message)
login.processAuthnRequestMsg(message)
break
except lasso.ProfileInvalidMsgError:
message = _('Invalid SAML 1.1 AuthnRequest: %r') % message
logging.error(message)
return HttpResponseForbidden(message)
except lasso.DsInvalidSignatureError:
message = _('Invalid signature on SAML 1.1 AuthnRequest: %r') % message
logging.error(message)
# This error is handled through SAML status codes, so return a
# response
return finish_sso(request, login)
except lasso.ServerProviderNotFoundError:
            # This path is not exceptional; it should be the normal case since we did
# not load any provider in the Server object
provider_id = login.remoteProviderId
# 2. Lookup the ProviderID
logging.info('ID-FFv1.2: AuthnRequest from %r' % provider_id)
provider_loaded = load_provider(request, provider_id, server=login.server)
if not provider_loaded:
consent_obtained = False
message = _('ID-FFv1.2: provider %r unknown') % provider_id
logging.warning(message)
return HttpResponseForbidden(message)
else:
        # XXX: is consent always automatic for known providers? Maybe
# add a configuration key on the provider.
consent_obtained = True
return sso_after_process_request(request, login,
consent_obtained = consent_obtained)
def sso_after_process_request(request, login,
consent_obtained = True, user = None, save = True):
'''Common path for sso and idp_initiated_sso.
consent_obtained: whether the user has given his consent to this federation
user: the user which must be federated, if None, current user is the default.
save: whether to save the result of this transaction or not.
'''
if user is None:
user = request.user
# Flags possible:
# - consent
# - isPassive
# - forceAuthn
#
# 3. TODO: Check for permission
if login.mustAuthenticate():
# TODO:
# check that it exists a login transaction for this request id
# - if there is, then provoke one with a redirect to
# login?next=<current_url>
# - if there is then set user_authenticated to the result of the
# login event
        # Work around the lack of information returned by mustAuthenticate()
if login.request.forceAuthn or request.user.is_anonymous():
return redirect_to_login(request.get_full_path())
else:
user_authenticated = True
else:
user_authenticated = not request.user.is_anonymous()
# 3.1 Ask for consent
if user_authenticated:
# TODO: for autoloaded providers always ask for consent
if login.mustAskForConsent() or not consent_obtained:
# TODO: replace False by check against request id
if False:
consent_obtained = True
# i.e. redirect to /idp/consent?id=requestId
# then check that Consent(id=requestId) exists in the database
else:
return HttpResponseRedirect('consent_federation?id=%s&next=%s' %
( login.request.requestId,
urllib.quote(request.get_full_path())) )
# 4. Validate the request, passing authentication and consent status
try:
login.validateRequestMsg(user_authenticated, consent_obtained)
except:
raise
do_federation = False
else:
do_federation = True
# 5. Lookup the federations
if do_federation:
load_federation(request, login, user)
load_session(request, login)
        # 3. Build an assertion, fill attributes
build_assertion(request, login)
return finish_sso(request, login, user = user, save = save)
def finish_sso(request, login, user = None, save = True):
'''Return the response to an AuthnRequest
user: the user which must be federated, if None default to current user.
save: whether to save the result of this transaction or not.
'''
if user is None:
user = request.user
if login.protocolProfile == lasso.LOGIN_PROTOCOL_PROFILE_BRWS_ART:
login.buildArtifactMsg(lasso.HTTP_METHOD_REDIRECT)
save_artifact(request, login)
elif login.protocolProfile == lasso.LOGIN_PROTOCOL_PROFILE_BRWS_POST:
login.buildAuthnResponseMsg()
else:
raise NotImplementedError()
if save:
save_federation(request, login)
save_session(request, login)
return return_idff12_response(request, login,
title=_('Authentication response'))
def artifact_resolve(request, soap_message):
'''Resolve a SAMLv1.1 ArtifactResolve request
'''
server = create_idff12_server(request, reverse(metadata))
login = lasso.Login(server)
try:
login.processRequestMsg(soap_message)
except:
raise
logging.debug('ID-FFv1.2 artifact resolve %r' % soap_message)
liberty_artifact = LibertyArtifact.objects.get(
artifact = login.assertionArtifact)
if liberty_artifact:
liberty_artifact.delete()
provider_id = liberty_artifact.provider_id
load_provider(request, provider_id, server=login.server)
load_session(request, login,
session_key = liberty_artifact.django_session_key)
logging.info('ID-FFv1.2 artifact resolve from %r for artifact %r' % (
provider_id, login.assertionArtifact))
else:
logging.warning('ID-FFv1.2 no artifact found for %r' % login.assertionArtifact)
provider_id = None
return finish_artifact_resolve(request, login, provider_id,
session_key = liberty_artifact.django_session_key)
def finish_artifact_resolve(request, login, provider_id, session_key = None):
'''Finish artifact resolver processing:
compute a response, returns it and eventually update stored
LassoSession.
provider_id: the entity id of the provider which should receive the artifact
session_key: the session_key of the session linked to the artifact, if None it means no artifact was found
'''
try:
login.buildResponseMsg(provider_id)
except:
raise
if session_key:
save_session(request, login,
session_key = session_key)
return return_saml_soap_response(login)
@csrf_exempt
def soap(request):
'''SAMLv1.1 soap endpoint implementation.
It should handle request for:
- artifact resolution
- logout
- and federation termination'''
soap_message = get_soap_message(request)
request_type = lasso.getRequestTypeFromSoapMsg(soap_message)
if request_type == lasso.REQUEST_TYPE_LOGIN:
return artifact_resolve(request, soap_message)
else:
message = _('ID-FFv1.2: soap request type %r is currently not supported') % request_type
logging.warning(message)
        raise NotImplementedError(message)
def check_delegated_authentication_permission(request):
return request.user.is_superuser()
def idp_sso(request, provider_id, user_id = None):
'''Initiate an SSO toward provider_id without a prior AuthnRequest
'''
    assert provider_id, 'You must call idp_sso with a provider_id parameter'
server = create_idff12_server(request, reverse(metadata))
login = lasso.Login(server)
liberty_provider = load_provider(request, provider_id, server=login.server)
service_provider = liberty_provider.service_provider
binding = service_provider.prefered_assertion_consumer_binding
nid_policy = service_provider.default_name_id_format
if user_id:
user = User.get(id = user_id)
if not check_delegated_authentication_permission(request):
logging.warning('ID-FFv1.2: %r tried to log as %r on %r but was forbidden' % (
request.user, user, provider_id))
return HttpResponseForbidden('You must be superuser to log as another user')
else:
user = request.user
load_federation(request, login, user)
if not liberty_provider:
message = _('ID-FFv1.2: provider %r unknown') % provider_id
logging.warning('ID-FFv1.2: provider %r unknown' % provider_id)
return HttpResponseForbidden(message)
login.initIdpInitiatedAuthnRequest(provider_id)
if binding == 'art':
login.request.protocolProfile = lasso.LIB_PROTOCOL_PROFILE_BRWS_ART
elif binding == 'post':
login.request.protocolProfile = lasso.LIB_PROTOCOL_PROFILE_BRWS_POST
else:
raise Exception('Unsupported binding %r' % binding)
if nid_policy == 'persistent':
login.request.nameIdPolicy = lasso.LIB_NAMEID_POLICY_TYPE_FEDERATED
elif nid_policy == 'transient':
login.request.nameIdPolicy = lasso.LIB_NAMEID_POLICY_TYPE_ONE_TIME
else:
message = _('ID-FFv1.2: default nameIdPolicy unsupported %r') % nid_policy
logging.error(message)
raise Exception(message)
login.processAuthnRequestMsg(None)
return sso_after_process_request(request, login,
consent_obtained = True, user = user, save = False)
urlpatterns = patterns('',
(r'^metadata$', metadata),
(r'^sso$', sso),
(r'^soap', soap),
(r'^idp_sso/(.*)$', idp_sso),
(r'^idp_sso/([^/]*)/([^/]*)$', idp_sso),
)
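# A hedged wiring sketch: these urlpatterns are typically mounted from a project
# urls.py via include(); the URL prefix below is illustrative, not mandated here.
#
#   from django.conf.urls.defaults import include, patterns
#   urlpatterns = patterns('',
#       (r'^idp/idff12/', include('authentic2.idp.saml.idff12_endpoints')),
#   )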
|
keimlink/django-cms
|
refs/heads/develop
|
cms/test_utils/fixtures/__init__.py
|
12133432
| |
mindnervestech/mnrp
|
refs/heads/master
|
addons/pos_discount/discount.py
|
315
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class pos_config(osv.osv):
_inherit = 'pos.config'
_columns = {
'discount_pc': fields.float('Discount Percentage', help='The discount percentage'),
'discount_product_id': fields.many2one('product.product','Discount Product', help='The product used to model the discount'),
}
_defaults = {
'discount_pc': 10,
}
|
tovmeod/anaf
|
refs/heads/drf
|
anaf/documents/templatetags/documents.py
|
1
|
"""
Documents templatetags
"""
from coffin import template
from anaf.core.rendering import render_to_string
from jinja2 import contextfunction, Markup
from django.template import RequestContext
register = template.Library()
@contextfunction
def documents_document_list(context, documents, skip_group=False):
"""Print a list of documents"""
request = context['request']
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('documents/tags/document_list',
{'documents': documents,
'skip_group': skip_group},
context_instance=RequestContext(request),
response_format=response_format))
register.object(documents_document_list)
@contextfunction
def documents_file_list(context, files, skip_group=False):
"Print a list of files"
request = context['request']
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('documents/tags/file_list',
{'files': files, 'skip_group': skip_group},
context_instance=RequestContext(request),
response_format=response_format))
register.object(documents_file_list)
@contextfunction
def documents_weblink_list(context, links, skip_group=False):
"Print a list of links"
request = context['request']
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('documents/tags/weblink_list',
{'links': links, 'skip_group': skip_group},
context_instance=RequestContext(request),
response_format=response_format))
register.object(documents_weblink_list)
@contextfunction
def documents_objects_list(context, objects, folder, skip_group=False):
"Print a list of all objects"
request = context['request']
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('documents/tags/objects_list',
{'objects': objects,
'skip_group': skip_group, 'folder': folder},
context_instance=RequestContext(request),
response_format=response_format))
register.object(documents_objects_list)
|
abdoosh00/edraak
|
refs/heads/master
|
common/lib/xmodule/xmodule/capa_base.py
|
5
|
"""Implements basics of Capa, including class CapaModule."""
import cgi
import copy
import datetime
import hashlib
import json
import logging
import os
import traceback
import struct
import sys
# We don't want to force a dependency on datadog, so make the import conditional
try:
from dogapi import dog_stats_api
except ImportError:
# pylint: disable=invalid-name
dog_stats_api = None
from pkg_resources import resource_string
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from capa.responsetypes import StudentInputError, \
ResponseError, LoncapaProblemError
from capa.util import convert_files_to_filenames
from .progress import Progress
from xmodule.exceptions import NotFoundError, ProcessingError
from xblock.fields import Scope, String, Boolean, Dict, Integer, Float
from .fields import Timedelta, Date
from django.utils.timezone import UTC
from .util.duedate import get_extended_due_date
log = logging.getLogger("edx.courseware")
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
# Generate this many different variants of problems with rerandomize=per_student
NUM_RANDOMIZATION_BINS = 20
# Never produce more than this many different seeds, no matter what.
MAX_RANDOMIZATION_BINS = 1000
def randomization_bin(seed, problem_id):
"""
Pick a randomization bin for the problem given the user's seed and a problem id.
We do this because we only want e.g. 20 randomizations of a problem to make analytics
interesting. To avoid having sets of students that always get the same problems,
we'll combine the system's per-student seed with the problem id in picking the bin.
"""
r_hash = hashlib.sha1()
r_hash.update(str(seed))
r_hash.update(str(problem_id))
# get the first few digits of the hash, convert to an int, then mod.
return int(r_hash.hexdigest()[:7], 16) % NUM_RANDOMIZATION_BINS
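# A hedged illustration of the binning behaviour (the seed and problem id below are
# made up): the same (seed, problem_id) pair always maps to the same bin, and every
# bin index falls in range(NUM_RANDOMIZATION_BINS).
#
#   bin_a = randomization_bin(12345, 'i4x://org/course/problem/p1')
#   bin_b = randomization_bin(12345, 'i4x://org/course/problem/p1')
#   assert bin_a == bin_b and 0 <= bin_a < NUM_RANDOMIZATION_BINS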
class Randomization(String):
"""
Define a field to store how to randomize a problem.
"""
def from_json(self, value):
if value in ("", "true"):
return "always"
elif value == "false":
return "per_student"
return value
to_json = from_json
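    # Illustrative mapping implied by from_json above (legacy JSON values on the
    # left, stored field values on the right):
    #   "" or "true"   -> "always"
    #   "false"        -> "per_student"
    #   anything else  -> returned unchanged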
class ComplexEncoder(json.JSONEncoder):
"""
Extend the JSON encoder to correctly handle complex numbers
"""
def default(self, obj):
"""
Print a nicely formatted complex number, or default to the JSON encoder
"""
if isinstance(obj, complex):
return u"{real:.7g}{imag:+.7g}*j".format(real=obj.real, imag=obj.imag)
return json.JSONEncoder.default(self, obj)
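    # A hedged usage sketch of the format above; the output shown is what the
    # format string produces for 1+2j:
    #   json.dumps(1 + 2j, cls=ComplexEncoder)  ->  '"1+2*j"'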
class CapaFields(object):
"""
Define the possible fields for a Capa problem
"""
display_name = String(
display_name=_("Display Name"),
help=_("This name appears in the horizontal navigation at the top of the page."),
scope=Scope.settings,
# it'd be nice to have a useful default but it screws up other things; so,
# use display_name_with_default for those
default="Blank Advanced Problem"
)
attempts = Integer(
help=_("Number of attempts taken by the student on this problem"),
default=0,
scope=Scope.user_state)
max_attempts = Integer(
display_name=_("Maximum Attempts"),
help=_("Defines the number of times a student can try to answer this problem. "
"If the value is not set, infinite attempts are allowed."),
values={"min": 0}, scope=Scope.settings
)
due = Date(help=_("Date that this problem is due by"), scope=Scope.settings)
extended_due = Date(
help=_("Date that this problem is due by for a particular student. This "
"can be set by an instructor, and will override the global due "
"date if it is set to a date that is later than the global due "
"date."),
default=None,
scope=Scope.user_state,
)
graceperiod = Timedelta(
help=_("Amount of time after the due date that submissions will be accepted"),
scope=Scope.settings
)
showanswer = String(
display_name=_("Show Answer"),
help=_("Defines when to show the answer to the problem. "
"A default value can be set in Advanced Settings."),
scope=Scope.settings,
default="finished",
values=[
{"display_name": _("Always"), "value": "always"},
{"display_name": _("Answered"), "value": "answered"},
{"display_name": _("Attempted"), "value": "attempted"},
{"display_name": _("Closed"), "value": "closed"},
{"display_name": _("Finished"), "value": "finished"},
{"display_name": _("Past Due"), "value": "past_due"},
{"display_name": _("Never"), "value": "never"}]
)
force_save_button = Boolean(
help=_("Whether to force the save button to appear on the page"),
scope=Scope.settings,
default=False
)
rerandomize = Randomization(
display_name=_("Randomization"),
help=_("Defines how often inputs are randomized when a student loads the problem. "
"This setting only applies to problems that can have randomly generated numeric values. "
"A default value can be set in Advanced Settings."),
default="never",
scope=Scope.settings,
values=[
{"display_name": _("Always"), "value": "always"},
{"display_name": _("On Reset"), "value": "onreset"},
{"display_name": _("Never"), "value": "never"},
{"display_name": _("Per Student"), "value": "per_student"}
]
)
data = String(help=_("XML data for the problem"), scope=Scope.content, default="<problem></problem>")
correct_map = Dict(help=_("Dictionary with the correctness of current student answers"),
scope=Scope.user_state, default={})
input_state = Dict(help=_("Dictionary for maintaining the state of inputtypes"), scope=Scope.user_state)
student_answers = Dict(help=_("Dictionary with the current student responses"), scope=Scope.user_state)
done = Boolean(help=_("Whether the student has answered the problem"), scope=Scope.user_state)
seed = Integer(help=_("Random seed for this student"), scope=Scope.user_state)
last_submission_time = Date(help=_("Last submission time"), scope=Scope.user_state)
submission_wait_seconds = Integer(
display_name=_("Timer Between Attempts"),
help=_("Seconds a student must wait between submissions for a problem with multiple attempts."),
scope=Scope.settings,
default=0)
weight = Float(
display_name=_("Problem Weight"),
help=_("Defines the number of points each problem is worth. "
"If the value is not set, each response field in the problem is worth one point."),
values={"min": 0, "step": .1},
scope=Scope.settings
)
markdown = String(help=_("Markdown source of this module"), default=None, scope=Scope.settings)
source_code = String(
help=_("Source code for LaTeX and Word problems. This feature is not well-supported."),
scope=Scope.settings
)
text_customization = Dict(
help=_("String customization substitutions for particular locations"),
scope=Scope.settings
# TODO: someday it should be possible to not duplicate this definition here
# and in inheritance.py
)
use_latex_compiler = Boolean(
help=_("Enable LaTeX templates?"),
default=False,
scope=Scope.settings
)
matlab_api_key = String(
display_name="Matlab API key",
help="Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. "
"This key is granted for exclusive use by this course for the specified duration. "
"Please do not share the API key with other courses and notify MathWorks immediately "
"if you believe the key is exposed or compromised. To obtain a key for your course, "
"or to report and issue, please contact moocsupport@mathworks.com",
scope=Scope.settings
)
class CapaMixin(CapaFields):
"""
Core logic for Capa Problem, which can be used by XModules or XBlocks.
"""
def __init__(self, *args, **kwargs):
super(CapaMixin, self).__init__(*args, **kwargs)
due_date = get_extended_due_date(self)
if self.graceperiod is not None and due_date:
self.close_date = due_date + self.graceperiod
else:
self.close_date = due_date
if self.seed is None:
self.choose_new_seed()
# Need the problem location in openendedresponse to send out. Adding
# it to the system here seems like the least clunky way to get it
# there.
self.runtime.set('location', self.location.to_deprecated_string())
try:
# TODO (vshnayder): move as much as possible of this work and error
# checking to descriptor load time
self.lcp = self.new_lcp(self.get_state_for_lcp())
# At this point, we need to persist the randomization seed
# so that when the problem is re-loaded (to check/view/save)
# it stays the same.
# However, we do not want to write to the database
# every time the module is loaded.
# So we set the seed ONLY when there is not one set already
if self.seed is None:
self.seed = self.lcp.seed
except Exception as err: # pylint: disable=broad-except
msg = u'cannot create LoncapaProblem {loc}: {err}'.format(
loc=self.location.to_deprecated_string(), err=err)
# TODO (vshnayder): do modules need error handlers too?
# We shouldn't be switching on DEBUG.
if self.runtime.DEBUG:
log.warning(msg)
# TODO (vshnayder): This logic should be general, not here--and may
# want to preserve the data instead of replacing it.
# e.g. in the CMS
msg = u'<p>{msg}</p>'.format(msg=cgi.escape(msg))
msg += u'<p><pre>{tb}</pre></p>'.format(
# just the traceback, no message - it is already present above
tb=cgi.escape(
u''.join(['Traceback (most recent call last):\n'] +
traceback.format_tb(sys.exc_info()[2])))
)
# create a dummy problem with error message instead of failing
problem_text = (u'<problem><text><span class="inline-error">'
u'Problem {url} has an error:</span>{msg}</text></problem>'.format(
url=self.location.to_deprecated_string(),
msg=msg)
)
self.lcp = self.new_lcp(self.get_state_for_lcp(), text=problem_text)
else:
# add extra info and raise
raise Exception(msg), None, sys.exc_info()[2]
self.set_state_from_lcp()
assert self.seed is not None
def choose_new_seed(self):
"""
Choose a new seed.
"""
if self.rerandomize == 'never':
self.seed = 1
elif self.rerandomize == "per_student" and hasattr(self.runtime, 'seed'):
# see comment on randomization_bin
self.seed = randomization_bin(self.runtime.seed, unicode(self.location).encode('utf-8'))
else:
self.seed = struct.unpack('i', os.urandom(4))[0]
# So that sandboxed code execution can be cached, but still have an interesting
# number of possibilities, cap the number of different random seeds.
self.seed %= MAX_RANDOMIZATION_BINS
def new_lcp(self, state, text=None):
"""
Generate a new Loncapa Problem
"""
if text is None:
text = self.data
capa_system = LoncapaSystem(
ajax_url=self.runtime.ajax_url,
anonymous_student_id=self.runtime.anonymous_student_id,
cache=self.runtime.cache,
can_execute_unsafe_code=self.runtime.can_execute_unsafe_code,
DEBUG=self.runtime.DEBUG,
filestore=self.runtime.filestore,
i18n=self.runtime.service(self, "i18n"),
node_path=self.runtime.node_path,
render_template=self.runtime.render_template,
seed=self.runtime.seed, # Why do we do this if we have self.seed?
STATIC_URL=self.runtime.STATIC_URL,
xqueue=self.runtime.xqueue,
matlab_api_key=self.matlab_api_key
)
return LoncapaProblem(
problem_text=text,
id=self.location.html_id(),
state=state,
seed=self.seed,
capa_system=capa_system,
)
def get_state_for_lcp(self):
"""
        Return a dictionary holding the state of the module.
"""
return {
'done': self.done,
'correct_map': self.correct_map,
'student_answers': self.student_answers,
'input_state': self.input_state,
'seed': self.seed,
}
def set_state_from_lcp(self):
"""
Set the module's state from the settings in `self.lcp`
"""
lcp_state = self.lcp.get_state()
self.done = lcp_state['done']
self.correct_map = lcp_state['correct_map']
self.input_state = lcp_state['input_state']
self.student_answers = lcp_state['student_answers']
self.seed = lcp_state['seed']
def set_last_submission_time(self):
"""
Set the module's last submission time (when the problem was checked)
"""
self.last_submission_time = datetime.datetime.now(UTC())
def get_score(self):
"""
Access the problem's score
"""
return self.lcp.get_score()
def max_score(self):
"""
Access the problem's max score
"""
return self.lcp.get_max_score()
def get_progress(self):
"""
For now, just return score / max_score
"""
score_dict = self.get_score()
score = score_dict['score']
total = score_dict['total']
if total > 0:
if self.weight is not None:
# Progress objects expect total > 0
if self.weight == 0:
return None
# scale score and total by weight/total:
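                # e.g. a raw score of 2 out of 4 with weight 10 is reported as 5 out of 10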
score = score * self.weight / total
total = self.weight
try:
return Progress(score, total)
except (TypeError, ValueError):
log.exception("Got bad progress")
return None
return None
def get_html(self):
"""
Return some html with data about the module
"""
progress = self.get_progress()
return self.runtime.render_template('problem_ajax.html', {
'element_id': self.location.html_id(),
'id': self.location.to_deprecated_string(),
'ajax_url': self.runtime.ajax_url,
'progress_status': Progress.to_js_status_str(progress),
'progress_detail': Progress.to_js_detail_str(progress),
})
def check_button_name(self):
"""
Determine the name for the "check" button.
Usually it is just "Check", but if this is the student's
final attempt, change the name to "Final Check".
The text can be customized by the text_customization setting.
"""
# The logic flow is a little odd so that _('xxx') strings can be found for
# translation while also running _() just once for each string.
_ = self.runtime.service(self, "i18n").ugettext
check = _('Check')
final_check = _('Final Check')
# Apply customizations if present
if 'custom_check' in self.text_customization:
check = _(self.text_customization.get('custom_check'))
if 'custom_final_check' in self.text_customization:
final_check = _(self.text_customization.get('custom_final_check'))
# TODO: need a way to get the customized words into the list of
# words to be translated
if self.max_attempts is not None and self.attempts >= self.max_attempts - 1:
return final_check
else:
return check
def check_button_checking_name(self):
"""
Return the "checking..." text for the "check" button.
After the user presses the "check" button, the button will briefly
display the value returned by this function until a response is
received by the server.
The text can be customized by the text_customization setting.
"""
# Apply customizations if present
if 'custom_checking' in self.text_customization:
return self.text_customization.get('custom_checking')
_ = self.runtime.service(self, "i18n").ugettext
return _('Checking...')
def should_show_check_button(self):
"""
Return True/False to indicate whether to show the "Check" button.
"""
submitted_without_reset = (self.is_submitted() and self.rerandomize == "always")
# If the problem is closed (past due / too many attempts)
# then we do NOT show the "check" button
# Also, do not show the "check" button if we're waiting
# for the user to reset a randomized problem
if self.closed() or submitted_without_reset:
return False
else:
return True
def should_show_reset_button(self):
"""
Return True/False to indicate whether to show the "Reset" button.
"""
is_survey_question = (self.max_attempts == 0)
if self.rerandomize in ["always", "onreset"]:
# If the problem is closed (and not a survey question with max_attempts==0),
# then do NOT show the reset button.
# If the problem hasn't been submitted yet, then do NOT show
# the reset button.
if (self.closed() and not is_survey_question) or not self.is_submitted():
return False
else:
return True
# Only randomized problems need a "reset" button
else:
return False
def should_show_save_button(self):
"""
Return True/False to indicate whether to show the "Save" button.
"""
# If the user has forced the save button to display,
# then show it as long as the problem is not closed
# (past due / too many attempts)
if self.force_save_button:
return not self.closed()
else:
is_survey_question = (self.max_attempts == 0)
needs_reset = self.is_submitted() and self.rerandomize == "always"
# If the student has unlimited attempts, and their answers
# are not randomized, then we do not need a save button
# because they can use the "Check" button without consequences.
#
# The consequences we want to avoid are:
# * Using up an attempt (if max_attempts is set)
# * Changing the current problem, and no longer being
# able to view it (if rerandomize is "always")
#
            # In those cases, the if statement below is false,
# and the save button can still be displayed.
#
if self.max_attempts is None and self.rerandomize != "always":
return False
# If the problem is closed (and not a survey question with max_attempts==0),
# then do NOT show the save button
# If we're waiting for the user to reset a randomized problem
# then do NOT show the save button
elif (self.closed() and not is_survey_question) or needs_reset:
return False
else:
return True
def handle_problem_html_error(self, err):
"""
Create a dummy problem to represent any errors.
Change our problem to a dummy problem containing a warning message to
display to users. Returns the HTML to show to users
`err` is the Exception encountered while rendering the problem HTML.
"""
log.exception(err.message)
# TODO (vshnayder): another switch on DEBUG.
if self.runtime.DEBUG:
msg = (
u'[courseware.capa.capa_module] <font size="+1" color="red">'
u'Failed to generate HTML for problem {url}</font>'.format(
url=cgi.escape(self.location.to_deprecated_string()))
)
msg += u'<p>Error:</p><p><pre>{msg}</pre></p>'.format(msg=cgi.escape(err.message))
msg += u'<p><pre>{tb}</pre></p>'.format(tb=cgi.escape(traceback.format_exc()))
html = msg
else:
# We're in non-debug mode, and possibly even in production. We want
            # to avoid bricking the problem as much as possible.
# Presumably, student submission has corrupted LoncapaProblem HTML.
# First, pull down all student answers
student_answers = self.lcp.student_answers
answer_ids = student_answers.keys()
# Some inputtypes, such as dynamath, have additional "hidden" state that
# is not exposed to the student. Keep those hidden
# TODO: Use regex, e.g. 'dynamath' is suffix at end of answer_id
hidden_state_keywords = ['dynamath']
for answer_id in answer_ids:
for hidden_state_keyword in hidden_state_keywords:
if answer_id.find(hidden_state_keyword) >= 0:
student_answers.pop(answer_id)
# Next, generate a fresh LoncapaProblem
self.lcp = self.new_lcp(None)
self.set_state_from_lcp()
# Prepend a scary warning to the student
_ = self.runtime.service(self, "i18n").ugettext
warning_msg = _("Warning: The problem has been reset to its initial state!")
warning = '<div class="capa_reset"> <h2> ' + warning_msg + '</h2>'
# Translators: Following this message, there will be a bulleted list of items.
warning_msg = _("The problem's state was corrupted by an invalid submission. The submission consisted of:")
warning += warning_msg + '<ul>'
for student_answer in student_answers.values():
if student_answer != '':
warning += '<li>' + cgi.escape(student_answer) + '</li>'
warning_msg = _('If this error persists, please contact the course staff.')
warning += '</ul>' + warning_msg + '</div>'
html = warning
try:
html += self.lcp.get_html()
except Exception: # pylint: disable=broad-except
# Couldn't do it. Give up.
log.exception("Unable to generate html from LoncapaProblem")
raise
return html
def get_problem_html(self, encapsulate=True):
"""
Return html for the problem.
Adds check, reset, save buttons as necessary based on the problem config and state.
"""
try:
html = self.lcp.get_html()
# If we cannot construct the problem HTML,
# then generate an error message instead.
except Exception as err: # pylint: disable=broad-except
html = self.handle_problem_html_error(err)
# The convention is to pass the name of the check button if we want
        # to show a check button, and False otherwise. This works because
# non-empty strings evaluate to True. We use the same convention
# for the "checking" state text.
if self.should_show_check_button():
check_button = self.check_button_name()
check_button_checking = self.check_button_checking_name()
else:
check_button = False
check_button_checking = False
content = {
'name': self.display_name_with_default,
'html': html,
'weight': self.weight,
}
context = {
'problem': content,
'id': self.location.to_deprecated_string(),
'check_button': check_button,
'check_button_checking': check_button_checking,
'reset_button': self.should_show_reset_button(),
'save_button': self.should_show_save_button(),
'answer_available': self.answer_available(),
'attempts_used': self.attempts,
'attempts_allowed': self.max_attempts,
}
html = self.runtime.render_template('problem.html', context)
if encapsulate:
html = u'<div id="problem_{id}" class="problem" data-url="{ajax_url}">'.format(
id=self.location.html_id(), ajax_url=self.runtime.ajax_url
) + html + "</div>"
# Now do all the substitutions which the LMS module_render normally does, but
# we need to do here explicitly since we can get called for our HTML via AJAX
html = self.runtime.replace_urls(html)
if self.runtime.replace_course_urls:
html = self.runtime.replace_course_urls(html)
if self.runtime.replace_jump_to_id_urls:
html = self.runtime.replace_jump_to_id_urls(html)
return html
def is_past_due(self):
"""
Is it now past this problem's due date, including grace period?
"""
return (self.close_date is not None and
datetime.datetime.now(UTC()) > self.close_date)
def closed(self):
"""
Is the student still allowed to submit answers?
"""
if self.max_attempts is not None and self.attempts >= self.max_attempts:
return True
if self.is_past_due():
return True
return False
def is_submitted(self):
"""
        Used to decide whether to show or hide the RESET and CHECK buttons.
        Indicates only that the student has submitted the problem; the
        submission may be completely wrong. Pressing the RESET button makes
        this function return False.
"""
# used by conditional module
return self.lcp.done
def is_attempted(self):
"""
Has the problem been attempted?
used by conditional module
"""
return self.attempts > 0
def is_correct(self):
"""
True iff full points
"""
score_dict = self.get_score()
return score_dict['score'] == score_dict['total']
def answer_available(self):
"""
Is the user allowed to see an answer?
"""
if self.showanswer == '':
return False
elif self.showanswer == "never":
return False
elif self.runtime.user_is_staff:
# This is after the 'never' check because admins can see the answer
# unless the problem explicitly prevents it
return True
elif self.showanswer == 'attempted':
return self.attempts > 0
elif self.showanswer == 'answered':
# NOTE: this is slightly different from 'attempted' -- resetting the problems
# makes lcp.done False, but leaves attempts unchanged.
return self.lcp.done
elif self.showanswer == 'closed':
return self.closed()
elif self.showanswer == 'finished':
return self.closed() or self.is_correct()
elif self.showanswer == 'past_due':
return self.is_past_due()
elif self.showanswer == 'always':
return True
return False
def update_score(self, data):
"""
Delivers grading response (e.g. from asynchronous code checking) to
the capa problem, so its score can be updated
        'data' must have the keys 'queuekey' and 'xqueue_body'; 'xqueue_body'
        is a string containing the grader's response.
No ajax return is needed. Return empty dict.
"""
queuekey = data['queuekey']
score_msg = data['xqueue_body']
self.lcp.update_score(score_msg, queuekey)
self.set_state_from_lcp()
self.publish_grade()
return dict() # No AJAX return is needed
def handle_ungraded_response(self, data):
"""
Delivers a response from the XQueue to the capa problem
The score of the problem will not be updated
Args:
- data (dict) must contain keys:
queuekey - a key specific to this response
xqueue_body - the body of the response
Returns:
empty dictionary
No ajax return is needed, so an empty dict is returned
"""
queuekey = data['queuekey']
score_msg = data['xqueue_body']
# pass along the xqueue message to the problem
self.lcp.ungraded_response(score_msg, queuekey)
self.set_state_from_lcp()
return dict()
def handle_input_ajax(self, data):
"""
Handle ajax calls meant for a particular input in the problem
Args:
- data (dict) - data that should be passed to the input
Returns:
- dict containing the response from the input
"""
response = self.lcp.handle_input_ajax(data)
# save any state changes that may occur
self.set_state_from_lcp()
return response
def get_answer(self, _data):
"""
For the "show answer" button.
Returns the answers: {'answers' : answers}
"""
event_info = dict()
event_info['problem_id'] = self.location.to_deprecated_string()
self.track_function_unmask('showanswer', event_info)
if not self.answer_available():
raise NotFoundError('Answer is not available')
else:
answers = self.lcp.get_question_answers()
self.set_state_from_lcp()
# answers (eg <solution>) may have embedded images
# but be careful, some problems are using non-string answer dicts
new_answers = dict()
for answer_id in answers:
try:
new_answer = {answer_id: self.runtime.replace_urls(answers[answer_id])}
except TypeError:
log.debug(u'Unable to perform URL substitution on answers[%s]: %s',
answer_id, answers[answer_id])
new_answer = {answer_id: answers[answer_id]}
new_answers.update(new_answer)
return {'answers': new_answers}
# Figure out if we should move these to capa_problem?
def get_problem(self, _data):
"""
Return results of get_problem_html, as a simple dict for json-ing.
{ 'html': <the-html> }
Used if we want to reconfirm we have the right thing e.g. after
several AJAX calls.
"""
return {'html': self.get_problem_html(encapsulate=False)}
@staticmethod
def make_dict_of_responses(data):
"""
Make dictionary of student responses (aka "answers")
`data` is POST dictionary (webob.multidict.MultiDict).
The `data` dict has keys of the form 'x_y', which are mapped
to key 'y' in the returned dict. For example,
'input_1_2_3' would be mapped to '1_2_3' in the returned dict.
Some inputs always expect a list in the returned dict
(e.g. checkbox inputs). The convention is that
keys in the `data` dict that end with '[]' will always
have list values in the returned dict.
For example, if the `data` dict contains {'input_1[]': 'test' }
then the output dict would contain {'1': ['test'] }
(the value is a list).
Some other inputs such as ChoiceTextInput expect a dict of values in the returned
        dict. If the key ends with '{}', then we assume that the value is a
        JSON-encoded dict and deserialize it.
For example, if the `data` dict contains {'input_1{}': '{"1_2_1": 1}'}
then the output dict would contain {'1': {"1_2_1": 1} }
(the value is a dictionary)
Raises an exception if:
-A key in the `data` dictionary does not contain at least one underscore
(e.g. "input" is invalid, but "input_1" is valid)
-Two keys end up with the same name in the returned dict.
(e.g. 'input_1' and 'input_1[]', which both get mapped to 'input_1'
in the returned dict)
"""
answers = dict()
# webob.multidict.MultiDict is a view of a list of tuples,
# so it will return a multi-value key once for each value.
# We only want to consider each key a single time, so we use set(data.keys())
for key in set(data.keys()):
# e.g. input_resistor_1 ==> resistor_1
_, _, name = key.partition('_') # pylint: disable=redefined-outer-name
# If key has no underscores, then partition
# will return (key, '', '')
# We detect this and raise an error
if not name:
raise ValueError(u"{key} must contain at least one underscore".format(key=key))
else:
# This allows for answers which require more than one value for
# the same form input (e.g. checkbox inputs). The convention is that
# if the name ends with '[]' (which looks like an array), then the
# answer will be an array.
                # if the name ends with '{}' (which looks like a dict),
# then the answer will be a dict
is_list_key = name.endswith('[]')
is_dict_key = name.endswith('{}')
name = name[:-2] if is_list_key or is_dict_key else name
if is_list_key:
val = data.getall(key)
elif is_dict_key:
try:
val = json.loads(data[key])
# If the submission wasn't deserializable, raise an error.
except(KeyError, ValueError):
raise ValueError(
u"Invalid submission: {val} for {key}".format(val=data[key], key=key)
)
else:
val = data[key]
# If the name already exists, then we don't want
# to override it. Raise an error instead
if name in answers:
raise ValueError(u"Key {name} already exists in answers dict".format(name=name))
else:
answers[name] = val
return answers
def publish_grade(self):
"""
Publishes the student's current grade to the system as an event
"""
score = self.lcp.get_score()
self.runtime.publish(
self,
'grade',
{
'value': score['score'],
'max_value': score['total'],
}
)
return {'grade': score['score'], 'max_grade': score['total']}
# pylint: disable=too-many-statements
def check_problem(self, data, override_time=False):
"""
Checks whether answers to a problem are correct
Returns a map of correct/incorrect answers:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string,
'contents' : html}
"""
event_info = dict()
event_info['state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.to_deprecated_string()
answers = self.make_dict_of_responses(data)
answers_without_files = convert_files_to_filenames(answers)
event_info['answers'] = answers_without_files
metric_name = u'capa.check_problem.{}'.format
# Can override current time
current_time = datetime.datetime.now(UTC())
if override_time is not False:
current_time = override_time
_ = self.runtime.service(self, "i18n").ugettext
# Too late. Cannot submit
if self.closed():
event_info['failure'] = 'closed'
self.track_function_unmask('problem_check_fail', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:failed', u'failure:closed'])
raise NotFoundError(_("Problem is closed."))
# Problem submitted. Student should reset before checking again
if self.done and self.rerandomize == "always":
event_info['failure'] = 'unreset'
self.track_function_unmask('problem_check_fail', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:failed', u'failure:unreset'])
raise NotFoundError(_("Problem must be reset before it can be checked again."))
# Problem queued. Students must wait a specified waittime before they are allowed to submit
# IDEA: consider stealing code from below: pretty-print of seconds, cueing of time remaining
if self.lcp.is_queued():
prev_submit_time = self.lcp.get_recentmost_queuetime()
waittime_between_requests = self.runtime.xqueue['waittime']
if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
msg = _(u"You must wait at least {wait} seconds between submissions.").format(
wait=waittime_between_requests)
return {'success': msg, 'html': ''}
# Wait time between resets: check if is too soon for submission.
if self.last_submission_time is not None and self.submission_wait_seconds != 0:
# pylint: disable=maybe-no-member
# pylint is unable to verify that .total_seconds() exists
if (current_time - self.last_submission_time).total_seconds() < self.submission_wait_seconds:
remaining_secs = int(self.submission_wait_seconds - (current_time - self.last_submission_time).total_seconds())
msg = _(u'You must wait at least {wait_secs} between submissions. {remaining_secs} remaining.').format(
wait_secs=self.pretty_print_seconds(self.submission_wait_seconds),
remaining_secs=self.pretty_print_seconds(remaining_secs))
return {
'success': msg,
'html': ''
}
try:
correct_map = self.lcp.grade_answers(answers)
self.attempts = self.attempts + 1
self.lcp.done = True
self.set_state_from_lcp()
self.set_last_submission_time()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
log.warning("StudentInputError in capa_module:problem_check",
exc_info=True)
# Save the user's state before failing
self.set_state_from_lcp()
# If the user is a staff member, include
# the full exception, including traceback,
# in the response
if self.runtime.user_is_staff:
msg = u"Staff debug info: {tb}".format(tb=cgi.escape(traceback.format_exc()))
# Otherwise, display just an error message,
# without a stack trace
else:
# Translators: {msg} will be replaced with a problem's error message.
msg = _(u"Error: {msg}").format(msg=inst.message)
return {'success': msg}
except Exception as err:
# Save the user's state before failing
self.set_state_from_lcp()
if self.runtime.DEBUG:
msg = u"Error checking problem: {}".format(err.message)
msg += u'\nTraceback:\n{}'.format(traceback.format_exc())
return {'success': msg}
raise
published_grade = self.publish_grade()
# success = correct if ALL questions in this problem are correct
success = 'correct'
for answer_id in correct_map:
if not correct_map.is_correct(answer_id):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
# 'success' will always be incorrect
event_info['grade'] = published_grade['grade']
event_info['max_grade'] = published_grade['max_grade']
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
event_info['submission'] = self.get_submission_metadata_safe(answers_without_files, correct_map)
self.track_function_unmask('problem_check', event_info)
if dog_stats_api:
dog_stats_api.increment(metric_name('checks'), tags=[u'result:success'])
dog_stats_api.histogram(
metric_name('correct_pct'),
float(published_grade['grade']) / published_grade['max_grade'],
)
dog_stats_api.histogram(
metric_name('attempts'),
self.attempts,
)
if hasattr(self.runtime, 'psychometrics_handler'): # update PsychometricsData using callback
self.runtime.psychometrics_handler(self.get_state_for_lcp())
# render problem into HTML
html = self.get_problem_html(encapsulate=False)
return {
'success': success,
'contents': html
}
# pylint: enable=too-many-statements
def track_function_unmask(self, title, event_info):
"""
All calls to runtime.track_function route through here so that the
choice names can be unmasked.
"""
# Do the unmask translates on a copy of event_info,
# avoiding problems where an event_info is unmasked twice.
event_unmasked = copy.deepcopy(event_info)
self.unmask_event(event_unmasked)
self.runtime.track_function(title, event_unmasked)
def unmask_event(self, event_info):
"""
Translates in-place the event_info to account for masking
and adds information about permutation options in force.
"""
# answers is like: {u'i4x-Stanford-CS99-problem-dada976e76f34c24bc8415039dee1300_2_1': u'mask_0'}
# Each response values has an answer_id which matches the key in answers.
for response in self.lcp.responders.values():
# Un-mask choice names in event_info for masked responses.
if response.has_mask():
# We don't assume much about the structure of event_info,
# but check for the existence of the things we need to un-mask.
# Look for answers/id
answer = event_info.get('answers', {}).get(response.answer_id)
if answer is not None:
event_info['answers'][response.answer_id] = response.unmask_name(answer)
# Look for state/student_answers/id
answer = event_info.get('state', {}).get('student_answers', {}).get(response.answer_id)
if answer is not None:
event_info['state']['student_answers'][response.answer_id] = response.unmask_name(answer)
# Look for old_state/student_answers/id -- parallel to the above case, happens on reset
answer = event_info.get('old_state', {}).get('student_answers', {}).get(response.answer_id)
if answer is not None:
event_info['old_state']['student_answers'][response.answer_id] = response.unmask_name(answer)
# Add 'permutation' to event_info for permuted responses.
permutation_option = None
if response.has_shuffle():
permutation_option = 'shuffle'
elif response.has_answerpool():
permutation_option = 'answerpool'
if permutation_option is not None:
# Add permutation record tuple: (one of:'shuffle'/'answerpool', [as-displayed list])
                if 'permutation' not in event_info:
event_info['permutation'] = {}
event_info['permutation'][response.answer_id] = (permutation_option, response.unmask_order())
def pretty_print_seconds(self, num_seconds):
"""
        Returns the time duration nicely formatted, e.g. "3 minutes 4 seconds".
"""
# Here _ is the N variant ungettext that does pluralization with a 3-arg call
_ = self.runtime.service(self, "i18n").ungettext
hours = num_seconds // 3600
sub_hour = num_seconds % 3600
minutes = sub_hour // 60
seconds = sub_hour % 60
display = ""
if hours > 0:
display += _("{num_hour} hour", "{num_hour} hours", hours).format(num_hour=hours)
if minutes > 0:
if display != "":
display += " "
# translators: "minute" refers to a minute of time
display += _("{num_minute} minute", "{num_minute} minutes", minutes).format(num_minute=minutes)
# Taking care to make "0 seconds" instead of "" for 0 time
if seconds > 0 or (hours == 0 and minutes == 0):
if display != "":
display += " "
# translators: "second" refers to a second of time
display += _("{num_second} second", "{num_second} seconds", seconds).format(num_second=seconds)
return display
def get_submission_metadata_safe(self, answers, correct_map):
"""
Ensures that no exceptions are thrown while generating input metadata summaries. Returns the
summary if it is successfully created, otherwise an empty dictionary.
"""
try:
return self.get_submission_metadata(answers, correct_map)
except Exception: # pylint: disable=broad-except
# NOTE: The above process requires deep inspection of capa structures that may break for some
# uncommon problem types. Ensure that it does not prevent answer submission in those
# cases. Any occurrences of errors in this block should be investigated and resolved.
log.exception('Unable to gather submission metadata, it will not be included in the event.')
return {}
def get_submission_metadata(self, answers, correct_map):
"""
Return a map of inputs to their corresponding summarized metadata.
Returns:
A map whose keys are a unique identifier for the input (in this case a capa input_id) and
whose values are:
            question (str): The prompt that was presented to the student. It
                corresponds to the label of the input.
            answer (mixed): The answer the student provided. This may be a rich
                structure; however, it must be JSON-serializable.
response_type (str): The XML tag of the capa response type.
input_type (str): The XML tag of the capa input type.
correct (bool): Whether or not the provided answer is correct. Will be an empty
string if correctness could not be determined.
variant (str): In some cases the same question can have several different variants.
This string should uniquely identify the variant of the question that was answered.
In the capa context this corresponds to the `seed`.
This function attempts to be very conservative and make very few assumptions about the structure
of the problem. If problem related metadata cannot be located it should be replaced with empty
strings ''.
"""
input_metadata = {}
for input_id, internal_answer in answers.iteritems():
answer_input = self.lcp.inputs.get(input_id)
if answer_input is None:
log.warning('Input id %s is not mapped to an input type.', input_id)
answer_response = None
for response, responder in self.lcp.responders.iteritems():
for other_input_id in self.lcp.responder_answers[response]:
if other_input_id == input_id:
answer_response = responder
if answer_response is None:
log.warning('Answer responder could not be found for input_id %s.', input_id)
user_visible_answer = internal_answer
if hasattr(answer_input, 'get_user_visible_answer'):
user_visible_answer = answer_input.get_user_visible_answer(internal_answer)
# If this problem has rerandomize enabled, then it will generate N variants of the
# question, one per unique seed value. In this case we would like to know which
# variant was selected. Ideally it would be nice to have the exact question that
# was presented to the user, with values interpolated etc, but that can be done
# later if necessary.
variant = ''
if self.rerandomize != 'never':
variant = self.seed
is_correct = correct_map.is_correct(input_id)
if is_correct is None:
is_correct = ''
input_metadata[input_id] = {
'question': getattr(answer_input, 'loaded_attributes', {}).get('label', ''),
'answer': user_visible_answer,
'response_type': getattr(getattr(answer_response, 'xml', None), 'tag', ''),
'input_type': getattr(answer_input, 'tag', ''),
'correct': is_correct,
'variant': variant,
}
return input_metadata
def rescore_problem(self):
"""
Checks whether the existing answers to a problem are correct.
This is called when the correct answer to a problem has been changed,
and the grade should be re-evaluated.
Returns a dict with one key:
{'success' : 'correct' | 'incorrect' | AJAX alert msg string }
Raises NotFoundError if called on a problem that has not yet been
answered, or NotImplementedError if it's a problem that cannot be rescored.
Returns the error messages for exceptions occurring while performing
the rescoring, rather than throwing them.
"""
event_info = {'state': self.lcp.get_state(), 'problem_id': self.location.to_deprecated_string()}
_ = self.runtime.service(self, "i18n").ugettext
if not self.lcp.supports_rescoring():
event_info['failure'] = 'unsupported'
self.track_function_unmask('problem_rescore_fail', event_info)
# Translators: 'rescoring' refers to the act of re-submitting a student's solution so it can get a new score.
raise NotImplementedError(_("Problem's definition does not support rescoring."))
if not self.done:
event_info['failure'] = 'unanswered'
self.track_function_unmask('problem_rescore_fail', event_info)
raise NotFoundError(_("Problem must be answered before it can be graded again."))
# get old score, for comparison:
orig_score = self.lcp.get_score()
event_info['orig_score'] = orig_score['score']
event_info['orig_total'] = orig_score['total']
try:
correct_map = self.lcp.rescore_existing_answers()
except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
log.warning("Input error in capa_module:problem_rescore", exc_info=True)
event_info['failure'] = 'input_error'
self.track_function_unmask('problem_rescore_fail', event_info)
return {'success': u"Error: {0}".format(inst.message)}
except Exception as err:
event_info['failure'] = 'unexpected'
self.track_function_unmask('problem_rescore_fail', event_info)
if self.runtime.DEBUG:
msg = u"Error checking problem: {0}".format(err.message)
msg += u'\nTraceback:\n' + traceback.format_exc()
return {'success': msg}
raise
# rescoring should have no effect on attempts, so don't
# need to increment here, or mark done. Just save.
self.set_state_from_lcp()
self.publish_grade()
new_score = self.lcp.get_score()
event_info['new_score'] = new_score['score']
event_info['new_total'] = new_score['total']
# success = correct if ALL questions in this problem are correct
success = 'correct'
for answer_id in correct_map:
if not correct_map.is_correct(answer_id):
success = 'incorrect'
# NOTE: We are logging both full grading and queued-grading submissions. In the latter,
# 'success' will always be incorrect
event_info['correct_map'] = correct_map.get_dict()
event_info['success'] = success
event_info['attempts'] = self.attempts
self.track_function_unmask('problem_rescore', event_info)
# psychometrics should be called on rescoring requests in the same way as check-problem
if hasattr(self.runtime, 'psychometrics_handler'): # update PsychometricsData using callback
self.runtime.psychometrics_handler(self.get_state_for_lcp())
return {'success': success}
def save_problem(self, data):
"""
Save the passed in answers.
Returns a dict { 'success' : bool, 'msg' : message }
The message is informative on success, and an error message on failure.
"""
event_info = dict()
event_info['state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.to_deprecated_string()
answers = self.make_dict_of_responses(data)
event_info['answers'] = answers
_ = self.runtime.service(self, "i18n").ugettext
# Too late. Cannot submit
if self.closed() and not self.max_attempts == 0:
event_info['failure'] = 'closed'
self.track_function_unmask('save_problem_fail', event_info)
return {
'success': False,
# Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
'msg': _("Problem is closed.")
}
# Problem submitted. Student should reset before saving
# again.
if self.done and self.rerandomize == "always":
event_info['failure'] = 'done'
self.track_function_unmask('save_problem_fail', event_info)
return {
'success': False,
'msg': _("Problem needs to be reset prior to save.")
}
self.lcp.student_answers = answers
self.set_state_from_lcp()
self.track_function_unmask('save_problem_success', event_info)
msg = _("Your answers have been saved.")
if not self.max_attempts == 0:
msg = _("Your answers have been saved but not graded. Click 'Check' to grade them.")
return {
'success': True,
'msg': msg,
}
def reset_problem(self, _data):
"""
Changes problem state to unfinished -- removes student answers,
and causes problem to rerender itself.
Returns a dictionary of the form:
{'success': True/False,
'html': Problem HTML string }
If an error occurs, the dictionary will also have an
`error` key containing an error message.
"""
event_info = dict()
event_info['old_state'] = self.lcp.get_state()
event_info['problem_id'] = self.location.to_deprecated_string()
_ = self.runtime.service(self, "i18n").ugettext
if self.closed():
event_info['failure'] = 'closed'
self.track_function_unmask('reset_problem_fail', event_info)
return {
'success': False,
# Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
'error': _("Problem is closed."),
}
if not self.done:
event_info['failure'] = 'not_done'
self.track_function_unmask('reset_problem_fail', event_info)
return {
'success': False,
'error': _("Refresh the page and make an attempt before resetting."),
}
if self.rerandomize in ["always", "onreset"]:
# Reset random number generator seed.
self.choose_new_seed()
# Generate a new problem with either the previous seed or a new seed
self.lcp = self.new_lcp(None)
# Pull in the new problem seed
self.set_state_from_lcp()
event_info['new_state'] = self.lcp.get_state()
self.track_function_unmask('reset_problem', event_info)
return {
'success': True,
'html': self.get_problem_html(encapsulate=False),
}
|
google-research/generic-adaptive-restarts
|
refs/heads/master
|
pdhg/pdhg_linear_programming.py
|
1
|
# coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linear programming with PDHG."""
import collections
from . import restarted_pdhg
import h5py
import numpy as np
import odl
import pandas as pd
import scipy.sparse
class LinearOnBox(odl.solvers.functional.Functional):
"""A linear function within a box, and infinity outside the box.
Another way to say this is that this function is the sum of a linear function
and the indicator function of the box.
"""
def __init__(self, space, linear_coefficients, lower, upper):
super(LinearOnBox, self).__init__(space, linear=False)
self.lower = lower
self.upper = upper
self.linear_coefficients = space.element(linear_coefficients)
# _call not implemented.
@property
def proximal(self):
lower = self.lower
upper = self.upper
linear_coefficients = self.linear_coefficients
space = self.domain
class ProxLinearOnBox(odl.operator.operator.Operator):
def __init__(self, sigma):
super(ProxLinearOnBox, self).__init__(
domain=space, range=space, linear=False)
self.sigma = sigma
def _call(self, x, out):
"""Apply the operator to ``x`` and store the result in ``out``."""
# out = x - sigma * linear_coefficients
out.lincomb(1.0, x, -self.sigma, linear_coefficients)
# Now project to the box.
out.ufuncs.maximum(lower, out=out)
out.ufuncs.minimum(upper, out=out)
return ProxLinearOnBox
class LinearOnBoxConjugate(odl.solvers.functional.Functional):
"""Implements the convex conjugate of LinearOnBox."""
def __init__(self, space, linear_coefficients, lower, upper):
super(LinearOnBoxConjugate, self).__init__(space=space, linear=False)
# Confuses the primal and the dual space. Luckily they're the same.
self.lower = lower
self.upper = upper
self.linear_coefficients = linear_coefficients
@property
def convex_conj(self):
"""The convex conjugate."""
return LinearOnBox(self.domain, self.linear_coefficients, self.lower,
self.upper)
class LpData(object):
"""Specifies a linear programming problem.
In the format:
minimize objective_vector' * x + objective_constant
s.t. constraint_matrix[:num_equalities, :] * x =
right_hand_side[:num_equalities]
constraint_matrix[num_equalities:, :] * x >=
         right_hand_side[num_equalities:]
variable_lower_bound <= x <= variable_upper_bound
The variable_lower_bound may contain `-inf` elements and variable_upper_bound
may contain `inf` elements when the corresponding variable bound is not
present.
Fields: variable_lower_bound, variable_upper_bound, objective_vector,
objective_constant, constraint_matrix, right_hand_side, num_equalities.
"""
def __init__(self, variable_lower_bound, variable_upper_bound,
objective_vector, objective_constant, constraint_matrix,
right_hand_side, num_equalities):
self.variable_lower_bound = variable_lower_bound
self.variable_upper_bound = variable_upper_bound
self.objective_vector = objective_vector
self.objective_constant = objective_constant
self.constraint_matrix = constraint_matrix
self.right_hand_side = right_hand_side
self.num_equalities = num_equalities
def lp_from_hdf5(filename):
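  """Reads an LpData instance from an HDF5 file.
  The file is expected to contain the datasets 'variable_lower_bound',
  'variable_upper_bound', 'right_hand_side', 'objective_vector',
  'objective_constant', 'num_equalities', and a CSC representation of the
  constraint matrix ('constraint_matrix_data', 'constraint_matrix_indices',
  'constraint_matrix_indptr').
  """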
h5 = h5py.File(filename, 'r')
variable_lower_bound = np.array(h5['variable_lower_bound'])
variable_upper_bound = np.array(h5['variable_upper_bound'])
right_hand_side = np.array(h5['right_hand_side'])
objective_vector = np.array(h5['objective_vector'])
constraint_matrix = scipy.sparse.csc_matrix(
(np.array(h5['constraint_matrix_data']),
np.array(h5['constraint_matrix_indices']),
np.array(h5['constraint_matrix_indptr'])),
shape=(right_hand_side.size, variable_lower_bound.size))
print('constraint matrix dimensions:', constraint_matrix.shape)
return LpData(variable_lower_bound, variable_upper_bound, objective_vector,
h5['objective_constant'][()], constraint_matrix,
right_hand_side, h5['num_equalities'][()])
def solution_stats(lp, primal, dual):
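  """Computes objective and KKT-residual statistics for a candidate solution.
  Returns a dict with the primal objective, the dual objective derived from
  the reduced costs, and the l2/l1/l-inf norms of a KKT residual vector that
  stacks equality errors, inequality violations, reduced-cost infeasibilities,
  and the primal-dual objective gap.
  """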
primal_obj = np.dot(primal, lp.objective_vector) + lp.objective_constant
# Assumes that bounds on primal and dual variables are always satisfied.
activity = lp.constraint_matrix @ primal
  eq_error = (
      lp.right_hand_side[:lp.num_equalities] - activity[:lp.num_equalities])
ineq_error = np.maximum(
lp.right_hand_side[lp.num_equalities:] - activity[lp.num_equalities:],
0.0)
reduced_cost = lp.objective_vector - lp.constraint_matrix.T @ dual
# Whenever there's no lower bound, the positive part of the reduced cost is
# an infeasibility. Likewise when there's no upper bound, the negative part is
# an infeasibility.
reduced_cost_pos = np.maximum(reduced_cost, 0.0)
reduced_cost_neg = np.maximum(-reduced_cost, 0.0)
reduced_cost_infeas = np.isinf(
lp.variable_lower_bound) * reduced_cost_pos + np.isinf(
lp.variable_upper_bound) * reduced_cost_neg
finite_lower_bounds = lp.variable_lower_bound.copy()
finite_lower_bounds[np.isinf(finite_lower_bounds)] = 0.0
finite_upper_bounds = lp.variable_upper_bound.copy()
finite_upper_bounds[np.isinf(finite_upper_bounds)] = 0.0
  # Dual objective: b'y + l'[reduced cost]_+ - u'[reduced cost]_-, restricted
  # to the finite variable bounds.
  dual_obj = np.dot(dual, lp.right_hand_side) + np.dot(
      finite_lower_bounds, reduced_cost_pos) - np.dot(
          finite_upper_bounds, reduced_cost_neg) + lp.objective_constant
kkt_residual = np.concatenate((eq_error, ineq_error, reduced_cost_infeas))
kkt_residual = np.append(kkt_residual, primal_obj - dual_obj)
stats = dict()
stats['primal_obj'] = primal_obj
stats['dual_obj'] = dual_obj
stats['kkt_err_l2'] = np.linalg.norm(kkt_residual)
stats['kkt_err_l1'] = np.linalg.norm(kkt_residual, ord=1)
stats['kkt_err_linf'] = np.linalg.norm(kkt_residual, ord=np.inf)
return stats
def num_active_bounds_changed(lp, x_new, y_new, x_prev, y_prev):
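  """Counts how many active-bound statuses changed between two iterates.
  A primal variable counts if it moved onto or off of its lower or upper
  bound; a dual variable for an inequality constraint counts if it moved onto
  or off of zero.
  """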
x_prev_at_lower = x_prev == lp.variable_lower_bound
x_prev_at_upper = x_prev == lp.variable_upper_bound
x_new_at_lower = x_new == lp.variable_lower_bound
x_new_at_upper = x_new == lp.variable_upper_bound
x_active_bounds_changed = np.sum((x_prev_at_lower != x_new_at_lower)
| (x_prev_at_upper != x_new_at_upper))
y_prev_at_lower = y_prev[lp.num_equalities:] == 0.0
y_new_at_lower = y_new[lp.num_equalities:] == 0.0
y_active_bounds_changed = np.sum(y_prev_at_lower != y_new_at_lower)
return x_active_bounds_changed + y_active_bounds_changed
class CallbackStore(odl.solvers.Callback):
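  """ODL callback that records per-iteration solution statistics.
  At every iteration it stores objective values and KKT errors for both the
  current and the averaged iterates, whether a restart occurred, and how many
  active bounds changed since the previous iterate.
  """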
def __init__(self, lp):
self.lp = lp
self.stats = collections.OrderedDict()
self.iteration_count = 0
fields = [
'iteration_num', 'current_primal_obj', 'current_dual_obj',
'current_kkt_err_l2', 'current_kkt_err_l1', 'current_kkt_err_linf',
'avg_primal_obj', 'avg_dual_obj', 'avg_kkt_err_l2', 'avg_kkt_err_l1',
'avg_kkt_err_linf', 'num_active_bounds_changed', 'did_restart'
]
for f in fields:
self.stats[f] = []
def __call__(self, x, y, x_avg, y_avg, did_restart):
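    """Records statistics for the current and averaged iterates."""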
self.stats['iteration_num'].append(self.iteration_count)
self.iteration_count += 1
stats = solution_stats(self.lp, x, y)
for stat_name in stats:
self.stats['current_{}'.format(stat_name)].append(stats[stat_name])
stats = solution_stats(self.lp, x_avg, y_avg)
for stat_name in stats:
self.stats['avg_{}'.format(stat_name)].append(stats[stat_name])
self.stats['did_restart'].append(did_restart)
if self.iteration_count == 1:
self.stats['num_active_bounds_changed'].append(0)
else:
self.stats['num_active_bounds_changed'].append(
num_active_bounds_changed(self.lp, x.asarray(), y.asarray(),
self.x_prev, self.y_prev))
self.x_prev = x.asarray().copy()
self.y_prev = y.asarray().copy()
def dataframe(self):
return pd.DataFrame.from_dict(self.stats)
def solve_lp(lp, num_iters, tau, sigma, restart, fixed_restart_frequency=None):
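  """Solves the LP with (restarted) PDHG, returning per-iteration statistics.
  The statistics are collected by CallbackStore and returned as a pandas
  DataFrame with one row per iteration.
  """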
# Using the notation of ODL's primal_dual_hybrid_gradient.py, the LP is
# formulated as
# min_x max_y f(x) + y'Lx - g^*(y)
# where:
# f(x) = objective_vector'x +
# Indicator([variable_lower_bound, variable_upper_bound])
# L = -constraint_matrix
  #   g^*(y) = -right_hand_side'y +
  #            Indicator(R^{num_equalities} x R_+^{num_constraints - num_equalities})
# The objective constant is ignored in the formulation.
linear_operator = odl.MatrixOperator(-lp.constraint_matrix)
primal_space = linear_operator.domain
dual_space = linear_operator.range
f = LinearOnBox(primal_space, lp.objective_vector, lp.variable_lower_bound,
lp.variable_upper_bound)
num_constraints = lp.constraint_matrix.shape[0]
g = LinearOnBoxConjugate(
dual_space, -lp.right_hand_side,
np.concatenate(
(np.full(lp.num_equalities,
-np.inf), np.zeros(num_constraints - lp.num_equalities))),
np.full(num_constraints, np.inf))
x = primal_space.zero()
y = dual_space.zero()
callback = CallbackStore(lp)
restarted_pdhg.restarted_pdhg(
x,
f=f,
g=g,
L=linear_operator,
niter=num_iters,
y=y,
tau=tau,
sigma=sigma,
callback=callback,
restart=restart,
fixed_restart_frequency=fixed_restart_frequency)
return callback.dataframe()
def step_sizes(lp, tau_sigma_ratio):
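  """Computes PDHG step sizes (tau, sigma) from an estimated operator norm.
  The step sizes satisfy tau * sigma * estimated_norm**2 = 0.9, which stays
  slightly inside the usual PDHG step-size bound when the norm estimate is
  accurate, and tau / sigma = tau_sigma_ratio.
  """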
estimated_norm = odl.MatrixOperator(-lp.constraint_matrix).norm(estimate=True)
# tau * sigma = 0.9 / estimated_norm**2
# and tau/sigma = scale
sigma = np.sqrt(0.9 / tau_sigma_ratio) / estimated_norm
tau = sigma * tau_sigma_ratio
return tau, sigma
def solve_lps(lp,
num_iters,
tau_sigma_ratio,
restart_frequencies,
solve_adaptive=True):
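  """Runs several PDHG variants on the same LP and collects their statistics.
  Runs plain PDHG, optionally adaptive-restart PDHG, and one fixed-frequency
  restart run per entry of restart_frequencies, all with the same step sizes.
  Returns an OrderedDict mapping each run name to its iteration DataFrame.
  """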
tau, sigma = step_sizes(lp, tau_sigma_ratio)
iteration_data = collections.OrderedDict()
name = 'pdhg'
iteration_data[name] = solve_lp(lp, num_iters, tau, sigma, restart='none')
print(name, '\n', iteration_data[name].tail(2))
if solve_adaptive:
name = 'pdhg_adaptive'
iteration_data[name] = solve_lp(
lp, num_iters, tau, sigma, restart='adaptive')
print(name, '\n', iteration_data[name].tail(2))
for restart_frequency in restart_frequencies:
name = 'pdhg_restart_{}'.format(restart_frequency)
iteration_data[name] = solve_lp(
lp,
num_iters,
tau,
sigma,
restart='fixed',
fixed_restart_frequency=restart_frequency)
print(name, '\n', iteration_data[name].tail(2))
return iteration_data
# These example LPs are useful for testing the code.
def trivial_lp():
# min -2x - y
# s.t. -x - y >= -1
# x, y >= 0.
return LpData(
variable_lower_bound=np.zeros(2),
variable_upper_bound=np.full(2, np.inf),
objective_vector=np.array([-2.0, -1.0]),
objective_constant=0.0,
constraint_matrix=scipy.sparse.csc_matrix([[-1.0, -1.0]]),
right_hand_side=np.array([-1.0]),
num_equalities=0)
def trivial_lp2():
# min -x
# s.t. x - y == 0.5
# x free
# y in [0, 1]
return LpData(
variable_lower_bound=np.array([-np.inf, 0.0]),
variable_upper_bound=np.array([np.inf, 1.0]),
objective_vector=np.array([-1.0, 0.0]),
objective_constant=0.0,
constraint_matrix=scipy.sparse.csc_matrix([[1.0, -1.0]]),
right_hand_side=np.array([0.5]),
num_equalities=1)
|
lmazuel/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/application_gateway_ip_configuration.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayIPConfiguration(SubResource):
"""IP configuration of an application gateway. Currently 1 public and 1
private IP configuration is allowed.
:param id: Resource ID.
:type id: str
:param subnet: Reference of the subnet resource. A subnet from where
application gateway gets its private address.
:type subnet: ~azure.mgmt.network.v2017_06_01.models.SubResource
:param provisioning_state: Provisioning state of the application gateway
subnet resource. Possible values are: 'Updating', 'Deleting', and
'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayIPConfiguration, self).__init__(**kwargs)
self.subnet = kwargs.get('subnet', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None)
self.type = kwargs.get('type', None)
|
google/nerfactor
|
refs/heads/main
|
nerfactor/__init__.py
|
42
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
sfam/home-assistant
|
refs/heads/dev
|
tests/components/test_script.py
|
7
|
"""
tests.components.test_script
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests script component.
"""
# pylint: disable=too-many-public-methods,protected-access
from datetime import timedelta
import unittest
from homeassistant.components import script
import homeassistant.util.dt as dt_util
from tests.common import fire_time_changed, get_test_home_assistant
ENTITY_ID = 'script.test'
class TestScript(unittest.TestCase):
""" Test the switch module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_setup_with_missing_sequence(self):
self.assertTrue(script.setup(self.hass, {
'script': {
'test': {}
}
}))
self.assertEqual(0, len(self.hass.states.entity_ids('script')))
def test_setup_with_invalid_object_id(self):
self.assertTrue(script.setup(self.hass, {
'script': {
'test hello world': {
'sequence': []
}
}
}))
self.assertEqual(0, len(self.hass.states.entity_ids('script')))
def test_setup_with_dict_as_sequence(self):
self.assertTrue(script.setup(self.hass, {
'script': {
'test': {
'sequence': {
'event': 'test_event'
}
}
}
}))
self.assertEqual(0, len(self.hass.states.entity_ids('script')))
def test_firing_event(self):
event = 'test_event'
calls = []
def record_event(event):
calls.append(event)
self.hass.bus.listen(event, record_event)
self.assertTrue(script.setup(self.hass, {
'script': {
'test': {
'alias': 'Test Script',
'sequence': [{
'event': event,
'event_data': {
'hello': 'world'
}
}]
}
}
}))
script.turn_on(self.hass, ENTITY_ID)
self.hass.pool.block_till_done()
self.assertEqual(1, len(calls))
self.assertEqual('world', calls[0].data.get('hello'))
self.assertEqual(
True, self.hass.states.get(ENTITY_ID).attributes.get('can_cancel'))
def test_calling_service_old(self):
calls = []
def record_call(service):
calls.append(service)
self.hass.services.register('test', 'script', record_call)
self.assertTrue(script.setup(self.hass, {
'script': {
'test': {
'sequence': [{
'execute_service': 'test.script',
'service_data': {
'hello': 'world'
}
}]
}
}
}))
script.turn_on(self.hass, ENTITY_ID)
self.hass.pool.block_till_done()
self.assertEqual(1, len(calls))
self.assertEqual('world', calls[0].data.get('hello'))
def test_calling_service(self):
calls = []
def record_call(service):
calls.append(service)
self.hass.services.register('test', 'script', record_call)
self.assertTrue(script.setup(self.hass, {
'script': {
'test': {
'sequence': [{
'service': 'test.script',
'service_data': {
'hello': 'world'
}
}]
}
}
}))
script.turn_on(self.hass, ENTITY_ID)
self.hass.pool.block_till_done()
self.assertEqual(1, len(calls))
self.assertEqual('world', calls[0].data.get('hello'))
def test_delay(self):
event = 'test_event'
calls = []
def record_event(event):
calls.append(event)
self.hass.bus.listen(event, record_event)
self.assertTrue(script.setup(self.hass, {
'script': {
'test': {
'sequence': [{
'event': event
}, {
'delay': {
'seconds': 5
}
}, {
'event': event,
}]
}
}
}))
script.turn_on(self.hass, ENTITY_ID)
self.hass.pool.block_till_done()
self.assertTrue(script.is_on(self.hass, ENTITY_ID))
self.assertEqual(
False,
self.hass.states.get(ENTITY_ID).attributes.get('can_cancel'))
self.assertEqual(
event,
self.hass.states.get(ENTITY_ID).attributes.get('last_action'))
self.assertEqual(1, len(calls))
future = dt_util.utcnow() + timedelta(seconds=5)
fire_time_changed(self.hass, future)
self.hass.pool.block_till_done()
self.assertFalse(script.is_on(self.hass, ENTITY_ID))
self.assertEqual(2, len(calls))
def test_cancel_while_delay(self):
event = 'test_event'
calls = []
def record_event(event):
calls.append(event)
self.hass.bus.listen(event, record_event)
self.assertTrue(script.setup(self.hass, {
'script': {
'test': {
'sequence': [{
'delay': {
'seconds': 5
}
}, {
'event': event,
}]
}
}
}))
script.turn_on(self.hass, ENTITY_ID)
self.hass.pool.block_till_done()
self.assertTrue(script.is_on(self.hass, ENTITY_ID))
self.assertEqual(0, len(calls))
script.turn_off(self.hass, ENTITY_ID)
self.hass.pool.block_till_done()
self.assertFalse(script.is_on(self.hass, ENTITY_ID))
future = dt_util.utcnow() + timedelta(seconds=5)
fire_time_changed(self.hass, future)
self.hass.pool.block_till_done()
self.assertFalse(script.is_on(self.hass, ENTITY_ID))
self.assertEqual(0, len(calls))
|
2mf/moto
|
refs/heads/master
|
tests/test_datapipeline/test_datapipeline.py
|
14
|
from __future__ import unicode_literals
import boto.datapipeline
import sure # noqa
from moto import mock_datapipeline
from moto.datapipeline.utils import remove_capitalization_of_dict_keys
def get_value_from_fields(key, fields):
for field in fields:
if field['key'] == key:
return field['stringValue']
@mock_datapipeline
def test_create_pipeline():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
pipeline_descriptions = conn.describe_pipelines([pipeline_id])["pipelineDescriptionList"]
pipeline_descriptions.should.have.length_of(1)
pipeline_description = pipeline_descriptions[0]
pipeline_description['name'].should.equal("mypipeline")
pipeline_description["pipelineId"].should.equal(pipeline_id)
fields = pipeline_description['fields']
get_value_from_fields('@pipelineState', fields).should.equal("PENDING")
get_value_from_fields('uniqueId', fields).should.equal("some-unique-id")
PIPELINE_OBJECTS = [
{
"id": "Default",
"name": "Default",
"fields": [{
"key": "workerGroup",
"stringValue": "workerGroup"
}]
},
{
"id": "Schedule",
"name": "Schedule",
"fields": [{
"key": "startDateTime",
"stringValue": "2012-12-12T00:00:00"
}, {
"key": "type",
"stringValue": "Schedule"
}, {
"key": "period",
"stringValue": "1 hour"
}, {
"key": "endDateTime",
"stringValue": "2012-12-21T18:00:00"
}]
},
{
"id": "SayHello",
"name": "SayHello",
"fields": [{
"key": "type",
"stringValue": "ShellCommandActivity"
}, {
"key": "command",
"stringValue": "echo hello"
}, {
"key": "parent",
"refValue": "Default"
}, {
"key": "schedule",
"refValue": "Schedule"
}]
}
]
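# Note on the fixture above (added for clarity): PIPELINE_OBJECTS models a
# minimal pipeline definition with a Default object carrying a workerGroup,
# an hourly Schedule bounded by start/end timestamps, and a SayHello
# ShellCommandActivity whose "parent" and "schedule" fields reference the
# other two objects by id.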
@mock_datapipeline
def test_creating_pipeline_definition():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id)
pipeline_definition = conn.get_pipeline_definition(pipeline_id)
pipeline_definition['pipelineObjects'].should.have.length_of(3)
default_object = pipeline_definition['pipelineObjects'][0]
default_object['name'].should.equal("Default")
default_object['id'].should.equal("Default")
default_object['fields'].should.equal([{
"key": "workerGroup",
"stringValue": "workerGroup"
}])
@mock_datapipeline
def test_describing_pipeline_objects():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id)
objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)['pipelineObjects']
objects.should.have.length_of(2)
default_object = [x for x in objects if x['id'] == 'Default'][0]
default_object['name'].should.equal("Default")
default_object['fields'].should.equal([{
"key": "workerGroup",
"stringValue": "workerGroup"
}])
@mock_datapipeline
def test_activate_pipeline():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
conn.activate_pipeline(pipeline_id)
pipeline_descriptions = conn.describe_pipelines([pipeline_id])["pipelineDescriptionList"]
pipeline_descriptions.should.have.length_of(1)
pipeline_description = pipeline_descriptions[0]
fields = pipeline_description['fields']
get_value_from_fields('@pipelineState', fields).should.equal("SCHEDULED")
@mock_datapipeline
def test_listing_pipelines():
conn = boto.datapipeline.connect_to_region("us-west-2")
res1 = conn.create_pipeline("mypipeline1", "some-unique-id1")
res2 = conn.create_pipeline("mypipeline2", "some-unique-id2")
response = conn.list_pipelines()
response["hasMoreResults"].should.be(False)
response["marker"].should.be.none
response["pipelineIdList"].should.have.length_of(2)
response["pipelineIdList"].should.contain({
"id": res1["pipelineId"],
"name": "mypipeline1",
})
response["pipelineIdList"].should.contain({
"id": res2["pipelineId"],
"name": "mypipeline2"
})
# testing a helper function
def test_remove_capitalization_of_dict_keys():
result = remove_capitalization_of_dict_keys(
{
"Id": "IdValue",
"Fields": [{
"Key": "KeyValue",
"StringValue": "StringValueValue"
}]
}
)
result.should.equal({
"id": "IdValue",
"fields": [{
"key": "KeyValue",
"stringValue": "StringValueValue"
}],
})
|
ppwwyyxx/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py
|
3
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg.linalg_impl.tridiagonal_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
_sample_diags = np.array([[2, 1, 4, 0], [1, 3, 2, 2], [0, 1, -1, 1]])
_sample_rhs = np.array([1, 2, 3, 4])
_sample_result = np.array([-9, 5, -4, 4])
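# Illustration (added for clarity, not part of the original test data): in the
# "compact" diagonals format used above, row 0 holds the superdiagonal (padded
# at the end), row 1 the main diagonal, and row 2 the subdiagonal (padded at
# the start). The sample system is therefore equivalent to the dense matrix
#   [[1,  2, 0, 0],
#    [1,  3, 1, 0],
#    [0, -1, 2, 4],
#    [0,  0, 1, 2]]
# and plain numpy confirms that it maps _sample_result to _sample_rhs:
#   dense = (np.diag(_sample_diags[0][:-1], 1) + np.diag(_sample_diags[1]) +
#            np.diag(_sample_diags[2][1:], -1))
#   np.testing.assert_allclose(dense @ _sample_result, _sample_rhs)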
FORWARD_COMPATIBLE_DATE = (2019, 10, 18)
# Flag, indicating that test should be run only with partial_pivoting=True
FLAG_REQUIRES_PIVOTING = "FLAG_REQUIRES_PIVOT"
# Flag, indicating that test shouldn't be parameterized by different values of
# partial_pivoting, etc.
FLAG_NO_PARAMETERIZATION = "FLAG_NO_PARAMETERIZATION"
def flags(*args):
def decorator(f):
for flag in args:
setattr(f, flag, True)
return f
return decorator
def _tfconst(array):
return constant_op.constant(array, dtypes.float64)
def _tf_ones(shape):
return array_ops.ones(shape, dtype=dtypes.float64)
class TridiagonalSolveOpTest(test.TestCase):
def _test(self,
diags,
rhs,
expected,
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False):
with self.cached_session(use_gpu=True):
result = linalg_impl.tridiagonal_solve(diags, rhs, diags_format,
transpose_rhs, conjugate_rhs)
self.assertAllClose(self.evaluate(result), expected)
def _testWithLists(self,
diags,
rhs,
expected,
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False):
self._test(
_tfconst(diags), _tfconst(rhs), _tfconst(expected), diags_format,
transpose_rhs, conjugate_rhs)
def _assertRaises(self, diags, rhs, diags_format="compact"):
with self.assertRaises(ValueError):
linalg_impl.tridiagonal_solve(diags, rhs, diags_format)
# Tests with various dtypes
def testReal(self):
for dtype in dtypes.float32, dtypes.float64:
self._test(
diags=constant_op.constant(_sample_diags, dtype),
rhs=constant_op.constant(_sample_rhs, dtype),
expected=constant_op.constant(_sample_result, dtype))
def testComplex(self):
for dtype in dtypes.complex64, dtypes.complex128:
self._test(
diags=constant_op.constant(_sample_diags, dtype) * (1 + 1j),
rhs=constant_op.constant(_sample_rhs, dtype) * (1 - 1j),
expected=constant_op.constant(_sample_result, dtype) * (1 - 1j) /
(1 + 1j))
# Tests with small matrix sizes
def test3x3(self):
self._testWithLists(
diags=[[2, -1, 0], [1, 3, 1], [0, -1, -2]],
rhs=[1, 2, 3],
expected=[-3, 2, 7])
def test2x2(self):
self._testWithLists(
diags=[[2, 0], [1, 3], [0, 1]], rhs=[1, 4], expected=[-5, 3])
def test2x2Complex(self):
for dtype in dtypes.complex64, dtypes.complex128:
self._test(
diags=constant_op.constant([[2j, 0j], [1j, 3j], [0j, 1j]], dtype),
rhs=constant_op.constant([1 - 1j, 4 - 4j], dtype),
expected=constant_op.constant([5 + 5j, -3 - 3j], dtype))
def test1x1(self):
self._testWithLists(diags=[[0], [3], [0]], rhs=[6], expected=[2])
def test0x0(self):
self._test(
diags=constant_op.constant(0, shape=(3, 0), dtype=dtypes.float32),
rhs=constant_op.constant(0, shape=(0, 1), dtype=dtypes.float32),
expected=constant_op.constant(0, shape=(0, 1), dtype=dtypes.float32))
def test2x2WithMultipleRhs(self):
self._testWithLists(
diags=[[2, 0], [1, 3], [0, 1]],
rhs=[[1, 2, 3], [4, 8, 12]],
expected=[[-5, -10, -15], [3, 6, 9]])
def test1x1WithMultipleRhs(self):
self._testWithLists(
diags=[[0], [3], [0]], rhs=[[6, 9, 12]], expected=[[2, 3, 4]])
def test1x1NotInvertible(self):
with self.assertRaises(errors_impl.InvalidArgumentError):
self._testWithLists(diags=[[0], [0], [0]], rhs=[[6, 9, 12]], expected=[])
def test2x2NotInvertible(self):
with self.assertRaises(errors_impl.InvalidArgumentError):
self._testWithLists(
diags=[[3, 0], [1, 3], [0, 1]], rhs=[1, 4], expected=[])
# Other edge cases
@flags(FLAG_REQUIRES_PIVOTING)
def testCaseRequiringPivoting(self):
# Without partial pivoting (e.g. Thomas algorithm) this would fail.
self._testWithLists(
diags=[[2, -1, 1, 0], [1, 4, 1, -1], [0, 2, -2, 3]],
rhs=[1, 2, 3, 4],
expected=[8, -3.5, 0, -4])
@flags(FLAG_REQUIRES_PIVOTING)
def testCaseRequiringPivotingLastRows(self):
self._testWithLists(
diags=[[2, 1, -1, 0], [1, -1, 2, 1], [0, 1, -6, 1]],
rhs=[1, 2, -1, -2],
expected=[5, -2, -5, 3])
def testNotInvertible(self):
if test.is_gpu_available(cuda_only=True):
# CuSparse gtsv routines don't raise errors for non-invertible
# matrices.
return
with self.assertRaises(errors_impl.InvalidArgumentError):
self._testWithLists(
diags=[[2, -1, 1, 0], [1, 4, 1, -1], [0, 2, 0, 3]],
rhs=[1, 2, 3, 4],
expected=[8, -3.5, 0, -4])
def testDiagonal(self):
self._testWithLists(
diags=[[0, 0, 0, 0], [1, 2, -1, -2], [0, 0, 0, 0]],
rhs=[1, 2, 3, 4],
expected=[1, 1, -3, -2])
def testUpperTriangular(self):
self._testWithLists(
diags=[[2, 4, -1, 0], [1, 3, 1, 2], [0, 0, 0, 0]],
rhs=[1, 6, 4, 4],
expected=[13, -6, 6, 2])
def testLowerTriangular(self):
self._testWithLists(
diags=[[0, 0, 0, 0], [2, -1, 3, 1], [0, 1, 4, 2]],
rhs=[4, 5, 6, 1],
expected=[2, -3, 6, -11])
# Multiple right-hand sides and batching
def testWithTwoRightHandSides(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.transpose([_sample_rhs, 2 * _sample_rhs]),
expected=np.transpose([_sample_result, 2 * _sample_result]))
def testBatching(self):
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, -2 * _sample_result]))
def testWithTwoBatchingDimensions(self):
self._testWithLists(
diags=np.array([[_sample_diags, -_sample_diags, _sample_diags],
[-_sample_diags, _sample_diags, -_sample_diags]]),
rhs=np.array([[_sample_rhs, 2 * _sample_rhs, 3 * _sample_rhs],
[4 * _sample_rhs, 5 * _sample_rhs, 6 * _sample_rhs]]),
expected=np.array(
[[_sample_result, -2 * _sample_result, 3 * _sample_result],
[-4 * _sample_result, 5 * _sample_result, -6 * _sample_result]]))
def testBatchingAndTwoRightHandSides(self):
rhs = np.transpose([_sample_rhs, 2 * _sample_rhs])
expected_result = np.transpose([_sample_result, 2 * _sample_result])
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([rhs, 2 * rhs]),
expected=np.array([expected_result, -2 * expected_result]))
# Various input formats
def testSequenceFormat(self):
self._test(
diags=(_tfconst([2, 1, 4]), _tfconst([1, 3, 2, 2]), _tfconst([1, -1,
1])),
rhs=_tfconst([1, 2, 3, 4]),
expected=_tfconst([-9, 5, -4, 4]),
diags_format="sequence")
def testSequenceFormatWithDummyElements(self):
dummy = 20
self._test(
diags=(_tfconst([2, 1, 4, dummy]), _tfconst([1, 3, 2, 2]),
_tfconst([dummy, 1, -1, 1])),
rhs=_tfconst([1, 2, 3, 4]),
expected=_tfconst([-9, 5, -4, 4]),
diags_format="sequence")
def testSequenceFormatWithBatching(self):
self._test(
diags=(_tfconst([[2, 1, 4], [-2, -1, -4]]),
_tfconst([[1, 3, 2, 2], [-1, -3, -2, -2]]),
_tfconst([[1, -1, 1], [-1, 1, -1]])),
rhs=_tfconst([[1, 2, 3, 4], [1, 2, 3, 4]]),
expected=_tfconst([[-9, 5, -4, 4], [9, -5, 4, -4]]),
diags_format="sequence")
def testMatrixFormat(self):
self._testWithLists(
diags=[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
rhs=[1, 2, 3, 4],
expected=[-9, 5, -4, 4],
diags_format="matrix")
def testMatrixFormatWithMultipleRightHandSides(self):
self._testWithLists(
diags=[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
rhs=[[1, -1], [2, -2], [3, -3], [4, -4]],
expected=[[-9, 9], [5, -5], [-4, 4], [4, -4]],
diags_format="matrix")
def testMatrixFormatWithBatching(self):
self._testWithLists(
diags=[[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
[[-1, -2, 0, 0], [-1, -3, -1, 0], [0, 1, -2, -4], [0, 0, -1,
-2]]],
rhs=[[1, 2, 3, 4], [1, 2, 3, 4]],
expected=[[-9, 5, -4, 4], [9, -5, 4, -4]],
diags_format="matrix")
def testRightHandSideAsColumn(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.transpose([_sample_rhs]),
expected=np.transpose([_sample_result]),
diags_format="compact")
# Tests with transpose and adjoint
def testTransposeRhs(self):
expected = np.array([_sample_result, 2 * _sample_result])
if compat.forward_compatible(*FORWARD_COMPATIBLE_DATE):
expected = expected.T
self._testWithLists(
diags=_sample_diags,
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=expected,
transpose_rhs=True)
def testConjugateRhs(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.transpose([_sample_rhs * (1 + 1j), _sample_rhs * (1 - 2j)]),
expected=np.transpose(
[_sample_result * (1 - 1j), _sample_result * (1 + 2j)]),
conjugate_rhs=True)
def testAdjointRhs(self):
expected = np.array(
[_sample_result * (1 - 1j), _sample_result * (1 + 2j)])
if compat.forward_compatible(*FORWARD_COMPATIBLE_DATE):
expected = expected.T
self._testWithLists(
diags=_sample_diags,
rhs=np.array([_sample_rhs * (1 + 1j), _sample_rhs * (1 - 2j)]),
expected=expected,
transpose_rhs=True,
conjugate_rhs=True)
def testTransposeRhsWithBatching(self):
expected = np.array(
[[_sample_result, 2 * _sample_result],
[-3 * _sample_result, -4 * _sample_result]])
if compat.forward_compatible(*FORWARD_COMPATIBLE_DATE):
expected = expected.transpose(0, 2, 1)
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([[_sample_rhs, 2 * _sample_rhs],
[3 * _sample_rhs, 4 * _sample_rhs]]),
expected=expected,
transpose_rhs=True)
def testTransposeRhsWithRhsAsVector(self):
self._testWithLists(
diags=_sample_diags,
rhs=_sample_rhs,
expected=_sample_result,
transpose_rhs=True)
def testConjugateRhsWithRhsAsVector(self):
self._testWithLists(
diags=_sample_diags,
rhs=_sample_rhs * (1 + 1j),
expected=_sample_result * (1 - 1j),
conjugate_rhs=True)
def testTransposeRhsWithRhsAsVectorAndBatching(self):
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, -2 * _sample_result]),
transpose_rhs=True)
# Gradient tests
def _gradientTest(
self,
diags,
rhs,
y, # output = reduce_sum(y * tridiag_solve(diags, rhs))
expected_grad_diags, # expected gradient of output w.r.t. diags
expected_grad_rhs, # expected gradient of output w.r.t. rhs
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False,
feed_dict=None):
expected_grad_diags = _tfconst(expected_grad_diags)
expected_grad_rhs = _tfconst(expected_grad_rhs)
with backprop.GradientTape() as tape_diags:
with backprop.GradientTape() as tape_rhs:
tape_diags.watch(diags)
tape_rhs.watch(rhs)
x = linalg_impl.tridiagonal_solve(
diags,
rhs,
diagonals_format=diags_format,
transpose_rhs=transpose_rhs,
conjugate_rhs=conjugate_rhs)
res = math_ops.reduce_sum(x * y)
with self.cached_session(use_gpu=True) as sess:
actual_grad_diags = sess.run(
tape_diags.gradient(res, diags), feed_dict=feed_dict)
actual_rhs_diags = sess.run(
tape_rhs.gradient(res, rhs), feed_dict=feed_dict)
self.assertAllClose(expected_grad_diags, actual_grad_diags)
self.assertAllClose(expected_grad_rhs, actual_rhs_diags)
def _gradientTestWithLists(self,
diags,
rhs,
y,
expected_grad_diags,
expected_grad_rhs,
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False):
self._gradientTest(
_tfconst(diags), _tfconst(rhs), _tfconst(y), expected_grad_diags,
expected_grad_rhs, diags_format, transpose_rhs, conjugate_rhs)
def testGradientSimple(self):
self._gradientTestWithLists(
diags=_sample_diags,
rhs=_sample_rhs,
y=[1, 3, 2, 4],
expected_grad_diags=[[-5, 0, 4, 0], [9, 0, -4, -16], [0, 0, 5, 16]],
expected_grad_rhs=[1, 0, -1, 4])
def testGradientWithMultipleRhs(self):
self._gradientTestWithLists(
diags=_sample_diags,
rhs=[[1, 2], [2, 4], [3, 6], [4, 8]],
y=[[1, 5], [2, 6], [3, 7], [4, 8]],
expected_grad_diags=([[-20, 28, -60, 0], [36, -35, 60, 80],
[0, 63, -75, -80]]),
expected_grad_rhs=[[0, 2], [1, 3], [1, 7], [0, -10]])
def _makeDataForGradientWithBatching(self):
y = np.array([1, 3, 2, 4])
grad_diags = np.array([[-5, 0, 4, 0], [9, 0, -4, -16], [0, 0, 5, 16]])
grad_rhs = np.array([1, 0, -1, 4])
diags_batched = np.array(
[[_sample_diags, 2 * _sample_diags, 3 * _sample_diags],
[4 * _sample_diags, 5 * _sample_diags, 6 * _sample_diags]])
rhs_batched = np.array([[_sample_rhs, -_sample_rhs, _sample_rhs],
[-_sample_rhs, _sample_rhs, -_sample_rhs]])
y_batched = np.array([[y, y, y], [y, y, y]])
expected_grad_diags_batched = np.array(
[[grad_diags, -grad_diags / 4, grad_diags / 9],
[-grad_diags / 16, grad_diags / 25, -grad_diags / 36]])
expected_grad_rhs_batched = np.array(
[[grad_rhs, grad_rhs / 2, grad_rhs / 3],
[grad_rhs / 4, grad_rhs / 5, grad_rhs / 6]])
return (y_batched, diags_batched, rhs_batched, expected_grad_diags_batched,
expected_grad_rhs_batched)
def testGradientWithBatchDims(self):
y, diags, rhs, expected_grad_diags, expected_grad_rhs = \
self._makeDataForGradientWithBatching()
self._gradientTestWithLists(
diags=diags,
rhs=rhs,
y=y,
expected_grad_diags=expected_grad_diags,
expected_grad_rhs=expected_grad_rhs)
@test_util.run_deprecated_v1
def testGradientWithUnknownShapes(self):
def placeholder(rank):
return array_ops.placeholder(
dtypes.float64, shape=(None for _ in range(rank)))
y, diags, rhs, expected_grad_diags, expected_grad_rhs = \
self._makeDataForGradientWithBatching()
diags_placeholder = placeholder(rank=4)
rhs_placeholder = placeholder(rank=3)
y_placeholder = placeholder(rank=3)
self._gradientTest(
diags=diags_placeholder,
rhs=rhs_placeholder,
y=y_placeholder,
expected_grad_diags=expected_grad_diags,
expected_grad_rhs=expected_grad_rhs,
feed_dict={
diags_placeholder: diags,
rhs_placeholder: rhs,
y_placeholder: y
})
# Invalid input shapes
@flags(FLAG_NO_PARAMETERIZATION)
def testInvalidShapesCompactFormat(self):
def test_raises(diags_shape, rhs_shape):
self._assertRaises(_tf_ones(diags_shape), _tf_ones(rhs_shape), "compact")
test_raises((5, 4, 4), (5, 4))
test_raises((5, 3, 4), (4, 5))
test_raises((5, 3, 4), (5))
test_raises((5), (5, 4))
@flags(FLAG_NO_PARAMETERIZATION)
def testInvalidShapesSequenceFormat(self):
def test_raises(diags_tuple_shapes, rhs_shape):
diagonals = tuple(_tf_ones(shape) for shape in diags_tuple_shapes)
self._assertRaises(diagonals, _tf_ones(rhs_shape), "sequence")
test_raises(((5, 4), (5, 4)), (5, 4))
test_raises(((5, 4), (5, 4), (5, 6)), (5, 4))
test_raises(((5, 3), (5, 4), (5, 6)), (5, 4))
test_raises(((5, 6), (5, 4), (5, 3)), (5, 4))
test_raises(((5, 4), (7, 4), (5, 4)), (5, 4))
test_raises(((5, 4), (7, 4), (5, 4)), (3, 4))
@flags(FLAG_NO_PARAMETERIZATION)
def testInvalidShapesMatrixFormat(self):
def test_raises(diags_shape, rhs_shape):
self._assertRaises(_tf_ones(diags_shape), _tf_ones(rhs_shape), "matrix")
test_raises((5, 4, 7), (5, 4))
test_raises((5, 4, 4), (3, 4))
test_raises((5, 4, 4), (5, 3))
# Tests with placeholders
def _testWithPlaceholders(self,
diags_shape,
rhs_shape,
diags_feed,
rhs_feed,
expected,
diags_format="compact"):
if context.executing_eagerly():
return
diags = array_ops.placeholder(dtypes.float64, shape=diags_shape)
rhs = array_ops.placeholder(dtypes.float64, shape=rhs_shape)
x = linalg_impl.tridiagonal_solve(
diags, rhs, diags_format, partial_pivoting=self.pivoting)
with self.cached_session(use_gpu=True) as sess:
result = sess.run(x, feed_dict={diags: diags_feed, rhs: rhs_feed})
self.assertAllClose(result, expected)
@test_util.run_deprecated_v1
def testCompactFormatAllDimsUnknown(self):
self._testWithPlaceholders(
diags_shape=[None, None],
rhs_shape=[None],
diags_feed=_sample_diags,
rhs_feed=_sample_rhs,
expected=_sample_result)
@test_util.run_deprecated_v1
def testCompactFormatUnknownMatrixSize(self):
self._testWithPlaceholders(
diags_shape=[3, None],
rhs_shape=[4],
diags_feed=_sample_diags,
rhs_feed=_sample_rhs,
expected=_sample_result)
@test_util.run_deprecated_v1
def testCompactFormatUnknownRhsCount(self):
self._testWithPlaceholders(
diags_shape=[3, 4],
rhs_shape=[4, None],
diags_feed=_sample_diags,
rhs_feed=np.transpose([_sample_rhs, 2 * _sample_rhs]),
expected=np.transpose([_sample_result, 2 * _sample_result]))
@test_util.run_deprecated_v1
def testCompactFormatUnknownBatchSize(self):
self._testWithPlaceholders(
diags_shape=[None, 3, 4],
rhs_shape=[None, 4],
diags_feed=np.array([_sample_diags, -_sample_diags]),
rhs_feed=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, -2 * _sample_result]))
@test_util.run_deprecated_v1
def testMatrixFormatWithUnknownDims(self):
if context.executing_eagerly():
return
def test_with_matrix_shapes(matrix_shape, rhs_shape=None):
matrix = np.array([[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4],
[0, 0, 1, 2]])
rhs = np.array([1, 2, 3, 4])
x = np.array([-9, 5, -4, 4])
self._testWithPlaceholders(
diags_shape=matrix_shape,
rhs_shape=rhs_shape,
diags_feed=matrix,
rhs_feed=np.transpose([rhs, 2 * rhs]),
expected=np.transpose([x, 2 * x]),
diags_format="matrix")
test_with_matrix_shapes(matrix_shape=[4, 4], rhs_shape=[None, None])
test_with_matrix_shapes(matrix_shape=[None, 4], rhs_shape=[None, None])
test_with_matrix_shapes(matrix_shape=[4, None], rhs_shape=[None, None])
test_with_matrix_shapes(matrix_shape=[None, None], rhs_shape=[None, None])
test_with_matrix_shapes(matrix_shape=[4, 4])
test_with_matrix_shapes(matrix_shape=[None, 4])
test_with_matrix_shapes(matrix_shape=[4, None])
test_with_matrix_shapes(matrix_shape=[None, None])
test_with_matrix_shapes(matrix_shape=None, rhs_shape=[None, None])
test_with_matrix_shapes(matrix_shape=None)
@test_util.run_deprecated_v1
def testSequenceFormatWithUnknownDims(self):
if context.executing_eagerly():
return
superdiag = array_ops.placeholder(dtypes.float64, shape=[None])
diag = array_ops.placeholder(dtypes.float64, shape=[None])
subdiag = array_ops.placeholder(dtypes.float64, shape=[None])
rhs = array_ops.placeholder(dtypes.float64, shape=[None])
x = linalg_impl.tridiagonal_solve((superdiag, diag, subdiag),
rhs,
diagonals_format="sequence",
partial_pivoting=self.pivoting)
with self.cached_session(use_gpu=True) as sess:
result = sess.run(
x,
feed_dict={
subdiag: [20, 1, -1, 1],
diag: [1, 3, 2, 2],
superdiag: [2, 1, 4, 20],
rhs: [1, 2, 3, 4]
})
self.assertAllClose(result, [-9, 5, -4, 4])
# Benchmark
class TridiagonalSolveBenchmark(test.Benchmark):
sizes = [(100000, 1, 1), (1000000, 1, 1), (10000000, 1, 1), (100000, 10, 1),
(100000, 100, 1), (10000, 1, 10), (10000, 1, 100)]
pivoting_options = [(True, "pivoting"), (False, "no_pivoting")]
def _generateData(self, matrix_size, batch_size, num_rhs, seed=42):
np.random.seed(seed)
data = np.random.normal(size=(batch_size, matrix_size, 3 + num_rhs))
diags = np.stack([data[:, :, 0], data[:, :, 1], data[:, :, 2]], axis=-2)
rhs = data[:, :, 3:]
return (variables.Variable(diags, dtype=dtypes.float64),
variables.Variable(rhs, dtype=dtypes.float64))
def _generateMatrixData(self, matrix_size, batch_size, num_rhs, seed=42):
np.random.seed(seed)
import scipy.sparse as sparse # pylint:disable=g-import-not-at-top
    # By being strictly diagonally dominant, we guarantee invertibility.
    diag = 2 * np.abs(np.random.randn(matrix_size)) + 4.1
    subdiag = 2 * np.abs(np.random.randn(matrix_size - 1))
    superdiag = 2 * np.abs(np.random.randn(matrix_size - 1))
matrix = sparse.diags([superdiag, diag, subdiag], [1, 0, -1]).toarray()
vector = np.random.randn(batch_size, matrix_size, num_rhs)
return (variables.Variable(np.tile(matrix, (batch_size, 1, 1))),
variables.Variable(vector))
def _benchmark(self, generate_data_fn, test_name_format_string):
devices = [("/cpu:0", "cpu")]
if test.is_gpu_available(cuda_only=True):
devices += [("/gpu:0", "gpu")]
for device_option, pivoting_option, size_option in \
itertools.product(devices, self.pivoting_options, self.sizes):
device_id, device_name = device_option
pivoting, pivoting_name = pivoting_option
matrix_size, batch_size, num_rhs = size_option
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device(device_id):
diags, rhs = generate_data_fn(matrix_size, batch_size, num_rhs)
x = linalg_impl.tridiagonal_solve(
diags, rhs, partial_pivoting=pivoting)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=10,
store_memory_usage=False,
name=test_name_format_string.format(
device_name, matrix_size, batch_size, num_rhs,
pivoting_name))
def benchmarkTridiagonalSolveOp_WithMatrixInput(self):
self._benchmark(
self._generateMatrixData,
test_name_format_string=(
"tridiagonal_solve_matrix_format_{}_matrix_size_{}_"
"batch_size_{}_num_rhs_{}_{}"))
def benchmarkTridiagonalSolveOp(self):
self._benchmark(
        self._generateData,
test_name_format_string=(
"tridiagonal_solve_{}_matrix_size_{}_"
"batch_size_{}_num_rhs_{}_{}"))
if __name__ == "__main__":
for name, fun in dict(TridiagonalSolveOpTest.__dict__).items():
if not name.startswith("test"):
continue
if hasattr(fun, FLAG_NO_PARAMETERIZATION):
continue
# Replace testFoo with testFoo_pivoting and testFoo_noPivoting, setting
# self.pivoting to corresponding value.
delattr(TridiagonalSolveOpTest, name)
def decor(test_fun, pivoting):
def wrapped(instance):
instance.pivoting = pivoting
test_fun(instance)
return wrapped
setattr(TridiagonalSolveOpTest, name + "_pivoting",
decor(fun, pivoting=True))
if not hasattr(fun, FLAG_REQUIRES_PIVOTING):
setattr(TridiagonalSolveOpTest, name + "_noPivoting",
decor(fun, pivoting=False))
test.main()
|
mxOBS/deb-pkg_trusty_chromium-browser
|
refs/heads/master
|
chrome/test/chromedriver/embed_extension_in_cpp.py
|
158
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Embeds Chrome user data files in C++ code."""
import base64
import optparse
import os
import StringIO
import sys
import zipfile
import cpp_source
def main():
parser = optparse.OptionParser()
parser.add_option(
'', '--directory', type='string', default='.',
help='Path to directory where the cc/h file should be created')
options, args = parser.parse_args()
global_string_map = {}
string_buffer = StringIO.StringIO()
zipper = zipfile.ZipFile(string_buffer, 'w')
for f in args:
zipper.write(f, os.path.basename(f), zipfile.ZIP_STORED)
zipper.close()
global_string_map['kAutomationExtension'] = base64.b64encode(
string_buffer.getvalue())
string_buffer.close()
cpp_source.WriteSource('embedded_automation_extension',
'chrome/test/chromedriver/chrome',
options.directory, global_string_map)
if __name__ == '__main__':
sys.exit(main())
|
xkcd1253/Mimi
|
refs/heads/master
|
flask/lib/python2.7/site-packages/whoosh/matching/binary.py
|
94
|
# Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.matching import mcore
class BiMatcher(mcore.Matcher):
"""Base class for matchers that combine the results of two sub-matchers in
some way.
"""
def __init__(self, a, b):
super(BiMatcher, self).__init__()
self.a = a
self.b = b
def reset(self):
self.a.reset()
self.b.reset()
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.a, self.b)
def children(self):
return [self.a, self.b]
def copy(self):
return self.__class__(self.a.copy(), self.b.copy())
def depth(self):
return 1 + max(self.a.depth(), self.b.depth())
def skip_to(self, id):
if not self.is_active():
raise mcore.ReadTooFar
ra = self.a.skip_to(id)
rb = self.b.skip_to(id)
return ra or rb
def supports_block_quality(self):
return (self.a.supports_block_quality()
and self.b.supports_block_quality())
def supports(self, astype):
return self.a.supports(astype) and self.b.supports(astype)
class AdditiveBiMatcher(BiMatcher):
"""Base class for binary matchers where the scores of the sub-matchers are
added together.
"""
def max_quality(self):
q = 0.0
if self.a.is_active():
q += self.a.max_quality()
if self.b.is_active():
q += self.b.max_quality()
return q
def block_quality(self):
bq = 0.0
if self.a.is_active():
bq += self.a.block_quality()
if self.b.is_active():
bq += self.b.block_quality()
return bq
def weight(self):
return (self.a.weight() + self.b.weight())
def score(self):
return (self.a.score() + self.b.score())
def __eq__(self, other):
return self.__class__ is type(other)
def __lt__(self, other):
return type(other) is self.__class__
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
class UnionMatcher(AdditiveBiMatcher):
"""Matches the union (OR) of the postings in the two sub-matchers.
"""
_id = None
def replace(self, minquality=0):
a = self.a
b = self.b
a_active = a.is_active()
b_active = b.is_active()
# If neither sub-matcher on its own has a high enough max quality to
# contribute, convert to an intersection matcher
if minquality and a_active and b_active:
a_max = a.max_quality()
b_max = b.max_quality()
if a_max < minquality and b_max < minquality:
return IntersectionMatcher(a, b).replace(minquality)
elif a_max < minquality:
return AndMaybeMatcher(b, a)
elif b_max < minquality:
return AndMaybeMatcher(a, b)
# If one or both of the sub-matchers are inactive, convert
if not (a_active or b_active):
return mcore.NullMatcher()
elif not a_active:
return b.replace(minquality)
elif not b_active:
return a.replace(minquality)
a = a.replace(minquality - b.max_quality() if minquality else 0)
b = b.replace(minquality - a.max_quality() if minquality else 0)
# If one of the sub-matchers changed, return a new union
if a is not self.a or b is not self.b:
return self.__class__(a, b)
else:
self._id = None
return self
def is_active(self):
return self.a.is_active() or self.b.is_active()
def skip_to(self, id):
self._id = None
ra = rb = False
if self.a.is_active():
ra = self.a.skip_to(id)
if self.b.is_active():
rb = self.b.skip_to(id)
return ra or rb
def id(self):
_id = self._id
if _id is not None:
return _id
a = self.a
b = self.b
if not a.is_active():
_id = b.id()
elif not b.is_active():
_id = a.id()
else:
_id = min(a.id(), b.id())
self._id = _id
return _id
# Using sets is faster in most cases, but could potentially use a lot of
# memory. Comment out this method override to not use sets.
#def all_ids(self):
# return iter(sorted(set(self.a.all_ids()) | set(self.b.all_ids())))
def next(self):
self._id = None
a = self.a
b = self.b
a_active = a.is_active()
b_active = b.is_active()
# Shortcut when one matcher is inactive
if not (a_active or b_active):
raise mcore.ReadTooFar
elif not a_active:
return b.next()
elif not b_active:
return a.next()
a_id = a.id()
b_id = b.id()
ar = br = None
# After all that, here's the actual implementation
if a_id <= b_id:
ar = a.next()
if b_id <= a_id:
br = b.next()
return ar or br
def spans(self):
if not self.a.is_active():
return self.b.spans()
if not self.b.is_active():
return self.a.spans()
id_a = self.a.id()
id_b = self.b.id()
if id_a < id_b:
return self.a.spans()
elif id_b < id_a:
return self.b.spans()
else:
return sorted(set(self.a.spans()) | set(self.b.spans()))
def weight(self):
a = self.a
b = self.b
if not a.is_active():
return b.weight()
if not b.is_active():
return a.weight()
id_a = a.id()
id_b = b.id()
if id_a < id_b:
return a.weight()
elif id_b < id_a:
return b.weight()
else:
return (a.weight() + b.weight())
def score(self):
a = self.a
b = self.b
if not a.is_active():
return b.score()
if not b.is_active():
return a.score()
id_a = a.id()
id_b = b.id()
if id_a < id_b:
return a.score()
elif id_b < id_a:
return b.score()
else:
return (a.score() + b.score())
def skip_to_quality(self, minquality):
self._id = None
a = self.a
b = self.b
if not (a.is_active() or b.is_active()):
raise mcore.ReadTooFar
# Short circuit if one matcher is inactive
if not a.is_active():
return b.skip_to_quality(minquality)
elif not b.is_active():
return a.skip_to_quality(minquality)
skipped = 0
aq = a.block_quality()
bq = b.block_quality()
while a.is_active() and b.is_active() and aq + bq <= minquality:
if aq < bq:
skipped += a.skip_to_quality(minquality - bq)
aq = a.block_quality()
else:
skipped += b.skip_to_quality(minquality - aq)
bq = b.block_quality()
return skipped
class DisjunctionMaxMatcher(UnionMatcher):
"""Matches the union (OR) of two sub-matchers. Where both sub-matchers
match the same posting, returns the weight/score of the higher-scoring
posting.
"""
# TODO: this class inherits from AdditiveBiMatcher (through UnionMatcher)
# but it does not add the scores of the sub-matchers together (it
# overrides all methods that perform addition). Need to clean up the
# inheritance.
def __init__(self, a, b, tiebreak=0.0):
super(DisjunctionMaxMatcher, self).__init__(a, b)
self.tiebreak = tiebreak
def copy(self):
return self.__class__(self.a.copy(), self.b.copy(),
tiebreak=self.tiebreak)
def replace(self, minquality=0):
a = self.a
b = self.b
a_active = a.is_active()
b_active = b.is_active()
# DisMax takes the max of the sub-matcher qualities instead of adding
# them, so we need special logic here
if minquality and a_active and b_active:
a_max = a.max_quality()
b_max = b.max_quality()
if a_max < minquality and b_max < minquality:
# If neither sub-matcher has a high enough max quality to
# contribute, return an inactive matcher
return mcore.NullMatcher()
elif b_max < minquality:
# If the b matcher can't contribute, return a
return a.replace(minquality)
elif a_max < minquality:
# If the a matcher can't contribute, return b
return b.replace(minquality)
if not (a_active or b_active):
return mcore.NullMatcher()
elif not a_active:
return b.replace(minquality)
elif not b_active:
return a.replace(minquality)
# We CAN pass the minquality down here, since we don't add the two
# scores together
a = a.replace(minquality)
b = b.replace(minquality)
a_active = a.is_active()
b_active = b.is_active()
# It's kind of tedious to check for inactive sub-matchers all over
# again here after we replace them, but it's probably better than
# returning a replacement with an inactive sub-matcher
if not (a_active and b_active):
return mcore.NullMatcher()
elif not a_active:
return b
elif not b_active:
return a
elif a is not self.a or b is not self.b:
# If one of the sub-matchers changed, return a new DisMax
return self.__class__(a, b)
else:
return self
def score(self):
if not self.a.is_active():
return self.b.score()
elif not self.b.is_active():
return self.a.score()
else:
return max(self.a.score(), self.b.score())
def max_quality(self):
return max(self.a.max_quality(), self.b.max_quality())
def block_quality(self):
return max(self.a.block_quality(), self.b.block_quality())
def skip_to_quality(self, minquality):
a = self.a
b = self.b
# Short circuit if one matcher is inactive
if not a.is_active():
sk = b.skip_to_quality(minquality)
return sk
elif not b.is_active():
return a.skip_to_quality(minquality)
skipped = 0
aq = a.block_quality()
bq = b.block_quality()
while a.is_active() and b.is_active() and max(aq, bq) <= minquality:
if aq <= minquality:
skipped += a.skip_to_quality(minquality)
aq = a.block_quality()
if bq <= minquality:
skipped += b.skip_to_quality(minquality)
bq = b.block_quality()
return skipped
class IntersectionMatcher(AdditiveBiMatcher):
"""Matches the intersection (AND) of the postings in the two sub-matchers.
"""
def __init__(self, a, b):
super(IntersectionMatcher, self).__init__(a, b)
self._find_first()
def reset(self):
self.a.reset()
self.b.reset()
self._find_first()
def _find_first(self):
if (self.a.is_active()
and self.b.is_active()
and self.a.id() != self.b.id()):
self._find_next()
def replace(self, minquality=0):
a = self.a
b = self.b
a_active = a.is_active()
b_active = b.is_active()
if not (a_active and b_active):
# Intersection matcher requires that both sub-matchers be active
return mcore.NullMatcher()
if minquality:
a_max = a.max_quality()
b_max = b.max_quality()
if a_max + b_max < minquality:
# If the combined quality of the sub-matchers can't contribute,
# return an inactive matcher
return mcore.NullMatcher()
# Require that the replacements be able to contribute results
# higher than the minquality
a_min = minquality - b_max
b_min = minquality - a_max
else:
a_min = b_min = 0
a = a.replace(a_min)
b = b.replace(b_min)
a_active = a.is_active()
b_active = b.is_active()
if not (a_active or b_active):
return mcore.NullMatcher()
elif not a_active:
return b
elif not b_active:
return a
elif a is not self.a or b is not self.b:
return self.__class__(a, b)
else:
return self
def is_active(self):
return self.a.is_active() and self.b.is_active()
def _find_next(self):
a = self.a
b = self.b
a_id = a.id()
b_id = b.id()
assert a_id != b_id
r = False
while a.is_active() and b.is_active() and a_id != b_id:
if a_id < b_id:
ra = a.skip_to(b_id)
if not a.is_active():
return
r = r or ra
a_id = a.id()
else:
rb = b.skip_to(a_id)
if not b.is_active():
return
r = r or rb
b_id = b.id()
return r
def id(self):
return self.a.id()
# Using sets is faster in some cases, but could potentially use a lot of
# memory
def all_ids(self):
return iter(sorted(set(self.a.all_ids()) & set(self.b.all_ids())))
def skip_to(self, id):
if not self.is_active():
raise mcore.ReadTooFar
ra = self.a.skip_to(id)
rb = self.b.skip_to(id)
if self.is_active():
rn = False
if self.a.id() != self.b.id():
rn = self._find_next()
return ra or rb or rn
def skip_to_quality(self, minquality):
a = self.a
b = self.b
minquality = minquality
skipped = 0
aq = a.block_quality()
bq = b.block_quality()
while a.is_active() and b.is_active() and aq + bq <= minquality:
if aq < bq:
# If the block quality of A is less than B, skip A ahead until
# it can contribute at least the balance of the required min
# quality when added to B
sk = a.skip_to_quality(minquality - bq)
skipped += sk
if not sk and a.is_active():
# The matcher couldn't skip ahead for some reason, so just
# advance and try again
a.next()
else:
# And vice-versa
sk = b.skip_to_quality(minquality - aq)
skipped += sk
if not sk and b.is_active():
b.next()
if not a.is_active() or not b.is_active():
# One of the matchers is exhausted
break
if a.id() != b.id():
# We want to always leave in a state where the matchers are at
# the same document, so call _find_next() to sync them
self._find_next()
# Get the block qualities at the new matcher positions
aq = a.block_quality()
bq = b.block_quality()
return skipped
def next(self):
if not self.is_active():
raise mcore.ReadTooFar
# We must assume that the ids are equal whenever next() is called (they
# should have been made equal by _find_next), so advance them both
ar = self.a.next()
if self.is_active():
nr = self._find_next()
return ar or nr
def spans(self):
return sorted(set(self.a.spans()) | set(self.b.spans()))
class AndNotMatcher(BiMatcher):
"""Matches the postings in the first sub-matcher that are NOT present in
the second sub-matcher.
"""
def __init__(self, a, b):
super(AndNotMatcher, self).__init__(a, b)
self._find_first()
def reset(self):
self.a.reset()
self.b.reset()
self._find_first()
def _find_first(self):
if (self.a.is_active()
and self.b.is_active()
and self.a.id() == self.b.id()):
self._find_next()
def is_active(self):
return self.a.is_active()
def _find_next(self):
pos = self.a
neg = self.b
if not neg.is_active():
return
pos_id = pos.id()
r = False
if neg.id() < pos_id:
neg.skip_to(pos_id)
while pos.is_active() and neg.is_active() and pos_id == neg.id():
nr = pos.next()
if not pos.is_active():
break
r = r or nr
pos_id = pos.id()
neg.skip_to(pos_id)
return r
def supports_block_quality(self):
return self.a.supports_block_quality()
def replace(self, minquality=0):
if not self.a.is_active():
# The a matcher is required, so if it's inactive, return an
# inactive matcher
return mcore.NullMatcher()
elif (minquality
and self.a.max_quality() < minquality):
# If the quality of the required matcher isn't high enough to
# contribute, return an inactive matcher
return mcore.NullMatcher()
elif not self.b.is_active():
# If the prohibited matcher is inactive, convert to just the
# required matcher
return self.a.replace(minquality)
a = self.a.replace(minquality)
b = self.b.replace()
if a is not self.a or b is not self.b:
# If one of the sub-matchers was replaced, return a new AndNot
return self.__class__(a, b)
else:
return self
def max_quality(self):
return self.a.max_quality()
def block_quality(self):
return self.a.block_quality()
def skip_to_quality(self, minquality):
skipped = self.a.skip_to_quality(minquality)
self._find_next()
return skipped
def id(self):
return self.a.id()
def next(self):
if not self.a.is_active():
raise mcore.ReadTooFar
ar = self.a.next()
nr = False
if self.a.is_active() and self.b.is_active():
nr = self._find_next()
return ar or nr
def skip_to(self, id):
if not self.a.is_active():
raise mcore.ReadTooFar
if id < self.a.id():
return
self.a.skip_to(id)
if self.b.is_active():
self.b.skip_to(id)
self._find_next()
def weight(self):
return self.a.weight()
def score(self):
return self.a.score()
def supports(self, astype):
return self.a.supports(astype)
def value(self):
return self.a.value()
def value_as(self, astype):
return self.a.value_as(astype)
class AndMaybeMatcher(AdditiveBiMatcher):
"""Matches postings in the first sub-matcher, and if the same posting is
in the second sub-matcher, adds their scores.
"""
def __init__(self, a, b):
AdditiveBiMatcher.__init__(self, a, b)
self._first_b()
def reset(self):
self.a.reset()
self.b.reset()
self._first_b()
def _first_b(self):
a = self.a
b = self.b
if a.is_active() and b.is_active() and a.id() != b.id():
b.skip_to(a.id())
def is_active(self):
return self.a.is_active()
def id(self):
return self.a.id()
def next(self):
if not self.a.is_active():
raise mcore.ReadTooFar
ar = self.a.next()
br = False
if self.a.is_active() and self.b.is_active():
br = self.b.skip_to(self.a.id())
return ar or br
def skip_to(self, id):
if not self.a.is_active():
raise mcore.ReadTooFar
ra = self.a.skip_to(id)
rb = False
if self.a.is_active() and self.b.is_active():
rb = self.b.skip_to(id)
return ra or rb
def replace(self, minquality=0):
a = self.a
b = self.b
a_active = a.is_active()
b_active = b.is_active()
if not a_active:
return mcore.NullMatcher()
elif minquality and b_active:
if a.max_quality() + b.max_quality() < minquality:
# If the combined max quality of the sub-matchers isn't high
# enough to possibly contribute, return an inactive matcher
return mcore.NullMatcher()
elif a.max_quality() < minquality:
# If the max quality of the main sub-matcher isn't high enough
# to ever contribute without the optional sub- matcher, change
# into an IntersectionMatcher
return IntersectionMatcher(self.a, self.b)
elif not b_active:
return a.replace(minquality)
new_a = a.replace(minquality - b.max_quality())
new_b = b.replace(minquality - a.max_quality())
if new_a is not a or new_b is not b:
# If one of the sub-matchers changed, return a new AndMaybe
return self.__class__(new_a, new_b)
else:
return self
def skip_to_quality(self, minquality):
a = self.a
b = self.b
minquality = minquality
if not a.is_active():
raise mcore.ReadTooFar
if not b.is_active():
return a.skip_to_quality(minquality)
skipped = 0
aq = a.block_quality()
bq = b.block_quality()
while a.is_active() and b.is_active() and aq + bq <= minquality:
if aq < bq:
skipped += a.skip_to_quality(minquality - bq)
aq = a.block_quality()
else:
skipped += b.skip_to_quality(minquality - aq)
bq = b.block_quality()
return skipped
def weight(self):
if self.a.id() == self.b.id():
return self.a.weight() + self.b.weight()
else:
return self.a.weight()
def score(self):
if self.b.is_active() and self.a.id() == self.b.id():
return self.a.score() + self.b.score()
else:
return self.a.score()
def supports(self, astype):
return self.a.supports(astype)
def value(self):
return self.a.value()
def value_as(self, astype):
return self.a.value_as(astype)
|
kevin-coder/tensorflow-fork
|
refs/heads/master
|
tensorflow/contrib/distributions/python/ops/bijectors/batch_normalization.py
|
2
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batch Norm bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"BatchNormalization",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _undo_batch_normalization(x,
mean,
variance,
offset,
scale,
variance_epsilon,
name=None):
r"""Inverse of tf.nn.batch_normalization.
Args:
x: Input `Tensor` of arbitrary dimensionality.
mean: A mean `Tensor`.
variance: A variance `Tensor`.
offset: An offset `Tensor`, often denoted `beta` in equations, or
None. If present, will be added to the normalized tensor.
scale: A scale `Tensor`, often denoted `gamma` in equations, or
`None`. If present, the scale is applied to the normalized tensor.
variance_epsilon: A small `float` added to the minibatch `variance` to
prevent dividing by zero.
name: A name for this operation (optional).
Returns:
batch_unnormalized: The de-normalized, de-scaled, de-offset `Tensor`.
"""
with ops.name_scope(
name, "undo_batchnorm", [x, mean, variance, scale, offset]):
# inv = math_ops.rsqrt(variance + variance_epsilon)
# if scale is not None:
# inv *= scale
# return x * inv + (
# offset - mean * inv if offset is not None else -mean * inv)
rescale = math_ops.sqrt(variance + variance_epsilon)
if scale is not None:
rescale /= scale
batch_unnormalized = x * rescale + (
mean - offset * rescale if offset is not None else mean)
return batch_unnormalized
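# Sanity-check sketch (added for clarity, not part of the original module):
# tf.nn.batch_normalization computes (x - mean) / sqrt(variance + eps) * scale
# + offset, and the function above inverts that affine map, so for matching
# arguments
#   y = nn.batch_normalization(x, mean, variance, offset, scale, eps)
#   _undo_batch_normalization(y, mean, variance, offset, scale, eps)
# should recover x up to floating-point error.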
class BatchNormalization(bijector.Bijector):
"""Compute `Y = g(X) s.t. X = g^-1(Y) = (Y - mean(Y)) / std(Y)`.
Applies Batch Normalization [(Ioffe and Szegedy, 2015)][1] to samples from a
data distribution. This can be used to stabilize training of normalizing
flows ([Papamakarios et al., 2016][3]; [Dinh et al., 2017][2])
When training Deep Neural Networks (DNNs), it is common practice to
normalize or whiten features by shifting them to have zero mean and
scaling them to have unit variance.
The `inverse()` method of the `BatchNormalization` bijector, which is used in
the log-likelihood computation of data samples, implements the normalization
procedure (shift-and-scale) using the mean and standard deviation of the
current minibatch.
  Conversely, the `forward()` method of the bijector de-normalizes samples
  (e.g. `X*std(Y) + mean(Y)`) using the running-average mean and standard
  deviation computed at training time. De-normalization is useful for sampling.
```python
dist = tfd.TransformedDistribution(
      distribution=tfd.Normal(),
bijector=tfb.BatchNorm())
y = tfd.MultivariateNormalDiag(loc=1., scale=2.).sample(100) # ~ N(1, 2)
x = dist.bijector.inverse(y) # ~ N(0, 1)
y = dist.sample() # ~ N(1, 2)
```
During training time, `BatchNorm.inverse` and `BatchNorm.forward` are not
guaranteed to be inverses of each other because `inverse(y)` uses statistics
of the current minibatch, while `forward(x)` uses running-average statistics
accumulated from training. In other words,
`BatchNorm.inverse(BatchNorm.forward(...))` and
`BatchNorm.forward(BatchNorm.inverse(...))` will be identical when
`training=False` but may be different when `training=True`.
#### References
[1]: Sergey Ioffe and Christian Szegedy. Batch Normalization: Accelerating
Deep Network Training by Reducing Internal Covariate Shift. In
_International Conference on Machine Learning_, 2015.
https://arxiv.org/abs/1502.03167
[2]: Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density Estimation
using Real NVP. In _International Conference on Learning
Representations_, 2017. https://arxiv.org/abs/1605.08803
[3]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
batchnorm_layer=None,
training=True,
validate_args=False,
name="batch_normalization"):
"""Instantiates the `BatchNorm` bijector.
Args:
batchnorm_layer: `tf.layers.BatchNormalization` layer object. If `None`,
defaults to
`tf.layers.BatchNormalization(gamma_constraint=nn_ops.relu(x) + 1e-6)`.
This ensures positivity of the scale variable.
training: If True, updates running-average statistics during call to
`inverse()`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: If bn_layer is not an instance of
`tf.layers.BatchNormalization`, or if it is specified with `renorm=True`
or a virtual batch size.
"""
# Scale must be positive.
g_constraint = lambda x: nn.relu(x) + 1e-6
self.batchnorm = batchnorm_layer or normalization.BatchNormalization(
gamma_constraint=g_constraint)
self._validate_bn_layer(self.batchnorm)
self._training = training
if isinstance(self.batchnorm.axis, int):
forward_min_event_ndims = 1
else:
forward_min_event_ndims = len(self.batchnorm.axis)
super(BatchNormalization, self).__init__(
forward_min_event_ndims=forward_min_event_ndims,
validate_args=validate_args, name=name)
def _validate_bn_layer(self, layer):
"""Check for valid BatchNormalization layer.
Args:
layer: Instance of `tf.layers.BatchNormalization`.
Raises:
ValueError: If batchnorm_layer argument is not an instance of
`tf.layers.BatchNormalization`, or if `batchnorm_layer.renorm=True` or
if `batchnorm_layer.virtual_batch_size` is specified.
"""
if not isinstance(layer, normalization.BatchNormalization):
raise ValueError(
"batchnorm_layer must be an instance of BatchNormalization layer.")
if layer.renorm:
raise ValueError("BatchNorm Bijector does not support renormalization.")
if layer.virtual_batch_size:
raise ValueError(
"BatchNorm Bijector does not support virtual batch sizes.")
def _get_broadcast_fn(self, x):
# Compute shape to broadcast scale/shift parameters to.
if not x.shape.is_fully_defined():
raise ValueError("Input must have shape known at graph construction.")
input_shape = np.int32(x.shape.as_list())
ndims = len(input_shape)
reduction_axes = [i for i in range(ndims) if i not in self.batchnorm.axis]
# Broadcasting only necessary for single-axis batch norm where the axis is
# not the last dimension
broadcast_shape = [1] * ndims
broadcast_shape[self.batchnorm.axis[0]] = (
input_shape[self.batchnorm.axis[0]])
def _broadcast(v):
if (v is not None and
len(v.get_shape()) != ndims and
reduction_axes != list(range(ndims - 1))):
return array_ops.reshape(v, broadcast_shape)
return v
return _broadcast
def _normalize(self, y):
return self.batchnorm.apply(y, training=self._training)
def _de_normalize(self, x):
# Uses the saved statistics.
if not self.batchnorm.built:
input_shape = x.get_shape()
self.batchnorm.build(input_shape)
broadcast_fn = self._get_broadcast_fn(x)
mean = broadcast_fn(self.batchnorm.moving_mean)
variance = broadcast_fn(self.batchnorm.moving_variance)
beta = broadcast_fn(self.batchnorm.beta) if self.batchnorm.center else None
gamma = broadcast_fn(self.batchnorm.gamma) if self.batchnorm.scale else None
return _undo_batch_normalization(
x, mean, variance, beta, gamma, self.batchnorm.epsilon)
def _forward(self, x):
return self._de_normalize(x)
def _inverse(self, y):
return self._normalize(y)
def _forward_log_det_jacobian(self, x):
# Uses saved statistics to compute volume distortion.
return -self._inverse_log_det_jacobian(x, use_saved_statistics=True)
def _inverse_log_det_jacobian(self, y, use_saved_statistics=False):
if not y.shape.is_fully_defined():
raise ValueError("Input must have shape known at graph construction.")
input_shape = np.int32(y.shape.as_list())
if not self.batchnorm.built:
# Create variables.
self.batchnorm.build(input_shape)
event_dims = self.batchnorm.axis
reduction_axes = [i for i in range(len(input_shape)) if i not in event_dims]
if use_saved_statistics or not self._training:
log_variance = math_ops.log(
self.batchnorm.moving_variance + self.batchnorm.epsilon)
else:
# At training-time, ildj is computed from the mean and log-variance across
# the current minibatch.
_, v = nn.moments(y, axes=reduction_axes, keepdims=True)
log_variance = math_ops.log(v + self.batchnorm.epsilon)
# `gamma` and `log Var(y)` reductions over event_dims.
# Log(total change in area from gamma term).
log_total_gamma = math_ops.reduce_sum(math_ops.log(self.batchnorm.gamma))
# Log(total change in area from log-variance term).
log_total_variance = math_ops.reduce_sum(log_variance)
    # The ildj is scalar, as it does not depend on the values of x and is
    # constant across minibatch elements.
return log_total_gamma - 0.5 * log_total_variance
|