| repo_name | ref | path | copies | content |
|---|---|---|---|---|
dongyoungy/dbseer_middleware
|
refs/heads/master
|
rs-sysmon2/plugins/dstat_top_childwait.py
|
5
|
### Dstat most expensive process plugin
### Displays the name of the most expensive process
###
### Authority: dag@wieers.com
global cpunr
class dstat_plugin(dstat):
def __init__(self):
self.name = 'most waiting for'
self.vars = ('child process',)
self.type = 's'
self.width = 16
self.scale = 0
def extract(self):
self.set2 = {}
self.val['max'] = 0.0
for pid in proc_pidlist():
try:
### Using dopen() will cause too many open files
l = proc_splitline('/proc/%s/stat' % pid)
except IOError:
continue
if len(l) < 15: continue
### Reset previous value if it doesn't exist
if not self.set1.has_key(pid):
self.set1[pid] = 0
self.set2[pid] = int(l[15]) + int(l[16])
usage = (self.set2[pid] - self.set1[pid]) * 1.0 / elapsed / cpunr
### Is it a new topper?
if usage <= self.val['max']: continue
self.val['max'] = usage
self.val['name'] = getnamebypid(pid, l[1][1:-1])
self.val['pid'] = pid
### Debug (show PID)
# self.val['process'] = '%*s %-*s' % (5, self.val['pid'], self.width-6, self.val['name'])
if step == op.delay:
self.set1 = self.set2
def show(self):
if self.val['max'] == 0.0:
return '%-*s' % (self.width, '')
else:
return '%s%-*s%s' % (theme['default'], self.width-3, self.val['name'][0:self.width-3], cprint(self.val['max'], 'p', 3, 34))
def showcsv(self):
return '%s / %d%%' % (self.val['name'], self.val['max'])
# vim:ts=4:sw=4:et
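### The plugin above sums the cutime and cstime fields of /proc/<pid>/stat to
### measure time spent waiting for child processes. Below is a minimal
### standalone sketch of that extraction (an illustration, not part of dstat);
### field positions follow proc(5), and like the plugin it misparses command
### names containing spaces because of the naive split().
import os

def child_wait_ticks(pid):
    with open('/proc/%s/stat' % pid) as statfile:
        fields = statfile.read().split()
    if len(fields) < 17:
        return 0
    # fields[13]/fields[14] are utime/stime; fields[15]/fields[16] are
    # cutime/cstime, the clock ticks spent in waited-for children.
    return int(fields[15]) + int(fields[16])

print(child_wait_ticks(os.getpid()))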
|
iver333/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/error_handlers_unittest.py
|
122
|
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for error_handlers.py."""
import unittest2 as unittest
from checker import StyleProcessorConfiguration
from error_handlers import DefaultStyleErrorHandler
from filter import FilterConfiguration
class DefaultStyleErrorHandlerTest(unittest.TestCase):
"""Tests the DefaultStyleErrorHandler class."""
def setUp(self):
self._error_messages = []
self._error_count = 0
_category = "whitespace/tab"
"""The category name for the tests in this class."""
_file_path = "foo.h"
"""The file path for the tests in this class."""
def _mock_increment_error_count(self):
self._error_count += 1
def _mock_stderr_write(self, message):
self._error_messages.append(message)
def _style_checker_configuration(self):
"""Return a StyleProcessorConfiguration instance for testing."""
base_rules = ["-whitespace", "+whitespace/tab"]
filter_configuration = FilterConfiguration(base_rules=base_rules)
return StyleProcessorConfiguration(
filter_configuration=filter_configuration,
max_reports_per_category={"whitespace/tab": 2},
min_confidence=3,
output_format="vs7",
stderr_write=self._mock_stderr_write)
def _error_handler(self, configuration, line_numbers=None):
return DefaultStyleErrorHandler(configuration=configuration,
file_path=self._file_path,
increment_error_count=self._mock_increment_error_count,
line_numbers=line_numbers)
def _check_initialized(self):
"""Check that count and error messages are initialized."""
self.assertEqual(0, self._error_count)
self.assertEqual(0, len(self._error_messages))
def _call_error_handler(self, handle_error, confidence, line_number=100):
"""Call the given error handler with a test error."""
handle_error(line_number=line_number,
category=self._category,
confidence=confidence,
message="message")
def test_eq__true_return_value(self):
"""Test the __eq__() method for the return value of True."""
handler1 = self._error_handler(configuration=None)
handler2 = self._error_handler(configuration=None)
self.assertTrue(handler1.__eq__(handler2))
def test_eq__false_return_value(self):
"""Test the __eq__() method for the return value of False."""
def make_handler(configuration=self._style_checker_configuration(),
file_path='foo.txt', increment_error_count=lambda: True,
line_numbers=[100]):
return DefaultStyleErrorHandler(configuration=configuration,
file_path=file_path,
increment_error_count=increment_error_count,
line_numbers=line_numbers)
handler = make_handler()
# Establish a baseline for our comparisons below.
self.assertTrue(handler.__eq__(make_handler()))
# Verify that a difference in any argument causes equality to fail.
self.assertFalse(handler.__eq__(make_handler(configuration=None)))
self.assertFalse(handler.__eq__(make_handler(file_path='bar.txt')))
self.assertFalse(handler.__eq__(make_handler(increment_error_count=None)))
self.assertFalse(handler.__eq__(make_handler(line_numbers=[50])))
def test_ne(self):
"""Test the __ne__() method."""
# By default, __ne__ always returns true on different objects.
# Thus, check just the distinguishing case to verify that the
# code defines __ne__.
handler1 = self._error_handler(configuration=None)
handler2 = self._error_handler(configuration=None)
self.assertFalse(handler1.__ne__(handler2))
def test_non_reportable_error(self):
"""Test __call__() with a non-reportable error."""
self._check_initialized()
configuration = self._style_checker_configuration()
confidence = 1
# Confirm the error is not reportable.
self.assertFalse(configuration.is_reportable(self._category,
confidence,
self._file_path))
error_handler = self._error_handler(configuration)
self._call_error_handler(error_handler, confidence)
self.assertEqual(0, self._error_count)
self.assertEqual([], self._error_messages)
# Also serves as a reportable error test.
def test_max_reports_per_category(self):
"""Test error report suppression in __call__() method."""
self._check_initialized()
configuration = self._style_checker_configuration()
error_handler = self._error_handler(configuration)
confidence = 5
# First call: usual reporting.
self._call_error_handler(error_handler, confidence)
self.assertEqual(1, self._error_count)
self.assertEqual(1, len(self._error_messages))
self.assertEqual(self._error_messages,
["foo.h(100): message [whitespace/tab] [5]\n"])
# Second call: suppression message reported.
self._call_error_handler(error_handler, confidence)
# The "Suppressing further..." message counts as an additional
# message (but not as an addition to the error count).
self.assertEqual(2, self._error_count)
self.assertEqual(3, len(self._error_messages))
self.assertEqual(self._error_messages[-2],
"foo.h(100): message [whitespace/tab] [5]\n")
self.assertEqual(self._error_messages[-1],
"Suppressing further [whitespace/tab] reports "
"for this file.\n")
# Third call: no report.
self._call_error_handler(error_handler, confidence)
self.assertEqual(3, self._error_count)
self.assertEqual(3, len(self._error_messages))
def test_line_numbers(self):
"""Test the line_numbers parameter."""
self._check_initialized()
configuration = self._style_checker_configuration()
error_handler = self._error_handler(configuration,
line_numbers=[50])
confidence = 5
# Error on non-modified line: no error.
self._call_error_handler(error_handler, confidence, line_number=60)
self.assertEqual(0, self._error_count)
self.assertEqual([], self._error_messages)
# Error on modified line: error.
self._call_error_handler(error_handler, confidence, line_number=50)
self.assertEqual(1, self._error_count)
self.assertEqual(self._error_messages,
["foo.h(50): message [whitespace/tab] [5]\n"])
# Error on non-modified line after turning off line filtering: error.
error_handler.turn_off_line_filtering()
self._call_error_handler(error_handler, confidence, line_number=60)
self.assertEqual(2, self._error_count)
self.assertEqual(self._error_messages,
['foo.h(50): message [whitespace/tab] [5]\n',
'foo.h(60): message [whitespace/tab] [5]\n',
'Suppressing further [whitespace/tab] reports for this file.\n'])
|
antoinecarme/pyaf
|
refs/heads/master
|
tests/model_control/detailed/transf_None/model_control_one_enabled_None_MovingMedian_Seasonal_DayOfMonth_LSTM.py
|
1
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['MovingMedian'] , ['Seasonal_DayOfMonth'] , ['LSTM'] );
|
aasoliz/Bitcoin-Statistics
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sqlalchemy/event/__init__.py
|
55
|
# event/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .api import CANCEL, NO_RETVAL, listen, listens_for, remove, contains
from .base import Events, dispatcher
from .attr import RefCollection
from .legacy import _legacy_signature
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.7/Lib/test/test_struct.py
|
3
|
import os
import array
import unittest
import struct
import inspect
from test.test_support import run_unittest, check_warnings, check_py3k_warnings
import sys
ISBIGENDIAN = sys.byteorder == "big"
IS32BIT = sys.maxsize == 0x7fffffff
integer_codes = 'b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'Q'
testmod_filename = os.path.splitext(__file__)[0] + '.py'
# Native 'q' packing isn't available on systems that don't have the C
# long long type.
try:
struct.pack('q', 5)
except struct.error:
HAVE_LONG_LONG = False
else:
HAVE_LONG_LONG = True
def string_reverse(s):
return "".join(reversed(s))
def bigendian_to_native(value):
if ISBIGENDIAN:
return value
else:
return string_reverse(value)
class StructTest(unittest.TestCase):
def check_float_coerce(self, format, number):
# SF bug 1530559. struct.pack raises TypeError where it used
# to convert.
with check_warnings((".*integer argument expected, got float",
DeprecationWarning)) as w:
got = struct.pack(format, number)
lineno = inspect.currentframe().f_lineno - 1
self.assertEqual(w.filename, testmod_filename)
self.assertEqual(w.lineno, lineno)
self.assertEqual(len(w.warnings), 1)
expected = struct.pack(format, int(number))
self.assertEqual(got, expected)
def test_isbigendian(self):
self.assertEqual((struct.pack('=i', 1)[0] == chr(0)), ISBIGENDIAN)
def test_consistence(self):
self.assertRaises(struct.error, struct.calcsize, 'Z')
sz = struct.calcsize('i')
self.assertEqual(sz * 3, struct.calcsize('iii'))
fmt = 'cbxxxxxxhhhhiillffd?'
fmt3 = '3c3b18x12h6i6l6f3d3?'
sz = struct.calcsize(fmt)
sz3 = struct.calcsize(fmt3)
self.assertEqual(sz * 3, sz3)
self.assertRaises(struct.error, struct.pack, 'iii', 3)
self.assertRaises(struct.error, struct.pack, 'i', 3, 3, 3)
self.assertRaises(struct.error, struct.pack, 'i', 'foo')
self.assertRaises(struct.error, struct.pack, 'P', 'foo')
self.assertRaises(struct.error, struct.unpack, 'd', 'flap')
s = struct.pack('ii', 1, 2)
self.assertRaises(struct.error, struct.unpack, 'iii', s)
self.assertRaises(struct.error, struct.unpack, 'i', s)
def test_transitiveness(self):
c = 'a'
b = 1
h = 255
i = 65535
l = 65536
f = 3.1415
d = 3.1415
t = True
for prefix in ('', '@', '<', '>', '=', '!'):
for format in ('xcbhilfd?', 'xcBHILfd?'):
format = prefix + format
s = struct.pack(format, c, b, h, i, l, f, d, t)
cp, bp, hp, ip, lp, fp, dp, tp = struct.unpack(format, s)
self.assertEqual(cp, c)
self.assertEqual(bp, b)
self.assertEqual(hp, h)
self.assertEqual(ip, i)
self.assertEqual(lp, l)
self.assertEqual(int(100 * fp), int(100 * f))
self.assertEqual(int(100 * dp), int(100 * d))
self.assertEqual(tp, t)
def test_new_features(self):
# Test some of the new features in detail
# (format, argument, big-endian result, little-endian result, asymmetric)
tests = [
('c', 'a', 'a', 'a', 0),
('xc', 'a', '\0a', '\0a', 0),
('cx', 'a', 'a\0', 'a\0', 0),
('s', 'a', 'a', 'a', 0),
('0s', 'helloworld', '', '', 1),
('1s', 'helloworld', 'h', 'h', 1),
('9s', 'helloworld', 'helloworl', 'helloworl', 1),
('10s', 'helloworld', 'helloworld', 'helloworld', 0),
('11s', 'helloworld', 'helloworld\0', 'helloworld\0', 1),
('20s', 'helloworld', 'helloworld'+10*'\0', 'helloworld'+10*'\0', 1),
('b', 7, '\7', '\7', 0),
('b', -7, '\371', '\371', 0),
('B', 7, '\7', '\7', 0),
('B', 249, '\371', '\371', 0),
('h', 700, '\002\274', '\274\002', 0),
('h', -700, '\375D', 'D\375', 0),
('H', 700, '\002\274', '\274\002', 0),
('H', 0x10000-700, '\375D', 'D\375', 0),
('i', 70000000, '\004,\035\200', '\200\035,\004', 0),
('i', -70000000, '\373\323\342\200', '\200\342\323\373', 0),
('I', 70000000L, '\004,\035\200', '\200\035,\004', 0),
('I', 0x100000000L-70000000, '\373\323\342\200', '\200\342\323\373', 0),
('l', 70000000, '\004,\035\200', '\200\035,\004', 0),
('l', -70000000, '\373\323\342\200', '\200\342\323\373', 0),
('L', 70000000L, '\004,\035\200', '\200\035,\004', 0),
('L', 0x100000000L-70000000, '\373\323\342\200', '\200\342\323\373', 0),
('f', 2.0, '@\000\000\000', '\000\000\000@', 0),
('d', 2.0, '@\000\000\000\000\000\000\000',
'\000\000\000\000\000\000\000@', 0),
('f', -2.0, '\300\000\000\000', '\000\000\000\300', 0),
('d', -2.0, '\300\000\000\000\000\000\000\000',
'\000\000\000\000\000\000\000\300', 0),
('?', 0, '\0', '\0', 0),
('?', 3, '\1', '\1', 1),
('?', True, '\1', '\1', 0),
('?', [], '\0', '\0', 1),
('?', (1,), '\1', '\1', 1),
]
for fmt, arg, big, lil, asy in tests:
for (xfmt, exp) in [('>'+fmt, big), ('!'+fmt, big), ('<'+fmt, lil),
('='+fmt, ISBIGENDIAN and big or lil)]:
res = struct.pack(xfmt, arg)
self.assertEqual(res, exp)
self.assertEqual(struct.calcsize(xfmt), len(res))
rev = struct.unpack(xfmt, res)[0]
if rev != arg:
self.assertTrue(asy)
def test_calcsize(self):
expected_size = {
'b': 1, 'B': 1,
'h': 2, 'H': 2,
'i': 4, 'I': 4,
'l': 4, 'L': 4,
'q': 8, 'Q': 8,
}
# standard integer sizes
for code in integer_codes:
for byteorder in ('=', '<', '>', '!'):
format = byteorder+code
size = struct.calcsize(format)
self.assertEqual(size, expected_size[code])
# native integer sizes, except 'q' and 'Q'
for format_pair in ('bB', 'hH', 'iI', 'lL'):
for byteorder in ['', '@']:
signed_size = struct.calcsize(byteorder + format_pair[0])
unsigned_size = struct.calcsize(byteorder + format_pair[1])
self.assertEqual(signed_size, unsigned_size)
# bounds for native integer sizes
self.assertEqual(struct.calcsize('b'), 1)
self.assertLessEqual(2, struct.calcsize('h'))
self.assertLessEqual(4, struct.calcsize('l'))
self.assertLessEqual(struct.calcsize('h'), struct.calcsize('i'))
self.assertLessEqual(struct.calcsize('i'), struct.calcsize('l'))
# tests for native 'q' and 'Q' when applicable
if HAVE_LONG_LONG:
self.assertEqual(struct.calcsize('q'), struct.calcsize('Q'))
self.assertLessEqual(8, struct.calcsize('q'))
self.assertLessEqual(struct.calcsize('l'), struct.calcsize('q'))
def test_integers(self):
# Integer tests (bBhHiIlLqQ).
import binascii
class IntTester(unittest.TestCase):
def __init__(self, format):
super(IntTester, self).__init__(methodName='test_one')
self.format = format
self.code = format[-1]
self.direction = format[:-1]
if not self.direction in ('', '@', '=', '<', '>', '!'):
raise ValueError("unrecognized packing direction: %s" %
self.direction)
self.bytesize = struct.calcsize(format)
self.bitsize = self.bytesize * 8
if self.code in tuple('bhilq'):
self.signed = True
self.min_value = -(2L**(self.bitsize-1))
self.max_value = 2L**(self.bitsize-1) - 1
elif self.code in tuple('BHILQ'):
self.signed = False
self.min_value = 0
self.max_value = 2L**self.bitsize - 1
else:
raise ValueError("unrecognized format code: %s" %
self.code)
def test_one(self, x, pack=struct.pack,
unpack=struct.unpack,
unhexlify=binascii.unhexlify):
format = self.format
if self.min_value <= x <= self.max_value:
expected = long(x)
if self.signed and x < 0:
expected += 1L << self.bitsize
self.assertGreaterEqual(expected, 0)
expected = '%x' % expected
if len(expected) & 1:
expected = "0" + expected
expected = unhexlify(expected)
expected = ("\x00" * (self.bytesize - len(expected)) +
expected)
if (self.direction == '<' or
self.direction in ('', '@', '=') and not ISBIGENDIAN):
expected = string_reverse(expected)
self.assertEqual(len(expected), self.bytesize)
# Pack work?
got = pack(format, x)
self.assertEqual(got, expected)
# Unpack work?
retrieved = unpack(format, got)[0]
self.assertEqual(x, retrieved)
# Adding any byte should cause a "too big" error.
self.assertRaises((struct.error, TypeError), unpack, format,
'\x01' + got)
else:
# x is out of range -- verify pack realizes that.
self.assertRaises(struct.error, pack, format, x)
def run(self):
from random import randrange
# Create all interesting powers of 2.
values = []
for exp in range(self.bitsize + 3):
values.append(1L << exp)
# Add some random values.
for i in range(self.bitsize):
val = 0L
for j in range(self.bytesize):
val = (val << 8) | randrange(256)
values.append(val)
# Values absorbed from other tests
values.extend([300, 700000, sys.maxint*4])
# Try all those, and their negations, and +-1 from
# them. Note that this tests all power-of-2
# boundaries in range, and a few out of range, plus
# +-(2**n +- 1).
for base in values:
for val in -base, base:
for incr in -1, 0, 1:
x = val + incr
self.test_one(int(x))
self.test_one(long(x))
# Some error cases.
class NotAnIntNS(object):
def __int__(self):
return 42
def __long__(self):
return 1729L
class NotAnIntOS:
def __int__(self):
return 85
def __long__(self):
return -163L
# Objects with an '__index__' method should be allowed
# to pack as integers. That is assuming the implemented
# '__index__' method returns an 'int' or 'long'.
class Indexable(object):
def __init__(self, value):
self._value = value
def __index__(self):
return self._value
# If the '__index__' method raises a type error, then
# '__int__' should be used with a deprecation warning.
class BadIndex(object):
def __index__(self):
raise TypeError
def __int__(self):
return 42
self.assertRaises((TypeError, struct.error),
struct.pack, self.format,
"a string")
self.assertRaises((TypeError, struct.error),
struct.pack, self.format,
randrange)
with check_warnings(("integer argument expected, "
"got non-integer", DeprecationWarning)):
self.assertRaises((TypeError, struct.error),
struct.pack, self.format,
3+42j)
# an attempt to convert a non-integer (with an
# implicit conversion via __int__) should succeed,
# with a DeprecationWarning
for nonint in NotAnIntNS(), NotAnIntOS(), BadIndex():
with check_warnings((".*integer argument expected, got non"
"-integer", DeprecationWarning)) as w:
got = struct.pack(self.format, nonint)
lineno = inspect.currentframe().f_lineno - 1
self.assertEqual(w.filename, testmod_filename)
self.assertEqual(w.lineno, lineno)
self.assertEqual(len(w.warnings), 1)
expected = struct.pack(self.format, int(nonint))
self.assertEqual(got, expected)
# Check for legitimate values from '__index__'.
for obj in (Indexable(0), Indexable(10), Indexable(17),
Indexable(42), Indexable(100), Indexable(127)):
try:
struct.pack(format, obj)
except:
self.fail("integer code pack failed on object "
"with '__index__' method")
# Check for bogus values from '__index__'.
for obj in (Indexable('a'), Indexable(u'b'), Indexable(None),
Indexable({'a': 1}), Indexable([1, 2, 3])):
self.assertRaises((TypeError, struct.error),
struct.pack, self.format,
obj)
byteorders = '', '@', '=', '<', '>', '!'
for code in integer_codes:
for byteorder in byteorders:
if (byteorder in ('', '@') and code in ('q', 'Q') and
not HAVE_LONG_LONG):
continue
format = byteorder+code
t = IntTester(format)
t.run()
def test_p_code(self):
# Test p ("Pascal string") code.
for code, input, expected, expectedback in [
('p','abc', '\x00', ''),
('1p', 'abc', '\x00', ''),
('2p', 'abc', '\x01a', 'a'),
('3p', 'abc', '\x02ab', 'ab'),
('4p', 'abc', '\x03abc', 'abc'),
('5p', 'abc', '\x03abc\x00', 'abc'),
('6p', 'abc', '\x03abc\x00\x00', 'abc'),
('1000p', 'x'*1000, '\xff' + 'x'*999, 'x'*255)]:
got = struct.pack(code, input)
self.assertEqual(got, expected)
(got,) = struct.unpack(code, got)
self.assertEqual(got, expectedback)
def test_705836(self):
# SF bug 705836. "<f" and ">f" had a severe rounding bug, where a carry
# from the low-order discarded bits could propagate into the exponent
# field, causing the result to be wrong by a factor of 2.
import math
for base in range(1, 33):
# smaller <- largest representable float less than base.
delta = 0.5
while base - delta / 2.0 != base:
delta /= 2.0
smaller = base - delta
# Packing this rounds away a solid string of trailing 1 bits.
packed = struct.pack("<f", smaller)
unpacked = struct.unpack("<f", packed)[0]
# This failed at base = 2, 4, and 32, with unpacked = 1, 2, and
# 16, respectively.
self.assertEqual(base, unpacked)
bigpacked = struct.pack(">f", smaller)
self.assertEqual(bigpacked, string_reverse(packed))
unpacked = struct.unpack(">f", bigpacked)[0]
self.assertEqual(base, unpacked)
# Largest finite IEEE single.
big = (1 << 24) - 1
big = math.ldexp(big, 127 - 23)
packed = struct.pack(">f", big)
unpacked = struct.unpack(">f", packed)[0]
self.assertEqual(big, unpacked)
# The same, but tack on a 1 bit so it rounds up to infinity.
big = (1 << 25) - 1
big = math.ldexp(big, 127 - 24)
self.assertRaises(OverflowError, struct.pack, ">f", big)
def test_1530559(self):
# SF bug 1530559. struct.pack raises TypeError where it used to convert.
for endian in ('', '>', '<'):
for fmt in integer_codes:
self.check_float_coerce(endian + fmt, 1.0)
self.check_float_coerce(endian + fmt, 1.5)
def test_unpack_from(self, cls=str):
data = cls('abcd01234')
fmt = '4s'
s = struct.Struct(fmt)
self.assertEqual(s.unpack_from(data), ('abcd',))
self.assertEqual(struct.unpack_from(fmt, data), ('abcd',))
for i in xrange(6):
self.assertEqual(s.unpack_from(data, i), (data[i:i+4],))
self.assertEqual(struct.unpack_from(fmt, data, i), (data[i:i+4],))
for i in xrange(6, len(data) + 1):
self.assertRaises(struct.error, s.unpack_from, data, i)
self.assertRaises(struct.error, struct.unpack_from, fmt, data, i)
def test_pack_into(self):
test_string = 'Reykjavik rocks, eow!'
writable_buf = array.array('c', ' '*100)
fmt = '21s'
s = struct.Struct(fmt)
# Test without offset
s.pack_into(writable_buf, 0, test_string)
from_buf = writable_buf.tostring()[:len(test_string)]
self.assertEqual(from_buf, test_string)
# Test with offset.
s.pack_into(writable_buf, 10, test_string)
from_buf = writable_buf.tostring()[:len(test_string)+10]
self.assertEqual(from_buf, test_string[:10] + test_string)
# Go beyond boundaries.
small_buf = array.array('c', ' '*10)
self.assertRaises(struct.error, s.pack_into, small_buf, 0, test_string)
self.assertRaises(struct.error, s.pack_into, small_buf, 2, test_string)
# Test bogus offset (issue 3694)
sb = small_buf
self.assertRaises(TypeError, struct.pack_into, b'1', sb, None)
def test_pack_into_fn(self):
test_string = 'Reykjavik rocks, eow!'
writable_buf = array.array('c', ' '*100)
fmt = '21s'
pack_into = lambda *args: struct.pack_into(fmt, *args)
# Test without offset.
pack_into(writable_buf, 0, test_string)
from_buf = writable_buf.tostring()[:len(test_string)]
self.assertEqual(from_buf, test_string)
# Test with offset.
pack_into(writable_buf, 10, test_string)
from_buf = writable_buf.tostring()[:len(test_string)+10]
self.assertEqual(from_buf, test_string[:10] + test_string)
# Go beyond boundaries.
small_buf = array.array('c', ' '*10)
self.assertRaises(struct.error, pack_into, small_buf, 0, test_string)
self.assertRaises(struct.error, pack_into, small_buf, 2, test_string)
def test_unpack_with_buffer(self):
with check_py3k_warnings(("buffer.. not supported in 3.x",
DeprecationWarning)):
# SF bug 1563759: struct.unpack doesn't support buffer protocol objects
data1 = array.array('B', '\x12\x34\x56\x78')
data2 = buffer('......\x12\x34\x56\x78......', 6, 4)
for data in [data1, data2]:
value, = struct.unpack('>I', data)
self.assertEqual(value, 0x12345678)
self.test_unpack_from(cls=buffer)
def test_bool(self):
for prefix in tuple("<>!=")+('',):
false = (), [], [], '', 0
true = [1], 'test', 5, -1, 0xffffffffL+1, 0xffffffff//2
falseFormat = prefix + '?' * len(false)
packedFalse = struct.pack(falseFormat, *false)
unpackedFalse = struct.unpack(falseFormat, packedFalse)
trueFormat = prefix + '?' * len(true)
packedTrue = struct.pack(trueFormat, *true)
unpackedTrue = struct.unpack(trueFormat, packedTrue)
self.assertEqual(len(true), len(unpackedTrue))
self.assertEqual(len(false), len(unpackedFalse))
for t in unpackedFalse:
self.assertFalse(t)
for t in unpackedTrue:
self.assertTrue(t)
packed = struct.pack(prefix+'?', 1)
self.assertEqual(len(packed), struct.calcsize(prefix+'?'))
if len(packed) != 1:
self.assertFalse(prefix, msg='encoded bool is not one byte: %r'
%packed)
for c in '\x01\x7f\xff\x0f\xf0':
self.assertTrue(struct.unpack('>?', c)[0])
@unittest.skipUnless(IS32BIT, "Specific to 32bit machines")
def test_crasher(self):
self.assertRaises(MemoryError, struct.pack, "357913941c", "a")
def test_count_overflow(self):
hugecount = '{}b'.format(sys.maxsize+1)
self.assertRaises(struct.error, struct.calcsize, hugecount)
hugecount2 = '{}b{}H'.format(sys.maxsize//2, sys.maxsize//2)
self.assertRaises(struct.error, struct.calcsize, hugecount2)
def test_main():
run_unittest(StructTest)
if __name__ == '__main__':
test_main()
|
temasek/android_external_chromium_org_third_party_WebKit
|
refs/heads/cm-12.1
|
Tools/Scripts/webkitpy/common/net/sheriff_calendar_unittest.py
|
59
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is based on code from:
# https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/tools/blink_roller/auto_roll_test.py
# Ideally we should share code between these.
from webkitpy.common.system.outputcapture import OutputCaptureTestCaseBase
import sheriff_calendar as calendar
class SheriffCalendarTest(OutputCaptureTestCaseBase):
def test_complete_email(self):
expected_emails = ['foo@chromium.org', 'bar@google.com', 'baz@chromium.org']
names = ['foo', 'bar@google.com', 'baz']
self.assertEqual(map(calendar._complete_email, names), expected_emails)
def test_emails(self):
expected_emails = ['foo@bar.com', 'baz@baz.com']
calendar._emails_from_url = lambda urls: expected_emails
self.assertEqual(calendar.current_gardener_emails(), expected_emails)
def _assert_parse(self, js_string, expected_emails):
self.assertEqual(calendar._names_from_sheriff_js(js_string), expected_emails)
def test_names_from_sheriff_js(self):
self._assert_parse('document.write(\'none (channel is sheriff)\')', [])
self._assert_parse('document.write(\'foo, bar\')', ['foo', 'bar'])
def test_email_regexp(self):
self.assertTrue(calendar._email_is_valid('somebody@example.com'))
self.assertTrue(calendar._email_is_valid('somebody@example.domain.com'))
self.assertTrue(calendar._email_is_valid('somebody@example-domain.com'))
self.assertTrue(calendar._email_is_valid('some.body@example.com'))
self.assertTrue(calendar._email_is_valid('some_body@example.com'))
self.assertTrue(calendar._email_is_valid('some+body@example.com'))
self.assertTrue(calendar._email_is_valid('some+body@com'))
self.assertTrue(calendar._email_is_valid('some/body@example.com'))
# These are valid according to the standard, but not supported here.
self.assertFalse(calendar._email_is_valid('some~body@example.com'))
self.assertFalse(calendar._email_is_valid('some!body@example.com'))
self.assertFalse(calendar._email_is_valid('some?body@example.com'))
self.assertFalse(calendar._email_is_valid('some" "body@example.com'))
self.assertFalse(calendar._email_is_valid('"{somebody}"@example.com'))
# Bogus.
self.assertFalse(calendar._email_is_valid('rm -rf /#@example.com'))
self.assertFalse(calendar._email_is_valid('some body@example.com'))
self.assertFalse(calendar._email_is_valid('[some body]@example.com'))
def test_filter_emails(self):
input_emails = ['foo@bar.com', 'baz@baz.com', 'bogus email @ !!!']
expected_emails = ['foo@bar.com', 'baz@baz.com']
self.assertEquals(calendar._filter_emails(input_emails), expected_emails)
self.assertStdout('WARNING: Not including bogus email @ !!! (invalid email address)\n')
|
sirikata/sirikata
|
refs/heads/master
|
tools/cdn/meshtool_regression.py
|
1
|
import list
import pprint
import sys
import os
import os.path
import urllib2
import shutil
from meshtool.filters import factory
from meshtool.filters.print_filters.print_render_info import getRenderInfo
import collada
import unicodedata
import math
import traceback
import itertools
from multiprocessing import Process, Pool
from multiprocessing import current_process
from Queue import Queue
from threading import Thread, Lock
SERVER = 'http://open3dhub.com'
DOWNLOAD = SERVER + '/download'
# This will set the number of workers equal to the number of CPUs in the system
NUM_PROCS = None
def decode(str):
return unicodedata.normalize('NFKD', str).encode('ascii','ignore')
def save_screenshot(mesh_file, screenshot_file, zip_filename=None):
screenshot = factory.getInstance('save_screenshot')
mesh = collada.Collada(mesh_file, zip_filename=zip_filename)
screenshot.apply(mesh, screenshot_file)
def process_file(base_dir, f):
current_process().daemon = False
base_name = decode(f['base_name'])
full_path = decode(f['full_path'])
zip_hash = decode(f['metadata']['types']['original']['zip'])
f_dir = full_path.replace('/','_')
f_dir = os.path.join(base_dir, f_dir)
if not os.path.isdir(f_dir):
os.mkdir(f_dir)
orig_zip = os.path.join(f_dir, base_name + '.orig.zip')
if not os.path.isfile(orig_zip):
f_data = urllib2.urlopen(DOWNLOAD + '/' + zip_hash).read()
orig_zip_file = open(orig_zip, 'wb')
orig_zip_file.write(f_data)
orig_zip_file.close()
optimizations = factory.getInstance('full_optimizations')
save = factory.getInstance('save_collada_zip')
orig_ss = orig_zip + '.png'
opt_zip = os.path.join(f_dir, base_name + '.opt.zip')
opt_ss = opt_zip + '.png'
mesh = collada.Collada(orig_zip, zip_filename=base_name)
orig_render_info = getRenderInfo(mesh)
if not os.path.isfile(orig_ss):
p = Process(target=save_screenshot, args=(orig_zip, orig_ss), kwargs={'zip_filename':base_name})
p.start()
p.join()
if not os.path.isfile(opt_zip):
optimizations.apply(mesh)
save.apply(mesh, opt_zip)
mesh = None
optmesh = collada.Collada(opt_zip)
opt_render_info = getRenderInfo(optmesh)
optmesh = None
if not os.path.isfile(opt_ss):
p = Process(target=save_screenshot, args=(opt_zip, opt_ss))
p.start()
p.join()
orig_ss_copy = f_dir + '.orig.png'
opt_ss_copy = f_dir + '.opt.png'
if not os.path.isfile(orig_ss_copy):
shutil.copy2(orig_ss, orig_ss_copy)
if not os.path.isfile(opt_ss_copy):
shutil.copy2(opt_ss, opt_ss_copy)
orig_size = os.stat(orig_zip)[6]
opt_size = os.stat(opt_zip)[6]
return (f_dir, orig_size, opt_size, orig_render_info, opt_render_info)
def queue_worker():
while True:
args = queue.get()
try:
process_file(*args)
except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
queue.task_done()
def main():
if len(sys.argv) != 2 or not os.path.isdir(sys.argv[1]):
print 'usage: python meshtool_regression.py directory'
sys.exit(1)
base_dir = sys.argv[1]
diffs = []
cdn_list = list.grab_list(SERVER, 1000, '', '', 'dump')
pool = Pool(processes=NUM_PROCS)
results = []
for f in cdn_list:
results.append(pool.apply_async(process_file, args=(base_dir, f)))
for r in results:
r.wait()
print '\t'.join(['id',
'orig_size',
'orig_num_triangles',
'orig_num_draw_raw',
'orig_num_draw_with_instances',
'orig_num_draw_with_batching',
'orig_texture_ram',
'opt_size',
'opt_num_triangles',
'opt_num_draw_raw',
'opt_num_draw_with_instances',
'opt_num_draw_with_batching',
'opt_texture_ram'])
for r in results:
unique_id, orig_size, opt_size, orig_render_info, opt_render_info = r.get()
print '\t'.join(map(str,[
unique_id,
orig_size,
orig_render_info['num_triangles'],
orig_render_info['num_draw_raw'],
orig_render_info['num_draw_with_instances'],
orig_render_info['num_draw_with_batching'],
orig_render_info['texture_ram'],
opt_size,
opt_render_info['num_triangles'],
opt_render_info['num_draw_raw'],
opt_render_info['num_draw_with_instances'],
opt_render_info['num_draw_with_batching'],
opt_render_info['texture_ram']
]))
if __name__ == "__main__":
main()
|
ArvinDevel/incubator-pulsar
|
refs/heads/master
|
pulsar-functions/instance/src/main/python/contextimpl.py
|
1
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
"""contextimpl.py: ContextImpl class that implements the Context interface
"""
import time
import os
import json
import pulsar
import util
from prometheus_client import Summary
from function_stats import Stats
class ContextImpl(pulsar.Context):
# add label to indicate user metric
user_metrics_label_names = Stats.metrics_label_names + ["metric"]
def __init__(self, instance_config, logger, pulsar_client, user_code, consumers, secrets_provider, metrics_labels):
self.instance_config = instance_config
self.log = logger
self.pulsar_client = pulsar_client
self.user_code_dir = os.path.dirname(user_code)
self.consumers = consumers
self.secrets_provider = secrets_provider
self.publish_producers = {}
self.publish_serializers = {}
self.current_message_id = None
self.current_input_topic_name = None
self.current_start_time = None
self.user_config = json.loads(instance_config.function_details.userConfig) \
if instance_config.function_details.userConfig \
else []
self.secrets_map = json.loads(instance_config.function_details.secretsMap) \
if instance_config.function_details.secretsMap \
else {}
self.metrics_labels = metrics_labels
self.user_metrics_labels = dict()
self.user_metrics_summary = Summary("pulsar_function_user_metric",
'Pulsar Function user defined metric',
ContextImpl.user_metrics_label_names)
# Called on a per-message basis to set the context for the current message
def set_current_message_context(self, msgid, topic):
self.current_message_id = msgid
self.current_input_topic_name = topic
self.current_start_time = time.time()
def get_message_id(self):
return self.current_message_id
def get_current_message_topic_name(self):
return self.current_input_topic_name
def get_function_name(self):
return self.instance_config.function_details.name
def get_function_tenant(self):
return self.instance_config.function_details.tenant
def get_function_namespace(self):
return self.instance_config.function_details.namespace
def get_function_id(self):
return self.instance_config.function_id
def get_instance_id(self):
return self.instance_config.instance_id
def get_function_version(self):
return self.instance_config.function_version
def get_logger(self):
return self.log
def get_user_config_value(self, key):
if key in self.user_config:
return self.user_config[key]
else:
return None
def get_user_config_map(self):
return self.user_config
def get_secret(self, secret_key):
if not secret_key in self.secrets_map:
return None
return self.secrets_provider.provide_secret(secret_key, self.secrets_map[secret_key])
def record_metric(self, metric_name, metric_value):
if metric_name not in self.user_metrics_labels:
self.user_metrics_labels[metric_name] = self.metrics_labels + [metric_name]
self.user_metrics_summary.labels(*self.user_metrics_labels[metric_name]).observe(metric_value)
def get_output_topic(self):
return self.instance_config.function_details.output
def get_output_serde_class_name(self):
return self.instance_config.function_details.outputSerdeClassName
def publish(self, topic_name, message, serde_class_name="serde.IdentitySerDe", properties=None, compression_type=None):
# Just make sure that user supplied values are properly typed
topic_name = str(topic_name)
serde_class_name = str(serde_class_name)
pulsar_compression_type = pulsar._pulsar.CompressionType.NONE
if compression_type is not None:
pulsar_compression_type = compression_type
if topic_name not in self.publish_producers:
self.publish_producers[topic_name] = self.pulsar_client.create_producer(
topic_name,
block_if_queue_full=True,
batching_enabled=True,
batching_max_publish_delay_ms=1,
max_pending_messages=100000,
compression_type=pulsar_compression_type
)
if serde_class_name not in self.publish_serializers:
serde_klass = util.import_class(self.user_code_dir, serde_class_name)
self.publish_serializers[serde_class_name] = serde_klass()
output_bytes = bytes(self.publish_serializers[serde_class_name].serialize(message))
self.publish_producers[topic_name].send_async(output_bytes, None, properties=properties)
def ack(self, msgid, topic):
if topic not in self.consumers:
raise ValueError('Invalid topicname %s' % topic)
self.consumers[topic].acknowledge(msgid)
def get_and_reset_metrics(self):
metrics = self.get_metrics()
# TODO(sanjeev):- Make this thread safe
self.reset_metrics()
return metrics
def reset_metrics(self):
# TODO: Make it thread safe
for labels in self.user_metrics_labels.values():
self.user_metrics_summary.labels(*labels)._sum.set(0.0)
self.user_metrics_summary.labels(*labels)._count.set(0.0)
def get_metrics(self):
metrics_map = {}
for metric_name, metric_labels in self.user_metrics_labels.items():
metrics_map["%s%s_sum" % (Stats.USER_METRIC_PREFIX, metric_name)] = self.user_metrics_summary.labels(*metric_labels)._sum.get()
metrics_map["%s%s_count" % (Stats.USER_METRIC_PREFIX, metric_name)] = self.user_metrics_summary.labels(*metric_labels)._count.get()
return metrics_map
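# A hedged usage sketch (not part of this module; the process() function and
# metric name are assumptions): inside a Pulsar Function, the context object
# built from this class exposes record_metric() and the getters above.
#
#   def process(input, context):
#       context.get_logger().info("message from %s",
#                                 context.get_current_message_topic_name())
#       context.record_metric('processed', 1)  # exported via the user metric Summary
#       return input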
|
newerthcom/savagerebirth
|
refs/heads/master
|
libs/python-2.72/Demo/scripts/lpwatch.py
|
10
|
#! /usr/bin/env python
# Watch line printer queue(s).
# Intended for BSD 4.3 lpq.
import os
import sys
import time
DEF_PRINTER = 'psc'
DEF_DELAY = 10
def main():
delay = DEF_DELAY # XXX Use getopt() later
try:
thisuser = os.environ['LOGNAME']
except:
thisuser = os.environ['USER']
printers = sys.argv[1:]
if printers:
# Strip '-P' from printer names just in case
# the user specified it...
for i, name in enumerate(printers):
if name[:2] == '-P':
printers[i] = name[2:]
else:
if os.environ.has_key('PRINTER'):
printers = [os.environ['PRINTER']]
else:
printers = [DEF_PRINTER]
clearhome = os.popen('clear', 'r').read()
while True:
text = clearhome
for name in printers:
text += makestatus(name, thisuser) + '\n'
print text
time.sleep(delay)
def makestatus(name, thisuser):
pipe = os.popen('lpq -P' + name + ' 2>&1', 'r')
lines = []
users = {}
aheadbytes = 0
aheadjobs = 0
userseen = False
totalbytes = 0
totaljobs = 0
for line in pipe:
fields = line.split()
n = len(fields)
if len(fields) >= 6 and fields[n-1] == 'bytes':
rank, user, job = fields[0:3]
files = fields[3:-2]
bytes = int(fields[n-2])
if user == thisuser:
userseen = True
elif not userseen:
aheadbytes += bytes
aheadjobs += 1
totalbytes += bytes
totaljobs += 1
ujobs, ubytes = users.get(user, (0, 0))
ujobs += 1
ubytes += bytes
users[user] = ujobs, ubytes
else:
if fields and fields[0] != 'Rank':
line = line.strip()
if line == 'no entries':
line = name + ': idle'
elif line[-22:] == ' is ready and printing':
line = name
lines.append(line)
if totaljobs:
line = '%d K' % ((totalbytes+1023) // 1024)
if totaljobs != len(users):
line += ' (%d jobs)' % totaljobs
if len(users) == 1:
line += ' for %s' % (users.keys()[0],)
else:
line += ' for %d users' % len(users)
if userseen:
if aheadjobs == 0:
line += ' (%s first)' % thisuser
else:
line += ' (%d K before %s)' % (
(aheadbytes+1023) // 1024, thisuser)
lines.append(line)
sts = pipe.close()
if sts:
lines.append('lpq exit status %r' % (sts,))
return ': '.join(lines)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
toanalien/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
|
122
|
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.net.layouttestresults import LayoutTestResults
class UnableToApplyPatch(Exception):
def __init__(self, patch):
Exception.__init__(self)
self.patch = patch
class PatchAnalysisTaskDelegate(object):
def parent_command(self):
raise NotImplementedError("subclasses must implement")
def run_command(self, command):
raise NotImplementedError("subclasses must implement")
def command_passed(self, message, patch):
raise NotImplementedError("subclasses must implement")
def command_failed(self, message, script_error, patch):
raise NotImplementedError("subclasses must implement")
def refetch_patch(self, patch):
raise NotImplementedError("subclasses must implement")
def expected_failures(self):
raise NotImplementedError("subclasses must implement")
def test_results(self):
raise NotImplementedError("subclasses must implement")
def archive_last_test_results(self, patch):
raise NotImplementedError("subclasses must implement")
def build_style(self):
raise NotImplementedError("subclasses must implement")
# We could make results_archive optional, but for now it's required.
def report_flaky_tests(self, patch, flaky_tests, results_archive):
raise NotImplementedError("subclasses must implement")
class PatchAnalysisTask(object):
def __init__(self, delegate, patch):
self._delegate = delegate
self._patch = patch
self._script_error = None
self._results_archive_from_patch_test_run = None
self._results_from_patch_test_run = None
self._expected_failures = delegate.expected_failures()
def _run_command(self, command, success_message, failure_message):
try:
self._delegate.run_command(command)
self._delegate.command_passed(success_message, patch=self._patch)
return True
except ScriptError, e:
self._script_error = e
self.failure_status_id = self._delegate.command_failed(failure_message, script_error=self._script_error, patch=self._patch)
return False
def _clean(self):
return self._run_command([
"clean",
],
"Cleaned working directory",
"Unable to clean working directory")
def _update(self):
# FIXME: Ideally the status server log message should include which revision we updated to.
return self._run_command([
"update",
],
"Updated working directory",
"Unable to update working directory")
def _apply(self):
return self._run_command([
"apply-attachment",
"--no-update",
"--non-interactive",
self._patch.id(),
],
"Applied patch",
"Patch does not apply")
def _build(self):
return self._run_command([
"build",
"--no-clean",
"--no-update",
"--build-style=%s" % self._delegate.build_style(),
],
"Built patch",
"Patch does not build")
def _build_without_patch(self):
return self._run_command([
"build",
"--force-clean",
"--no-update",
"--build-style=%s" % self._delegate.build_style(),
],
"Able to build without patch",
"Unable to build without patch")
def _test(self):
return self._run_command([
"build-and-test",
"--no-clean",
"--no-update",
# Notice that we don't pass --build, which means we won't build!
"--test",
"--non-interactive",
],
"Passed tests",
"Patch does not pass tests")
def _build_and_test_without_patch(self):
return self._run_command([
"build-and-test",
"--force-clean",
"--no-update",
"--build",
"--test",
"--non-interactive",
],
"Able to pass tests without patch",
"Unable to pass tests without patch (tree is red?)")
def _land(self):
# Unclear if this should pass --quiet or not. If --parent-command always does the reporting, then it should.
return self._run_command([
"land-attachment",
"--force-clean",
"--non-interactive",
"--parent-command=" + self._delegate.parent_command(),
self._patch.id(),
],
"Landed patch",
"Unable to land patch")
def _report_flaky_tests(self, flaky_test_results, results_archive):
self._delegate.report_flaky_tests(self._patch, flaky_test_results, results_archive)
def _results_failed_different_tests(self, first, second):
first_failing_tests = [] if not first else first.failing_tests()
second_failing_tests = [] if not second else second.failing_tests()
return first_failing_tests != second_failing_tests
def _test_patch(self):
if self._test():
return True
# Note: archive_last_test_results deletes the results directory, making these calls order-sensitive.
# We could remove this dependency by building the test_results from the archive.
first_results = self._delegate.test_results()
first_results_archive = self._delegate.archive_last_test_results(self._patch)
first_script_error = self._script_error
first_failure_status_id = self.failure_status_id
if self._expected_failures.failures_were_expected(first_results):
return True
if self._test():
# Only report flaky tests if we were successful at parsing results.json and archiving results.
if first_results and first_results_archive:
self._report_flaky_tests(first_results.failing_test_results(), first_results_archive)
return True
second_results = self._delegate.test_results()
if self._results_failed_different_tests(first_results, second_results):
# We could report flaky tests here, but we would need to be careful
# to use similar checks to ExpectedFailures._can_trust_results
# to make sure we don't report constant failures as flakes when
# we happen to hit the --exit-after-N-failures limit.
# See https://bugs.webkit.org/show_bug.cgi?id=51272
return False
# Archive (and remove) second results so test_results() after
# build_and_test_without_patch won't use second results instead of the clean-tree results.
second_results_archive = self._delegate.archive_last_test_results(self._patch)
if self._build_and_test_without_patch():
# The error from the previous ._test() run is real, report it.
return self.report_failure(first_results_archive, first_results, first_script_error)
clean_tree_results = self._delegate.test_results()
self._expected_failures.update(clean_tree_results)
# Re-check if the original results are now to be expected to avoid a full re-try.
if self._expected_failures.failures_were_expected(first_results):
return True
# Now that we have updated information about failing tests with a clean checkout, we can
# tell if our original failures were unexpected and fail the patch if necessary.
if self._expected_failures.unexpected_failures_observed(first_results):
self.failure_status_id = first_failure_status_id
return self.report_failure(first_results_archive, first_results, first_script_error)
# We don't know what's going on. The tree is likely very red (beyond our layout-test-results
# failure limit), just keep retrying the patch until someone fixes the tree.
return False
def results_archive_from_patch_test_run(self, patch):
assert(self._patch.id() == patch.id()) # PatchAnalysisTask is not currently re-useable.
return self._results_archive_from_patch_test_run
def results_from_patch_test_run(self, patch):
assert(self._patch.id() == patch.id()) # PatchAnalysisTask is not currently re-useable.
return self._results_from_patch_test_run
def report_failure(self, results_archive=None, results=None, script_error=None):
if not self.validate():
return False
self._results_archive_from_patch_test_run = results_archive
self._results_from_patch_test_run = results
raise script_error or self._script_error
def validate(self):
raise NotImplementedError("subclasses must implement")
def run(self):
raise NotImplementedError("subclasses must implement")
|
Tchanders/socorro
|
refs/heads/master
|
webapp-django/crashstats/base/helpers.py
|
11
|
import cgi
import urllib
import jinja2
from jingo import register
@register.function
@jinja2.contextfunction
def change_query_string(context, **kwargs):
"""
Template function for modifying the current URL by parameters.
You use it like this in a template:
<a href={{ change_query_string(foo='bar') }}>
And it takes the current request's URL (and query string) and modifies it
just by the parameters you pass in. So if the current URL is
`/page/?day=1` the output of this will become:
<a href=/page?day=1&foo=bar>
You can also pass lists like this:
<a href={{ change_query_string(thing=['bar','foo']) }}>
And you get this output:
<a href=/page?day=1&thing=bar&thing=foo>
And if you want to remove a parameter you can explicitly pass it `None`.
Like this for example:
<a href={{ change_query_string(day=None) }}>
And you get this output:
<a href=/page>
"""
if kwargs.get('_no_base'):
kwargs.pop('_no_base')
base = ''
else:
base = context['request'].META['PATH_INFO']
qs = cgi.parse_qs(context['request'].META['QUERY_STRING'])
for key, value in kwargs.items():
if value is None:
# delete the parameter
if key in qs:
del qs[key]
else:
# change it
qs[key] = value
new_qs = urllib.urlencode(qs, True)
if new_qs:
return '%s?%s' % (base, new_qs)
return base
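# A minimal sketch (hypothetical values, stdlib only) of the query-string
# manipulation change_query_string() performs, outside the Jinja2/request
# context: parse_qs() yields a dict of lists and urlencode(..., True)
# serialises repeated parameters back out.
if __name__ == '__main__':
    demo_qs = cgi.parse_qs('day=1')
    demo_qs['foo'] = 'bar'        # add or override a parameter
    demo_qs.pop('day', None)      # passing None in the template removes it like this
    print '/page/?' + urllib.urlencode(demo_qs, True)   # -> /page/?foo=bar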
|
tamland/trakt-sync
|
refs/heads/master
|
models.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Thomas Amland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
class Movie(object):
def __init__(self, title, year, playcount, imdbid, xbmcid=None):
self.title = title
self.year = year
self.playcount = playcount
self.imdbid = imdbid
self.xbmcid = xbmcid
@property
def id(self):
return hash(self.imdbid)
def __repr__(self):
return b"Movie(%r)" % self.__dict__
class Episode(object):
def __init__(self, episode, season, tvdbid, playcount, xbmcid=None, title=None):
if not isinstance(tvdbid, unicode):
raise TypeError("Expected unicode was %s" % type(tvdbid))
self.episode = episode
self.season = season
self.tvdbid = tvdbid
self.title = title
self.xbmcid = xbmcid
self.playcount = playcount
@property
def id(self):
return hash((self.tvdbid, self.season, self.episode))
def __repr__(self):
return b"Episode(%r)" % self.__dict__
|
ebar0n/django
|
refs/heads/master
|
django/contrib/gis/sitemaps/kml.py
|
101
|
from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.sitemaps import Sitemap
from django.db import models
from django.urls import reverse
class KMLSitemap(Sitemap):
"""
A minimal hook to produce KML sitemaps.
"""
geo_format = 'kml'
def __init__(self, locations=None):
# If no locations specified, then we try to build for
# every model in installed applications.
self.locations = self._build_kml_sources(locations)
def _build_kml_sources(self, sources):
"""
Go through the given sources and return a 3-tuple of the application
label, module name, and field name of every GeometryField encountered
in the sources.
If no sources are provided, then all models.
"""
kml_sources = []
if sources is None:
sources = apps.get_models()
for source in sources:
if isinstance(source, models.base.ModelBase):
for field in source._meta.fields:
if isinstance(field, GeometryField):
kml_sources.append((source._meta.app_label,
source._meta.model_name,
field.name))
elif isinstance(source, (list, tuple)):
if len(source) != 3:
raise ValueError('Must specify a 3-tuple of (app_label, module_name, field_name).')
kml_sources.append(source)
else:
raise TypeError('KML Sources must be a model or a 3-tuple.')
return kml_sources
def get_urls(self, page=1, site=None, protocol=None):
"""
This method is overridden so the appropriate `geo_format` attribute
is placed on each URL element.
"""
urls = Sitemap.get_urls(self, page=page, site=site, protocol=protocol)
for url in urls:
url['geo_format'] = self.geo_format
return urls
def items(self):
return self.locations
def location(self, obj):
return reverse(
'django.contrib.gis.sitemaps.views.%s' % self.geo_format,
kwargs={
'label': obj[0],
'model': obj[1],
'field_name': obj[2],
},
)
class KMZSitemap(KMLSitemap):
geo_format = 'kmz'
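# A hedged usage sketch (the URL patterns and the ('world', 'city', 'point')
# source triple are assumptions): this would live in a project's urls.py and
# wires KMLSitemap up so that location() can reverse
# 'django.contrib.gis.sitemaps.views.kml'.
#
#   from django.contrib.gis.sitemaps import KMLSitemap
#   from django.contrib.gis.sitemaps.views import kml
#   from django.contrib.sitemaps.views import sitemap
#   from django.urls import path
#
#   sitemaps = {'kml': KMLSitemap([('world', 'city', 'point')])}
#   urlpatterns = [
#       path('sitemap.xml', sitemap, {'sitemaps': sitemaps}),
#       path('sitemaps/kml/<label>/<model>/<field_name>.kml', kml,
#            name='django.contrib.gis.sitemaps.views.kml'),
#   ]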
|
savoirfairelinux/odoo
|
refs/heads/master
|
setup/setup.py
|
21
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import glob, os, re, setuptools, sys
from os.path import join
# List all data files
def data():
r = {}
for root, dirnames, filenames in os.walk('openerp'):
for filename in filenames:
if not re.match(r'.*(\.pyc|\.pyo|\~)$', filename):
r.setdefault(root, []).append(os.path.join(root, filename))
if os.name == 'nt':
r["Microsoft.VC90.CRT"] = glob.glob('C:\Microsoft.VC90.CRT\*.*')
import babel
# Add data, but also some .py files py2exe won't include automatically.
# TODO This should probably go under `packages`, instead of `data`,
# but this will work fine (especially since we don't use the ZIP file
# approach).
r["babel/localedata"] = glob.glob(os.path.join(os.path.dirname(babel.__file__), "localedata", '*'))
others = ['global.dat', 'numbers.py', 'support.py', 'plural.py']
r["babel"] = map(lambda f: os.path.join(os.path.dirname(babel.__file__), f), others)
others = ['frontend.py', 'mofile.py']
r["babel/messages"] = map(lambda f: os.path.join(os.path.dirname(babel.__file__), "messages", f), others)
import pytz
tzdir = os.path.dirname(pytz.__file__)
for root, _, filenames in os.walk(os.path.join(tzdir, "zoneinfo")):
base = os.path.join('pytz', root[len(tzdir) + 1:])
r[base] = [os.path.join(root, f) for f in filenames]
import docutils
dudir = os.path.dirname(docutils.__file__)
for root, _, filenames in os.walk(dudir):
base = os.path.join('docutils', root[len(dudir) + 1:])
r[base] = [os.path.join(root, f) for f in filenames if not f.endswith(('.py', '.pyc', '.pyo'))]
return r.items()
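# Illustration only (not part of the original script): data() returns
# (directory, [filename, ...]) pairs in the shape expected by setuptools'
# data_files argument, e.g. ('pytz/zoneinfo/Europe', ['.../zoneinfo/Europe/Paris', ...]).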
def gen_manifest():
    # data() yields (directory, [files]) pairs; MANIFEST wants one path per line.
    file_list = "\n".join(f for _, files in data() for f in files)
    open('MANIFEST', 'w').write(file_list)
if os.name == 'nt':
sys.path.append("C:\Microsoft.VC90.CRT")
def py2exe_options():
if os.name == 'nt':
import py2exe
return {
"console" : [ { "script": "openerp-server", "icon_resources": [(1, join("install","openerp-icon.ico"))], },
{ "script": "openerp-gevent" },
{ "script": "odoo.py" },
],
'options' : {
"py2exe": {
"skip_archive": 1,
"optimize": 0, # keep the assert running, because the integrated tests rely on them.
"dist_dir": 'dist',
"packages": [
"HTMLParser",
"PIL",
"asynchat", "asyncore",
"commands",
"dateutil",
"decimal",
"docutils",
"email",
"encodings",
"imaplib",
"jinja2",
"lxml", "lxml._elementpath", "lxml.builder", "lxml.etree", "lxml.objectify",
"mako",
"markupsafe", # dependence of jinja2 and mako
"mock",
"openerp",
"poplib",
"psutil",
"pychart",
"pydot",
"pyparsing",
"pytz",
"reportlab",
"requests",
"select",
"simplejson",
"smtplib",
"uuid",
"vatnumber",
"vobject",
"win32service", "win32serviceutil",
"xlwt",
"xml", "xml.dom",
"yaml",
],
"excludes" : ["Tkconstants","Tkinter","tcl"],
}
}
}
else:
return {}
execfile(join(os.path.dirname(__file__), 'openerp', 'release.py'))
# Notes for OpenERP developers on Windows:
#
# To set up a Windows development environment, install Python 2.7, then pip,
# and use "pip install <dependency>" for every dependency listed below.
#
# Dependencies that require DLLs are not installable with pip install; for
# those we added comments with links to where you can find the installers.
#
# OpenERP on Windows also requires pywin32; the binary can be found at
# http://pywin32.sf.net
#
# Both the 32-bit and 64-bit builds of Python 2.7 are known to work.
setuptools.setup(
name = 'openerp',
version = version,
description = description,
long_description = long_desc,
url = url,
author = author,
author_email = author_email,
classifiers = filter(None, classifiers.split("\n")),
license = license,
scripts = ['openerp-server', 'openerp-gevent', 'odoo.py'],
data_files = data(),
packages = setuptools.find_packages(),
dependency_links = ['http://download.gna.org/pychart/'],
#include_package_data = True,
install_requires = [
'pychart', # not on pypi, use: pip install http://download.gna.org/pychart/PyChart-1.39.tar.gz
'babel >= 1.0',
'docutils',
'feedparser',
'gdata',
'gevent',
'psycogreen',
'Jinja2',
'lxml', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
'mako',
'mock',
'pillow', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
'psutil', # windows binary code.google.com/p/psutil/downloads/list
'psycopg2 >= 2.2',
'pydot',
'pyparsing < 2',
'pyserial',
'python-dateutil < 2',
'python-ldap', # optional
'python-openid',
'pytz',
'pyusb >= 1.0.0b1',
'pyyaml',
'qrcode',
'reportlab', # windows binary pypi.python.org/pypi/reportlab
'requests',
'simplejson',
'unittest2',
'vatnumber',
'vobject',
'werkzeug',
'xlwt',
],
extras_require = {
'SSL' : ['pyopenssl'],
},
tests_require = ['unittest2', 'mock'],
**py2exe_options()
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
wilvk/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/hiera.py
|
34
|
# (c) 2017, Juan Manuel Parrilla <jparrill@redhat.com>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author:
- Juan Manuel Parrilla (@jparrill)
lookup: hiera
version_added: "2.4"
short_description: get info from hiera data
requirements:
- hiera (command line utility)
description:
        - Retrieves data from a Puppetmaster node, using Hiera as an ENC
options:
_hiera_key:
description:
- The list of keys to lookup on the Puppetmaster
type: list
element_type: string
required: True
_bin_file:
description:
- Binary file to execute Hiera
default: '/usr/bin/hiera'
env:
- name: ANSIBLE_HIERA_BIN
_hierarchy_file:
description:
- File that describes the hierarchy of Hiera
default: '/etc/hiera.yaml'
env:
- name: ANSIBLE_HIERA_CFG
# FIXME: incomplete options .. _terms? environment/fqdn?
'''
EXAMPLES = """
# All these examples depend on a hiera.yml that describes the hierarchy
- name: "a value from Hiera 'DB'"
debug: msg={{ lookup('hiera', 'foo') }}
- name: "a value from a Hiera 'DB' on other environment"
debug: msg={{ lookup('hiera', 'foo environment=production') }}
- name: "a value from a Hiera 'DB' for a concrete node"
debug: msg={{ lookup('hiera', 'foo fqdn=puppet01.localdomain') }}
"""
RETURN = """
_raw:
description:
- a value associated with input key
type: strings
"""
import os
from ansible.plugins.lookup import LookupBase
from ansible.utils.cmd_functions import run_cmd
ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml')
ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera')
class Hiera(object):
def get(self, hiera_key):
pargs = [ANSIBLE_HIERA_BIN]
pargs.extend(['-c', ANSIBLE_HIERA_CFG])
pargs.extend(hiera_key)
rc, output, err = run_cmd("{0} -c {1} {2}".format(
ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0]))
return output.strip()
class LookupModule(LookupBase):
def run(self, terms, variables=''):
hiera = Hiera()
ret = []
ret.append(hiera.get(terms))
return ret
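# Usage sketch (illustration only): the lookup shells out to the hiera binary
# configured above, so calling it directly needs a working hiera setup; the key
# name below is made up.
#
#     hiera = Hiera()
#     hiera.get(['common::ntp_servers'])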
|
juhans/ardupilot
|
refs/heads/master
|
Tools/LogAnalyzer/tests/TestEmpty.py
|
346
|
from LogAnalyzer import Test,TestResult
import DataflashLog
class TestEmpty(Test):
'''test for empty or near-empty logs'''
def __init__(self):
Test.__init__(self)
self.name = "Empty"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
# all the logic for this test is in the helper function, as it can also be called up front as an early exit
emptyErr = DataflashLog.DataflashLogHelper.isLogEmpty(logdata)
if emptyErr:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Empty log? " + emptyErr
|
sanitz/django-census-example
|
refs/heads/master
|
census_site/settings.py
|
1
|
"""
Django settings for census_site project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&30j5r=s!dzz_f)ji$eh)rb#h9zrzm(73a#izvp71eq^kul'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'census',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'census_site.urls'
WSGI_APPLICATION = 'census_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
haridsv/pip
|
refs/heads/develop
|
pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py
|
2039
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
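# Minimal usage sketch (added for illustration; not part of the vendored module).
# It only exercises behaviour defined above: insertion order, LIFO popitem() and
# order-sensitive equality between two OrderedDict instances.
if __name__ == '__main__':
    od = OrderedDict([('a', 1), ('b', 2)])
    od['c'] = 3                                      # new keys go to the end of the linked list
    assert od.keys() == ['a', 'b', 'c']
    assert od.popitem() == ('c', 3)                  # LIFO by default
    assert od == OrderedDict([('a', 1), ('b', 2)])
    assert od != OrderedDict([('b', 2), ('a', 1)])   # order matters between two ODs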
|
oblique-labs/pyVM
|
refs/heads/master
|
rpython/translator/c/gc.py
|
1
|
import sys
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem.lltype import (RttiStruct,
RuntimeTypeInfo)
from rpython.translator.c.node import ContainerNode
from rpython.translator.c.support import cdecl
from rpython.translator.tool.cbuild import ExternalCompilationInfo
class BasicGcPolicy(object):
def __init__(self, db, thread_enabled=False):
self.db = db
self.thread_enabled = thread_enabled
def common_gcheader_definition(self, defnode):
if defnode.db.gctransformer is not None:
return defnode.db.gctransformer.HDR
return None
def struct_gcheader_definition(self, defnode):
return self.common_gcheader_definition(defnode)
def array_gcheader_definition(self, defnode):
return self.common_gcheader_definition(defnode)
def compilation_info(self):
if not self.db:
return ExternalCompilationInfo()
gct = self.db.gctransformer
return ExternalCompilationInfo(
pre_include_bits=['/* using %s */' % (gct.__class__.__name__,),
'#define MALLOC_ZERO_FILLED %d' % (gct.malloc_zero_filled,),
]
)
def need_no_typeptr(self):
return False
def gc_startup_code(self):
return []
def struct_setup(self, structdefnode, rtti):
return None
def array_setup(self, arraydefnode):
return None
def rtti_type(self):
return ''
def OP_GC_SET_MAX_HEAP_SIZE(self, funcgen, op):
return ''
def OP_GC_THREAD_PREPARE(self, funcgen, op):
return ''
def OP_GC_THREAD_RUN(self, funcgen, op):
# The gc transformer leaves this operation in the graphs
# in all cases except with framework+shadowstack. In that
# case the operation is removed because redundant with
# rthread.get_or_make_ident().
return 'RPY_THREADLOCALREF_ENSURE();'
def OP_GC_THREAD_START(self, funcgen, op):
return ''
def OP_GC_THREAD_DIE(self, funcgen, op):
# The gc transformer leaves this operation in the graphs
# (but may insert a call to a gcrootfinder-specific
# function just before).
return 'RPython_ThreadLocals_ThreadDie();'
def OP_GC_THREAD_BEFORE_FORK(self, funcgen, op):
return '%s = NULL;' % funcgen.expr(op.result)
def OP_GC_THREAD_AFTER_FORK(self, funcgen, op):
return ''
def OP_GC_WRITEBARRIER(self, funcgen, op):
return ''
def OP_GC_STACK_BOTTOM(self, funcgen, op):
return ''
def OP_GC_GCFLAG_EXTRA(self, funcgen, op):
return '%s = 0; /* gc_gcflag_extra%r */' % (
funcgen.expr(op.result),
op.args[0])
class RefcountingInfo:
static_deallocator = None
class RefcountingGcPolicy(BasicGcPolicy):
def gettransformer(self, translator):
from rpython.memory.gctransform import refcounting
return refcounting.RefcountingGCTransformer(translator)
# for structs
def struct_setup(self, structdefnode, rtti):
if rtti is not None:
transformer = structdefnode.db.gctransformer
fptr = transformer.static_deallocation_funcptr_for_type(
structdefnode.STRUCT)
structdefnode.gcinfo = RefcountingInfo()
structdefnode.gcinfo.static_deallocator = structdefnode.db.get(fptr)
# for arrays
def array_setup(self, arraydefnode):
pass
# for rtti node
def rtti_type(self):
return 'void (@)(void *)' # void dealloc_xx(struct xx *)
def rtti_node_factory(self):
return RefcountingRuntimeTypeInfo_OpaqueNode
# zero malloc impl
def OP_GC_CALL_RTTI_DESTRUCTOR(self, funcgen, op):
args = [funcgen.expr(v) for v in op.args]
line = '%s(%s);' % (args[0], ', '.join(args[1:]))
return line
def OP_GC_FREE(self, funcgen, op):
args = [funcgen.expr(v) for v in op.args]
return 'OP_FREE(%s);' % (args[0], )
def OP_GC__COLLECT(self, funcgen, op):
return ''
def OP_GC__DISABLE_FINALIZERS(self, funcgen, op):
return ''
def OP_GC__ENABLE_FINALIZERS(self, funcgen, op):
return ''
class RefcountingRuntimeTypeInfo_OpaqueNode(ContainerNode):
nodekind = 'refcnt rtti'
globalcontainer = True
typename = 'void (@)(void *)'
_funccodegen_owner = None
def __init__(self, db, T, obj):
assert T == RuntimeTypeInfo
assert isinstance(obj.about, RttiStruct)
self.db = db
self.T = T
self.obj = obj
defnode = db.gettypedefnode(obj.about)
self.implementationtypename = 'void (@)(void *)'
self.name = defnode.gcinfo.static_deallocator
def getptrname(self):
return '((void (*)(void *)) %s)' % (self.name,)
def enum_dependencies(self):
return []
def implementation(self):
return []
class BoehmInfo:
finalizer = None
class BoehmGcPolicy(BasicGcPolicy):
def gettransformer(self, translator):
from rpython.memory.gctransform import boehm
return boehm.BoehmGCTransformer(translator)
def array_setup(self, arraydefnode):
pass
def struct_setup(self, structdefnode, rtti):
pass
def rtti_type(self):
return BoehmGcRuntimeTypeInfo_OpaqueNode.typename
def rtti_node_factory(self):
return BoehmGcRuntimeTypeInfo_OpaqueNode
def compilation_info(self):
eci = BasicGcPolicy.compilation_info(self)
from rpython.rtyper.tool.rffi_platform import configure_boehm
eci = eci.merge(configure_boehm())
pre_include_bits = []
if sys.platform.startswith('linux'):
pre_include_bits += ["#define _REENTRANT 1",
"#define GC_LINUX_THREADS 1"]
if sys.platform != "win32" and not sys.platform.startswith("openbsd"):
# GC_REDIRECT_TO_LOCAL is not supported on Win32 by gc6.8
pre_include_bits += ["#define GC_REDIRECT_TO_LOCAL 1"]
eci = eci.merge(ExternalCompilationInfo(
pre_include_bits=pre_include_bits,
# The following define is required by the thread module,
# See module/thread/test/test_rthread.py
compile_extra=['-DPYPY_USING_BOEHM_GC'],
))
gct = self.db.gctransformer
gct.finalizer_triggers = tuple(gct.finalizer_triggers) # stop changing
sourcelines = ['']
for trig in gct.finalizer_triggers:
sourcelines.append('RPY_EXTERN void %s(void);' % (
self.db.get(trig),))
sourcelines.append('')
sourcelines.append('void (*boehm_fq_trigger[])(void) = {')
for trig in gct.finalizer_triggers:
sourcelines.append('\t%s,' % (self.db.get(trig),))
sourcelines.append('\tNULL')
sourcelines.append('};')
sourcelines.append('struct boehm_fq_s *boehm_fq_queues[%d];' % (
len(gct.finalizer_triggers) or 1,))
sourcelines.append('')
eci = eci.merge(ExternalCompilationInfo(
separate_module_sources=['\n'.join(sourcelines)]))
return eci
def gc_startup_code(self):
if sys.platform == 'win32':
pass # yield 'assert(GC_all_interior_pointers == 0);'
else:
yield 'GC_all_interior_pointers = 0;'
yield 'boehm_gc_startup_code();'
def get_real_weakref_type(self):
from rpython.memory.gctransform import boehm
return boehm.WEAKLINK
def convert_weakref_to(self, ptarget):
from rpython.memory.gctransform import boehm
return boehm.convert_weakref_to(ptarget)
def OP_GC__COLLECT(self, funcgen, op):
return 'GC_gcollect();'
def OP_GC_SET_MAX_HEAP_SIZE(self, funcgen, op):
nbytes = funcgen.expr(op.args[0])
return 'GC_set_max_heap_size(%s);' % (nbytes,)
def GC_KEEPALIVE(self, funcgen, v):
return 'pypy_asm_keepalive(%s);' % funcgen.expr(v)
class BoehmGcRuntimeTypeInfo_OpaqueNode(ContainerNode):
nodekind = 'boehm rtti'
globalcontainer = True
typename = 'char @'
_funccodegen_owner = None
def __init__(self, db, T, obj):
assert T == RuntimeTypeInfo
assert isinstance(obj.about, RttiStruct)
self.db = db
self.T = T
self.obj = obj
defnode = db.gettypedefnode(obj.about)
self.implementationtypename = self.typename
self.name = self.db.namespace.uniquename('g_rtti_v_'+ defnode.barename)
def getptrname(self):
return '(&%s)' % (self.name,)
def enum_dependencies(self):
return []
def implementation(self):
yield 'char %s /* uninitialized */;' % self.name
class FrameworkGcRuntimeTypeInfo_OpaqueNode(BoehmGcRuntimeTypeInfo_OpaqueNode):
nodekind = 'framework rtti'
# to get an idea how it looks like with no refcount/gc at all
class NoneGcPolicy(BoehmGcPolicy):
gc_startup_code = RefcountingGcPolicy.gc_startup_code.im_func
def compilation_info(self):
eci = BasicGcPolicy.compilation_info(self)
eci = eci.merge(ExternalCompilationInfo(
post_include_bits=['#define PYPY_USING_NO_GC_AT_ALL'],
))
return eci
class BasicFrameworkGcPolicy(BasicGcPolicy):
def gettransformer(self, translator):
if hasattr(self, 'transformerclass'): # for rpython/memory tests
return self.transformerclass(translator)
raise NotImplementedError
def struct_setup(self, structdefnode, rtti):
if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
gctransf = self.db.gctransformer
TYPE = structdefnode.STRUCT
fptrs = gctransf.special_funcptr_for_type(TYPE)
# make sure this is seen by the database early, i.e. before
# finish_helpers() on the gctransformer
destrptr = rtti._obj.destructor_funcptr
self.db.get(destrptr)
# the following, on the other hand, will only discover ll_finalizer
# helpers. The get() sees and records a delayed pointer. It is
# still important to see it so that it can be followed as soon as
# the mixlevelannotator resolves it.
for fptr in fptrs.values():
self.db.get(fptr)
def array_setup(self, arraydefnode):
pass
def rtti_type(self):
return FrameworkGcRuntimeTypeInfo_OpaqueNode.typename
def rtti_node_factory(self):
return FrameworkGcRuntimeTypeInfo_OpaqueNode
def gc_startup_code(self):
fnptr = self.db.gctransformer.frameworkgc_setup_ptr.value
yield '%s();' % (self.db.get(fnptr),)
def get_real_weakref_type(self):
from rpython.memory.gctypelayout import WEAKREF
return WEAKREF
def convert_weakref_to(self, ptarget):
from rpython.memory.gctypelayout import convert_weakref_to
return convert_weakref_to(ptarget)
def OP_GC_RELOAD_POSSIBLY_MOVED(self, funcgen, op):
if isinstance(op.args[1], Constant):
return '/* %s */' % (op,)
else:
args = [funcgen.expr(v) for v in op.args]
return '%s = %s; /* for moving GCs */' % (args[1], args[0])
def need_no_typeptr(self):
config = self.db.translator.config
return config.translation.gcremovetypeptr
def header_type(self, extra='*'):
# Fish out the C name of the 'struct pypy_header0'
HDR = self.db.gctransformer.HDR
return self.db.gettype(HDR).replace('@', extra)
def tid_fieldname(self, tid_field='tid'):
# Fish out the C name of the tid field.
HDR = self.db.gctransformer.HDR
hdr_node = self.db.gettypedefnode(HDR)
return hdr_node.c_struct_field_name(tid_field)
def OP_GC_GETTYPEPTR_GROUP(self, funcgen, op):
# expands to a number of steps, as per rpython/lltypesystem/opimpl.py,
# all implemented by a single call to a C macro.
[v_obj, c_grpptr, c_skipoffset, c_vtableinfo] = op.args
tid_field = c_vtableinfo.value[2]
typename = funcgen.db.gettype(op.result.concretetype)
return (
'%s = (%s)_OP_GET_NEXT_GROUP_MEMBER(%s, (pypy_halfword_t)%s->'
'_gcheader.%s, %s);'
% (funcgen.expr(op.result),
cdecl(typename, ''),
funcgen.expr(c_grpptr),
funcgen.expr(v_obj),
self.tid_fieldname(tid_field),
funcgen.expr(c_skipoffset)))
def OP_GC_WRITEBARRIER(self, funcgen, op):
raise Exception("the FramewokGCTransformer should handle this")
def OP_GC_GCFLAG_EXTRA(self, funcgen, op):
gcflag_extra = self.db.gctransformer.gcdata.gc.gcflag_extra
if gcflag_extra == 0:
return BasicGcPolicy.OP_GC_GCFLAG_EXTRA(self, funcgen, op)
subopnum = op.args[0].value
if subopnum == 1:
return '%s = 1; /* has_gcflag_extra */' % (
funcgen.expr(op.result),)
hdrfield = '((%s)%s)->%s' % (self.header_type(),
funcgen.expr(op.args[1]),
self.tid_fieldname())
parts = ['%s = (%s & %dL) != 0;' % (funcgen.expr(op.result),
hdrfield,
gcflag_extra)]
if subopnum == 2: # get_gcflag_extra
parts.append('/* get_gcflag_extra */')
elif subopnum == 3: # toggle_gcflag_extra
parts.insert(0, '%s ^= %dL;' % (hdrfield,
gcflag_extra))
parts.append('/* toggle_gcflag_extra */')
else:
raise AssertionError(subopnum)
return ' '.join(parts)
def OP_GC_BIT(self, funcgen, op):
# This is a two-arguments operation (x, y) where x is a
# pointer and y is a constant power of two. It returns 0 if
# "(*(Signed*)x) & y == 0", and non-zero if it is "== y".
#
# On x86-64, emitting this is better than emitting a load
# followed by an INT_AND for the case where y doesn't fit in
# 32 bits. I've seen situations where a register was wasted
# to contain the constant 2**32 throughout a complete messy
# function; the goal of this GC_BIT is to avoid that.
#
# Don't abuse, though. If you need to check several bits in
# sequence, then it's likely better to load the whole Signed
# first; using GC_BIT would result in multiple accesses to
# memory.
#
bitmask = op.args[1].value
assert bitmask > 0 and (bitmask & (bitmask - 1)) == 0
offset = 0
while bitmask >= 0x100:
offset += 1
bitmask >>= 8
if sys.byteorder == 'big':
offset = 'sizeof(Signed)-%s' % (offset+1)
return '%s = ((char *)%s)[%s] & %d;' % (funcgen.expr(op.result),
funcgen.expr(op.args[0]),
offset, bitmask)
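    # Worked example for OP_GC_BIT above (illustration only): with bitmask == 0x200
    # the loop runs once, leaving offset == 1 and bitmask == 2, so on a
    # little-endian target the generated C reads byte 1 of the Signed word and
    # masks it with 2; on big-endian the offset becomes sizeof(Signed)-2.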
class ShadowStackFrameworkGcPolicy(BasicFrameworkGcPolicy):
def gettransformer(self, translator):
from rpython.memory.gctransform import shadowstack
return shadowstack.ShadowStackFrameworkGCTransformer(translator)
def enter_roots_frame(self, funcgen, (c_gcdata, c_numcolors)):
numcolors = c_numcolors.value
# XXX hard-code the field name here
gcpol_ss = '%s->gcd_inst_root_stack_top' % funcgen.expr(c_gcdata)
#
yield ('typedef struct { void %s; } pypy_ss_t;'
% ', '.join(['*s%d' % i for i in range(numcolors)]))
yield 'pypy_ss_t *ss;'
funcgen.gcpol_ss = gcpol_ss
def OP_GC_PUSH_ROOTS(self, funcgen, op):
raise Exception("gc_push_roots should be removed by postprocess_graph")
def OP_GC_POP_ROOTS(self, funcgen, op):
raise Exception("gc_pop_roots should be removed by postprocess_graph")
def OP_GC_ENTER_ROOTS_FRAME(self, funcgen, op):
return 'ss = (pypy_ss_t *)%s; %s = (void *)(ss+1);' % (
funcgen.gcpol_ss, funcgen.gcpol_ss)
def OP_GC_LEAVE_ROOTS_FRAME(self, funcgen, op):
return '%s = (void *)ss;' % funcgen.gcpol_ss
def OP_GC_SAVE_ROOT(self, funcgen, op):
num = op.args[0].value
exprvalue = funcgen.expr(op.args[1])
return 'ss->s%d = (void *)%s;\t/* gc_save_root */' % (num, exprvalue)
def OP_GC_RESTORE_ROOT(self, funcgen, op):
num = op.args[0].value
exprvalue = funcgen.expr(op.args[1])
typename = funcgen.db.gettype(op.args[1].concretetype)
result = '%s = (%s)ss->s%d;' % (exprvalue, cdecl(typename, ''), num)
if isinstance(op.args[1], Constant):
return '/* %s\t* gc_restore_root */' % result
else:
return '%s\t/* gc_restore_root */' % result
class AsmGcRootFrameworkGcPolicy(BasicFrameworkGcPolicy):
def gettransformer(self, translator):
from rpython.memory.gctransform import asmgcroot
return asmgcroot.AsmGcRootFrameworkGCTransformer(translator)
def GC_KEEPALIVE(self, funcgen, v):
return 'pypy_asm_keepalive(%s);' % funcgen.expr(v)
def OP_GC_STACK_BOTTOM(self, funcgen, op):
return 'pypy_asm_stack_bottom();'
name_to_gcpolicy = {
'boehm': BoehmGcPolicy,
'ref': RefcountingGcPolicy,
'none': NoneGcPolicy,
'framework+shadowstack': ShadowStackFrameworkGcPolicy,
'framework+asmgcc': AsmGcRootFrameworkGcPolicy
}
|
munhanha/mtg-random
|
refs/heads/master
|
django/contrib/comments/views/moderation.py
|
307
|
from django import template
from django.conf import settings
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required, permission_required
from utils import next_redirect, confirmation_view
from django.contrib import comments
from django.contrib.comments import signals
from django.views.decorators.csrf import csrf_protect
@csrf_protect
@login_required
def flag(request, comment_id, next=None):
"""
Flags a comment. Confirmation on GET, action on POST.
Templates: `comments/flag.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
# Flag on POST
if request.method == 'POST':
perform_flag(request, comment)
return next_redirect(request.POST.copy(), next, flag_done, c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/flag.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
@csrf_protect
@permission_required("comments.can_moderate")
def delete(request, comment_id, next=None):
"""
Deletes a comment. Confirmation on GET, action on POST. Requires the "can
moderate comments" permission.
Templates: `comments/delete.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
# Delete on POST
if request.method == 'POST':
# Flag the comment as deleted instead of actually deleting it.
perform_delete(request, comment)
return next_redirect(request.POST.copy(), next, delete_done, c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/delete.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
@csrf_protect
@permission_required("comments.can_moderate")
def approve(request, comment_id, next=None):
"""
Approve a comment (that is, mark it as public and non-removed). Confirmation
on GET, action on POST. Requires the "can moderate comments" permission.
Templates: `comments/approve.html`,
Context:
comment
the `comments.comment` object for approval
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
    # Approve on POST
if request.method == 'POST':
# Flag the comment as approved.
perform_approve(request, comment)
return next_redirect(request.POST.copy(), next, approve_done, c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/approve.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
# The following functions actually perform the various flag/approve/delete
# actions. They've been broken out into separate functions so that they
# may be called from admin actions.
def perform_flag(request, comment):
"""
Actually perform the flagging of a comment from a request.
"""
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment = comment,
user = request.user,
flag = comments.models.CommentFlag.SUGGEST_REMOVAL
)
signals.comment_was_flagged.send(
sender = comment.__class__,
comment = comment,
flag = flag,
created = created,
request = request,
)
def perform_delete(request, comment):
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment = comment,
user = request.user,
flag = comments.models.CommentFlag.MODERATOR_DELETION
)
comment.is_removed = True
comment.save()
signals.comment_was_flagged.send(
sender = comment.__class__,
comment = comment,
flag = flag,
created = created,
request = request,
)
def perform_approve(request, comment):
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment = comment,
user = request.user,
flag = comments.models.CommentFlag.MODERATOR_APPROVAL,
)
comment.is_removed = False
comment.is_public = True
comment.save()
signals.comment_was_flagged.send(
sender = comment.__class__,
comment = comment,
flag = flag,
created = created,
request = request,
)
# Confirmation views.
flag_done = confirmation_view(
template = "comments/flagged.html",
doc = 'Displays a "comment was flagged" success page.'
)
delete_done = confirmation_view(
template = "comments/deleted.html",
doc = 'Displays a "comment was deleted" success page.'
)
approve_done = confirmation_view(
template = "comments/approved.html",
doc = 'Displays a "comment was approved" success page.'
)
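# Illustrative sketch, not part of the original module: because the perform_*
# helpers above are plain functions, a hypothetical Django admin action could
# reuse them directly (the action name below is made up).
def _example_approve_selected(modeladmin, request, queryset):
    for comment in queryset:
        perform_approve(request, comment)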
|
felipedau/unmessage
|
refs/heads/develop
|
unmessage/persistence.py
|
2
|
import errno
import sqlite3
import attr
from pyaxo import Keypair, a2b, b2a
from .contact import Contact
@attr.s
class PeerInfo(object):
name = attr.ib(
validator=attr.validators.optional(attr.validators.instance_of(str)),
default=None)
port_local_server = attr.ib(
validator=attr.validators.optional(attr.validators.instance_of(int)),
default=None)
identity_keys = attr.ib(
validator=attr.validators.optional(
attr.validators.instance_of(Keypair)),
default=None)
onion_service_key = attr.ib(
validator=attr.validators.optional(attr.validators.instance_of(str)),
default=None)
contacts = attr.ib(
validator=attr.validators.optional(attr.validators.instance_of(dict)),
default=attr.Factory(dict))
@attr.s
class Persistence(object):
dbname = attr.ib(validator=attr.validators.instance_of(str))
dbpassphrase = attr.ib(
validator=attr.validators.optional(attr.validators.instance_of(dict)),
default=None)
db = attr.ib(init=False)
def __attrs_post_init__(self):
self.db = self._open_db()
@classmethod
def create(cls, paths):
return cls(paths.peer_db)
def _open_db(self):
db = sqlite3.connect(':memory:', check_same_thread=False)
db.row_factory = sqlite3.Row
with db:
try:
with open(self.dbname, 'r') as f:
sql = f.read()
db.cursor().executescript(sql)
except IOError as e:
if e.errno == errno.ENOENT:
self._create_db(db)
else:
raise
return db
def _create_db(self, db):
db.execute('''
CREATE TABLE IF NOT EXISTS
peer (
name TEXT,
port_local_server INTEGER,
priv_identity_key TEXT,
pub_identity_key TEXT,
onion_service_key TEXT)''')
db.execute('''
CREATE UNIQUE INDEX IF NOT EXISTS
peer_name
ON
peer (name)''')
db.execute('''
CREATE TABLE IF NOT EXISTS
contacts (
identity TEXT,
key TEXT,
is_verified INTEGER,
has_presence INTEGER)''')
db.execute('''
CREATE UNIQUE INDEX IF NOT EXISTS
contact_identity
ON
contacts (identity)''')
def _write_db(self):
with self.db as db:
sql = bytes('\n'.join(db.iterdump()))
with open(self.dbname, 'w') as f:
f.write(sql)
def load_peer_info(self):
with self.db as db:
cur = db.cursor()
cur.execute('''
SELECT
*
FROM
peer''')
row = cur.fetchone()
if row:
onion_service_key = str(row['onion_service_key'])
identity_keys = Keypair(a2b(row['priv_identity_key']),
a2b(row['pub_identity_key']))
port_local_server = int(row['port_local_server'])
name = str(row['name'])
else:
onion_service_key = None
identity_keys = None
port_local_server = None
name = None
with self.db as db:
rows = db.execute('''
SELECT
*
FROM
contacts''')
contacts = dict()
for row in rows:
c = Contact(str(row['identity']),
a2b(row['key']),
bool(row['is_verified']),
bool(row['has_presence']))
contacts[c.name] = c
return PeerInfo(name, port_local_server, identity_keys,
onion_service_key, contacts)
def save_peer_info(self, peer_info):
with self.db as db:
db.execute('''
DELETE FROM
peer''')
if peer_info.identity_keys:
db.execute('''
INSERT INTO
peer (
name,
port_local_server,
priv_identity_key,
pub_identity_key,
onion_service_key)
VALUES (?, ?, ?, ?, ?)''', (
peer_info.name,
peer_info.port_local_server,
b2a(peer_info.identity_keys.priv),
b2a(peer_info.identity_keys.pub),
peer_info.onion_service_key))
db.execute('''
DELETE FROM
contacts''')
for c in peer_info.contacts.values():
db.execute('''
INSERT INTO
contacts (
identity,
key,
is_verified,
has_presence)
VALUES (?, ?, ?, ?)''', (
c.identity,
b2a(c.key),
int(c.is_verified),
int(c.has_presence)))
self._write_db()
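# Usage sketch (illustration only): `paths` is whatever object the caller already
# has, exposing the `peer_db` file path that Persistence.create() expects.
def _example_roundtrip(paths):
    persistence = Persistence.create(paths)
    info = persistence.load_peer_info()   # empty PeerInfo on first run
    persistence.save_peer_info(info)      # dumps the in-memory SQLite DB to disk
    return info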
|
marc-sensenich/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_device_sshd.py
|
21
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_device_sshd import ApiParameters
from library.modules.bigip_device_sshd import ModuleParameters
from library.modules.bigip_device_sshd import ModuleManager
from library.modules.bigip_device_sshd import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_device_sshd import ApiParameters
from ansible.modules.network.f5.bigip_device_sshd import ModuleParameters
from ansible.modules.network.f5.bigip_device_sshd import ModuleManager
from ansible.modules.network.f5.bigip_device_sshd import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
allow=['all'],
banner='enabled',
banner_text='asdf',
inactivity_timeout='100',
log_level='debug',
login='enabled',
port=1010,
server='localhost',
user='admin',
password='password'
)
p = ModuleParameters(params=args)
assert p.allow == ['all']
assert p.banner == 'enabled'
assert p.banner_text == 'asdf'
assert p.inactivity_timeout == 100
assert p.log_level == 'debug'
assert p.login == 'enabled'
assert p.port == 1010
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_update_settings(self, *args):
set_module_args(dict(
allow=['all'],
banner='enabled',
banner_text='asdf',
inactivity_timeout='100',
log_level='debug',
login='enabled',
port=1010,
server='localhost',
user='admin',
password='password'
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(
params=dict(
allow=['172.27.1.1']
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['allow'] == ['all']
|
brunoabud/ic
|
refs/heads/master
|
plugins/ICBGRBlurFilter/plugin_object.py
|
1
|
# coding: utf-8
# Copyright (C) 2016 Bruno Abude Cardoso
#
# Imagem Cinemática is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Imagem Cinemática is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import cv2
import numpy as np
class ICBlurFilter(object):
def __init__(self, **kwargs):
self.plugin_path = kwargs["plugin_path"]
self.parameters = [
("kernel_size", u"Kernel Size", "int", 1, 15, 1, 3)
]
self.kernel_size = 3
def parameter_changed(self, param_name, value):
if param_name == "kernel_size":
self.kernel_size = value | 1
return self.kernel_size
else:
return None
def apply_filter(self, frame):
colorspace, data, pos, timestamp = frame
data = cv2.blur(data, (self.kernel_size,)*2)
return (colorspace, data)
def release_plugin(self):
return True
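# Worked example (illustration only): parameter_changed('kernel_size', 4) stores
# 4 | 1 == 5 above, so even values snap up to the next odd kernel size before
# cv2.blur() uses it in apply_filter().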
|
zacoxicompton/ExocortexCrate
|
refs/heads/master
|
Python/samples/cmpABCs.py
|
4
|
import _ExocortexAlembicPython as alembic
import sys
import argparse
def compareValues(a1, a2, id):
if a1.getNbStoredSamples() != a2.getNbStoredSamples():
print("--> VAL: " + str(id) + " doesn't have the same number of sample")
return
for i in xrange(0, a1.getNbStoredSamples()):
if a1.getValues(i) != a2.getValues(i):
print("--> VAL: " + str(id) + " value #" + str(i) + " is different")
return
pass
def compareProperties(a1, a2, id):
props2 = a2.getPropertyNames()
for prop in a1.getPropertyNames():
if prop not in props2:
print("--> PRO: " + str(id) + "/" + str(prop) + " doesn't exist")
continue
prop1 = a1.getProperty(prop)
prop2 = a2.getProperty(prop)
if prop1.getType() != prop2.getType():
print("--> PRO: " + str(id) + "/" + str(prop) + " is not the same type")
continue
id2 = id + "/" + prop
if prop1.isCompound():
compareProperties(prop1, prop2, id2)
else:
compareValues(prop1, prop2, id2)
pass
def compareObjects(a1, a2):
secondIds = a2.getIdentifiers()
for identifier in a1.getIdentifiers():
if identifier not in secondIds:
print("--> OBJ: " + str(identifier) + " doesn't exist")
continue
obj1 = a1.getObject(identifier)
obj1_typ = obj1.getType()
obj2 = a2.getObject(identifier)
        obj2_typ = obj2.getType()
if obj1_typ != obj2_typ:
print("--> OBJ: " + str(identifier) + " is not the same type")
continue
compareProperties(obj1, obj2, identifier)
pass
def main(args):
# parser args
parser = argparse.ArgumentParser(description="Compare an alembic file to a second one and report the differences.")
parser.add_argument("abc_in", type=str, metavar=("{file1}", "{file2}"), nargs=2, help="input alembic file to be compared")
ns = vars(parser.parse_args(args[1:]))
if ns["abc_in"][0] == ns["abc_in"][1]:
print("cannot compare a file to itself!")
return
in1 = alembic.getIArchive(ns["abc_in"][0])
in2 = alembic.getIArchive(ns["abc_in"][1])
if in1 != None and in2 != None:
compareObjects(in1, in2)
in1 = None
in2 = None
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
if __name__ == "__main__":
main(sys.argv)
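# Usage sketch (illustration only; file names are made up):
#
#     python cmpABCs.py scene_a.abc scene_b.abc
#
# Differences are reported with the "--> OBJ/PRO/VAL" prefixes printed above.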
|
ByteMail/ByteMail
|
refs/heads/master
|
flask/testsuite/test_apps/config_module_app.py
|
1257
|
import os
import flask
here = os.path.abspath(os.path.dirname(__file__))
app = flask.Flask(__name__)
|
zaxliu/deepnap
|
refs/heads/master
|
experiments/kdd-exps/experiment_QNN_Feb2_0944.py
|
1
|
# System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_prefix = '_'.join(['msg'] + os.path.basename(__file__).replace('.', '_').split('_')[1:4])
log_file_name = "{}_{}.log".format(log_prefix, sys.argv[1])
# Composite classes
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Data
location = 'dmW'
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 15
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32 # reward scaling
# |- Env model
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5
beta = 0.5 # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df = pd.read_csv(
filepath_or_buffer='../data/trace_{}.dat'.format(location),
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
|
arek125/remote-GPIO-control-server
|
refs/heads/master
|
rgc-server1_0.py
|
1
|
import socket
import sys
import hashlib
from datetime import datetime
import os
import signal
import glob
import sqlite3
import threading
import time
import RPi.GPIO as GPIO
HOST = ''
PORT = 8888
PASSWORD = ''
ENC_KEY = ''
exitapp = False
break_ = -1
db_path = 'rgc-server.db3'
def stringToint(string):
    try:
        ints = int(string)
    except ValueError:
        print "Error while converting String to Int"
        ints = None  # defined fallback so the caller does not hit UnboundLocalError
    return ints
def planowanie():
if exitapp == False:
rtime = 1
threading.Timer(rtime, planowanie).start()
conndb2 = sqlite3.connect(db_path, check_same_thread=False)
conndb2.isolation_level = None
cursor2 = conndb2.execute("SELECT * from planowanie p join stany s on p.Out_id = s.Id")
for row in cursor2:
if row[1] == 'date':
if row[4] == datetime.now().strftime('%Y-%m-%d %H:%M'):
if row[10] != row[6]:
set=row[6]
if row[6] == 2:
set=int(not row[10])
GPIOset(row[9],set)
gpiolist = row[9].split(",")
for gpio in gpiolist:
conndb2.execute("UPDATE stany set Stan =2,Edit_time=? where (GPIO_BCM like ? and Id!=? and IN_OUT like 'out') or (GPIO_BCM like ? and Id!=? and IN_OUT like 'out');",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),"%"+gpio+",%",row[5],"%,"+gpio+"%",row[5]))
conndb2.execute("UPDATE stany set Stan =?,Edit_time=? where GPIO_BCM =? and Id!=? and IN_OUT like 'out' ;",(set,datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),gpio,row[5]))
conndb2.execute("UPDATE stany set Stan =?, Edit_time=? where Id=?",(set,datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),row[5]))
conndb2.execute("DELETE from planowanie where Id=?", (row[0],))
conndb2.execute("UPDATE planowanie set Edit_time=? where Id in (SELECT Id FROM planowanie LIMIT 1)",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),))
conndb2.commit()
elif row[1] == 'hour':
if row[4] == datetime.now().strftime('%H:%M'):
if row[10] != row[6]:
set=row[6]
if row[6] == 2:
set=int(not row[10])
GPIOset(row[9],set)
gpiolist = row[9].split(",")
for gpio in gpiolist:
conndb2.execute("UPDATE stany set Stan =2,Edit_time=? where (GPIO_BCM like ? and Id!=? and IN_OUT like 'out') or (GPIO_BCM like ? and Id!=? and IN_OUT like 'out');",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),"%"+gpio+",%",row[5],"%,"+gpio+"%",row[5]))
conndb2.execute("UPDATE stany set Stan =?,Edit_time=? where GPIO_BCM =? and Id!=? and IN_OUT like 'out' ;",(set,datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),gpio,row[5]))
conndb2.execute("UPDATE stany set Stan =?, Edit_time=? where Id=?",(set,datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),row[5]))
if row[3] == 'once':
conndb2.execute("DELETE from planowanie where Id=?", (row[0],))
conndb2.execute("UPDATE planowanie set Edit_time=? where Id in (SELECT Id FROM planowanie LIMIT 1)",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),))
conndb2.commit()
elif row[1] == 'timer':
timelist = row[4].split(",")
time = int(timelist[1]) - rtime
if time <= 0:
if row[10] != row[6]:
set=row[6]
if row[6] == 2:
set=int(not row[10])
GPIOset(row[9],set)
gpiolist = row[9].split(",")
for gpio in gpiolist:
conndb2.execute("UPDATE stany set Stan =2,Edit_time=? where (GPIO_BCM like ? and Id!=? and IN_OUT like 'out') or (GPIO_BCM like ? and Id!=? and IN_OUT like 'out');",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),"%"+gpio+",%",row[5],"%,"+gpio+"%",row[5]))
conndb2.execute("UPDATE stany set Stan =?,Edit_time=? where GPIO_BCM =? and Id!=? and IN_OUT like 'out' ;",(set,datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),gpio,row[5]))
conndb2.execute("UPDATE stany set Stan =?, Edit_time=? where Id=?",(set,datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),row[5]))
if row[3] == 'once':
conndb2.execute("DELETE from planowanie where Id=?", (row[0],))
conndb2.execute("UPDATE planowanie set Edit_time=? where Id in (SELECT Id FROM planowanie LIMIT 1)",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),))
else:
conndb2.execute("UPDATE planowanie set Dane=?, Edit_time=? where Id=?",(timelist[0]+','+timelist[0],datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),row[0]))
else:
conndb2.execute("UPDATE planowanie set Dane=?, Edit_time=? where Id=?",(str(timelist[0])+','+str(time),datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),row[0]))
conndb2.commit()
conndb2.close()
def GPIOset(pinout,onoff):
pins = pinout.split(",")
onoff = stringToint(onoff)
if onoff < 2:
for pin in pins:
pin = stringToint(pin)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, onoff)
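# Illustration only: GPIOset("17,27", 1) drives BCM pins 17 and 27 high in a
# single call; onoff values of 2 or more are ignored by the `onoff < 2` guard above.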
def GPIOstate(pin):
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(stringToint(pin), GPIO.OUT)
return GPIO.input(stringToint(pin))
def GPIOset_in(inpin):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(inpin,GPIO.IN,GPIO.PUD_UP)
def GPIOPWM(inpin,fr):
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(inpin, GPIO.OUT)
p = GPIO.PWM(inpin, fr)
return p
pwm = {k: [] for k in range(2)}
def inputLoop2(outid,inid,inpin,Stan,reverse):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
inpin = int(inpin)
id2 = int(inid)
Stan = int(Stan)
GPIO.setup(inpin,GPIO.IN,GPIO.PUD_UP)
if Stan == 1:
stan = 6
else:
stan = 1
while exitapp == False:
if stan ==1:
if GPIO.input(inpin)==0:
stan=2
cursor1 = conndb.execute("SELECT Stan, Reverse from stany where Id=?", (outid,))
for row in cursor1:
if int(row[0])==1:
stan=3
elif stan ==2:
cursor2 = conndb.execute("SELECT GPIO_BCM, Reverse from stany where Id=?", (outid,))
for row in cursor2:
GPIOset(str(row[0]),1)
gpiolist = row[0].split(",")
for gpio in gpiolist:
conndb.execute("UPDATE stany set Stan =2,Edit_time=? where (GPIO_BCM like ? and Id!=?) or (GPIO_BCM like ? and Id!=?);",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),"%"+gpio+",%",str(outid),"%,"+gpio+"%",str(outid)))
conndb.execute("UPDATE stany set Stan =?,Edit_time=? where GPIO_BCM =? and Id!=? ;",(str(1),datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),gpio,str(outid)))
conndb.execute("UPDATE stany set Stan =?,Edit_time=? where Id=?",(str(1),datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),str(outid)))
conndb.execute("UPDATE stany set Stan =1,Edit_time=? where GPIO_BCM=? and IN_OUT like 'in'",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),str(inpin)))
conndb.commit()
stan = 5
if GPIO.input(inpin)==1:
conndb.execute("UPDATE stany set Stan =0,Edit_time=? where GPIO_BCM=? and IN_OUT like 'in'",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),str(inpin)))
conndb.commit()
stan=3
elif stan ==3:
if GPIO.input(inpin)==0:
stan=4
cursor1 = conndb.execute("SELECT Stan, Reverse from stany where Id=?", (outid,))
for row in cursor1:
if int(row[0])==0:
stan=1
elif stan ==4:
cursor2 = conndb.execute("SELECT GPIO_BCM, Reverse from stany where Id=?", (outid,))
for row in cursor2:
GPIOset(str(row[0]),0)
gpiolist = row[0].split(",")
for gpio in gpiolist:
conndb.execute("UPDATE stany set Stan =2,Edit_time=? where (GPIO_BCM like ? and Id!=?) or (GPIO_BCM like ? and Id!=?);",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),"%"+gpio+",%",str(outid),"%,"+gpio+"%",str(outid)))
conndb.execute("UPDATE stany set Stan =?,Edit_time=? where GPIO_BCM =? and Id!=? ;",(str(0),datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),gpio,str(outid)))
conndb.execute("UPDATE stany set Stan =?,Edit_time=? where Id=?",(str(0),datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),str(outid)))
conndb.execute("UPDATE stany set Stan =1,Edit_time=? where GPIO_BCM=? and IN_OUT like 'in'",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),str(inpin)))
conndb.commit()
stan = 6
if GPIO.input(inpin)==1:
conndb.execute("UPDATE stany set Stan =0,Edit_time=? where GPIO_BCM=? and IN_OUT like 'in'",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),str(inpin)))
conndb.commit()
stan=1
elif stan ==5:
if GPIO.input(inpin)==1:
conndb.execute("UPDATE stany set Stan =0,Edit_time=? where GPIO_BCM=? and IN_OUT like 'in'",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),str(inpin)))
conndb.commit()
stan=3
elif stan ==6:
if GPIO.input(inpin)==1:
conndb.execute("UPDATE stany set Stan =0,Edit_time=? where GPIO_BCM=? and IN_OUT like 'in'",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),str(inpin)))
conndb.commit()
stan=1
time.sleep(0.05)
if break_ == id2:
break
def inputLoop4(id,inpin,Stan,reverse):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
inpin = int(inpin)
id2 = int(id)
Stan = int(Stan)
GPIO.setup(inpin,GPIO.IN,GPIO.PUD_UP)
if Stan == 0:
stan = 2
elif Stan == 1:
stan = 4
else:
stan = 2
while exitapp == False:
if stan ==2:
if GPIO.input(inpin)==0:
conndb.execute("UPDATE stany set Stan =1,Edit_time=? where GPIO_BCM=? and IN_OUT like 'in'",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),str(inpin)))
conndb.commit()
stan=4
if stan ==4:
if GPIO.input(inpin)==1:
conndb.execute("UPDATE stany set Stan =0,Edit_time=? where GPIO_BCM=? and IN_OUT like 'in'",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),str(inpin)))
conndb.commit()
stan=2
time.sleep(0.05)
if break_ == id2:
break
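# Added commentary (not in the original source): inputLoop2 implements a
# toggle push-button -- each falling edge on `inpin` flips the bound output
# row (`outid`) between Stan 1 and 0, mirrors the change onto rows sharing
# the same GPIO_BCM pins, and resynchronises when the output is switched
# from elsewhere; inputLoop4 simply records the raw input level in `stany`
# (Stan=1 while the pulled-up pin reads low, Stan=0 otherwise).  Both loops
# poll every 50 ms and stop when `exitapp` is set or `break_` equals their id.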
if __name__ == '__main__':
print 'Server is starting...'
print 'Please press Ctrl+C to end the program...'
conndb = sqlite3.connect(db_path, check_same_thread=False)
conndb.isolation_level = None
tableexist = conndb.execute("SELECT * FROM sqlite_master WHERE name ='stany' and type='table';")
if len(tableexist.fetchall()) == 0:
print "Creating database..."
conndb.executescript('''CREATE TABLE IF NOT EXISTS `stany` (
`Id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`GPIO_BCM` TEXT NOT NULL,
`Stan` INTEGER NOT NULL,
`Name` TEXT,
`IN_OUT` TEXT,
`Edit_time` TEXT,
`Reverse` INTEGER NOT NULL,
`Bindid` INTEGER,
`Bindtype` INTEGER);
CREATE TABLE IF NOT EXISTS `planowanie` (
`Id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`Warunek` TEXT NOT NULL,
`Podwarunek` TEXT,
`Rodzaj` TEXT NOT NULL,
`Dane` TEXT,
`Out_id` INTEGER NOT NULL,
`Stan` INTEGER NOT NULL,
`Edit_time` TEXT );
CREATE TABLE IF NOT EXISTS `pwm` (
`Id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`GPIO_BCM` TEXT NOT NULL,
`FR` NUMERIC NOT NULL,
`DC` INTEGER NOT NULL,
`SS` INTEGER NOT NULL,
`Name` TEXT NOT NULL,
`Reverse` INTEGER NOT NULL,
`Edit_time` TEXT);''')
print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
try :
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print 'Socket created'
except socket.error, msg :
print 'Failed to create socket. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
i = 0
for arg in sys.argv:
if arg == '-port':
try:
PORT = int(sys.argv[i+1])
except ValueError:
print "Wrong port argument"
elif arg == '-address':
HOST = sys.argv[i+1]
elif arg == '-password':
PASSWORD = hashlib.sha256(sys.argv[i+1].encode()).hexdigest()
ENC_KEY = hashlib.md5(sys.argv[i+1].encode()).hexdigest()
print ENC_KEY
import base64
from Crypto import Random
from Crypto.Cipher import AES
def encrypt(key, message):
try:
bs = 16
message = message + (bs - len(message) % bs) * chr(bs - len(message) % bs)
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_CBC, iv)
s = base64.b64encode(iv + cipher.encrypt(message)).decode('utf-8')
except:
s = "error"
return s
def decrypt(key, enc_message):
try:
enc_message = base64.b64decode(enc_message)
iv = enc_message[:AES.block_size]
cipher = AES.new(key, AES.MODE_CBC, iv)
s = cipher.decrypt(enc_message[AES.block_size:])
s = s[:-ord(s[len(s)-1:])].decode('utf-8')
except:
s = "error"
return s
i = i+1
try:
s.bind((HOST, PORT))
except socket.error , msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
print 'Socket bind complete ' + str(s.getsockname()) + PASSWORD
cursor1 = conndb.execute("SELECT * from stany where IN_OUT like 'out' order by Edit_time ASC")
for row in cursor1:
print 'OUTPUT: GPIO='+str(row[1])+' STATE='+str(row[2])
GPIOset(row[1],row[2])
cursor1 = conndb.execute("SELECT * from pwm")
for row in cursor1:
print 'OUTPUT PWM: GPIO='+str(row[1])+' S/S='+str(row[4])+' FR='+str(row[2])+' DC='+str(row[3])
pwmpins = row[1].split(",")
for pin in pwmpins:
pwm[pin] = GPIOPWM(int(pin),float(row[2]))
if row[4] == 1:
pwm[pin].start(int(row[3]))
try:
pid = planowanie()
cursor1 = conndb.execute("SELECT * from stany where IN_OUT like 'in'")
for row in cursor1:
print 'INPUT: GPIO='+str(row[1])+' STATE='+str(row[2])
if row[8] == 1:
threading.Thread(target=inputLoop2, args=(row[7],row[0],row[1],row[2],row[6])).start()
else:
threading.Thread(target=inputLoop4, args=(row[0],row[1],row[2],row[6])).start()
while 1:
d = s.recvfrom(1024)
data = d[0].strip()
addr = d[1]
datalist = data.split(";")
passwalidation = False
if PASSWORD == '':
passwalidation = True
else:
if datalist[0] == PASSWORD:
temp = decrypt(ENC_KEY,datalist[1])
if temp == 'error':
passwalidation = False
print 'Decryption error'
else:
datalist = ("0;"+temp).split(";")
passwalidation = True
else:
passwalidation = False
if passwalidation == True:
if datalist[1] == 'test':
reply = 'true;Connection OK;'
elif datalist[1] == 'GPIO_OEtime':
cursor8 = conndb.execute("SELECT Max(Edit_time) FROM stany where IN_OUT like 'out'")
for row in cursor8:
reply = 'true;GPIO_OEtime;'+str(row[0])+';'
elif datalist[1] == 'GPIO_Olist':
cursor9 = conndb.execute("SELECT * from stany where IN_OUT like 'out'")
reply = 'true;GPIO_Olist;'
for row in cursor9:
reply += str(row[0])+';'+str(row[1])+';'+str(row[2])+';'+str(row[3])+';'+str(row[6])+';'
elif datalist[1] == 'Add_GPIO_out':
conndb.execute("INSERT INTO stany VALUES (null,?,2,?,'out',?,?,null,null)",(datalist[2],datalist[3],datalist[4],datalist[5]))
conndb.commit()
reply= 'true;Add_GPIO_out;'
elif datalist[1] == 'Edit_GPIO_out':
conndb.execute("UPDATE stany set GPIO_BCM=?,Name=?, Edit_time=?, reverse=? where Id=?",(datalist[3],datalist[4],datalist[5],datalist[6],datalist[2]))
conndb.commit()
pwmpins = datalist[3].split(',')
pwmpins2 = datalist[7].split(',')
for pin2 in pwmpins2:
if pin2 not in pwmpins:
GPIO.cleanup(int(pin2))
reply= 'true;Edit_GPIO_out;'
elif datalist[1] == 'Delete_GPIO_out':
break_ = int(datalist[2])
conndb.execute("DELETE from stany where Id=?",(datalist[2],))
conndb.execute("UPDATE stany set Edit_time=? where Id in (SELECT Id FROM stany LIMIT 1)",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),))
r1 = conndb.execute("DELETE from planowanie where Out_id=?",(datalist[2],)).rowcount
if r1 > 0:
conndb.execute("UPDATE planowanie set Edit_time=? where Id in (SELECT Id FROM planowanie LIMIT 1)",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),))
conndb.commit()
pwmpins = datalist[3].split(",")
for pin in pwmpins:
GPIO.cleanup(int(pin))
reply= 'true;Delete_GPIO_out;'
elif datalist[1] == 'GPIO_IEtime':
cursor8 = conndb.execute("SELECT Max(Edit_time) FROM stany where IN_OUT like 'in'")
for row in cursor8:
reply = 'true;GPIO_IEtime;'+str(row[0])+';'
elif datalist[1] == 'GPIO_Ilist':
cursor13 = conndb.execute("SELECT * from stany where IN_OUT like 'in'")
reply = 'true;GPIO_Ilist;'
for row in cursor13:
reply += str(row[0])+';'+str(row[1])+';'+str(row[2])+';'+str(row[3])+';'+str(row[6])+';'+str(row[7])+';'+str(row[8])+';'
elif datalist[1] == 'Add_GPIO_in':
id = conndb.execute("INSERT INTO stany VALUES (null,?,0,?,'in',?,?,?,?)",(datalist[2],datalist[3],datalist[4],datalist[5],datalist[6],datalist[7])).lastrowid
conndb.commit()
if datalist[7] == '1':
threading.Thread(target=inputLoop2, args=(datalist[6],id,datalist[2],'0',datalist[5])).start()
else:
threading.Thread(target=inputLoop4, args=(id,datalist[2],'0',datalist[5])).start()
reply= 'true;Add_GPIO_in;'
elif datalist[1] == 'Edit_GPIO_in':
break_ = int(datalist[2])
conndb.execute("DELETE from stany where Id=?",(datalist[2],))
id = conndb.execute("INSERT INTO stany VALUES (null,?,0,?,'in',?,?,?,?)",(datalist[3],datalist[4],datalist[5],datalist[6],datalist[7],datalist[8])).lastrowid
conndb.commit()
if datalist[3] != datalist[9]:
GPIO.cleanup(int(datalist[9]))
if datalist[8] == '1':
threading.Thread(target=inputLoop2, args=(datalist[7],id,datalist[3],'0',datalist[6])).start()
else:
threading.Thread(target=inputLoop4, args=(id,datalist[3],'0',datalist[6])).start()
reply= 'true;Edit_GPIO_in;'
elif datalist[1] == 'GPIO_Oname':
cursor12 = conndb.execute("SELECT Id,Name,GPIO_BCM,Reverse from stany where IN_OUT like 'out'")
reply = 'true;GPIO_Oname;'
for row in cursor12:
reply += str(row[0])+';'+str(row[1])+';'+str(row[2])+';'+str(row[3])+';'
elif datalist[1] == 'GPIO_PEtime':
cursor13 = conndb.execute("SELECT Max(Edit_time) FROM pwm")
for row in cursor13:
reply = 'true;GPIO_PEtime;'+str(row[0])+';'
elif datalist[1] == 'GPIO_Plist':
cursor14 = conndb.execute("SELECT * from pwm")
reply = 'true;GPIO_Plist;'
for row in cursor14:
reply += str(row[0])+';'+str(row[1])+';'+str(row[2])+';'+str(row[3])+';'+str(row[4])+';'+str(row[5])+';'+str(row[6])+';'
elif datalist[1] == 'GPIO_PDC':
pwmpins = datalist[3].split(",")
for pin in pwmpins:
pwm[pin].ChangeDutyCycle(int(datalist[4]))
reply = 'true;GPIO_PDC;'+datalist[4]+';'
elif datalist[1] == 'GPIO_PDCu':
conndb.execute("UPDATE pwm set DC=?,Edit_time=? where Id=?",(datalist[4],datalist[5],datalist[2]))
conndb.commit()
reply = 'true;GPIO_PDCu;'+datalist[4]+';'+datalist[5]+';'
elif datalist[1] == 'GPIO_PFRDC':
pwmpins = datalist[3].split(",")
for pin in pwmpins:
pwm[pin].ChangeDutyCycle(int(datalist[5]))
pwm[pin].ChangeFrequency(float(datalist[4]))
conndb.execute("UPDATE pwm set FR=?,DC=?,Edit_time=? where Id=?",(datalist[4],datalist[5],datalist[6],datalist[2]))
conndb.commit()
reply = 'true;GPIO_PFRDC;'+datalist[4]+';'+datalist[6]+';'+datalist[5]+';'
elif datalist[1] == 'GPIO_PSS':
pwmpins = datalist[3].split(",")
for pin in pwmpins:
if datalist[6] == '1':
pwm[pin].start(int(datalist[4]))
pwm[pin].ChangeFrequency(float(datalist[7]))
elif datalist[6] == '0':
pwm[pin].stop()
conndb.execute("UPDATE pwm set DC=?,Edit_time=?,SS=? where Id=?",(datalist[4],datalist[5],datalist[6],datalist[2]))
conndb.commit()
reply = 'true;GPIO_PSS;'+datalist[4]+';'+datalist[5]+';'+datalist[6]+';'
elif datalist[1] == 'Add_GPIO_pwm':
pwmpins = datalist[2].split(',')
for pin in pwmpins:
pwm[pin] = GPIOPWM(int(pin),float(datalist[3]))
pwm[pin].start(int(datalist[4]))
conndb.execute("INSERT INTO pwm VALUES (null,?,?,?,1,?,?,?)",(datalist[2],datalist[3],datalist[4],datalist[5],datalist[6],datalist[7]))
conndb.commit()
reply= 'true;Add_GPIO_pwm;'
elif datalist[1] == 'Delete_GPIO_pwm':
break_ = int(datalist[2])
conndb.execute("DELETE from pwm where Id=?",(datalist[2],))
conndb.execute("UPDATE pwm set Edit_time=? where Id in (SELECT Id FROM pwm LIMIT 1)",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),))
conndb.commit()
pwmpins = datalist[3].split(',')
for pin in pwmpins:
pwm[pin].stop()
pwm.pop(pin)
GPIO.cleanup(int(pin))
reply= 'true;Delete_GPIO_pwm;'
elif datalist[1] == 'Edit_GPIO_pwm':
pwmpins = datalist[3].split(',')
pwmpins2 = datalist[4].split(',')
for pin in pwmpins:
if pin not in pwmpins2:
pwm[pin].stop()
pwm.pop(pin)
GPIO.cleanup(int(pin))
for pin2 in pwmpins2:
if pin2 not in pwmpins:
pwm[pin2] = GPIOPWM(int(pin2),float(datalist[5]))
pwm[pin2].start(int(datalist[6]))
else:
pwm[pin2].ChangeDutyCycle(int(datalist[6]))
pwm[pin2].ChangeFrequency(float(datalist[5]))
conndb.execute("UPDATE pwm set GPIO_BCM=?, FR=?, DC=?, SS=1, Name=?, Reverse=?, Edit_time=? where Id=?",(datalist[4],datalist[5],datalist[6],datalist[7],datalist[8],datalist[9],datalist[2]))
conndb.commit()
reply= 'true;Edit_GPIO_pwm;'
elif datalist[1] == 'Allpins_GPIO_pwm':
reply = 'true;Allpins_GPIO_pwm;'
cursor15 = conndb.execute("SELECT GPIO_BCM from pwm")
for row in cursor15:
pins = row[0].split(',')
for pin in pins:
reply+= pin+';'
elif datalist[1] == 'Allpins_GPIO_out':
reply = 'true;Allpins_GPIO_out;'
cursor16 = conndb.execute("SELECT GPIO_BCM from stany where IN_OUT like 'out'")
for row in cursor16:
pins = row[0].split(',')
for pin in pins:
reply+= pin+';'
elif datalist[1] == 'Allpins_GPIO_in':
reply = 'true;Allpins_GPIO_in;'
cursor17 = conndb.execute("SELECT GPIO_BCM from stany where IN_OUT like 'in'")
for row in cursor17:
reply+= str(row[0])+';'
elif datalist[1] == 'GPIO_SAEtime':
cursor18 = conndb.execute("SELECT Max(Edit_time) FROM planowanie")
for row in cursor18:
reply = 'true;GPIO_SAEtime;'+str(row[0])+';'
elif datalist[1] == 'GPIO_SAlist':
cursor19 = conndb.execute("SELECT * from planowanie p join stany s on p.Out_id = s.Id")
reply = 'true;GPIO_SAlist;'
for row in cursor19:
reply += str(row[0])+';'+str(row[1])+';'+str(row[2])+';'+str(row[3])+';'+str(row[4])+';'+str(row[6])+';'+str(row[11])+';'+str(row[14])+';'
elif datalist[1] == 'GPIO_set':
GPIOset(datalist[3],datalist[4])
reply = 'true;GPIO_set;'+datalist[4]+';'+datalist[5]+';'
gpiolist = datalist[3].split(",")
for gpio in gpiolist:
r1 = conndb.execute("UPDATE stany set Stan =2,Edit_time=? where (GPIO_BCM like ? and Id!=? and IN_OUT like 'out') or (GPIO_BCM like ? and Id!=? and IN_OUT like 'out');",(datalist[5],"%"+gpio+",%",datalist[2],"%,"+gpio+"%",datalist[2])).rowcount
r2 = conndb.execute("UPDATE stany set Stan =?,Edit_time=? where GPIO_BCM =? and Id!=? and IN_OUT like 'out' ;",(datalist[4],datalist[5],gpio,datalist[2])).rowcount
conndb.execute("UPDATE stany set Stan =?,Edit_time=? where Id=?",(datalist[4],datalist[5],datalist[2]))
if r1 > 0 or r2 > 0:
reply = 'true;GPIO_set;'+datalist[4]+';2000-01-01 00:00:00.000;'
conndb.commit()
elif datalist[1] == 'GPIO_state':
reply = 'true;GPIO_state;'+str(datalist[2])+';'+str(GPIOstate(datalist[2]))+';'
elif datalist[1] == 'TEMP_read':
if temperature==True:
reply = 'true;TEMP_read;'+read_temp(2)+';'
else:
reply = 'true;TEMP_read;0;0;'
elif datalist[1] == 'Insert_Action':
conndb.execute("INSERT INTO planowanie(Warunek, Podwarunek, Rodzaj, Dane, Out_id, Stan, Edit_time) VALUES(?,?,?,?,?,?,?)",(datalist[2],datalist[3],datalist[4],datalist[5],datalist[6],datalist[7],datalist[8]))
conndb.commit()
reply= 'true;Insert_Action;'
elif datalist[1] == 'Update_Action':
conndb.execute("UPDATE planowanie set Warunek=?, Podwarunek=?, Rodzaj=?, Dane=?, Out_id=?, Stan=? Edit_time=? where Id=?",(datalist[2],datalist[3],datalist[4],datalist[5],datalist[6],datalist[7],datalist[9],datalist[8]))
conndb.commit()
reply= 'true;Update_Action;'
elif datalist[1] == 'Delete_Action':
conndb.execute("DELETE from planowanie where Id=?",(datalist[2],))
conndb.execute("UPDATE planowanie set Edit_time=? where Id in (SELECT Id FROM planowanie LIMIT 1)",(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),))
conndb.commit()
reply= 'true;Delete_Action;'
else:
reply = 'false;Connection OK, but no compatible method found, probably encryption error;'
else:
reply = 'false;Wrong password !;'
if PASSWORD != '' and passwalidation == True :
reply = '1;'+encrypt(ENC_KEY,reply)+';'
s.sendto(reply , addr)
print 'Message[' + addr[0] + ':' + str(addr[1]) + '] - ' + data
print reply
except KeyboardInterrupt:
print "...Ending..."
exitapp = True
s.close()
conndb.close()
GPIO.cleanup()
sys.exit()
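# --- Illustrative client sketch (added; not part of the original server) ----
# The server above expects semicolon-separated UDP datagrams where field 0 is
# the SHA-256 hex digest of the password and field 1 is an AES-CBC payload
# (base64) built with the MD5-derived key, exactly what encrypt() produces.
# Host, port and password below are placeholders, and encrypt()/decrypt()
# only exist when the server was started with -password.
def example_client_test(host='127.0.0.1', port=50000, password='secret'):
    pass_hash = hashlib.sha256(password.encode()).hexdigest()
    enc_key = hashlib.md5(password.encode()).hexdigest()
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    client.sendto(pass_hash + ';' + encrypt(enc_key, 'test'), (host, port))
    raw = client.recvfrom(1024)[0]
    # replies look like '1;<encrypted>;' when a password is set
    if raw.startswith('1;'):
        return decrypt(enc_key, raw.split(';')[1])
    return raw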
|
eldarion/edge
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup
setup(
name = "edge",
version = "0.4.0",
url = "http://pypi.python.org/pypi/edge",
license = "BSD",
description = "Python bindings for Directed Edge's API",
author = "Directed Edge",
py_modules = ["directed_edge"],
install_requires = ["httplib2"]
)
|
WillisXChen/django-oscar
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/sphinx/pycode/nodes.py
|
12
|
# -*- coding: utf-8 -*-
"""
sphinx.pycode.nodes
~~~~~~~~~~~~~~~~~~~
Parse tree node implementations.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class BaseNode(object):
"""
Node superclass for both terminal and nonterminal nodes.
"""
parent = None
def _eq(self, other):
raise NotImplementedError
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
def __ne__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return not self._eq(other)
__hash__ = None
def get_prev_sibling(self):
"""Return previous child in parent's children, or None."""
if self.parent is None:
return None
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i-1]
def get_next_sibling(self):
"""Return next child in parent's children, or None."""
if self.parent is None:
return None
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i+1]
except IndexError:
return None
def get_prev_leaf(self):
"""Return the leaf node that precedes this node in the parse tree."""
def last_child(node):
if isinstance(node, Leaf):
return node
elif not node.children:
return None
else:
return last_child(node.children[-1])
if self.parent is None:
return None
prev = self.get_prev_sibling()
if isinstance(prev, Leaf):
return prev
elif prev is not None:
return last_child(prev)
return self.parent.get_prev_leaf()
def get_next_leaf(self):
"""Return self if leaf, otherwise the leaf node that succeeds this
node in the parse tree.
"""
node = self
while not isinstance(node, Leaf):
assert node.children
node = node.children[0]
return node
def get_lineno(self):
"""Return the line number which generated the invocant node."""
return self.get_next_leaf().lineno
def get_prefix(self):
"""Return the prefix of the next leaf node."""
# only leaves carry a prefix
return self.get_next_leaf().prefix
class Node(BaseNode):
"""
Node implementation for nonterminals.
"""
def __init__(self, type, children, context=None):
# type of nonterminals is >= 256
# assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
# assert ch.parent is None, repr(ch)
ch.parent = self
def __repr__(self):
return '%s(%s, %r)' % (self.__class__.__name__,
self.type, self.children)
def __str__(self):
"""This reproduces the input source exactly."""
return ''.join(map(str, self.children))
def _eq(self, other):
return (self.type, self.children) == (other.type, other.children)
# support indexing the node directly instead of .children
def __getitem__(self, index):
return self.children[index]
def __iter__(self):
return iter(self.children)
def __len__(self):
return len(self.children)
class Leaf(BaseNode):
"""
Node implementation for leaf nodes (terminals).
"""
prefix = '' # Whitespace and comments preceding this token in the input
lineno = 0 # Line where this token starts in the input
column = 0 # Column where this token starts in the input
def __init__(self, type, value, context=None):
# type of terminals is below 256
# assert 0 <= type < 256, type
self.type = type
self.value = value
if context is not None:
self.prefix, (self.lineno, self.column) = context
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.type, self.value, self.prefix)
def __str__(self):
"""This reproduces the input source exactly."""
return self.prefix + str(self.value)
def _eq(self, other):
"""Compares two nodes for equality."""
return (self.type, self.value) == (other.type, other.value)
def convert(grammar, raw_node):
"""Convert raw node to a Node or Leaf instance."""
type, value, context, children = raw_node
if children or type in grammar.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context)
def nice_repr(node, number2name, prefix=False):
def _repr(node):
if isinstance(node, Leaf):
return "%s(%r)" % (number2name[node.type], node.value)
else:
return "%s(%s)" % (number2name[node.type],
', '.join(map(_repr, node.children)))
def _prepr(node):
if isinstance(node, Leaf):
return "%s(%r, %r)" % (number2name[node.type],
node.prefix, node.value)
else:
return "%s(%s)" % (number2name[node.type],
', '.join(map(_prepr, node.children)))
return (prefix and _prepr or _repr)(node)
class NodeVisitor(object):
def __init__(self, number2name, *args):
self.number2name = number2name
self.init(*args)
def init(self, *args):
pass
def visit(self, node):
"""Visit a node."""
method = 'visit_' + self.number2name[node.type]
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
if isinstance(node, Node):
for child in node:
self.visit(child)
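# Illustrative sketch (added; not part of the original module): a NodeVisitor
# subclass that collects every Leaf value it meets.  The `number2name`
# mapping is assumed to come from the pgen2 grammar (symbol and token names);
# it is only passed through here, never built.
class LeafCollector(NodeVisitor):
    def init(self):
        self.values = []
    def generic_visit(self, node):
        if isinstance(node, Leaf):
            self.values.append(node.value)
        else:
            NodeVisitor.generic_visit(self, node)
# A parse tree produced via convert() could then be walked with
# LeafCollector(number2name).visit(tree) and inspected through `.values`.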
|
dfalt974/SickRage
|
refs/heads/master
|
lib/oauthlib/oauth2/rfc6749/utils.py
|
19
|
# -*- coding: utf-8 -*-
"""
oauthlib.utils
~~~~~~~~~~~~~~
This module contains utility methods used by various parts of the OAuth 2 spec.
"""
from __future__ import absolute_import, unicode_literals
import datetime
import os
from oauthlib.common import unicode_type, urldecode
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
def list_to_scope(scope):
"""Convert a list of scopes to a space separated string."""
if isinstance(scope, unicode_type) or scope is None:
return scope
elif isinstance(scope, (set, tuple, list)):
return " ".join([unicode_type(s) for s in scope])
else:
raise ValueError("Invalid scope (%s), must be string, tuple, set, or list." % scope)
def scope_to_list(scope):
"""Convert a space separated string to a list of scopes."""
if isinstance(scope, (tuple, list, set)):
return [unicode_type(s) for s in scope]
elif scope is None:
return None
else:
return scope.strip().split(" ")
def params_from_uri(uri):
params = dict(urldecode(urlparse(uri).query))
if 'scope' in params:
params['scope'] = scope_to_list(params['scope'])
return params
def host_from_uri(uri):
"""Extract hostname and port from URI.
Will use default port for HTTP and HTTPS if none is present in the URI.
"""
default_ports = {
'HTTP': '80',
'HTTPS': '443',
}
sch, netloc, path, par, query, fra = urlparse(uri)
if ':' in netloc:
netloc, port = netloc.split(':', 1)
else:
port = default_ports.get(sch.upper())
return netloc, port
def escape(u):
"""Escape a string in an OAuth-compatible fashion.
TODO: verify whether this can in fact be used for OAuth 2
"""
if not isinstance(u, unicode_type):
raise ValueError('Only unicode objects are escapable.')
return quote(u.encode('utf-8'), safe=b'~')
def generate_age(issue_time):
"""Generate a age parameter for MAC authentication draft 00."""
td = datetime.datetime.now() - issue_time
age = (td.microseconds + (td.seconds + td.days * 24 * 3600)
* 10 ** 6) / 10 ** 6
return unicode_type(age)
def is_secure_transport(uri):
"""Check if the uri is over ssl."""
if os.environ.get('OAUTHLIB_INSECURE_TRANSPORT'):
return True
return uri.lower().startswith('https://')
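# Illustrative usage sketch (added; not part of the original module).  The
# values are made up; it only exercises the helpers defined above.
if __name__ == '__main__':  # pragma: no cover
    print(list_to_scope(['read', 'write']))            # 'read write'
    print(scope_to_list('read write'))                 # ['read', 'write']
    print(host_from_uri('https://example.com/path'))   # ('example.com', '443')
    # False unless OAUTHLIB_INSECURE_TRANSPORT is set in the environment
    print(is_secure_transport('http://example.com/'))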
|
alangwansui/mtl_ordercenter
|
refs/heads/master
|
openerp/tools/yaml_import.py
|
35
|
# -*- coding: utf-8 -*-
import threading
import types
import time # used to eval time.strftime expressions
from datetime import datetime, timedelta
import logging
import openerp.pooler as pooler
import openerp.sql_db as sql_db
import misc
from config import config
import yaml_tag
import yaml
import re
from lxml import etree
from openerp import SUPERUSER_ID
# YAML import needs both safe and unsafe eval, but let's
# default to /safe/.
unsafe_eval = eval
from safe_eval import safe_eval as eval
import assertion_report
_logger = logging.getLogger(__name__)
class YamlImportException(Exception):
pass
class YamlImportAbortion(Exception):
pass
def _is_yaml_mapping(node, tag_constructor):
value = isinstance(node, types.DictionaryType) \
and len(node.keys()) == 1 \
and isinstance(node.keys()[0], tag_constructor)
return value
def is_comment(node):
return isinstance(node, types.StringTypes)
def is_assert(node):
return isinstance(node, yaml_tag.Assert) \
or _is_yaml_mapping(node, yaml_tag.Assert)
def is_record(node):
return _is_yaml_mapping(node, yaml_tag.Record)
def is_python(node):
return _is_yaml_mapping(node, yaml_tag.Python)
def is_menuitem(node):
return isinstance(node, yaml_tag.Menuitem) \
or _is_yaml_mapping(node, yaml_tag.Menuitem)
def is_function(node):
return isinstance(node, yaml_tag.Function) \
or _is_yaml_mapping(node, yaml_tag.Function)
def is_report(node):
return isinstance(node, yaml_tag.Report)
def is_workflow(node):
return isinstance(node, yaml_tag.Workflow)
def is_act_window(node):
return isinstance(node, yaml_tag.ActWindow)
def is_delete(node):
return isinstance(node, yaml_tag.Delete)
def is_context(node):
return isinstance(node, yaml_tag.Context)
def is_url(node):
return isinstance(node, yaml_tag.Url)
def is_eval(node):
return isinstance(node, yaml_tag.Eval)
def is_ref(node):
return isinstance(node, yaml_tag.Ref) \
or _is_yaml_mapping(node, yaml_tag.Ref)
def is_ir_set(node):
return _is_yaml_mapping(node, yaml_tag.IrSet)
def is_string(node):
return isinstance(node, basestring)
class RecordDictWrapper(dict):
"""
Used to pass a record as locals in eval:
records do not strictly behave like dict, so we force them to.
"""
def __init__(self, record):
self.record = record
def __getitem__(self, key):
if key in self.record:
return self.record[key]
return dict.__getitem__(self, key)
class YamlInterpreter(object):
def __init__(self, cr, module, id_map, mode, filename, report=None, noupdate=False, loglevel=logging.DEBUG):
self.cr = cr
self.module = module
self.id_map = id_map
self.mode = mode
self.filename = filename
if report is None:
report = assertion_report.assertion_report()
self.assertion_report = report
self.noupdate = noupdate
self.loglevel = loglevel
self.pool = pooler.get_pool(cr.dbname)
self.uid = 1
self.context = {} # openerp context
self.eval_context = {'ref': self._ref(),
'_ref': self._ref(), # added '_ref' so that record['ref'] is possible
'time': time,
'datetime': datetime,
'timedelta': timedelta}
def _log(self, *args, **kwargs):
_logger.log(self.loglevel, *args, **kwargs)
def _ref(self):
return lambda xml_id: self.get_id(xml_id)
def get_model(self, model_name):
model = self.pool.get(model_name)
assert model, "The model %s does not exist." % (model_name,)
return model
def validate_xml_id(self, xml_id):
id = xml_id
if '.' in xml_id:
module, id = xml_id.split('.', 1)
assert '.' not in id, "The ID reference '%s' must contain at most one dot.\n" \
"It is used to refer to another module's ID, in the form: module.record_id" \
% (xml_id,)
if module != self.module:
module_count = self.pool.get('ir.module.module').search_count(self.cr, self.uid, \
['&', ('name', '=', module), ('state', 'in', ['installed'])])
assert module_count == 1, 'The ID "%s" refers to an uninstalled module.' % (xml_id,)
if len(id) > 64: # TODO where does 64 come from (DB is 128)? should be a constant or loaded from DB
_logger.error('id: %s is too long (max: 64)', id)
def get_id(self, xml_id):
if xml_id is False or xml_id is None:
return False
#if not xml_id:
# raise YamlImportException("The xml_id should be a non empty string.")
elif isinstance(xml_id, types.IntType):
id = xml_id
elif xml_id in self.id_map:
id = self.id_map[xml_id]
else:
if '.' in xml_id:
module, checked_xml_id = xml_id.split('.', 1)
else:
module = self.module
checked_xml_id = xml_id
try:
_, id = self.pool.get('ir.model.data').get_object_reference(self.cr, self.uid, module, checked_xml_id)
self.id_map[xml_id] = id
except ValueError:
raise ValueError("""%s not found when processing %s.
This Yaml file appears to depend on missing data. This often happens for
tests that belong to a module's test suite and depend on each other.""" % (checked_xml_id, self.filename))
return id
def get_context(self, node, eval_dict):
context = self.context.copy()
if node.context:
context.update(eval(node.context, eval_dict))
return context
def isnoupdate(self, node):
return self.noupdate or node.noupdate or False
def _get_first_result(self, results, default=False):
if len(results):
value = results[0]
if isinstance(value, types.TupleType):
value = value[0]
else:
value = default
return value
def process_comment(self, node):
return node
def _log_assert_failure(self, msg, *args):
self.assertion_report.record_failure()
_logger.error(msg, *args)
def _get_assertion_id(self, assertion):
if assertion.id:
ids = [self.get_id(assertion.id)]
elif assertion.search:
q = eval(assertion.search, self.eval_context)
ids = self.pool.get(assertion.model).search(self.cr, self.uid, q, context=assertion.context)
else:
raise YamlImportException('Nothing to assert: you must give either an id or a search criteria.')
return ids
def process_assert(self, node):
if isinstance(node, dict):
assertion, expressions = node.items()[0]
else:
assertion, expressions = node, []
if self.isnoupdate(assertion) and self.mode != 'init':
_logger.warning('This assertion was not evaluated ("%s").', assertion.string)
return
model = self.get_model(assertion.model)
ids = self._get_assertion_id(assertion)
if assertion.count is not None and len(ids) != assertion.count:
msg = 'assertion "%s" failed!\n' \
' Incorrect search count:\n' \
' expected count: %d\n' \
' obtained count: %d\n'
args = (assertion.string, assertion.count, len(ids))
self._log_assert_failure(msg, *args)
else:
context = self.get_context(assertion, self.eval_context)
for id in ids:
record = model.browse(self.cr, self.uid, id, context)
for test in expressions:
try:
success = unsafe_eval(test, self.eval_context, RecordDictWrapper(record))
except Exception, e:
_logger.debug('Exception during evaluation of !assert block in yaml_file %s.', self.filename, exc_info=True)
raise YamlImportAbortion(e)
if not success:
msg = 'Assertion "%s" FAILED\ntest: %s\n'
args = (assertion.string, test)
for aop in ('==', '!=', '<>', 'in', 'not in', '>=', '<=', '>', '<'):
if aop in test:
left, right = test.split(aop,1)
lmsg = ''
rmsg = ''
try:
lmsg = unsafe_eval(left, self.eval_context, RecordDictWrapper(record))
except Exception, e:
lmsg = '<exc>'
try:
rmsg = unsafe_eval(right, self.eval_context, RecordDictWrapper(record))
except Exception, e:
rmsg = '<exc>'
msg += 'values: ! %s %s %s'
args += ( lmsg, aop, rmsg )
break
self._log_assert_failure(msg, *args)
return
else: # all tests were successful for this assertion tag (no break)
self.assertion_report.record_success()
def _coerce_bool(self, value, default=False):
if isinstance(value, types.BooleanType):
b = value
if isinstance(value, types.StringTypes):
b = value.strip().lower() not in ('0', 'false', 'off', 'no')
elif isinstance(value, types.IntType):
b = bool(value)
else:
b = default
return b
def create_osv_memory_record(self, record, fields):
model = self.get_model(record.model)
context = self.get_context(record, self.eval_context)
record_dict = self._create_record(model, fields)
id_new = model.create(self.cr, self.uid, record_dict, context=context)
self.id_map[record.id] = int(id_new)
return record_dict
def process_record(self, node):
import openerp.osv as osv
record, fields = node.items()[0]
model = self.get_model(record.model)
view_id = record.view
if view_id and (view_id is not True) and isinstance(view_id, basestring):
module = self.module
if '.' in view_id:
module, view_id = view_id.split('.',1)
view_id = self.pool.get('ir.model.data').get_object_reference(self.cr, SUPERUSER_ID, module, view_id)[1]
if model.is_transient():
record_dict=self.create_osv_memory_record(record, fields)
else:
self.validate_xml_id(record.id)
try:
self.pool.get('ir.model.data')._get_id(self.cr, SUPERUSER_ID, self.module, record.id)
default = False
except ValueError:
default = True
if self.isnoupdate(record) and self.mode != 'init':
id = self.pool.get('ir.model.data')._update_dummy(self.cr, SUPERUSER_ID, record.model, self.module, record.id)
# check if the resource already existed at the last update
if id:
self.id_map[record] = int(id)
return None
else:
if not self._coerce_bool(record.forcecreate):
return None
#context = self.get_context(record, self.eval_context)
#TOFIX: record.context like {'withoutemployee':True} should pass from self.eval_context. example: test_project.yml in project module
context = record.context
view_info = False
if view_id:
varg = view_id
if view_id is True: varg = False
view_info = model.fields_view_get(self.cr, SUPERUSER_ID, varg, 'form', context)
record_dict = self._create_record(model, fields, view_info, default=default)
_logger.debug("RECORD_DICT %s" % record_dict)
id = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, record.model, \
self.module, record_dict, record.id, noupdate=self.isnoupdate(record), mode=self.mode, context=context)
self.id_map[record.id] = int(id)
if config.get('import_partial'):
self.cr.commit()
def _create_record(self, model, fields, view_info=False, parent={}, default=True):
"""This function processes the !record tag in yalm files. It simulates the record creation through an xml
view (either specified on the !record tag or the default one for this object), including the calls to
on_change() functions, and sending only values for fields that aren't set as readonly.
:param model: model instance
:param fields: dictionary mapping the field names and their values
:param view_info: result of fields_view_get() called on the object
:param parent: dictionary containing the values already computed for the parent, in case of one2many fields
:param default: if True, the model's default values are processed and merged in as well
:return: dictionary mapping the field names and their values, ready to use when calling the create() function
:rtype: dict
"""
def _get_right_one2many_view(fg, field_name, view_type):
one2many_view = fg[field_name]['views'].get(view_type)
# if the view is not defined inline, we call fields_view_get()
if not one2many_view:
one2many_view = self.pool.get(fg[field_name]['relation']).fields_view_get(self.cr, SUPERUSER_ID, False, view_type, self.context)
return one2many_view
def process_val(key, val):
if fg[key]['type'] == 'many2one':
if type(val) in (tuple,list):
val = val[0]
elif fg[key]['type'] == 'one2many':
if val and isinstance(val, (list,tuple)) and isinstance(val[0], dict):
# we want to return only the fields that aren't readonly
# For that, we need to first get the right tree view to consider for the field `key´
one2many_tree_view = _get_right_one2many_view(fg, key, 'tree')
arch = etree.fromstring(one2many_tree_view['arch'].encode('utf-8'))
for rec in val:
# make a copy for the iteration, as we will alter `rec´
rec_copy = rec.copy()
for field_key in rec_copy:
# if field is missing in view or has a readonly modifier, drop it
field_elem = arch.xpath("//field[@name='%s']" % field_key)
if field_elem and (field_elem[0].get('modifiers', '{}').find('"readonly": true') >= 0):
# TODO: currently we only support if readonly is True in the modifiers. Some improvement may be done in
# order to support also modifiers that look like {"readonly": [["state", "not in", ["draft", "confirm"]]]}
del rec[field_key]
# now that unwanted values have been removed from val, we can encapsulate it in a tuple as returned value
val = map(lambda x: (0,0,x), val)
elif fg[key]['type'] == 'many2many':
if val and isinstance(val,(list,tuple)) and isinstance(val[0], (int,long)):
val = [(6,0,val)]
# we want to return only the fields that aren't readonly
if el.get('modifiers', '{}').find('"readonly": true') >= 0:
# TODO: currently we only support if readonly is True in the modifiers. Some improvement may be done in
# order to support also modifiers that look like {"readonly": [["state", "not in", ["draft", "confirm"]]]}
return False
return val
if view_info:
arch = etree.fromstring(view_info['arch'].decode('utf-8'))
view = arch if len(arch) else False
else:
view = False
fields = fields or {}
if view is not False:
fg = view_info['fields']
# gather the default values on the object. (Can't use `fields´ as parameter instead of {} because we may
# have references like `base.main_company´ in the yaml file and it's not compatible with the function)
defaults = default and model._add_missing_default_values(self.cr, SUPERUSER_ID, {}, context=self.context) or {}
# copy the default values in record_dict, only if they are in the view (because that's what the client does)
# the other default values will be added later on by the create().
record_dict = dict([(key, val) for key, val in defaults.items() if key in fg])
# Process all on_change calls
nodes = [view]
while nodes:
el = nodes.pop(0)
if el.tag=='field':
field_name = el.attrib['name']
assert field_name in fg, "The field '%s' is defined in the form view but not on the object '%s'!" % (field_name, model._name)
if field_name in fields:
one2many_form_view = None
if (view is not False) and (fg[field_name]['type']=='one2many'):
# for one2many fields, we want to eval them using the inline form view defined on the parent
one2many_form_view = _get_right_one2many_view(fg, field_name, 'form')
field_value = self._eval_field(model, field_name, fields[field_name], one2many_form_view or view_info, parent=record_dict, default=default)
#call process_val to not update record_dict if values were given for readonly fields
val = process_val(field_name, field_value)
if val:
record_dict[field_name] = val
#if (field_name in defaults) and defaults[field_name] == field_value:
# print '*** You can remove these lines:', field_name, field_value
#if field_name has a default value or a value is given in the yaml file, we must call its on_change()
elif field_name not in defaults:
continue
if not el.attrib.get('on_change', False):
continue
match = re.match("([a-z_1-9A-Z]+)\((.*)\)", el.attrib['on_change'])
assert match, "Unable to parse the on_change '%s'!" % (el.attrib['on_change'], )
# creating the context
class parent2(object):
def __init__(self, d):
self.d = d
def __getattr__(self, name):
return self.d.get(name, False)
ctx = record_dict.copy()
ctx['context'] = self.context
ctx['uid'] = SUPERUSER_ID
ctx['parent'] = parent2(parent)
for a in fg:
if a not in ctx:
ctx[a] = process_val(a, defaults.get(a, False))
# Evaluation args
args = map(lambda x: eval(x, ctx), match.group(2).split(','))
result = getattr(model, match.group(1))(self.cr, SUPERUSER_ID, [], *args)
for key, val in (result or {}).get('value', {}).items():
assert key in fg, "The returning field '%s' from your on_change call '%s' does not exist either on the object '%s', either in the view '%s' used for the creation" % (key, match.group(1), model._name, view_info['name'])
record_dict[key] = process_val(key, val)
#if (key in fields) and record_dict[key] == process_val(key, val):
# print '*** You can remove these lines:', key, val
else:
nodes = list(el) + nodes
else:
record_dict = {}
for field_name, expression in fields.items():
if field_name in record_dict:
continue
field_value = self._eval_field(model, field_name, expression, default=False)
record_dict[field_name] = field_value
return record_dict
def process_ref(self, node, column=None):
assert node.search or node.id, '!ref node should have a `search` attribute or `id` attribute'
if node.search:
if node.model:
model_name = node.model
elif column:
model_name = column._obj
else:
raise YamlImportException('You need to give a model for the search, or a column to infer it.')
model = self.get_model(model_name)
q = eval(node.search, self.eval_context)
ids = model.search(self.cr, self.uid, q)
if node.use:
instances = model.browse(self.cr, self.uid, ids)
value = [inst[node.use] for inst in instances]
else:
value = ids
elif node.id:
value = self.get_id(node.id)
else:
value = None
return value
def process_eval(self, node):
return eval(node.expression, self.eval_context)
def _eval_field(self, model, field_name, expression, view_info=False, parent={}, default=True):
# TODO this should be refactored as something like model.get_field() in bin/osv
if field_name in model._columns:
column = model._columns[field_name]
elif field_name in model._inherit_fields:
column = model._inherit_fields[field_name][2]
else:
raise KeyError("Object '%s' does not contain field '%s'" % (model, field_name))
if is_ref(expression):
elements = self.process_ref(expression, column)
if column._type in ("many2many", "one2many"):
value = [(6, 0, elements)]
else: # many2one
if isinstance(elements, (list,tuple)):
value = self._get_first_result(elements)
else:
value = elements
elif column._type == "many2one":
value = self.get_id(expression)
elif column._type == "one2many":
other_model = self.get_model(column._obj)
value = [(0, 0, self._create_record(other_model, fields, view_info, parent, default=default)) for fields in expression]
elif column._type == "many2many":
ids = [self.get_id(xml_id) for xml_id in expression]
value = [(6, 0, ids)]
elif column._type == "date" and is_string(expression):
# enforce ISO format for string date values, to be locale-agnostic during tests
time.strptime(expression, misc.DEFAULT_SERVER_DATE_FORMAT)
value = expression
elif column._type == "datetime" and is_string(expression):
# enforce ISO format for string datetime values, to be locale-agnostic during tests
time.strptime(expression, misc.DEFAULT_SERVER_DATETIME_FORMAT)
value = expression
else: # scalar field
if is_eval(expression):
value = self.process_eval(expression)
else:
value = expression
# raise YamlImportException('Unsupported column "%s" or value %s:%s' % (field_name, type(expression), expression))
return value
def process_context(self, node):
self.context = node.__dict__
if node.uid:
self.uid = self.get_id(node.uid)
if node.noupdate:
self.noupdate = node.noupdate
def process_python(self, node):
python, statements = node.items()[0]
model = self.get_model(python.model)
statements = statements.replace("\r\n", "\n")
code_context = { 'model': model, 'cr': self.cr, 'uid': self.uid, 'log': self._log, 'context': self.context }
code_context.update({'self': model}) # remove me when no !python block test uses 'self' anymore
try:
code_obj = compile(statements, self.filename, 'exec')
unsafe_eval(code_obj, {'ref': self.get_id}, code_context)
except AssertionError, e:
self._log_assert_failure('AssertionError in Python code %s: %s', python.name, e)
return
except Exception, e:
_logger.debug('Exception during evaluation of !python block in yaml_file %s.', self.filename, exc_info=True)
raise
else:
self.assertion_report.record_success()
def process_workflow(self, node):
workflow, values = node.items()[0]
if self.isnoupdate(workflow) and self.mode != 'init':
return
if workflow.ref:
id = self.get_id(workflow.ref)
else:
if not values:
raise YamlImportException('You must define a child node if you do not give a ref.')
if not len(values) == 1:
raise YamlImportException('Only one child node is accepted (%d given).' % len(values))
value = values[0]
if not 'model' in value and (not 'eval' in value or not 'search' in value):
raise YamlImportException('You must provide a "model" and an "eval" or "search" to evaluate.')
value_model = self.get_model(value['model'])
local_context = {'obj': lambda x: value_model.browse(self.cr, self.uid, x, context=self.context)}
local_context.update(self.id_map)
id = eval(value['eval'], self.eval_context, local_context)
if workflow.uid is not None:
uid = workflow.uid
else:
uid = self.uid
self.cr.execute('select distinct signal from wkf_transition')
signals=[x['signal'] for x in self.cr.dictfetchall()]
if workflow.action not in signals:
raise YamlImportException('Incorrect action %s. No such action defined' % workflow.action)
import openerp.netsvc as netsvc
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, workflow.model, id, workflow.action, self.cr)
def _eval_params(self, model, params):
args = []
for i, param in enumerate(params):
if isinstance(param, types.ListType):
value = self._eval_params(model, param)
elif is_ref(param):
value = self.process_ref(param)
elif is_eval(param):
value = self.process_eval(param)
elif isinstance(param, types.DictionaryType): # supports XML syntax
param_model = self.get_model(param.get('model', model))
if 'search' in param:
q = eval(param['search'], self.eval_context)
ids = param_model.search(self.cr, self.uid, q)
value = self._get_first_result(ids)
elif 'eval' in param:
local_context = {'obj': lambda x: param_model.browse(self.cr, self.uid, x, self.context)}
local_context.update(self.id_map)
value = eval(param['eval'], self.eval_context, local_context)
else:
raise YamlImportException('You must provide either a !ref or at least an "eval" or a "search" for function parameter #%d.' % i)
else:
value = param # scalar value
args.append(value)
return args
def process_function(self, node):
function, params = node.items()[0]
if self.isnoupdate(function) and self.mode != 'init':
return
model = self.get_model(function.model)
if function.eval:
args = self.process_eval(function.eval)
else:
args = self._eval_params(function.model, params)
method = function.name
getattr(model, method)(self.cr, self.uid, *args)
def _set_group_values(self, node, values):
if node.groups:
group_names = node.groups.split(',')
groups_value = []
for group in group_names:
if group.startswith('-'):
group_id = self.get_id(group[1:])
groups_value.append((3, group_id))
else:
group_id = self.get_id(group)
groups_value.append((4, group_id))
values['groups_id'] = groups_value
def process_menuitem(self, node):
self.validate_xml_id(node.id)
if not node.parent:
parent_id = False
self.cr.execute('select id from ir_ui_menu where parent_id is null and name=%s', (node.name,))
res = self.cr.fetchone()
values = {'parent_id': parent_id, 'name': node.name}
else:
parent_id = self.get_id(node.parent)
values = {'parent_id': parent_id}
if node.name:
values['name'] = node.name
try:
res = [ self.get_id(node.id) ]
except: # which exception ?
res = None
if node.action:
action_type = node.type or 'act_window'
icons = {
"act_window": 'STOCK_NEW',
"report.xml": 'STOCK_PASTE',
"wizard": 'STOCK_EXECUTE',
"url": 'STOCK_JUMP_TO',
}
values['icon'] = icons.get(action_type, 'STOCK_NEW')
if action_type == 'act_window':
action_id = self.get_id(node.action)
self.cr.execute('select view_type,view_mode,name,view_id,target from ir_act_window where id=%s', (action_id,))
ir_act_window_result = self.cr.fetchone()
assert ir_act_window_result, "No window action defined for this id %s !\n" \
"Verify that this is a window action or add a type argument." % (node.action,)
action_type, action_mode, action_name, view_id, target = ir_act_window_result
if view_id:
self.cr.execute('SELECT type FROM ir_ui_view WHERE id=%s', (view_id,))
# TODO guess why action_mode is ir_act_window.view_mode above and ir_ui_view.type here
action_mode = self.cr.fetchone()
self.cr.execute('SELECT view_mode FROM ir_act_window_view WHERE act_window_id=%s ORDER BY sequence LIMIT 1', (action_id,))
if self.cr.rowcount:
action_mode = self.cr.fetchone()
if action_type == 'tree':
values['icon'] = 'STOCK_INDENT'
elif action_mode and action_mode.startswith('tree'):
values['icon'] = 'STOCK_JUSTIFY_FILL'
elif action_mode and action_mode.startswith('graph'):
values['icon'] = 'terp-graph'
elif action_mode and action_mode.startswith('calendar'):
values['icon'] = 'terp-calendar'
if target == 'new':
values['icon'] = 'STOCK_EXECUTE'
if not values.get('name', False):
values['name'] = action_name
elif action_type == 'wizard':
action_id = self.get_id(node.action)
self.cr.execute('select name from ir_act_wizard where id=%s', (action_id,))
ir_act_wizard_result = self.cr.fetchone()
if (not values.get('name', False)) and ir_act_wizard_result:
values['name'] = ir_act_wizard_result[0]
else:
raise YamlImportException("Unsupported type '%s' in menuitem tag." % action_type)
if node.sequence:
values['sequence'] = node.sequence
if node.icon:
values['icon'] = node.icon
self._set_group_values(node, values)
pid = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, \
'ir.ui.menu', self.module, values, node.id, mode=self.mode, \
noupdate=self.isnoupdate(node), res_id=res and res[0] or False)
if node.id and parent_id:
self.id_map[node.id] = int(parent_id)
if node.action and pid:
action_type = node.type or 'act_window'
action_id = self.get_id(node.action)
action = "ir.actions.%s,%d" % (action_type, action_id)
self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, 'action', \
'tree_but_open', 'Menuitem', [('ir.ui.menu', int(parent_id))], action, True, True, xml_id=node.id)
def process_act_window(self, node):
assert getattr(node, 'id'), "Attribute %s of act_window is empty !" % ('id',)
assert getattr(node, 'name'), "Attribute %s of act_window is empty !" % ('name',)
assert getattr(node, 'res_model'), "Attribute %s of act_window is empty !" % ('res_model',)
self.validate_xml_id(node.id)
view_id = False
if node.view:
view_id = self.get_id(node.view)
if not node.context:
node.context={}
context = eval(str(node.context), self.eval_context)
values = {
'name': node.name,
'type': node.type or 'ir.actions.act_window',
'view_id': view_id,
'domain': node.domain,
'context': context,
'res_model': node.res_model,
'src_model': node.src_model,
'view_type': node.view_type or 'form',
'view_mode': node.view_mode or 'tree,form',
'usage': node.usage,
'limit': node.limit,
'auto_refresh': node.auto_refresh,
'multi': getattr(node, 'multi', False),
}
self._set_group_values(node, values)
if node.target:
values['target'] = node.target
id = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, \
'ir.actions.act_window', self.module, values, node.id, mode=self.mode)
self.id_map[node.id] = int(id)
if node.src_model:
keyword = 'client_action_relate'
value = 'ir.actions.act_window,%s' % id
replace = node.replace or True
self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, 'action', keyword, \
node.id, [node.src_model], value, replace=replace, noupdate=self.isnoupdate(node), isobject=True, xml_id=node.id)
# TODO add remove ir.model.data
def process_delete(self, node):
assert getattr(node, 'model'), "Attribute %s of delete tag is empty !" % ('model',)
if self.pool.get(node.model):
if node.search:
ids = self.pool.get(node.model).search(self.cr, self.uid, eval(node.search, self.eval_context))
else:
ids = [self.get_id(node.id)]
if len(ids):
self.pool.get(node.model).unlink(self.cr, self.uid, ids)
else:
self._log("Record not deleted.")
def process_url(self, node):
self.validate_xml_id(node.id)
res = {'name': node.name, 'url': node.url, 'target': node.target}
id = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, \
"ir.actions.act_url", self.module, res, node.id, mode=self.mode)
self.id_map[node.id] = int(id)
# ir_set
if (not node.menu or eval(node.menu)) and id:
keyword = node.keyword or 'client_action_multi'
value = 'ir.actions.act_url,%s' % id
replace = node.replace or True
self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, 'action', \
keyword, node.url, ["ir.actions.act_url"], value, replace=replace, \
noupdate=self.isnoupdate(node), isobject=True, xml_id=node.id)
def process_ir_set(self, node):
if not self.mode == 'init':
return False
_, fields = node.items()[0]
res = {}
for fieldname, expression in fields.items():
if is_eval(expression):
value = eval(expression.expression, self.eval_context)
else:
value = expression
res[fieldname] = value
self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, res['key'], res['key2'], \
res['name'], res['models'], res['value'], replace=res.get('replace',True), \
isobject=res.get('isobject', False), meta=res.get('meta',None))
def process_report(self, node):
values = {}
for dest, f in (('name','string'), ('model','model'), ('report_name','name')):
values[dest] = getattr(node, f)
assert values[dest], "Attribute %s of report is empty !" % (f,)
for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment'),('attachment_use','attachment_use')):
if getattr(node, field):
values[dest] = getattr(node, field)
if node.auto:
values['auto'] = eval(node.auto)
if node.sxw:
sxw_file = misc.file_open(node.sxw)
try:
sxw_content = sxw_file.read()
values['report_sxw_content'] = sxw_content
finally:
sxw_file.close()
if node.header:
values['header'] = eval(node.header)
values['multi'] = node.multi and eval(node.multi)
xml_id = node.id
self.validate_xml_id(xml_id)
self._set_group_values(node, values)
id = self.pool.get('ir.model.data')._update(self.cr, SUPERUSER_ID, "ir.actions.report.xml", \
self.module, values, xml_id, noupdate=self.isnoupdate(node), mode=self.mode)
self.id_map[xml_id] = int(id)
if not node.menu or eval(node.menu):
keyword = node.keyword or 'client_print_multi'
value = 'ir.actions.report.xml,%s' % id
replace = node.replace or True
self.pool.get('ir.model.data').ir_set(self.cr, SUPERUSER_ID, 'action', \
keyword, values['name'], [values['model']], value, replace=replace, isobject=True, xml_id=xml_id)
def process_none(self):
"""
Empty node or commented node should not pass silently.
"""
self._log_assert_failure("You have an empty block in your tests.")
def process(self, yaml_string):
"""
Processes a Yaml string. Custom tags are interpreted by 'process_' instance methods.
"""
yaml_tag.add_constructors()
is_preceded_by_comment = False
for node in yaml.load(yaml_string):
is_preceded_by_comment = self._log_node(node, is_preceded_by_comment)
try:
self._process_node(node)
except Exception, e:
_logger.exception(e)
raise
def _process_node(self, node):
if is_comment(node):
self.process_comment(node)
elif is_assert(node):
self.process_assert(node)
elif is_record(node):
self.process_record(node)
elif is_python(node):
self.process_python(node)
elif is_menuitem(node):
self.process_menuitem(node)
elif is_delete(node):
self.process_delete(node)
elif is_url(node):
self.process_url(node)
elif is_context(node):
self.process_context(node)
elif is_ir_set(node):
self.process_ir_set(node)
elif is_act_window(node):
self.process_act_window(node)
elif is_report(node):
self.process_report(node)
elif is_workflow(node):
if isinstance(node, types.DictionaryType):
self.process_workflow(node)
else:
self.process_workflow({node: []})
elif is_function(node):
if isinstance(node, types.DictionaryType):
self.process_function(node)
else:
self.process_function({node: []})
elif node is None:
self.process_none()
else:
raise YamlImportException("Can not process YAML block: %s" % node)
def _log_node(self, node, is_preceded_by_comment):
if is_comment(node):
is_preceded_by_comment = True
self._log(node)
elif not is_preceded_by_comment:
if isinstance(node, types.DictionaryType):
msg = "Creating %s\n with %s"
args = node.items()[0]
self._log(msg, *args)
else:
self._log(node)
else:
is_preceded_by_comment = False
return is_preceded_by_comment
def yaml_import(cr, module, yamlfile, kind, idref=None, mode='init', noupdate=False, report=None):
if idref is None:
idref = {}
loglevel = logging.TEST if kind == 'test' else logging.DEBUG
yaml_string = yamlfile.read()
yaml_interpreter = YamlInterpreter(cr, module, idref, mode, filename=yamlfile.name, report=report, noupdate=noupdate, loglevel=loglevel)
yaml_interpreter.process(yaml_string)
# keeps convention of convert.py
convert_yaml_import = yaml_import
def threaded_yaml_import(db_name, module_name, file_name, delay=0):
def f():
time.sleep(delay)
cr = None
fp = None
try:
cr = sql_db.db_connect(db_name).cursor()
fp = misc.file_open(file_name)
convert_yaml_import(cr, module_name, fp, {}, 'update', True)
finally:
if cr: cr.close()
if fp: fp.close()
threading.Thread(target=f).start()
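# Illustrative sketch (added; not part of the original module): a minimal
# YAML test block of the kind process()/_process_node() accept -- a plain
# comment string, a !record mapping and an !assert mapping.  The model and
# the ids are made up for the example.
EXAMPLE_YAML_TEST = """
- A demo partner is created and its name is then checked.
-
  !record {model: res.partner, id: partner_demo}:
    name: Demo Partner
-
  !assert {model: res.partner, id: partner_demo, string: partner name is set}:
    - name == 'Demo Partner'
"""
# With a real cursor it would be consumed by something like
# YamlInterpreter(cr, 'my_module', {}, 'init', filename='demo.yml').process(EXAMPLE_YAML_TEST)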
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
twitterdev/twitter-for-bigquery
|
refs/heads/master
|
libs/requests/packages/charade/euctwfreq.py
|
3132
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http:#www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char to FreqOrder table ,
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purposes
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
|
mscherer/ansible
|
refs/heads/devel
|
lib/ansible/plugins/shell/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
bzero/statsmodels
|
refs/heads/master
|
statsmodels/examples/ex_regressionplots.py
|
34
|
# -*- coding: utf-8 -*-
"""Examples for Regression Plots
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import statsmodels.graphics.regressionplots as smrp
#example from tut.ols with changes
#fix a seed for these examples
np.random.seed(9876789)
# OLS non-linear curve but linear in parameters
# ---------------------------------------------
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
#estimate only linear function, misspecified because of non-linear terms
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
# plt.figure()
# plt.plot(x1, y, 'o', x1, y_true, 'b-')
res = sm.OLS(y, exog0).fit()
#print res.params
#print res.bse
plot_old = 0 #True
if plot_old:
#current bug predict requires call to model.results
#print res.model.predict
prstd, iv_l, iv_u = wls_prediction_std(res)
plt.plot(x1, res.fittedvalues, 'r-o')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
plt.title('blue: true, red: OLS')
plt.figure()
plt.plot(res.resid, 'o')
plt.title('Residuals')
fig2 = plt.figure()
ax = fig2.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.resid, 'o')
ax.set_title('residuals versus exog')# + namestr)
ax = fig2.add_subplot(2,1,2)
plt.plot(x2, res.resid, 'o')
fig3 = plt.figure()
ax = fig3.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.fittedvalues, 'o')
ax.set_title('Fitted values versus exog')# + namestr)
ax = fig3.add_subplot(2,1,2)
plt.plot(x2, res.fittedvalues, 'o')
fig4 = plt.figure()
ax = fig4.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.fittedvalues + res.resid, 'o')
ax.set_title('Fitted values plus residuals versus exog')# + namestr)
ax = fig4.add_subplot(2,1,2)
plt.plot(x2, res.fittedvalues + res.resid, 'o')
# see http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
fig5 = plt.figure()
ax = fig5.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
#exog0 columns are [x1, x2, const] (prepend=False): partial out x2 and the constant
res1a = sm.OLS(y, exog0[:,[1,2]]).fit()
res1b = sm.OLS(x1, exog0[:,[1,2]]).fit()
plt.plot(res1b.resid, res1a.resid, 'o')
res1c = sm.OLS(res1a.resid, res1b.resid).fit()
plt.plot(res1b.resid, res1c.fittedvalues, '-')
ax.set_title('Partial Regression plot')# + namestr)
ax = fig5.add_subplot(2,1,2)
#plt.plot(x2, res.fittedvalues + res.resid, 'o')
#partial regression for x2: partial out x1 and the constant
res2a = sm.OLS(y, exog0[:,[0,2]]).fit()
res2b = sm.OLS(x2, exog0[:,[0,2]]).fit()
plt.plot(res2b.resid, res2a.resid, 'o')
res2c = sm.OLS(res2a.resid, res2b.resid).fit()
plt.plot(res2b.resid, res2c.fittedvalues, '-')
# see http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
fig6 = plt.figure()
ax = fig6.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
#params follow the exog0 column order [x1, x2, const]
x1beta = x1*res.params[0]
x2beta = x2*res.params[1]
plt.plot(x1, x1beta + res.resid, 'o')
plt.plot(x1, x1beta, '-')
ax.set_title('X_i beta_i plus residuals versus exog (CCPR)')# + namestr)
ax = fig6.add_subplot(2,1,2)
plt.plot(x2, x2beta + res.resid, 'o')
plt.plot(x2, x2beta, '-')
#print res.summary()
doplots = 1
if doplots:
fig1 = smrp.plot_fit(res, 0, y_true=None)
smrp.plot_fit(res, 1, y_true=None)
smrp.plot_partregress_grid(res, exog_idx=[0,1])
smrp.plot_regress_exog(res, exog_idx=0)
smrp.plot_ccpr(res, exog_idx=0)
smrp.plot_ccpr_grid(res, exog_idx=[0,1])
from statsmodels.graphics.tests.test_regressionplots import TestPlot
tp = TestPlot()
tp.test_plot_fit()
fig1 = smrp.plot_partregress_grid(res, exog_idx=[0,1])
#add lowess
ax = fig1.axes[0]
y0 = ax.get_lines()[0]._y
x0 = ax.get_lines()[0]._x
lres = sm.nonparametric.lowess(y0, x0, frac=0.2)
ax.plot(lres[:,0], lres[:,1], 'r', lw=1.5)
ax = fig1.axes[1]
y0 = ax.get_lines()[0]._y
x0 = ax.get_lines()[0]._x
lres = sm.nonparametric.lowess(y0, x0, frac=0.2)
ax.plot(lres[:,0], lres[:,1], 'r', lw=1.5)
#plt.show()
|
danhuss/faker
|
refs/heads/master
|
faker/providers/automotive/en_NZ/__init__.py
|
1
|
from .. import Provider as AutomotiveProvider
class Provider(AutomotiveProvider):
# See https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_New_Zealand
license_formats = (
# Old plates
'??%##',
'??%###',
'??%###',
# Three letters since 2002
'A??%##',
'B??%##',
'C??%##',
'D??%##',
'E??%##',
'F??%##',
'G??%##',
'H??%##',
'J??%##',
'K??%##',
'L??%##',
'M??%##',
# After 2018
'N??%##',
)
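# Usage sketch (illustrative, not part of the provider): with the standard Faker
# entry point, '?' placeholders are filled with random letters and '%'/'#' with
# digits ('%' is non-zero), so a format such as 'A??%##' yields plates like
# 'ABX429'. The sample output is made up.
#
#     from faker import Faker
#     fake = Faker('en_NZ')
#     print(fake.license_plate())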
|
hynnet/openwrt-mt7620
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_fileio.py
|
32
|
# Adapted from test_file.py by Daniel Stutzbach
from __future__ import unicode_literals
import sys
import os
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from test.test_support import TESTFN, check_warnings, run_unittest, make_bad_fd
from test.test_support import py3k_bytes as bytes
from test.script_helper import run_python
from _io import FileIO as _FileIO
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = _FileIO(TESTFN, 'w')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write(bytes(range(10)))
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testSeekTell(self):
self.f.write(bytes(range(20)))
self.assertEqual(self.f.tell(), 20)
self.f.seek(0)
self.assertEqual(self.f.tell(), 0)
self.f.seek(10)
self.assertEqual(self.f.tell(), 10)
self.f.seek(5, 1)
self.assertEqual(self.f.tell(), 15)
self.f.seek(-5, 1)
self.assertEqual(self.f.tell(), 10)
self.f.seek(-5, 2)
self.assertEqual(self.f.tell(), 15)
def testAttributes(self):
# verify expected attributes exist
f = self.f
self.assertEqual(f.mode, "wb")
self.assertEqual(f.closed, False)
# verify the attributes are readonly
for attr in 'mode', 'closed':
self.assertRaises((AttributeError, TypeError),
setattr, f, attr, 'oops')
def testReadinto(self):
# verify readinto
self.f.write(b"\x01\x02")
self.f.close()
a = array(b'b', b'x'*10)
self.f = _FileIO(TESTFN, 'r')
n = self.f.readinto(a)
self.assertEqual(array(b'b', [1, 2]), a[:n])
def test_none_args(self):
self.f.write(b"hi\nbye\nabc")
self.f.close()
self.f = _FileIO(TESTFN, 'r')
self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
self.f.seek(0)
self.assertEqual(self.f.readline(None), b"hi\n")
self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])
def testRepr(self):
self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode='%s'>"
% (self.f.name, self.f.mode))
del self.f.name
self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode='%s'>"
% (self.f.fileno(), self.f.mode))
self.f.close()
self.assertEqual(repr(self.f), "<_io.FileIO [closed]>")
def testErrors(self):
f = self.f
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
#self.assertEqual(f.name, TESTFN)
self.assertRaises(ValueError, f.read, 10) # Open for reading
f.close()
self.assertTrue(f.closed)
f = _FileIO(TESTFN, 'r')
self.assertRaises(TypeError, f.readinto, "")
self.assertTrue(not f.closed)
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = ['fileno', 'isatty', 'read', 'readinto',
'seek', 'tell', 'truncate', 'write', 'seekable',
'readable', 'writable']
if sys.platform.startswith('atheos'):
methods.remove('truncate')
self.f.close()
self.assertTrue(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method)
def testOpendir(self):
# Issue 3703: opening a directory should fill the errno
# Windows always returns "[Errno 13]: Permission denied"
# Unix calls dircheck() and returns "[Errno 21]: Is a directory"
try:
_FileIO('.', 'r')
except IOError as e:
self.assertNotEqual(e.errno, 0)
self.assertEqual(e.filename, ".")
else:
self.fail("Should have raised IOError")
#A set of functions testing that we get expected behaviour if someone has
#manually closed the internal file descriptor. First, a decorator:
def ClosedFD(func):
@wraps(func)
def wrapper(self):
#forcibly close the fd before invoking the problem function
f = self.f
os.close(f.fileno())
try:
func(self, f)
finally:
try:
self.f.close()
except IOError:
pass
return wrapper
def ClosedFDRaises(func):
@wraps(func)
def wrapper(self):
#forcibly close the fd before invoking the problem function
f = self.f
os.close(f.fileno())
try:
func(self, f)
except IOError as e:
self.assertEqual(e.errno, errno.EBADF)
else:
self.fail("Should have raised IOError")
finally:
try:
self.f.close()
except IOError:
pass
return wrapper
@ClosedFDRaises
def testErrnoOnClose(self, f):
f.close()
@ClosedFDRaises
def testErrnoOnClosedWrite(self, f):
f.write('a')
@ClosedFDRaises
def testErrnoOnClosedSeek(self, f):
f.seek(0)
@ClosedFDRaises
def testErrnoOnClosedTell(self, f):
f.tell()
@ClosedFDRaises
def testErrnoOnClosedTruncate(self, f):
f.truncate(0)
@ClosedFD
def testErrnoOnClosedSeekable(self, f):
f.seekable()
@ClosedFD
def testErrnoOnClosedReadable(self, f):
f.readable()
@ClosedFD
def testErrnoOnClosedWritable(self, f):
f.writable()
@ClosedFD
def testErrnoOnClosedFileno(self, f):
f.fileno()
@ClosedFD
def testErrnoOnClosedIsatty(self, f):
self.assertEqual(f.isatty(), False)
def ReopenForRead(self):
try:
self.f.close()
except IOError:
pass
self.f = _FileIO(TESTFN, 'r')
os.close(self.f.fileno())
return self.f
@ClosedFDRaises
def testErrnoOnClosedRead(self, f):
f = self.ReopenForRead()
f.read(1)
@ClosedFDRaises
def testErrnoOnClosedReadall(self, f):
f = self.ReopenForRead()
f.readall()
@ClosedFDRaises
def testErrnoOnClosedReadinto(self, f):
f = self.ReopenForRead()
a = array(b'b', b'x'*10)
f.readinto(a)
class OtherFileTests(unittest.TestCase):
def testAbles(self):
try:
f = _FileIO(TESTFN, "w")
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
f.close()
f = _FileIO(TESTFN, "r")
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
f.close()
f = _FileIO(TESTFN, "a+")
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.assertEqual(f.isatty(), False)
f.close()
if sys.platform != "win32":
try:
f = _FileIO("/dev/tty", "a")
except EnvironmentError:
# When run in a cron job there just aren't any
# ttys, so skip the test. This also handles other
# OS'es that don't support /dev/tty.
pass
else:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
if sys.platform != "darwin" and \
'bsd' not in sys.platform and \
not sys.platform.startswith('sunos'):
# Somehow /dev/tty appears seekable on some BSDs
self.assertEqual(f.seekable(), False)
self.assertEqual(f.isatty(), True)
f.close()
finally:
os.unlink(TESTFN)
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+", "rw", "rt"):
try:
f = _FileIO(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = _FileIO(str(TESTFN), "w")
f.close()
os.unlink(TESTFN)
def testBytesOpen(self):
# Opening a bytes filename
try:
fn = TESTFN.encode("ascii")
except UnicodeEncodeError:
# Skip test
return
f = _FileIO(fn, "w")
try:
f.write(b"abc")
f.close()
with open(TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abc")
finally:
os.unlink(TESTFN)
def testInvalidFd(self):
self.assertRaises(ValueError, _FileIO, -10)
self.assertRaises(OSError, _FileIO, make_bad_fd())
if sys.platform == 'win32':
import msvcrt
self.assertRaises(IOError, msvcrt.get_osfhandle, make_bad_fd())
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = _FileIO(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testTruncate(self):
f = _FileIO(TESTFN, 'w')
f.write(bytes(bytearray(range(10))))
self.assertEqual(f.tell(), 10)
f.truncate(5)
self.assertEqual(f.tell(), 10)
self.assertEqual(f.seek(0, os.SEEK_END), 5)
f.truncate(15)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.seek(0, os.SEEK_END), 15)
f.close()
def testTruncateOnWindows(self):
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = _FileIO(TESTFN, 'w')
f.write(bytes(range(11)))
f.close()
f = _FileIO(TESTFN,'r+')
data = f.read(5)
if data != bytes(range(5)):
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
def testAppend(self):
try:
f = open(TESTFN, 'wb')
f.write(b'spam')
f.close()
f = open(TESTFN, 'ab')
f.write(b'eggs')
f.close()
f = open(TESTFN, 'rb')
d = f.read()
f.close()
self.assertEqual(d, b'spameggs')
finally:
try:
os.unlink(TESTFN)
except:
pass
def testInvalidInit(self):
self.assertRaises(TypeError, _FileIO, "1", 0, 0)
def testWarnings(self):
with check_warnings(quiet=True) as w:
self.assertEqual(w.warnings, [])
self.assertRaises(TypeError, _FileIO, [])
self.assertEqual(w.warnings, [])
self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
self.assertEqual(w.warnings, [])
def test_surrogates(self):
# Issue #8438: try to open a filename containing surrogates.
# It should either fail because the file doesn't exist or the filename
# can't be represented using the filesystem encoding, but not because
# of a LookupError for the error handler "surrogateescape".
filename = u'\udc80.txt'
try:
with _FileIO(filename):
pass
except (UnicodeEncodeError, IOError):
pass
# Spawn a separate Python process with a different "file system
# default encoding", to exercise this further.
env = dict(os.environ)
env[b'LC_CTYPE'] = b'C'
_, out = run_python('-c', 'import _io; _io.FileIO(%r)' % filename, env=env)
if ('UnicodeEncodeError' not in out and
'IOError: [Errno 2] No such file or directory' not in out):
self.fail('Bad output: %r' % out)
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(AutoFileTests, OtherFileTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
stsquad/heating-pi
|
refs/heads/master
|
heatingpi/sensor/serial_temp_sensor.py
|
1
|
#!/usr/bin/env python
#
# A simple temperature sensor that is plugged into the serial port of
# the Pi and reports a temperature every now and again.
#
from temp_sensor import TempSensor
from serial import Serial
class SerialTempSensor(TempSensor):
"""
A SerialTempSensor expects to read a temperature from a serial port
(by default ttyAMA0)
"""
def __init__(self, name="ttyAMA0", dev="/dev/ttyAMA0", baud=9600):
super(SerialTempSensor, self).__init__(name=name, num=1)
self.port = Serial(dev, baud)
# flush a line out
self.port.readline()
def poll(self):
line = self.port.readline()
try:
temp = float(line)
self.log_data([temp])
except ValueError:
print "error converting: %s" % (line)
if __name__ == "__main__":
from sensor import common_args, poll_sensor
parser = common_args()
serial_args = parser.add_argument_group("Serial Temp Sensor options")
serial_args.add_argument("--dev", help="Serial device", default="/dev/ttyAMA0")
serial_args.add_argument("--baud", help="Baud Rate", default=9600)
args = parser.parse_args()
sensor = SerialTempSensor(name=args.name, dev=args.dev, baud=args.baud)
poll_sensor(20, sensor)
|
lcplj123/video-dl
|
refs/heads/master
|
extractors/tudou.py
|
2
|
#!/usr/bin/env python3
import re
import sys
import json
sys.path.append('..')
from define import *
from utils import *
from extractor import BasicExtractor
from xml.dom.minidom import parseString
class TuDouExtractor(BasicExtractor):
'''
Tudou video downloader.
'''
def __init__(self,c):
super(TuDouExtractor,self).__init__(c,TUDOU)
def download(self):
'''
Download entry point.
'''
print('tudou:start downloading ...')
retry = 3
while retry > 0 :
self.page = get_html(self.c.url)
if self.page: break
retry -= 1
if not self.page:
print('error: failed to request video info, check the url. %s' % (self.c.url,))
sys.exit(0)
#For test
#with open('test.html','w') as f:
# f.write(self.page)
pattern = re.compile(r'vcode\s*[:=]\s*\'([^\']+)\'')
r = pattern.search(self.page)
if r:
vcode = r.groups()[0]
youku_url = r'http://v.youku.com/v_show/id_%s.html' % (vcode,)
self.c.url = youku_url
from extractors.youku import download
download(self.c)
else:
self._download()
def _download(self):
self.iid = self._getIID()
self.i.vid = self.iid
self.i.title = self.getTitle()
self.i.desc = self.getDesc()
self.i.keywords = self.getKeywords()
self.i.category = self.getCategory()
js = None
url = r'http://www.tudou.com/outplay/goto/getItemSegs.action?iid=%s' % (self.iid,)
data = get_html(url)
js = json.loads(data)
if js and 'status' not in js:
pass
else:
js = None
pattern = re.compile(r'segs:\s*\'(\{.*?\})\'')
r = pattern.search(self.page)
if r:
js = json.loads(r.groups()[0])
if not js:
print('error: unsupported url. %s' % (self.c.url,))
sys.exit(0)
maxkey = max(js.keys())
print(js[maxkey])
self.flvlist = self.query_real(js = js[maxkey])
self.i.fsize = self.getFsize(js = js[maxkey])
self.i.duration = int(self.getDuration(js = js[maxkey]) / 1000)
self.i.m3u8 = self.query_m3u8(iid = self.iid,st = maxkey)
self.i.fname = self.getFname()
self.realdown()
def _getIID(self):
iid = ''
pattern = re.compile(r'iid\s*[:=]\s*(\S+)')
r = pattern.search(self.page)
if r:
iid = r.groups()[0]
return iid
def query_m3u8(self,*args,**kwargs):
iid = kwargs['iid']
st = kwargs['st']
m3u8_url = r'http://vr.tudou.com/v2proxy/v2.m3u8?it=%s&st=%s&pw=' % (iid,st)
return m3u8_url
def query_real(self,*args,**kwargs):
jdata = kwargs['js']
vids = [d['k'] for d in jdata]
url = r'http://ct.v2.tudou.com/f?id=%s'
realurls = [
[n.firstChild.nodeValue.strip()
for n in parseString(get_html(url%(vid,))).getElementsByTagName('f')
][0]
for vid in vids
]
return realurls
def getVid(self,*args,**kwargs):
pass
def getFsize(self,*args,**kwargs):
size = 0
jdata = kwargs['js']
size = sum(d['size'] for d in jdata)
return size
def getTitle(self,*args,**kwargs):
title = ''
pattern = re.compile(r'\<meta\s+name\s*=\s*\"irTitle\"\s+content\s*=\s*\"(.*?)\"')
r = pattern.search(self.page)
if r:
title = r.groups()[0]
if not title:
pattern = re.compile(r'kw\s*[:=]\s*[\'\"]([^\n]+?)\'\s*\n')
r = pattern.search(self.page)
if r:
title = r.groups()[0]
return title
def getDesc(self,*args,**kwargs):
desc = ''
pattern = re.compile(r'\<meta\s+name\s*=\s*\"description\"\s+content\s*=\s*\"(.*?)\"')
r = pattern.search(self.page)
if r:
desc = r.groups()[0]
return desc
def getTags(self,*args,**kwargs):
tags = ''
pattern = re.compile(r'\<meta\s+name\s*=\s*\"keywords\"\s+content\s*=\s*\"(.*?)\"')
r = pattern.search(self.page)
if r:
tags = r.groups()[0]
return tags.split(' ')
def getCategory(self,*args,**kwargs):
cat = '未知'  # default category, '未知' = "unknown"
pattern = re.compile(r'\<meta\s+name\s*=\s*\"irCategory\"\s+content\s*=\s*\"(.*?)\"')
r = pattern.search(self.page)
if r:
cat = r.groups()[0]
return cat
def getDuration(self,*args,**kwargs):
duration = 0
jdata = kwargs['js']
duration = sum(d['seconds'] for d in jdata)
return duration
def getUptime(self,*args,**kwargs):
return INITIAL_UPTIME
def download(c):
d = TuDouExtractor(c)
return d.download()
|
suneeth51/neutron
|
refs/heads/master
|
neutron/api/rpc/handlers/metadata_rpc.py
|
58
|
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oslo_messaging
from neutron.common import constants
from neutron import manager
class MetadataRpcCallback(object):
"""Metadata agent RPC callback in plugin implementations.
This class implements the server side of an rpc interface used by the
metadata service to make calls back into the Neutron plugin. The client
side is defined in neutron.agent.metadata.agent.MetadataPluginAPI. For
more information about changing rpc interfaces, see
doc/source/devref/rpc_api.rst.
"""
# 1.0 MetadataPluginAPI BASE_RPC_API_VERSION
target = oslo_messaging.Target(version='1.0',
namespace=constants.RPC_NAMESPACE_METADATA)
@property
def plugin(self):
if not hasattr(self, '_plugin'):
self._plugin = manager.NeutronManager.get_plugin()
return self._plugin
def get_ports(self, context, filters):
return self.plugin.get_ports(context, filters=filters)
|
MattsFleaMarket/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/trial/test/test_doctest.py
|
61
|
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test twisted's doctest support.
"""
from twisted.trial import itrial, runner, unittest, reporter
from twisted.trial.test import mockdoctest
class TestRunners(unittest.TestCase):
"""
Tests for Twisted's doctest support.
"""
def test_id(self):
"""
Check that the id() of the doctests' case object contains the FQPN of
the actual tests. We need this because id() has weird behaviour w/
doctest in Python 2.3.
"""
loader = runner.TestLoader()
suite = loader.loadDoctests(mockdoctest)
idPrefix = 'twisted.trial.test.mockdoctest.Counter'
for test in suite._tests:
self.assertIn(idPrefix, itrial.ITestCase(test).id())
def makeDocSuite(self, module):
"""
Return a L{runner.DocTestSuite} for the doctests in C{module}.
"""
return self.assertWarns(
DeprecationWarning, "DocTestSuite is deprecated in Twisted 8.0.",
__file__, lambda: runner.DocTestSuite(mockdoctest))
def test_correctCount(self):
"""
L{countTestCases} returns the number of doctests in the module.
"""
suite = self.makeDocSuite(mockdoctest)
self.assertEqual(7, suite.countTestCases())
def test_basicTrialIntegration(self):
"""
L{loadDoctests} loads all of the doctests in the given module.
"""
loader = runner.TestLoader()
suite = loader.loadDoctests(mockdoctest)
self.assertEqual(7, suite.countTestCases())
def _testRun(self, suite):
"""
Run C{suite} and check the result.
"""
result = reporter.TestResult()
suite.run(result)
self.assertEqual(5, result.successes)
# doctest reports failures as errors in 2.3
self.assertEqual(2, len(result.errors) + len(result.failures))
def test_expectedResults(self, count=1):
"""
Trial can correctly run doctests with its xUnit test APIs.
"""
suite = runner.TestLoader().loadDoctests(mockdoctest)
self._testRun(suite)
def test_repeatable(self):
"""
Doctests should be runnable repeatably.
"""
suite = runner.TestLoader().loadDoctests(mockdoctest)
self._testRun(suite)
self._testRun(suite)
|
nikolaik/IndicoIo-python
|
refs/heads/master
|
tests/test_imagerecognition.py
|
2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest, os
from indicoio import config
from indicoio import image_recognition
DIR = os.path.dirname(os.path.realpath(__file__))
class ImageRecognitionTest(unittest.TestCase):
def setUp(self):
self.api_key = config.api_key
def test_single_image_recognition(self):
test_data = os.path.normpath(os.path.join(DIR, "data", "fear.png"))
response = image_recognition(test_data, api_key = self.api_key, top_n=3)
self.assertIsInstance(response, dict)
self.assertEqual(len(response), 3)
self.assertIsInstance(list(response.values())[0], float)
def test_batch_image_recognition(self):
test_data = os.path.normpath(os.path.join(DIR, "data", "fear.png"))
response = image_recognition([test_data, test_data], api_key = self.api_key, top_n=3)
self.assertIsInstance(response, list)
self.assertIsInstance(response[0], dict)
self.assertEqual(len(response[0]), 3)
self.assertIsInstance(list(response[0].values())[0], float)
if __name__ == "__main__":
unittest.main()
|
tedye/leetcode
|
refs/heads/master
|
tools/leetcode.226.Invert Binary Tree/leetcode.226.Invert Binary Tree.submission2.py
|
2
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @return {TreeNode}
def invertTree(self, root):
# iterative way
if not root:
return None
cur = [root]
while cur:
            next_level = []
            for n in cur:
                n.left, n.right = n.right, n.left
                if n.right:
                    next_level.append(n.right)
                if n.left:
                    next_level.append(n.left)
            cur = next_level
return root
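# --- Illustrative sketch, not part of the original submission ---
# Equivalent recursive formulation of the same tree inversion, shown for
# comparison with the iterative level-order traversal above.
class SolutionRecursive:
    def invertTree(self, root):
        if not root:
            return None
        root.left, root.right = (self.invertTree(root.right),
                                 self.invertTree(root.left))
        return root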
|
connectordb/connectordb-python
|
refs/heads/master
|
setup.py
|
2
|
#!/usr/bin/env python
# python setup.py sdist
# python setup.py bdist_wheel --universal
# twine upload dist/*
from __future__ import absolute_import
from setuptools import setup, find_packages
setup(name='ConnectorDB',
version='0.3.5', # The a.b of a.b.c follows connectordb version. c is the version of python. Remember to change __version__ in __init__
description='ConnectorDB Python Interface',
author='ConnectorDB contributors',
license="Apache-2.0",
author_email='support@connectordb.com',
url='https://github.com/connectordb/connectordb-python',
packages=find_packages(exclude=['contrib', 'docs', '*_test']),
classifiers=[#'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'],
install_requires=["requests", "websocket-client", "jsonschema"])
|
miconof/headphones
|
refs/heads/master
|
lib/pygazelle/category.py
|
28
|
class InvalidCategoryException(Exception):
pass
class Category(object):
def __init__(self, id, parent_api):
self.id = id
self.parent_api = parent_api
self.name = None
self.parent_api.cached_categories[self.id] = self # add self to cache of known Category objects
def __repr__(self):
return "Category: %s - id: %s" % (self.name, self.id)
|
roadmapper/ansible
|
refs/heads/devel
|
test/units/modules/network/onyx/test_onyx_mlag_vip.py
|
52
|
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_mlag_vip
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxMlagVipModule(TestOnyxModule):
module = onyx_mlag_vip
def setUp(self):
super(TestOnyxMlagVipModule, self).setUp()
self._mlag_enabled = True
self.mock_show_mlag = patch.object(
onyx_mlag_vip.OnyxMLagVipModule,
"_show_mlag")
self.show_mlag = self.mock_show_mlag.start()
self.mock_show_mlag_vip = patch.object(
onyx_mlag_vip.OnyxMLagVipModule,
"_show_mlag_vip")
self.show_mlag_vip = self.mock_show_mlag_vip.start()
self.mock_load_config = patch(
'ansible.module_utils.network.onyx.onyx.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestOnyxMlagVipModule, self).tearDown()
self.mock_show_mlag.stop()
self.mock_show_mlag_vip.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
if self._mlag_enabled:
config_file = 'onyx_mlag_vip_show.cfg'
self.show_mlag_vip.return_value = load_fixture(config_file)
config_file = 'onyx_mlag_show.cfg'
self.show_mlag.return_value = load_fixture(config_file)
else:
self.show_mlag_vip.return_value = None
self.show_mlag.return_value = None
self.load_config.return_value = None
def test_mlag_no_change(self):
set_module_args(dict(ipaddress='10.209.25.107/24',
group_name='neo-mlag-vip-500',
mac_address='00:00:5E:00:01:4E'))
self.execute_module(changed=False)
def test_mlag_change(self):
self._mlag_enabled = False
set_module_args(dict(ipaddress='10.209.25.107/24',
group_name='neo-mlag-vip-500',
mac_address='00:00:5E:00:01:4E',
delay=0))
commands = ['mlag-vip neo-mlag-vip-500 ip 10.209.25.107 /24 force',
'mlag system-mac 00:00:5e:00:01:4e', 'no mlag shutdown']
self.execute_module(changed=True, commands=commands)
def test_mlag_send_group_name_only_change(self):
self._mlag_enabled = False
set_module_args(dict(group_name='neo-mlag-vip-500',
delay=0))
commands = ['mlag-vip neo-mlag-vip-500',
'no mlag shutdown']
self.execute_module(changed=True, commands=commands)
def test_mlag_absent_no_change(self):
self._mlag_enabled = False
set_module_args(dict(state='absent'))
self.execute_module(changed=False)
def test_mlag_absent_change(self):
set_module_args(dict(state='absent', delay=0))
commands = ['no mlag-vip']
self.execute_module(changed=True, commands=commands)
|
neumerance/cloudloon2
|
refs/heads/master
|
.venv/lib/python2.7/site-packages/neutronclient/tests/unit/vpn/test_utils.py
|
7
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett Packard.
import testtools
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.neutron.v2_0.vpn import utils as vpn_utils
class TestVPNUtils(testtools.TestCase):
def test_validate_lifetime_dictionary_seconds(self):
input_str = utils.str2dict("units=seconds,value=3600")
self.assertIsNone(vpn_utils.validate_lifetime_dict(input_str))
def test_validate_dpd_dictionary_action_hold(self):
input_str = utils.str2dict("action=hold,interval=30,timeout=120")
self.assertIsNone(vpn_utils.validate_dpd_dict(input_str))
def test_validate_dpd_dictionary_action_restart(self):
input_str = utils.str2dict("action=restart,interval=30,timeout=120")
self.assertIsNone(vpn_utils.validate_dpd_dict(input_str))
def test_validate_dpd_dictionary_action_restart_by_peer(self):
input_str = utils.str2dict(
"action=restart-by-peer,interval=30,timeout=120"
)
self.assertIsNone(vpn_utils.validate_dpd_dict(input_str))
def test_validate_dpd_dictionary_action_clear(self):
input_str = utils.str2dict('action=clear,interval=30,timeout=120')
self.assertIsNone(vpn_utils.validate_dpd_dict(input_str))
def test_validate_dpd_dictionary_action_disabled(self):
input_str = utils.str2dict('action=disabled,interval=30,timeout=120')
self.assertIsNone(vpn_utils.validate_dpd_dict(input_str))
def test_validate_lifetime_dictionary_invalid_unit_key(self):
input_str = utils.str2dict('ut=seconds,value=3600')
self._test_validate_lifetime_negative_test_case(input_str)
def test_validate_lifetime_dictionary_invalid_unit_key_value(self):
input_str = utils.str2dict('units=seconds,val=3600')
self._test_validate_lifetime_negative_test_case(input_str)
def test_validate_lifetime_dictionary_unsupported_units(self):
input_str = utils.str2dict('units=minutes,value=3600')
self._test_validate_lifetime_negative_test_case(input_str)
def test_validate_lifetime_dictionary_invalid_empty_unit(self):
input_str = utils.str2dict('units=,value=3600')
self._test_validate_lifetime_negative_test_case(input_str)
def test_validate_lifetime_dictionary_under_minimum_integer_value(self):
input_str = utils.str2dict('units=seconds,value=59')
self._test_validate_lifetime_negative_test_case(input_str)
def test_validate_lifetime_dictionary_negative_integer_value(self):
input_str = utils.str2dict('units=seconds,value=-1')
self._test_validate_lifetime_negative_test_case(input_str)
def test_validate_lifetime_dictionary_empty_value(self):
input_str = utils.str2dict('units=seconds,value=')
self._test_validate_lifetime_negative_test_case(input_str)
def test_validate_dpd_dictionary_invalid_key_action(self):
input_str = utils.str2dict('act=hold,interval=30,timeout=120')
self._test_validate_dpd_negative_test_case(input_str)
def test_validate_dpd_dictionary_invalid_key_interval(self):
input_str = utils.str2dict('action=hold,int=30,timeout=120')
self._test_validate_dpd_negative_test_case(input_str)
def test_validate_dpd_dictionary_invalid_key_timeout(self):
input_str = utils.str2dict('action=hold,interval=30,tiut=120')
self._test_validate_dpd_negative_test_case(input_str)
def test_validate_dpd_dictionary_unsupported_action(self):
input_str = utils.str2dict('action=bye-bye,interval=30,timeout=120')
self._test_validate_dpd_negative_test_case(input_str)
def test_validate_dpd_dictionary_empty_action(self):
input_str = utils.str2dict('action=,interval=30,timeout=120')
self._test_validate_dpd_negative_test_case(input_str)
def test_validate_dpd_dictionary_empty_interval(self):
input_str = utils.str2dict('action=hold,interval=,timeout=120')
self._test_validate_dpd_negative_test_case(input_str)
def test_validate_dpd_dictionary_negative_interval_value(self):
input_str = utils.str2dict('action=hold,interval=-1,timeout=120')
        self._test_validate_dpd_negative_test_case(input_str)
def test_validate_dpd_dictionary_zero_timeout(self):
input_str = utils.str2dict('action=hold,interval=30,timeout=0')
self._test_validate_dpd_negative_test_case(input_str)
def test_validate_dpd_dictionary_empty_timeout(self):
input_str = utils.str2dict('action=hold,interval=30,timeout=')
self._test_validate_dpd_negative_test_case(input_str)
def test_validate_dpd_dictionary_negative_timeout_value(self):
input_str = utils.str2dict('action=hold,interval=30,timeout=-1')
        self._test_validate_dpd_negative_test_case(input_str)
def _test_validate_lifetime_negative_test_case(self, input_str):
"""Generic handler for negative lifetime tests."""
self.assertRaises(exceptions.CommandError,
vpn_utils.validate_lifetime_dict,
(input_str))
def _test_validate_dpd_negative_test_case(self, input_str):
"""Generic handler for negative lifetime tests."""
self.assertRaises(exceptions.CommandError,
vpn_utils.validate_lifetime_dict,
(input_str))
|
etherkit/OpenBeacon2
|
refs/heads/master
|
client/win/venv/Lib/site-packages/PyInstaller/hooks/hook-xml.py
|
2
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2019, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
hiddenimports = ['xml.sax.xmlreader','xml.sax.expatreader']
|
mavit/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/parsing/convert_bool.py
|
118
|
# Copyright: 2017, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause )
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils._text import to_text
BOOLEANS_TRUE = frozenset(('y', 'yes', 'on', '1', 'true', 't', 1, 1.0, True))
BOOLEANS_FALSE = frozenset(('n', 'no', 'off', '0', 'false', 'f', 0, 0.0, False))
BOOLEANS = BOOLEANS_TRUE.union(BOOLEANS_FALSE)
def boolean(value, strict=True):
if isinstance(value, bool):
return value
normalized_value = value
if isinstance(value, (text_type, binary_type)):
normalized_value = to_text(value, errors='surrogate_or_strict').lower().strip()
if normalized_value in BOOLEANS_TRUE:
return True
elif normalized_value in BOOLEANS_FALSE or not strict:
return False
raise TypeError("The value '%s' is not a valid boolean. Valid booleans include: %s" % (to_text(value), ', '.join(repr(i) for i in BOOLEANS)))
|
Nitaco/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_placement_group_facts.py
|
70
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_placement_group_facts
short_description: List EC2 Placement Group(s) details
description:
- List details of EC2 Placement Group(s).
version_added: "2.5"
author: "Brad Macpherson (@iiibrad)"
options:
names:
description:
- A list of names to filter on. If a listed group does not exist, there
will be no corresponding entry in the result; no error will be raised.
required: false
default: []
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details or the AWS region,
# see the AWS Guide for details.
# List all placement groups.
- ec2_placement_group_facts:
register: all_ec2_placement_groups
# List two placement groups.
- ec2_placement_group_facts:
names:
- my-cluster
- my-other-cluster
register: specific_ec2_placement_groups
- debug: msg="{{ specific_ec2_placement_groups | json_query(\"[?name=='my-cluster']\") }}"
'''
RETURN = '''
placement_groups:
description: Placement group attributes
returned: always
type: complex
contains:
name:
description: PG name
type: string
sample: my-cluster
state:
description: PG state
type: string
sample: "available"
strategy:
description: PG strategy
type: string
sample: "cluster"
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (connect_to_aws,
boto3_conn,
ec2_argument_spec,
get_aws_connection_info)
try:
from botocore.exceptions import (BotoCoreError, ClientError)
except ImportError:
pass # caught by imported HAS_BOTO3
def get_placement_groups_details(connection, module):
names = module.params.get("names")
try:
if len(names) > 0:
response = connection.describe_placement_groups(
Filters=[{
"Name": "group-name",
"Values": names
}])
else:
response = connection.describe_placement_groups()
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(
e,
msg="Couldn't find placement groups named [%s]" % names)
results = []
for placement_group in response['PlacementGroups']:
results.append({
"name": placement_group['GroupName'],
"state": placement_group['State'],
"strategy": placement_group['Strategy'],
})
return results
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
names=dict(type='list', default=[])
)
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True
)
region, ec2_url, aws_connect_params = get_aws_connection_info(
module, boto3=True)
connection = boto3_conn(module,
resource='ec2', conn_type='client',
region=region, endpoint=ec2_url, **aws_connect_params)
placement_groups = get_placement_groups_details(connection, module)
module.exit_json(changed=False, placement_groups=placement_groups)
if __name__ == '__main__':
main()
|
azatoth/scons
|
refs/heads/master
|
test/option-unknown.py
|
2
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', "")
test.run(arguments = '-Z',
stderr = """usage: scons [OPTION] [TARGET] ...
SCons error: no such option: -Z
""",
status = 2)
test.run(arguments = '--ZizzerZazzerZuzz',
stderr = """usage: scons [OPTION] [TARGET] ...
SCons error: no such option: --ZizzerZazzerZuzz
""",
status = 2)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
toslunar/chainerrl
|
refs/heads/master
|
tests/wrappers_tests/test_atari_wrappers.py
|
1
|
"""Currently this script tests `chainerrl.wrappers.atari_wrappers.FrameStack`
only."""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import mock
import unittest
from chainer import testing
import gym
import gym.spaces
import numpy as np
from chainerrl.wrappers.atari_wrappers import FrameStack
from chainerrl.wrappers.atari_wrappers import LazyFrames
from chainerrl.wrappers.atari_wrappers import ScaledFloatFrame
@testing.parameterize(*testing.product({
'dtype': [np.uint8, np.float32],
'k': [2, 3],
}))
class TestFrameStack(unittest.TestCase):
def test_frame_stack(self):
steps = 10
# Mock env that returns atari-like frames
def make_env(idx):
env = mock.Mock()
np_random = np.random.RandomState(idx)
if self.dtype is np.uint8:
def dtyped_rand():
return np_random.randint(
low=0, high=255, size=(1, 84, 84), dtype=self.dtype)
low, high = 0, 255
elif self.dtype is np.float32:
def dtyped_rand():
return np_random.rand(1, 84, 84).astype(self.dtype)
low, high = -1.0, 3.14
else:
assert False
env.reset.side_effect = [dtyped_rand() for _ in range(steps)]
env.step.side_effect = [
(
dtyped_rand(),
np_random.rand(),
bool(np_random.randint(2)),
{},
)
for _ in range(steps)]
env.action_space = gym.spaces.Discrete(2)
env.observation_space = gym.spaces.Box(
low=low, high=high, shape=(1, 84, 84), dtype=self.dtype)
return env
env = make_env(42)
fs_env = FrameStack(make_env(42), k=self.k, channel_order='chw')
# check action/observation space
self.assertEqual(env.action_space, fs_env.action_space)
self.assertIs(
env.observation_space.dtype, fs_env.observation_space.dtype)
self.assertEqual(
env.observation_space.low.item(0),
fs_env.observation_space.low.item(0))
self.assertEqual(
env.observation_space.high.item(0),
fs_env.observation_space.high.item(0))
# check reset
obs = env.reset()
fs_obs = fs_env.reset()
self.assertIsInstance(fs_obs, LazyFrames)
np.testing.assert_allclose(
obs.take(indices=0, axis=fs_env.stack_axis),
np.asarray(fs_obs).take(indices=0, axis=fs_env.stack_axis))
# check step
for _ in range(steps - 1):
action = env.action_space.sample()
fs_action = fs_env.action_space.sample()
obs, r, done, info = env.step(action)
fs_obs, fs_r, fs_done, fs_info = fs_env.step(fs_action)
self.assertIsInstance(fs_obs, LazyFrames)
np.testing.assert_allclose(
obs.take(indices=0, axis=fs_env.stack_axis),
np.asarray(fs_obs).take(indices=-1, axis=fs_env.stack_axis))
self.assertEqual(r, fs_r)
self.assertEqual(done, fs_done)
@testing.parameterize(*testing.product({
'dtype': [np.uint8, np.float32],
}))
class TestScaledFloatFrame(unittest.TestCase):
def test_scaled_float_frame(self):
steps = 10
# Mock env that returns atari-like frames
def make_env(idx):
env = mock.Mock()
np_random = np.random.RandomState(idx)
if self.dtype is np.uint8:
def dtyped_rand():
return np_random.randint(
low=0, high=255, size=(1, 84, 84), dtype=self.dtype)
low, high = 0, 255
elif self.dtype is np.float32:
def dtyped_rand():
return np_random.rand(1, 84, 84).astype(self.dtype)
low, high = -1.0, 3.14
else:
assert False
env.reset.side_effect = [dtyped_rand() for _ in range(steps)]
env.step.side_effect = [
(
dtyped_rand(),
np_random.rand(),
bool(np_random.randint(2)),
{},
)
for _ in range(steps)]
env.action_space = gym.spaces.Discrete(2)
env.observation_space = gym.spaces.Box(
low=low, high=high, shape=(1, 84, 84), dtype=self.dtype)
return env
env = make_env(42)
s_env = ScaledFloatFrame(make_env(42))
# check observation space
self.assertIs(
type(env.observation_space), type(s_env.observation_space))
self.assertIs(s_env.observation_space.dtype, np.dtype(np.float32))
self.assertTrue(
s_env.observation_space.contains(s_env.observation_space.low))
self.assertTrue(
s_env.observation_space.contains(s_env.observation_space.high))
# check reset
obs = env.reset()
s_obs = s_env.reset()
np.testing.assert_allclose(np.array(obs) / s_env.scale, s_obs)
# check step
for _ in range(steps - 1):
action = env.action_space.sample()
s_action = s_env.action_space.sample()
obs, r, done, info = env.step(action)
s_obs, s_r, s_done, s_info = s_env.step(s_action)
np.testing.assert_allclose(np.array(obs) / s_env.scale, s_obs)
self.assertEqual(r, s_r)
self.assertEqual(done, s_done)
|
jkgneu12/python3-saml
|
refs/heads/master
|
src/onelogin/saml2/xml_utils.py
|
1
|
# -*- coding: utf-8 -*-
""" OneLogin_Saml2_XML class
Copyright (c) 2015, OneLogin, Inc.
All rights reserved.
Auxiliary class of OneLogin's Python Toolkit.
"""
from os.path import join, dirname
from lxml import etree
from onelogin.saml2 import compat
from onelogin.saml2.constants import OneLogin_Saml2_Constants
for prefix, url in OneLogin_Saml2_Constants.NSMAP.items():
etree.register_namespace(prefix, url)
class OneLogin_Saml2_XML(object):
_element_class = type(etree.Element('root'))
_parse_etree = staticmethod(etree.fromstring)
_schema_class = etree.XMLSchema
_text_class = compat.text_types
_unparse_etree = staticmethod(etree.tostring)
dump = staticmethod(etree.dump)
make_root = staticmethod(etree.Element)
make_child = staticmethod(etree.SubElement)
cleanup_namespaces = staticmethod(etree.cleanup_namespaces)
@staticmethod
def to_string(xml, **kwargs):
"""
Serialize an element to an encoded string representation of its XML tree.
:param xml: The root node
:type xml: str|bytes|xml.dom.minidom.Document|etree.Element
:returns: string representation of xml
:rtype: string
"""
if isinstance(xml, OneLogin_Saml2_XML._text_class):
return xml
if isinstance(xml, OneLogin_Saml2_XML._element_class):
OneLogin_Saml2_XML.cleanup_namespaces(xml)
return OneLogin_Saml2_XML._unparse_etree(xml, **kwargs)
raise ValueError("unsupported type %r" % type(xml))
@staticmethod
def to_etree(xml):
"""
Parses an XML document or fragment from a string.
:param xml: the string to parse
:type xml: str|bytes|xml.dom.minidom.Document|etree.Element
:returns: the root node
:rtype: OneLogin_Saml2_XML._element_class
"""
if isinstance(xml, OneLogin_Saml2_XML._element_class):
return xml
if isinstance(xml, OneLogin_Saml2_XML._text_class):
return OneLogin_Saml2_XML._parse_etree(xml)
raise ValueError('unsupported type %r' % type(xml))
@staticmethod
def validate_xml(xml, schema, debug=False):
"""
        Validates an XML document against a schema
:param xml: The xml that will be validated
:type xml: str|bytes|xml.dom.minidom.Document|etree.Element
:param schema: The schema
:type schema: string
        :param debug: If debug is active, parse errors will be shown
:type debug: bool
:returns: Error code or the DomDocument of the xml
:rtype: xml.dom.minidom.Document
"""
assert isinstance(schema, compat.str_type)
try:
xml = OneLogin_Saml2_XML.to_etree(xml)
except Exception as e:
if debug:
print(e)
return 'unloaded_xml'
schema_file = join(dirname(__file__), 'schemas', schema)
with open(schema_file, 'r') as f_schema:
xmlschema = OneLogin_Saml2_XML._schema_class(etree.parse(f_schema))
if not xmlschema.validate(xml):
if debug:
print('Errors validating the metadata: ')
for error in xmlschema.error_log:
print(error.message)
return 'invalid_xml'
return xml
@staticmethod
def query(dom, query, context=None):
"""
Extracts nodes that match the query from the Element
        :param dom: The root of the lxml object
        :type: Element
        :param query: XPath expression
:type: string
:param context: Context Node
:type: DOMElement
:returns: The queried nodes
:rtype: list
"""
if context is None:
return dom.xpath(query, namespaces=OneLogin_Saml2_Constants.NSMAP)
else:
return context.xpath(query, namespaces=OneLogin_Saml2_Constants.NSMAP)
@staticmethod
def extract_tag_text(xml, tagname):
open_tag = compat.to_bytes("<%s" % tagname)
close_tag = compat.to_bytes("</%s>" % tagname)
xml = OneLogin_Saml2_XML.to_string(xml)
start = xml.find(open_tag)
assert start != -1
end = xml.find(close_tag, start) + len(close_tag)
assert end != -1
return compat.to_string(xml[start:end])
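# --- Illustrative sketch, not part of the original module ---
# Round-trips a small metadata fragment through the helpers above. The
# entityID and the use of the 'md' prefix / OneLogin_Saml2_Constants.NS_MD
# are assumptions made only for demonstration.
if __name__ == '__main__':
    fragment = ('<md:EntityDescriptor xmlns:md="%s" '
                'entityID="https://sp.example.com/metadata"/>'
                % OneLogin_Saml2_Constants.NS_MD)
    root = OneLogin_Saml2_XML.to_etree(fragment)
    assert OneLogin_Saml2_XML.query(root, '//md:EntityDescriptor')
    print(OneLogin_Saml2_XML.to_string(root))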
|
Geoion/urllib3
|
refs/heads/master
|
test/with_dummyserver/test_https.py
|
7
|
import datetime
import logging
import ssl
import sys
import unittest
import warnings
import mock
from nose.plugins.skip import SkipTest
from dummyserver.testcase import HTTPSDummyServerTestCase
from dummyserver.server import (DEFAULT_CA, DEFAULT_CA_BAD, DEFAULT_CERTS,
NO_SAN_CERTS, NO_SAN_CA, DEFAULT_CA_DIR)
from test import (
onlyPy26OrOlder,
onlyPy279OrNewer,
requires_network,
TARPIT_HOST,
clear_warnings,
)
from urllib3 import HTTPSConnectionPool
from urllib3.connection import (
VerifiedHTTPSConnection,
UnverifiedHTTPSConnection,
RECENT_DATE,
)
from urllib3.exceptions import (
SSLError,
ReadTimeoutError,
ConnectTimeoutError,
InsecureRequestWarning,
SystemTimeWarning,
InsecurePlatformWarning,
)
from urllib3.packages import six
from urllib3.util.timeout import Timeout
ResourceWarning = getattr(
six.moves.builtins,
'ResourceWarning', type('ResourceWarning', (), {}))
log = logging.getLogger('urllib3.connectionpool')
log.setLevel(logging.NOTSET)
log.addHandler(logging.StreamHandler(sys.stdout))
class TestHTTPS(HTTPSDummyServerTestCase):
def setUp(self):
self._pool = HTTPSConnectionPool(self.host, self.port)
def test_simple(self):
r = self._pool.request('GET', '/')
self.assertEqual(r.status, 200, r.data)
def test_set_ssl_version_to_tlsv1(self):
self._pool.ssl_version = ssl.PROTOCOL_TLSv1
r = self._pool.request('GET', '/')
self.assertEqual(r.status, 200, r.data)
def test_verified(self):
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
conn = https_pool._new_conn()
self.assertEqual(conn.__class__, VerifiedHTTPSConnection)
with mock.patch('warnings.warn') as warn:
r = https_pool.request('GET', '/')
self.assertEqual(r.status, 200)
if sys.version_info >= (2, 7, 9):
self.assertFalse(warn.called, warn.call_args_list)
else:
self.assertTrue(warn.called)
call, = warn.call_args_list
error = call[0][1]
self.assertEqual(error, InsecurePlatformWarning)
@onlyPy279OrNewer
def test_ca_dir_verified(self):
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED',
ca_cert_dir=DEFAULT_CA_DIR)
conn = https_pool._new_conn()
self.assertEqual(conn.__class__, VerifiedHTTPSConnection)
with mock.patch('warnings.warn') as warn:
r = https_pool.request('GET', '/')
self.assertEqual(r.status, 200)
if sys.version_info >= (2, 7, 9):
self.assertFalse(warn.called, warn.call_args_list)
else:
self.assertTrue(warn.called)
call, = warn.call_args_list
error = call[0][1]
self.assertEqual(error, InsecurePlatformWarning)
def test_invalid_common_name(self):
https_pool = HTTPSConnectionPool('127.0.0.1', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
try:
https_pool.request('GET', '/')
self.fail("Didn't raise SSL invalid common name")
except SSLError as e:
self.assertTrue("doesn't match" in str(e))
def test_verified_with_bad_ca_certs(self):
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA_BAD)
try:
https_pool.request('GET', '/')
self.fail("Didn't raise SSL error with bad CA certs")
except SSLError as e:
self.assertTrue('certificate verify failed' in str(e),
"Expected 'certificate verify failed',"
"instead got: %r" % e)
def test_verified_without_ca_certs(self):
# default is cert_reqs=None which is ssl.CERT_NONE
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED')
try:
https_pool.request('GET', '/')
self.fail("Didn't raise SSL error with no CA certs when"
"CERT_REQUIRED is set")
except SSLError as e:
# there is a different error message depending on whether or
# not pyopenssl is injected
self.assertTrue('No root certificates specified' in str(e) or
'certificate verify failed' in str(e),
"Expected 'No root certificates specified' or "
"'certificate verify failed', "
"instead got: %r" % e)
def test_no_ssl(self):
pool = HTTPSConnectionPool(self.host, self.port)
pool.ConnectionCls = None
self.assertRaises(SSLError, pool._new_conn)
self.assertRaises(SSLError, pool.request, 'GET', '/')
def test_unverified_ssl(self):
""" Test that bare HTTPSConnection can connect, make requests """
pool = HTTPSConnectionPool(self.host, self.port)
pool.ConnectionCls = UnverifiedHTTPSConnection
with mock.patch('warnings.warn') as warn:
r = pool.request('GET', '/')
self.assertEqual(r.status, 200)
self.assertTrue(warn.called)
call, = warn.call_args_list
category = call[0][1]
self.assertEqual(category, InsecureRequestWarning)
def test_ssl_unverified_with_ca_certs(self):
pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_NONE',
ca_certs=DEFAULT_CA_BAD)
with mock.patch('warnings.warn') as warn:
r = pool.request('GET', '/')
self.assertEqual(r.status, 200)
self.assertTrue(warn.called)
calls = warn.call_args_list
if sys.version_info >= (2, 7, 9):
category = calls[0][0][1]
else:
category = calls[1][0][1]
self.assertEqual(category, InsecureRequestWarning)
@requires_network
def test_ssl_verified_with_platform_ca_certs(self):
"""
We should rely on the platform CA file to validate authenticity of SSL
certificates. Since this file is used by many components of the OS,
such as curl, apt-get, etc., we decided to not touch it, in order to
not compromise the security of the OS running the test suite (typically
urllib3 developer's OS).
This test assumes that httpbin.org uses a certificate signed by a well
known Certificate Authority.
"""
try:
import urllib3.contrib.pyopenssl
except ImportError:
raise SkipTest('Test requires PyOpenSSL')
if (urllib3.connection.ssl_wrap_socket is
urllib3.contrib.pyopenssl.orig_connection_ssl_wrap_socket):
# Not patched
raise SkipTest('Test should only be run after PyOpenSSL '
'monkey patching')
https_pool = HTTPSConnectionPool('httpbin.org', 443,
cert_reqs=ssl.CERT_REQUIRED)
https_pool.request('HEAD', '/')
def test_assert_hostname_false(self):
https_pool = HTTPSConnectionPool('localhost', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_hostname = False
https_pool.request('GET', '/')
def test_assert_specific_hostname(self):
https_pool = HTTPSConnectionPool('localhost', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_hostname = 'localhost'
https_pool.request('GET', '/')
def test_assert_fingerprint_md5(self):
https_pool = HTTPSConnectionPool('localhost', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_fingerprint = 'CA:84:E1:AD0E5a:ef:2f:C3:09' \
':E7:30:F8:CD:C8:5B'
https_pool.request('GET', '/')
def test_assert_fingerprint_sha1(self):
https_pool = HTTPSConnectionPool('localhost', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_fingerprint = 'CC:45:6A:90:82:F7FF:C0:8218:8e:' \
'7A:F2:8A:D7:1E:07:33:67:DE'
https_pool.request('GET', '/')
def test_assert_fingerprint_sha256(self):
https_pool = HTTPSConnectionPool('localhost', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_fingerprint = ('9A:29:9D:4F:47:85:1C:51:23:F5:9A:A3:'
'0F:5A:EF:96:F9:2E:3C:22:2E:FC:E8:BC:'
'0E:73:90:37:ED:3B:AA:AB')
https_pool.request('GET', '/')
def test_assert_invalid_fingerprint(self):
https_pool = HTTPSConnectionPool('127.0.0.1', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_fingerprint = 'AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:' \
'AA:AA:AA:AA:AA:AA:AA:AA:AA'
self.assertRaises(SSLError, https_pool.request, 'GET', '/')
https_pool._get_conn()
# Uneven length
https_pool.assert_fingerprint = 'AA:A'
self.assertRaises(SSLError, https_pool.request, 'GET', '/')
https_pool._get_conn()
# Invalid length
https_pool.assert_fingerprint = 'AA'
self.assertRaises(SSLError, https_pool.request, 'GET', '/')
def test_verify_none_and_bad_fingerprint(self):
https_pool = HTTPSConnectionPool('127.0.0.1', self.port,
cert_reqs='CERT_NONE',
ca_certs=DEFAULT_CA_BAD)
https_pool.assert_fingerprint = 'AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:' \
'AA:AA:AA:AA:AA:AA:AA:AA:AA'
self.assertRaises(SSLError, https_pool.request, 'GET', '/')
def test_verify_none_and_good_fingerprint(self):
https_pool = HTTPSConnectionPool('127.0.0.1', self.port,
cert_reqs='CERT_NONE',
ca_certs=DEFAULT_CA_BAD)
https_pool.assert_fingerprint = 'CC:45:6A:90:82:F7FF:C0:8218:8e:' \
'7A:F2:8A:D7:1E:07:33:67:DE'
https_pool.request('GET', '/')
def test_good_fingerprint_and_hostname_mismatch(self):
https_pool = HTTPSConnectionPool('127.0.0.1', self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=DEFAULT_CA)
https_pool.assert_fingerprint = 'CC:45:6A:90:82:F7FF:C0:8218:8e:' \
'7A:F2:8A:D7:1E:07:33:67:DE'
https_pool.request('GET', '/')
@requires_network
def test_https_timeout(self):
timeout = Timeout(connect=0.001)
https_pool = HTTPSConnectionPool(TARPIT_HOST, self.port,
timeout=timeout, retries=False,
cert_reqs='CERT_REQUIRED')
timeout = Timeout(total=None, connect=0.001)
https_pool = HTTPSConnectionPool(TARPIT_HOST, self.port,
timeout=timeout, retries=False,
cert_reqs='CERT_REQUIRED')
self.assertRaises(ConnectTimeoutError, https_pool.request, 'GET', '/')
timeout = Timeout(read=0.001)
https_pool = HTTPSConnectionPool(self.host, self.port,
timeout=timeout, retries=False,
cert_reqs='CERT_REQUIRED')
https_pool.ca_certs = DEFAULT_CA
https_pool.assert_fingerprint = 'CC:45:6A:90:82:F7FF:C0:8218:8e:' \
'7A:F2:8A:D7:1E:07:33:67:DE'
timeout = Timeout(total=None)
https_pool = HTTPSConnectionPool(self.host, self.port, timeout=timeout,
cert_reqs='CERT_NONE')
https_pool.request('GET', '/')
def test_tunnel(self):
""" test the _tunnel behavior """
timeout = Timeout(total=None)
https_pool = HTTPSConnectionPool(self.host, self.port, timeout=timeout,
cert_reqs='CERT_NONE')
conn = https_pool._new_conn()
try:
conn.set_tunnel(self.host, self.port)
except AttributeError: # python 2.6
conn._set_tunnel(self.host, self.port)
conn._tunnel = mock.Mock()
https_pool._make_request(conn, 'GET', '/')
conn._tunnel.assert_called_once_with()
@onlyPy26OrOlder
def test_tunnel_old_python(self):
"""HTTPSConnection can still make connections if _tunnel_host isn't set
The _tunnel_host attribute was added in 2.6.3 - because our test runners
generally use the latest Python 2.6, we simulate the old version by
deleting the attribute from the HTTPSConnection.
"""
conn = self._pool._new_conn()
del conn._tunnel_host
self._pool._make_request(conn, 'GET', '/')
@requires_network
def test_enhanced_timeout(self):
def new_pool(timeout, cert_reqs='CERT_REQUIRED'):
https_pool = HTTPSConnectionPool(TARPIT_HOST, self.port,
timeout=timeout,
retries=False,
cert_reqs=cert_reqs)
return https_pool
https_pool = new_pool(Timeout(connect=0.001))
conn = https_pool._new_conn()
self.assertRaises(ConnectTimeoutError, https_pool.request, 'GET', '/')
self.assertRaises(ConnectTimeoutError, https_pool._make_request, conn,
'GET', '/')
https_pool = new_pool(Timeout(connect=5))
self.assertRaises(ConnectTimeoutError, https_pool.request, 'GET', '/',
timeout=Timeout(connect=0.001))
t = Timeout(total=None)
https_pool = new_pool(t)
conn = https_pool._new_conn()
self.assertRaises(ConnectTimeoutError, https_pool.request, 'GET', '/',
timeout=Timeout(total=None, connect=0.001))
def test_enhanced_ssl_connection(self):
fingerprint = 'CC:45:6A:90:82:F7FF:C0:8218:8e:7A:F2:8A:D7:1E:07:33:67:DE'
conn = VerifiedHTTPSConnection(self.host, self.port)
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED', ca_certs=DEFAULT_CA,
assert_fingerprint=fingerprint)
https_pool._make_request(conn, 'GET', '/')
def test_ssl_correct_system_time(self):
self._pool.cert_reqs = 'CERT_REQUIRED'
self._pool.ca_certs = DEFAULT_CA
w = self._request_without_resource_warnings('GET', '/')
self.assertEqual([], w)
def test_ssl_wrong_system_time(self):
self._pool.cert_reqs = 'CERT_REQUIRED'
self._pool.ca_certs = DEFAULT_CA
with mock.patch('urllib3.connection.datetime') as mock_date:
mock_date.date.today.return_value = datetime.date(1970, 1, 1)
w = self._request_without_resource_warnings('GET', '/')
self.assertEqual(len(w), 1)
warning = w[0]
self.assertEqual(SystemTimeWarning, warning.category)
self.assertTrue(str(RECENT_DATE) in warning.message.args[0])
def _request_without_resource_warnings(self, method, url):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self._pool.request(method, url)
return [x for x in w if not isinstance(x.message, ResourceWarning)]
class TestHTTPS_TLSv1(HTTPSDummyServerTestCase):
certs = DEFAULT_CERTS.copy()
certs['ssl_version'] = ssl.PROTOCOL_TLSv1
def setUp(self):
self._pool = HTTPSConnectionPool(self.host, self.port)
def test_set_ssl_version_to_sslv3(self):
self._pool.ssl_version = ssl.PROTOCOL_SSLv3
self.assertRaises(SSLError, self._pool.request, 'GET', '/')
def test_ssl_version_as_string(self):
self._pool.ssl_version = 'PROTOCOL_SSLv3'
self.assertRaises(SSLError, self._pool.request, 'GET', '/')
def test_ssl_version_as_short_string(self):
self._pool.ssl_version = 'SSLv3'
self.assertRaises(SSLError, self._pool.request, 'GET', '/')
def test_discards_connection_on_sslerror(self):
self._pool.cert_reqs = 'CERT_REQUIRED'
self.assertRaises(SSLError, self._pool.request, 'GET', '/')
self._pool.ca_certs = DEFAULT_CA
self._pool.request('GET', '/')
def test_set_cert_default_cert_required(self):
conn = VerifiedHTTPSConnection(self.host, self.port)
conn.set_cert(ca_certs='/etc/ssl/certs/custom.pem')
self.assertEqual(conn.cert_reqs, 'CERT_REQUIRED')
class TestHTTPS_NoSAN(HTTPSDummyServerTestCase):
certs = NO_SAN_CERTS
def test_warning_for_certs_without_a_san(self):
"""Ensure that a warning is raised when the cert from the server has
no Subject Alternative Name."""
with mock.patch('warnings.warn') as warn:
https_pool = HTTPSConnectionPool(self.host, self.port,
cert_reqs='CERT_REQUIRED',
ca_certs=NO_SAN_CA)
r = https_pool.request('GET', '/')
self.assertEqual(r.status, 200)
self.assertTrue(warn.called)
if __name__ == '__main__':
unittest.main()
|
mick-d/nipype
|
refs/heads/master
|
nipype/interfaces/semtools/diffusion/gtract.py
|
9
|
# -*- coding: utf-8 -*-
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
import os
from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine,
TraitedSpec, File, Directory, traits, isdefined,
InputMultiPath, OutputMultiPath)
class gtractTransformToDisplacementFieldInputSpec(CommandLineInputSpec):
inputTransform = File(desc="Input Transform File Name", exists=True, argstr="--inputTransform %s")
inputReferenceVolume = File(desc="Required: input image file name to exemplify the anatomical space over which to vcl_express the transform as a displacement field.", exists=True, argstr="--inputReferenceVolume %s")
outputDeformationFieldVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Output deformation field", argstr="--outputDeformationFieldVolume %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractTransformToDisplacementFieldOutputSpec(TraitedSpec):
outputDeformationFieldVolume = File(desc="Output deformation field", exists=True)
class gtractTransformToDisplacementField(SEMLikeCommandLine):
"""title: Create Displacement Field
category: Diffusion.GTRACT
description: This program will compute forward deformation from the given Transform. The size of the DF is equal to MNI space
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta, Madhura Ingalhalikar, and Greg Harris
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractTransformToDisplacementFieldInputSpec
output_spec = gtractTransformToDisplacementFieldOutputSpec
_cmd = " gtractTransformToDisplacementField "
_outputs_filenames = {'outputDeformationFieldVolume': 'outputDeformationFieldVolume.nii'}
_redirect_x = False
class gtractInvertBSplineTransformInputSpec(CommandLineInputSpec):
inputReferenceVolume = File(desc="Required: input image file name to exemplify the anatomical space to interpolate over.", exists=True, argstr="--inputReferenceVolume %s")
inputTransform = File(desc="Required: input B-Spline transform file name", exists=True, argstr="--inputTransform %s")
outputTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output transform file name", argstr="--outputTransform %s")
landmarkDensity = InputMultiPath(traits.Int, desc="Number of landmark subdivisions in all 3 directions", sep=",", argstr="--landmarkDensity %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractInvertBSplineTransformOutputSpec(TraitedSpec):
outputTransform = File(desc="Required: output transform file name", exists=True)
class gtractInvertBSplineTransform(SEMLikeCommandLine):
"""title: B-Spline Transform Inversion
category: Diffusion.GTRACT
description: This program will invert a B-Spline transform using a thin-plate spline approximation.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractInvertBSplineTransformInputSpec
output_spec = gtractInvertBSplineTransformOutputSpec
_cmd = " gtractInvertBSplineTransform "
_outputs_filenames = {'outputTransform': 'outputTransform.h5'}
_redirect_x = False
class gtractConcatDwiInputSpec(CommandLineInputSpec):
inputVolume = InputMultiPath(File(exists=True), desc="Required: input file containing the first diffusion weighted image", argstr="--inputVolume %s...")
ignoreOrigins = traits.Bool(desc="If image origins are different force all images to origin of first image", argstr="--ignoreOrigins ")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output NRRD file containing the combined diffusion weighted images.", argstr="--outputVolume %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractConcatDwiOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output NRRD file containing the combined diffusion weighted images.", exists=True)
class gtractConcatDwi(SEMLikeCommandLine):
"""title: Concat DWI Images
category: Diffusion.GTRACT
description: This program will concatenate two DTI runs together.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractConcatDwiInputSpec
output_spec = gtractConcatDwiOutputSpec
_cmd = " gtractConcatDwi "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
_redirect_x = False
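# --- Illustrative sketch, not part of the autogenerated interfaces ---
# Typical nipype usage of one of the SEMLikeCommandLine wrappers above; the
# file names are placeholders and the GTRACT executable must be on PATH for
# run() to succeed, so the call is left commented out.
# concat = gtractConcatDwi()
# concat.inputs.inputVolume = ['run1.nrrd', 'run2.nrrd']
# concat.inputs.outputVolume = 'combined.nrrd'
# result = concat.run()  # result.outputs.outputVolume points at combined.nrrd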
class gtractAverageBvaluesInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image file name containing multiple baseline gradients to average", exists=True, argstr="--inputVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output NRRD file containing directly averaged baseline images", argstr="--outputVolume %s")
directionsTolerance = traits.Float(desc="Tolerance for matching identical gradient direction pairs", argstr="--directionsTolerance %f")
averageB0only = traits.Bool(desc="Average only baseline gradients. All other gradient directions are not averaged, but retained in the outputVolume", argstr="--averageB0only ")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractAverageBvaluesOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output NRRD file containing directly averaged baseline images", exists=True)
class gtractAverageBvalues(SEMLikeCommandLine):
"""title: Average B-Values
category: Diffusion.GTRACT
description: This program will directly average together the baseline gradients (b value equals 0) within a DWI scan. This is usually used after gtractCoregBvalues.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractAverageBvaluesInputSpec
output_spec = gtractAverageBvaluesOutputSpec
_cmd = " gtractAverageBvalues "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
_redirect_x = False
class gtractCoregBvaluesInputSpec(CommandLineInputSpec):
movingVolume = File(desc="Required: input moving image file name. In order to register gradients within a scan to its first gradient, set the movingVolume and fixedVolume as the same image.", exists=True, argstr="--movingVolume %s")
fixedVolume = File(desc="Required: input fixed image file name. It is recommended that this image should either contain or be a b0 image.", exists=True, argstr="--fixedVolume %s")
fixedVolumeIndex = traits.Int(desc="Index in the fixed image for registration. It is recommended that this image should be a b0 image.", argstr="--fixedVolumeIndex %d")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output NRRD file containing moving images individually resampled and fit to the specified fixed image index.", argstr="--outputVolume %s")
outputTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Registration 3D transforms concatenated in a single output file. There are no tools that can use this, but can be used for debugging purposes.", argstr="--outputTransform %s")
    eddyCurrentCorrection = traits.Bool(desc="Flag to perform eddy current correction in addition to motion correction (recommended)", argstr="--eddyCurrentCorrection ")
numberOfIterations = traits.Int(desc="Number of iterations in each 3D fit", argstr="--numberOfIterations %d")
numberOfSpatialSamples = traits.Int(
desc="The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. NOTE that it is suggested to use samplingPercentage instead of this option. However, if set, it overwrites the samplingPercentage option. ", argstr="--numberOfSpatialSamples %d")
samplingPercentage = traits.Float(
desc="This is a number in (0.0,1.0] interval that shows the percentage of the input fixed image voxels that are sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation. The default is to use approximately 5% of voxels (for backwards compatibility 5% ~= 500000/(256*256*256)). Typical values range from 1% for low detail images to 20% for high detail images.", argstr="--samplingPercentage %f")
relaxationFactor = traits.Float(desc="Fraction of gradient from Jacobian to attempt to move in each 3D fit step (adjust when eddyCurrentCorrection is enabled; suggested value = 0.25)", argstr="--relaxationFactor %f")
maximumStepSize = traits.Float(desc="Maximum permitted step size to move in each 3D fit step (adjust when eddyCurrentCorrection is enabled; suggested value = 0.1)", argstr="--maximumStepSize %f")
minimumStepSize = traits.Float(desc="Minimum required step size to move in each 3D fit step without converging -- decrease this to make the fit more exacting", argstr="--minimumStepSize %f")
spatialScale = traits.Float(desc="How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more rotation in the fit", argstr="--spatialScale %f")
registerB0Only = traits.Bool(desc="Register the B0 images only", argstr="--registerB0Only ")
debugLevel = traits.Int(desc="Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", argstr="--debugLevel %d")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractCoregBvaluesOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output NRRD file containing moving images individually resampled and fit to the specified fixed image index.", exists=True)
outputTransform = File(desc="Registration 3D transforms concatenated in a single output file. There are no tools that can use this, but can be used for debugging purposes.", exists=True)
class gtractCoregBvalues(SEMLikeCommandLine):
"""title: Coregister B-Values
category: Diffusion.GTRACT
description: This step should be performed after converting DWI scans from DICOM to NRRD format. This program will register all gradients in a NRRD diffusion weighted 4D vector image (moving image) to a specified index in a fixed image. It also supports co-registration with a T2 weighted image or field map in the same plane as the DWI data. The fixed image for the registration should be a b0 image. A mutual information metric cost function is used for the registration because of the differences in signal intensity as a result of the diffusion gradients. The full affine allows the registration procedure to correct for eddy current distortions that may exist in the data. If the eddyCurrentCorrection is enabled, relaxationFactor (0.25) and maximumStepSize (0.1) should be adjusted.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractCoregBvaluesInputSpec
output_spec = gtractCoregBvaluesOutputSpec
_cmd = " gtractCoregBvalues "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd', 'outputTransform': 'outputTransform.h5'}
_redirect_x = False
class gtractResampleAnisotropyInputSpec(CommandLineInputSpec):
inputAnisotropyVolume = File(desc="Required: input file containing the anisotropy image", exists=True, argstr="--inputAnisotropyVolume %s")
inputAnatomicalVolume = File(desc="Required: input file containing the anatomical image whose characteristics will be cloned.", exists=True, argstr="--inputAnatomicalVolume %s")
inputTransform = File(desc="Required: input Rigid OR Bspline transform file name", exists=True, argstr="--inputTransform %s")
transformType = traits.Enum("Rigid", "B-Spline", desc="Transform type: Rigid, B-Spline", argstr="--transformType %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output NRRD file containing the resampled transformed anisotropy image.", argstr="--outputVolume %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractResampleAnisotropyOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output NRRD file containing the resampled transformed anisotropy image.", exists=True)
class gtractResampleAnisotropy(SEMLikeCommandLine):
"""title: Resample Anisotropy
category: Diffusion.GTRACT
description: This program will resample a floating point image using either the Rigid or B-Spline transform. You may want to save the aligned B0 image after each of the anisotropy map co-registration steps with the anatomical image to check the registration quality with another tool.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractResampleAnisotropyInputSpec
output_spec = gtractResampleAnisotropyOutputSpec
_cmd = " gtractResampleAnisotropy "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
_redirect_x = False
class gtractResampleCodeImageInputSpec(CommandLineInputSpec):
inputCodeVolume = File(desc="Required: input file containing the code image", exists=True, argstr="--inputCodeVolume %s")
inputReferenceVolume = File(desc="Required: input file containing the standard image to clone the characteristics of.", exists=True, argstr="--inputReferenceVolume %s")
inputTransform = File(desc="Required: input Rigid or Inverse-B-Spline transform file name", exists=True, argstr="--inputTransform %s")
transformType = traits.Enum("Rigid", "Affine", "B-Spline", "Inverse-B-Spline", "None", desc="Transform type: Rigid or Inverse-B-Spline", argstr="--transformType %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output NRRD file containing the resampled code image in acquisition space.", argstr="--outputVolume %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractResampleCodeImageOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output NRRD file containing the resampled code image in acquisition space.", exists=True)
class gtractResampleCodeImage(SEMLikeCommandLine):
"""title: Resample Code Image
category: Diffusion.GTRACT
description: This program will resample a short integer code image using either the Rigid or Inverse-B-Spline transform. The reference image is the DTI tensor anisotropy image space, and the input code image is in anatomical space.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractResampleCodeImageInputSpec
output_spec = gtractResampleCodeImageOutputSpec
_cmd = " gtractResampleCodeImage "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
_redirect_x = False
class gtractCopyImageOrientationInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input file containing the signed short image to reorient without resampling.", exists=True, argstr="--inputVolume %s")
    inputReferenceVolume = File(desc="Required: input file containing the orientation that will be cloned.", exists=True, argstr="--inputReferenceVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output NRRD or Nifti file containing the reoriented image in reference image space.", argstr="--outputVolume %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractCopyImageOrientationOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output NRRD or Nifti file containing the reoriented image in reference image space.", exists=True)
class gtractCopyImageOrientation(SEMLikeCommandLine):
"""title: Copy Image Orientation
category: Diffusion.GTRACT
description: This program will copy the orientation from the reference image into the moving image. Currently, the registration process requires that the diffusion weighted images and the anatomical images have the same image orientation (i.e. Axial, Coronal, Sagittal). It is suggested that you copy the image orientation from the diffusion weighted images and apply this to the anatomical image. This image can be subsequently removed after the registration step is complete. We anticipate that this limitation will be removed in future versions of the registration programs.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractCopyImageOrientationInputSpec
output_spec = gtractCopyImageOrientationOutputSpec
_cmd = " gtractCopyImageOrientation "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
_redirect_x = False
class gtractCreateGuideFiberInputSpec(CommandLineInputSpec):
inputFiber = File(desc="Required: input fiber tract file name", exists=True, argstr="--inputFiber %s")
numberOfPoints = traits.Int(desc="Number of points in output guide fiber", argstr="--numberOfPoints %d")
outputFiber = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output guide fiber file name", argstr="--outputFiber %s")
writeXMLPolyDataFile = traits.Bool(desc="Flag to make use of XML files when reading and writing vtkPolyData.", argstr="--writeXMLPolyDataFile ")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractCreateGuideFiberOutputSpec(TraitedSpec):
outputFiber = File(desc="Required: output guide fiber file name", exists=True)
class gtractCreateGuideFiber(SEMLikeCommandLine):
"""title: Create Guide Fiber
category: Diffusion.GTRACT
description: This program will create a guide fiber by averaging fibers from a previously generated tract.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractCreateGuideFiberInputSpec
output_spec = gtractCreateGuideFiberOutputSpec
_cmd = " gtractCreateGuideFiber "
_outputs_filenames = {'outputFiber': 'outputFiber.vtk'}
_redirect_x = False
class gtractAnisotropyMapInputSpec(CommandLineInputSpec):
inputTensorVolume = File(desc="Required: input file containing the diffusion tensor image", exists=True, argstr="--inputTensorVolume %s")
anisotropyType = traits.Enum("ADC", "FA", "RA", "VR", "AD", "RD", "LI", desc="Anisotropy Mapping Type: ADC, FA, RA, VR, AD, RD, LI", argstr="--anisotropyType %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output NRRD file containing the selected kind of anisotropy scalar.", argstr="--outputVolume %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractAnisotropyMapOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output NRRD file containing the selected kind of anisotropy scalar.", exists=True)
class gtractAnisotropyMap(SEMLikeCommandLine):
"""title: Anisotropy Map
category: Diffusion.GTRACT
description: This program will generate a scalar map of anisotropy, given a tensor representation. Anisotropy images are used for fiber tracking, but the anisotropy scalars are not defined along the path. Instead, the tensor representation is included as point data allowing all of these metrics to be computed using only the fiber tract point data. The images can be saved in any ITK supported format, but it is suggested that you use an image format that supports the definition of the image origin. This includes NRRD, NifTI, and Meta formats. These images can also be used for scalar analysis including regional anisotropy measures or VBM style analysis.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractAnisotropyMapInputSpec
output_spec = gtractAnisotropyMapOutputSpec
_cmd = " gtractAnisotropyMap "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
_redirect_x = False
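# Hedged usage sketch (added for illustration; not part of the auto-generated
# interface code). It shows the usual nipype pattern for driving one of these
# SEM-like interfaces: assign inputs, call run(), and read the outputs. The
# file names are placeholders, and run() only succeeds when the
# gtractAnisotropyMap executable is available on the PATH.
def _example_gtract_anisotropy_map():
    aniso = gtractAnisotropyMap()
    aniso.inputs.inputTensorVolume = 'tensor.nrrd'   # placeholder tensor image
    aniso.inputs.anisotropyType = 'FA'               # fractional anisotropy map
    aniso.inputs.outputVolume = 'fa.nrrd'
    result = aniso.run()                             # shells out to the gtractAnisotropyMap CLI
    return result.outputs.outputVolume               # path to the FA image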
class gtractClipAnisotropyInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image file name", exists=True, argstr="--inputVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output NRRD file containing the clipped anisotropy image", argstr="--outputVolume %s")
clipFirstSlice = traits.Bool(desc="Clip the first slice of the anisotropy image", argstr="--clipFirstSlice ")
clipLastSlice = traits.Bool(desc="Clip the last slice of the anisotropy image", argstr="--clipLastSlice ")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractClipAnisotropyOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output NRRD file containing the clipped anisotropy image", exists=True)
class gtractClipAnisotropy(SEMLikeCommandLine):
"""title: Clip Anisotropy
category: Diffusion.GTRACT
description: This program will zero the first and/or last slice of an anisotropy image, creating a clipped anisotropy image.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractClipAnisotropyInputSpec
output_spec = gtractClipAnisotropyOutputSpec
_cmd = " gtractClipAnisotropy "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
_redirect_x = False
class gtractResampleB0InputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input file containing the 4D image", exists=True, argstr="--inputVolume %s")
inputAnatomicalVolume = File(desc="Required: input file containing the anatomical image defining the origin, spacing and size of the resampled image (template)", exists=True, argstr="--inputAnatomicalVolume %s")
inputTransform = File(desc="Required: input Rigid OR Bspline transform file name", exists=True, argstr="--inputTransform %s")
vectorIndex = traits.Int(desc="Index in the diffusion weighted image set for the B0 image", argstr="--vectorIndex %d")
transformType = traits.Enum("Rigid", "B-Spline", desc="Transform type: Rigid, B-Spline", argstr="--transformType %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output NRRD file containing the resampled input image.", argstr="--outputVolume %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractResampleB0OutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output NRRD file containing the resampled input image.", exists=True)
class gtractResampleB0(SEMLikeCommandLine):
"""title: Resample B0
category: Diffusion.GTRACT
description: This program will resample a signed short image using either a Rigid or B-Spline transform. The user must specify a template image that will be used to define the origin, orientation, spacing, and size of the resampled image.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractResampleB0InputSpec
output_spec = gtractResampleB0OutputSpec
_cmd = " gtractResampleB0 "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
_redirect_x = False
class gtractInvertRigidTransformInputSpec(CommandLineInputSpec):
inputTransform = File(desc="Required: input rigid transform file name", exists=True, argstr="--inputTransform %s")
outputTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output transform file name", argstr="--outputTransform %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractInvertRigidTransformOutputSpec(TraitedSpec):
outputTransform = File(desc="Required: output transform file name", exists=True)
class gtractInvertRigidTransform(SEMLikeCommandLine):
"""title: Rigid Transform Inversion
category: Diffusion.GTRACT
description: This program will invert a Rigid transform.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractInvertRigidTransformInputSpec
output_spec = gtractInvertRigidTransformOutputSpec
_cmd = " gtractInvertRigidTransform "
_outputs_filenames = {'outputTransform': 'outputTransform.h5'}
_redirect_x = False
class gtractImageConformityInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input file containing the signed short image to reorient without resampling.", exists=True, argstr="--inputVolume %s")
inputReferenceVolume = File(desc="Required: input file containing the standard image to clone the characteristics of.", exists=True, argstr="--inputReferenceVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output Nrrd or Nifti file containing the reoriented image in reference image space.", argstr="--outputVolume %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractImageConformityOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output Nrrd or Nifti file containing the reoriented image in reference image space.", exists=True)
class gtractImageConformity(SEMLikeCommandLine):
"""title: Image Conformity
category: Diffusion.GTRACT
description: This program will straighten out the Direction and Origin to match the Reference Image.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractImageConformityInputSpec
output_spec = gtractImageConformityOutputSpec
_cmd = " gtractImageConformity "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
_redirect_x = False
class compareTractInclusionInputSpec(CommandLineInputSpec):
testFiber = File(desc="Required: test fiber tract file name", exists=True, argstr="--testFiber %s")
standardFiber = File(desc="Required: standard fiber tract file name", exists=True, argstr="--standardFiber %s")
closeness = traits.Float(desc="Closeness of every test fiber to some fiber in the standard tract, computed as a sum of squares of spatial differences of standard points", argstr="--closeness %f")
numberOfPoints = traits.Int(desc="Number of points in comparison fiber pairs", argstr="--numberOfPoints %d")
testForBijection = traits.Bool(desc="Flag to apply the closeness criterion both ways", argstr="--testForBijection ")
testForFiberCardinality = traits.Bool(desc="Flag to require the same number of fibers in both tracts", argstr="--testForFiberCardinality ")
writeXMLPolyDataFile = traits.Bool(desc="Flag to make use of XML files when reading and writing vtkPolyData.", argstr="--writeXMLPolyDataFile ")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class compareTractInclusionOutputSpec(TraitedSpec):
pass
class compareTractInclusion(SEMLikeCommandLine):
"""title: Compare Tracts
category: Diffusion.GTRACT
description: This program will halt with a status code indicating whether a test tract is nearly enough included in a standard tract, in the sense that every fiber in the test tract has a low enough sum of squares distance to some fiber in the standard tract, modulo spline resampling of every fiber to a fixed number of points.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = compareTractInclusionInputSpec
output_spec = compareTractInclusionOutputSpec
_cmd = " compareTractInclusion "
_outputs_filenames = {}
_redirect_x = False
class gtractFastMarchingTrackingInputSpec(CommandLineInputSpec):
inputTensorVolume = File(desc="Required: input tensor image file name", exists=True, argstr="--inputTensorVolume %s")
inputAnisotropyVolume = File(desc="Required: input anisotropy image file name", exists=True, argstr="--inputAnisotropyVolume %s")
inputCostVolume = File(desc="Required: input vcl_cost image file name", exists=True, argstr="--inputCostVolume %s")
inputStartingSeedsLabelMapVolume = File(desc="Required: input starting seeds LabelMap image file name", exists=True, argstr="--inputStartingSeedsLabelMapVolume %s")
startingSeedsLabel = traits.Int(desc="Label value for Starting Seeds", argstr="--startingSeedsLabel %d")
outputTract = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", argstr="--outputTract %s")
writeXMLPolyDataFile = traits.Bool(desc="Flag to make use of the XML format for vtkPolyData fiber tracts.", argstr="--writeXMLPolyDataFile ")
numberOfIterations = traits.Int(desc="Number of iterations used for the optimization", argstr="--numberOfIterations %d")
seedThreshold = traits.Float(desc="Anisotropy threshold used for seed selection", argstr="--seedThreshold %f")
trackingThreshold = traits.Float(desc="Anisotropy threshold used for fiber tracking", argstr="--trackingThreshold %f")
costStepSize = traits.Float(desc="Cost image sub-voxel sampling", argstr="--costStepSize %f")
maximumStepSize = traits.Float(desc="Maximum step size to move when tracking", argstr="--maximumStepSize %f")
minimumStepSize = traits.Float(desc="Minimum step size to move when tracking", argstr="--minimumStepSize %f")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractFastMarchingTrackingOutputSpec(TraitedSpec):
outputTract = File(desc="Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", exists=True)
class gtractFastMarchingTracking(SEMLikeCommandLine):
"""title: Fast Marching Tracking
category: Diffusion.GTRACT
description: This program will use a fast marching fiber tracking algorithm to identify fiber tracts from a tensor image. This program is the second portion of the algorithm. The user must first run gtractCostFastMarching to generate the vcl_cost image. The second step of the algorithm implemented here is a gradient descent solution from the defined ending region back to the seed points specified in gtractCostFastMarching. This algorithm is roughly based on the work by G. Parker et al. from IEEE Transactions On Medical Imaging, 21(5): 505-512, 2002. An additional feature of including anisotropy into the vcl_cost function calculation is included.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris. The original code here was developed by Daisy Espino.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractFastMarchingTrackingInputSpec
output_spec = gtractFastMarchingTrackingOutputSpec
_cmd = " gtractFastMarchingTracking "
_outputs_filenames = {'outputTract': 'outputTract.vtk'}
_redirect_x = False
class gtractInvertDisplacementFieldInputSpec(CommandLineInputSpec):
baseImage = File(desc="Required: base image used to define the size of the inverse field", exists=True, argstr="--baseImage %s")
deformationImage = File(desc="Required: Displacement field image", exists=True, argstr="--deformationImage %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: Output deformation field", argstr="--outputVolume %s")
subsamplingFactor = traits.Int(desc="Subsampling factor for the deformation field", argstr="--subsamplingFactor %d")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractInvertDisplacementFieldOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: Output deformation field", exists=True)
class gtractInvertDisplacementField(SEMLikeCommandLine):
"""title: Invert Displacement Field
category: Diffusion.GTRACT
description: This program will invert a deformation field. The size of the deformation field is defined by an example image provided by the user.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractInvertDisplacementFieldInputSpec
output_spec = gtractInvertDisplacementFieldOutputSpec
_cmd = " gtractInvertDisplacementField "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
_redirect_x = False
class gtractCoRegAnatomyInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input vector image file name. It is recommended that the input volume is the skull stripped baseline image of the DWI scan.", exists=True, argstr="--inputVolume %s")
    inputAnatomicalVolume = File(desc="Required: input anatomical image file name. It is recommended that the input anatomical image has been skull stripped and has the same orientation as the DWI scan.", exists=True, argstr="--inputAnatomicalVolume %s")
vectorIndex = traits.Int(desc="Vector image index in the moving image (within the DWI) to be used for registration.", argstr="--vectorIndex %d")
inputRigidTransform = File(desc="Required (for B-Spline type co-registration): input rigid transform file name. Used as a starting point for the anatomical B-Spline registration.", exists=True, argstr="--inputRigidTransform %s")
outputTransformName = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: filename for the fit transform.", argstr="--outputTransformName %s")
transformType = traits.Enum("Rigid", "Bspline", desc="Transform Type: Rigid|Bspline", argstr="--transformType %s")
numberOfIterations = traits.Int(desc="Number of iterations in the selected 3D fit", argstr="--numberOfIterations %d")
gridSize = InputMultiPath(traits.Int, desc="Number of grid subdivisions in all 3 directions", sep=",", argstr="--gridSize %s")
borderSize = traits.Int(desc="Size of border", argstr="--borderSize %d")
numberOfHistogramBins = traits.Int(desc="Number of histogram bins", argstr="--numberOfHistogramBins %d")
spatialScale = traits.Int(desc="Scales the number of voxels in the image by this value to specify the number of voxels used in the registration", argstr="--spatialScale %d")
convergence = traits.Float(desc="Convergence Factor", argstr="--convergence %f")
gradientTolerance = traits.Float(desc="Gradient Tolerance", argstr="--gradientTolerance %f")
maxBSplineDisplacement = traits.Float(
desc=" Sets the maximum allowed displacements in image physical coordinates for BSpline control grid along each axis. A value of 0.0 indicates that the problem should be unbounded. NOTE: This only constrains the BSpline portion, and does not limit the displacement from the associated bulk transform. This can lead to a substantial reduction in computation time in the BSpline optimizer., ", argstr="--maxBSplineDisplacement %f")
maximumStepSize = traits.Float(desc="Maximum permitted step size to move in the selected 3D fit", argstr="--maximumStepSize %f")
minimumStepSize = traits.Float(desc="Minimum required step size to move in the selected 3D fit without converging -- decrease this to make the fit more exacting", argstr="--minimumStepSize %f")
translationScale = traits.Float(desc="How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more translation in the fit", argstr="--translationScale %f")
relaxationFactor = traits.Float(desc="Fraction of gradient from Jacobian to attempt to move in the selected 3D fit", argstr="--relaxationFactor %f")
numberOfSamples = traits.Int(
desc="The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. NOTE that it is suggested to use samplingPercentage instead of this option. However, if set, it overwrites the samplingPercentage option. ", argstr="--numberOfSamples %d")
samplingPercentage = traits.Float(
desc="This is a number in (0.0,1.0] interval that shows the percentage of the input fixed image voxels that are sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation. The default is to use approximately 5% of voxels (for backwards compatibility 5% ~= 500000/(256*256*256)). Typical values range from 1% for low detail images to 20% for high detail images.", argstr="--samplingPercentage %f")
useMomentsAlign = traits.Bool(
desc="MomentsAlign assumes that the center of mass of the images represent similar structures. Perform a MomentsAlign registration as part of the sequential registration steps. This option MUST come first, and CAN NOT be used with either CenterOfHeadLAlign, GeometryAlign, or initialTransform file. This family of options superceeds the use of transformType if any of them are set.", argstr="--useMomentsAlign ")
useGeometryAlign = traits.Bool(
desc="GeometryAlign on assumes that the center of the voxel lattice of the images represent similar structures. Perform a GeometryCenterAlign registration as part of the sequential registration steps. This option MUST come first, and CAN NOT be used with either MomentsAlign, CenterOfHeadAlign, or initialTransform file. This family of options superceeds the use of transformType if any of them are set.", argstr="--useGeometryAlign ")
useCenterOfHeadAlign = traits.Bool(
desc="CenterOfHeadAlign attempts to find a hemisphere full of foreground voxels from the superior direction as an estimate of where the center of a head shape would be to drive a center of mass estimate. Perform a CenterOfHeadAlign registration as part of the sequential registration steps. This option MUST come first, and CAN NOT be used with either MomentsAlign, GeometryAlign, or initialTransform file. This family of options superceeds the use of transformType if any of them are set.", argstr="--useCenterOfHeadAlign ")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractCoRegAnatomyOutputSpec(TraitedSpec):
outputTransformName = File(desc="Required: filename for the fit transform.", exists=True)
class gtractCoRegAnatomy(SEMLikeCommandLine):
"""title: Coregister B0 to Anatomy B-Spline
category: Diffusion.GTRACT
description: This program will register a Nrrd diffusion weighted 4D vector image to a fixed anatomical image. Two registration methods are supported for alignment with anatomical images: Rigid and B-Spline. The rigid registration performs a rigid body registration with the anatomical images and should be done as well to initialize the B-Spline transform. The B-Spline transform is the deformable transform, where the user can control the amount of deformation based on the number of control points as well as the maximum distance that these points can move. The B-Spline registration places a low dimensional grid in the image, which is deformed. This allows for some susceptibility related distortions to be removed from the diffusion weighted images. In general the amount of motion in the slice selection and read-out directions should be kept low. The distortion is in the phase encoding direction in the images. It is recommended that skull stripped (i.e. image containing only brain with skull removed) images should be used for image co-registration with the B-Spline transform.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractCoRegAnatomyInputSpec
output_spec = gtractCoRegAnatomyOutputSpec
_cmd = " gtractCoRegAnatomy "
_outputs_filenames = {'outputTransformName': 'outputTransformName.h5'}
_redirect_x = False
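# Hedged usage sketch (added for illustration; not part of the auto-generated
# interface code). The docstring above notes that a rigid registration should
# be run to initialize the B-Spline fit, so the chaining below assumes that
# two-stage workflow. File names and the grid size are placeholders, and both
# runs require the gtractCoRegAnatomy executable on the PATH.
def _example_coregister_b0_to_anatomy():
    rigid = gtractCoRegAnatomy()
    rigid.inputs.inputVolume = 'dwi.nrrd'            # placeholder 4D DWI vector image
    rigid.inputs.inputAnatomicalVolume = 't1.nrrd'   # placeholder skull-stripped anatomical
    rigid.inputs.vectorIndex = 0                     # register using the b0 volume
    rigid.inputs.transformType = 'Rigid'
    rigid.inputs.outputTransformName = 'rigid.h5'
    rigid_result = rigid.run()

    bspline = gtractCoRegAnatomy()
    bspline.inputs.inputVolume = 'dwi.nrrd'
    bspline.inputs.inputAnatomicalVolume = 't1.nrrd'
    bspline.inputs.vectorIndex = 0
    bspline.inputs.transformType = 'Bspline'
    bspline.inputs.inputRigidTransform = rigid_result.outputs.outputTransformName
    bspline.inputs.gridSize = [18, 18, 18]           # example control-point grid
    bspline.inputs.outputTransformName = 'bspline.h5'
    return bspline.run()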
class gtractResampleDWIInPlaceInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image is a 4D NRRD image.", exists=True, argstr="--inputVolume %s")
referenceVolume = File(desc="If provided, resample to the final space of the referenceVolume 3D data set.", exists=True, argstr="--referenceVolume %s")
outputResampledB0 = traits.Either(traits.Bool, File(), hash_files=False, desc="Convenience function for extracting the first index location (assumed to be the B0)", argstr="--outputResampledB0 %s")
inputTransform = File(desc="Required: transform file derived from rigid registration of b0 image to reference structural image.", exists=True, argstr="--inputTransform %s")
warpDWITransform = File(desc="Optional: transform file to warp gradient volumes.", exists=True, argstr="--warpDWITransform %s")
debugLevel = traits.Int(desc="Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", argstr="--debugLevel %d")
imageOutputSize = InputMultiPath(traits.Int, desc="The voxel lattice for the output image, padding is added if necessary. NOTE: if 0,0,0, then the inputVolume size is used.", sep=",", argstr="--imageOutputSize %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: output image (NRRD file) that has been rigidly transformed into the space of the structural image and padded if image padding was changed from 0,0,0 default.", argstr="--outputVolume %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractResampleDWIInPlaceOutputSpec(TraitedSpec):
outputResampledB0 = File(desc="Convenience function for extracting the first index location (assumed to be the B0)", exists=True)
outputVolume = File(desc="Required: output image (NRRD file) that has been rigidly transformed into the space of the structural image and padded if image padding was changed from 0,0,0 default.", exists=True)
class gtractResampleDWIInPlace(SEMLikeCommandLine):
"""title: Resample DWI In Place
category: Diffusion.GTRACT
description: Resamples DWI image to structural image.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta, Greg Harris, Hans Johnson, and Joy Matsui.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractResampleDWIInPlaceInputSpec
output_spec = gtractResampleDWIInPlaceOutputSpec
_cmd = " gtractResampleDWIInPlace "
_outputs_filenames = {'outputResampledB0': 'outputResampledB0.nii', 'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class gtractCostFastMarchingInputSpec(CommandLineInputSpec):
inputTensorVolume = File(desc="Required: input tensor image file name", exists=True, argstr="--inputTensorVolume %s")
inputAnisotropyVolume = File(desc="Required: input anisotropy image file name", exists=True, argstr="--inputAnisotropyVolume %s")
inputStartingSeedsLabelMapVolume = File(desc="Required: input starting seeds LabelMap image file name", exists=True, argstr="--inputStartingSeedsLabelMapVolume %s")
startingSeedsLabel = traits.Int(desc="Label value for Starting Seeds", argstr="--startingSeedsLabel %d")
outputCostVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Output vcl_cost image", argstr="--outputCostVolume %s")
outputSpeedVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Output speed image", argstr="--outputSpeedVolume %s")
anisotropyWeight = traits.Float(desc="Anisotropy weight used for vcl_cost function calculations", argstr="--anisotropyWeight %f")
    stoppingValue = traits.Float(desc="Terminating value for vcl_cost function estimation", argstr="--stoppingValue %f")
seedThreshold = traits.Float(desc="Anisotropy threshold used for seed selection", argstr="--seedThreshold %f")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractCostFastMarchingOutputSpec(TraitedSpec):
outputCostVolume = File(desc="Output vcl_cost image", exists=True)
outputSpeedVolume = File(desc="Output speed image", exists=True)
class gtractCostFastMarching(SEMLikeCommandLine):
"""title: Cost Fast Marching
category: Diffusion.GTRACT
description: This program will use a fast marching fiber tracking algorithm to identify fiber tracts from a tensor image. This program is the first portion of the algorithm. The user must then run gtractFastMarchingTracking to generate the actual fiber tracts. This algorithm is roughly based on the work by G. Parker et al. from IEEE Transactions On Medical Imaging, 21(5): 505-512, 2002. An additional feature of including anisotropy into the vcl_cost function calculation is included.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris. The original code here was developed by Daisy Espino.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractCostFastMarchingInputSpec
output_spec = gtractCostFastMarchingOutputSpec
_cmd = " gtractCostFastMarching "
_outputs_filenames = {'outputCostVolume': 'outputCostVolume.nrrd', 'outputSpeedVolume': 'outputSpeedVolume.nrrd'}
_redirect_x = False
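# Hedged usage sketch (added for illustration; not part of the auto-generated
# interface code). Per the description above, this tool is the first stage of
# the fast marching approach, and gtractFastMarchingTracking (defined earlier
# in this module) consumes its vcl_cost image. File names and label values are
# placeholders, and both steps require the corresponding executables.
def _example_fast_marching_pipeline():
    cost = gtractCostFastMarching()
    cost.inputs.inputTensorVolume = 'tensor.nrrd'
    cost.inputs.inputAnisotropyVolume = 'fa.nrrd'
    cost.inputs.inputStartingSeedsLabelMapVolume = 'seeds.nrrd'
    cost.inputs.startingSeedsLabel = 1
    cost.inputs.outputCostVolume = 'cost.nrrd'
    cost_result = cost.run()

    track = gtractFastMarchingTracking()
    track.inputs.inputTensorVolume = 'tensor.nrrd'
    track.inputs.inputAnisotropyVolume = 'fa.nrrd'
    track.inputs.inputCostVolume = cost_result.outputs.outputCostVolume
    track.inputs.inputStartingSeedsLabelMapVolume = 'seeds.nrrd'
    track.inputs.startingSeedsLabel = 1
    track.inputs.outputTract = 'fast_marching_tract.vtk'
    return track.run()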
class gtractFiberTrackingInputSpec(CommandLineInputSpec):
inputTensorVolume = File(desc="Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input tensor image file name", exists=True, argstr="--inputTensorVolume %s")
inputAnisotropyVolume = File(desc="Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input anisotropy image file name", exists=True, argstr="--inputAnisotropyVolume %s")
inputStartingSeedsLabelMapVolume = File(desc="Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input starting seeds LabelMap image file name", exists=True, argstr="--inputStartingSeedsLabelMapVolume %s")
startingSeedsLabel = traits.Int(desc="Label value for Starting Seeds (required if Label number used to create seed point in Slicer was not 1)", argstr="--startingSeedsLabel %d")
inputEndingSeedsLabelMapVolume = File(desc="Required (for Streamline, GraphSearch, and Guided fiber tracking methods): input ending seeds LabelMap image file name", exists=True, argstr="--inputEndingSeedsLabelMapVolume %s")
endingSeedsLabel = traits.Int(desc="Label value for Ending Seeds (required if Label number used to create seed point in Slicer was not 1)", argstr="--endingSeedsLabel %d")
inputTract = File(desc="Required (for Guided fiber tracking method): guide fiber in vtkPolydata file containing one tract line.", exists=True, argstr="--inputTract %s")
outputTract = traits.Either(traits.Bool, File(), hash_files=False, desc="Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): name of output vtkPolydata file containing tract lines and the point data collected along them.", argstr="--outputTract %s")
writeXMLPolyDataFile = traits.Bool(desc="Flag to make use of the XML format for vtkPolyData fiber tracts.", argstr="--writeXMLPolyDataFile ")
trackingMethod = traits.Enum("Guided", "Free", "Streamline", "GraphSearch", desc="Fiber tracking Filter Type: Guided|Free|Streamline|GraphSearch", argstr="--trackingMethod %s")
guidedCurvatureThreshold = traits.Float(desc="Guided Curvature Threshold (Degrees)", argstr="--guidedCurvatureThreshold %f")
maximumGuideDistance = traits.Float(desc="Maximum distance for using the guide fiber direction", argstr="--maximumGuideDistance %f")
seedThreshold = traits.Float(desc="Anisotropy threshold for seed selection (recommended for Free fiber tracking)", argstr="--seedThreshold %f")
trackingThreshold = traits.Float(desc="Anisotropy threshold for fiber tracking (anisotropy values of the next point along the path)", argstr="--trackingThreshold %f")
curvatureThreshold = traits.Float(desc="Curvature threshold in degrees (recommended for Free fiber tracking)", argstr="--curvatureThreshold %f")
branchingThreshold = traits.Float(desc="Anisotropy Branching threshold (recommended for GraphSearch fiber tracking method)", argstr="--branchingThreshold %f")
maximumBranchPoints = traits.Int(desc="Maximum branch points (recommended for GraphSearch fiber tracking method)", argstr="--maximumBranchPoints %d")
useRandomWalk = traits.Bool(desc="Flag to use random walk.", argstr="--useRandomWalk ")
randomSeed = traits.Int(desc="Random number generator seed", argstr="--randomSeed %d")
branchingAngle = traits.Float(desc="Branching angle in degrees (recommended for GraphSearch fiber tracking method)", argstr="--branchingAngle %f")
minimumLength = traits.Float(desc="Minimum fiber length. Helpful for filtering invalid tracts.", argstr="--minimumLength %f")
maximumLength = traits.Float(desc="Maximum fiber length (voxels)", argstr="--maximumLength %f")
stepSize = traits.Float(desc="Fiber tracking step size", argstr="--stepSize %f")
useLoopDetection = traits.Bool(desc="Flag to make use of loop detection.", argstr="--useLoopDetection ")
useTend = traits.Bool(desc="Flag to make use of Tend F and Tend G parameters.", argstr="--useTend ")
tendF = traits.Float(desc="Tend F parameter", argstr="--tendF %f")
tendG = traits.Float(desc="Tend G parameter", argstr="--tendG %f")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractFiberTrackingOutputSpec(TraitedSpec):
outputTract = File(desc="Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): name of output vtkPolydata file containing tract lines and the point data collected along them.", exists=True)
class gtractFiberTracking(SEMLikeCommandLine):
"""title: Fiber Tracking
category: Diffusion.GTRACT
description: This program implements four fiber tracking methods (Free, Streamline, GraphSearch, Guided). The output of the fiber tracking is vtkPolyData (i.e. Polylines) that can be loaded into Slicer3 for visualization. The poly data can be saved in either old VTK format files (.vtk) or in the new VTK XML format (.xml). The polylines contain point data that defines the Tensor at each point along the fiber tract. This can then be rendered as glyphs in Slicer3 and can be used to define several scalar measures without referencing back to the anisotropy images. (1) Free tracking is a basic streamlines algorithm. This is a direct implementation of the method originally proposed by Basser et al. The tracking follows the primary eigenvector. The tracking begins with seed points in the starting region. Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either as a result of maximum fiber length, low anisotropy, or large curvature. This is a great way to explore your data. (2) The streamlines algorithm is a direct implementation of the method originally proposed by Basser et al. The tracking follows the primary eigenvector. The tracking begins with seed points in the starting region. Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either by reaching the ending region or reaching some stopping criteria. Stopping criteria are specified using the following parameters: tracking threshold, curvature threshold, and max length. Only paths terminating in the ending region are kept in this method. The TEND algorithm proposed by Lazar et al. (Human Brain Mapping 18:306-321, 2003) has been implemented. This can be enabled using the --useTend option while performing Streamlines tracking. This utilizes the entire diffusion tensor to deflect the incoming vector instead of simply following the primary eigenvector. The TEND parameters are set using the --tendF and --tendG options. (3) Graph Search tracking is the first step in the full GTRACT algorithm developed by Cheng et al. (NeuroImage 31(3): 1075-1085, 2006) for finding the tracks in a tensor image. This method was developed to generate fibers in a Tensor representation where crossing fibers occur. The graph search algorithm follows the primary eigenvector in non-ambiguous regions and utilizes branching and a graph search algorithm in ambiguous regions. Ambiguous tracking regions are defined based on two criteria: Branching Al Threshold (anisotropy values below this value and above the tracking threshold) and Curvature Major Eigen (angles of the primary eigenvector direction and the current tracking direction). In regions that meet these criteria, two or three tracking paths are considered. The first is the standard primary eigenvector direction. The second is the secondary eigenvector direction. This is based on the assumption that these regions may be prolate regions. If the Random Walk option is selected then a third direction is also considered. This direction is defined by a cone pointing from the current position to the centroid of the ending region. The interior angle of the cone is specified by the user with the Branch/Guide Angle parameter. A vector contained inside of the cone is selected at random and used as the third direction. This method can also utilize the TEND option where the primary tracking direction is that specified by the TEND method instead of the primary eigenvector.
The parameter '--maximumBranchPoints' allows the tracking to have this number of branches being considered at a time. If this number of branch points is exceeded at any time, then the algorithm will revert to a streamline algorithm until the number of branches is reduced. This allows the user to constrain the computational complexity of the algorithm. (4) The second phase of the GTRACT algorithm is Guided Tracking. This method incorporates anatomical information about the track orientation using an initial guess of the fiber track. In the originally proposed GTRACT method, this would be created from the fibers resulting from the Graph Search tracking. However, in practice this can be created using any method and could be defined manually. To create the guide fiber the program gtractCreateGuideFiber can be used. This program will load a fiber tract that has been generated and create a centerline representation of the fiber tract (i.e. a single fiber). In this method, the fiber tracking follows the primary eigenvector direction unless it deviates from the guide fiber track by an angle greater than that specified by the '--guidedCurvatureThreshold' parameter. The user must specify the guide fiber when running this program.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta, Greg Harris and Yongqiang Zhao.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractFiberTrackingInputSpec
output_spec = gtractFiberTrackingOutputSpec
_cmd = " gtractFiberTracking "
_outputs_filenames = {'outputTract': 'outputTract.vtk'}
_redirect_x = False
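# Hedged usage sketch (added for illustration; not part of the auto-generated
# interface code). It wires gtractCreateGuideFiber (defined earlier in this
# module) into the Guided tracking method described above. All file names,
# label maps, and threshold values are placeholders.
def _example_guided_fiber_tracking():
    guide = gtractCreateGuideFiber()
    guide.inputs.inputFiber = 'graph_search_tract.vtk'   # placeholder tract from a prior run
    guide.inputs.numberOfPoints = 50
    guide.inputs.outputFiber = 'guide_fiber.vtk'
    guide_result = guide.run()

    tracking = gtractFiberTracking()
    tracking.inputs.trackingMethod = 'Guided'
    tracking.inputs.inputTensorVolume = 'tensor.nrrd'
    tracking.inputs.inputAnisotropyVolume = 'fa.nrrd'
    tracking.inputs.inputStartingSeedsLabelMapVolume = 'start_seeds.nrrd'
    tracking.inputs.inputEndingSeedsLabelMapVolume = 'end_seeds.nrrd'
    tracking.inputs.inputTract = guide_result.outputs.outputFiber
    tracking.inputs.guidedCurvatureThreshold = 30.0       # degrees
    tracking.inputs.outputTract = 'guided_tract.vtk'
    return tracking.run()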
class extractNrrdVectorIndexInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input file containing the vector that will be extracted", exists=True, argstr="--inputVolume %s")
vectorIndex = traits.Int(desc="Index in the vector image to extract", argstr="--vectorIndex %d")
setImageOrientation = traits.Enum("AsAcquired", "Axial", "Coronal", "Sagittal", desc="Sets the image orientation of the extracted vector (Axial, Coronal, Sagittal)", argstr="--setImageOrientation %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output NRRD file containing the vector image at the given index", argstr="--outputVolume %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class extractNrrdVectorIndexOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output NRRD file containing the vector image at the given index", exists=True)
class extractNrrdVectorIndex(SEMLikeCommandLine):
"""title: Extract Nrrd Index
category: Diffusion.GTRACT
description: This program will extract a 3D image (single vector) from a vector 3D image at a given vector index.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = extractNrrdVectorIndexInputSpec
output_spec = extractNrrdVectorIndexOutputSpec
_cmd = " extractNrrdVectorIndex "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
_redirect_x = False
class gtractResampleFibersInputSpec(CommandLineInputSpec):
inputForwardDeformationFieldVolume = File(desc="Required: input forward deformation field image file name", exists=True, argstr="--inputForwardDeformationFieldVolume %s")
inputReverseDeformationFieldVolume = File(desc="Required: input reverse deformation field image file name", exists=True, argstr="--inputReverseDeformationFieldVolume %s")
inputTract = File(desc="Required: name of input vtkPolydata file containing tract lines.", exists=True, argstr="--inputTract %s")
outputTract = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", argstr="--outputTract %s")
writeXMLPolyDataFile = traits.Bool(desc="Flag to make use of the XML format for vtkPolyData fiber tracts.", argstr="--writeXMLPolyDataFile ")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractResampleFibersOutputSpec(TraitedSpec):
outputTract = File(desc="Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", exists=True)
class gtractResampleFibers(SEMLikeCommandLine):
"""title: Resample Fibers
category: Diffusion.GTRACT
description: This program will resample a fiber tract with respect to a pair of deformation fields that represent the forward and reverse deformation fields.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractResampleFibersInputSpec
output_spec = gtractResampleFibersOutputSpec
_cmd = " gtractResampleFibers "
_outputs_filenames = {'outputTract': 'outputTract.vtk'}
_redirect_x = False
class gtractTensorInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Required: input image 4D NRRD image. Must contain data based on at least 6 distinct diffusion directions. The inputVolume is allowed to have multiple b0 and gradient direction images. Averaging of the b0 image is done internally in this step. Prior averaging of the DWIs is not required.",
exists=True, argstr="--inputVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Required: name of output NRRD file containing the Tensor vector image", argstr="--outputVolume %s")
medianFilterSize = InputMultiPath(traits.Int, desc="Median filter radius in all 3 directions", sep=",", argstr="--medianFilterSize %s")
maskProcessingMode = traits.Enum(
"NOMASK", "ROIAUTO", "ROI", desc="ROIAUTO: mask is implicitly defined using a otsu forground and hole filling algorithm. ROI: Uses the masks to define what parts of the image should be used for computing the transform. NOMASK: no mask used", argstr="--maskProcessingMode %s")
maskVolume = File(desc="Mask Image, if maskProcessingMode is ROI", exists=True, argstr="--maskVolume %s")
backgroundSuppressingThreshold = traits.Int(
desc="Image threshold to suppress background. This sets a threshold used on the b0 image to remove background voxels from processing. Typically, values of 100 and 500 work well for Siemens and GE DTI data, respectively. Check your data particularly in the globus pallidus to make sure the brain tissue is not being eliminated with this threshold.", argstr="--backgroundSuppressingThreshold %d")
resampleIsotropic = traits.Bool(desc="Flag to resample to isotropic voxels. Enabling this feature is recommended if fiber tracking will be performed.", argstr="--resampleIsotropic ")
size = traits.Float(desc="Isotropic voxel size to resample to", argstr="--size %f")
b0Index = traits.Int(desc="Index in input vector index to extract", argstr="--b0Index %d")
applyMeasurementFrame = traits.Bool(desc="Flag to apply the measurement frame to the gradient directions", argstr="--applyMeasurementFrame ")
ignoreIndex = InputMultiPath(traits.Int, desc="Ignore diffusion gradient index. Used to remove specific gradient directions with artifacts.", sep=",", argstr="--ignoreIndex %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class gtractTensorOutputSpec(TraitedSpec):
outputVolume = File(desc="Required: name of output NRRD file containing the Tensor vector image", exists=True)
class gtractTensor(SEMLikeCommandLine):
"""title: Tensor Estimation
category: Diffusion.GTRACT
description: This step will convert a b-value averaged diffusion tensor image to a 3x3 tensor voxel image. This step takes the diffusion tensor image data and generates a tensor representation of the data based on the signal intensity decay, b values applied, and the diffusion directions. The apparent diffusion coefficient for a given orientation is computed on a pixel-by-pixel basis by fitting the image data (voxel intensities) to the Stejskal-Tanner equation. If at least 6 diffusion directions are used, then the diffusion tensor can be computed. This program uses itk::DiffusionTensor3DReconstructionImageFilter. The user can adjust background threshold, median filter, and isotropic resampling.
version: 4.0.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
contributor: This tool was developed by Vincent Magnotta and Greg Harris.
acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
"""
input_spec = gtractTensorInputSpec
output_spec = gtractTensorOutputSpec
_cmd = " gtractTensor "
_outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
_redirect_x = False
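# Hedged usage sketch (added for illustration; not part of the auto-generated
# interface code). It shows the tensor estimation step that typically precedes
# the anisotropy map and fiber tracking tools above. File names are
# placeholders and run() requires the gtractTensor executable on the PATH.
def _example_tensor_estimation():
    tensor = gtractTensor()
    tensor.inputs.inputVolume = 'dwi.nrrd'        # placeholder 4D DWI NRRD
    tensor.inputs.maskProcessingMode = 'ROIAUTO'  # implicit brain mask
    tensor.inputs.resampleIsotropic = True        # recommended before fiber tracking
    tensor.inputs.outputVolume = 'tensor.nrrd'
    return tensor.run()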
|
40023154/Finalexam_0627
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_weakrefset.py
|
766
|
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard:
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet:
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
l = self._pending_removals
discard = self.data.discard
while l:
discard(l.pop())
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
yield item
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet')
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
def difference(self, other):
newset = self.copy()
newset.difference_update(other)
return newset
__sub__ = difference
def difference_update(self, other):
self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__le__ = issubset
def __lt__(self, other):
return self.data < set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__ge__ = issuperset
def __gt__(self, other):
return self.data > set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
def symmetric_difference(self, other):
newset = self.copy()
newset.symmetric_difference_update(other)
return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
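# Hedged usage sketch (added for illustration; not part of the original module).
# It demonstrates the defining property of WeakSet: entries vanish once the last
# strong reference to an element is dropped. gc.collect() is called only to make
# the behaviour deterministic on interpreters without reference counting.
if __name__ == '__main__':
    import gc

    class _Token:
        pass

    a, b = _Token(), _Token()
    live = WeakSet([a, b])
    assert len(live) == 2 and a in live and b in live
    del b                     # drop the only strong reference to one element
    gc.collect()
    assert len(live) == 1 and a in live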
|
daevaorn/sentry
|
refs/heads/master
|
src/sentry/replays.py
|
10
|
from __future__ import absolute_import
from requests import RequestException
from sentry.http import safe_urlopen
class Replayer(object):
def __init__(self, url, method, data=None, headers=None):
self.url = url
self.method = method
self.data = data
self.headers = headers
def replay(self):
try:
response = safe_urlopen(
url=self.url,
method=self.method,
data=self.data,
headers=self.headers or {}
)
except RequestException as e:
return {
'status': 'error',
'reason': str(e),
}
return {
'status': response.status_code,
'reason': response.reason,
'headers': response.headers,
'body': response.content,
}
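# Hedged usage sketch (added for illustration; not part of the original module).
# The endpoint and payload below are placeholders; replay() issues a real HTTP
# request through safe_urlopen, so this only illustrates the call shape and the
# two possible result shapes.
def _example_replay():
    replayer = Replayer(
        url='https://example.invalid/api/store/',          # placeholder endpoint
        method='POST',
        data='{"message": "hello"}',
        headers={'Content-Type': 'application/json'},
    )
    result = replayer.replay()
    # On a transport error: {'status': 'error', 'reason': '<exception text>'}.
    # Otherwise: status code, reason, headers, and body of the proxied response.
    return result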
|
BillBillBillBill/Take-out
|
refs/heads/master
|
backend/takeout/bussiness/urls.py
|
1
|
from django.conf.urls import url
from rest_framework import routers
import views
router = routers.DefaultRouter()
#router.register(r'seller', views.SellerViewSet)
# router.register(r'store', views.StoreViewSet)
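# Note (added for illustration): these patterns are unanchored, so ordering
# matters -- each detail route (e.g. r'seller/(\d*)') must stay above its bare
# list route (e.g. r'seller'), otherwise the list view would also match the
# detail URLs.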
urlpatterns = [
# url(r'^', include(router.urls)),
url(r'seller/(\d*)', views.SellerDetail.as_view()),
url(r'seller', views.SellerList.as_view()),
url(r'store/(\d*)', views.StoreDetail.as_view()),
url(r'store', views.StoreList.as_view()),
url(r'food/(\d*)', views.FoodDetail.as_view()),
url(r'food', views.FoodList.as_view()),
# url(r'store/$', views.StoreList.as_view()),
]
|
archf/ansible
|
refs/heads/devel
|
lib/ansible/modules/monitoring/newrelic_deployment.py
|
29
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Coddington <coddington@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: newrelic_deployment
version_added: "1.2"
author: "Matt Coddington (@mcodd)"
short_description: Notify newrelic about app deployments
description:
- Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api)
options:
token:
description:
- API token, to place in the x-api-key header.
required: true
app_name:
description:
- (one of app_name or application_id are required) The value of app_name in the newrelic.yml file used by the application
required: false
application_id:
description:
- (one of app_name or application_id are required) The application id, found in the URL when viewing the application in RPM
required: false
changelog:
description:
- A list of changes for this deployment
required: false
description:
description:
- Text annotation for the deployment - notes for you
required: false
revision:
description:
- A revision number (e.g., git commit SHA)
required: false
user:
description:
- The name of the user/process that triggered this deployment
required: false
appname:
description:
- Name of the application
required: false
environment:
description:
- The environment for this deployment
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
requirements: []
'''
EXAMPLES = '''
- newrelic_deployment:
token: AAAAAA
app_name: myapp
user: ansible deployment
revision: '1.0'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True, no_log=True),
app_name=dict(required=False),
application_id=dict(required=False),
changelog=dict(required=False),
description=dict(required=False),
revision=dict(required=False),
user=dict(required=False),
appname=dict(required=False),
environment=dict(required=False),
validate_certs = dict(default='yes', type='bool'),
),
required_one_of=[['app_name', 'application_id']],
supports_check_mode=True
)
# build list of params
params = {}
if module.params["app_name"] and module.params["application_id"]:
module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
if module.params["app_name"]:
params["app_name"] = module.params["app_name"]
elif module.params["application_id"]:
params["application_id"] = module.params["application_id"]
else:
module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
for item in [ "changelog", "description", "revision", "user", "appname", "environment" ]:
if module.params[item]:
params[item] = module.params[item]
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True)
# Send the data to NewRelic
url = "https://rpm.newrelic.com/deployments.xml"
data = urlencode(params)
headers = {
'x-api-key': module.params["token"],
}
response, info = fetch_url(module, url, data=data, headers=headers)
if info['status'] in (200, 201):
module.exit_json(changed=True)
else:
module.fail_json(msg="unable to update newrelic: %s" % info['msg'])
if __name__ == '__main__':
main()
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/324_test_index.py
|
60
|
import unittest
from test import support
import operator
maxsize = support.MAX_Py_ssize_t
class newstyle:
def __index__(self):
return self.ind
class TrapInt(int):
def __index__(self):
return self
class BaseTestCase(unittest.TestCase):
def setUp(self):
self.o = newstyle()
self.n = newstyle()
def test_basic(self):
self.o.ind = -2
self.n.ind = 2
self.assertEqual(operator.index(self.o), -2)
self.assertEqual(operator.index(self.n), 2)
def test_slice(self):
self.o.ind = 1
self.n.ind = 2
slc = slice(self.o, self.o, self.o)
check_slc = slice(1, 1, 1)
self.assertEqual(slc.indices(self.o), check_slc.indices(1))
slc = slice(self.n, self.n, self.n)
check_slc = slice(2, 2, 2)
self.assertEqual(slc.indices(self.n), check_slc.indices(2))
def test_wrappers(self):
self.o.ind = 4
self.n.ind = 5
self.assertEqual(6 .__index__(), 6)
self.assertEqual(-7 .__index__(), -7)
self.assertEqual(self.o.__index__(), 4)
self.assertEqual(self.n.__index__(), 5)
self.assertEqual(True.__index__(), 1)
self.assertEqual(False.__index__(), 0)
def test_subclasses(self):
r = list(range(10))
self.assertEqual(r[TrapInt(5):TrapInt(10)], r[5:10])
self.assertEqual(slice(TrapInt()).indices(0), (0,0,1))
def test_error(self):
self.o.ind = 'dumb'
self.n.ind = 'bad'
self.assertRaises(TypeError, operator.index, self.o)
self.assertRaises(TypeError, operator.index, self.n)
self.assertRaises(TypeError, slice(self.o).indices, 0)
self.assertRaises(TypeError, slice(self.n).indices, 0)
class SeqTestCase(unittest.TestCase):
# This test case isn't run directly. It just defines common tests
# to the different sequence types below
def setUp(self):
self.o = newstyle()
self.n = newstyle()
self.o2 = newstyle()
self.n2 = newstyle()
def test_index(self):
self.o.ind = -2
self.n.ind = 2
self.assertEqual(self.seq[self.n], self.seq[2])
self.assertEqual(self.seq[self.o], self.seq[-2])
def test_slice(self):
self.o.ind = 1
self.o2.ind = 3
self.n.ind = 2
self.n2.ind = 4
self.assertEqual(self.seq[self.o:self.o2], self.seq[1:3])
self.assertEqual(self.seq[self.n:self.n2], self.seq[2:4])
def test_slice_bug7532(self):
seqlen = len(self.seq)
self.o.ind = int(seqlen * 1.5)
self.n.ind = seqlen + 2
self.assertEqual(self.seq[self.o:], self.seq[0:0])
self.assertEqual(self.seq[:self.o], self.seq)
self.assertEqual(self.seq[self.n:], self.seq[0:0])
self.assertEqual(self.seq[:self.n], self.seq)
self.o2.ind = -seqlen - 2
self.n2.ind = -int(seqlen * 1.5)
self.assertEqual(self.seq[self.o2:], self.seq)
self.assertEqual(self.seq[:self.o2], self.seq[0:0])
self.assertEqual(self.seq[self.n2:], self.seq)
self.assertEqual(self.seq[:self.n2], self.seq[0:0])
def test_repeat(self):
self.o.ind = 3
self.n.ind = 2
self.assertEqual(self.seq * self.o, self.seq * 3)
self.assertEqual(self.seq * self.n, self.seq * 2)
self.assertEqual(self.o * self.seq, self.seq * 3)
self.assertEqual(self.n * self.seq, self.seq * 2)
def test_wrappers(self):
self.o.ind = 4
self.n.ind = 5
self.assertEqual(self.seq.__getitem__(self.o), self.seq[4])
self.assertEqual(self.seq.__mul__(self.o), self.seq * 4)
self.assertEqual(self.seq.__rmul__(self.o), self.seq * 4)
self.assertEqual(self.seq.__getitem__(self.n), self.seq[5])
self.assertEqual(self.seq.__mul__(self.n), self.seq * 5)
self.assertEqual(self.seq.__rmul__(self.n), self.seq * 5)
def test_subclasses(self):
self.assertEqual(self.seq[TrapInt()], self.seq[0])
def test_error(self):
self.o.ind = 'dumb'
self.n.ind = 'bad'
indexobj = lambda x, obj: obj.seq[x]
self.assertRaises(TypeError, indexobj, self.o, self)
self.assertRaises(TypeError, indexobj, self.n, self)
sliceobj = lambda x, obj: obj.seq[x:]
self.assertRaises(TypeError, sliceobj, self.o, self)
self.assertRaises(TypeError, sliceobj, self.n, self)
class ListTestCase(SeqTestCase):
seq = [0,10,20,30,40,50]
def test_setdelitem(self):
self.o.ind = -2
self.n.ind = 2
lst = list('ab!cdefghi!j')
del lst[self.o]
del lst[self.n]
lst[self.o] = 'X'
lst[self.n] = 'Y'
self.assertEqual(lst, list('abYdefghXj'))
lst = [5, 6, 7, 8, 9, 10, 11]
lst.__setitem__(self.n, "here")
self.assertEqual(lst, [5, 6, "here", 8, 9, 10, 11])
lst.__delitem__(self.n)
self.assertEqual(lst, [5, 6, 8, 9, 10, 11])
def test_inplace_repeat(self):
self.o.ind = 2
self.n.ind = 3
lst = [6, 4]
lst *= self.o
self.assertEqual(lst, [6, 4, 6, 4])
lst *= self.n
self.assertEqual(lst, [6, 4, 6, 4] * 3)
lst = [5, 6, 7, 8, 9, 11]
l2 = lst.__imul__(self.n)
self.assertIs(l2, lst)
self.assertEqual(lst, [5, 6, 7, 8, 9, 11] * 3)
class NewSeq:
def __init__(self, iterable):
self._list = list(iterable)
def __repr__(self):
return repr(self._list)
def __eq__(self, other):
return self._list == other
def __len__(self):
return len(self._list)
def __mul__(self, n):
return self.__class__(self._list*n)
__rmul__ = __mul__
def __getitem__(self, index):
return self._list[index]
class TupleTestCase(SeqTestCase):
seq = (0,10,20,30,40,50)
class ByteArrayTestCase(SeqTestCase):
seq = bytearray(b"this is a test")
class BytesTestCase(SeqTestCase):
seq = b"this is a test"
class StringTestCase(SeqTestCase):
seq = "this is a test"
class NewSeqTestCase(SeqTestCase):
seq = NewSeq((0,10,20,30,40,50))
class RangeTestCase(unittest.TestCase):
def test_range(self):
n = newstyle()
n.ind = 5
self.assertEqual(range(1, 20)[n], 6)
self.assertEqual(range(1, 20).__getitem__(n), 6)
class OverflowTestCase(unittest.TestCase):
def setUp(self):
self.pos = 2**100
self.neg = -self.pos
def test_large_longs(self):
self.assertEqual(self.pos.__index__(), self.pos)
self.assertEqual(self.neg.__index__(), self.neg)
def test_getitem(self):
class GetItem:
def __len__(self):
assert False, "__len__ should not be invoked"
def __getitem__(self, key):
return key
x = GetItem()
self.assertEqual(x[self.pos], self.pos)
self.assertEqual(x[self.neg], self.neg)
self.assertEqual(x[self.neg:self.pos].indices(maxsize),
(0, maxsize, 1))
self.assertEqual(x[self.neg:self.pos:1].indices(maxsize),
(0, maxsize, 1))
def test_sequence_repeat(self):
self.assertRaises(OverflowError, lambda: "a" * self.pos)
self.assertRaises(OverflowError, lambda: "a" * self.neg)
def test_main():
support.run_unittest(
BaseTestCase,
ListTestCase,
TupleTestCase,
BytesTestCase,
ByteArrayTestCase,
StringTestCase,
NewSeqTestCase,
RangeTestCase,
OverflowTestCase,
)
if __name__ == "__main__":
test_main()
|
yangming85/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/regressiontests/dispatch/__init__.py
|
277
|
"""Unit-tests for the dispatch project
"""
|
mdanielwork/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyAddSpecifierToFormatQuickFixTest/float.py
|
80
|
a = <warning descr="Format specifier character missing">"t<caret>est %"</warning> % 1.
|
ibinti/intellij-community
|
refs/heads/master
|
python/helpers/pydev/_pydevd_frame_eval/pydevd_frame_tracing.py
|
8
|
import sys
from _pydev_bundle import pydev_log
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_comm import get_global_debugger, CMD_SET_BREAK
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
from _pydevd_bundle.pydevd_frame import handle_breakpoint_condition, handle_breakpoint_expression
class DummyTracingHolder:
dummy_trace_func = None
def set_trace_func(self, trace_func):
self.dummy_trace_func = trace_func
dummy_tracing_holder = DummyTracingHolder()
def update_globals_dict(globals_dict):
new_globals = {'_pydev_stop_at_break': _pydev_stop_at_break}
globals_dict.update(new_globals)
def handle_breakpoint(frame, thread, global_debugger, breakpoint):
# ok, hit breakpoint, now, we have to discover if it is a conditional breakpoint
new_frame = frame
condition = breakpoint.condition
info = thread.additional_info
if condition is not None:
eval_result = handle_breakpoint_condition(global_debugger, info, breakpoint, new_frame)
if not eval_result:
return False
if breakpoint.expression is not None:
handle_breakpoint_expression(breakpoint, info, new_frame)
if breakpoint.suspend_policy == "ALL":
global_debugger.suspend_all_other_threads(thread)
return True
def _get_line_for_frame(frame):
    # it's absolutely necessary to reset the tracing function for the frame in order to get the real line number
tracing_func = frame.f_trace
frame.f_trace = None
line = frame.f_lineno
frame.f_trace = tracing_func
return line
def _pydev_stop_at_break():
frame = sys._getframe(1)
t = threading.currentThread()
if t.additional_info.is_tracing:
return
if t.additional_info.pydev_step_cmd == -1 and frame.f_trace in (None, dummy_tracing_holder.dummy_trace_func):
# do not handle breakpoints while stepping, because they're handled by old tracing function
t.additional_info.is_tracing = True
debugger = get_global_debugger()
try:
abs_path_real_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)
filename = abs_path_real_path_and_base[1]
breakpoints_for_file = debugger.breakpoints.get(filename)
line = _get_line_for_frame(frame)
try:
breakpoint = breakpoints_for_file[line]
except KeyError:
pydev_log.debug("Couldn't find breakpoint in the file {} on line {}".format(frame.f_code.co_filename, line))
t.additional_info.is_tracing = False
return
if breakpoint and handle_breakpoint(frame, t, debugger, breakpoint):
pydev_log.debug("Suspending at breakpoint in file: {} on line {}".format(frame.f_code.co_filename, line))
debugger.set_suspend(t, CMD_SET_BREAK)
debugger.do_wait_suspend(t, frame, 'line', None, "frame_eval")
t.additional_info.is_tracing = False
def pydev_trace_code_wrapper():
# import this module again, because it's inserted inside user's code
global _pydev_stop_at_break
_pydev_stop_at_break()
|
gcobos/rft
|
refs/heads/master
|
app/controllers/functions.py
|
1
|
import config
# Author: drone
import web
from app.models import functions
from app.models import rss
from app.helpers import render
from app.helpers import misc
from app.helpers import utils
from config import view, listLimit, projectName
def functionMustExist (meth):
def new(self, functionName):
if not functions.getFunctionIdByName(functionName):
return render.layout('')
else:
return meth(self, functionName)
return new
class ListLatest:
def GET (self, page = 0):
#if web.input().has_key('q'):
# return web.seeother('/search/?' + utils.url_encode(web.input()))
latest_functions, has_next = functions.getLatest(offset=page * listLimit)
return render.layout(
view.list_functions(latest_functions, has_next, sub_title='Latest Functions')
)
# Category
class ListCategory:
def GET(self, page_number):
page_number = int(page_number)
latest_functions, has_next = functions.getFromCategory(categoryId=0 ,offset=page_number * listLimit )
next_page_url = ''
if has_next:
next_page_url = '/page/%s/' % (page_number + 1)
sub_title = 'Latest functions'
if page_number:
sub_title += ' - Page %s' % (page_number + 1)
return render.layout(
view.list_functions(latest_functions, next_page_url=next_page_url, sub_title=sub_title),
title = page_number and 'Page %s - Functions' % (page_number + 1) or 'Functions')
class ListAll:
def GET(self, order_by='name', page_number= 0):
if not order_by: order_by = ''
page_number = web.intget(page_number, 0)
m, has_next = functions.getAllFunctions(order_by, limit=config.listLimit,offset=page_number*config.listLimit)
next_page_url = ''
if has_next:
if not order_by:
next_page_url = '/list/page/%s/' % (page_number + 1)
else:
next_page_url = '/list/by-%s/%s/' % (order_by, page_number + 1)
sub_title = 'Page %s' % (page_number + 1)
return render.layout(
view.all_functions(m, functions.getCount(), order_by, next_page_url, sub_title=sub_title),
title = ('All functions - %s - '+projectName) % sub_title,
mode = 'modeAllModules')
class Add:
def GET (self, functionId=0):
categories = functions.getCategories()
if functionId:
            function = functions.getFunction(functionId)
else :
function = functions.createNew()
return render.layout(view.submit_function(function, categories, action='submit'),
title='Submit function - ' + projectName)
def POST (self):
data = web.input( isPrimitive='0', isCorrect='0', isCommutative='0', isRecursive='0', _unicode=False)
function = functions.bind(data)
print "Lalal"+ str(function)
(success, err_msg) = functions.checkData(function)
if success:
success, err_msg = functions.add(function)
#rss.update_rss()
return render.layout(view.submitted_form(success, type='submit', err_msg=err_msg),
title='Submit function - ' + projectName)
class Edit:
@functionMustExist
def GET (self, functionName):
functionId = functions.getFunctionIdByName(functionName)
function = functions.getFunction(functionId)
print function
categories = functions.getCategories()
return render.layout(view.submit_function(function, categories, action='update'),
title='Edit function - ' + projectName)
def POST (self):
function = web.input( isPrimitive='0', isCorrect='0', isCommutative='0', isRecursive='0', _unicode=False)
(success, err_msg) = functions.checkData(function,True)
if success:
#print "Updating2 "+ str(function)
success, err_msg = functions.update(function)
if success:
                raise web.seeother('/show/'+function.name)
return render.layout(view.submitted_form(success, type='submit', err_msg=err_msg),
title='Submit function - ' + projectName)
class Show:
@functionMustExist
def GET(self, functionName):
functionId = functions.getFunctionIdByName(functionName)
function = functions.getFunction(functionId)
#print "La function ",function
related_functions = functions.getRelated(functionId)
"""
source=function.source.split("\n")
print("Source",source)
if len(source):
if ':' in source[0]:
source, error = source[0].split(':')
function.source=source
"""
return render.layout(view.show_function(function, related_functions, misc.get_pub_id()),
title=function.name + ' - ' + projectName)
class Train:
""" 1) Extracts training data and the last source from the function
2) Number of parameters and training information is stored in a object functionData
3) Retrieves the list of functions availables as primitives, as tools for search the solution, each one with a training
4) Number of parameters and
"""
@functionMustExist
def GET (self, functionName):
functionId = functions.getFunctionIdByName(functionName)
function = functions.getFunction(functionId)
del(function.category)
(primitivesStr, bestNode, minError, numNodes, err_msg) = functions.train(function)
return render.layout(view.train_function(function, primitivesStr, bestNode, minError, numNodes, err_msg ),
title='Training '+functionName+' - ' + projectName)
class Test:
""" 1) Extracts training data and the last source from the function
2) Number of parameters and training information is stored in a object functionData
3) Retrieves the list of functions availables as primitives, as tools for search the solution, each one with a training
4) Number of parameters and
"""
@functionMustExist
def GET (self, functionName):
functionId = functions.getFunctionIdByName(functionName)
function = functions.getFunction(functionId)
del(function.category)
values = []
(source, numNodes, totalError, err_msg) = functions.test(function, values)
return render.layout(view.test_function(function, source, numNodes, totalError, err_msg ),
title='Testing '+functionName+' - ' + projectName)
|
sinergatis/pathagar
|
refs/heads/master
|
books/tests/sample_epubs.py
|
2
|
"""Helper for dealing with the sample epubs stored on resources.
"""
import os
from collections import namedtuple
from django.conf import settings
RSRC_DIR = os.path.abspath(os.path.join(settings.BASE_DIR,
'resources/epubsamples'))
SampleEpub = namedtuple('SampleEpub', ('key', 'filename', 'fullpath',
'is_valid', 'has_cover'))
# List of sample epubs, with their interesting properties.
# TODO: the 'fullpath' property is a bit messy at the moment, should be
# calculated automatically based on 'filename'.
EPUBS_ALL = [SampleEpub('epub30-spec', 'epub30-spec.epub',
os.path.join(RSRC_DIR, 'epub30-spec.epub'),
True, True),
SampleEpub('figure-gallery', 'figure-gallery-bindings.epub',
os.path.join(RSRC_DIR, 'figure-gallery-bindings.epub'),
True, True),
# No cover
SampleEpub('hefty-water', 'hefty-water.epub',
os.path.join(RSRC_DIR, 'hefty-water.epub'),
True, False),
# Cover type = guide
SampleEpub('israelsailing', 'israelsailing.epub',
os.path.join(RSRC_DIR, 'israelsailing.epub'),
True, True),
SampleEpub('missing-cover', 'missing-cover.epub',
os.path.join(RSRC_DIR, 'missing-cover.epub'),
True, False),
SampleEpub('not-epub', 'not-an-epub.epub',
os.path.join(RSRC_DIR, 'not-an-epub.epub'),
False, False),
SampleEpub('not-epub-zip', 'not-an-epub-but-a-zip.epub',
os.path.join(RSRC_DIR, 'not-an-epub-but-a-zip.epub'),
False, False)]
# Convenience lists of epub, using 'key' as the identifier.
EPUBS_VALID = [epub for epub in EPUBS_ALL if epub.is_valid]
EPUBS_NOT_VALID = [epub for epub in EPUBS_ALL if not epub.is_valid]
EPUBS_COVER = [epub for epub in EPUBS_ALL if epub.has_cover]
EPUBS_NOT_COVER = [epub for epub in EPUBS_ALL if not epub.has_cover]
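# A minimal usage sketch (hypothetical test code, not part of this module):
# iterate over the valid samples and check that the referenced files exist.
#
# for epub in EPUBS_VALID:
#     assert os.path.isfile(epub.fullpath), epub.key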
|
jasonwee/asus-rt-n14uhp-mrtg
|
refs/heads/master
|
src/lesson_file_system/ospath_commonprefix.py
|
1
|
import os.path
paths = ['/one/two/three/four',
'/one/two/threefold',
'/one/two/three/',
]
for path in paths:
print('PATH:', path)
print()
print('PREFIX:', os.path.commonprefix(paths))
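# Note that os.path.commonprefix() compares the strings character by
# character, so for the paths above it returns '/one/two/three', which is not
# itself a directory common to all of them. A sketch of the component-wise
# alternative (assumes Python 3.4+, where os.path.commonpath() was added):
#
# print('COMMONPATH:', os.path.commonpath(paths)) # -> '/one/two'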
|
sodexis/odoo
|
refs/heads/8.0
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/functions.py
|
292
|
##########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
##############################################################################
import uno
import xmlrpclib
import re
import socket
import cPickle
import marshal
import tempfile
if __name__ != "package":
from gui import *
from logreport import *
from rpc import *
database="test"
uid = 1
def genTree(object, aList, insField, host, level=3, ending=None, ending_excl=None, recur=None, root='', actualroot=""):
if ending is None:
ending = []
if ending_excl is None:
ending_excl = []
if recur is None:
recur = []
try:
global url
sock=RPCSession(url)
global passwd
res = sock.execute(database, uid, passwd, object , 'fields_get')
key = res.keys()
key.sort()
for k in key:
if (not ending or res[k]['type'] in ending) and ((not ending_excl) or not (res[k]['type'] in ending_excl)):
insField.addItem(root+'/'+res[k]["string"],len(aList))
aList.append(actualroot+'/'+k)
if (res[k]['type'] in recur) and (level>0):
genTree(res[k]['relation'],aList,insField,host ,level-1, ending, ending_excl, recur,root+'/'+res[k]["string"],actualroot+'/'+k)
except:
obj=Logger()
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
obj.log_write('Function', LOG_ERROR, info)
def VariableScope(oTcur, insVariable, aObjectList, aComponentAdd, aItemList, sTableName=""):
if sTableName.find(".") != -1:
for i in range(len(aItemList)):
if aComponentAdd[i]==sTableName:
sLVal=aItemList[i][1][aItemList[i][1].find(",'")+2:aItemList[i][1].find("')")]
for j in range(len(aObjectList)):
if aObjectList[j][:aObjectList[j].find("(")] == sLVal:
insVariable.append(aObjectList[j])
VariableScope(oTcur,insVariable,aObjectList,aComponentAdd,aItemList, sTableName[:sTableName.rfind(".")])
else:
for i in range(len(aItemList)):
if aComponentAdd[i]==sTableName:
sLVal=aItemList[i][1][aItemList[i][1].find(",'")+2:aItemList[i][1].find("')")]
for j in range(len(aObjectList)):
if aObjectList[j][:aObjectList[j].find("(")] == sLVal and sLVal!="":
insVariable.append(aObjectList[j])
def getList(aObjectList, host, count):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
sMain=""
if not count == 0:
if count >= 1:
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
sItem=oPar.Items[1]
if sItem[sItem.find("(")+1:sItem.find(",")]=="objects":
sMain = sItem[sItem.find(",'")+2:sItem.find("')")]
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
sItem=oPar.Items[1]
if sItem[sItem.find("[[ ")+3:sItem.find("(")]=="repeatIn":
if sItem[sItem.find("(")+1:sItem.find(",")]=="objects":
aObjectList.append(sItem[sItem.rfind(",'")+2:sItem.rfind("')")] + "(" + docinfo.getUserFieldValue(3) + ")")
else:
sTemp=sItem[sItem.find("(")+1:sItem.find(",")]
if sMain == sTemp[:sTemp.find(".")]:
getRelation(docinfo.getUserFieldValue(3), sItem[sItem.find(".")+1:sItem.find(",")], sItem[sItem.find(",'")+2:sItem.find("')")],aObjectList,host)
else:
sPath=getPath(sItem[sItem.find("(")+1:sItem.find(",")], sMain)
getRelation(docinfo.getUserFieldValue(3), sPath, sItem[sItem.find(",'")+2:sItem.find("')")],aObjectList,host)
else:
aObjectList.append("List of " + docinfo.getUserFieldValue(3))
def getRelation(sRelName, sItem, sObjName, aObjectList, host):
global url
sock=RPCSession(url)
global passwd
res = sock.execute(database, uid, passwd, sRelName , 'fields_get')
key = res.keys()
for k in key:
if sItem.find(".") == -1:
if k == sItem:
aObjectList.append(sObjName + "(" + res[k]['relation'] + ")")
return 0
if k == sItem[:sItem.find(".")]:
getRelation(res[k]['relation'], sItem[sItem.find(".")+1:], sObjName,aObjectList,host)
def getPath(sPath, sMain):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
sItem=oPar.Items[1]
if sPath[:sPath.find(".")] == sMain:
break;
else:
res = re.findall('\\[\\[ *([a-zA-Z0-9_\.]+) *\\]\\]',sPath)
                if len(res) != 0:
if sItem[sItem.find(",'")+2:sItem.find("')")] == sPath[:sPath.find(".")]:
sPath = sItem[sItem.find("(")+1:sItem.find(",")] + sPath[sPath.find("."):]
getPath(sPath, sMain)
return sPath
def EnumDocument(aItemList, aComponentAdd):
desktop = getDesktop()
parent=""
bFlag = False
Doc =desktop.getCurrentComponent()
#oVC = Doc.CurrentController.getViewCursor()
oParEnum = Doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.Anchor.TextTable:
#parent = oPar.Anchor.TextTable.Name
getChildTable(oPar.Anchor.TextTable,aItemList,aComponentAdd)
elif oPar.Anchor.TextSection:
parent = oPar.Anchor.TextSection.Name
elif oPar.Anchor.Text:
parent = "Document"
sItem=oPar.Items[1].replace(' ',"")
if sItem[sItem.find("[[ ")+3:sItem.find("(")]=="repeatIn" and not oPar.Items in aItemList:
templist=oPar.Items[0],sItem
aItemList.append( templist )
aComponentAdd.append( parent )
def getChildTable(oPar, aItemList, aComponentAdd, sTableName=""):
sNames = oPar.getCellNames()
bEmptyTableFlag=True
for val in sNames:
oCell = oPar.getCellByName(val)
oCurEnum = oCell.createEnumeration()
while oCurEnum.hasMoreElements():
try:
oCur = oCurEnum.nextElement()
if oCur.supportsService("com.sun.star.text.TextTable"):
if sTableName=="":
getChildTable(oCur,aItemList,aComponentAdd,oPar.Name)
else:
getChildTable(oCur,aItemList,aComponentAdd,sTableName+"."+oPar.Name)
else:
oSecEnum = oCur.createEnumeration()
while oSecEnum.hasMoreElements():
oSubSection = oSecEnum.nextElement()
if oSubSection.supportsService("com.sun.star.text.TextField"):
bEmptyTableFlag=False
sItem=oSubSection.TextField.Items[1]
if sItem[sItem.find("[[ ")+3:sItem.find("(")]=="repeatIn":
if aItemList.__contains__(oSubSection.TextField.Items)==False:
aItemList.append(oSubSection.TextField.Items)
if sTableName=="":
if aComponentAdd.__contains__(oPar.Name)==False:
aComponentAdd.append(oPar.Name)
else:
if aComponentAdd.__contains__(sTableName+"."+oPar.Name)==False:
aComponentAdd.append(sTableName+"."+oPar.Name)
except:
obj=Logger()
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
obj.log_write('Function', LOG_ERROR, info)
if bEmptyTableFlag==True:
aItemList.append((u'',u''))
if sTableName=="":
if aComponentAdd.__contains__(oPar.Name)==False:
aComponentAdd.append(oPar.Name)
else:
if aComponentAdd.__contains__(sTableName+"."+oPar.Name)==False:
aComponentAdd.append(sTableName+"."+oPar.Name)
return 0
def getRecersiveSection(oCurrentSection, aSectionList):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
oParEnum=doc.getText().createEnumeration()
aSectionList.append(oCurrentSection.Name)
if oCurrentSection.ParentSection:
getRecersiveSection(oCurrentSection.ParentSection,aSectionList)
else:
return
def GetAFileName():
oFileDialog=None
iAccept=None
sPath=""
InitPath=""
oUcb=None
oFileDialog = createUnoService("com.sun.star.ui.dialogs.FilePicker")
oUcb = createUnoService("com.sun.star.ucb.SimpleFileAccess")
oFileDialog.appendFilter("Odoo Report File","*.sxw")
oFileDialog.setCurrentFilter("Odoo Report File")
if InitPath == "":
InitPath =tempfile.gettempdir()
#End If
if oUcb.exists(InitPath):
oFileDialog.setDisplayDirectory(InitPath)
#End If
iAccept = oFileDialog.execute()
if iAccept == 1:
sPath = oFileDialog.Files[0]
oFileDialog.dispose()
return sPath
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jjz/mezzanine
|
refs/heads/master
|
mezzanine/forms/signals.py
|
71
|
from __future__ import unicode_literals
from django.dispatch import Signal
form_invalid = Signal(providing_args=["form"])
form_valid = Signal(providing_args=["form", "entry"])
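# A minimal sketch of connecting a receiver (hypothetical handler, not part of
# this module); receivers are passed the keyword arguments listed in
# providing_args above.
#
# from django.dispatch import receiver
# from mezzanine.forms.signals import form_valid
#
# @receiver(form_valid)
# def handle_valid_submission(sender, form, entry, **kwargs):
#     ... # e.g. forward the saved entry to an external service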
|
mk-fg/feedjack
|
refs/heads/master
|
feedjack/admin.py
|
1
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from feedjack import models
class SiteAdmin(admin.ModelAdmin):
list_display = 'url', 'name'
filter_vertical = 'links',
admin.site.register(models.Site, SiteAdmin)
class PostProcessorTagInline(admin.TabularInline):
model = models.PostProcessorTag
extra = 1
class FeedAdmin(admin.ModelAdmin):
list_display = 'name', 'feed_url',\
'title', 'last_modified', 'immutable', 'is_active'
filter_horizontal = 'filters', 'post_processors'
fieldsets = (
(None,
{'fields': ('feed_url', 'name', 'shortname',
'immutable', 'skip_errors', 'is_active')}),
('Filtering',
{'classes':('collapse',), 'fields': ('filters_logic', 'filters')}),
(_('Fields updated automatically by Feedjack'),
{'classes':('collapse',), 'fields':
('title', 'tagline', 'link', 'etag', 'last_modified', 'last_checked') }) )
inlines = (PostProcessorTagInline,) # always at the end in stock admin templates
search_fields = 'feed_url', 'name', 'title'
list_filter= 'last_modified',
date_hierarchy = 'last_modified'
prepopulated_fields = {"shortname": ("name",)}
admin.site.register(models.Feed, FeedAdmin)
class PostAdmin(admin.ModelAdmin):
list_display = 'title', 'link', 'filtering_result', 'date_created'
search_fields = 'link', 'title'
date_hierarchy = 'date_created'
filter_vertical = 'tags',
list_filter = 'feed',
admin.site.register(models.Post, PostAdmin)
class SubscriberAdmin(admin.ModelAdmin):
list_display = 'name', 'site', 'feed'
search_fields = 'name',
list_filter = 'site',
admin.site.register(models.Subscriber, SubscriberAdmin)
class FilterBaseAdmin(admin.ModelAdmin):
list_display = 'name', 'handler_name',\
'crossref', 'crossref_span', 'handler_description'
ordering = 'name',
admin.site.register(models.FilterBase, FilterBaseAdmin)
class FilterAdmin(admin.ModelAdmin):
list_display = '__unicode__', 'parameter'
admin.site.register(models.Filter, FilterAdmin)
class PostProcessorBaseAdmin(admin.ModelAdmin):
list_display = 'name', 'handler_name'
ordering = 'name',
admin.site.register(models.PostProcessorBase, PostProcessorBaseAdmin)
class PostProcessorAdmin(admin.ModelAdmin):
list_display = '__unicode__', 'parameter'
admin.site.register(models.PostProcessor, PostProcessorAdmin)
admin.site.register(models.Link)
|
renegelinas/mi-instrument
|
refs/heads/master
|
mi/platform/responses.py
|
10
|
#!/usr/bin/env python
"""
@package ion.agents.platform.responses
@file ion/agents/platform/responses.py
@author Carlos Rueda
@brief Some constants for responses from platform agents/drivers.
"""
from mi.core.common import BaseEnum
__author__ = 'Carlos Rueda'
__license__ = 'Apache 2.0'
class NormalResponse(BaseEnum):
PORT_TURNED_ON = 'OK_PORT_TURNED_ON'
PORT_ALREADY_ON = 'OK_PORT_ALREADY_ON'
PORT_TURNED_OFF = 'OK_PORT_TURNED_OFF'
PORT_ALREADY_OFF = 'OK_PORT_ALREADY_OFF'
OVER_CURRENT_SET = 'OK_OVER_CURRENT_SET'
MISSION_STARTED = 'OK_MISSION_STARTED'
MISSION_STOPPED = 'OK_STOP_SUCCESSFUL'
class InvalidResponse(BaseEnum):
PLATFORM_ID = 'INVALID_PLATFORM_ID'
ATTRIBUTE_ID = 'INVALID_ATTRIBUTE_ID'
ATTRIBUTE_VALUE_OUT_OF_RANGE = 'ERROR_ATTRIBUTE_VALUE_OUT_OF_RANGE'
ATTRIBUTE_NOT_WRITABLE = 'ERROR_ATTRIBUTE_NOT_WRITABLE'
PORT_ID = 'INVALID_PORT_ID'
PORT_IS_ON = 'ERROR_PORT_IS_ON'
MISSION_ID = 'INVALID_MISSION_ID'
CANNOT_START_MISSION = 'ERROR_STARTING_MISSION'
FLAG = 'INVALID_FLAG'
DID_NOT_EXECUTE_STOP = 'ERROR_DID_NOT_EXECUTE_STOP'
PLATFORM_TYPE = 'INVALID_PLATFORM_TYPE'
EVENT_LISTENER_URL = 'INVALID_EVENT_LISTENER_URL'
EVENT_TYPE = 'INVALID_EVENT_TYPE'
|
LLNL/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/superlu-dist/package.py
|
3
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class SuperluDist(CMakePackage):
"""A general purpose library for the direct solution of large, sparse,
nonsymmetric systems of linear equations on high performance machines."""
homepage = "http://crd-legacy.lbl.gov/~xiaoye/SuperLU/"
url = "https://github.com/xiaoyeli/superlu_dist/archive/v6.0.0.tar.gz"
git = "https://github.com/xiaoyeli/superlu_dist.git"
maintainers = ['xiaoye', 'gchavez2', 'balay']
version('develop', branch='master')
version('xsdk-0.2.0', tag='xsdk-0.2.0')
version('6.3.1', sha256='3787c2755acd6aadbb4d9029138c293a7570a2ed228806676edcc7e1d3f5a1d3')
version('6.3.0', sha256='daf3264706caccae2b8fd5a572e40275f1e128fa235cb7c21ee2f8051c11af95')
version('6.2.0', sha256='15ad1badd81b41e37941dd124d06d3b92e51c4f0ff532ad23fb09c4ebfe6eb9e')
version('6.1.1', sha256='35d25cff592c724439870444ed45e1d1d15ca2c65f02ccd4b83a6d3c9d220bd1')
version('6.1.0', sha256='92c6d1424dd830ee2d1e7396a418a5f6645160aea8472e558c4e4bfe006593c4')
version('6.0.0', sha256='ff6cdfa0263d595708bbb6d11fb780915d8cfddab438db651e246ea292f37ee4')
version('5.4.0', sha256='3ac238fe082106a2c4dbaf0c22af1ff1247308ffa8f053de9d78c3ec7dd0d801')
version('5.3.0', sha256='49ed110bdef1e284a0181d6c7dd1fae3aa110cb45f67c6aa5cb791070304d670')
version('5.2.2', sha256='65cfb9ace9a81f7affac4ad92b9571badf0f10155b3468531b0fffde3bd8e727')
version('5.2.1', sha256='67cf3c46cbded4cee68e2a9d601c30ab13b08091c8cdad95b0a8e018b6d5d1f1')
version('5.1.3', sha256='58e3dfdb4ae6f8e3f6f3d5ee5e851af59b967c4483cdb3b15ccd1dbdf38f44f9')
version('5.1.2', sha256='e34865ad6696ee6a6d178b4a01c8e19103a7d241ba9de043603970d63b0ee1e2')
version('5.1.0', sha256='73f292ab748b590b6dd7469e6986aeb95d279b8b8b3da511c695a396bdbc996c')
version('5.0.0', sha256='78d1d6460ff16b3f71e4bcd7306397574d54d421249553ccc26567f00a10bfc6')
variant('int64', default=False, description='Build with 64 bit integers')
variant('openmp', default=False, description='Build with OpenMP support (needs a good multithreaded BLAS implementation for good performance)')
variant('shared', default=True, description='Build shared libraries')
depends_on('mpi')
depends_on('blas')
depends_on('lapack')
depends_on('parmetis')
depends_on('metis@5:')
patch('xl-611.patch', when='@:6.1.1 %xl')
patch('xl-611.patch', when='@:6.1.1 %xl_r')
def cmake_args(self):
spec = self.spec
args = [
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
'-DCMAKE_INSTALL_LIBDIR:STRING=%s' % self.prefix.lib,
'-DTPL_BLAS_LIBRARIES=%s' % spec['blas'].libs.joined(";"),
'-DTPL_LAPACK_LIBRARIES=%s' % spec['lapack'].libs.joined(";"),
'-DUSE_XSDK_DEFAULTS=YES',
'-DTPL_PARMETIS_LIBRARIES=%s' % spec['parmetis'].libs.ld_flags +
';' + spec['metis'].libs.ld_flags,
'-DTPL_PARMETIS_INCLUDE_DIRS=%s' %
spec['parmetis'].prefix.include +
';' + spec['metis'].prefix.include
]
if (spec.satisfies('%xl') or spec.satisfies('%xl_r')) and \
spec.satisfies('@:6.1.1'):
args.append('-DCMAKE_C_FLAGS=-DNoChange')
if '+int64' in spec:
args.append('-DXSDK_INDEX_SIZE=64')
else:
args.append('-DXSDK_INDEX_SIZE=32')
if '+openmp' in spec:
args.append('-Denable_openmp=ON')
else:
args.append('-Denable_openmp=OFF')
args.append('-DCMAKE_DISABLE_FIND_PACKAGE_OpenMP=ON')
if '+shared' in spec:
args.append('-DBUILD_SHARED_LIBS:BOOL=ON')
else:
args.append('-DBUILD_SHARED_LIBS:BOOL=OFF')
return args
def flag_handler(self, name, flags):
flags = list(flags)
if name == 'cxxflags':
flags.append(self.compiler.cxx11_flag)
if name == 'cflags' and '%pgi' not in self.spec:
flags.append('-std=c99')
return (None, None, flags)
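    # A usage sketch (assuming a working Spack installation): the variants
    # declared above map directly onto the spec syntax, e.g.
    #   spack install superlu-dist +openmp +int64
    # which cmake_args() above translates into -Denable_openmp=ON and
    # -DXSDK_INDEX_SIZE=64.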
|
mattn/ultisnips
|
refs/heads/master
|
test/test_ListSnippets.py
|
29
|
from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
# List Snippets {{{#
class _ListAllSnippets(_VimTest):
snippets = (('testblah', 'BLAAH', 'Say BLAH'),
('test', 'TEST ONE', 'Say tst one'),
('aloha', 'OHEEEE', 'Say OHEE'),
)
class ListAllAvailable_NothingTyped_ExpectCorrectResult(_ListAllSnippets):
keys = '' + LS + '3\n'
wanted = 'BLAAH'
class ListAllAvailable_SpaceInFront_ExpectCorrectResult(_ListAllSnippets):
keys = ' ' + LS + '3\n'
wanted = ' BLAAH'
class ListAllAvailable_BraceInFront_ExpectCorrectResult(_ListAllSnippets):
keys = '} ' + LS + '3\n'
wanted = '} BLAAH'
class ListAllAvailable_testtyped_ExpectCorrectResult(_ListAllSnippets):
keys = 'hallo test' + LS + '2\n'
wanted = 'hallo BLAAH'
class ListAllAvailable_testtypedSecondOpt_ExpectCorrectResult(
_ListAllSnippets):
keys = 'hallo test' + LS + '1\n'
wanted = 'hallo TEST ONE'
class ListAllAvailable_NonDefined_NoExpectionShouldBeRaised(_ListAllSnippets):
keys = 'hallo qualle' + LS + 'Hi'
wanted = 'hallo qualleHi'
# End: List Snippets #}}}
|
bratsche/Neutron-Drive
|
refs/heads/master
|
google_appengine/google/appengine/_internal/graphy/formatters.py
|
254
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains various formatters which can help format a chart
object. To use these, add them to your chart's list of formatters. For
example:
chart.formatters.append(InlineLegend)
chart.formatters.append(LabelSeparator(right=8))
Feel free to write your own formatter. Formatters are just callables that
modify the chart in some (hopefully useful) way. For example, the AutoColor
formatter makes sure each DataSeries has a color applied to it. The formatter
should take the chart to format as its only argument.
(The formatters work on a deepcopy of the user's chart, so modifications
shouldn't leak back into the user's original chart)
"""
def AutoLegend(chart):
"""Automatically fill out the legend based on series labels. This will only
  fill out the legend if there is at least one series with a label.
"""
chart._show_legend = False
labels = []
for series in chart.data:
if series.label is None:
labels.append('')
else:
labels.append(series.label)
chart._show_legend = True
if chart._show_legend:
chart._legend_labels = labels
class AutoColor(object):
"""Automatically add colors to any series without colors.
Object attributes:
colors: The list of colors (hex strings) to cycle through. You can modify
this list if you don't like the default colors.
"""
def __init__(self):
# TODO: Add a few more default colors.
# TODO: Add a default styles too, so if you don't specify color or
# style, you get a unique set of colors & styles for your data.
self.colors = ['0000ff', 'ff0000', '00dd00', '000000']
def __call__(self, chart):
index = -1
for series in chart.data:
if series.style.color is None:
index += 1
if index >= len(self.colors):
index = 0
series.style.color = self.colors[index]
class AutoScale(object):
"""If you don't set min/max on the dependent axes, this fills them in
automatically by calculating min/max dynamically from the data.
You can set just min or just max and this formatter will fill in the other
value for you automatically. For example, if you only set min then this will
set max automatically, but leave min untouched.
Charts can have multiple dependent axes (chart.left & chart.right, for
example.) If you set min/max on some axes but not others, then this formatter
copies your min/max to the un-set axes. For example, if you set up min/max on
only the right axis then your values will be automatically copied to the left
axis. (if you use different min/max values for different axes, the
  precedence is undefined. So don't do that.)
"""
def __init__(self, buffer=0.05):
"""Create a new AutoScale formatter.
Args:
buffer: percentage of extra space to allocate around the chart's axes.
"""
self.buffer = buffer
def __call__(self, chart):
"""Format the chart by setting the min/max values on its dependent axis."""
if not chart.data:
return # Nothing to do.
min_value, max_value = chart.GetMinMaxValues()
if None in (min_value, max_value):
return # No data. Nothing to do.
# Honor user's choice, if they've picked min/max.
for axis in chart.GetDependentAxes():
if axis.min is not None:
min_value = axis.min
if axis.max is not None:
max_value = axis.max
buffer = (max_value - min_value) * self.buffer # Stay away from edge.
for axis in chart.GetDependentAxes():
if axis.min is None:
axis.min = min_value - buffer
if axis.max is None:
axis.max = max_value + buffer
class LabelSeparator(object):
"""Adjust the label positions to avoid having them overlap. This happens for
any axis with minimum_label_spacing set.
"""
def __init__(self, left=None, right=None, bottom=None):
self.left = left
self.right = right
self.bottom = bottom
def __call__(self, chart):
self.AdjustLabels(chart.left, self.left)
self.AdjustLabels(chart.right, self.right)
self.AdjustLabels(chart.bottom, self.bottom)
def AdjustLabels(self, axis, minimum_label_spacing):
if minimum_label_spacing is None:
return
if len(axis.labels) <= 1: # Nothing to adjust
return
if axis.max is not None and axis.min is not None:
# Find the spacing required to fit all labels evenly.
# Don't try to push them farther apart than that.
maximum_possible_spacing = (axis.max - axis.min) / (len(axis.labels) - 1)
if minimum_label_spacing > maximum_possible_spacing:
minimum_label_spacing = maximum_possible_spacing
labels = [list(x) for x in zip(axis.label_positions, axis.labels)]
labels = sorted(labels, reverse=True)
# First pass from the top, moving colliding labels downward
for i in range(1, len(labels)):
if labels[i - 1][0] - labels[i][0] < minimum_label_spacing:
new_position = labels[i - 1][0] - minimum_label_spacing
if axis.min is not None and new_position < axis.min:
new_position = axis.min
labels[i][0] = new_position
# Second pass from the bottom, moving colliding labels upward
for i in range(len(labels) - 2, -1, -1):
if labels[i][0] - labels[i + 1][0] < minimum_label_spacing:
new_position = labels[i + 1][0] + minimum_label_spacing
if axis.max is not None and new_position > axis.max:
new_position = axis.max
labels[i][0] = new_position
# Separate positions and labels
label_positions, labels = zip(*labels)
axis.labels = labels
axis.label_positions = label_positions
def InlineLegend(chart):
"""Provide a legend for line charts by attaching labels to the right
  end of each line. Suppresses the regular legend.
"""
show = False
labels = []
label_positions = []
for series in chart.data:
if series.label is None:
labels.append('')
else:
labels.append(series.label)
show = True
label_positions.append(series.data[-1])
if show:
chart.right.min = chart.left.min
chart.right.max = chart.left.max
chart.right.labels = labels
chart.right.label_positions = label_positions
    chart._show_legend = False # Suppress the regular legend.
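# A minimal usage sketch (hypothetical chart object, not part of this module):
# formatters are simply appended to the chart's formatter list, as described
# in the module docstring above.
#
# chart.formatters.append(AutoLegend)
# chart.formatters.append(AutoColor())
# chart.formatters.append(AutoScale(buffer=0.1))
# chart.formatters.append(LabelSeparator(left=15))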
|
pibouv/ingeniciel
|
refs/heads/master
|
external/gtest/scripts/pump.py
|
2471
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various numbers of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True iff the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
# We found regex in lines
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
# We failed to find str in lines
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def StripMetaComments(str):
"""Strip meta comments from each line in the given string."""
# First, completely remove lines containing nothing but a meta
# comment, including the trailing \n.
str = re.sub(r'^\s*\$\$.*\n', '', str)
# Then, remove meta comments from contentful lines.
return re.sub(r'\s*\$\$.*', '', str)
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
print 'BAD'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirective(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line) or
# Don't break IWYU pragmas, either; that causes iwyu.py problems.
re.search(r'// IWYU pragma: ', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsSingleLineComment(line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapComment(line, output)
elif IsInPreprocessorDirective(output, line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapPreprocessorDirective(line, output)
elif IsMultiLineIWYUPragma(line):
output.append(line)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
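# Summary of the wrapping helpers above: BeautifyCode() re-wraps any generated line
# longer than 80 columns. Comments are wrapped at word boundaries, plain code is
# split preferably after ',' or ';', and preprocessor directives get a trailing ' \'
# continuation; long #include lines, header guards, and IWYU pragmas are kept as-is.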
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
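# Illustrative usage sketch: converting a Pump meta program held in a string rather
# than a file. The $range/$for snippet below and its exact expansion are hypothetical,
# shown only to exercise ConvertFromPumpSource() directly.
def ExampleConvert():
  """Expands a tiny Pump snippet; should yield roughly one '// field <i>' line per i."""
  pump_src = '$var n = 2\n$range i 1..n\n$for i [[// field $i\n]]\n'
  return ConvertFromPumpSource(pump_src)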
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
|
joopert/home-assistant
|
refs/heads/dev
|
homeassistant/components/cover/reproduce_state.py
|
5
|
"""Reproduce an Cover state."""
import asyncio
import logging
from typing import Iterable, Optional
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
ATTR_POSITION,
ATTR_TILT_POSITION,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_CLOSE_COVER_TILT,
SERVICE_OPEN_COVER,
SERVICE_OPEN_COVER_TILT,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING}
async def _async_reproduce_state(
hass: HomeAssistantType, state: State, context: Optional[Context] = None
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if (
cur_state.state == state.state
and cur_state.attributes.get(ATTR_CURRENT_POSITION)
== state.attributes.get(ATTR_CURRENT_POSITION)
and cur_state.attributes.get(ATTR_CURRENT_TILT_POSITION)
== state.attributes.get(ATTR_CURRENT_TILT_POSITION)
):
return
service_data = {ATTR_ENTITY_ID: state.entity_id}
service_data_tilting = {ATTR_ENTITY_ID: state.entity_id}
if cur_state.state != state.state or cur_state.attributes.get(
ATTR_CURRENT_POSITION
) != state.attributes.get(ATTR_CURRENT_POSITION):
# Open/Close
if state.state == STATE_CLOSED or state.state == STATE_CLOSING:
service = SERVICE_CLOSE_COVER
elif state.state == STATE_OPEN or state.state == STATE_OPENING:
if (
ATTR_CURRENT_POSITION in cur_state.attributes
and ATTR_CURRENT_POSITION in state.attributes
):
service = SERVICE_SET_COVER_POSITION
service_data[ATTR_POSITION] = state.attributes[ATTR_CURRENT_POSITION]
else:
service = SERVICE_OPEN_COVER
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
if (
ATTR_CURRENT_TILT_POSITION in state.attributes
and ATTR_CURRENT_TILT_POSITION in cur_state.attributes
and cur_state.attributes.get(ATTR_CURRENT_TILT_POSITION)
!= state.attributes.get(ATTR_CURRENT_TILT_POSITION)
):
# Tilt position
if state.attributes.get(ATTR_CURRENT_TILT_POSITION) == 100:
service_tilting = SERVICE_OPEN_COVER_TILT
elif state.attributes.get(ATTR_CURRENT_TILT_POSITION) == 0:
service_tilting = SERVICE_CLOSE_COVER_TILT
else:
service_tilting = SERVICE_SET_COVER_TILT_POSITION
service_data_tilting[ATTR_TILT_POSITION] = state.attributes[
ATTR_CURRENT_TILT_POSITION
]
await hass.services.async_call(
DOMAIN,
service_tilting,
service_data_tilting,
context=context,
blocking=True,
)
async def async_reproduce_states(
hass: HomeAssistantType, states: Iterable[State], context: Optional[Context] = None
) -> None:
"""Reproduce Cover states."""
# Reproduce states in parallel.
await asyncio.gather(
*(_async_reproduce_state(hass, state, context) for state in states)
)
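# Illustrative sketch (hypothetical entity id and attributes): one way the helper
# above might be invoked, for example by scene or script restore logic.
async def _example_restore_cover(hass: HomeAssistantType) -> None:
    """Restore one demo cover to an open state at 70 percent."""
    saved = State("cover.demo_window", STATE_OPEN, {ATTR_CURRENT_POSITION: 70})
    await async_reproduce_states(hass, [saved])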
|
Edraak/edraak-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/modulestore/mongo/base.py
|
1
|
"""
Modulestore backed by Mongodb.
Stores individual XModules as single documents with the following
structure:
{
'_id': <location.as_dict>,
'metadata': <dict containing all Scope.settings fields>
'definition': <dict containing all Scope.content fields>
'definition.children': <list of all child text_type(location)s>
}
"""
import copy
from datetime import datetime
from importlib import import_module
import logging
import pymongo
import re
import six
import sys
from uuid import uuid4
from bson.son import SON
from contracts import contract, new_contract
from fs.osfs import OSFS
from mongodb_proxy import autoretry_read
from opaque_keys.edx.keys import UsageKey, CourseKey, AssetKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator, LibraryLocator
from path import Path as path
from pytz import UTC
from xblock.core import XBlock
from xblock.exceptions import InvalidScopeError
from xblock.fields import Scope, ScopeIds, Reference, ReferenceList, ReferenceValueDict
from xblock.runtime import KvsFieldData
from xmodule.assetstore import AssetMetadata, CourseAssetsFromStorage
from xmodule.course_module import CourseSummary
from xmodule.error_module import ErrorDescriptor
from xmodule.errortracker import null_error_tracker, exc_info_to_str
from xmodule.exceptions import HeartbeatFailure
from xmodule.mako_module import MakoDescriptorSystem
from xmodule.mongo_utils import connect_to_mongodb, create_collection_index
from xmodule.modulestore import ModuleStoreWriteBase, ModuleStoreEnum, BulkOperationsMixin, BulkOpsRecord
from xmodule.modulestore.draft_and_published import ModuleStoreDraftAndPublished, DIRECT_ONLY_CATEGORIES
from xmodule.modulestore.edit_info import EditInfoRuntimeMixin
from xmodule.modulestore.exceptions import ItemNotFoundError, DuplicateCourseError, ReferentialIntegrityError
from xmodule.modulestore.inheritance import InheritanceMixin, inherit_metadata, InheritanceKeyValueStore
from xmodule.partitions.partitions_service import PartitionService
from xmodule.modulestore.xml import CourseLocationManager
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
from xmodule.services import SettingsService
log = logging.getLogger(__name__)
new_contract('CourseKey', CourseKey)
new_contract('AssetKey', AssetKey)
new_contract('AssetMetadata', AssetMetadata)
new_contract('long', long)
new_contract('BlockUsageLocator', BlockUsageLocator)
# sort order that returns DRAFT items first
SORT_REVISION_FAVOR_DRAFT = ('_id.revision', pymongo.DESCENDING)
# sort order that returns PUBLISHED items first
SORT_REVISION_FAVOR_PUBLISHED = ('_id.revision', pymongo.ASCENDING)
BLOCK_TYPES_WITH_CHILDREN = list(set(
name for name, class_ in XBlock.load_classes() if getattr(class_, 'has_children', False)
))
# Allow us to call _from_deprecated_(son|string) throughout the file
# pylint: disable=protected-access
# at module level, cache one instance of OSFS per filesystem root.
_OSFS_INSTANCE = {}
class MongoRevisionKey(object):
"""
Key Revision constants to use for Location and Usage Keys in the Mongo modulestore
Note: These values are persisted in the database, so should not be changed without migrations
"""
draft = 'draft'
published = None
class InvalidWriteError(Exception):
"""
Raised to indicate that writing to a particular key
in the KeyValueStore is disabled
"""
pass
class MongoKeyValueStore(InheritanceKeyValueStore):
"""
A KeyValueStore that maps keyed data access to one of the 3 data areas
known to the MongoModuleStore (data, children, and metadata)
"""
def __init__(self, data, parent, children, metadata):
super(MongoKeyValueStore, self).__init__()
if not isinstance(data, dict):
self._data = {'data': data}
else:
self._data = data
self._parent = parent
self._children = children
self._metadata = metadata
def get(self, key):
if key.scope == Scope.children:
return self._children
elif key.scope == Scope.parent:
return self._parent
elif key.scope == Scope.settings:
return self._metadata[key.field_name]
elif key.scope == Scope.content:
return self._data[key.field_name]
else:
raise InvalidScopeError(
key,
(Scope.children, Scope.parent, Scope.settings, Scope.content),
)
def set(self, key, value):
if key.scope == Scope.children:
self._children = value
elif key.scope == Scope.parent:
self._parent = value
elif key.scope == Scope.settings:
self._metadata[key.field_name] = value
elif key.scope == Scope.content:
self._data[key.field_name] = value
else:
raise InvalidScopeError(
key,
(Scope.children, Scope.settings, Scope.content),
)
def delete(self, key):
if key.scope == Scope.children:
self._children = []
elif key.scope == Scope.settings:
if key.field_name in self._metadata:
del self._metadata[key.field_name]
elif key.scope == Scope.content:
if key.field_name in self._data:
del self._data[key.field_name]
else:
raise InvalidScopeError(
key,
(Scope.children, Scope.settings, Scope.content),
)
def has(self, key):
if key.scope in (Scope.children, Scope.parent):
return True
elif key.scope == Scope.settings:
return key.field_name in self._metadata
elif key.scope == Scope.content:
return key.field_name in self._data
else:
return False
def __repr__(self):
return "MongoKeyValueStore{!r}<{!r}, {!r}>".format(
(self._data, self._parent, self._children, self._metadata),
self._fields,
self.inherited_settings
)
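# Illustrative sketch (hypothetical field values): constructing the key-value store
# directly and wrapping it for XBlock field access, mirroring what
# CachingDescriptorSystem.load_item does below.
def _example_mongo_kvs_field_data():
    """Return a KvsFieldData view over a hand-built MongoKeyValueStore."""
    kvs = MongoKeyValueStore(
        data={'data': '<p>Hello</p>'},
        parent=None,
        children=[],
        metadata={'display_name': 'Intro'},
    )
    return KvsFieldData(kvs)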
class CachingDescriptorSystem(MakoDescriptorSystem, EditInfoRuntimeMixin):
"""
A system that has a cache of module json that it will use to load modules
from, with a backup of calling to the underlying modulestore for more data
"""
def __repr__(self):
return "CachingDescriptorSystem{!r}".format((
self.modulestore,
unicode(self.course_id),
[unicode(key) for key in self.module_data.keys()],
self.default_class,
[unicode(key) for key in self.cached_metadata.keys()],
))
def __init__(self, modulestore, course_key, module_data, default_class, cached_metadata, **kwargs):
"""
modulestore: the module store that can be used to retrieve additional modules
course_key: the course for which everything in this runtime will be relative
module_data: a dict mapping Location -> json that was cached from the
underlying modulestore
default_class: The default_class to use when loading an
XModuleDescriptor from the module_data
cached_metadata: the cache for handling inheritance computation. internal use only
resources_fs: a filesystem, as per MakoDescriptorSystem
error_tracker: a function that logs errors for later display to users
render_template: a function for rendering templates, as per
MakoDescriptorSystem
"""
id_manager = CourseLocationManager(course_key)
kwargs.setdefault('id_reader', id_manager)
kwargs.setdefault('id_generator', id_manager)
super(CachingDescriptorSystem, self).__init__(
field_data=None,
load_item=self.load_item,
**kwargs
)
self.modulestore = modulestore
self.module_data = module_data
self.default_class = default_class
# cdodge: other Systems have a course_id attribute defined. To keep things consistent, let's
# define an attribute here as well, even though it's None
self.course_id = course_key
self.cached_metadata = cached_metadata
def load_item(self, location, for_parent=None): # pylint: disable=method-hidden
"""
Return an XModule instance for the specified location
"""
assert isinstance(location, UsageKey)
if location.run is None:
# self.module_data is keyed on locations that have full run information.
# If the supplied location is missing a run, then we will miss the cache and
# incur an additional query.
# TODO: make module_data a proper class that can handle this itself.
location = location.replace(course_key=self.modulestore.fill_in_run(location.course_key))
json_data = self.module_data.get(location)
if json_data is None:
module = self.modulestore.get_item(location, using_descriptor_system=self)
return module
else:
# load the module and apply the inherited metadata
try:
category = json_data['location']['category']
class_ = self.load_block_type(category)
definition = json_data.get('definition', {})
metadata = json_data.get('metadata', {})
for old_name, new_name in getattr(class_, 'metadata_translations', {}).items():
if old_name in metadata:
metadata[new_name] = metadata[old_name]
del metadata[old_name]
children = [
self._convert_reference_to_key(childloc)
for childloc in definition.get('children', [])
]
parent = None
if self.cached_metadata is not None:
# fish the parent out of here if it's available
parent_url = self.cached_metadata.get(unicode(location), {}).get('parent', {}).get(
ModuleStoreEnum.Branch.published_only if location.branch is None
else ModuleStoreEnum.Branch.draft_preferred
)
if parent_url:
parent = self._convert_reference_to_key(parent_url)
if not parent and category not in DETACHED_XBLOCK_TYPES.union(['course']):
# try looking it up just-in-time (but not if we're working with a detached block).
parent = self.modulestore.get_parent_location(
as_published(location),
ModuleStoreEnum.RevisionOption.published_only if location.branch is None
else ModuleStoreEnum.RevisionOption.draft_preferred
)
data = definition.get('data', {})
if isinstance(data, basestring):
data = {'data': data}
mixed_class = self.mixologist.mix(class_)
if data: # empty or None means no work
data = self._convert_reference_fields_to_keys(mixed_class, location.course_key, data)
metadata = self._convert_reference_fields_to_keys(mixed_class, location.course_key, metadata)
kvs = MongoKeyValueStore(
data,
parent,
children,
metadata,
)
field_data = KvsFieldData(kvs)
scope_ids = ScopeIds(None, category, location, location)
module = self.construct_xblock_from_class(class_, scope_ids, field_data, for_parent=for_parent)
if self.cached_metadata is not None:
# parent container pointers don't differentiate between draft and non-draft
# so when we do the lookup, we should do so with a non-draft location
non_draft_loc = as_published(location)
# Convert the serialized fields values in self.cached_metadata
# to python values
metadata_to_inherit = self.cached_metadata.get(unicode(non_draft_loc), {})
inherit_metadata(module, metadata_to_inherit)
module._edit_info = json_data.get('edit_info')
# migrate published_by and published_on if edit_info isn't present
if module._edit_info is None:
module._edit_info = {}
raw_metadata = json_data.get('metadata', {})
# published_on was previously stored as a list of time components instead of a datetime
if raw_metadata.get('published_date'):
module._edit_info['published_date'] = datetime(
*raw_metadata.get('published_date')[0:6]
).replace(tzinfo=UTC)
module._edit_info['published_by'] = raw_metadata.get('published_by')
for wrapper in self.modulestore.xblock_field_data_wrappers:
module._field_data = wrapper(module, module._field_data) # pylint: disable=protected-access
# decache any computed pending field settings
module.save()
return module
except Exception: # pylint: disable=broad-except
log.warning("Failed to load descriptor from %s", json_data, exc_info=True)
return ErrorDescriptor.from_json(
json_data,
self,
location,
error_msg=exc_info_to_str(sys.exc_info())
)
def _convert_reference_to_key(self, ref_string):
"""
Convert a single serialized UsageKey string in a ReferenceField into a UsageKey.
"""
key = UsageKey.from_string(ref_string)
return key.replace(run=self.modulestore.fill_in_run(key.course_key).run)
def _convert_reference_fields_to_keys(self, class_, course_key, jsonfields):
"""
Find all fields of type reference and convert the payload into UsageKeys
:param class_: the XBlock class
:param course_key: a CourseKey object for the given course
:param jsonfields: a dict of the jsonified version of the fields
"""
result = {}
for field_name, value in jsonfields.iteritems():
field = class_.fields.get(field_name)
if field is None:
continue
elif value is None:
result[field_name] = value
elif isinstance(field, Reference):
result[field_name] = self._convert_reference_to_key(value)
elif isinstance(field, ReferenceList):
result[field_name] = [
self._convert_reference_to_key(ele) for ele in value
]
elif isinstance(field, ReferenceValueDict):
result[field_name] = {
key: self._convert_reference_to_key(subvalue) for key, subvalue in value.iteritems()
}
else:
result[field_name] = value
return result
def lookup_item(self, location):
"""
Returns the JSON payload of the xblock at location.
"""
try:
json = self.module_data[location]
except KeyError:
json = self.modulestore._find_one(location)
self.module_data[location] = json
return json
def get_edited_by(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('edited_by')
def get_edited_on(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('edited_on')
def get_subtree_edited_by(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('subtree_edited_by')
def get_subtree_edited_on(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('subtree_edited_on')
def get_published_by(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('published_by')
def get_published_on(self, xblock):
"""
See :class: cms.lib.xblock.runtime.EditInfoRuntimeMixin
"""
return xblock._edit_info.get('published_date')
def applicable_aside_types(self, block):
# "old" mongo does support asides yet
return []
new_contract('CachingDescriptorSystem', CachingDescriptorSystem)
# The only thing using this w/ wildcards is contentstore.mongo for asset retrieval
def location_to_query(location, wildcard=True, tag='i4x'):
"""
Takes a Location and returns a SON object that will query for that location by subfields
rather than subdoc.
Fields in location that are None are ignored in the query.
If `wildcard` is True, then a None in a location is treated as a wildcard
query. Otherwise, it is searched for literally
"""
query = location.to_deprecated_son(prefix='_id.', tag=tag)
if wildcard:
for key, value in query.items():
# don't allow wildcards on revision, since public is set as None, so
            # it's ambiguous between None as a real value versus None=wildcard
if value is None and key != '_id.revision':
del query[key]
return query
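# Illustrative note on location_to_query above: with wildcard=True, a location whose
# 'name' is None yields a SON that keeps '_id.tag', '_id.org', '_id.course', and
# '_id.category' but drops '_id.name', so the query matches every name in the course.
# '_id.revision' is never dropped, and with wildcard=False a None value is matched
# literally.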
def as_draft(location):
"""
Returns the Location that is the draft for `location`
If the location is in the DIRECT_ONLY_CATEGORIES, returns itself
"""
if location.block_type in DIRECT_ONLY_CATEGORIES:
return location
return location.replace(revision=MongoRevisionKey.draft)
def as_published(location):
"""
Returns the Location that is the published version for `location`
"""
return location.replace(revision=MongoRevisionKey.published)
class MongoBulkOpsRecord(BulkOpsRecord):
"""
Tracks whether there've been any writes per course and disables inheritance generation
"""
def __init__(self):
super(MongoBulkOpsRecord, self).__init__()
self.dirty = False
class MongoBulkOpsMixin(BulkOperationsMixin):
"""
Mongo bulk operation support
"""
_bulk_ops_record_type = MongoBulkOpsRecord
def _start_outermost_bulk_operation(self, bulk_ops_record, course_key, ignore_case=False):
"""
Prevent updating the meta-data inheritance cache for the given course
"""
# ensure it starts clean
bulk_ops_record.dirty = False
def _end_outermost_bulk_operation(self, bulk_ops_record, structure_key):
"""
Restart updating the meta-data inheritance cache for the given course or library.
Refresh the meta-data inheritance cache now since it was temporarily disabled.
"""
dirty = False
if bulk_ops_record.dirty:
self.refresh_cached_metadata_inheritance_tree(structure_key)
dirty = True
bulk_ops_record.dirty = False # brand spanking clean now
return dirty
def _is_in_bulk_operation(self, course_id, ignore_case=False):
"""
Returns whether a bulk operation is in progress for the given course.
"""
return super(MongoBulkOpsMixin, self)._is_in_bulk_operation(
course_id.for_branch(None), ignore_case
)
class ParentLocationCache(dict):
"""
Dict-based object augmented with a more cache-like interface, for internal use.
"""
# pylint: disable=missing-docstring
@contract(key=unicode)
def has(self, key):
return key in self
@contract(key=unicode, value="BlockUsageLocator | None")
def set(self, key, value):
self[key] = value
@contract(value="BlockUsageLocator")
def delete_by_value(self, value):
keys_to_delete = [k for k, v in self.iteritems() if v == value]
for key in keys_to_delete:
del self[key]
class MongoModuleStore(ModuleStoreDraftAndPublished, ModuleStoreWriteBase, MongoBulkOpsMixin):
"""
A Mongodb backed ModuleStore
"""
# If no name is specified for the asset metadata collection, this name is used.
DEFAULT_ASSET_COLLECTION_NAME = 'assetstore'
# TODO (cpennington): Enable non-filesystem filestores
# pylint: disable=invalid-name
# pylint: disable=attribute-defined-outside-init
def __init__(self, contentstore, doc_store_config, fs_root, render_template,
default_class=None,
error_tracker=null_error_tracker,
i18n_service=None,
fs_service=None,
user_service=None,
signal_handler=None,
retry_wait_time=0.1,
**kwargs):
"""
        :param doc_store_config: must have host, db, and collection entries. Other common entries: port, tz_aware.
"""
super(MongoModuleStore, self).__init__(contentstore=contentstore, **kwargs)
def do_connection(
db, collection, host, port=27017, tz_aware=True, user=None, password=None, asset_collection=None, **kwargs
):
"""
Create & open the connection, authenticate, and provide pointers to the collection
"""
# Set a write concern of 1, which makes writes complete successfully to the primary
# only before returning. Also makes pymongo report write errors.
kwargs['w'] = 1
self.database = connect_to_mongodb(
db, host,
port=port, tz_aware=tz_aware, user=user, password=password,
retry_wait_time=retry_wait_time, **kwargs
)
self.collection = self.database[collection]
# Collection which stores asset metadata.
if asset_collection is None:
asset_collection = self.DEFAULT_ASSET_COLLECTION_NAME
self.asset_collection = self.database[asset_collection]
do_connection(**doc_store_config)
if default_class is not None:
module_path, _, class_name = default_class.rpartition('.')
class_ = getattr(import_module(module_path), class_name)
self.default_class = class_
else:
self.default_class = None
self.fs_root = path(fs_root)
self.error_tracker = error_tracker
self.render_template = render_template
self.i18n_service = i18n_service
self.fs_service = fs_service
self.user_service = user_service
self._course_run_cache = {}
self.signal_handler = signal_handler
def close_connections(self):
"""
Closes any open connections to the underlying database
"""
self.collection.database.client.close()
def _drop_database(self, database=True, collections=True, connections=True):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
If database is True, then this should drop the entire database.
Otherwise, if collections is True, then this should drop all of the collections used
by this modulestore.
Otherwise, the modulestore should remove all data from the collections.
If connections is True, then close the connection to the database as well.
"""
# drop the assets
super(MongoModuleStore, self)._drop_database(database, collections, connections)
connection = self.collection.database.client
if database:
connection.drop_database(self.collection.database.proxied_object)
elif collections:
self.collection.drop()
else:
self.collection.delete_many({})
if connections:
connection.close()
@autoretry_read()
def fill_in_run(self, course_key):
"""
In mongo some course_keys are used without runs. This helper function returns
a course_key with the run filled in, if the course does actually exist.
"""
if course_key.run is not None:
return course_key
cache_key = (course_key.org, course_key.course)
if cache_key not in self._course_run_cache:
matching_courses = list(self.collection.find(SON([
('_id.tag', 'i4x'),
('_id.org', course_key.org),
('_id.course', course_key.course),
('_id.category', 'course'),
])).limit(1))
if not matching_courses:
return course_key
self._course_run_cache[cache_key] = matching_courses[0]['_id']['name']
return course_key.replace(run=self._course_run_cache[cache_key])
def for_branch_setting(self, location):
"""
Returns the Location that is for the current branch setting.
"""
if location.block_type in DIRECT_ONLY_CATEGORIES:
return location.replace(revision=MongoRevisionKey.published)
if self.get_branch_setting() == ModuleStoreEnum.Branch.draft_preferred:
return location.replace(revision=MongoRevisionKey.draft)
return location.replace(revision=MongoRevisionKey.published)
def _get_parent_cache(self, branch):
"""
Provides a reference to one of the two branch-specific
ParentLocationCaches associated with the current request (if any).
"""
if self.request_cache is not None:
return self.request_cache.data.setdefault('parent-location-{}'.format(branch), ParentLocationCache())
else:
return ParentLocationCache()
def _compute_metadata_inheritance_tree(self, course_id):
'''
Find all inheritable fields from all xblocks in the course which may define inheritable data
'''
# get all collections in the course, this query should not return any leaf nodes
course_id = self.fill_in_run(course_id)
query = SON([
('_id.tag', 'i4x'),
('_id.org', course_id.org),
('_id.course', course_id.course),
('_id.category', {'$in': BLOCK_TYPES_WITH_CHILDREN})
])
# if we're only dealing in the published branch, then only get published containers
if self.get_branch_setting() == ModuleStoreEnum.Branch.published_only:
query['_id.revision'] = None
# we just want the Location, children, and inheritable metadata
record_filter = {'_id': 1, 'definition.children': 1}
# just get the inheritable metadata since that is all we need for the computation
        # this minimizes the data pushed over the wire
for field_name in InheritanceMixin.fields:
record_filter['metadata.{0}'.format(field_name)] = 1
# call out to the DB
resultset = self.collection.find(query, record_filter)
# it's ok to keep these as deprecated strings b/c the overall cache is indexed by course_key and this
# is a dictionary relative to that course
results_by_url = {}
root = None
# now go through the results and order them by the location url
for result in resultset:
# manually pick it apart b/c the db has tag and we want as_published revision regardless
location = as_published(BlockUsageLocator._from_deprecated_son(result['_id'], course_id.run))
location_url = unicode(location)
if location_url in results_by_url:
# found either draft or live to complement the other revision
# FIXME this is wrong. If the child was moved in draft from one parent to the other, it will
# show up under both in this logic: https://openedx.atlassian.net/browse/TNL-1075
existing_children = results_by_url[location_url].get('definition', {}).get('children', [])
additional_children = result.get('definition', {}).get('children', [])
total_children = existing_children + additional_children
# use set to get rid of duplicates. We don't care about order; so, it shouldn't matter.
results_by_url[location_url].setdefault('definition', {})['children'] = set(total_children)
else:
results_by_url[location_url] = result
if location.block_type == 'course':
root = location_url
# now traverse the tree and compute down the inherited metadata
metadata_to_inherit = {}
def _compute_inherited_metadata(url):
"""
Helper method for computing inherited metadata for a specific location url
"""
my_metadata = results_by_url[url].get('metadata', {})
            # go through all the children and recurse, but only if we have them
            # in the result set. Remember results will not contain leaf nodes
for child in results_by_url[url].get('definition', {}).get('children', []):
if child in results_by_url:
new_child_metadata = copy.deepcopy(my_metadata)
new_child_metadata.update(results_by_url[child].get('metadata', {}))
results_by_url[child]['metadata'] = new_child_metadata
metadata_to_inherit[child] = new_child_metadata
_compute_inherited_metadata(child)
else:
# this is likely a leaf node, so let's record what metadata we need to inherit
metadata_to_inherit[child] = my_metadata.copy()
# WARNING: 'parent' is not part of inherited metadata, but
# we're piggybacking on this recursive traversal to grab
# and cache the child's parent, as a performance optimization.
# The 'parent' key will be popped out of the dictionary during
# CachingDescriptorSystem.load_item
metadata_to_inherit[child].setdefault('parent', {})[self.get_branch_setting()] = url
if root is not None:
_compute_inherited_metadata(root)
return metadata_to_inherit
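    # Illustrative shape of the tree returned by _compute_metadata_inheritance_tree
    # above (hypothetical location url and field values):
    #
    #   {
    #       u'i4x://OrgX/Demo/html/intro': {
    #           'graded': False,
    #           'parent': {<branch setting>: u'i4x://OrgX/Demo/course/<run>'},
    #       },
    #       ...
    #   }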
def _get_cached_metadata_inheritance_tree(self, course_id, force_refresh=False):
'''
Compute the metadata inheritance for the course.
'''
tree = {}
course_id = self.fill_in_run(course_id)
if not force_refresh:
# see if we are first in the request cache (if present)
if self.request_cache is not None and unicode(course_id) in self.request_cache.data.get('metadata_inheritance', {}):
return self.request_cache.data['metadata_inheritance'][unicode(course_id)]
# then look in any caching subsystem (e.g. memcached)
if self.metadata_inheritance_cache_subsystem is not None:
tree = self.metadata_inheritance_cache_subsystem.get(unicode(course_id), {})
else:
logging.warning(
'Running MongoModuleStore without a metadata_inheritance_cache_subsystem. This is \
OK in localdev and testing environment. Not OK in production.'
)
if not tree:
# if not in subsystem, or we are on force refresh, then we have to compute
tree = self._compute_metadata_inheritance_tree(course_id)
# now write out computed tree to caching subsystem (e.g. memcached), if available
if self.metadata_inheritance_cache_subsystem is not None:
self.metadata_inheritance_cache_subsystem.set(unicode(course_id), tree)
# now populate a request_cache, if available. NOTE, we are outside of the
# scope of the above if: statement so that after a memcache hit, it'll get
# put into the request_cache
if self.request_cache is not None:
            # we can't assume the 'metadata_inheritance' part of the request cache dict has been
# defined
if 'metadata_inheritance' not in self.request_cache.data:
self.request_cache.data['metadata_inheritance'] = {}
self.request_cache.data['metadata_inheritance'][unicode(course_id)] = tree
return tree
def refresh_cached_metadata_inheritance_tree(self, course_id, runtime=None):
"""
        Refresh the cached metadata inheritance tree for the given course.
If given a runtime, it replaces the cached_metadata in that runtime. NOTE: failure to provide
a runtime may mean that some objects report old values for inherited data.
"""
course_id = course_id.for_branch(None)
if not self._is_in_bulk_operation(course_id):
# below is done for side effects when runtime is None
cached_metadata = self._get_cached_metadata_inheritance_tree(course_id, force_refresh=True)
if runtime:
runtime.cached_metadata = cached_metadata
def _clean_item_data(self, item):
"""
Renames the '_id' field in item to 'location'
"""
item['location'] = item['_id']
del item['_id']
@autoretry_read()
def _query_children_for_cache_children(self, course_key, items):
"""
Generate a pymongo in query for finding the items and return the payloads
"""
# first get non-draft in a round-trip
query = {
'_id': {'$in': [
UsageKey.from_string(item).map_into_course(course_key).to_deprecated_son() for item in items
]}
}
return list(self.collection.find(query))
def _cache_children(self, course_key, items, depth=0):
"""
Returns a dictionary mapping Location -> item data, populated with json data
for all descendents of items up to the specified depth.
(0 = no descendents, 1 = children, 2 = grandchildren, etc)
If depth is None, will load all the children.
This will make a number of queries that is linear in the depth.
"""
data = {}
to_process = list(items)
course_key = self.fill_in_run(course_key)
parent_cache = self._get_parent_cache(self.get_branch_setting())
while to_process and depth is None or depth >= 0:
children = []
for item in to_process:
self._clean_item_data(item)
item_location = BlockUsageLocator._from_deprecated_son(item['location'], course_key.run)
item_children = item.get('definition', {}).get('children', [])
children.extend(item_children)
for item_child in item_children:
parent_cache.set(item_child, item_location)
data[item_location] = item
if depth == 0:
break
# Load all children by id. See
# http://www.mongodb.org/display/DOCS/Advanced+Queries#AdvancedQueries-%24or
# for or-query syntax
to_process = []
if children:
to_process = self._query_children_for_cache_children(course_key, children)
# If depth is None, then we just recurse until we hit all the descendents
if depth is not None:
depth -= 1
return data
@contract(
course_key=CourseKey,
item=dict,
apply_cached_metadata=bool,
using_descriptor_system="None|CachingDescriptorSystem"
)
def _load_item(self, course_key, item, data_cache,
apply_cached_metadata=True, using_descriptor_system=None, for_parent=None):
"""
Load an XModuleDescriptor from item, using the children stored in data_cache
Arguments:
course_key (CourseKey): which course to load from
item (dict): A dictionary with the following keys:
location: The serialized UsageKey for the item to load
data_dir (optional): The directory name to use as the root data directory for this XModule
data_cache (dict): A dictionary mapping from UsageKeys to xblock field data
(this is the xblock data loaded from the database)
apply_cached_metadata (bool): Whether to use the cached metadata for inheritance
purposes.
using_descriptor_system (CachingDescriptorSystem): The existing CachingDescriptorSystem
to add data to, and to load the XBlocks from.
for_parent (:class:`XBlock`): The parent of the XBlock being loaded.
"""
course_key = self.fill_in_run(course_key)
location = BlockUsageLocator._from_deprecated_son(item['location'], course_key.run)
data_dir = getattr(item, 'data_dir', location.course)
root = self.fs_root / data_dir
resource_fs = _OSFS_INSTANCE.setdefault(root, OSFS(root, create=True))
cached_metadata = {}
if apply_cached_metadata:
cached_metadata = self._get_cached_metadata_inheritance_tree(course_key)
if using_descriptor_system is None:
services = {}
if self.i18n_service:
services["i18n"] = self.i18n_service
if self.fs_service:
services["fs"] = self.fs_service
if self.user_service:
services["user"] = self.user_service
services["settings"] = SettingsService()
if self.request_cache:
services["request_cache"] = self.request_cache
services["partitions"] = PartitionService(course_key)
system = CachingDescriptorSystem(
modulestore=self,
course_key=course_key,
module_data=data_cache,
default_class=self.default_class,
resources_fs=resource_fs,
error_tracker=self.error_tracker,
render_template=self.render_template,
cached_metadata=cached_metadata,
mixins=self.xblock_mixins,
select=self.xblock_select,
disabled_xblock_types=self.disabled_xblock_types,
services=services,
)
else:
system = using_descriptor_system
system.module_data.update(data_cache)
system.cached_metadata.update(cached_metadata)
item = system.load_item(location, for_parent=for_parent)
# TODO Once TNL-5092 is implemented, we can remove the following line
# of code. Until then, set the course_version field on the block to be
# consistent with the Split modulestore. Since Mongo modulestore doesn't
# maintain course versions set it to None.
item.course_version = None
return item
def _load_items(self, course_key, items, depth=0, using_descriptor_system=None, for_parent=None):
"""
Load a list of xmodules from the data in items, with children cached up
to specified depth
"""
course_key = self.fill_in_run(course_key)
data_cache = self._cache_children(course_key, items, depth)
        # if we are loading a course object and not prefetching children (depth == 0), then don't
        # bother with the metadata inheritance
return [
self._load_item(
course_key,
item,
data_cache,
using_descriptor_system=using_descriptor_system,
apply_cached_metadata=self._should_apply_cached_metadata(item, depth),
for_parent=for_parent,
)
for item in items
]
def _should_apply_cached_metadata(self, item, depth):
"""
Returns a boolean whether a particular query should trigger an application
of inherited metadata onto the item
"""
category = item['location']['category']
apply_cached_metadata = category not in DETACHED_XBLOCK_TYPES and \
not (category == 'course' and depth == 0)
return apply_cached_metadata
@autoretry_read()
def get_course_summaries(self, **kwargs):
"""
Returns a list of `CourseSummary`. This accepts an optional parameter of 'org' which
will apply an efficient filter to only get courses with the specified ORG
"""
def extract_course_summary(course):
"""
Extract course information from the course block for mongo.
"""
return {
field: course['metadata'][field]
for field in CourseSummary.course_info_fields
if field in course['metadata']
}
course_records = []
query = {'_id.category': 'course'}
course_org_filter = kwargs.get('org')
course_keys = kwargs.get('course_keys')
if course_keys:
course_queries = []
for course_key in course_keys:
course_query = {
'_id.{}'.format(value_attr): getattr(course_key, key_attr)
for key_attr, value_attr in {'org': 'org', 'course': 'course', 'run': 'name'}.iteritems()
}
course_query.update(query)
course_queries.append(course_query)
query = {'$or': course_queries}
elif course_org_filter:
query['_id.org'] = course_org_filter
course_records = self.collection.find(query, {'metadata': True})
courses_summaries = []
for course in course_records:
if not (course['_id']['org'] == 'edx' and course['_id']['course'] == 'templates'):
locator = CourseKey.from_string('/'.join(
[course['_id']['org'], course['_id']['course'], course['_id']['name']]
))
course_summary = extract_course_summary(course)
courses_summaries.append(
CourseSummary(locator, **course_summary)
)
return courses_summaries
@autoretry_read()
def get_courses(self, **kwargs):
'''
Returns a list of course descriptors. This accepts an optional parameter of 'org' which
will apply an efficient filter to only get courses with the specified ORG
'''
course_org_filter = kwargs.get('org')
if course_org_filter:
course_records = self.collection.find({'_id.category': 'course', '_id.org': course_org_filter})
else:
course_records = self.collection.find({'_id.category': 'course'})
base_list = sum(
[
self._load_items(
CourseKey.from_string('/'.join(
[course['_id']['org'], course['_id']['course'], course['_id']['name']]
)),
[course]
)
for course
# I tried to add '$and': [{'_id.org': {'$ne': 'edx'}}, {'_id.course': {'$ne': 'templates'}}]
# but it didn't do the right thing (it filtered all edx and all templates out)
in course_records
if not ( # TODO kill this
course['_id']['org'] == 'edx' and
course['_id']['course'] == 'templates'
)
],
[]
)
return [course for course in base_list if not isinstance(course, ErrorDescriptor)]
@autoretry_read()
def _find_one(self, location):
'''Look for a given location in the collection. If the item is not present, raise
ItemNotFoundError.
'''
assert isinstance(location, UsageKey)
item = self.collection.find_one(
{'_id': location.to_deprecated_son()}
)
if item is None:
raise ItemNotFoundError(location)
return item
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
return CourseLocator(org, course, run, deprecated=True)
def make_course_usage_key(self, course_key):
"""
Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for this modulestore
that matches the supplied course_key.
"""
return BlockUsageLocator(course_key, 'course', course_key.run)
def get_course(self, course_key, depth=0, **kwargs):
"""
Get the course with the given courseid (org/course/run)
"""
assert isinstance(course_key, CourseKey)
if not course_key.deprecated: # split course_key
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
course_key = self.fill_in_run(course_key)
location = course_key.make_usage_key('course', course_key.run)
try:
return self.get_item(location, depth=depth)
except ItemNotFoundError:
return None
@autoretry_read()
def has_course(self, course_key, ignore_case=False, **kwargs):
"""
Returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
If ignore_case is True, do a case insensitive search,
otherwise, do a case sensitive search
"""
assert isinstance(course_key, CourseKey)
if not course_key.deprecated: # split course_key
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
return False
if isinstance(course_key, LibraryLocator):
return None # Libraries require split mongo
course_key = self.fill_in_run(course_key)
location = course_key.make_usage_key('course', course_key.run)
if ignore_case:
course_query = location.to_deprecated_son('_id.')
for key in course_query.iterkeys():
if isinstance(course_query[key], basestring):
course_query[key] = re.compile(r"(?i)^{}$".format(course_query[key]))
else:
course_query = {'_id': location.to_deprecated_son()}
course = self.collection.find_one(course_query, projection={'_id': True})
if course:
return CourseKey.from_string('/'.join([
course['_id']['org'], course['_id']['course'], course['_id']['name']]
))
else:
return None
def has_item(self, usage_key):
"""
Returns True if location exists in this ModuleStore.
"""
try:
self._find_one(usage_key)
return True
except ItemNotFoundError:
return False
def get_item(self, usage_key, depth=0, using_descriptor_system=None, for_parent=None, **kwargs):
"""
Returns an XModuleDescriptor instance for the item at location.
If any segment of the location is None except revision, raises
xmodule.modulestore.exceptions.InsufficientSpecificationError
If no object is found at that location, raises
xmodule.modulestore.exceptions.ItemNotFoundError
Arguments:
usage_key: a :class:`.UsageKey` instance
depth (int): An argument that some module stores may use to prefetch
descendents of the queried modules for more efficient results later
in the request. The depth is counted in the number of
calls to get_children() to cache. None indicates to cache all descendents.
using_descriptor_system (CachingDescriptorSystem): The existing CachingDescriptorSystem
to add data to, and to load the XBlocks from.
"""
item = self._find_one(usage_key)
module = self._load_items(
usage_key.course_key,
[item],
depth,
using_descriptor_system=using_descriptor_system,
for_parent=for_parent,
)[0]
return module
@staticmethod
def _course_key_to_son(course_id, tag='i4x'):
"""
Generate the partial key to look up items relative to a given course
"""
return SON([
('_id.tag', tag),
('_id.org', course_id.org),
('_id.course', course_id.course),
])
@staticmethod
def _id_dict_to_son(id_dict):
"""
Generate the partial key to look up items relative to a given course
"""
return SON([
(key, id_dict[key])
for key in ('tag', 'org', 'course', 'category', 'name', 'revision')
])
@autoretry_read()
def get_items(
self,
course_id,
settings=None,
content=None,
key_revision=MongoRevisionKey.published,
qualifiers=None,
using_descriptor_system=None,
**kwargs
):
"""
Returns:
list of XModuleDescriptor instances for the matching items within the course with
the given course_id
NOTE: don't use this to look for courses
as the course_id is required. Use get_courses which is a lot faster anyway.
If you don't provide a value for revision, this limits the result to only ones in the
published course. Call this method on draft mongo store if you want to include drafts.
Args:
course_id (CourseKey): the course identifier
settings (dict): fields to look for which have settings scope. Follows same syntax
and rules as qualifiers below
content (dict): fields to look for which have content scope. Follows same syntax and
rules as qualifiers below.
key_revision (str): the revision of the items you're looking for.
MongoRevisionKey.draft - only returns drafts
MongoRevisionKey.published (equates to None) - only returns published
If you want one of each matching xblock but preferring draft to published, call this same method
on the draft modulestore with ModuleStoreEnum.RevisionOption.draft_preferred.
qualifiers (dict): what to look for within the course.
                Common qualifiers are ``category`` or any field name. If the target field is a list,
                then it searches for the given value in the list, not for list equivalence.
                For substring matching, pass a regex object.
For this modulestore, ``name`` is a commonly provided key (Location based stores)
This modulestore does not allow searching dates by comparison or edited_by, previous_version,
update_version info.
using_descriptor_system (CachingDescriptorSystem): The existing CachingDescriptorSystem
to add data to, and to load the XBlocks from.
"""
qualifiers = qualifiers.copy() if qualifiers else {} # copy the qualifiers (destructively manipulated here)
query = self._course_key_to_son(course_id)
query['_id.revision'] = key_revision
for field in ['category', 'name']:
if field in qualifiers:
qualifier_value = qualifiers.pop(field)
if isinstance(qualifier_value, list):
qualifier_value = {'$in': qualifier_value}
query['_id.' + field] = qualifier_value
for key, value in (settings or {}).iteritems():
query['metadata.' + key] = value
for key, value in (content or {}).iteritems():
query['definition.data.' + key] = value
if 'children' in qualifiers:
query['definition.children'] = qualifiers.pop('children')
query.update(qualifiers)
items = self.collection.find(
query,
sort=[SORT_REVISION_FAVOR_DRAFT],
)
modules = self._load_items(
course_id,
list(items),
using_descriptor_system=using_descriptor_system
)
return modules
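    # Illustrative usage of get_items above (hypothetical store instance and field
    # values): fetch every published problem in a course whose display_name mentions
    # "Exam":
    #
    #   store.get_items(
    #       course_key,
    #       qualifiers={'category': 'problem'},
    #       settings={'display_name': re.compile(r'Exam')},
    #   )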
def create_course(self, org, course, run, user_id, fields=None, **kwargs):
"""
Creates and returns the course.
Args:
org (str): the organization that owns the course
course (str): the name of the course
run (str): the name of the run
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a CourseDescriptor
Raises:
InvalidLocationError: If a course with the same org, course, and run already exists
"""
course_id = CourseKey.from_string('/'.join([org, course, run]))
# Check if a course with this org/course has been defined before (case-insensitive)
course_search_location = SON([
('_id.tag', 'i4x'),
('_id.org', re.compile(u'^{}$'.format(course_id.org), re.IGNORECASE)),
('_id.course', re.compile(u'^{}$'.format(course_id.course), re.IGNORECASE)),
('_id.category', 'course'),
])
courses = self.collection.find(course_search_location, projection={'_id': True})
if courses.count() > 0:
raise DuplicateCourseError(course_id, courses[0]['_id'])
with self.bulk_operations(course_id):
xblock = self.create_item(user_id, course_id, 'course', course_id.run, fields=fields, **kwargs)
# create any other necessary things as a side effect
super(MongoModuleStore, self).create_course(
org, course, run, user_id, runtime=xblock.runtime, **kwargs
)
return xblock
def create_xblock(
self, runtime, course_key, block_type, block_id=None, fields=None,
metadata=None, definition_data=None, **kwargs
):
"""
Create the new xblock but don't save it. Returns the new module.
:param runtime: if you already have an xblock from the course, the xblock.runtime value
:param fields: a dictionary of field names and values for the new xmodule
"""
if metadata is None:
metadata = {}
if definition_data is None:
definition_data = {}
# @Cale, should this use LocalId like we do in split?
if block_id is None:
if block_type == 'course':
block_id = course_key.run
else:
block_id = u'{}_{}'.format(block_type, uuid4().hex[:5])
if runtime is None:
services = {}
if self.i18n_service:
services["i18n"] = self.i18n_service
if self.fs_service:
services["fs"] = self.fs_service
if self.user_service:
services["user"] = self.user_service
services["partitions"] = PartitionService(course_key)
runtime = CachingDescriptorSystem(
modulestore=self,
module_data={},
course_key=course_key,
default_class=self.default_class,
resources_fs=None,
error_tracker=self.error_tracker,
render_template=self.render_template,
cached_metadata={},
mixins=self.xblock_mixins,
select=self.xblock_select,
services=services,
)
xblock_class = runtime.load_block_type(block_type)
location = course_key.make_usage_key(block_type, block_id)
dbmodel = self._create_new_field_data(block_type, location, definition_data, metadata)
xmodule = runtime.construct_xblock_from_class(
xblock_class,
# We're loading a descriptor, so student_id is meaningless
# We also don't have separate notions of definition and usage ids yet,
# so we use the location for both.
ScopeIds(None, block_type, location, location),
dbmodel,
for_parent=kwargs.get('for_parent'),
)
if fields is not None:
for key, value in fields.iteritems():
setattr(xmodule, key, value)
# decache any pending field settings from init
xmodule.save()
return xmodule
def create_item(self, user_id, course_key, block_type, block_id=None, **kwargs):
"""
Creates and saves a new item in a course.
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create
this item in
            block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
"""
if block_id is None:
if block_type == 'course':
block_id = course_key.run
else:
block_id = u'{}_{}'.format(block_type, uuid4().hex[:5])
runtime = kwargs.pop('runtime', None)
xblock = self.create_xblock(runtime, course_key, block_type, block_id, **kwargs)
xblock = self.update_item(xblock, user_id, allow_not_found=True)
return xblock
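    # Illustrative sketch only (the store/user/course_key names here are hypothetical,
    # not defined in this module): a caller would typically do
    #   new_block = store.create_item(user.id, course_key, 'html',
    #                                 fields={'display_name': 'Introduction'})
    # which generates a block_id such as 'html_1a2b3' and persists the block via update_item.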
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, **kwargs):
"""
        Creates and saves a new xblock as a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
            parent_usage_key: a :class:`~opaque_keys.edx.UsageKey` identifying the
block that this item should be parented under
            block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
"""
# attach to parent if given
parent = None
if parent_usage_key is not None:
parent = self.get_item(parent_usage_key)
kwargs.setdefault('for_parent', parent)
xblock = self.create_item(user_id, parent_usage_key.course_key, block_type, block_id=block_id, **kwargs)
if parent is not None and 'detached' not in xblock._class_tags:
# Originally added to support entrance exams (settings.FEATURES.get('ENTRANCE_EXAMS'))
if kwargs.get('position') is None:
parent.children.append(xblock.location)
else:
parent.children.insert(kwargs.get('position'), xblock.location)
self.update_item(parent, user_id, child_update=True)
return xblock
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
"""
Simple implementation of overwriting any existing xblock
"""
if block_type == 'course':
block_id = course_key.run
xblock = self.create_xblock(runtime, course_key, block_type, block_id, fields)
return self.update_item(xblock, user_id, allow_not_found=True)
def _get_course_for_item(self, location, depth=0):
'''
for a given Xmodule, return the course that it belongs to
Also we have to assert that this module maps to only one course item - it'll throw an
assert if not
'''
return self.get_course(location.course_key, depth)
def _update_single_item(self, location, update, allow_not_found=False):
"""
        Sets the given update on the specified item, raising ItemNotFoundError
        if the location doesn't exist
"""
bulk_record = self._get_bulk_ops_record(location.course_key)
bulk_record.dirty = True
# See http://www.mongodb.org/display/DOCS/Updating for
# atomic update syntax
result = self.collection.update(
{'_id': location.to_deprecated_son()},
{'$set': update},
multi=False,
upsert=allow_not_found,
w=1, # wait until primary commits
)
if result['n'] == 0:
raise ItemNotFoundError(location)
def _update_ancestors(self, location, update):
"""
Recursively applies update to all the ancestors of location
"""
parent = self._get_raw_parent_location(as_published(location), ModuleStoreEnum.RevisionOption.draft_preferred)
if parent:
self._update_single_item(parent, update)
self._update_ancestors(parent, update)
def update_item(self, xblock, user_id, allow_not_found=False, force=False, isPublish=False,
is_publish_root=True):
"""
Update the persisted version of xblock to reflect its current values.
xblock: which xblock to persist
user_id: who made the change (ignored for now by this modulestore)
        allow_not_found: whether to create a new object if one doesn't already exist, rather than raising an error
force: force is meaningless for this modulestore
isPublish: an internal parameter that indicates whether this update is due to a Publish operation, and
thus whether the item's published information should be updated.
is_publish_root: when publishing, this indicates whether xblock is the root of the publish and should
therefore propagate subtree edit info up the tree
"""
course_key = xblock.location.course_key
try:
definition_data = self._serialize_scope(xblock, Scope.content)
now = datetime.now(UTC)
payload = {
'definition.data': definition_data,
'metadata': self._serialize_scope(xblock, Scope.settings),
'edit_info': {
'edited_on': now,
'edited_by': user_id,
'subtree_edited_on': now,
'subtree_edited_by': user_id,
}
}
if isPublish:
payload['edit_info']['published_date'] = now
payload['edit_info']['published_by'] = user_id
elif 'published_date' in getattr(xblock, '_edit_info', {}):
payload['edit_info']['published_date'] = xblock._edit_info['published_date']
payload['edit_info']['published_by'] = xblock._edit_info['published_by']
if xblock.has_children:
children = self._serialize_scope(xblock, Scope.children)
payload.update({'definition.children': children['children']})
# Remove all old pointers to me, then add my current children back
parent_cache = self._get_parent_cache(self.get_branch_setting())
parent_cache.delete_by_value(xblock.location)
for child in xblock.children:
parent_cache.set(unicode(child), xblock.location)
self._update_single_item(xblock.scope_ids.usage_id, payload, allow_not_found=allow_not_found)
# update subtree edited info for ancestors
# don't update the subtree info for descendants of the publish root for efficiency
if not isPublish or (isPublish and is_publish_root):
ancestor_payload = {
'edit_info.subtree_edited_on': now,
'edit_info.subtree_edited_by': user_id
}
self._update_ancestors(xblock.scope_ids.usage_id, ancestor_payload)
# update the edit info of the instantiated xblock
xblock._edit_info = payload['edit_info']
# recompute (and update) the metadata inheritance tree which is cached
self.refresh_cached_metadata_inheritance_tree(xblock.scope_ids.usage_id.course_key, xblock.runtime)
# fire signal that we've written to DB
except ItemNotFoundError:
if not allow_not_found:
raise
elif not self.has_course(course_key):
raise ItemNotFoundError(course_key)
return xblock
def _serialize_scope(self, xblock, scope):
"""
Find all fields of type reference and convert the payload from UsageKeys to deprecated strings
        :param xblock: the XBlock instance whose fields are being serialized
        :param scope: the Scope whose fields should be serialized
        :return: a dict of the jsonified version of the fields
"""
jsonfields = {}
for field_name, field in xblock.fields.iteritems():
if field.scope == scope and field.is_set_on(xblock):
if field.scope == Scope.parent:
continue
elif isinstance(field, Reference):
jsonfields[field_name] = unicode(field.read_from(xblock))
elif isinstance(field, ReferenceList):
jsonfields[field_name] = [
unicode(ele) for ele in field.read_from(xblock)
]
elif isinstance(field, ReferenceValueDict):
jsonfields[field_name] = {
key: unicode(subvalue) for key, subvalue in field.read_from(xblock).iteritems()
}
else:
jsonfields[field_name] = field.read_json(xblock)
return jsonfields
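    # For illustration (hypothetical values): when called with Scope.children, a ReferenceList
    # field such as 'children' serializes to a list of deprecated location strings, e.g.
    #   {'children': [u'i4x://Org/Course/html/html_1a2b3', u'i4x://Org/Course/video/video_9f8e7']}
    # while non-reference fields simply fall through to field.read_json(xblock).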
def _get_non_orphan_parents(self, location, parents, revision):
"""
        Extract the non-orphan parents by traversing the list of possible parents, and remove the current
        location from any orphan parents' children to avoid the parent-calculation overhead next time.
"""
non_orphan_parents = []
# get bulk_record once rather than for each iteration
bulk_record = self._get_bulk_ops_record(location.course_key)
for parent in parents:
parent_loc = BlockUsageLocator._from_deprecated_son(parent['_id'], location.course_key.run)
# travel up the tree for orphan validation
ancestor_loc = parent_loc
while ancestor_loc is not None:
current_loc = ancestor_loc
ancestor_loc = self._get_raw_parent_location(as_published(current_loc), revision)
if ancestor_loc is None:
bulk_record.dirty = True
                    # The parent is an orphan, so remove all of its children (including
                    # the location whose parent we are looking for)
self.collection.update(
{'_id': parent_loc.to_deprecated_son()},
{'$set': {'definition.children': []}},
multi=False,
upsert=True,
)
elif ancestor_loc.block_type == 'course':
# once we reach the top location of the tree and if the location is not an orphan then the
# parent is not an orphan either
non_orphan_parents.append(parent_loc)
break
return non_orphan_parents
def _get_raw_parent_location(self, location, revision=ModuleStoreEnum.RevisionOption.published_only):
'''
Helper for get_parent_location that finds the location that is the parent of this location in this course,
but does NOT return a version agnostic location.
'''
assert location.branch is None
assert revision == ModuleStoreEnum.RevisionOption.published_only \
or revision == ModuleStoreEnum.RevisionOption.draft_preferred
parent_cache = self._get_parent_cache(self.get_branch_setting())
if parent_cache.has(unicode(location)):
return parent_cache.get(unicode(location))
# create a query with tag, org, course, and the children field set to the given location
query = self._course_key_to_son(location.course_key)
query['definition.children'] = unicode(location)
# if only looking for the PUBLISHED parent, set the revision in the query to None
if revision == ModuleStoreEnum.RevisionOption.published_only:
query['_id.revision'] = MongoRevisionKey.published
def cache_and_return(parent_loc): # pylint:disable=missing-docstring
parent_cache.set(unicode(location), parent_loc)
return parent_loc
# query the collection, sorting by DRAFT first
parents = list(
self.collection.find(query, {'_id': True}, sort=[SORT_REVISION_FAVOR_DRAFT])
)
if len(parents) == 0:
# no parents were found
return cache_and_return(None)
if revision == ModuleStoreEnum.RevisionOption.published_only:
if len(parents) > 1:
non_orphan_parents = self._get_non_orphan_parents(location, parents, revision)
if len(non_orphan_parents) == 0:
# no actual parent found
return cache_and_return(None)
if len(non_orphan_parents) > 1:
# should never have multiple PUBLISHED parents
raise ReferentialIntegrityError(
u"{} parents claim {}".format(len(parents), location)
)
else:
return cache_and_return(non_orphan_parents[0].replace(run=location.course_key.run))
else:
# return the single PUBLISHED parent
return cache_and_return(BlockUsageLocator._from_deprecated_son(parents[0]['_id'],
location.course_key.run))
else:
# there could be 2 different parents if
# (1) the draft item was moved or
# (2) the parent itself has 2 versions: DRAFT and PUBLISHED
# if there are multiple parents with version PUBLISHED then choose from non-orphan parents
all_parents = []
published_parents = 0
for parent in parents:
if parent['_id']['revision'] is None:
published_parents += 1
all_parents.append(parent)
# since we sorted by SORT_REVISION_FAVOR_DRAFT, the 0'th parent is the one we want
if published_parents > 1:
non_orphan_parents = self._get_non_orphan_parents(location, all_parents, revision)
return cache_and_return(non_orphan_parents[0].replace(run=location.course_key.run))
found_id = all_parents[0]['_id']
# don't disclose revision outside modulestore
return cache_and_return(BlockUsageLocator._from_deprecated_son(found_id, location.course_key.run))
def get_parent_location(self, location, revision=ModuleStoreEnum.RevisionOption.published_only, **kwargs):
'''
Find the location that is the parent of this location in this course.
Returns: version agnostic location (revision always None) as per the rest of mongo.
Args:
revision:
ModuleStoreEnum.RevisionOption.published_only
- return only the PUBLISHED parent if it exists, else returns None
ModuleStoreEnum.RevisionOption.draft_preferred
- return either the DRAFT or PUBLISHED parent,
preferring DRAFT, if parent(s) exists,
else returns None
'''
parent = self._get_raw_parent_location(location, revision)
if parent:
return parent
return None
def get_modulestore_type(self, course_key=None):
"""
Returns an enumeration-like type reflecting the type of this modulestore per ModuleStoreEnum.Type
Args:
course_key: just for signature compatibility
"""
return ModuleStoreEnum.Type.mongo
def get_orphans(self, course_key, **kwargs):
"""
Return an array of all of the locations for orphans in the course.
"""
course_key = self.fill_in_run(course_key)
detached_categories = [name for name, __ in XBlock.load_tagged_classes("detached")]
query = self._course_key_to_son(course_key)
query['_id.category'] = {'$nin': detached_categories}
all_items = self.collection.find(query)
all_reachable = set()
item_locs = set()
for item in all_items:
if item['_id']['category'] != 'course':
# It would be nice to change this method to return UsageKeys instead of the deprecated string.
item_locs.add(
unicode(as_published(BlockUsageLocator._from_deprecated_son(item['_id'], course_key.run)))
)
all_reachable = all_reachable.union(item.get('definition', {}).get('children', []))
item_locs -= all_reachable
return [UsageKey.from_string(item_loc).map_into_course(course_key) for item_loc in item_locs]
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
courses = self.collection.find(
{'_id.category': 'course', 'definition.data.wiki_slug': wiki_slug},
{'_id': True}
)
# the course's run == its name. It's the only xblock for which that's necessarily true.
return [
BlockUsageLocator._from_deprecated_son(course['_id'], course['_id']['name']).course_key
for course in courses
]
def _create_new_field_data(self, _category, _location, definition_data, metadata):
"""
To instantiate a new xmodule which will be saved later, set up the dbModel and kvs
"""
kvs = MongoKeyValueStore(
definition_data,
None,
[],
metadata,
)
field_data = KvsFieldData(kvs)
return field_data
def _find_course_assets(self, course_key):
"""
Internal; finds (or creates) course asset info about all assets for a particular course
Arguments:
course_key (CourseKey): course identifier
Returns:
CourseAssetsFromStorage object, wrapping the relevant Mongo doc. If asset metadata
exists, other keys will be the other asset types with values as lists of asset metadata.
"""
# Using the course_key, find or insert the course asset metadata document.
# A single document exists per course to store the course asset metadata.
course_key = self.fill_in_run(course_key)
if course_key.run is None:
log.warning(u'No run found for combo org "{}" course "{}" on asset request.'.format(
course_key.org, course_key.course
))
course_assets = None
else:
# Complete course key, so query for asset metadata.
course_assets = self.asset_collection.find_one(
{'course_id': unicode(course_key)},
)
doc_id = None if course_assets is None else course_assets['_id']
if course_assets is None:
# Check to see if the course is created in the course collection.
if self.get_course(course_key) is None:
raise ItemNotFoundError(course_key)
else:
# Course exists, so create matching assets document.
course_assets = {'course_id': unicode(course_key), 'assets': {}}
doc_id = self.asset_collection.insert(course_assets)
elif isinstance(course_assets['assets'], list):
# This record is in the old course assets format.
# Ensure that no data exists before updating the format.
assert len(course_assets['assets']) == 0
# Update the format to a dict.
self.asset_collection.update(
{'_id': doc_id},
{'$set': {'assets': {}}}
)
# Pass back wrapped 'assets' dict with the '_id' key added to it for document update purposes.
return CourseAssetsFromStorage(course_key, doc_id, course_assets['assets'])
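    # Shape of the backing Mongo document (illustrative, with hypothetical values):
    #   {'_id': ObjectId(...), 'course_id': u'org/course/run',
    #    'assets': {'asset': [<asset metadata dicts>], 'thumbnail': [...]}}
    # CourseAssetsFromStorage wraps the 'assets' dict together with the document's '_id'.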
def _make_mongo_asset_key(self, asset_type):
"""
        Given an asset type, form the key needed to update the proper embedded field in the Mongo doc.
"""
return 'assets.{}'.format(asset_type)
@contract(asset_metadata_list='list(AssetMetadata)', user_id='int|long')
def _save_asset_metadata_list(self, asset_metadata_list, user_id, import_only):
"""
        Internal; saves the info for a list of a particular course's assets.
Arguments:
asset_metadata_list (list(AssetMetadata)): list of data about several course assets
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if edited_on/by data should remain unchanged.
"""
course_key = asset_metadata_list[0].asset_id.course_key
course_assets = self._find_course_assets(course_key)
assets_by_type = self._save_assets_by_type(course_key, asset_metadata_list, course_assets, user_id, import_only)
# Build an update set with potentially multiple embedded fields.
updates_by_type = {}
for asset_type, assets in assets_by_type.iteritems():
updates_by_type[self._make_mongo_asset_key(asset_type)] = assets.as_list()
# Update the document.
self.asset_collection.update(
{'_id': course_assets.doc_id},
{'$set': updates_by_type}
)
return True
@contract(asset_metadata='AssetMetadata', user_id='int|long')
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves the info for a particular course's asset.
Arguments:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
return self._save_asset_metadata_list([asset_metadata, ], user_id, import_only)
@contract(asset_metadata_list='list(AssetMetadata)', user_id='int|long')
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
"""
Saves the asset metadata for each asset in a list of asset metadata.
Optimizes the saving of many assets.
Args:
            asset_metadata_list (list(AssetMetadata)): list of data about the course assets
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
return self._save_asset_metadata_list(asset_metadata_list, user_id, import_only)
@contract(source_course_key='CourseKey', dest_course_key='CourseKey', user_id='int|long')
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
If dest_course already has assets, this removes the previous value.
It doesn't combine the assets in dest.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
"""
source_assets = self._find_course_assets(source_course_key)
dest_assets = {'assets': source_assets.asset_md.copy(), 'course_id': six.text_type(dest_course_key)}
self.asset_collection.delete_many({'course_id': six.text_type(dest_course_key)})
# Update the document.
self.asset_collection.insert(dest_assets)
@contract(asset_key='AssetKey', attr_dict=dict, user_id='int|long')
def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
"""
Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr_dict (dict): attribute: value pairs to set
Raises:
ItemNotFoundError if no such item exists
            AttributeError if attr is one of the built-in attrs.
"""
course_assets, asset_idx = self._find_course_asset(asset_key)
if asset_idx is None:
raise ItemNotFoundError(asset_key)
# Form an AssetMetadata.
all_assets = course_assets[asset_key.asset_type]
md = AssetMetadata(asset_key, asset_key.path)
md.from_storable(all_assets[asset_idx])
md.update(attr_dict)
# Generate a Mongo doc from the metadata and update the course asset info.
all_assets[asset_idx] = md.to_storable()
self.asset_collection.update(
{'_id': course_assets.doc_id},
{"$set": {self._make_mongo_asset_key(asset_key.asset_type): all_assets}}
)
@contract(asset_key='AssetKey', user_id='int|long')
def delete_asset_metadata(self, asset_key, user_id):
"""
Internal; deletes a single asset's metadata.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
course_assets, asset_idx = self._find_course_asset(asset_key)
if asset_idx is None:
return 0
all_asset_info = course_assets[asset_key.asset_type]
all_asset_info.pop(asset_idx)
# Update the document.
self.asset_collection.update(
{'_id': course_assets.doc_id},
{'$set': {self._make_mongo_asset_key(asset_key.asset_type): all_asset_info}}
)
return 1
@contract(course_key='CourseKey', user_id='int|long')
def delete_all_asset_metadata(self, course_key, user_id):
"""
Delete all of the assets which use this course_key as an identifier.
Arguments:
course_key (CourseKey): course_identifier
"""
# Using the course_id, find the course asset metadata document.
# A single document exists per course to store the course asset metadata.
try:
course_assets = self._find_course_assets(course_key)
self.asset_collection.delete_many({'_id': course_assets.doc_id})
except ItemNotFoundError:
# When deleting asset metadata, if a course's asset metadata is not present, no big deal.
pass
def heartbeat(self):
"""
Check that the db is reachable.
"""
try:
# The ismaster command is cheap and does not require auth.
self.database.client.admin.command('ismaster')
return {ModuleStoreEnum.Type.mongo: True}
except pymongo.errors.ConnectionFailure:
raise HeartbeatFailure("Can't connect to {}".format(self.database.name), 'mongo')
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
# Because we often query for some subset of the id, we define this index:
create_collection_index(
self.collection,
[
('_id.tag', pymongo.ASCENDING),
('_id.org', pymongo.ASCENDING),
('_id.course', pymongo.ASCENDING),
('_id.category', pymongo.ASCENDING),
('_id.name', pymongo.ASCENDING),
('_id.revision', pymongo.ASCENDING),
],
background=True
)
# Because we often scan for all category='course' regardless of the value of the other fields:
create_collection_index(self.collection, '_id.category', background=True)
# Because lms calls get_parent_locations frequently (for path generation):
create_collection_index(self.collection, 'definition.children', sparse=True, background=True)
# To allow prioritizing draft vs published material
create_collection_index(self.collection, '_id.revision', background=True)
# Some overrides that still need to be implemented by subclasses
def convert_to_draft(self, location, user_id):
raise NotImplementedError()
def delete_item(self, location, user_id, **kwargs):
raise NotImplementedError()
def has_changes(self, xblock):
raise NotImplementedError()
def has_published_version(self, xblock):
raise NotImplementedError()
def publish(self, location, user_id):
raise NotImplementedError()
def revert_to_published(self, location, user_id):
raise NotImplementedError()
def unpublish(self, location, user_id):
raise NotImplementedError()
|
petkan/linux
|
refs/heads/master
|
tools/perf/scripts/python/export-to-postgresql.py
|
293
|
# export-to-postgresql.py: export perf data to a postgresql database
# Copyright (c) 2014, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
import datetime
# To use this script you will need to have installed the python-pyside package, which
# provides LGPL-licensed Python bindings for Qt. You will also need the package
# libqt4-sql-psql for Qt postgresql support.
#
# The script assumes postgresql is running on the local machine and that the
# user has postgresql permissions to create databases. Examples of installing
# postgresql and adding such a user are:
#
# fedora:
#
# $ sudo yum install postgresql postgresql-server python-pyside qt-postgresql
# $ sudo su - postgres -c initdb
# $ sudo service postgresql start
# $ sudo su - postgres
# $ createuser <your user id here>
# Shall the new role be a superuser? (y/n) y
#
# ubuntu:
#
# $ sudo apt-get install postgresql python-pyside.qtsql libqt4-sql-psql
# $ sudo su - postgres
# $ createuser -s <your user id here>
#
# An example of using this script with Intel PT:
#
# $ perf record -e intel_pt//u ls
# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls
# 2015-05-29 12:49:23.464364 Creating database...
# 2015-05-29 12:49:26.281717 Writing to intermediate files...
# 2015-05-29 12:49:27.190383 Copying to database...
# 2015-05-29 12:49:28.140451 Removing intermediate files...
# 2015-05-29 12:49:28.147451 Adding primary keys
# 2015-05-29 12:49:28.655683 Adding foreign keys
# 2015-05-29 12:49:29.365350 Done
#
# To browse the database, psql can be used e.g.
#
# $ psql pt_example
# pt_example=# select * from samples_view where id < 100;
# pt_example=# \d+
# pt_example=# \d+ samples_view
# pt_example=# \q
#
# An example of using the database is provided by the script
# call-graph-from-postgresql.py. Refer to that script for details.
#
# Tables:
#
# The tables largely correspond to perf tools' data structures. They are largely self-explanatory.
#
# samples
#
# 'samples' is the main table. It represents what instruction was executing at a point in time
# when something (a selected event) happened. The memory address is the instruction pointer or 'ip'.
#
# calls
#
# 'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'.
# 'calls' is only created when the 'calls' option to this script is specified.
#
# call_paths
#
# 'call_paths' represents all the call stacks. Each 'call' has an associated record in 'call_paths'.
# 'calls_paths' is only created when the 'calls' option to this script is specified.
#
# branch_types
#
# 'branch_types' provides descriptions for each type of branch.
#
# comm_threads
#
# 'comm_threads' shows how 'comms' relates to 'threads'.
#
# comms
#
# 'comms' contains a record for each 'comm' - the name given to the executable that is running.
#
# dsos
#
# 'dsos' contains a record for each executable file or library.
#
# machines
#
# 'machines' can be used to distinguish virtual machines if virtualization is supported.
#
# selected_events
#
# 'selected_events' contains a record for each kind of event that has been sampled.
#
# symbols
#
# 'symbols' contains a record for each symbol. Only symbols that have samples are present.
#
# threads
#
# 'threads' contains a record for each thread.
#
# Views:
#
# Most of the tables have views for more friendly display. The views are:
#
# calls_view
# call_paths_view
# comm_threads_view
# dsos_view
# machines_view
# samples_view
# symbols_view
# threads_view
#
# More examples of browsing the database with psql:
# Note that some of the examples are not the most optimal SQL queries.
# Note that call information is only available if the script's 'calls' option has been used.
#
# Top 10 function calls (not aggregated by symbol):
#
# SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10;
#
# Top 10 function calls (aggregated by symbol):
#
# SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,
# SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count
# FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10;
#
# Note that the branch count gives a rough estimation of cpu usage, so functions
# that took a long time but have a relatively low branch count must have spent time
# waiting.
#
# Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'):
#
# SELECT * FROM symbols_view WHERE name LIKE '%alloc%';
#
# Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187):
#
# SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10;
#
# Show function calls made by a function in the same context (i.e. same call path) (e.g. one with call_path_id 254):
#
# SELECT * FROM calls_view WHERE parent_call_path_id = 254;
#
# Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670)
#
# SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%';
#
# Show transactions:
#
# SELECT * FROM samples_view WHERE event = 'transactions';
#
# Note that transaction start has 'in_tx' true whereas transaction end has 'in_tx' false.
# Transaction aborts have branch_type_name 'transaction abort'.
#
# Show transaction aborts:
#
# SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort';
#
# Printing a call stack requires walking the call_paths table, for example with this Python script:
# #!/usr/bin/python2
#
# import sys
# from PySide.QtSql import *
#
# if __name__ == '__main__':
# if (len(sys.argv) < 3):
# print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>"
# raise Exception("Too few arguments")
# dbname = sys.argv[1]
# call_path_id = sys.argv[2]
# db = QSqlDatabase.addDatabase('QPSQL')
# db.setDatabaseName(dbname)
# if not db.open():
# raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
# query = QSqlQuery(db)
# print " id ip symbol_id symbol dso_id dso_short_name"
# while call_path_id != 0 and call_path_id != 1:
# ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id))
# if not ret:
# raise Exception("Query failed: " + query.lastError().text())
# if not query.next():
# raise Exception("Query failed")
# print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5))
# call_path_id = query.value(6)
from PySide.QtSql import *
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
PQfinish = libpq.PQfinish
PQstatus = libpq.PQstatus
PQexec = libpq.PQexec
PQexec.restype = c_void_p
PQresultStatus = libpq.PQresultStatus
PQputCopyData = libpq.PQputCopyData
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
def usage():
print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
print >> sys.stderr, "where: columns 'all' or 'branches'"
print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
print >> sys.stderr, " callchains 'callchains' => create call_paths table"
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
usage()
dbname = sys.argv[1]
if (len(sys.argv) >= 3):
columns = sys.argv[2]
else:
columns = "all"
if columns not in ("all", "branches"):
usage()
branches = (columns == "branches")
for i in range(3,len(sys.argv)):
if (sys.argv[i] == "calls"):
perf_db_export_calls = True
elif (sys.argv[i] == "callchains"):
perf_db_export_callchains = True
else:
usage()
output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
os.mkdir(output_dir_name)
def do_query(q, s):
if (q.exec_(s)):
return
raise Exception("Query failed: " + q.lastError().text())
print datetime.datetime.today(), "Creating database..."
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
db.setDatabaseName('postgres')
db.open()
try:
do_query(query, 'CREATE DATABASE ' + dbname)
except:
os.rmdir(output_dir_name)
raise
query.finish()
query.clear()
db.close()
db.setDatabaseName(dbname)
db.open()
query = QSqlQuery(db)
do_query(query, 'SET client_min_messages TO WARNING')
do_query(query, 'CREATE TABLE selected_events ('
'id bigint NOT NULL,'
'name varchar(80))')
do_query(query, 'CREATE TABLE machines ('
'id bigint NOT NULL,'
'pid integer,'
'root_dir varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
'id bigint NOT NULL,'
'machine_id bigint,'
'process_id bigint,'
'pid integer,'
'tid integer)')
do_query(query, 'CREATE TABLE comms ('
'id bigint NOT NULL,'
'comm varchar(16))')
do_query(query, 'CREATE TABLE comm_threads ('
'id bigint NOT NULL,'
'comm_id bigint,'
'thread_id bigint)')
do_query(query, 'CREATE TABLE dsos ('
'id bigint NOT NULL,'
'machine_id bigint,'
'short_name varchar(256),'
'long_name varchar(4096),'
'build_id varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
'id bigint NOT NULL,'
'dso_id bigint,'
'sym_start bigint,'
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'period bigint,'
'weight bigint,'
'transaction bigint,'
'data_src bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer)')
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
'id,'
'pid,'
'root_dir,'
'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
' FROM machines')
do_query(query, 'CREATE VIEW dsos_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'short_name,'
'long_name,'
'build_id'
' FROM dsos')
do_query(query, 'CREATE VIEW symbols_view AS '
'SELECT '
'id,'
'name,'
'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
'dso_id,'
'sym_start,'
'sym_end,'
'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
' FROM symbols')
do_query(query, 'CREATE VIEW threads_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'process_id,'
'pid,'
'tid'
' FROM threads')
do_query(query, 'CREATE VIEW comm_threads_view AS '
'SELECT '
'comm_id,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
' FROM comm_threads')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE VIEW call_paths_view AS '
'SELECT '
'c.id,'
'to_hex(c.ip) AS ip,'
'c.symbol_id,'
'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
'(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,'
'c.parent_id,'
'to_hex(p.ip) AS parent_ip,'
'p.symbol_id AS parent_symbol_id,'
'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
'(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name'
' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
if perf_db_export_calls:
do_query(query, 'CREATE VIEW calls_view AS '
'SELECT '
'calls.id,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'call_path_id,'
'to_hex(ip) AS ip,'
'symbol_id,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'call_time,'
'return_time,'
'return_time - call_time AS elapsed_time,'
'branch_count,'
'call_id,'
'return_id,'
'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
'parent_call_path_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
'id,'
'time,'
'cpu,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
'to_hex(ip) AS ip_hex,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'sym_offset,'
'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
'to_hex(to_ip) AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
' FROM samples')
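# The intermediate files use PostgreSQL's binary COPY format: an 11-byte signature
# ("PGCOPY\n\377\r\n\0") followed by a 32-bit flags field and a 32-bit header-extension
# length (both zero here), then one record per tuple consisting of a 16-bit field count
# and, for each field, a 32-bit length followed by the value, and finally a 16-bit
# trailer of 0xffff.  The struct.pack() calls in the *_table() functions below emit
# exactly this layout.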
file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
file_trailer = "\377\377"
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
file = open(path_name, "w+")
file.write(file_header)
return file
def close_output_file(file):
file.write(file_trailer)
file.close()
def copy_output_file_direct(file, table_name):
close_output_file(file)
sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
do_query(query, sql)
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
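# Sketch of the libpq COPY protocol used below: PQexec() is expected to return status
# PGRES_COPY_IN (value 4 in libpq's ExecStatusType enum), after which the data is
# streamed with PQputCopyData() and terminated with PQputCopyEnd(), both of which
# return 1 on success.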
def copy_output_file(file, table_name):
conn = PQconnectdb("dbname = " + dbname)
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
res = PQexec(conn, sql)
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
while (len(data)):
ret = PQputCopyData(conn, data, len(data))
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
data = file.read(65536)
ret = PQputCopyEnd(conn, None)
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
PQfinish(conn)
def remove_output_file(file):
name = file.name
file.close()
os.unlink(name)
evsel_file = open_output_file("evsel_table.bin")
machine_file = open_output_file("machine_table.bin")
thread_file = open_output_file("thread_table.bin")
comm_file = open_output_file("comm_table.bin")
comm_thread_file = open_output_file("comm_thread_table.bin")
dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
if perf_db_export_calls or perf_db_export_callchains:
call_path_file = open_output_file("call_path_table.bin")
if perf_db_export_calls:
call_file = open_output_file("call_table.bin")
def trace_begin():
print datetime.datetime.today(), "Writing to intermediate files..."
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
thread_table(0, 0, 0, -1, -1)
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
unhandled_count = 0
def trace_end():
print datetime.datetime.today(), "Copying to database..."
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
copy_output_file(comm_file, "comms")
copy_output_file(comm_thread_file, "comm_threads")
copy_output_file(dso_file, "dsos")
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
if perf_db_export_calls or perf_db_export_callchains:
copy_output_file(call_path_file, "call_paths")
if perf_db_export_calls:
copy_output_file(call_file, "calls")
print datetime.datetime.today(), "Removing intermediate files..."
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
remove_output_file(comm_file)
remove_output_file(comm_thread_file)
remove_output_file(dso_file)
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
if perf_db_export_calls or perf_db_export_callchains:
remove_output_file(call_path_file)
if perf_db_export_calls:
remove_output_file(call_file)
os.rmdir(output_dir_name)
print datetime.datetime.today(), "Adding primary keys"
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
print datetime.datetime.today(), "Adding foreign keys"
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE comm_threads '
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE dsos '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
do_query(query, 'ALTER TABLE symbols '
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
do_query(query, 'ALTER TABLE samples '
'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
print datetime.datetime.today(), "Done"
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
unhandled_count += 1
def sched__sched_switch(*x):
pass
def evsel_table(evsel_id, evsel_name, *x):
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
machine_file.write(value)
def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
comm_file.write(value)
def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
fmt = "!hiqiqiq"
value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x):
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
fmt = "!hiqiqiqiq"
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
call_file.write(value)
|
Polarcraft/KbveBot
|
refs/heads/master
|
commands/score.py
|
1
|
# Copyright (C) 2013-2015 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Fox Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from random import randint
from helpers import arguments
from helpers.orm import Scores
from helpers.command import Command
@Command('score', ['config', 'db', 'botnick'])
def cmd(send, msg, args):
"""Gets scores.
Syntax: {command} <--high|--low|nick>
"""
if not args['config']['feature'].getboolean('hooks'):
send("Hooks are disabled, and this command depends on hooks. Please contact the bot admin(s).")
return
session = args['db']
parser = arguments.ArgParser(args['config'])
group = parser.add_mutually_exclusive_group()
group.add_argument('--high', action='store_true')
group.add_argument('--low', action='store_true')
group.add_argument('nick', nargs='?', action=arguments.NickParser)
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
if cmdargs.high:
data = session.query(Scores).order_by(Scores.score.desc()).limit(3).all()
send('High Scores:')
for x in data:
send("%s: %s" % (x.nick, x.score))
elif cmdargs.low:
data = session.query(Scores).order_by(Scores.score).limit(3).all()
send('Low Scores:')
for x in data:
send("%s: %s" % (x.nick, x.score))
elif cmdargs.nick:
name = cmdargs.nick.lower()
if name == 'c':
send("We all know you love C better than anything else, so why rub it in?")
return
score = session.query(Scores).filter(Scores.nick == name).scalar()
if score is not None:
if name == args['botnick'].lower():
emote = ':)' if score.score > 0 else ':(' if score.score < 0 else ':|'
output = 'has %s points! %s' % (score.score, emote)
send(output, 'action')
else:
send("%s has %i points!" % (name, score.score))
else:
send("Nobody cares about %s" % name)
else:
count = session.query(Scores).count()
if count == 0:
send("Nobody cares about anything =(")
else:
randid = randint(1, count)
query = session.query(Scores).get(randid)
send("%s has %i point%s!" % (query.nick, query.score, '' if abs(query.score) == 1 else 's'))
|
p0cisk/Quantum-GIS
|
refs/heads/master
|
python/plugins/db_manager/db_plugins/spatialite/info_model.py
|
6
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtWidgets import QApplication
from ..info_model import DatabaseInfo
from ..html_elems import HtmlTable, HtmlParagraph
class SLDatabaseInfo(DatabaseInfo):
def __init__(self, db):
self.db = db
def connectionDetails(self):
tbl = [
(QApplication.translate("DBManagerPlugin", "Filename:"), self.db.connector.dbname)
]
return HtmlTable(tbl)
def generalInfo(self):
info = self.db.connector.getInfo()
tbl = [
(QApplication.translate("DBManagerPlugin", "SQLite version:"), info[0])
]
return HtmlTable(tbl)
def spatialInfo(self):
ret = []
info = self.db.connector.getSpatialInfo()
if info is None:
return
tbl = [
(QApplication.translate("DBManagerPlugin", "Library:"), info[0]),
("GEOS:", info[1]),
("Proj:", info[2])
]
ret.append(HtmlTable(tbl))
if self.db.connector.is_gpkg:
pass
elif not self.db.connector.has_geometry_columns:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> geometry_columns table doesn't exist!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
return ret
def privilegesDetails(self):
return None
|
jasondunsmore/heat
|
refs/heads/master
|
heat/db/sqlalchemy/migrate_repo/versions/045_stack_backup.py
|
13
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
backup = sqlalchemy.Column('backup', sqlalchemy.Boolean(), default=False)
backup.create(stack)
# Set backup flag for backup stacks, which are the only ones named "foo*"
not_deleted = None
stmt = sqlalchemy.select([stack.c.id,
stack.c.name]
).where(stack.c.deleted_at == not_deleted)
stacks = migrate_engine.execute(stmt)
for s in stacks:
if s.name.endswith('*'):
values = {'backup': True}
update = stack.update().where(
stack.c.id == s.id).values(values)
migrate_engine.execute(update)
|
opencb/cellbase
|
refs/heads/develop
|
cellbase-client/src/main/python/pycellbase/commons.py
|
1
|
import sys
import time
import warnings
import requests
import threading
import itertools
try:
from Queue import Queue
except ImportError:
from queue import Queue
_CALL_BATCH_SIZE = 200
_NUM_THREADS_DEFAULT = 4
def _create_rest_url(host, version, species, category, subcategory,
resource, query_id, options):
"""Creates the URL for querying the REST service"""
# Creating the basic URL
url_items = [host, 'webservices/rest', version, species, category,
subcategory, query_id, resource]
url_items = filter(None, url_items) # Some url items can be empty
url = ('/'.join(url_items))
# Checking optional params
if options is not None:
opts = []
for k, v in options.items():
if k == 'debug':
continue
if isinstance(v, list):
opts.append(k + '=' + ','.join(map(str, v)))
else:
opts.append(k + '=' + str(v))
if opts:
url += '?' + '&'.join(opts)
return url
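# Illustrative example (host and values are hypothetical):
#   _create_rest_url('http://bioinfo.hpc.cam.ac.uk/cellbase', 'v4', 'hsapiens',
#                    'feature', 'gene', 'info', 'BRCA2', {'limit': 5})
# would return:
#   'http://bioinfo.hpc.cam.ac.uk/cellbase/webservices/rest/v4/hsapiens/feature/gene/BRCA2/info?limit=5'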
def _fetch(session, host, version, species, category, subcategory, resource,
query_id=None, options=None, method='get', data=None):
"""Queries the REST service retrieving results until exhaustion or limit"""
# HERE BE DRAGONS
final_response = None
# Setting up skip and limit default parameters
call_skip = 0
call_limit = 1000
max_limit = None
if options is None:
opts = {'skip': call_skip, 'limit': call_limit}
else:
opts = options.copy() # Do not modify original data!
if 'skip' not in opts:
opts['skip'] = call_skip
# If 'limit' is specified, a maximum of 'limit' results will be returned
if 'limit' in opts:
max_limit = opts['limit']
            # The server must always be queried for results in groups of 1000
opts['limit'] = call_limit
# If there is a query_id, the next variables will be used
total_id_list = [] # All initial ids
next_id_list = [] # Ids which should be queried again for more results
next_id_indexes = [] # Ids position in the final response
if query_id is not None:
total_id_list = query_id.split(',')
    # If some query has more than 'call_limit' results, the server will be
    # queried again to retrieve the next 'call_limit' results
call = True
current_query_id = None # Current REST query
current_id_list = None # Current list of ids
time_out_counter = 0 # Number of times a query is repeated due to time-out
while call:
# Check 'limit' parameter if there is a maximum limit of results
if max_limit is not None and max_limit <= call_limit:
opts['limit'] = max_limit
# Updating query_id and list of ids to query
if query_id is not None:
if current_query_id is None:
current_query_id = query_id
current_id_list = total_id_list
current_id_indexes = range(len(total_id_list))
else:
current_query_id = ','.join(next_id_list)
current_id_list = next_id_list
current_id_indexes = next_id_indexes
# Retrieving url
url = _create_rest_url(host=host,
version=version,
species=species,
category=category,
subcategory=subcategory,
query_id=current_query_id,
resource=resource,
options=opts)
# DEBUG
if options is not None:
if 'debug' in options and options['debug']:
sys.stderr.write(url + '\n')
# Getting REST response
if method == 'get':
r = session.get(url)
elif method == 'post':
r = session.post(url, data=data)
else:
msg = 'Method "' + method + '" not implemented'
raise NotImplementedError(msg)
if r.status_code == 504: # Gateway Time-out
if time_out_counter == 99:
msg = 'Server not responding in time'
raise requests.ConnectionError(msg)
time_out_counter += 1
time.sleep(1)
continue
time_out_counter = 0
try:
json_obj = r.json()
if 'response' in json_obj:
response = json_obj['response']
else:
return json_obj
except ValueError:
msg = 'Bad JSON format retrieved from server'
raise ValueError(msg)
# Setting up final_response
if final_response is None:
final_response = response
# Concatenating results
else:
if query_id is not None:
for index, res in enumerate(response):
id_index = current_id_indexes[index]
final_response[id_index]['result'] += res['result']
else:
final_response[0]['result'] += response[0]['result']
if query_id is not None:
# Checking which ids are completely retrieved
next_id_list = []
next_id_indexes = []
for index, res in enumerate(response):
if res['numResults'] == call_limit:
next_id_list.append(current_id_list[index])
next_id_indexes.append(current_id_indexes[index])
# Ending REST calling when there are no more ids to retrieve
if not next_id_list:
call = False
else:
# Ending REST calling when there are no more results to retrieve
if response[0]['numResults'] != call_limit:
call = False
# Skipping the first 'limit' results to retrieve the next ones
opts['skip'] += call_limit
# Subtracting the number of returned results from the maximum goal
if max_limit is not None:
max_limit -= call_limit
            # A 'limit' of 0 would tell the server to return all results, so
            # break the loop before making a request with 0
if max_limit == 0:
break
return final_response
def _worker(queue, results, session, host, version, species, category,
subcategory, resource, options=None, method='get', data=None):
"""Manages the queue system for the threads"""
while True:
# Fetching new element from the queue
index, query_id = queue.get()
response = _fetch(session, host, version, species, category,
subcategory, resource, query_id, options, method,
data)
# Store data in results at correct index
results[index] = response
# Signaling to the queue that task has been processed
queue.task_done()
def get(session, host, version, species, category, subcategory, resource,
query_id=None, options=None, method='get', data=None):
"""Queries the REST service using multiple threads if needed"""
# If query_id is an array, convert to comma-separated string
if query_id is not None and isinstance(query_id, list):
query_id = ','.join(query_id)
# If data is an array, convert to comma-separated string
if data is not None and isinstance(data, list):
data = ','.join(data)
# Multithread if the number of queries is greater than _CALL_BATCH_SIZE
if query_id is None or len(query_id.split(',')) <= _CALL_BATCH_SIZE:
response = _fetch(session, host, version, species, category,
subcategory, resource, query_id, options, method,
data)
return response
else:
if options is not None and 'num_threads' in options:
num_threads = options['num_threads']
else:
num_threads = _NUM_THREADS_DEFAULT
# Splitting query_id into batches depending on the call batch size
id_list = query_id.split(',')
id_batches = [','.join(id_list[x:x+_CALL_BATCH_SIZE])
for x in range(0, len(id_list), _CALL_BATCH_SIZE)]
# Setting up the queue to hold all the id batches
q = Queue(maxsize=0)
# Creating a size defined list to store thread results
res = [''] * len(id_batches)
# Setting up the threads
for thread in range(num_threads):
t = threading.Thread(target=_worker,
kwargs={'queue': q,
'results': res,
'session': session,
'host': host,
'version': version,
'species': species,
'category': category,
'subcategory': subcategory,
'resource': resource,
'options': options,
'method': method,
'data': data})
            # Daemon threads let the main program exit even if the workers
            # never finish (they block forever waiting on the queue)
            t.daemon = True
t.start()
# Loading up the queue with index and id batches for each job
for index, batch in enumerate(id_batches):
q.put((index, batch)) # Notice this is a tuple
# Waiting until the queue has been processed
q.join()
        # Joining all the responses into one final response
final_response = list(itertools.chain.from_iterable(res))
return final_response
def deprecated(func):
"""Prints a warning for functions marked as deprecated"""
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn('Call to deprecated function "{}".'.format(func.__name__),
category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
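# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming a reachable REST deployment with the layout that
# _create_rest_url() expects. The host, species, category and identifiers
# below are hypothetical placeholders, not real endpoints.
if __name__ == '__main__':
    @deprecated
    def legacy_get(*args, **kwargs):
        # Calling this wrapper would also emit a DeprecationWarning
        return get(*args, **kwargs)

    example_session = requests.Session()
    example_response = get(session=example_session,
                           host='https://example.org',
                           version='v4',
                           species='hsapiens',
                           category='feature',
                           subcategory='gene',
                           resource='info',
                           query_id='BRCA1,TP53',
                           options={'limit': 5, 'debug': True})
    print(example_response)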
|
sebgoa/client-python
|
refs/heads/master
|
kubernetes/client/models/v1_limit_range_list.py
|
2
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1LimitRangeList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1LimitRangeList - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'items': 'list[V1LimitRange]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
self.attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
self._api_version = api_version
self._items = items
self._kind = kind
self._metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1LimitRangeList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1LimitRangeList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1LimitRangeList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1LimitRangeList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1LimitRangeList.
Items is a list of LimitRange objects. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md
:return: The items of this V1LimitRangeList.
:rtype: list[V1LimitRange]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1LimitRangeList.
Items is a list of LimitRange objects. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md
:param items: The items of this V1LimitRangeList.
:type: list[V1LimitRange]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1LimitRangeList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1LimitRangeList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1LimitRangeList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1LimitRangeList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1LimitRangeList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The metadata of this V1LimitRangeList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1LimitRangeList.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param metadata: The metadata of this V1LimitRangeList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1LimitRangeList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
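# --- Illustrative usage (not part of the generated code) ---
# A minimal sketch showing how this generated model can be instantiated and
# serialised; the empty item list and missing metadata are for demonstration
# only.
if __name__ == '__main__':
    example = V1LimitRangeList(api_version='v1', items=[], kind='LimitRangeList')
    print(example.to_dict())
    print(example == V1LimitRangeList(api_version='v1', items=[], kind='LimitRangeList'))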
|
LukeCarrier/py3k-pexpect
|
refs/heads/master
|
notes/my_forkpty.py
|
1
|
import os, fcntl, termios
import time
from pexpect import ExceptionPexpect
def my_forkpty():
(master_fd, slave_fd) = os.openpty()
if (master_fd < 0 or slave_fd < 0):
raise ExceptionPexpect("Forkpty failed")
# slave_name = ptsname(master_fd);
    pid = os.fork()
if pid == -1:
raise ExceptionPexpect("Forkpty failed")
elif pid == 0: # Child
if hasattr(termios, 'TIOCNOTTY'):
# Some platforms require an explicit detach of the
# current controlling tty before closing stdin, stdout, stderr.
# OpenBSD says that this is obsolete, but doesn't hurt.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
except:
pass
else: #if fd >= 0:
fcntl.ioctl(fd, termios.TIOCNOTTY, 0)
os.close(fd)
# The setsid() system call will place the process into its own session
# which has the effect of disassociating it from the controlling terminal.
# This is known to be true for OpenBSD.
os.setsid()
# except: return posix_error();
# Verify that we are disconnected from the controlling tty.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
os.close(fd)
raise ExceptionPexpect("Forkpty failed")
except:
pass
if 'TIOCSCTTY' in dir(termios):
# Make the pseudo terminal the controlling terminal for this process
# (the process must not currently have a controlling terminal).
if fcntl.ioctl(slave_fd, termios.TIOCSCTTY, '') < 0:
raise ExceptionPexpect("Forkpty failed")
# # Verify that we can open to the slave pty file. */
# fd = os.open(slave_name, os.O_RDWR);
# if fd < 0:
# raise ExceptionPexpect("Forkpty failed")
# else:
# os.close(fd);
# Verify that we now have a controlling tty.
fd = os.open("/dev/tty", os.O_WRONLY)
if fd < 0:
raise ExceptionPexpect("This process could not get a controlling tty.")
else:
os.close(fd)
os.close(master_fd)
os.dup2(slave_fd, 0)
os.dup2(slave_fd, 1)
os.dup2(slave_fd, 2)
if slave_fd > 2:
os.close(slave_fd)
pid = 0
else:
# PARENT
        os.close(slave_fd)
if pid == -1:
raise ExceptionPexpect("This process could not get a controlling tty.")
# if (pid == 0)
# PyOS_AfterFork();
return (pid, master_fd)
pid, fd = my_forkpty ()
if pid == 0: # child
print('I am not a robot!')
else:
print('(pid, fd) = (%d, %d)' % (pid, fd))
time.sleep(1) # Give the child a chance to print.
print('Robots always say:', os.read(fd,100))
os.close(fd)
|
firerszd/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test___all__.py
|
72
|
import unittest
from test import support
import os
import sys
class NoAll(RuntimeError):
pass
class FailedImport(RuntimeError):
pass
class AllTest(unittest.TestCase):
def check_all(self, modname):
names = {}
with support.check_warnings(
(".* (module|package)", DeprecationWarning),
("", ResourceWarning),
quiet=True):
try:
exec("import %s" % modname, names)
except:
# Silent fail here seems the best route since some modules
# may not be available or not initialize properly in all
# environments.
raise FailedImport(modname)
if not hasattr(sys.modules[modname], "__all__"):
raise NoAll(modname)
names = {}
with self.subTest(module=modname):
try:
exec("from %s import *" % modname, names)
except Exception as e:
# Include the module name in the exception string
self.fail("__all__ failure in {}: {}: {}".format(
modname, e.__class__.__name__, e))
if "__builtins__" in names:
del names["__builtins__"]
keys = set(names)
all_list = sys.modules[modname].__all__
all_set = set(all_list)
self.assertCountEqual(all_set, all_list, "in module {}".format(modname))
self.assertEqual(keys, all_set, "in module {}".format(modname))
def walk_modules(self, basedir, modpath):
for fn in sorted(os.listdir(basedir)):
path = os.path.join(basedir, fn)
if os.path.isdir(path):
pkg_init = os.path.join(path, '__init__.py')
if os.path.exists(pkg_init):
yield pkg_init, modpath + fn
for p, m in self.walk_modules(path, modpath + fn + "."):
yield p, m
continue
if not fn.endswith('.py') or fn == '__init__.py':
continue
yield path, modpath + fn[:-3]
def test_all(self):
# Blacklisted modules and packages
blacklist = set([
# Will raise a SyntaxError when compiling the exec statement
'__future__',
])
if not sys.platform.startswith('java'):
# In case _socket fails to build, make this test fail more gracefully
# than an AttributeError somewhere deep in CGIHTTPServer.
import _socket
# rlcompleter needs special consideration; it import readline which
# initializes GNU readline which calls setlocale(LC_CTYPE, "")... :-(
try:
import rlcompleter
import locale
except ImportError:
pass
else:
locale.setlocale(locale.LC_CTYPE, 'C')
ignored = []
failed_imports = []
lib_dir = os.path.dirname(os.path.dirname(__file__))
for path, modname in self.walk_modules(lib_dir, ""):
m = modname
blacklisted = False
while m:
if m in blacklist:
blacklisted = True
break
m = m.rpartition('.')[0]
if blacklisted:
continue
if support.verbose:
print(modname)
try:
# This heuristic speeds up the process by removing, de facto,
# most test modules (and avoiding the auto-executing ones).
with open(path, "rb") as f:
if b"__all__" not in f.read():
raise NoAll(modname)
self.check_all(modname)
except NoAll:
ignored.append(modname)
except FailedImport:
failed_imports.append(modname)
if support.verbose:
print('Following modules have no __all__ and have been ignored:',
ignored)
print('Following modules failed to be imported:', failed_imports)
if __name__ == "__main__":
unittest.main()
|
pabigot/pyxb
|
refs/heads/next
|
tests/utils/test-fac.py
|
2
|
import unittest
import sys
from pyxb.utils.fac import *
from pyxb.utils import six
from pyxb.utils.six.moves import xrange
class TestFAC (unittest.TestCase):
a = Symbol('a')
b = Symbol('b')
c = Symbol('c')
aOb = Choice(a, b)
aTb = Sequence(a, b)
a2 = NumericalConstraint(a, 2, 2)
bTc = Sequence(b, c)
a2ObTc = Choice(a2, bTc)
aXb = All(a, b)
ex = NumericalConstraint(a2ObTc, 3, 5)
def testSymbol (self):
self.assertEqual('a', self.a.metadata)
au = self.a.buildAutomaton()
cfg = Configuration(au)
self.assertFalse(cfg.isAccepting())
cfg.step('a')
self.assertTrue(cfg.isAccepting())
cfg.reset()
self.assertFalse(cfg.isAccepting())
self.assertRaises(AutomatonStepError, cfg.step, 'b')
def testNumericalConstraint (self):
self.assertEqual(self.a2ObTc, self.ex.term)
self.assertEqual(3, self.ex.min)
self.assertEqual(5, self.ex.max)
def testBasicStr (self):
self.assertEqual('a', str(self.a))
self.assertEqual('b', str(self.b))
self.assertEqual('a+b', str(self.aOb))
self.assertEqual('a.b', str(self.aTb))
self.assertEqual('&(a,b)', str(self.aXb))
x = Choice(self.b, self.aTb)
self.assertEqual('b+a.b', str(x))
x = Sequence(self.a, self.aOb)
self.assertEqual('a.(a+b)', str(x))
x = NumericalConstraint(self.a2ObTc, 3, 5)
self.assertEqual('(a^(2,2)+b.c)^(3,5)', str(x))
def testNullable (self):
x = NumericalConstraint(self.a, 0, 1)
self.assertTrue(x.nullable)
self.assertFalse(self.a.nullable)
self.assertFalse(self.aOb.nullable)
self.assertFalse(self.aTb.nullable)
self.assertFalse(self.aXb.nullable)
x = NumericalConstraint(self.a, 1, 4)
self.assertFalse(x.nullable)
def testFirst (self):
null_position = frozenset([()])
p0 = frozenset([(0,)])
p1 = frozenset([(1,)])
p0or1 = frozenset(set(p0).union(p1))
self.assertEqual(null_position, self.a.first)
for p in self.a.first:
self.assertEqual(self.a, self.a.posNodeMap[p])
self.assertEqual(p0or1, self.aOb.first)
self.assertEqual(p0, self.aTb.first)
for p in self.aTb.first:
self.assertEqual(self.a, self.aTb.posNodeMap[p])
rs = set()
for p in self.a2ObTc.first:
rs.add(self.a2ObTc.posNodeMap[p])
self.assertEqual(frozenset([self.a, self.b]), rs)
def testLast (self):
null_position = frozenset([()])
p0 = frozenset([(0,)])
p1 = frozenset([(1,)])
p0or1 = frozenset(set(p0).union(p1))
self.assertEqual(null_position, self.a.last)
self.assertEqual(p0or1, self.aOb.last)
self.assertEqual(p1, self.aTb.last)
rs = set()
for p in self.a2ObTc.last:
rs.add(self.a2ObTc.posNodeMap[p])
self.assertEqual(frozenset([self.a, self.c]), rs)
def testWalkTermTree (self):
pre_pos = []
post_pos = []
set_sym_pos = lambda _n,_p,_a: isinstance(_n, Symbol) and _a.append(_p)
self.ex.walkTermTree(set_sym_pos, None, pre_pos)
self.ex.walkTermTree(None, set_sym_pos, post_pos)
self.assertEqual(pre_pos, post_pos)
self.assertEqual([(0,0,0),(0,1,0),(0,1,1)], pre_pos)
def testCounterPositions (self):
self.assertEqual(frozenset([(), (0,0)]), self.ex.counterPositions)
def testFollow (self):
m = self.a.follow
self.assertEqual(1, len(m))
self.assertEqual([((), frozenset())], list(six.iteritems(m)))
def testValidateAutomaton (self):
a = Symbol('a')
x = Sequence(a, a)
if sys.version_info[:2] >= (2, 7):
with self.assertRaises(InvalidTermTreeError) as cm:
x.buildAutomaton()
self.assertEqual(cm.exception.parent, x)
self.assertEqual(cm.exception.term, a)
else:
self.assertRaises(InvalidTermTreeError, x.buildAutomaton)
def testUpdateApplication (self):
cc = CounterCondition(0, 1)
ui = UpdateInstruction(cc, True)
values = { cc : 0 }
self.assertTrue(ui.satisfiedBy(values))
ui.apply(values)
self.assertEqual(values[cc], 1)
if sys.version_info[:2] >= (2, 7):
with self.assertRaises(UpdateApplicationError) as cm:
ui.apply(values)
self.assertEqual(cm.exception.update_instruction, ui)
self.assertEqual(cm.exception.values, values)
else:
self.assertRaises(UpdateApplicationError, ui.apply, values)
def testInternals (self):
#print self.ex.facToString()
au = self.ex.buildAutomaton()
#print str(au)
def testAutomaton (self):
au = self.ex.buildAutomaton()
cfg = Configuration(au)
for c in 'aabcaa':
cfg.step(c)
self.assertTrue(cfg.isAccepting())
def testAllConstruction (self):
tt = All(Symbol('a'), Symbol('b'))
au = tt.buildAutomaton()
self.assertEqual(1, len(au.states))
st = next(iter(au.states))
self.assertTrue(st.isUnorderedCatenation)
# Example from Kilpelainen & Tuhkanen, "Towards Efficient
# Implementation of XML Schema Content Models"
def testKT2004 (self):
a = Symbol('a')
x = NumericalConstraint(Symbol('b'), 0, 1)
x = NumericalConstraint(Sequence(x, Symbol('c')), 1, 2)
x = Sequence(NumericalConstraint(Symbol('a'), 0, 1), x, Choice(Symbol('a'), Symbol('d')))
x = NumericalConstraint(x, 3, 4)
cfg = Configuration(x.buildAutomaton())
for word in ['cacaca', 'abcaccdacd']:
cfg.reset()
for c in word:
cfg = cfg.step(c)
self.assertTrue(cfg.isAccepting())
for word in ['caca', 'abcaccdac']:
cfg.reset()
for c in word:
cfg = cfg.step(c)
self.assertFalse(cfg.isAccepting())
word = list('ad')
cfg.reset()
cfg = cfg.step(word.pop(0))
try:
cfg = cfg.step(word.pop(0))
self.fail("Expected recognition error")
except AutomatonStepError as e:
self.assertEqual(e.symbol, 'd')
self.assertEqual(frozenset(e.acceptable), frozenset(['c', 'b']))
except Exception as e:
self.fail("Unexpected exception %s" % (e,))
# Example from email by Casey Jordan to xmlschema-dev mailing list
# 20100810: http://lists.w3.org/Archives/Public/xmlschema-dev/2010Aug/0008.html
# This expression is non-deterministic, but at the end only one path is
# accepting.
def testCJ2010 (self):
x = NumericalConstraint(Symbol('b'), 1, 2)
x = NumericalConstraint(Choice(x, Symbol('c')), 2, 2)
x = Sequence(Symbol('a'), x, Symbol('d'))
cfg = Configuration(x.buildAutomaton())
word = list('abbd')
cfg = cfg.step(word.pop(0))
cfg = cfg.step(word.pop(0))
try:
cfg = cfg.step(word.pop(0))
self.fail('Expected nondeterminism exception')
except NondeterministicSymbolError as e:
word.insert(0, e.symbol)
mcfg = MultiConfiguration(cfg)
mcfg = mcfg.step(word.pop(0))
mcfg = mcfg.step(word.pop(0))
accepting = mcfg.acceptingConfigurations()
self.assertEqual(1, len(accepting))
def testDeepMulti (self):
# Verify multiconfig works when non-determinism is introduced
# in a subconfiguration
x = NumericalConstraint(Symbol('b'), 1, 2)
x = NumericalConstraint(Choice(x, Symbol('c')), 2, 2)
mx = Sequence(Symbol('a'), x, Symbol('d'))
ax = All(mx, Symbol('e'), NumericalConstraint(Symbol('f'), 0, 1))
topcfg = Configuration(ax.buildAutomaton())
word = list('abbde')
cfg = topcfg.step(word.pop(0))
# Descended into sub-automaton
self.assertNotEqual(cfg, topcfg)
cfg = cfg.step(word.pop(0))
try:
cfg = cfg.step(word.pop(0))
self.fail('Expected nondeterminism exception')
except NondeterministicSymbolError as e:
word.insert(0, e.symbol)
mcfg = MultiConfiguration(cfg)
mcfg = mcfg.step(word.pop(0))
mcfg = mcfg.step(word.pop(0))
# NB: buildAutomaton may not preserve term order
self.assertEqual(frozenset(mcfg.acceptableSymbols()), frozenset(['e', 'f']))
accepting = mcfg.acceptingConfigurations()
self.assertEqual(0, len(accepting))
mcfg = mcfg.step('f')
accepting = mcfg.acceptingConfigurations()
self.assertEqual(0, len(accepting))
self.assertEqual(mcfg.acceptableSymbols(), [ 'e' ])
mcfg = mcfg.step('e')
accepting = mcfg.acceptingConfigurations()
self.assertEqual(1, len(accepting))
def testSubAcceptMulti (self):
a = NumericalConstraint(Symbol('a'), 0, 1)
b = Symbol('b')
ax = All(a, b)
mcfg = MultiConfiguration(Configuration(ax.buildAutomaton()))
word = list('a')
mcfg = mcfg.step(word.pop(0))
acc = mcfg.acceptingConfigurations()
self.assertEqual(0, len(acc))
# Example from page 2 of Kilpelainen "Checking Determinism of XML
# Schema Content Models in Optimal Time", IS preprint 20101026
# ("K2010") Note that though the paper states this RE is
    # deterministic, it's not, in the sense that there are multiple paths
# recognizing the same word.
def testK2010a (self):
t = NumericalConstraint(Symbol('a'), 2, 3)
t = NumericalConstraint(Choice(t, Symbol('b')), 2, 2)
ex = Sequence(t, Symbol('b'))
L = [ 'aaaab', 'aaaaab', 'aaaaaab', 'aabb', 'aaabb', 'baab', 'baaab', 'bbb' ]
cfg = Configuration(ex.buildAutomaton())
for word in L:
cfg.reset()
mcfg = MultiConfiguration(cfg)
for c in word:
mcfg.step(c)
accepting = mcfg.acceptingConfigurations()
if word in ('aaaaab',):
self.assertEqual(2, len(accepting))
else:
self.assertEqual(1, len(accepting), 'multiple for %s' % (word,))
# The MPEG-7 example from page 3 of K2010
def testK2010b (self):
def makeInstance (num_m, num_reps):
s = []
while 0 < num_reps:
num_reps -= 1
s.append('t')
s.append('m' * num_m)
return ''.join(s)
m = NumericalConstraint(Symbol('m'), 2, 12)
ex = NumericalConstraint(Sequence(Symbol('t'), m), 0, 65535)
cfg = Configuration(ex.buildAutomaton())
self.assertTrue(cfg.isAccepting())
cfg = cfg.step('t')
self.assertFalse(cfg.isAccepting())
cfg = cfg.step('m')
self.assertFalse(cfg.isAccepting())
cfg = cfg.step('m')
self.assertTrue(cfg.isAccepting())
cfg = cfg.step('t')
self.assertFalse(cfg.isAccepting())
for _ in xrange(12):
cfg = cfg.step('m')
self.assertTrue(cfg.isAccepting())
self.assertRaises(UnrecognizedSymbolError, cfg.step, 'm')
# Example from page 6 of K2010. This is the "nondeterministic"
# expression similar to the "deterministic" one of testK2010a.
# From the perspective of this implementation, there is no
# difference.
def testK2010c (self):
t = NumericalConstraint(Symbol('a'), 1, 2)
t = NumericalConstraint(Choice(t, Symbol('b')), 2, 2)
ex = Sequence(t, Symbol('b'))
L = [ 'aab', 'aaab', 'abb', 'aabb', 'bbb' ]
cfg = Configuration(ex.buildAutomaton())
for word in L:
cfg.reset()
mcfg = MultiConfiguration(cfg)
for c in word:
mcfg.step(c)
accepting = mcfg.acceptingConfigurations()
if word in ('aaab',):
self.assertEqual(2, len(accepting))
else:
self.assertEqual(1, len(accepting), 'multiple for %s' % (word,))
Lbar = [ 'aa', 'bb' ]
for word in Lbar:
cfg.reset()
mcfg = MultiConfiguration(cfg)
for c in word:
mcfg.step(c)
self.assertEqual(0, len(mcfg.acceptingConfigurations()), 'accepting %s' % (word,))
cfg.reset()
mcfg = MultiConfiguration(cfg)
mcfg.step('a')
mcfg.step('b')
self.assertRaises(UnrecognizedSymbolError, mcfg.step, 'a')
def testExpandAll (self):
a = Symbol('a')
b = Symbol('b')
c = Symbol('c')
all = All.CreateTermTree(a, b, c)
import itertools
cfg = Configuration(all.buildAutomaton())
for word in itertools.permutations('abc'):
cfg.reset()
for c in word:
cfg.step(c)
self.assertTrue(cfg.isAccepting())
cfg.reset()
cfg.step('a')
cfg.step('b')
self.assertFalse(cfg.isAccepting())
def testTransitionChain (self):
cc1 = CounterCondition(0, 1)
cc2 = CounterCondition(3, None)
psi = frozenset([UpdateInstruction(cc1, False), UpdateInstruction(cc2, True)])
s1 = State('1', True)
s2 = State('2', False)
x1 = Transition(s1, psi)
x2 = Transition(s2, [UpdateInstruction(cc2, False)])
x1b = x1.chainTo(x2)
self.assertNotEqual(x1, x1b)
self.assertEqual(id(x1.updateInstructions), id(x1b.updateInstructions))
self.assertEqual(x1.nextTransition, None)
self.assertEqual(x1b.nextTransition, x2)
def testTransitionLayers (self):
a1 = All(NumericalConstraint(Symbol('a'), 0, 1), Symbol('b'), NumericalConstraint(Symbol('c'), 0, 1))
a2 = All(Symbol('d'), NumericalConstraint(Symbol('e'), 0, 1), Symbol('f'))
tt = NumericalConstraint(Sequence(NumericalConstraint(a1, 0, 1), NumericalConstraint(a2, 0, 1), Symbol('l')), 0, 3)
au = tt.buildAutomaton()
topcfg = Configuration(au)
cfg = topcfg.step('b')
cfg = cfg.step('a')
cfg = cfg.step('e')
topcfg.reset()
cfg = topcfg.step('a')
# Can't move to 'e' until the required component 'b' of a1 has
# been provided. 'c' is also permitted.
try:
cfg = cfg.step('e')
self.fail('Expected recognition error')
except AutomatonStepError as e:
self.assertEqual(e.symbol, 'e')
# NB: buildAutomaton may not preserve term order
self.assertEqual(frozenset(e.acceptable), frozenset(['c', 'b']))
except Exception as e:
self.fail('Unexpected exception %s' % (e,))
cfg = cfg.step('b')
cfg = cfg.step('e')
def testAllTree (self):
a1 = All(Symbol('a'), Symbol('b'), Symbol('c'))
a2 = All(Symbol('d'), Symbol('e'), Symbol('f'))
ex = Sequence(NumericalConstraint(Symbol('f'), 0, 1), a1, NumericalConstraint(a2, 0, 1), Symbol('l'))
# print ex
# f^(0,1).&(a,b,c).&(d,e,f)^(0,1).l
au = ex.buildAutomaton()
cfg = Configuration(au)
for word in ['fabcl', 'fcabl']:
cfg.reset()
for c in word:
cfg = cfg.step(c)
self.assertTrue(cfg.isAccepting())
def testNonAllTree (self):
a1 = Symbol('a')
a2 = Symbol('d')
ex = Sequence(NumericalConstraint(Symbol('f'), 0, 1), a1, NumericalConstraint(a2, 0, 1), Symbol('l'))
# f?ad?l
au = ex.buildAutomaton()
cfg = Configuration(au)
# This checks that the transition from a can jump over the d and find the l.
for word in ['fal', 'fadl']:
cfg.reset()
for c in word:
cfg = cfg.step(c)
self.assertTrue(cfg.isAccepting())
def testFilterXit (self):
# This models part of the content of time-layoutElementType in
# the DWMLgen schema from NDFD. The initial state is reached
# through 's'. The state can be re-entered on 's' in two
# ways: a repetition of the 's' term, and a follow past a
# trivial occurrence of the 'e' term to another instance of
# ex. This produces a state that has two identical
# transitions, introducing non-determinism unnecessarily.
# Make sure we filter that.
s = NumericalConstraint(Symbol('s'), 1, None)
e = NumericalConstraint(Symbol('e'), 0, None)
ex = NumericalConstraint(Sequence(s, e), 1, None)
au = ex.buildAutomaton()
cfg = Configuration(au)
cfg = cfg.step('s')
self.assertEqual(1, len(cfg.candidateTransitions('s')))
if __name__ == '__main__':
unittest.main()
|
Jgarcia-IAS/Fidelizacion_odoo
|
refs/heads/master
|
openerp/addons/base/tests/test_translate.py
|
460
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import unittest
from openerp.tools.translate import quote, unquote
class TranslationToolsTestCase(unittest.TestCase):
def test_quote_unquote(self):
def test_string(str):
quoted = quote(str)
#print "\n1:", repr(str)
#print "2:", repr(quoted)
unquoted = unquote("".join(quoted.split('"\n"')))
#print "3:", repr(unquoted)
self.assertEquals(str, unquoted)
test_string("""test \nall kinds\n \n o\r
\\\\ nope\n\n"
""")
# The ones with 1+ backslashes directly followed by
# a newline or literal N can fail... we would need a
# state-machine parser to handle these, but this would
# be much slower so it's better to avoid them at the moment
self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
\\\\nope\n\n"
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
BT-fgarbely/account-financial-reporting
|
refs/heads/8.0
|
account_financial_report_webkit/report/common_reports.py
|
2
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi, Guewen Baconnier
# Copyright Camptocamp SA 2011
# SQL inspired from OpenERP original code
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# TODO refactor this helper in order to act more like a mixin
# By using properties we will have a simpler signature in functions
import logging
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.addons.account.report.common_report_header \
import common_report_header
_logger = logging.getLogger('financial.reports.webkit')
MAX_MONSTER_SLICE = 50000
class CommonReportHeaderWebkit(common_report_header):
"""Define common helper for financial report"""
######################################################################
# From getter helper #
######################################################################
def get_start_period_br(self, data):
return self._get_info(data, 'period_from', 'account.period')
def get_end_period_br(self, data):
return self._get_info(data, 'period_to', 'account.period')
def get_fiscalyear_br(self, data):
return self._get_info(data, 'fiscalyear_id', 'account.fiscalyear')
def _get_chart_account_id_br(self, data):
return self._get_info(data, 'chart_account_id', 'account.account')
def _get_accounts_br(self, data):
return self._get_info(data, 'account_ids', 'account.account')
def _get_info(self, data, field, model):
info = data.get('form', {}).get(field)
if info:
return self.pool.get(model).browse(self.cursor, self.uid, info)
return False
def _get_journals_br(self, data):
return self._get_info(data, 'journal_ids', 'account.journal')
def _get_display_account(self, data):
val = self._get_form_param('display_account', data)
if val == 'bal_all':
return _('All accounts')
elif val == 'bal_mix':
return _('With transactions or non zero balance')
else:
return val
def _get_display_partner_account(self, data):
val = self._get_form_param('result_selection', data)
if val == 'customer':
return _('Receivable Accounts')
elif val == 'supplier':
return _('Payable Accounts')
elif val == 'customer_supplier':
return _('Receivable and Payable Accounts')
else:
return val
def _get_display_target_move(self, data):
val = self._get_form_param('target_move', data)
if val == 'posted':
return _('All Posted Entries')
elif val == 'all':
return _('All Entries')
else:
return val
def _get_display_account_raw(self, data):
return self._get_form_param('display_account', data)
def _get_filter(self, data):
return self._get_form_param('filter', data)
def _get_target_move(self, data):
return self._get_form_param('target_move', data)
def _get_initial_balance(self, data):
return self._get_form_param('initial_balance', data)
def _get_amount_currency(self, data):
return self._get_form_param('amount_currency', data)
def _get_date_from(self, data):
return self._get_form_param('date_from', data)
def _get_date_to(self, data):
return self._get_form_param('date_to', data)
def _get_form_param(self, param, data, default=False):
return data.get('form', {}).get(param, default)
#############################################
# Account and account line filter helper #
#############################################
def sort_accounts_with_structure(self, root_account_ids, account_ids,
context=None):
"""Sort accounts by code respecting their structure"""
def recursive_sort_by_code(accounts, parent):
sorted_accounts = []
# add all accounts with same parent
level_accounts = [account for account in accounts
if account['parent_id']
and account['parent_id'][0] == parent['id']]
# add consolidation children of parent, as they are logically on
# the same level
if parent.get('child_consol_ids'):
level_accounts.extend([account for account in accounts
if account['id']
in parent['child_consol_ids']])
# stop recursion if no children found
if not level_accounts:
return []
level_accounts = sorted(level_accounts, key=lambda a: a['code'])
for level_account in level_accounts:
sorted_accounts.append(level_account['id'])
sorted_accounts.extend(
recursive_sort_by_code(accounts, parent=level_account))
return sorted_accounts
if not account_ids:
return []
accounts_data = self.pool.get('account.account').read(
self.cr, self.uid, account_ids,
['id', 'parent_id', 'level', 'code', 'child_consol_ids'],
context=context)
sorted_accounts = []
root_accounts_data = [account_data for account_data in accounts_data
if account_data['id'] in root_account_ids]
for root_account_data in root_accounts_data:
sorted_accounts.append(root_account_data['id'])
sorted_accounts.extend(
recursive_sort_by_code(accounts_data, root_account_data))
# fallback to unsorted accounts when sort failed
# sort fails when the levels are miscalculated by account.account
# check lp:783670
if len(sorted_accounts) != len(account_ids):
_logger.warn('Webkit financial reports: Sort of accounts failed.')
sorted_accounts = account_ids
return sorted_accounts
def get_all_accounts(self, account_ids, exclude_type=None, only_type=None,
filter_report_type=None, context=None):
"""Get all account passed in params with their childrens
@param exclude_type: list of types to exclude (view, receivable,
payable, consolidation, other)
@param only_type: list of types to filter on (view, receivable,
payable, consolidation, other)
        @param filter_report_type: list of report types to filter on
"""
context = context or {}
accounts = []
if not isinstance(account_ids, list):
account_ids = [account_ids]
acc_obj = self.pool.get('account.account')
for account_id in account_ids:
accounts.append(account_id)
accounts += acc_obj._get_children_and_consol(
self.cursor, self.uid, account_id, context=context)
res_ids = list(set(accounts))
res_ids = self.sort_accounts_with_structure(
account_ids, res_ids, context=context)
if exclude_type or only_type or filter_report_type:
sql_filters = {'ids': tuple(res_ids)}
sql_select = "SELECT a.id FROM account_account a"
sql_join = ""
sql_where = "WHERE a.id IN %(ids)s"
if exclude_type:
sql_where += " AND a.type not in %(exclude_type)s"
sql_filters.update({'exclude_type': tuple(exclude_type)})
if only_type:
sql_where += " AND a.type IN %(only_type)s"
sql_filters.update({'only_type': tuple(only_type)})
if filter_report_type:
sql_join += "INNER JOIN account_account_type t" \
" ON t.id = a.user_type"
sql_join += " AND t.report_type IN %(report_type)s"
sql_filters.update({'report_type': tuple(filter_report_type)})
sql = ' '.join((sql_select, sql_join, sql_where))
self.cursor.execute(sql, sql_filters)
fetch_only_ids = self.cursor.fetchall()
if not fetch_only_ids:
return []
only_ids = [only_id[0] for only_id in fetch_only_ids]
# keep sorting but filter ids
res_ids = [res_id for res_id in res_ids if res_id in only_ids]
return res_ids
##########################################
# Periods and fiscal years helper #
##########################################
def _get_opening_periods(self):
"""Return the list of all journal that can be use to create opening
entries.
We actually filter on this instead of opening period as older version
of OpenERP did not have this notion"""
return self.pool.get('account.period').search(self.cursor, self.uid,
[('special', '=', True)])
def exclude_opening_periods(self, period_ids):
period_obj = self.pool.get('account.period')
return period_obj.search(self.cr, self.uid, [['special', '=', False],
['id', 'in', period_ids]])
def get_included_opening_period(self, period):
"""Return the opening included in normal period we use the assumption
that there is only one opening period per fiscal year"""
period_obj = self.pool.get('account.period')
return period_obj.search(self.cursor, self.uid,
[('special', '=', True),
('date_start', '>=', period.date_start),
('date_stop', '<=', period.date_stop),
('company_id', '=', period.company_id.id)],
limit=1)
def periods_contains_move_lines(self, period_ids):
if not period_ids:
return False
mv_line_obj = self.pool.get('account.move.line')
if isinstance(period_ids, (int, long)):
period_ids = [period_ids]
return mv_line_obj.search(self.cursor, self.uid,
[('period_id', 'in', period_ids)], limit=1) \
and True or False
def _get_period_range_from_periods(self, start_period, stop_period,
mode=None):
"""
Deprecated. We have to use now the build_ctx_periods of period_obj
otherwise we'll have inconsistencies, because build_ctx_periods does
never filter on the the special
"""
period_obj = self.pool.get('account.period')
search_period = [('date_start', '>=', start_period.date_start),
('date_stop', '<=', stop_period.date_stop)]
if mode == 'exclude_opening':
search_period += [('special', '=', False)]
res = period_obj.search(self.cursor, self.uid, search_period)
return res
def _get_period_range_from_start_period(self, start_period,
include_opening=False,
fiscalyear=False,
stop_at_previous_opening=False):
"""We retrieve all periods before start period"""
opening_period_id = False
past_limit = []
period_obj = self.pool.get('account.period')
mv_line_obj = self.pool.get('account.move.line')
# We look for previous opening period
if stop_at_previous_opening:
opening_search = [('special', '=', True),
('date_stop', '<', start_period.date_start)]
if fiscalyear:
opening_search.append(('fiscalyear_id', '=', fiscalyear.id))
opening_periods = period_obj.search(self.cursor, self.uid,
opening_search,
order='date_stop desc')
for opening_period in opening_periods:
validation_res = mv_line_obj.search(self.cursor,
self.uid,
[('period_id', '=',
opening_period)],
limit=1)
if validation_res:
opening_period_id = opening_period
break
if opening_period_id:
# we also look for overlapping periods
opening_period_br = period_obj.browse(
self.cursor, self.uid, opening_period_id)
past_limit = [
('date_start', '>=', opening_period_br.date_stop)]
periods_search = [('date_stop', '<=', start_period.date_stop)]
periods_search += past_limit
if not include_opening:
periods_search += [('special', '=', False)]
if fiscalyear:
periods_search.append(('fiscalyear_id', '=', fiscalyear.id))
periods = period_obj.search(self.cursor, self.uid, periods_search)
if include_opening and opening_period_id:
periods.append(opening_period_id)
periods = list(set(periods))
if start_period.id in periods:
periods.remove(start_period.id)
return periods
def get_first_fiscalyear_period(self, fiscalyear):
return self._get_st_fiscalyear_period(fiscalyear)
def get_last_fiscalyear_period(self, fiscalyear):
return self._get_st_fiscalyear_period(fiscalyear, order='DESC')
def _get_st_fiscalyear_period(self, fiscalyear, special=False,
order='ASC'):
period_obj = self.pool.get('account.period')
p_id = period_obj.search(self.cursor,
self.uid,
[('special', '=', special),
('fiscalyear_id', '=', fiscalyear.id)],
limit=1,
order='date_start %s' % (order,))
if not p_id:
raise osv.except_osv(_('No period found'), '')
return period_obj.browse(self.cursor, self.uid, p_id[0])
###############################
# Initial Balance helper #
###############################
def _compute_init_balance(self, account_id=None, period_ids=None,
mode='computed', default_values=False):
if not isinstance(period_ids, list):
period_ids = [period_ids]
res = {}
if not default_values:
if not account_id or not period_ids:
raise Exception('Missing account or period_ids')
try:
self.cursor.execute("SELECT sum(debit) AS debit, "
" sum(credit) AS credit, "
" sum(debit)-sum(credit) AS balance, "
" sum(amount_currency) AS curr_balance"
" FROM account_move_line"
" WHERE period_id in %s"
" AND account_id = %s",
(tuple(period_ids), account_id))
res = self.cursor.dictfetchone()
except Exception:
self.cursor.rollback()
raise
return {'debit': res.get('debit') or 0.0,
'credit': res.get('credit') or 0.0,
'init_balance': res.get('balance') or 0.0,
'init_balance_currency': res.get('curr_balance') or 0.0,
'state': mode}
def _read_opening_balance(self, account_ids, start_period):
""" Read opening balances from the opening balance
"""
opening_period_selected = self.get_included_opening_period(
start_period)
if not opening_period_selected:
raise osv.except_osv(
_('Error'),
_('No opening period found to compute the opening balances.\n'
'You have to configure a period on the first of January'
' with the special flag.'))
res = {}
for account_id in account_ids:
res[account_id] = self._compute_init_balance(
account_id, opening_period_selected, mode='read')
return res
def _compute_initial_balances(self, account_ids, start_period, fiscalyear):
"""We compute initial balance.
If form is filtered by date all initial balance are equal to 0
This function will sum pear and apple in currency amount if account as
no secondary currency"""
# if opening period is included in start period we do not need to
# compute init balance we just read it from opening entries
res = {}
        # P&L and balance sheet accounts are not computed the same way; see
        # the attached documentation. We include the opening period for P&L
        # accounts in order to see if opening entries were created by error
        # on these accounts
pnl_periods_ids = self._get_period_range_from_start_period(
start_period, fiscalyear=fiscalyear, include_opening=True)
bs_period_ids = self._get_period_range_from_start_period(
start_period, include_opening=True, stop_at_previous_opening=True)
opening_period_selected = self.get_included_opening_period(
start_period)
for acc in self.pool.get('account.account').browse(
self.cursor,
self.uid,
account_ids,
context=self.localcontext):
res[acc.id] = self._compute_init_balance(default_values=True)
if acc.user_type.close_method == 'none':
# we compute the initial balance for close_method == none only
# when we print a GL during the year, when the opening period
# is not included in the period selection!
if pnl_periods_ids and not opening_period_selected:
res[acc.id] = self._compute_init_balance(
acc.id, pnl_periods_ids)
else:
res[acc.id] = self._compute_init_balance(acc.id, bs_period_ids)
return res
################################################
# Account move retrieval helper #
################################################
def _get_move_ids_from_periods(self, account_id, period_start, period_stop,
target_move):
move_line_obj = self.pool.get('account.move.line')
period_obj = self.pool.get('account.period')
periods = period_obj.build_ctx_periods(
self.cursor, self.uid, period_start.id, period_stop.id)
if not periods:
return []
search = [
('period_id', 'in', periods), ('account_id', '=', account_id)]
if target_move == 'posted':
search += [('move_id.state', '=', 'posted')]
return move_line_obj.search(self.cursor, self.uid, search)
def _get_move_ids_from_dates(self, account_id, date_start, date_stop,
target_move, mode='include_opening'):
        # TODO improve performance by setting the opening period as a property
move_line_obj = self.pool.get('account.move.line')
search_period = [('date', '>=', date_start),
('date', '<=', date_stop),
('account_id', '=', account_id)]
        # actually not used because OpenERP itself always includes the opening
        # period when we get the periods from January to December
if mode == 'exclude_opening':
opening = self._get_opening_periods()
if opening:
                search_period += [('period_id', 'not in', opening)]
if target_move == 'posted':
search_period += [('move_id.state', '=', 'posted')]
return move_line_obj.search(self.cursor, self.uid, search_period)
def get_move_lines_ids(self, account_id, main_filter, start, stop,
target_move, mode='include_opening'):
"""Get account move lines base on form data"""
if mode not in ('include_opening', 'exclude_opening'):
raise osv.except_osv(
_('Invalid query mode'),
_('Must be in include_opening, exclude_opening'))
if main_filter in ('filter_period', 'filter_no'):
return self._get_move_ids_from_periods(account_id, start, stop,
target_move)
elif main_filter == 'filter_date':
return self._get_move_ids_from_dates(account_id, start, stop,
target_move)
else:
raise osv.except_osv(
_('No valid filter'), _('Please set a valid time filter'))
def _get_move_line_datas(self, move_line_ids,
order='per.special DESC, l.date ASC, \
per.date_start ASC, m.name ASC'):
        # Possible bang if move_line_ids is too long
        # We cannot slice here as we have to do the sort.
        # If slicing has to be done it means that we have to reorder in Python
        # after all is finished. That is quite crappy...
        # We have a defective design here (mea culpa) that should be fixed
        #
        # TODO improve that by making a better domain or, if not possible,
        # by using a Python sort
if not move_line_ids:
return []
if not isinstance(move_line_ids, list):
move_line_ids = [move_line_ids]
monster = """
SELECT l.id AS id,
l.date AS ldate,
j.code AS jcode ,
j.type AS jtype,
l.currency_id,
l.account_id,
l.amount_currency,
l.ref AS lref,
l.name AS lname,
COALESCE(l.debit, 0.0) - COALESCE(l.credit, 0.0) AS balance,
l.debit,
l.credit,
l.period_id AS lperiod_id,
per.code as period_code,
per.special AS peropen,
l.partner_id AS lpartner_id,
p.name AS partner_name,
m.name AS move_name,
COALESCE(partialrec.name, fullrec.name, '') AS rec_name,
COALESCE(partialrec.id, fullrec.id, NULL) AS rec_id,
m.id AS move_id,
c.name AS currency_code,
i.id AS invoice_id,
i.type AS invoice_type,
i.number AS invoice_number,
l.date_maturity
FROM account_move_line l
JOIN account_move m on (l.move_id=m.id)
LEFT JOIN res_currency c on (l.currency_id=c.id)
LEFT JOIN account_move_reconcile partialrec
on (l.reconcile_partial_id = partialrec.id)
LEFT JOIN account_move_reconcile fullrec on (l.reconcile_id = fullrec.id)
LEFT JOIN res_partner p on (l.partner_id=p.id)
LEFT JOIN account_invoice i on (m.id =i.move_id)
LEFT JOIN account_period per on (per.id=l.period_id)
JOIN account_journal j on (l.journal_id=j.id)
WHERE l.id in %s"""
monster += (" ORDER BY %s" % (order,))
try:
self.cursor.execute(monster, (tuple(move_line_ids),))
res = self.cursor.dictfetchall()
except Exception:
self.cursor.rollback()
raise
return res or []
def _get_moves_counterparts(self, move_ids, account_id, limit=3):
if not move_ids:
return {}
if not isinstance(move_ids, list):
move_ids = [move_ids]
sql = """
SELECT account_move.id,
array_to_string(
ARRAY(SELECT DISTINCT a.code
FROM account_move_line m2
LEFT JOIN account_account a ON (m2.account_id=a.id)
WHERE m2.move_id =account_move_line.move_id
AND m2.account_id<>%s limit %s) , ', ')
FROM account_move
JOIN account_move_line
on (account_move_line.move_id = account_move.id)
JOIN account_account
on (account_move_line.account_id = account_account.id)
WHERE move_id in %s"""
try:
self.cursor.execute(sql, (account_id, limit, tuple(move_ids)))
res = self.cursor.fetchall()
except Exception:
self.cursor.rollback()
raise
return res and dict(res) or {}
def is_initial_balance_enabled(self, main_filter):
if main_filter not in ('filter_no', 'filter_year', 'filter_period'):
return False
return True
def _get_initial_balance_mode(self, start_period):
opening_period_selected = self.get_included_opening_period(
start_period)
opening_move_lines = self.periods_contains_move_lines(
opening_period_selected)
if opening_move_lines:
return 'opening_balance'
else:
return 'initial_balance'
|
nvbn/coviolations_web
|
refs/heads/develop
|
violations/tests/test_pep8.py
|
1
|
import sure
from django.test import TestCase
from tasks.const import STATUS_SUCCESS, STATUS_FAILED
from ..pep8 import pep8_violation
from .base import get_content
class PEP8ViolationCase(TestCase):
"""PEP8 violation case"""
def test_success(self):
"""Test success result"""
data = {'raw': ''}
result = pep8_violation(data)
result['status'].should.be.equal(STATUS_SUCCESS)
result['plot']['count'].should.be.equal(0)
def test_fail_on_real(self):
"""Test fail on real data"""
data = {
'raw': get_content('pep8.out'),
}
result = pep8_violation(data)
result['status'].should.be.equal(STATUS_FAILED)
result['plot']['count'].should.be.equal(307)
|
Azure/qpid-proton
|
refs/heads/master
|
examples/python/messenger/server.py
|
8
|
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import sys, optparse
from proton import *
parser = optparse.OptionParser(usage="usage: %prog <addr_1> ... <addr_n>",
description="simple message server")
opts, args = parser.parse_args()
if not args:
args = ["amqp://~0.0.0.0"]
mng = Messenger()
mng.start()
for a in args:
mng.subscribe(a)
def dispatch(request, response):
if request.subject:
response.subject = "Re: %s" % request.subject
response.properties = request.properties
print("Dispatched %s %s" % (request.subject, request.properties))
msg = Message()
reply = Message()
while True:
if mng.incoming < 10:
mng.recv(10)
if mng.incoming > 0:
mng.get(msg)
if msg.reply_to:
print(msg.reply_to)
reply.address = msg.reply_to
reply.correlation_id = msg.correlation_id
reply.body = msg.body
dispatch(msg, reply)
mng.put(reply)
mng.send()
mng.stop()
|
kevinkle/semantic
|
refs/heads/master
|
superphy/src/upload/tests/test_gene_location_upload.py
|
1
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import unittest
import mock
from collections import defaultdict
import sys
import traceback
import subprocess
import json
import re
import os
from rdflib import Graph, Namespace, Literal, XSD
from Bio.Blast import NCBIXML, Record
from superphy.upload._sparql import check_named_individual, has_ref_gene, _sparql_query
from superphy.upload._utils import generate_output, generate_path
from superphy.upload.classes import GeneLocation
from superphy.upload.blazegraph_upload import BlazegraphUploader
from superphy.upload.contig_upload import ContigUploader
from superphy.upload.gene_location_upload import GeneLocationUploader, VFLocationUploader, AMRLocationUploader
n = Namespace("https://github.com/superphy#")
owl = Namespace("http://www.w3.org/2002/07/owl#")
rdf = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
rdfs = Namespace("http://www.w3.org/2000/01/rdf-schema#")
gfvo = Namespace("http://www.biointerchange.org/gfvo#")
faldo = Namespace("http://biohackathon.org/resource/faldo#")
class GeneLocationUploaderTestCase(unittest.TestCase):
def setUp(self):
self.case = GeneLocationUploader()
@classmethod
def setUpClass(cls):
super(GeneLocationUploaderTestCase, cls).setUpClass()
cls.setupBlazegraph()
@classmethod
def setupBlazegraph(cls):
"""
        Sets up some data for testing methods in this class that query Blazegraph.
Assumes that Blazegraph is initially empty when testing.
"""
g = Graph()
g.add((n.agn43_AP009048_closed_0, rdf.type, faldo.Region))
g.add((n.agn43_AP009048_closed_0, rdf.type, n.reference_gene))
g.add((n.agn43_AP009048_closed_0, n.is_gene_of, n.AP009048))
g.add((n.agn43_AP009048_closed_0, n.has_sequence, Literal("ACGTTGCA", datatype=XSD.string)))
g.add((n.agn43, n.has_copy, n.agn43_AP009048_closed_0))
g.add((n.agn43_AP009048_closed_0_begin, faldo.position, Literal("2073676", datatype=XSD.string)))
g.add((n.agn43_AP009048_closed_0_end, faldo.position, Literal("2076795", datatype=XSD.string)))
g.add((n.agn43_AP009048_closed_0, faldo.begin, n.agn43_AP009048_closed_0_begin))
g.add((n.agn43_AP009048_closed_0, faldo.end, n.agn43_AP009048_closed_0_end))
g.add((n.agn43, n.has_copy, n.agn43_AP009048_closed_0))
g.add((n.AP009048, n.has_gene, n.agn43_AP009048_closed_0))
g.add((n.ecpC_CP002729_closed_0, rdf.type, faldo.Region))
g.add((n.ecpC_CP002729_closed_0, n.has_sequence, Literal("ACGTTGCA", datatype=XSD.string)))
g.add((n.ecpC, n.has_copy, n.ecpC_CP002729_closed_0))
g.add((n.CP002729, n.has_gene, n.ecpC_CP002729_closed_0))
BlazegraphUploader().upload_data(generate_output(g))
del g
def tearDown(self):
del self.case
def test_get_accession_name(self):
## Non-complete genome
contig = "ANVW00000000"
desc = "a description"
returned_string = self.case.get_accession_name(contig, desc)
self.assertEqual(returned_string, "ANVW00000000")
## Complete genome
contig = "CP102000"
desc = "a complete genome"
returned_string = self.case.get_accession_name(contig, desc)
self.assertEqual(returned_string, "CP102000_closed")
@mock.patch('superphy.upload.gene_location_upload.GeneLocationUploader.get_num_gene_copies')
def test_add_contig(self, mock_copies):
## Adding a location to an existing gene with existing contig
self.case.dict = {"hlyA":{"ANVW01000001":0}}
self.case.add_contig("hlyA", "ANVW01000001")
self.assertEqual(self.case.dict, {"hlyA":{"ANVW01000001":1}})
## Adding location to an existing gene with non-existing contig
mock_copies.return_value = 0
self.case.add_contig("hlyA", "JPQG01000001")
self.assertEqual(self.case.dict, {"hlyA":{"ANVW01000001":1, "JPQG01000001": 0}})
## Adding location w/ non-existing genome and contig
self.case.add_contig("espF", "JPQG01000002")
self.assertEqual(self.case.dict, {"hlyA":{"ANVW01000001":1, "JPQG01000001": 0},
"espF":{"JPQG01000002": 0}})
        ## Adding location w/ existing genome and contig in Blazegraph (but not the dict)
mock_copies.reset_mock()
mock_copies.return_value = 4
self.case.add_contig("aafA", "JPQG01000001")
self.assertEqual(self.case.dict, {"hlyA":{"ANVW01000001":1, "JPQG01000001": 0},
"espF":{"JPQG01000002": 0},
"aafA":{"JPQG01000001": 4}})
def test_get_num_gene_copies(self):
self.assertEqual(self.case.get_num_gene_copies("agn43", "AP009048"), 1)
self.assertEqual(self.case.get_num_gene_copies("espF", "ANVW01000001"), 0)
@mock.patch('superphy.upload.gene_location_upload.BlazegraphUploader', autospec=True)
@mock.patch('superphy.upload.gene_location_upload.Graph')
@mock.patch('superphy.upload.gene_location_upload.GeneLocation')
@mock.patch('superphy.upload.gene_location_upload.GeneLocationUploader.check_gene_copy')
def test_create_gene_location(self, mock_check, mock_rdf, mock_graph, mock_bg):
mock_check.return_value = False
mock_genelocation = mock.MagicMock(spec=GeneLocation, create=True)
mock_g = mock.MagicMock(spec=Graph, create=True)
mock_graph.return_value = mock_g
mock_rdf.return_value = mock_genelocation
self.case.create_gene_location("name", "gene", "contig", "begin", "end", "seq", "ref_gene", None)
self.assertEqual(len(mock_check.mock_calls), 1)
mock_rdf.assert_called_once_with(mock_g, "name", "gene", "contig", "begin", "end", "seq", "ref_gene", None)
mock_bg.assert_called_once_with()
def test_check_gene_copy(self):
"""
Assumes that there is no data uploaded to Blazegraph before executing these tests.
"""
self.assertTrue(self.case.check_gene_copy("agn43", "AP009048", "2073676", "2076795"))
class VFLocationUploaderTestCase(unittest.TestCase):
def setUp(self):
self.case = VFLocationUploader()
def tearDown(self):
del self.case
def test_get_gene_name(self):
string = "agn43|VFO:3000001| - (b2000) - CP4-44 prophage; antigen 43 (Ag43) phase-variable biofilm \
formation autotransporter [Escherichia coli str. MG1655 (K12)]"
gene_name = self.case.get_gene_name(string)
self.assertEqual(gene_name, "agn43")
@mock.patch('superphy.upload.gene_location_upload.GeneLocationUploader.create_gene_location')
@mock.patch('superphy.upload.gene_location_upload.GeneLocationUploader.check_gene_copy')
@mock.patch('superphy.upload.gene_location_upload.GeneLocationUploader.get_num_gene_copies')
@mock.patch('superphy.upload.gene_location_upload.NCBIXML.parse')
@mock.patch('superphy.upload.gene_location_upload.open')
def test_ncbixml_parse(self, mock_open, mock_parse, mock_copies, mock_check, mock_create):
mock_handle = mock.MagicMock(spec=file)
mock_open.return_value = mock.MagicMock(spec=file)
## Gene Location w/ 100% query and identity, incomplete genome
mock_check.return_value = False
mock_copies.return_value = 0
mock_parse.return_value = [self.create_sample_record("aafA", "gnl|BL_ORD_ID|56 gi|606962173|gb|JHNV01000057.1| \
Escherichia coli O119:H4 str. 03-3458 contig57, whole genome shotgun sequence", 0, 1146, 123, 123, "ATGC")]
self.case.parse_result()
mock_create.assert_called_once_with("aafA_JHNV01000057_0", "aafA", "JHNV01000057", '1146', '1268', "ATGC", False)
## Gene Location w/ 100% query and identity, complete genome
mock_check.reset_mock()
mock_parse.reset_mock()
mock_create.reset_mock()
mock_check.return_value = False
mock_parse.return_value = [self.create_sample_record("bapF", "gnl|BL_ORD_ID|56 gi|606962173|gb|CP002729.1| \
complete genome", 0, 1146, 1230, 1230, "CAT")]
self.case.parse_result()
mock_create.assert_called_once_with("bapF_CP002729_closed_0", "bapF", "CP002729_closed", '1146', '2375', "CAT", False)
def test_get_reference_genes(self):
"""
Assumes that there is no data uploaded to Blazegraph before executing these tests.
"""
self.assertEqual(len(list(self.case.get_reference_genes())), 1)
@mock.patch('superphy.upload.gene_location_upload.GeneLocationUploader.create_gene_location')
@mock.patch('superphy.upload.gene_location_upload.NCBIXML.parse', autospec=True)
@mock.patch('superphy.upload.gene_location_upload.open')
def test_parse_result(self, mock_open, mock_parse, mock_create):
mock_open.return_value = mock.MagicMock(spec=file)
mock_parse.return_value = [self.create_sample_record("gaa", "gnl|BL_ORD_ID|56 gi|606962173|gb|JHNV01000056.1| \
Escherichia coli O119:H4 str. 03-3458 contig56, whole genome shotgun sequence",
0, 1146, 123, 123, "ATGC")]
self.case.parse_result()
mock_create.assert_called_once_with("gaa_JHNV01000056_0", "gaa", "JHNV01000056", '1146', '1268', "ATGC", False)
def create_sample_record(self, query, title, expect, start, score, ident, seq):
"""
Helper function that creates Blast record handles for testing NCBI parse-related methods.
"""
record = mock.MagicMock(spec=Record)
entry = mock.MagicMock(spec=Record.Alignment)
hsp = mock.MagicMock(spec=Record.HSP)
record.query = query
entry.title = title
entry.hsps = [hsp]
hsp.expect = expect
hsp.sbjct_start = start
hsp.score = score
hsp.identities = ident
hsp.sbjct = seq
record.alignments = [entry]
return record
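    # Note: create_sample_record mirrors only the attributes of an NCBIXML-style
    # BLAST record that parse_result actually reads -- the record query, the
    # alignment title, and the per-HSP expect, sbjct_start, score, identities
    # and sbjct values; everything else on the mocks is left unspecified.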
class AMRLocationUploaderTestCase(unittest.TestCase):
def setUp(self):
self.case = AMRLocationUploader()
def tearDown(self):
del self.case
@mock.patch('superphy.upload.gene_location_upload.GeneLocationUploader.create_gene_location')
@mock.patch('superphy.upload.metadata_upload.json.load')
@mock.patch('superphy.upload.gene_location_upload.open')
def test_parse_result(self, mock_open, mock_load, mock_create):
mock_open.return_value = mock.MagicMock(spec=file)
mock_load.return_value = {
"gene_10|GeneMark.hmm|356_aa|+|5964|7034|gi|606959961|gb|JHNV01100083.1|\
Escherichia coli O119:H4 str. 03-3458 contig83, whole genome shotgun sequence": {},
"gene_12|GeneMark.hmm|304_aa|-|7759|8673|gi|606959961|gb|JHNV01100083.1|\
Escherichia coli O119:H4 str. 03-3458 contig83, whole genome shotgun sequence": {
"gnl|BL_ORD_ID|796|hsp_num:0": {
"SequenceFromBroadStreet": "MRKSTTLLIGFVKAAYRILQALDNKQ",
"orf_start": 7759,
"ARO_name": "OCH-3",
"type_match": "Loose",
"query": "TKQPLENILRVGSQGIASYVDGNTSFLGNGIESRILFDQQRPDNIIE",
"evalue": 4.07141,
"max-identities": 13,
"orf_strand": "-",
"bit-score": 25.0238,
"cvterm_id": "37077",
"sequenceFromDB": "TVRPLMAEQKIPGMAVAITIDGKSHFFGYGVASKESGQKVTEDTIFE",
"match": "T +PL ++ +A +DG + F G G+ S+ + D I E",
"model_id": "201",
"orf_From": "gi|606959961|gb|JHNV01100083.1|\
Escherichia coli O119:H4 str. 03-3458 contig83, whole genome shotgun sequence",
"pass_evalue": 1E-100,
"query_end": 8628,
"ARO_category": {
"36696": {
"category_aro_name": "antibiotic inactivation enzyme",
"category_aro_cvterm_id": "36696",
"category_aro_accession": "3000557",
"category_aro_description": "Enzyme."
},
"36268": {
"category_aro_name": "beta-lactam resistance gene",
"category_aro_cvterm_id": "36268",
"category_aro_accession": "3000129",
"category_aro_description": "Genes conferring resistance to beta-lactams."
}
},
"ARO_accession": "3002516",
"query_start": 8488,
"model_name": "OCH-3",
"model_type": "model-blastP",
"orf_end": 8673
}
}
}
self.case.parse_result()
mock_load.assert_called_with(mock.ANY)
mock_create.assert_called_with("OCH-3_JHNV01100083_0",
"OCH-3",
"JHNV01100083",
8673,
7759,
"",
False,
"Loose")
|
bzbarsky/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/fetch/api/resources/method.py
|
161
|
def main(request, response):
headers = []
if "cors" in request.GET:
headers.append(("Access-Control-Allow-Origin", "*"))
headers.append(("Access-Control-Allow-Credentials", "true"))
headers.append(("Access-Control-Allow-Methods", "GET, POST, PUT, FOO"))
headers.append(("Access-Control-Allow-Headers", "x-test, x-foo"))
headers.append(("Access-Control-Expose-Headers", "x-request-method"))
headers.append(("x-request-method", request.method))
return headers, request.body
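# Note: this wptserve handler echoes the request method back in an
# x-request-method response header (and, when "cors" is in the query string,
# adds the CORS headers that let a cross-origin fetch() read it) and returns
# the request body unchanged, so tests can assert which method and payload
# actually reached the server.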
|
codeforamerica/pdfhook
|
refs/heads/master
|
src/pdfhook/__init__.py
|
3
|
# -*- coding: utf-8 -*-
from flask import Blueprint
blueprint = Blueprint(
'pdfhook', __name__,
)
from . import views
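# Note: importing views at the bottom of the package is the usual Flask
# blueprint pattern -- the module-level `blueprint` object must exist before
# views.py (which presumably decorates its handlers with @blueprint.route)
# imports it, so the late import avoids a circular-import error.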
|
delinhabit/django-rest-framework
|
refs/heads/master
|
tests/test_pagination.py
|
59
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
from rest_framework import (
exceptions, filters, generics, pagination, serializers, status
)
from rest_framework.pagination import PAGE_BREAK, PageLink
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
factory = APIRequestFactory()
class TestPaginationIntegration:
"""
Integration tests.
"""
def setup(self):
class PassThroughSerializer(serializers.BaseSerializer):
def to_representation(self, item):
return item
class EvenItemsOnly(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
return [item for item in queryset if item % 2 == 0]
class BasicPagination(pagination.PageNumberPagination):
page_size = 5
page_size_query_param = 'page_size'
max_page_size = 20
self.view = generics.ListAPIView.as_view(
serializer_class=PassThroughSerializer,
queryset=range(1, 101),
filter_backends=[EvenItemsOnly],
pagination_class=BasicPagination
)
def test_filtered_items_are_paginated(self):
request = factory.get('/', {'page': 2})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [12, 14, 16, 18, 20],
'previous': 'http://testserver/',
'next': 'http://testserver/?page=3',
'count': 50
}
def test_setting_page_size(self):
"""
When 'paginate_by_param' is set, the client may choose a page size.
"""
request = factory.get('/', {'page_size': 10})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20],
'previous': None,
'next': 'http://testserver/?page=2&page_size=10',
'count': 50
}
def test_setting_page_size_over_maximum(self):
"""
        When the page_size parameter exceeds the maximum allowable,
        then it should be capped to the maximum.
"""
request = factory.get('/', {'page_size': 1000})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [
2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
22, 24, 26, 28, 30, 32, 34, 36, 38, 40
],
'previous': None,
'next': 'http://testserver/?page=2&page_size=1000',
'count': 50
}
def test_setting_page_size_to_zero(self):
"""
        When the page_size parameter is invalid, the default page size should be used.
"""
request = factory.get('/', {'page_size': 0})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [2, 4, 6, 8, 10],
'previous': None,
'next': 'http://testserver/?page=2&page_size=0',
'count': 50
}
def test_additional_query_params_are_preserved(self):
request = factory.get('/', {'page': 2, 'filter': 'even'})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [12, 14, 16, 18, 20],
'previous': 'http://testserver/?filter=even',
'next': 'http://testserver/?filter=even&page=3',
'count': 50
}
def test_404_not_found_for_zero_page(self):
request = factory.get('/', {'page': '0'})
response = self.view(request)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert response.data == {
'detail': 'Invalid page "0": That page number is less than 1.'
}
def test_404_not_found_for_invalid_page(self):
request = factory.get('/', {'page': 'invalid'})
response = self.view(request)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert response.data == {
'detail': 'Invalid page "invalid": That page number is not an integer.'
}
class TestPaginationDisabledIntegration:
"""
Integration tests for disabled pagination.
"""
def setup(self):
class PassThroughSerializer(serializers.BaseSerializer):
def to_representation(self, item):
return item
self.view = generics.ListAPIView.as_view(
serializer_class=PassThroughSerializer,
queryset=range(1, 101),
pagination_class=None
)
def test_unpaginated_list(self):
request = factory.get('/', {'page': 2})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == list(range(1, 101))
class TestDeprecatedStylePagination:
"""
Integration tests for deprecated style of setting pagination
attributes on the view.
"""
def setup(self):
class PassThroughSerializer(serializers.BaseSerializer):
def to_representation(self, item):
return item
class ExampleView(generics.ListAPIView):
serializer_class = PassThroughSerializer
queryset = range(1, 101)
pagination_class = pagination.PageNumberPagination
paginate_by = 20
page_query_param = 'page_number'
self.view = ExampleView.as_view()
def test_paginate_by_attribute_on_view(self):
request = factory.get('/?page_number=2')
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40
],
'previous': 'http://testserver/',
'next': 'http://testserver/?page_number=3',
'count': 100
}
class TestPageNumberPagination:
"""
Unit tests for `pagination.PageNumberPagination`.
"""
def setup(self):
class ExamplePagination(pagination.PageNumberPagination):
page_size = 5
self.pagination = ExamplePagination()
self.queryset = range(1, 101)
def paginate_queryset(self, request):
return list(self.pagination.paginate_queryset(self.queryset, request))
def get_paginated_content(self, queryset):
response = self.pagination.get_paginated_response(queryset)
return response.data
def get_html_context(self):
return self.pagination.get_html_context()
def test_no_page_number(self):
request = Request(factory.get('/'))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [1, 2, 3, 4, 5]
assert content == {
'results': [1, 2, 3, 4, 5],
'previous': None,
'next': 'http://testserver/?page=2',
'count': 100
}
assert context == {
'previous_url': None,
'next_url': 'http://testserver/?page=2',
'page_links': [
PageLink('http://testserver/', 1, True, False),
PageLink('http://testserver/?page=2', 2, False, False),
PageLink('http://testserver/?page=3', 3, False, False),
PAGE_BREAK,
PageLink('http://testserver/?page=20', 20, False, False),
]
}
assert self.pagination.display_page_controls
assert isinstance(self.pagination.to_html(), type(''))
def test_second_page(self):
request = Request(factory.get('/', {'page': 2}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [6, 7, 8, 9, 10]
assert content == {
'results': [6, 7, 8, 9, 10],
'previous': 'http://testserver/',
'next': 'http://testserver/?page=3',
'count': 100
}
assert context == {
'previous_url': 'http://testserver/',
'next_url': 'http://testserver/?page=3',
'page_links': [
PageLink('http://testserver/', 1, False, False),
PageLink('http://testserver/?page=2', 2, True, False),
PageLink('http://testserver/?page=3', 3, False, False),
PAGE_BREAK,
PageLink('http://testserver/?page=20', 20, False, False),
]
}
def test_last_page(self):
request = Request(factory.get('/', {'page': 'last'}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [96, 97, 98, 99, 100]
assert content == {
'results': [96, 97, 98, 99, 100],
'previous': 'http://testserver/?page=19',
'next': None,
'count': 100
}
assert context == {
'previous_url': 'http://testserver/?page=19',
'next_url': None,
'page_links': [
PageLink('http://testserver/', 1, False, False),
PAGE_BREAK,
PageLink('http://testserver/?page=18', 18, False, False),
PageLink('http://testserver/?page=19', 19, False, False),
PageLink('http://testserver/?page=20', 20, True, False),
]
}
def test_invalid_page(self):
request = Request(factory.get('/', {'page': 'invalid'}))
with pytest.raises(exceptions.NotFound):
self.paginate_queryset(request)
class TestLimitOffset:
"""
Unit tests for `pagination.LimitOffsetPagination`.
"""
def setup(self):
class ExamplePagination(pagination.LimitOffsetPagination):
default_limit = 10
max_limit = 15
self.pagination = ExamplePagination()
self.queryset = range(1, 101)
def paginate_queryset(self, request):
return list(self.pagination.paginate_queryset(self.queryset, request))
def get_paginated_content(self, queryset):
response = self.pagination.get_paginated_response(queryset)
return response.data
def get_html_context(self):
return self.pagination.get_html_context()
def test_no_offset(self):
request = Request(factory.get('/', {'limit': 5}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [1, 2, 3, 4, 5]
assert content == {
'results': [1, 2, 3, 4, 5],
'previous': None,
'next': 'http://testserver/?limit=5&offset=5',
'count': 100
}
assert context == {
'previous_url': None,
'next_url': 'http://testserver/?limit=5&offset=5',
'page_links': [
PageLink('http://testserver/?limit=5', 1, True, False),
PageLink('http://testserver/?limit=5&offset=5', 2, False, False),
PageLink('http://testserver/?limit=5&offset=10', 3, False, False),
PAGE_BREAK,
PageLink('http://testserver/?limit=5&offset=95', 20, False, False),
]
}
assert self.pagination.display_page_controls
assert isinstance(self.pagination.to_html(), type(''))
def test_single_offset(self):
"""
When the offset is not a multiple of the limit we get some edge cases:
* The first page should still be offset zero.
* We may end up displaying an extra page in the pagination control.
"""
request = Request(factory.get('/', {'limit': 5, 'offset': 1}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [2, 3, 4, 5, 6]
assert content == {
'results': [2, 3, 4, 5, 6],
'previous': 'http://testserver/?limit=5',
'next': 'http://testserver/?limit=5&offset=6',
'count': 100
}
assert context == {
'previous_url': 'http://testserver/?limit=5',
'next_url': 'http://testserver/?limit=5&offset=6',
'page_links': [
PageLink('http://testserver/?limit=5', 1, False, False),
PageLink('http://testserver/?limit=5&offset=1', 2, True, False),
PageLink('http://testserver/?limit=5&offset=6', 3, False, False),
PAGE_BREAK,
PageLink('http://testserver/?limit=5&offset=96', 21, False, False),
]
}
def test_first_offset(self):
request = Request(factory.get('/', {'limit': 5, 'offset': 5}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [6, 7, 8, 9, 10]
assert content == {
'results': [6, 7, 8, 9, 10],
'previous': 'http://testserver/?limit=5',
'next': 'http://testserver/?limit=5&offset=10',
'count': 100
}
assert context == {
'previous_url': 'http://testserver/?limit=5',
'next_url': 'http://testserver/?limit=5&offset=10',
'page_links': [
PageLink('http://testserver/?limit=5', 1, False, False),
PageLink('http://testserver/?limit=5&offset=5', 2, True, False),
PageLink('http://testserver/?limit=5&offset=10', 3, False, False),
PAGE_BREAK,
PageLink('http://testserver/?limit=5&offset=95', 20, False, False),
]
}
def test_middle_offset(self):
request = Request(factory.get('/', {'limit': 5, 'offset': 10}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [11, 12, 13, 14, 15]
assert content == {
'results': [11, 12, 13, 14, 15],
'previous': 'http://testserver/?limit=5&offset=5',
'next': 'http://testserver/?limit=5&offset=15',
'count': 100
}
assert context == {
'previous_url': 'http://testserver/?limit=5&offset=5',
'next_url': 'http://testserver/?limit=5&offset=15',
'page_links': [
PageLink('http://testserver/?limit=5', 1, False, False),
PageLink('http://testserver/?limit=5&offset=5', 2, False, False),
PageLink('http://testserver/?limit=5&offset=10', 3, True, False),
PageLink('http://testserver/?limit=5&offset=15', 4, False, False),
PAGE_BREAK,
PageLink('http://testserver/?limit=5&offset=95', 20, False, False),
]
}
def test_ending_offset(self):
request = Request(factory.get('/', {'limit': 5, 'offset': 95}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [96, 97, 98, 99, 100]
assert content == {
'results': [96, 97, 98, 99, 100],
'previous': 'http://testserver/?limit=5&offset=90',
'next': None,
'count': 100
}
assert context == {
'previous_url': 'http://testserver/?limit=5&offset=90',
'next_url': None,
'page_links': [
PageLink('http://testserver/?limit=5', 1, False, False),
PAGE_BREAK,
PageLink('http://testserver/?limit=5&offset=85', 18, False, False),
PageLink('http://testserver/?limit=5&offset=90', 19, False, False),
PageLink('http://testserver/?limit=5&offset=95', 20, True, False),
]
}
    def test_erroneous_offset(self):
request = Request(factory.get('/', {'limit': 5, 'offset': 1000}))
queryset = self.paginate_queryset(request)
self.get_paginated_content(queryset)
self.get_html_context()
def test_invalid_offset(self):
"""
An invalid offset query param should be treated as 0.
"""
request = Request(factory.get('/', {'limit': 5, 'offset': 'invalid'}))
queryset = self.paginate_queryset(request)
assert queryset == [1, 2, 3, 4, 5]
def test_invalid_limit(self):
"""
An invalid limit query param should be ignored in favor of the default.
"""
request = Request(factory.get('/', {'limit': 'invalid', 'offset': 0}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
next_limit = self.pagination.default_limit
next_offset = self.pagination.default_limit
next_url = 'http://testserver/?limit={0}&offset={1}'.format(next_limit, next_offset)
assert queryset == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert content.get('next') == next_url
def test_max_limit(self):
"""
The limit defaults to the max_limit when there is a max_limit and the
        requested limit is greater than the max_limit.
"""
offset = 50
request = Request(factory.get('/', {'limit': '11235', 'offset': offset}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
max_limit = self.pagination.max_limit
next_offset = offset + max_limit
prev_offset = offset - max_limit
base_url = 'http://testserver/?limit={0}'.format(max_limit)
next_url = base_url + '&offset={0}'.format(next_offset)
prev_url = base_url + '&offset={0}'.format(prev_offset)
assert queryset == list(range(51, 66))
assert content.get('next') == next_url
assert content.get('previous') == prev_url
class TestCursorPagination:
"""
Unit tests for `pagination.CursorPagination`.
"""
def setup(self):
class MockObject(object):
def __init__(self, idx):
self.created = idx
class MockQuerySet(object):
def __init__(self, items):
self.items = items
def filter(self, created__gt=None, created__lt=None):
if created__gt is not None:
return MockQuerySet([
item for item in self.items
if item.created > int(created__gt)
])
assert created__lt is not None
return MockQuerySet([
item for item in self.items
if item.created < int(created__lt)
])
def order_by(self, *ordering):
if ordering[0].startswith('-'):
return MockQuerySet(list(reversed(self.items)))
return self
def __getitem__(self, sliced):
return self.items[sliced]
class ExamplePagination(pagination.CursorPagination):
page_size = 5
ordering = 'created'
self.pagination = ExamplePagination()
self.queryset = MockQuerySet([
MockObject(idx) for idx in [
1, 1, 1, 1, 1,
1, 2, 3, 4, 4,
4, 4, 5, 6, 7,
7, 7, 7, 7, 7,
7, 7, 7, 8, 9,
9, 9, 9, 9, 9
]
])
def get_pages(self, url):
"""
Given a URL return a tuple of:
(previous page, current page, next page, previous url, next url)
"""
request = Request(factory.get(url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
current = [item.created for item in queryset]
next_url = self.pagination.get_next_link()
previous_url = self.pagination.get_previous_link()
if next_url is not None:
request = Request(factory.get(next_url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
next = [item.created for item in queryset]
else:
next = None
if previous_url is not None:
request = Request(factory.get(previous_url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
previous = [item.created for item in queryset]
else:
previous = None
return (previous, current, next, previous_url, next_url)
def test_invalid_cursor(self):
request = Request(factory.get('/', {'cursor': '123'}))
with pytest.raises(exceptions.NotFound):
self.pagination.paginate_queryset(self.queryset, request)
def test_use_with_ordering_filter(self):
class MockView:
filter_backends = (filters.OrderingFilter,)
ordering_fields = ['username', 'created']
ordering = 'created'
request = Request(factory.get('/', {'ordering': 'username'}))
ordering = self.pagination.get_ordering(request, [], MockView())
assert ordering == ('username',)
request = Request(factory.get('/', {'ordering': '-username'}))
ordering = self.pagination.get_ordering(request, [], MockView())
assert ordering == ('-username',)
request = Request(factory.get('/', {'ordering': 'invalid'}))
ordering = self.pagination.get_ordering(request, [], MockView())
assert ordering == ('created',)
def test_cursor_pagination(self):
(previous, current, next, previous_url, next_url) = self.get_pages('/')
assert previous is None
assert current == [1, 1, 1, 1, 1]
assert next == [1, 2, 3, 4, 4]
(previous, current, next, previous_url, next_url) = self.get_pages(next_url)
assert previous == [1, 1, 1, 1, 1]
assert current == [1, 2, 3, 4, 4]
assert next == [4, 4, 5, 6, 7]
(previous, current, next, previous_url, next_url) = self.get_pages(next_url)
assert previous == [1, 2, 3, 4, 4]
assert current == [4, 4, 5, 6, 7]
assert next == [7, 7, 7, 7, 7]
(previous, current, next, previous_url, next_url) = self.get_pages(next_url)
assert previous == [4, 4, 4, 5, 6] # Paging artifact
assert current == [7, 7, 7, 7, 7]
assert next == [7, 7, 7, 8, 9]
(previous, current, next, previous_url, next_url) = self.get_pages(next_url)
assert previous == [7, 7, 7, 7, 7]
assert current == [7, 7, 7, 8, 9]
assert next == [9, 9, 9, 9, 9]
(previous, current, next, previous_url, next_url) = self.get_pages(next_url)
assert previous == [7, 7, 7, 8, 9]
assert current == [9, 9, 9, 9, 9]
assert next is None
(previous, current, next, previous_url, next_url) = self.get_pages(previous_url)
assert previous == [7, 7, 7, 7, 7]
assert current == [7, 7, 7, 8, 9]
assert next == [9, 9, 9, 9, 9]
(previous, current, next, previous_url, next_url) = self.get_pages(previous_url)
assert previous == [4, 4, 5, 6, 7]
assert current == [7, 7, 7, 7, 7]
assert next == [8, 9, 9, 9, 9] # Paging artifact
(previous, current, next, previous_url, next_url) = self.get_pages(previous_url)
assert previous == [1, 2, 3, 4, 4]
assert current == [4, 4, 5, 6, 7]
assert next == [7, 7, 7, 7, 7]
(previous, current, next, previous_url, next_url) = self.get_pages(previous_url)
assert previous == [1, 1, 1, 1, 1]
assert current == [1, 2, 3, 4, 4]
assert next == [4, 4, 5, 6, 7]
(previous, current, next, previous_url, next_url) = self.get_pages(previous_url)
assert previous is None
assert current == [1, 1, 1, 1, 1]
assert next == [1, 2, 3, 4, 4]
assert isinstance(self.pagination.to_html(), type(''))
def test_get_displayed_page_numbers():
"""
Test our contextual page display function.
This determines which pages to display in a pagination control,
given the current page and the last page.
"""
displayed_page_numbers = pagination._get_displayed_page_numbers
# At five pages or less, all pages are displayed, always.
assert displayed_page_numbers(1, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(2, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(3, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(4, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(5, 5) == [1, 2, 3, 4, 5]
    # Between six and eight pages we may have a single page break.
assert displayed_page_numbers(1, 6) == [1, 2, 3, None, 6]
assert displayed_page_numbers(2, 6) == [1, 2, 3, None, 6]
assert displayed_page_numbers(3, 6) == [1, 2, 3, 4, 5, 6]
assert displayed_page_numbers(4, 6) == [1, 2, 3, 4, 5, 6]
assert displayed_page_numbers(5, 6) == [1, None, 4, 5, 6]
assert displayed_page_numbers(6, 6) == [1, None, 4, 5, 6]
assert displayed_page_numbers(1, 7) == [1, 2, 3, None, 7]
assert displayed_page_numbers(2, 7) == [1, 2, 3, None, 7]
assert displayed_page_numbers(3, 7) == [1, 2, 3, 4, None, 7]
assert displayed_page_numbers(4, 7) == [1, 2, 3, 4, 5, 6, 7]
assert displayed_page_numbers(5, 7) == [1, None, 4, 5, 6, 7]
assert displayed_page_numbers(6, 7) == [1, None, 5, 6, 7]
assert displayed_page_numbers(7, 7) == [1, None, 5, 6, 7]
assert displayed_page_numbers(1, 8) == [1, 2, 3, None, 8]
assert displayed_page_numbers(2, 8) == [1, 2, 3, None, 8]
assert displayed_page_numbers(3, 8) == [1, 2, 3, 4, None, 8]
assert displayed_page_numbers(4, 8) == [1, 2, 3, 4, 5, None, 8]
assert displayed_page_numbers(5, 8) == [1, None, 4, 5, 6, 7, 8]
assert displayed_page_numbers(6, 8) == [1, None, 5, 6, 7, 8]
assert displayed_page_numbers(7, 8) == [1, None, 6, 7, 8]
assert displayed_page_numbers(8, 8) == [1, None, 6, 7, 8]
# At nine or more pages we may have two page breaks, one on each side.
assert displayed_page_numbers(1, 9) == [1, 2, 3, None, 9]
assert displayed_page_numbers(2, 9) == [1, 2, 3, None, 9]
assert displayed_page_numbers(3, 9) == [1, 2, 3, 4, None, 9]
assert displayed_page_numbers(4, 9) == [1, 2, 3, 4, 5, None, 9]
assert displayed_page_numbers(5, 9) == [1, None, 4, 5, 6, None, 9]
assert displayed_page_numbers(6, 9) == [1, None, 5, 6, 7, 8, 9]
assert displayed_page_numbers(7, 9) == [1, None, 6, 7, 8, 9]
assert displayed_page_numbers(8, 9) == [1, None, 7, 8, 9]
assert displayed_page_numbers(9, 9) == [1, None, 7, 8, 9]
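# Note: taken together, the assertions above pin down the display rule -- the
# first and last page numbers are always shown, a small window around the
# current page is shown, and None (which the HTML context renders as
# PAGE_BREAK) stands in for any elided run of pages on either side.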
|
Cyrillic327/p2pool
|
refs/heads/master
|
p2pool/test/util/test_pack.py
|
283
|
import unittest
from p2pool.util import pack
class Test(unittest.TestCase):
def test_VarInt(self):
t = pack.VarIntType()
for i in xrange(2**20):
assert t.unpack(t.pack(i)) == i
for i in xrange(2**36, 2**36+25):
assert t.unpack(t.pack(i)) == i
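    def test_VarInt_boundaries(self):
        # A sketch, not part of the original test: round-trip the values at
        # the size-class boundaries of Bitcoin-style varints explicitly, using
        # only the pack/unpack API exercised above.
        t = pack.VarIntType()
        for i in [0, 0xfc, 0xfd, 0xffff, 0x10000, 0xffffffff, 0x100000000]:
            assert t.unpack(t.pack(i)) == i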
|
kawasaki2013/python-for-android-x86
|
refs/heads/master
|
python3-alpha/python3-src/Lib/lib2to3/pygram.py
|
170
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Export the Python grammar and symbols."""
# Python imports
import os
# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree
# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
class Symbols(object):
def __init__(self, grammar):
"""Initializer.
Creates an attribute for each grammar symbol (nonterminal),
whose value is the symbol's type (an int >= 256).
"""
for name, symbol in grammar.symbol2number.items():
setattr(self, name, symbol)
python_grammar = driver.load_grammar(_GRAMMAR_FILE)
python_symbols = Symbols(python_grammar)
python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]
pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
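# Note: python_grammar_no_print_statement is the variant lib2to3 uses when
# print is to be treated as a builtin function rather than a statement (e.g.
# under "from __future__ import print_function"); removing "print" from the
# keyword table is what lets print(x) parse as an ordinary call expression.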
|
weimingtom/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/conch/test/test_cftp.py
|
56
|
# -*- test-case-name: twisted.conch.test.test_cftp -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE file for details.
"""
Tests for L{twisted.conch.scripts.cftp}.
"""
import time, sys, os, operator, getpass, struct
from StringIO import StringIO
from twisted.conch.test.test_ssh import Crypto, pyasn1
_reason = None
if Crypto and pyasn1:
try:
from twisted.conch import unix
from twisted.conch.scripts import cftp
from twisted.conch.test.test_filetransfer import FileTransferForTestAvatar
except ImportError, e:
# Python 2.3 compatibility fix
sys.modules.pop("twisted.conch.unix", None)
unix = None
_reason = str(e)
del e
else:
unix = None
from twisted.python.fakepwd import UserDatabase
from twisted.trial.unittest import TestCase
from twisted.cred import portal
from twisted.internet import reactor, protocol, interfaces, defer, error
from twisted.internet.utils import getProcessOutputAndValue
from twisted.python import log
from twisted.conch import ls
from twisted.test.proto_helpers import StringTransport
from twisted.internet.task import Clock
from twisted.conch.test import test_ssh, test_conch
from twisted.conch.test.test_filetransfer import SFTPTestBase
from twisted.conch.test.test_filetransfer import FileTransferTestAvatar
class ListingTests(TestCase):
"""
Tests for L{lsLine}, the function which generates an entry for a file or
directory in an SFTP I{ls} command's output.
"""
if getattr(time, 'tzset', None) is None:
skip = "Cannot test timestamp formatting code without time.tzset"
def setUp(self):
"""
Patch the L{ls} module's time function so the results of L{lsLine} are
deterministic.
"""
self.now = 123456789
def fakeTime():
return self.now
self.patch(ls, 'time', fakeTime)
# Make sure that the timezone ends up the same after these tests as
# it was before.
if 'TZ' in os.environ:
self.addCleanup(operator.setitem, os.environ, 'TZ', os.environ['TZ'])
self.addCleanup(time.tzset)
else:
def cleanup():
# os.environ.pop is broken! Don't use it! Ever! Or die!
try:
del os.environ['TZ']
except KeyError:
pass
time.tzset()
self.addCleanup(cleanup)
def _lsInTimezone(self, timezone, stat):
"""
Call L{ls.lsLine} after setting the timezone to C{timezone} and return
the result.
"""
# Set the timezone to a well-known value so the timestamps are
# predictable.
os.environ['TZ'] = timezone
time.tzset()
return ls.lsLine('foo', stat)
def test_oldFile(self):
"""
A file with an mtime six months (approximately) or more in the past has
a listing including a low-resolution timestamp.
"""
# Go with 7 months. That's more than 6 months.
then = self.now - (60 * 60 * 24 * 31 * 7)
stat = os.stat_result((0, 0, 0, 0, 0, 0, 0, 0, then, 0))
self.assertEqual(
self._lsInTimezone('America/New_York', stat),
'!--------- 0 0 0 0 Apr 26 1973 foo')
self.assertEqual(
self._lsInTimezone('Pacific/Auckland', stat),
'!--------- 0 0 0 0 Apr 27 1973 foo')
def test_oldSingleDigitDayOfMonth(self):
"""
A file with a high-resolution timestamp which falls on a day of the
month which can be represented by one decimal digit is formatted with
one padding 0 to preserve the columns which come after it.
"""
# A point about 7 months in the past, tweaked to fall on the first of a
# month so we test the case we want to test.
then = self.now - (60 * 60 * 24 * 31 * 7) + (60 * 60 * 24 * 5)
stat = os.stat_result((0, 0, 0, 0, 0, 0, 0, 0, then, 0))
self.assertEqual(
self._lsInTimezone('America/New_York', stat),
'!--------- 0 0 0 0 May 01 1973 foo')
self.assertEqual(
self._lsInTimezone('Pacific/Auckland', stat),
'!--------- 0 0 0 0 May 02 1973 foo')
def test_newFile(self):
"""
A file with an mtime fewer than six months (approximately) in the past
has a listing including a high-resolution timestamp excluding the year.
"""
# A point about three months in the past.
then = self.now - (60 * 60 * 24 * 31 * 3)
stat = os.stat_result((0, 0, 0, 0, 0, 0, 0, 0, then, 0))
self.assertEqual(
self._lsInTimezone('America/New_York', stat),
'!--------- 0 0 0 0 Aug 28 17:33 foo')
self.assertEqual(
self._lsInTimezone('Pacific/Auckland', stat),
'!--------- 0 0 0 0 Aug 29 09:33 foo')
def test_newSingleDigitDayOfMonth(self):
"""
A file with a high-resolution timestamp which falls on a day of the
month which can be represented by one decimal digit is formatted with
one padding 0 to preserve the columns which come after it.
"""
# A point about three months in the past, tweaked to fall on the first
# of a month so we test the case we want to test.
then = self.now - (60 * 60 * 24 * 31 * 3) + (60 * 60 * 24 * 4)
stat = os.stat_result((0, 0, 0, 0, 0, 0, 0, 0, then, 0))
self.assertEqual(
self._lsInTimezone('America/New_York', stat),
'!--------- 0 0 0 0 Sep 01 17:33 foo')
self.assertEqual(
self._lsInTimezone('Pacific/Auckland', stat),
'!--------- 0 0 0 0 Sep 02 09:33 foo')
class StdioClientTests(TestCase):
"""
Tests for L{cftp.StdioClient}.
"""
def setUp(self):
"""
Create a L{cftp.StdioClient} hooked up to dummy transport and a fake
user database.
"""
class Connection:
pass
conn = Connection()
conn.transport = StringTransport()
conn.transport.localClosed = False
self.client = cftp.StdioClient(conn)
self.database = self.client._pwd = UserDatabase()
# Intentionally bypassing makeConnection - that triggers some code
# which uses features not provided by our dumb Connection fake.
self.client.transport = StringTransport()
def test_exec(self):
"""
The I{exec} command runs its arguments locally in a child process
using the user's shell.
"""
self.database.addUser(
getpass.getuser(), 'secret', os.getuid(), 1234, 'foo', 'bar',
sys.executable)
d = self.client._dispatchCommand("exec print 1 + 2")
d.addCallback(self.assertEquals, "3\n")
return d
def test_execWithoutShell(self):
"""
If the local user has no shell, the I{exec} command runs its arguments
using I{/bin/sh}.
"""
self.database.addUser(
getpass.getuser(), 'secret', os.getuid(), 1234, 'foo', 'bar', '')
d = self.client._dispatchCommand("exec echo hello")
d.addCallback(self.assertEquals, "hello\n")
return d
def test_bang(self):
"""
The I{exec} command is run for lines which start with C{"!"}.
"""
self.database.addUser(
getpass.getuser(), 'secret', os.getuid(), 1234, 'foo', 'bar',
'/bin/sh')
d = self.client._dispatchCommand("!echo hello")
d.addCallback(self.assertEquals, "hello\n")
return d
def setKnownConsoleSize(self, width, height):
"""
For the duration of this test, patch C{cftp}'s C{fcntl} module to return
a fixed width and height.
@param width: the width in characters
@type width: C{int}
@param height: the height in characters
@type height: C{int}
"""
import tty # local import to avoid win32 issues
class FakeFcntl(object):
def ioctl(self, fd, opt, mutate):
if opt != tty.TIOCGWINSZ:
self.fail("Only window-size queries supported.")
return struct.pack("4H", height, width, 0, 0)
self.patch(cftp, "fcntl", FakeFcntl())
def test_progressReporting(self):
"""
L{StdioClient._printProgressBar} prints a progress description,
including percent done, amount transferred, transfer rate, and time
remaining, all based the given start time, the given L{FileWrapper}'s
progress information and the reactor's current time.
"""
# Use a short, known console width because this simple test doesn't need
# to test the console padding.
self.setKnownConsoleSize(10, 34)
clock = self.client.reactor = Clock()
wrapped = StringIO("x")
wrapped.name = "sample"
wrapper = cftp.FileWrapper(wrapped)
wrapper.size = 1024 * 10
startTime = clock.seconds()
clock.advance(2.0)
wrapper.total += 4096
self.client._printProgressBar(wrapper, startTime)
self.assertEquals(self.client.transport.value(),
"\rsample 40% 4.0kB 2.0kBps 00:03 ")
def test_reportNoProgress(self):
"""
L{StdioClient._printProgressBar} prints a progress description that
indicates 0 bytes transferred if no bytes have been transferred and no
time has passed.
"""
self.setKnownConsoleSize(10, 34)
clock = self.client.reactor = Clock()
wrapped = StringIO("x")
wrapped.name = "sample"
wrapper = cftp.FileWrapper(wrapped)
startTime = clock.seconds()
self.client._printProgressBar(wrapper, startTime)
self.assertEquals(self.client.transport.value(),
"\rsample 0% 0.0B 0.0Bps 00:00 ")
class FileTransferTestRealm:
def __init__(self, testDir):
self.testDir = testDir
def requestAvatar(self, avatarID, mind, *interfaces):
a = FileTransferTestAvatar(self.testDir)
return interfaces[0], a, lambda: None
class SFTPTestProcess(protocol.ProcessProtocol):
"""
Protocol for testing cftp. Provides an interface between Python (where all
the tests are) and the cftp client process (which does the work that is
being tested).
"""
def __init__(self, onOutReceived):
"""
@param onOutReceived: A L{Deferred} to be fired as soon as data is
received from stdout.
"""
self.clearBuffer()
self.onOutReceived = onOutReceived
self.onProcessEnd = None
self._expectingCommand = None
self._processEnded = False
def clearBuffer(self):
"""
Clear any buffered data received from stdout. Should be private.
"""
self.buffer = ''
self._linesReceived = []
self._lineBuffer = ''
def outReceived(self, data):
"""
Called by Twisted when the cftp client prints data to stdout.
"""
log.msg('got %s' % data)
lines = (self._lineBuffer + data).split('\n')
self._lineBuffer = lines.pop(-1)
self._linesReceived.extend(lines)
# XXX - not strictly correct.
# We really want onOutReceived to fire after the first 'cftp>' prompt
# has been received. (See use in TestOurServerCmdLineClient.setUp)
if self.onOutReceived is not None:
d, self.onOutReceived = self.onOutReceived, None
d.callback(data)
self.buffer += data
self._checkForCommand()
def _checkForCommand(self):
prompt = 'cftp> '
if self._expectingCommand and self._lineBuffer == prompt:
buf = '\n'.join(self._linesReceived)
if buf.startswith(prompt):
buf = buf[len(prompt):]
self.clearBuffer()
d, self._expectingCommand = self._expectingCommand, None
d.callback(buf)
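    # Note: runCommand/_checkForCommand implement a small line-oriented
    # protocol with the child cftp process -- output is buffered until the
    # bare 'cftp> ' prompt reappears as the pending line, at which point
    # everything received since the command was written (minus a leading
    # prompt, if present) is delivered to the waiting Deferred.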
def errReceived(self, data):
"""
Called by Twisted when the cftp client prints data to stderr.
"""
log.msg('err: %s' % data)
def getBuffer(self):
"""
Return the contents of the buffer of data received from stdout.
"""
return self.buffer
def runCommand(self, command):
"""
Issue the given command via the cftp client. Return a C{Deferred} that
fires when the server returns a result. Note that the C{Deferred} will
callback even if the server returns some kind of error.
@param command: A string containing an sftp command.
@return: A C{Deferred} that fires when the sftp server returns a
result. The payload is the server's response string.
"""
self._expectingCommand = defer.Deferred()
self.clearBuffer()
self.transport.write(command + '\n')
return self._expectingCommand
def runScript(self, commands):
"""
Run each command in sequence and return a Deferred that fires when all
commands are completed.
@param commands: A list of strings containing sftp commands.
@return: A C{Deferred} that fires when all commands are completed. The
payload is a list of response strings from the server, in the same
order as the commands.
"""
sem = defer.DeferredSemaphore(1)
dl = [sem.run(self.runCommand, command) for command in commands]
return defer.gatherResults(dl)
def killProcess(self):
"""
Kill the process if it is still running.
If the process is still running, sends a KILL signal to the transport
and returns a C{Deferred} which fires when L{processEnded} is called.
@return: a C{Deferred}.
"""
if self._processEnded:
return defer.succeed(None)
self.onProcessEnd = defer.Deferred()
self.transport.signalProcess('KILL')
return self.onProcessEnd
def processEnded(self, reason):
"""
Called by Twisted when the cftp client process ends.
"""
self._processEnded = True
if self.onProcessEnd:
d, self.onProcessEnd = self.onProcessEnd, None
d.callback(None)
class CFTPClientTestBase(SFTPTestBase):
def setUp(self):
f = open('dsa_test.pub','w')
f.write(test_ssh.publicDSA_openssh)
f.close()
f = open('dsa_test','w')
f.write(test_ssh.privateDSA_openssh)
f.close()
os.chmod('dsa_test', 33152)
f = open('kh_test','w')
f.write('127.0.0.1 ' + test_ssh.publicRSA_openssh)
f.close()
return SFTPTestBase.setUp(self)
def startServer(self):
realm = FileTransferTestRealm(self.testDir)
p = portal.Portal(realm)
p.registerChecker(test_ssh.ConchTestPublicKeyChecker())
fac = test_ssh.ConchTestServerFactory()
fac.portal = p
self.server = reactor.listenTCP(0, fac, interface="127.0.0.1")
def stopServer(self):
if not hasattr(self.server.factory, 'proto'):
return self._cbStopServer(None)
self.server.factory.proto.expectedLoseConnection = 1
d = defer.maybeDeferred(
self.server.factory.proto.transport.loseConnection)
d.addCallback(self._cbStopServer)
return d
def _cbStopServer(self, ignored):
return defer.maybeDeferred(self.server.stopListening)
def tearDown(self):
for f in ['dsa_test.pub', 'dsa_test', 'kh_test']:
try:
os.remove(f)
except:
pass
return SFTPTestBase.tearDown(self)
class TestOurServerCmdLineClient(CFTPClientTestBase):
def setUp(self):
CFTPClientTestBase.setUp(self)
self.startServer()
cmds = ('-p %i -l testuser '
'--known-hosts kh_test '
'--user-authentications publickey '
'--host-key-algorithms ssh-rsa '
'-i dsa_test '
'-a '
'-v '
'127.0.0.1')
port = self.server.getHost().port
cmds = test_conch._makeArgs((cmds % port).split(), mod='cftp')
log.msg('running %s %s' % (sys.executable, cmds))
d = defer.Deferred()
self.processProtocol = SFTPTestProcess(d)
d.addCallback(lambda _: self.processProtocol.clearBuffer())
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(sys.path)
reactor.spawnProcess(self.processProtocol, sys.executable, cmds,
env=env)
return d
def tearDown(self):
d = self.stopServer()
d.addCallback(lambda _: self.processProtocol.killProcess())
return d
def _killProcess(self, ignored):
try:
self.processProtocol.transport.signalProcess('KILL')
except error.ProcessExitedAlready:
pass
def runCommand(self, command):
"""
Run the given command with the cftp client. Return a C{Deferred} that
fires when the command is complete. Payload is the server's output for
that command.
"""
return self.processProtocol.runCommand(command)
def runScript(self, *commands):
"""
Run the given commands with the cftp client. Returns a C{Deferred}
that fires when the commands are all complete. The C{Deferred}'s
payload is a list of output for each command.
"""
return self.processProtocol.runScript(commands)
def testCdPwd(self):
"""
Test that 'pwd' reports the current remote directory, that 'lpwd'
reports the current local directory, and that changing to a
subdirectory then changing to its parent leaves you in the original
remote directory.
"""
# XXX - not actually a unit test, see docstring.
homeDir = os.path.join(os.getcwd(), self.testDir)
d = self.runScript('pwd', 'lpwd', 'cd testDirectory', 'cd ..', 'pwd')
d.addCallback(lambda xs: xs[:3] + xs[4:])
d.addCallback(self.assertEqual,
[homeDir, os.getcwd(), '', homeDir])
return d
def testChAttrs(self):
"""
Check that 'ls -l' output includes the access permissions and that
this output changes appropriately with 'chmod'.
"""
def _check(results):
self.flushLoggedErrors()
self.assertTrue(results[0].startswith('-rw-r--r--'))
self.assertEqual(results[1], '')
self.assertTrue(results[2].startswith('----------'), results[2])
self.assertEqual(results[3], '')
d = self.runScript('ls -l testfile1', 'chmod 0 testfile1',
'ls -l testfile1', 'chmod 644 testfile1')
return d.addCallback(_check)
# XXX test chgrp/own
def testList(self):
"""
Check 'ls' works as expected. Checks for wildcards, hidden files,
listing directories and listing empty directories.
"""
def _check(results):
self.assertEqual(results[0], ['testDirectory', 'testRemoveFile',
'testRenameFile', 'testfile1'])
self.assertEqual(results[1], ['testDirectory', 'testRemoveFile',
'testRenameFile', 'testfile1'])
self.assertEqual(results[2], ['testRemoveFile', 'testRenameFile'])
self.assertEqual(results[3], ['.testHiddenFile', 'testRemoveFile',
'testRenameFile'])
self.assertEqual(results[4], [''])
d = self.runScript('ls', 'ls ../' + os.path.basename(self.testDir),
'ls *File', 'ls -a *File', 'ls -l testDirectory')
d.addCallback(lambda xs: [x.split('\n') for x in xs])
return d.addCallback(_check)
def testHelp(self):
"""
Check that running the '?' command returns help.
"""
d = self.runCommand('?')
d.addCallback(self.assertEqual,
cftp.StdioClient(None).cmd_HELP('').strip())
return d
def assertFilesEqual(self, name1, name2, msg=None):
"""
Assert that the files at C{name1} and C{name2} contain exactly the
same data.
"""
f1 = file(name1).read()
f2 = file(name2).read()
self.failUnlessEqual(f1, f2, msg)
def testGet(self):
"""
Test that 'get' saves the remote file to the correct local location,
that the output of 'get' is correct and that 'rm' actually removes
the file.
"""
# XXX - not actually a unit test
expectedOutput = ("Transferred %s/%s/testfile1 to %s/test file2"
% (os.getcwd(), self.testDir, self.testDir))
def _checkGet(result):
self.assertTrue(result.endswith(expectedOutput))
self.assertFilesEqual(self.testDir + '/testfile1',
self.testDir + '/test file2',
"get failed")
return self.runCommand('rm "test file2"')
d = self.runCommand('get testfile1 "%s/test file2"' % (self.testDir,))
d.addCallback(_checkGet)
d.addCallback(lambda _: self.failIf(
os.path.exists(self.testDir + '/test file2')))
return d
def testWildcardGet(self):
"""
Test that 'get' works correctly when given wildcard parameters.
"""
def _check(ignored):
self.assertFilesEqual(self.testDir + '/testRemoveFile',
'testRemoveFile',
'testRemoveFile get failed')
self.assertFilesEqual(self.testDir + '/testRenameFile',
'testRenameFile',
'testRenameFile get failed')
d = self.runCommand('get testR*')
return d.addCallback(_check)
def testPut(self):
"""
Check that 'put' uploads files correctly and that they can be
successfully removed. Also check the output of the put command.
"""
# XXX - not actually a unit test
expectedOutput = ('Transferred %s/testfile1 to %s/%s/test"file2'
% (self.testDir, os.getcwd(), self.testDir))
def _checkPut(result):
self.assertFilesEqual(self.testDir + '/testfile1',
self.testDir + '/test"file2')
self.failUnless(result.endswith(expectedOutput))
return self.runCommand('rm "test\\"file2"')
d = self.runCommand('put %s/testfile1 "test\\"file2"'
% (self.testDir,))
d.addCallback(_checkPut)
d.addCallback(lambda _: self.failIf(
os.path.exists(self.testDir + '/test"file2')))
return d
def test_putOverLongerFile(self):
"""
Check that 'put' uploads files correctly when overwriting a longer
file.
"""
# XXX - not actually a unit test
f = file(os.path.join(self.testDir, 'shorterFile'), 'w')
f.write("a")
f.close()
f = file(os.path.join(self.testDir, 'longerFile'), 'w')
f.write("bb")
f.close()
def _checkPut(result):
self.assertFilesEqual(self.testDir + '/shorterFile',
self.testDir + '/longerFile')
d = self.runCommand('put %s/shorterFile longerFile'
% (self.testDir,))
d.addCallback(_checkPut)
return d
def test_putMultipleOverLongerFile(self):
"""
Check that 'put' uploads files correctly when overwriting a longer
file and you use a wildcard to specify the files to upload.
"""
# XXX - not actually a unit test
os.mkdir(os.path.join(self.testDir, 'dir'))
f = file(os.path.join(self.testDir, 'dir', 'file'), 'w')
f.write("a")
f.close()
f = file(os.path.join(self.testDir, 'file'), 'w')
f.write("bb")
f.close()
def _checkPut(result):
self.assertFilesEqual(self.testDir + '/dir/file',
self.testDir + '/file')
d = self.runCommand('put %s/dir/*'
% (self.testDir,))
d.addCallback(_checkPut)
return d
def testWildcardPut(self):
"""
        Issue a 'put' command that includes a wildcard (i.e. '*') in its
        parameter, and check that all files matching the wildcard are
        uploaded to the correct directory.
"""
def check(results):
self.assertEqual(results[0], '')
self.assertEqual(results[2], '')
self.assertFilesEqual(self.testDir + '/testRemoveFile',
self.testDir + '/../testRemoveFile',
'testRemoveFile get failed')
self.assertFilesEqual(self.testDir + '/testRenameFile',
self.testDir + '/../testRenameFile',
'testRenameFile get failed')
d = self.runScript('cd ..',
'put %s/testR*' % (self.testDir,),
'cd %s' % os.path.basename(self.testDir))
d.addCallback(check)
return d
def testLink(self):
"""
Test that 'ln' creates a file which appears as a link in the output of
'ls'. Check that removing the new file succeeds without output.
"""
def _check(results):
self.flushLoggedErrors()
self.assertEqual(results[0], '')
self.assertTrue(results[1].startswith('l'), 'link failed')
return self.runCommand('rm testLink')
d = self.runScript('ln testLink testfile1', 'ls -l testLink')
d.addCallback(_check)
d.addCallback(self.assertEqual, '')
return d
def testRemoteDirectory(self):
"""
Test that we can create and remove directories with the cftp client.
"""
def _check(results):
self.assertEqual(results[0], '')
self.assertTrue(results[1].startswith('d'))
return self.runCommand('rmdir testMakeDirectory')
d = self.runScript('mkdir testMakeDirectory',
'ls -l testMakeDirector?')
d.addCallback(_check)
d.addCallback(self.assertEqual, '')
return d
def test_existingRemoteDirectory(self):
"""
Test that a C{mkdir} on an existing directory fails with the
        appropriate error, and doesn't log a useless error server-side.
"""
def _check(results):
self.assertEquals(results[0], '')
self.assertEquals(results[1],
'remote error 11: mkdir failed')
d = self.runScript('mkdir testMakeDirectory',
'mkdir testMakeDirectory')
d.addCallback(_check)
return d
def testLocalDirectory(self):
"""
Test that we can create a directory locally and remove it with the
cftp client. This test works because the 'remote' server is running
out of a local directory.
"""
d = self.runCommand('lmkdir %s/testLocalDirectory' % (self.testDir,))
d.addCallback(self.assertEqual, '')
d.addCallback(lambda _: self.runCommand('rmdir testLocalDirectory'))
d.addCallback(self.assertEqual, '')
return d
def testRename(self):
"""
Test that we can rename a file.
"""
def _check(results):
self.assertEqual(results[0], '')
self.assertEqual(results[1], 'testfile2')
return self.runCommand('rename testfile2 testfile1')
d = self.runScript('rename testfile1 testfile2', 'ls testfile?')
d.addCallback(_check)
d.addCallback(self.assertEqual, '')
return d
class TestOurServerBatchFile(CFTPClientTestBase):
def setUp(self):
CFTPClientTestBase.setUp(self)
self.startServer()
def tearDown(self):
CFTPClientTestBase.tearDown(self)
return self.stopServer()
def _getBatchOutput(self, f):
fn = self.mktemp()
open(fn, 'w').write(f)
port = self.server.getHost().port
cmds = ('-p %i -l testuser '
'--known-hosts kh_test '
'--user-authentications publickey '
'--host-key-algorithms ssh-rsa '
'-i dsa_test '
'-a '
'-v -b %s 127.0.0.1') % (port, fn)
cmds = test_conch._makeArgs(cmds.split(), mod='cftp')[1:]
log.msg('running %s %s' % (sys.executable, cmds))
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(sys.path)
self.server.factory.expectedLoseConnection = 1
d = getProcessOutputAndValue(sys.executable, cmds, env=env)
def _cleanup(res):
os.remove(fn)
return res
d.addCallback(lambda res: res[0])
d.addBoth(_cleanup)
return d
def testBatchFile(self):
"""Test whether batch file function of cftp ('cftp -b batchfile').
This works by treating the file as a list of commands to be run.
"""
cmds = """pwd
ls
exit
"""
def _cbCheckResult(res):
res = res.split('\n')
log.msg('RES %s' % str(res))
self.failUnless(res[1].find(self.testDir) != -1, repr(res))
self.failUnlessEqual(res[3:-2], ['testDirectory', 'testRemoveFile',
'testRenameFile', 'testfile1'])
d = self._getBatchOutput(cmds)
d.addCallback(_cbCheckResult)
return d
def testError(self):
"""Test that an error in the batch file stops running the batch.
"""
cmds = """chown 0 missingFile
pwd
exit
"""
def _cbCheckResult(res):
self.failIf(res.find(self.testDir) != -1)
d = self._getBatchOutput(cmds)
d.addCallback(_cbCheckResult)
return d
def testIgnoredError(self):
"""Test that a minus sign '-' at the front of a line ignores
any errors.
"""
cmds = """-chown 0 missingFile
pwd
exit
"""
def _cbCheckResult(res):
self.failIf(res.find(self.testDir) == -1)
d = self._getBatchOutput(cmds)
d.addCallback(_cbCheckResult)
return d
class TestOurServerSftpClient(CFTPClientTestBase):
"""
Test the sftp server against sftp command line client.
"""
def setUp(self):
CFTPClientTestBase.setUp(self)
return self.startServer()
def tearDown(self):
return self.stopServer()
def test_extendedAttributes(self):
"""
Test the return of extended attributes by the server: the sftp client
should ignore them, but still be able to parse the response correctly.
This test is mainly here to check that
L{filetransfer.FILEXFER_ATTR_EXTENDED} has the correct value.
"""
fn = self.mktemp()
open(fn, 'w').write("ls .\nexit")
port = self.server.getHost().port
oldGetAttr = FileTransferForTestAvatar._getAttrs
def _getAttrs(self, s):
attrs = oldGetAttr(self, s)
attrs["ext_foo"] = "bar"
return attrs
self.patch(FileTransferForTestAvatar, "_getAttrs", _getAttrs)
self.server.factory.expectedLoseConnection = True
cmds = ('-o', 'IdentityFile=dsa_test',
'-o', 'UserKnownHostsFile=kh_test',
'-o', 'HostKeyAlgorithms=ssh-rsa',
'-o', 'Port=%i' % (port,), '-b', fn, 'testuser@127.0.0.1')
d = getProcessOutputAndValue("sftp", cmds)
def check(result):
self.assertEquals(result[2], 0)
for i in ['testDirectory', 'testRemoveFile',
'testRenameFile', 'testfile1']:
self.assertIn(i, result[0])
return d.addCallback(check)
if unix is None or Crypto is None or pyasn1 is None or interfaces.IReactorProcess(reactor, None) is None:
if _reason is None:
_reason = "don't run w/o spawnProcess or PyCrypto or pyasn1"
TestOurServerCmdLineClient.skip = _reason
TestOurServerBatchFile.skip = _reason
TestOurServerSftpClient.skip = _reason
StdioClientTests.skip = _reason
else:
from twisted.python.procutils import which
if not which('sftp'):
TestOurServerSftpClient.skip = "no sftp command-line client available"
|
cr/fxos-certsuite
|
refs/heads/master
|
webapi_tests/semiauto/environment.py
|
7
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import time
import threading
from mozlog.structured import structuredlog
from tornado.ioloop import IOLoop
from server import FrontendServer
"""Used to hold a TestEnvironment in a static field."""
env = None
class EnvironmentError(Exception):
pass
def get(environ, *args, **kwargs):
global env
if not env:
env = environ(*args, **kwargs)
env.start()
assert env.is_alive()
timeout = kwargs.pop("timeout", 10)
wait = 0
    # Poll until the frontend server reports that it is alive, giving up once
    # the accumulated `wait` counter reaches the timeout.
    while not env.server.is_alive() and wait < timeout:
wait += 0.1
time.sleep(wait)
if not env.server.is_alive():
raise EnvironmentError("Starting server failed")
return env
class InProcessTestEnvironment(object):
def __init__(self, addr=None, server_cls=None, io_loop=None, verbose=False):
self.io_loop = io_loop or IOLoop()
self.started = False
self.handler = None
if addr is None:
addr = ("127.0.0.1", 0)
if server_cls is None:
server_cls = FrontendServer
self.server = server_cls(addr, io_loop=self.io_loop,
verbose=verbose)
def start(self, block=False):
"""Start the test environment.
:param block: True to run the server on the current thread,
blocking, False to run on a separate thread.
"""
self.started = True
if block:
self.server.start()
else:
self.server_thread = threading.Thread(target=self.server.start)
self.server_thread.daemon = True # don't hang on exit
self.server_thread.start()
def stop(self):
"""Stop the test environment. If the test environment is
not running, this method has no effect."""
if self.started:
try:
self.server.stop()
self.server_thread.join()
self.server_thread = None
except AttributeError:
pass
self.started = False
self.server = None
def is_alive(self):
return self.started
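# Illustrative sketch (not part of the original module): how a caller might
# drive the non-blocking mode. The helper name below is hypothetical.
def _example_nonblocking_usage():
    environment = InProcessTestEnvironment()
    # start(block=False) runs the server on a daemon thread and returns.
    environment.start(block=False)
    try:
        print("server listening on %s" % ":".join(str(i) for i in environment.server.addr))
    finally:
        environment.stop()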
if __name__ == "__main__":
structuredlog.set_default_logger()
env = InProcessTestEnvironment()
print("Listening on %s" % ":".join(str(i) for i in env.server.addr))
# We ask the environment to block here so that the program won't
# end immediately.
env.start(block=True)
|
eriksf/atmosphere-cli
|
refs/heads/develop
|
tests/test_projects.py
|
1
|
import json
from .mock_server import get_free_port, start_mock_server
from atmosphere.api import AtmosphereAPI
from atmosphere.main import AtmosphereApp
from atmosphere.project import ProjectList
class TestProjects(object):
@classmethod
def setup_class(cls):
cls.mock_server_port = get_free_port()
cls.mock_users_base_url = 'http://localhost:{port}'.format(port=cls.mock_server_port)
cls.mock_users_bad_base_url = 'http://localhosty:{port}'.format(port=cls.mock_server_port)
start_mock_server(cls.mock_server_port)
def test_project_list_description(self):
app = AtmosphereApp()
project_list = ProjectList(app, None)
assert project_list.get_description() == 'List projects for a user.'
def test_getting_projects_when_response_is_not_ok(self):
api = AtmosphereAPI('token', base_url=self.mock_users_bad_base_url)
response = api.get_projects()
assert not response.ok
def test_getting_projects_when_response_is_ok(self):
api = AtmosphereAPI('token', base_url=self.mock_users_base_url)
response = api.get_projects()
assert response.ok
assert response.message['count'] == 2 and response.message['results'][0]['name'] == 'myfirstproject'
def test_getting_project_when_response_is_not_ok(self):
api = AtmosphereAPI('token', base_url=self.mock_users_bad_base_url)
response = api.get_project(2)
assert not response.ok
def test_getting_project_when_response_is_ok(self):
api = AtmosphereAPI('token', base_url=self.mock_users_base_url)
response = api.get_project(2)
assert response.ok
assert response.message['id'] == 2 and response.message['name'] == 'myfirstproject'
def test_creating_project_when_response_is_not_ok(self):
api = AtmosphereAPI('token', base_url=self.mock_users_base_url)
payload = {
'name': '',
'description': 'my first project',
'owner': 'eriksf'
}
response = api.create_project(json.dumps(payload))
assert not response.ok
assert response.message['name'][0] == 'This field may not be blank.'
def test_creating_project_when_owner_is_invalid(self):
api = AtmosphereAPI('token', base_url=self.mock_users_base_url)
payload = {
'name': 'myfirstproject',
'description': 'my first project',
'owner': 'xxxxx'
}
response = api.create_project(json.dumps(payload))
assert not response.ok
assert response.message['owner'][0] == "Group with Field: name 'xxxxx' does not exist."
def test_creating_project_when_response_is_ok(self):
api = AtmosphereAPI('token', base_url=self.mock_users_base_url)
payload = {
'name': 'myfirstproject',
'description': 'my first project',
'owner': 'eriksf'
}
response = api.create_project(json.dumps(payload))
assert response.ok
assert response.message['id'] == 2 and response.message['name'] == 'myfirstproject'
|
fatcloud/PyCV-time
|
refs/heads/master
|
opencv-official-samples/3.0.0/logpolar.py
|
8
|
#!/usr/bin/env python
import cv2
if __name__ == '__main__':
import sys
try:
fn = sys.argv[1]
    except IndexError:
fn = '../data/fruits.jpg'
img = cv2.imread(fn)
if img is None:
print 'Failed to load image file:', fn
sys.exit(1)
    # logPolar/linearPolar take the transform center as an (x, y) point,
    # i.e. (width/2, height/2) = (img.shape[1]/2, img.shape[0]/2).
    img2 = cv2.logPolar(img, (img.shape[1]/2, img.shape[0]/2), 40, cv2.WARP_FILL_OUTLIERS)
    img3 = cv2.linearPolar(img, (img.shape[1]/2, img.shape[0]/2), 40, cv2.WARP_FILL_OUTLIERS)
cv2.imshow('before', img)
cv2.imshow('logpolar', img2)
cv2.imshow('linearpolar', img3)
cv2.waitKey(0)
|
fmierlo/django-default-settings
|
refs/heads/master
|
release/1.3/app/models.py
|
10644
|
from django.db import models
# Create your models here.
|
sio2project/oioioi
|
refs/heads/master
|
oioioi/plagiarism/urls.py
|
1
|
from django.conf.urls import url
from oioioi.plagiarism import views
app_name = 'plagiarism'
contest_patterns = [
url(
r'^moss_submit/$',
views.moss_submit,
name='moss_submit',
),
]
|