from test import test_support
from tokenize import (untokenize, generate_tokens, NUMBER, NAME, OP,
STRING, ENDMARKER, tok_name, Untokenizer, tokenize)
from StringIO import StringIO
import os
from unittest import TestCase
class TokenizeTest(TestCase):
# Tests for the tokenize module.
# The tests can be really simple. Given a small fragment of source
# code, print out a table with tokens. The ENDMARKER is omitted for
# brevity.
def check_tokenize(self, s, expected):
# Format the tokens in s in a table format.
# The ENDMARKER is omitted.
result = []
f = StringIO(s)
for type, token, start, end, line in generate_tokens(f.readline):
if type == ENDMARKER:
break
type = tok_name[type]
result.append(" %(type)-10.10s %(token)-13.13r %(start)s %(end)s" %
locals())
self.assertEqual(result,
expected.rstrip().splitlines())
def test_basic(self):
self.check_tokenize("1 + 1", """\
NUMBER '1' (1, 0) (1, 1)
OP '+' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
""")
self.check_tokenize("if False:\n"
" # NL\n"
" True = False # NEWLINE\n", """\
NAME 'if' (1, 0) (1, 2)
NAME 'False' (1, 3) (1, 8)
OP ':' (1, 8) (1, 9)
NEWLINE '\\n' (1, 9) (1, 10)
COMMENT '# NL' (2, 4) (2, 8)
NL '\\n' (2, 8) (2, 9)
INDENT ' ' (3, 0) (3, 4)
NAME 'True' (3, 4) (3, 8)
OP '=' (3, 9) (3, 10)
NAME 'False' (3, 11) (3, 16)
COMMENT '# NEWLINE' (3, 17) (3, 26)
NEWLINE '\\n' (3, 26) (3, 27)
DEDENT '' (4, 0) (4, 0)
""")
indent_error_file = """\
def k(x):
x += 2
x += 5
"""
with self.assertRaisesRegexp(IndentationError,
"unindent does not match any "
"outer indentation level"):
for tok in generate_tokens(StringIO(indent_error_file).readline):
pass
def test_int(self):
# Ordinary integers and binary operators
self.check_tokenize("0xff <= 255", """\
NUMBER '0xff' (1, 0) (1, 4)
OP '<=' (1, 5) (1, 7)
NUMBER '255' (1, 8) (1, 11)
""")
self.check_tokenize("0b10 <= 255", """\
NUMBER '0b10' (1, 0) (1, 4)
OP '<=' (1, 5) (1, 7)
NUMBER '255' (1, 8) (1, 11)
""")
self.check_tokenize("0o123 <= 0123", """\
NUMBER '0o123' (1, 0) (1, 5)
OP '<=' (1, 6) (1, 8)
NUMBER '0123' (1, 9) (1, 13)
""")
self.check_tokenize("01234567 > ~0x15", """\
NUMBER '01234567' (1, 0) (1, 8)
OP '>' (1, 9) (1, 10)
OP '~' (1, 11) (1, 12)
NUMBER '0x15' (1, 12) (1, 16)
""")
self.check_tokenize("2134568 != 01231515", """\
NUMBER '2134568' (1, 0) (1, 7)
OP '!=' (1, 8) (1, 10)
NUMBER '01231515' (1, 11) (1, 19)
""")
self.check_tokenize("(-124561-1) & 0200000000", """\
OP '(' (1, 0) (1, 1)
OP '-' (1, 1) (1, 2)
NUMBER '124561' (1, 2) (1, 8)
OP '-' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP ')' (1, 10) (1, 11)
OP '&' (1, 12) (1, 13)
NUMBER '0200000000' (1, 14) (1, 24)
""")
self.check_tokenize("0xdeadbeef != -1", """\
NUMBER '0xdeadbeef' (1, 0) (1, 10)
OP '!=' (1, 11) (1, 13)
OP '-' (1, 14) (1, 15)
NUMBER '1' (1, 15) (1, 16)
""")
self.check_tokenize("0xdeadc0de & 012345", """\
NUMBER '0xdeadc0de' (1, 0) (1, 10)
OP '&' (1, 11) (1, 12)
NUMBER '012345' (1, 13) (1, 19)
""")
self.check_tokenize("0xFF & 0x15 | 1234", """\
NUMBER '0xFF' (1, 0) (1, 4)
OP '&' (1, 5) (1, 6)
NUMBER '0x15' (1, 7) (1, 11)
OP '|' (1, 12) (1, 13)
NUMBER '1234' (1, 14) (1, 18)
""")
def test_long(self):
# Long integers
self.check_tokenize("x = 0L", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '0L' (1, 4) (1, 6)
""")
self.check_tokenize("x = 0xfffffffffff", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '0xffffffffff (1, 4) (1, 17)
""")
self.check_tokenize("x = 123141242151251616110l", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '123141242151 (1, 4) (1, 26)
""")
self.check_tokenize("x = -15921590215012591L", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
OP '-' (1, 4) (1, 5)
NUMBER '159215902150 (1, 5) (1, 23)
""")
def test_float(self):
# Floating point numbers
self.check_tokenize("x = 3.14159", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3.14159' (1, 4) (1, 11)
""")
self.check_tokenize("x = 314159.", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '314159.' (1, 4) (1, 11)
""")
self.check_tokenize("x = .314159", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '.314159' (1, 4) (1, 11)
""")
self.check_tokenize("x = 3e14159", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3e14159' (1, 4) (1, 11)
""")
self.check_tokenize("x = 3E123", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3E123' (1, 4) (1, 9)
""")
self.check_tokenize("x+y = 3e-1230", """\
NAME 'x' (1, 0) (1, 1)
OP '+' (1, 1) (1, 2)
NAME 'y' (1, 2) (1, 3)
OP '=' (1, 4) (1, 5)
NUMBER '3e-1230' (1, 6) (1, 13)
""")
self.check_tokenize("x = 3.14e159", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3.14e159' (1, 4) (1, 12)
""")
def test_string(self):
# String literals
self.check_tokenize("x = ''; y = \"\"", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "''" (1, 4) (1, 6)
OP ';' (1, 6) (1, 7)
NAME 'y' (1, 8) (1, 9)
OP '=' (1, 10) (1, 11)
STRING '""' (1, 12) (1, 14)
""")
self.check_tokenize("x = '\"'; y = \"'\"", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING '\\'"\\'' (1, 4) (1, 7)
OP ';' (1, 7) (1, 8)
NAME 'y' (1, 9) (1, 10)
OP '=' (1, 11) (1, 12)
STRING '"\\'"' (1, 13) (1, 16)
""")
self.check_tokenize("x = \"doesn't \"shrink\", does it\"", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING '"doesn\\'t "' (1, 4) (1, 14)
NAME 'shrink' (1, 14) (1, 20)
STRING '", does it"' (1, 20) (1, 31)
""")
self.check_tokenize("x = u'abc' + U'ABC'", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "u'abc'" (1, 4) (1, 10)
OP '+' (1, 11) (1, 12)
STRING "U'ABC'" (1, 13) (1, 19)
""")
self.check_tokenize('y = u"ABC" + U"ABC"', """\
NAME 'y' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING 'u"ABC"' (1, 4) (1, 10)
OP '+' (1, 11) (1, 12)
STRING 'U"ABC"' (1, 13) (1, 19)
""")
self.check_tokenize("x = ur'abc' + Ur'ABC' + uR'ABC' + UR'ABC'", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "ur'abc'" (1, 4) (1, 11)
OP '+' (1, 12) (1, 13)
STRING "Ur'ABC'" (1, 14) (1, 21)
OP '+' (1, 22) (1, 23)
STRING "uR'ABC'" (1, 24) (1, 31)
OP '+' (1, 32) (1, 33)
STRING "UR'ABC'" (1, 34) (1, 41)
""")
self.check_tokenize('y = ur"abc" + Ur"ABC" + uR"ABC" + UR"ABC"', """\
NAME 'y' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING 'ur"abc"' (1, 4) (1, 11)
OP '+' (1, 12) (1, 13)
STRING 'Ur"ABC"' (1, 14) (1, 21)
OP '+' (1, 22) (1, 23)
STRING 'uR"ABC"' (1, 24) (1, 31)
OP '+' (1, 32) (1, 33)
STRING 'UR"ABC"' (1, 34) (1, 41)
""")
self.check_tokenize("b'abc' + B'abc'", """\
STRING "b'abc'" (1, 0) (1, 6)
OP '+' (1, 7) (1, 8)
STRING "B'abc'" (1, 9) (1, 15)
""")
self.check_tokenize('b"abc" + B"abc"', """\
STRING 'b"abc"' (1, 0) (1, 6)
OP '+' (1, 7) (1, 8)
STRING 'B"abc"' (1, 9) (1, 15)
""")
self.check_tokenize("br'abc' + bR'abc' + Br'abc' + BR'abc'", """\
STRING "br'abc'" (1, 0) (1, 7)
OP '+' (1, 8) (1, 9)
STRING "bR'abc'" (1, 10) (1, 17)
OP '+' (1, 18) (1, 19)
STRING "Br'abc'" (1, 20) (1, 27)
OP '+' (1, 28) (1, 29)
STRING "BR'abc'" (1, 30) (1, 37)
""")
self.check_tokenize('br"abc" + bR"abc" + Br"abc" + BR"abc"', """\
STRING 'br"abc"' (1, 0) (1, 7)
OP '+' (1, 8) (1, 9)
STRING 'bR"abc"' (1, 10) (1, 17)
OP '+' (1, 18) (1, 19)
STRING 'Br"abc"' (1, 20) (1, 27)
OP '+' (1, 28) (1, 29)
STRING 'BR"abc"' (1, 30) (1, 37)
""")
def test_function(self):
self.check_tokenize("def d22(a, b, c=2, d=2, *k): pass", """\
NAME 'def' (1, 0) (1, 3)
NAME 'd22' (1, 4) (1, 7)
OP '(' (1, 7) (1, 8)
NAME 'a' (1, 8) (1, 9)
OP ',' (1, 9) (1, 10)
NAME 'b' (1, 11) (1, 12)
OP ',' (1, 12) (1, 13)
NAME 'c' (1, 14) (1, 15)
OP '=' (1, 15) (1, 16)
NUMBER '2' (1, 16) (1, 17)
OP ',' (1, 17) (1, 18)
NAME 'd' (1, 19) (1, 20)
OP '=' (1, 20) (1, 21)
NUMBER '2' (1, 21) (1, 22)
OP ',' (1, 22) (1, 23)
OP '*' (1, 24) (1, 25)
NAME 'k' (1, 25) (1, 26)
OP ')' (1, 26) (1, 27)
OP ':' (1, 27) (1, 28)
NAME 'pass' (1, 29) (1, 33)
""")
self.check_tokenize("def d01v_(a=1, *k, **w): pass", """\
NAME 'def' (1, 0) (1, 3)
NAME 'd01v_' (1, 4) (1, 9)
OP '(' (1, 9) (1, 10)
NAME 'a' (1, 10) (1, 11)
OP '=' (1, 11) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP ',' (1, 13) (1, 14)
OP '*' (1, 15) (1, 16)
NAME 'k' (1, 16) (1, 17)
OP ',' (1, 17) (1, 18)
OP '**' (1, 19) (1, 21)
NAME 'w' (1, 21) (1, 22)
OP ')' (1, 22) (1, 23)
OP ':' (1, 23) (1, 24)
NAME 'pass' (1, 25) (1, 29)
""")
def test_comparison(self):
# Comparison
self.check_tokenize("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " +
"1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass", """\
NAME 'if' (1, 0) (1, 2)
NUMBER '1' (1, 3) (1, 4)
OP '<' (1, 5) (1, 6)
NUMBER '1' (1, 7) (1, 8)
OP '>' (1, 9) (1, 10)
NUMBER '1' (1, 11) (1, 12)
OP '==' (1, 13) (1, 15)
NUMBER '1' (1, 16) (1, 17)
OP '>=' (1, 18) (1, 20)
NUMBER '5' (1, 21) (1, 22)
OP '<=' (1, 23) (1, 25)
NUMBER '0x15' (1, 26) (1, 30)
OP '<=' (1, 31) (1, 33)
NUMBER '0x12' (1, 34) (1, 38)
OP '!=' (1, 39) (1, 41)
NUMBER '1' (1, 42) (1, 43)
NAME 'and' (1, 44) (1, 47)
NUMBER '5' (1, 48) (1, 49)
NAME 'in' (1, 50) (1, 52)
NUMBER '1' (1, 53) (1, 54)
NAME 'not' (1, 55) (1, 58)
NAME 'in' (1, 59) (1, 61)
NUMBER '1' (1, 62) (1, 63)
NAME 'is' (1, 64) (1, 66)
NUMBER '1' (1, 67) (1, 68)
NAME 'or' (1, 69) (1, 71)
NUMBER '5' (1, 72) (1, 73)
NAME 'is' (1, 74) (1, 76)
NAME 'not' (1, 77) (1, 80)
NUMBER '1' (1, 81) (1, 82)
OP ':' (1, 82) (1, 83)
NAME 'pass' (1, 84) (1, 88)
""")
def test_shift(self):
# Shift
self.check_tokenize("x = 1 << 1 >> 5", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '<<' (1, 6) (1, 8)
NUMBER '1' (1, 9) (1, 10)
OP '>>' (1, 11) (1, 13)
NUMBER '5' (1, 14) (1, 15)
""")
def test_additive(self):
# Additive
self.check_tokenize("x = 1 - y + 15 - 01 + 0x124 + z + a[5]", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '-' (1, 6) (1, 7)
NAME 'y' (1, 8) (1, 9)
OP '+' (1, 10) (1, 11)
NUMBER '15' (1, 12) (1, 14)
OP '-' (1, 15) (1, 16)
NUMBER '01' (1, 17) (1, 19)
OP '+' (1, 20) (1, 21)
NUMBER '0x124' (1, 22) (1, 27)
OP '+' (1, 28) (1, 29)
NAME 'z' (1, 30) (1, 31)
OP '+' (1, 32) (1, 33)
NAME 'a' (1, 34) (1, 35)
OP '[' (1, 35) (1, 36)
NUMBER '5' (1, 36) (1, 37)
OP ']' (1, 37) (1, 38)
""")
def test_multiplicative(self):
# Multiplicative
self.check_tokenize("x = 1//1*1/5*12%0x12", """\
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '//' (1, 5) (1, 7)
NUMBER '1' (1, 7) (1, 8)
OP '*' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP '/' (1, 10) (1, 11)
NUMBER '5' (1, 11) (1, 12)
OP '*' (1, 12) (1, 13)
NUMBER '12' (1, 13) (1, 15)
OP '%' (1, 15) (1, 16)
NUMBER '0x12' (1, 16) (1, 20)
""")
def test_unary(self):
# Unary
self.check_tokenize("~1 ^ 1 & 1 |1 ^ -1", """\
OP '~' (1, 0) (1, 1)
NUMBER '1' (1, 1) (1, 2)
OP '^' (1, 3) (1, 4)
NUMBER '1' (1, 5) (1, 6)
OP '&' (1, 7) (1, 8)
NUMBER '1' (1, 9) (1, 10)
OP '|' (1, 11) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP '^' (1, 14) (1, 15)
OP '-' (1, 16) (1, 17)
NUMBER '1' (1, 17) (1, 18)
""")
self.check_tokenize("-1*1/1+1*1//1 - ---1**1", """\
OP '-' (1, 0) (1, 1)
NUMBER '1' (1, 1) (1, 2)
OP '*' (1, 2) (1, 3)
NUMBER '1' (1, 3) (1, 4)
OP '/' (1, 4) (1, 5)
NUMBER '1' (1, 5) (1, 6)
OP '+' (1, 6) (1, 7)
NUMBER '1' (1, 7) (1, 8)
OP '*' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP '//' (1, 10) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP '-' (1, 14) (1, 15)
OP '-' (1, 16) (1, 17)
OP '-' (1, 17) (1, 18)
OP '-' (1, 18) (1, 19)
NUMBER '1' (1, 19) (1, 20)
OP '**' (1, 20) (1, 22)
NUMBER '1' (1, 22) (1, 23)
""")
def test_selector(self):
# Selector
self.check_tokenize("import sys, time\n"
"x = sys.modules['time'].time()", """\
NAME 'import' (1, 0) (1, 6)
NAME 'sys' (1, 7) (1, 10)
OP ',' (1, 10) (1, 11)
NAME 'time' (1, 12) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
NAME 'x' (2, 0) (2, 1)
OP '=' (2, 2) (2, 3)
NAME 'sys' (2, 4) (2, 7)
OP '.' (2, 7) (2, 8)
NAME 'modules' (2, 8) (2, 15)
OP '[' (2, 15) (2, 16)
STRING "'time'" (2, 16) (2, 22)
OP ']' (2, 22) (2, 23)
OP '.' (2, 23) (2, 24)
NAME 'time' (2, 24) (2, 28)
OP '(' (2, 28) (2, 29)
OP ')' (2, 29) (2, 30)
""")
def test_method(self):
# Methods
self.check_tokenize("@staticmethod\n"
"def foo(x,y): pass", """\
OP '@' (1, 0) (1, 1)
NAME 'staticmethod (1, 1) (1, 13)
NEWLINE '\\n' (1, 13) (1, 14)
NAME 'def' (2, 0) (2, 3)
NAME 'foo' (2, 4) (2, 7)
OP '(' (2, 7) (2, 8)
NAME 'x' (2, 8) (2, 9)
OP ',' (2, 9) (2, 10)
NAME 'y' (2, 10) (2, 11)
OP ')' (2, 11) (2, 12)
OP ':' (2, 12) (2, 13)
NAME 'pass' (2, 14) (2, 18)
""")
def test_tabs(self):
# Evil tabs
self.check_tokenize("def f():\n"
"\tif x\n"
" \tpass", """\
NAME 'def' (1, 0) (1, 3)
NAME 'f' (1, 4) (1, 5)
OP '(' (1, 5) (1, 6)
OP ')' (1, 6) (1, 7)
OP ':' (1, 7) (1, 8)
NEWLINE '\\n' (1, 8) (1, 9)
INDENT '\\t' (2, 0) (2, 1)
NAME 'if' (2, 1) (2, 3)
NAME 'x' (2, 4) (2, 5)
NEWLINE '\\n' (2, 5) (2, 6)
INDENT ' \\t' (3, 0) (3, 9)
NAME 'pass' (3, 9) (3, 13)
DEDENT '' (4, 0) (4, 0)
DEDENT '' (4, 0) (4, 0)
""")
def test_pathological_trailing_whitespace(self):
# Pathological whitespace (http://bugs.python.org/issue16152)
self.check_tokenize("@ ", """\
OP '@' (1, 0) (1, 1)
""")
def decistmt(s):
result = []
g = generate_tokens(StringIO(s).readline) # tokenize the string
for toknum, tokval, _, _, _ in g:
if toknum == NUMBER and '.' in tokval: # replace NUMBER tokens
result.extend([
(NAME, 'Decimal'),
(OP, '('),
(STRING, repr(tokval)),
(OP, ')')
])
else:
result.append((toknum, tokval))
return untokenize(result)
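# Illustrative sketch only (not part of the test suite): evaluating the
# rewritten source with Decimal in scope yields exact decimal arithmetic,
# which is what test_decistmt below relies on.  The helper name and the
# sample expression are hypothetical.
def _example_decistmt_eval():
    from decimal import Decimal
    rewritten = decistmt('1.1 + 2.2')  # float literals become Decimal('...') calls
    return eval(rewritten, {'Decimal': Decimal})  # Decimal('3.3'), exact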
class TestMisc(TestCase):
def test_decistmt(self):
# Substitute Decimals for floats in a string of statements.
# This is an example from the docs.
from decimal import Decimal
s = '+21.3e-5*-.1234/81.7'
self.assertEqual(decistmt(s),
"+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')")
# The format of the exponent is inherited from the platform C library.
# Known cases are "e-007" (Windows) and "e-07" (not Windows). Since
# we're only showing 12 digits, and the 13th isn't close to 5, the
# rest of the output should be platform-independent.
self.assertRegexpMatches(str(eval(s)), '-3.21716034272e-0+7')
# Output from calculations with Decimal should be identical across all
# platforms.
self.assertEqual(eval(decistmt(s)), Decimal('-3.217160342717258261933904529E-7'))
class UntokenizeTest(TestCase):
def test_bad_input_order(self):
# raise if previous row
u = Untokenizer()
u.prev_row = 2
u.prev_col = 2
with self.assertRaises(ValueError) as cm:
u.add_whitespace((1,3))
self.assertEqual(cm.exception.args[0],
'start (1,3) precedes previous end (2,2)')
# raise if previous column in row
self.assertRaises(ValueError, u.add_whitespace, (2,1))
def test_backslash_continuation(self):
# The problem is that <whitespace>\<newline> leaves no token
u = Untokenizer()
u.prev_row = 1
u.prev_col = 1
u.tokens = []
u.add_whitespace((2, 0))
self.assertEqual(u.tokens, ['\\\n'])
u.prev_row = 2
u.add_whitespace((4, 4))
self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', ' '])
def test_iter_compat(self):
u = Untokenizer()
token = (NAME, 'Hello')
u.compat(token, iter([]))
self.assertEqual(u.tokens, ["Hello "])
u = Untokenizer()
self.assertEqual(u.untokenize(iter([token])), 'Hello ')
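# Illustrative sketch only: in "compat" (2-tuple) mode, untokenize() appends a
# space after NAME and NUMBER tokens, which is why the expected values above
# ('Hello ') and in test_decistmt ("Decimal ('21.3e-5')...") carry trailing
# spaces.  The helper name is hypothetical.
def _example_compat_untokenize():
    return untokenize([(NAME, 'print'), (NUMBER, '1')])  # -> 'print 1 '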
class TestRoundtrip(TestCase):
def check_roundtrip(self, f):
"""
Test roundtrip for `untokenize`. `f` is an open file or a string.
The source code in f is tokenized, converted back to source code
via tokenize.untokenize(), and tokenized again from the latter.
The test fails if the second tokenization doesn't match the first.
"""
if isinstance(f, str): f = StringIO(f)
token_list = list(generate_tokens(f.readline))
f.close()
tokens1 = [tok[:2] for tok in token_list]
new_text = untokenize(tokens1)
readline = iter(new_text.splitlines(1)).next
tokens2 = [tok[:2] for tok in generate_tokens(readline)]
self.assertEqual(tokens2, tokens1)
def test_roundtrip(self):
# There are some standard formatting practices that are easy to get right.
self.check_roundtrip("if x == 1:\n"
" print(x)\n")
        # The same holds for the Python 2 print statement.
self.check_roundtrip("if x == 1:\n"
" print x\n")
self.check_roundtrip("# This is a comment\n"
"# This also")
# Some people use different formatting conventions, which makes
# untokenize a little trickier. Note that this test involves trailing
# whitespace after the colon. Note that we use hex escapes to make the
        # two trailing blanks apparent in the expected output.
self.check_roundtrip("if x == 1 : \n"
" print x\n")
fn = test_support.findfile("tokenize_tests" + os.extsep + "txt")
with open(fn) as f:
self.check_roundtrip(f)
self.check_roundtrip("if x == 1:\n"
" # A comment by itself.\n"
" print x # Comment here, too.\n"
" # Another comment.\n"
"after_if = True\n")
self.check_roundtrip("if (x # The comments need to go in the right place\n"
" == 1):\n"
" print 'x==1'\n")
self.check_roundtrip("class Test: # A comment here\n"
" # A comment with weird indent\n"
" after_com = 5\n"
" def x(m): return m*5 # a one liner\n"
" def y(m): # A whitespace after the colon\n"
" return y*4 # 3-space indent\n")
# Some error-handling code
self.check_roundtrip("try: import somemodule\n"
"except ImportError: # comment\n"
" print 'Can not import' # comment2\n"
"else: print 'Loaded'\n")
def test_continuation(self):
# Balancing continuation
self.check_roundtrip("a = (3,4, \n"
"5,6)\n"
"y = [3, 4,\n"
"5]\n"
"z = {'a': 5,\n"
"'b':15, 'c':True}\n"
"x = len(y) + 5 - a[\n"
"3] - a[2]\n"
"+ len(z) - z[\n"
"'b']\n")
def test_backslash_continuation(self):
# Backslash means line continuation, except for comments
self.check_roundtrip("x=1+\\\n"
"1\n"
"# This is a comment\\\n"
"# This also\n")
self.check_roundtrip("# Comment \\\n"
"x = 0")
def test_string_concatenation(self):
# Two string literals on the same line
self.check_roundtrip("'' ''")
def test_random_files(self):
# Test roundtrip on random python modules.
# pass the '-ucpu' option to process the full directory.
import glob, random
fn = test_support.findfile("tokenize_tests" + os.extsep + "txt")
tempdir = os.path.dirname(fn) or os.curdir
testfiles = glob.glob(os.path.join(tempdir, "test*.py"))
if not test_support.is_resource_enabled("cpu"):
testfiles = random.sample(testfiles, 10)
for testfile in testfiles:
try:
with open(testfile, 'rb') as f:
self.check_roundtrip(f)
except:
print "Roundtrip failed for file %s" % testfile
raise
def roundtrip(self, code):
if isinstance(code, str):
code = code.encode('utf-8')
tokens = generate_tokens(StringIO(code).readline)
return untokenize(tokens).decode('utf-8')
def test_indentation_semantics_retained(self):
"""
Ensure that although whitespace might be mutated in a roundtrip,
the semantic meaning of the indentation remains consistent.
"""
code = "if False:\n\tx=3\n\tx=3\n"
codelines = self.roundtrip(code).split('\n')
self.assertEqual(codelines[1], codelines[2])
def test_main():
test_support.run_unittest(TokenizeTest)
test_support.run_unittest(UntokenizeTest)
test_support.run_unittest(TestRoundtrip)
test_support.run_unittest(TestMisc)
if __name__ == "__main__":
test_main()
"""
Implementation of `nbio_interface.AbstractIOServices` on top of a
selector-based I/O loop, such as tornado's and our home-grown
select_connection's I/O loops.
"""
import abc
import logging
import socket
import threading
from rez.vendor.pika.adapters.utils import nbio_interface, io_services_utils
from rez.vendor.pika.adapters.utils.io_services_utils import (check_callback_arg,
check_fd_arg)
LOGGER = logging.getLogger(__name__)
class AbstractSelectorIOLoop(object):
"""Selector-based I/O loop interface expected by
`selector_ioloop_adapter.SelectorIOServicesAdapter`
NOTE: this interface follows the corresponding methods and attributes
of `tornado.ioloop.IOLoop` in order to avoid additional adapter layering
when wrapping tornado's IOLoop.
"""
@property
@abc.abstractmethod
def READ(self): # pylint: disable=C0103
"""The value of the I/O loop's READ flag; READ/WRITE/ERROR may be used
with bitwise operators as expected.
Implementation note: the implementations can simply replace these
READ/WRITE/ERROR properties with class-level attributes
"""
@property
@abc.abstractmethod
def WRITE(self): # pylint: disable=C0103
"""The value of the I/O loop's WRITE flag; READ/WRITE/ERROR may be used
with bitwise operators as expected
"""
@property
@abc.abstractmethod
def ERROR(self): # pylint: disable=C0103
"""The value of the I/O loop's ERROR flag; READ/WRITE/ERROR may be used
with bitwise operators as expected
"""
@abc.abstractmethod
def close(self):
"""Release IOLoop's resources.
        The `close()` method is intended to be called by the application or test
code only after `start()` returns. After calling `close()`, no other
interaction with the closed instance of `IOLoop` should be performed.
"""
@abc.abstractmethod
def start(self):
"""Run the I/O loop. It will loop until requested to exit. See `stop()`.
"""
@abc.abstractmethod
def stop(self):
"""Request exit from the ioloop. The loop is NOT guaranteed to
stop before this method returns.
        To invoke `stop()` safely from a thread other than this IOLoop's thread,
        call it via `add_callback`, the only thread-safe method on this
        interface; e.g., `ioloop.add_callback(ioloop.stop)`
"""
@abc.abstractmethod
def call_later(self, delay, callback):
"""Add the callback to the IOLoop timer to be called after delay seconds
from the time of call on best-effort basis. Returns a handle to the
timeout.
:param float delay: The number of seconds to wait to call callback
:param callable callback: The callback method
:returns: handle to the created timeout that may be passed to
`remove_timeout()`
:rtype: object
"""
@abc.abstractmethod
def remove_timeout(self, timeout_handle):
"""Remove a timeout
:param timeout_handle: Handle of timeout to remove
"""
@abc.abstractmethod
def add_callback(self, callback):
"""Requests a call to the given function as soon as possible in the
context of this IOLoop's thread.
NOTE: This is the only thread-safe method in IOLoop. All other
manipulations of IOLoop must be performed from the IOLoop's thread.
        For example, a thread may request a call to the `stop` method of an
        ioloop that is running in a different thread via
        `ioloop.add_callback(ioloop.stop)`
:param callable callback: The callback method
"""
@abc.abstractmethod
def add_handler(self, fd, handler, events):
"""Start watching the given file descriptor for events
:param int fd: The file descriptor
:param callable handler: When requested event(s) occur,
`handler(fd, events)` will be called.
:param int events: The event mask using READ, WRITE, ERROR.
"""
@abc.abstractmethod
def update_handler(self, fd, events):
"""Changes the events we watch for
:param int fd: The file descriptor
:param int events: The event mask using READ, WRITE, ERROR
"""
@abc.abstractmethod
def remove_handler(self, fd):
"""Stop watching the given file descriptor for events
:param int fd: The file descriptor
"""
class SelectorIOServicesAdapter(io_services_utils.SocketConnectionMixin,
io_services_utils.StreamingConnectionMixin,
nbio_interface.AbstractIOServices,
nbio_interface.AbstractFileDescriptorServices):
"""Implements the
:py:class:`.nbio_interface.AbstractIOServices` interface
on top of selector-style native loop having the
:py:class:`AbstractSelectorIOLoop` interface, such as
    :py:class:`pika.adapters.select_connection.IOLoop` and
    :py:class:`tornado.ioloop.IOLoop`.
NOTE:
:py:class:`.nbio_interface.AbstractFileDescriptorServices`
interface is only required by the mixins.
"""
def __init__(self, native_loop):
"""
:param AbstractSelectorIOLoop native_loop: An instance compatible with
the `AbstractSelectorIOLoop` interface, but not necessarily derived
from it.
"""
self._loop = native_loop
# Active watchers: maps file descriptors to `_FileDescriptorCallbacks`
self._watchers = dict()
# Native loop-specific event masks of interest
self._readable_mask = self._loop.READ
# NOTE: tying ERROR to WRITE is particularly handy for Windows, whose
# `select.select()` differs from Posix by reporting
# connection-establishment failure only through exceptfds (ERROR event),
# while the typical application workflow is to wait for the socket to
# become writable when waiting for socket connection to be established.
self._writable_mask = self._loop.WRITE | self._loop.ERROR
def get_native_ioloop(self):
"""Implement
:py:meth:`.nbio_interface.AbstractIOServices.get_native_ioloop()`.
"""
return self._loop
def close(self):
"""Implement :py:meth:`.nbio_interface.AbstractIOServices.close()`.
"""
self._loop.close()
def run(self):
"""Implement :py:meth:`.nbio_interface.AbstractIOServices.run()`.
"""
self._loop.start()
def stop(self):
"""Implement :py:meth:`.nbio_interface.AbstractIOServices.stop()`.
"""
self._loop.stop()
def add_callback_threadsafe(self, callback):
"""Implement
:py:meth:`.nbio_interface.AbstractIOServices.add_callback_threadsafe()`.
"""
self._loop.add_callback(callback)
def call_later(self, delay, callback):
"""Implement :py:meth:`.nbio_interface.AbstractIOServices.call_later()`.
"""
return _TimerHandle(self._loop.call_later(delay, callback), self._loop)
def getaddrinfo(self,
host,
port,
on_done,
family=0,
socktype=0,
proto=0,
flags=0):
"""Implement :py:meth:`.nbio_interface.AbstractIOServices.getaddrinfo()`.
"""
return _SelectorIOLoopIOHandle(
_AddressResolver(
native_loop=self._loop,
host=host,
port=port,
family=family,
socktype=socktype,
proto=proto,
flags=flags,
on_done=on_done).start())
def set_reader(self, fd, on_readable):
"""Implement
:py:meth:`.nbio_interface.AbstractFileDescriptorServices.set_reader()`.
"""
LOGGER.debug('SelectorIOServicesAdapter.set_reader(%s, %r)', fd,
on_readable)
check_fd_arg(fd)
check_callback_arg(on_readable, 'on_readable')
try:
callbacks = self._watchers[fd]
except KeyError:
self._loop.add_handler(fd, self._on_reader_writer_fd_events,
self._readable_mask)
self._watchers[fd] = _FileDescriptorCallbacks(reader=on_readable)
LOGGER.debug('set_reader(%s, _) added handler Rd', fd)
else:
if callbacks.reader is None:
assert callbacks.writer is not None
self._loop.update_handler(
fd, self._readable_mask | self._writable_mask)
LOGGER.debug('set_reader(%s, _) updated handler RdWr', fd)
else:
LOGGER.debug('set_reader(%s, _) replacing reader', fd)
callbacks.reader = on_readable
def remove_reader(self, fd):
"""Implement
:py:meth:`.nbio_interface.AbstractFileDescriptorServices.remove_reader()`.
"""
LOGGER.debug('SelectorIOServicesAdapter.remove_reader(%s)', fd)
check_fd_arg(fd)
try:
callbacks = self._watchers[fd]
except KeyError:
LOGGER.debug('remove_reader(%s) neither was set', fd)
return False
if callbacks.reader is None:
assert callbacks.writer is not None
LOGGER.debug('remove_reader(%s) reader wasn\'t set Wr', fd)
return False
callbacks.reader = None
if callbacks.writer is None:
del self._watchers[fd]
self._loop.remove_handler(fd)
LOGGER.debug('remove_reader(%s) removed handler', fd)
else:
self._loop.update_handler(fd, self._writable_mask)
LOGGER.debug('remove_reader(%s) updated handler Wr', fd)
return True
def set_writer(self, fd, on_writable):
"""Implement
:py:meth:`.nbio_interface.AbstractFileDescriptorServices.set_writer()`.
"""
LOGGER.debug('SelectorIOServicesAdapter.set_writer(%s, %r)', fd,
on_writable)
check_fd_arg(fd)
check_callback_arg(on_writable, 'on_writable')
try:
callbacks = self._watchers[fd]
except KeyError:
self._loop.add_handler(fd, self._on_reader_writer_fd_events,
self._writable_mask)
self._watchers[fd] = _FileDescriptorCallbacks(writer=on_writable)
LOGGER.debug('set_writer(%s, _) added handler Wr', fd)
else:
if callbacks.writer is None:
assert callbacks.reader is not None
# NOTE: Set the writer func before setting the mask!
# Otherwise a race condition can occur where ioloop tries to
# call writer when it is still None.
callbacks.writer = on_writable
self._loop.update_handler(
fd, self._readable_mask | self._writable_mask)
LOGGER.debug('set_writer(%s, _) updated handler RdWr', fd)
else:
LOGGER.debug('set_writer(%s, _) replacing writer', fd)
callbacks.writer = on_writable
def remove_writer(self, fd):
"""Implement
:py:meth:`.nbio_interface.AbstractFileDescriptorServices.remove_writer()`.
"""
LOGGER.debug('SelectorIOServicesAdapter.remove_writer(%s)', fd)
check_fd_arg(fd)
try:
callbacks = self._watchers[fd]
except KeyError:
LOGGER.debug('remove_writer(%s) neither was set.', fd)
return False
if callbacks.writer is None:
assert callbacks.reader is not None
LOGGER.debug('remove_writer(%s) writer wasn\'t set Rd', fd)
return False
callbacks.writer = None
if callbacks.reader is None:
del self._watchers[fd]
self._loop.remove_handler(fd)
LOGGER.debug('remove_writer(%s) removed handler', fd)
else:
self._loop.update_handler(fd, self._readable_mask)
LOGGER.debug('remove_writer(%s) updated handler Rd', fd)
return True
def _on_reader_writer_fd_events(self, fd, events):
"""Handle indicated file descriptor events requested via `set_reader()`
and `set_writer()`.
:param fd: file descriptor
        :param events: event mask using native loop's READ/WRITE/ERROR. NOTE:
            depending on the underlying poller mechanism, ERROR may be indicated
            for certain file descriptor states even though we didn't request it.
            We ignore ERROR here since `set_reader()`/`set_writer()` don't
            request it.
"""
callbacks = self._watchers[fd]
if events & self._readable_mask and callbacks.reader is None:
# NOTE: we check for consistency here ahead of the writer callback
# because the writer callback, if any, can change the events being
# watched
LOGGER.warning(
'READ indicated on fd=%s, but reader callback is None; '
'events=%s', fd, bin(events))
if events & self._writable_mask:
if callbacks.writer is not None:
callbacks.writer()
else:
LOGGER.warning(
'WRITE indicated on fd=%s, but writer callback is None; '
'events=%s', fd, bin(events))
if events & self._readable_mask:
if callbacks.reader is not None:
callbacks.reader()
else:
# Reader callback might have been removed in the scope of writer
# callback.
pass
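# Illustrative usage sketch only (not part of the adapter): wiring a
# compatible native loop into the adapter and scheduling work on it.  The
# function name is hypothetical and `native_loop` is assumed to satisfy the
# AbstractSelectorIOLoop interface above.
def _example_adapter_usage(native_loop):
    services = SelectorIOServicesAdapter(native_loop)
    services.call_later(0.25, services.stop)  # request shutdown shortly after start
    services.add_callback_threadsafe(
        lambda: LOGGER.debug('ran on the I/O loop thread'))
    services.run()    # blocks until the stop() callback is processed
    services.close()  # release loop resources only after run() returns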
class _FileDescriptorCallbacks(object):
"""Holds reader and writer callbacks for a file descriptor"""
__slots__ = ('reader', 'writer')
def __init__(self, reader=None, writer=None):
self.reader = reader
self.writer = writer
class _TimerHandle(nbio_interface.AbstractTimerReference):
"""This module's adaptation of `nbio_interface.AbstractTimerReference`.
"""
def __init__(self, handle, loop):
"""
:param opaque handle: timer handle from the underlying loop
implementation that may be passed to its `remove_timeout()` method
:param AbstractSelectorIOLoop loop: the I/O loop instance that created
the timeout.
"""
self._handle = handle
self._loop = loop
def cancel(self):
if self._loop is not None:
self._loop.remove_timeout(self._handle)
self._handle = None
self._loop = None
class _SelectorIOLoopIOHandle(nbio_interface.AbstractIOReference):
"""This module's adaptation of `nbio_interface.AbstractIOReference`
"""
def __init__(self, subject):
"""
:param subject: subject of the reference containing a `cancel()` method
"""
self._cancel = subject.cancel
def cancel(self):
"""Cancel pending operation
:returns: False if was already done or cancelled; True otherwise
:rtype: bool
"""
return self._cancel()
class _AddressResolver(object):
"""Performs getaddrinfo asynchronously using a thread, then reports result
via callback from the given I/O loop.
NOTE: at this stage, we're using a thread per request, which may prove
inefficient and even prohibitive if the app performs many of these
operations concurrently.
"""
NOT_STARTED = 0
ACTIVE = 1
CANCELED = 2
COMPLETED = 3
def __init__(self, native_loop, host, port, family, socktype, proto, flags,
on_done):
"""
:param AbstractSelectorIOLoop native_loop:
:param host: `see socket.getaddrinfo()`
:param port: `see socket.getaddrinfo()`
:param family: `see socket.getaddrinfo()`
:param socktype: `see socket.getaddrinfo()`
:param proto: `see socket.getaddrinfo()`
:param flags: `see socket.getaddrinfo()`
:param on_done: on_done(records|BaseException) callback for reporting
result from the given I/O loop. The single arg will be either an
exception object (check for `BaseException`) in case of failure or
the result returned by `socket.getaddrinfo()`.
"""
check_callback_arg(on_done, 'on_done')
self._state = self.NOT_STARTED
self._result = None
self._loop = native_loop
self._host = host
self._port = port
self._family = family
self._socktype = socktype
self._proto = proto
self._flags = flags
self._on_done = on_done
self._mutex = threading.Lock()
self._threading_timer = None
def _cleanup(self):
"""Release resources
"""
self._loop = None
self._threading_timer = None
self._on_done = None
def start(self):
"""Start asynchronous DNS lookup.
:rtype: nbio_interface.AbstractIOReference
"""
assert self._state == self.NOT_STARTED, self._state
self._state = self.ACTIVE
self._threading_timer = threading.Timer(0, self._resolve)
self._threading_timer.start()
return _SelectorIOLoopIOHandle(self)
def cancel(self):
"""Cancel the pending resolver
:returns: False if was already done or cancelled; True otherwise
:rtype: bool
"""
# Try to cancel, but no guarantees
with self._mutex:
if self._state == self.ACTIVE:
LOGGER.debug('Canceling resolver for %s:%s', self._host,
self._port)
self._state = self.CANCELED
# Attempt to cancel, but not guaranteed
self._threading_timer.cancel()
self._cleanup()
return True
else:
LOGGER.debug(
'Ignoring _AddressResolver cancel request when not ACTIVE; '
'(%s:%s); state=%s', self._host, self._port, self._state)
return False
def _resolve(self):
"""Call `socket.getaddrinfo()` and return result via user's callback
function on the given I/O loop
"""
try:
# NOTE: on python 2.x, can't pass keyword args to getaddrinfo()
result = socket.getaddrinfo(self._host, self._port, self._family,
self._socktype, self._proto,
self._flags)
except Exception as exc: # pylint: disable=W0703
LOGGER.error('Address resolution failed: %r', exc)
result = exc
self._result = result
# Schedule result to be returned to user via user's event loop
with self._mutex:
if self._state == self.ACTIVE:
self._loop.add_callback(self._dispatch_result)
else:
LOGGER.debug(
'Asynchronous getaddrinfo cancellation detected; '
'in thread; host=%r', self._host)
def _dispatch_result(self):
"""This is called from the user's I/O loop to pass the result to the
user via the user's on_done callback
"""
if self._state == self.ACTIVE:
self._state = self.COMPLETED
try:
LOGGER.debug(
'Invoking asynchronous getaddrinfo() completion callback; '
'host=%r', self._host)
self._on_done(self._result)
finally:
self._cleanup()
else:
LOGGER.debug(
'Asynchronous getaddrinfo cancellation detected; '
'in I/O loop context; host=%r', self._host)
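# Illustrative sketch only: asynchronous name resolution through the adapter.
# `services` is assumed to be a SelectorIOServicesAdapter; the host/port are
# arbitrary examples (5672 is the conventional AMQP port) and the function
# name is hypothetical.
def _example_getaddrinfo(services, host='localhost', port=5672):
    def on_done(result):
        # Per the getaddrinfo() contract above, `result` is either the list of
        # address records or an exception instance describing the failure.
        if isinstance(result, BaseException):
            LOGGER.error('resolution failed: %r', result)
        else:
            LOGGER.debug('resolved %s:%s -> %r', host, port, result)
    # The returned handle exposes cancel() for abandoning the lookup.
    return services.getaddrinfo(host, port, on_done,
                                socktype=socket.SOCK_STREAM)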
# Copyright 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
import netaddr
from time import sleep
import uuid
from proboscis import after_class
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis.asserts import fail
from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import SkipTest
from proboscis import test
from troveclient.compat import exceptions
from trove.common.utils import poll_until
from trove import tests
from trove.tests.api.instances import assert_unprocessable
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import InstanceTestInfo
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE
from trove.tests.config import CONFIG
from trove.tests.util.check import AttrCheck
from trove.tests.util.check import CollectionCheck
from trove.tests.util.check import TypeCheck
from trove.tests.util import create_dbaas_client
from trove.tests.util.mysql import create_mysql_connection
from trove.tests.util.users import Requirements
CONFIG_NAME = "test_configuration"
CONFIG_DESC = "configuration description"
configuration_default = None
configuration_info = None
configuration_href = None
configuration_instance = InstanceTestInfo()
configuration_instance_id = None
sql_variables = [
'key_buffer_size',
'connect_timeout',
'join_buffer_size',
]
def _is_valid_timestamp(time_string):
try:
datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S")
except ValueError:
return False
return True
# Helper methods to validate that a configuration is applied to an instance.
def _execute_query(host, user_name, password, query):
print("Starting to query database, host: %s, user: %s, password: %s, "
"query: %s" % (host, user_name, password, query))
with create_mysql_connection(host, user_name, password) as db:
result = db.execute(query)
return result
def _get_address(instance_id):
result = instance_info.dbaas_admin.mgmt.instances.show(instance_id)
try:
return next(str(ip) for ip in result.ip
if netaddr.valid_ipv4(ip))
except StopIteration:
fail("No IPV4 ip found")
def _test_configuration_is_applied_to_instance(instance, configuration_id):
if CONFIG.fake_mode:
raise SkipTest("configuration from sql does not work in fake mode")
instance_test = instance_info.dbaas.instances.get(instance.id)
assert_equal(configuration_id, instance_test.configuration['id'])
if configuration_id:
testconfig_info = instance_info.dbaas.configurations.get(
configuration_id)
else:
testconfig_info = instance_info.dbaas.instance.configuration(
instance.id)
testconfig_info['configuration']
conf_instances = instance_info.dbaas.configurations.instances(
configuration_id)
config_instance_ids = [inst.id for inst in conf_instances]
assert_true(instance_test.id in config_instance_ids)
cfg_names = testconfig_info.values.keys()
host = _get_address(instance.id)
for user in instance.users:
username = user['name']
password = user['password']
concat_variables = "','".join(cfg_names)
query = ("show variables where Variable_name "
"in ('%s');" % concat_variables)
actual_values = _execute_query(host, username, password, query)
print("actual_values %s" % actual_values)
print("testconfig_info.values %s" % testconfig_info.values)
assert_true(len(actual_values) == len(cfg_names))
# check the configs exist
attrcheck = AttrCheck()
allowed_attrs = [actual_key for actual_key, actual_value in actual_values]
attrcheck.contains_allowed_attrs(
testconfig_info.values, allowed_attrs,
msg="Configurations parameters")
def _get_parameter_type(name):
instance_info.dbaas.configuration_parameters.get_parameter(
instance_info.dbaas_datastore,
instance_info.dbaas_datastore_version,
name)
resp, body = instance_info.dbaas.client.last_response
print(resp)
print(body)
return json.loads(body.decode())['type']
# check the config values are correct
for key, value in actual_values:
key_type = _get_parameter_type(key)
# mysql returns 'ON' and 'OFF' for True and False respectively
if value == 'ON':
converted_key_value = (str(key), 1)
elif value == 'OFF':
converted_key_value = (str(key), 0)
else:
if key_type == 'integer':
value = int(value)
converted_key_value = (str(key), value)
print("converted_key_value: %s" % str(converted_key_value))
assert_true(converted_key_value in testconfig_info.values.items())
class ConfigurationsTestBase(object):
@staticmethod
def expected_instance_datastore_configs(instance_id):
"""Given an instance retrieve the expected test configurations for
instance's datastore.
"""
instance = instance_info.dbaas.instances.get(instance_id)
datastore_type = instance.datastore['type']
datastore_test_configs = CONFIG.get(datastore_type, {})
return datastore_test_configs.get("configurations", {})
@staticmethod
def expected_default_datastore_configs():
"""Returns the expected test configurations for the default datastore
defined in the Test Config as dbaas_datastore.
"""
default_datastore = CONFIG.get('dbaas_datastore', None)
datastore_test_configs = CONFIG.get(default_datastore, {})
return datastore_test_configs.get("configurations", {})
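# Illustrative sketch only: an assumed shape for the per-datastore test
# configuration consumed by the helpers above and the tests below.  The key
# names are the ones referenced by the tests; the concrete parameter names
# and values are hypothetical, and this constant is not used anywhere.
_EXAMPLE_DATASTORE_TEST_CONFIGS = {
    "configurations": {
        "valid_values": {"connect_timeout": 120, "join_buffer_size": 1048576},
        "appending_values": {"key_buffer_size": 16777216},
        "nondynamic_parameter": {"innodb_buffer_pool_instances": 2},  # needs restart
        "out_of_bounds_over": {"connect_timeout": 1000000},
        "out_of_bounds_under": {"connect_timeout": -10},
        "parameters_list": ["key_buffer_size", "connect_timeout"],
    }
}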
@test(depends_on_groups=[tests.DBAAS_API_BACKUPS],
groups=[tests.DBAAS_API_CONFIGURATIONS])
class CreateConfigurations(ConfigurationsTestBase):
@test
def test_expected_configurations_parameters(self):
"""Test get expected configurations parameters."""
allowed_attrs = ["configuration-parameters"]
instance_info.dbaas.configuration_parameters.parameters(
instance_info.dbaas_datastore,
instance_info.dbaas_datastore_version)
resp, body = instance_info.dbaas.client.last_response
attrcheck = AttrCheck()
config_parameters_dict = json.loads(body.decode())
attrcheck.contains_allowed_attrs(
config_parameters_dict, allowed_attrs,
msg="Configurations parameters")
# sanity check that a few options are in the list
config_params_list = config_parameters_dict['configuration-parameters']
config_param_keys = []
for param in config_params_list:
config_param_keys.append(param['name'])
expected_configs = self.expected_default_datastore_configs()
expected_config_params = expected_configs.get('parameters_list')
# check for duplicate configuration parameters
msg = "check for duplicate configuration parameters"
assert_equal(len(config_param_keys), len(set(config_param_keys)), msg)
for expected_config_item in expected_config_params:
assert_true(expected_config_item in config_param_keys)
@test
def test_expected_get_configuration_parameter(self):
# tests get on a single parameter to verify it has expected attributes
param_name = 'key_buffer_size'
allowed_config_params = ['name', 'restart_required',
'max', 'min', 'type',
'deleted', 'deleted_at',
'datastore_version_id']
param = instance_info.dbaas.configuration_parameters.get_parameter(
instance_info.dbaas_datastore,
instance_info.dbaas_datastore_version,
param_name)
resp, body = instance_info.dbaas.client.last_response
print("params: %s" % param)
print("resp: %s" % resp)
print("body: %s" % body)
attrcheck = AttrCheck()
config_parameter_dict = json.loads(body.decode())
print("config_parameter_dict: %s" % config_parameter_dict)
attrcheck.contains_allowed_attrs(
config_parameter_dict,
allowed_config_params,
msg="Get Configuration parameter")
assert_equal(param_name, config_parameter_dict['name'])
with TypeCheck('ConfigurationParameter', param) as parameter:
parameter.has_field('name', str)
parameter.has_field('restart_required', bool)
parameter.has_field('max', int)
parameter.has_field('min', int)
parameter.has_field('type', str)
parameter.has_field('datastore_version_id', str)
@test
def test_configurations_create_invalid_values(self):
"""Test create configurations with invalid values."""
values = '{"this_is_invalid": 123}'
try:
instance_info.dbaas.configurations.create(
CONFIG_NAME,
values,
CONFIG_DESC)
except exceptions.UnprocessableEntity:
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 422)
@test
def test_configurations_create_invalid_value_type(self):
"""Test create configuration with invalid value type."""
values = '{"key_buffer_size": "this is a string not int"}'
assert_unprocessable(instance_info.dbaas.configurations.create,
CONFIG_NAME, values, CONFIG_DESC)
@test
def test_configurations_create_value_out_of_bounds(self):
"""Test create configuration with value out of bounds."""
expected_configs = self.expected_default_datastore_configs()
values = json.dumps(expected_configs.get('out_of_bounds_over'))
assert_unprocessable(instance_info.dbaas.configurations.create,
CONFIG_NAME, values, CONFIG_DESC)
values = json.dumps(expected_configs.get('out_of_bounds_under'))
assert_unprocessable(instance_info.dbaas.configurations.create,
CONFIG_NAME, values, CONFIG_DESC)
@test
def test_valid_configurations_create(self):
"""create a configuration with valid parameters from config."""
expected_configs = self.expected_default_datastore_configs()
values = json.dumps(expected_configs.get('valid_values'))
expected_values = json.loads(values)
result = instance_info.dbaas.configurations.create(
CONFIG_NAME,
values,
CONFIG_DESC,
datastore=instance_info.dbaas_datastore,
datastore_version=instance_info.dbaas_datastore_version)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 200)
with TypeCheck('Configuration', result) as configuration:
configuration.has_field('name', str)
configuration.has_field('description', str)
configuration.has_field('values', dict)
configuration.has_field('datastore_name', str)
configuration.has_field('datastore_version_id', str)
configuration.has_field('datastore_version_name', str)
global configuration_info
configuration_info = result
assert_equal(configuration_info.name, CONFIG_NAME)
assert_equal(configuration_info.description, CONFIG_DESC)
assert_equal(configuration_info.values, expected_values)
@test(runs_after=[test_valid_configurations_create])
def test_appending_to_existing_configuration(self):
"""test_appending_to_existing_configuration"""
# test being able to update and insert new parameter name and values
# to an existing configuration
expected_configs = self.expected_default_datastore_configs()
values = json.dumps(expected_configs.get('appending_values'))
# ensure updated timestamp is different than created
if not CONFIG.fake_mode:
sleep(1)
instance_info.dbaas.configurations.edit(configuration_info.id,
values)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 200)
@test(depends_on_classes=[CreateConfigurations],
groups=[tests.DBAAS_API_CONFIGURATIONS])
class AfterConfigurationsCreation(ConfigurationsTestBase):
@test
def test_assign_configuration_to_invalid_instance(self):
"""test assigning to an instance that does not exist"""
invalid_id = "invalid-inst-id"
try:
instance_info.dbaas.instances.modify(invalid_id,
configuration_info.id)
except exceptions.NotFound:
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 404)
@test
def test_assign_configuration_to_valid_instance(self):
"""test assigning a configuration to an instance"""
print("instance_info.id: %s" % instance_info.id)
print("configuration_info: %s" % configuration_info)
print("configuration_info.id: %s" % configuration_info.id)
config_id = configuration_info.id
instance_info.dbaas.instances.modify(instance_info.id,
configuration=config_id)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
@test(depends_on=[test_assign_configuration_to_valid_instance])
def test_assign_configuration_to_instance_with_config(self):
"""test assigning a configuration to an instance conflicts"""
config_id = configuration_info.id
assert_raises(exceptions.BadRequest,
instance_info.dbaas.instances.modify, instance_info.id,
configuration=config_id)
@test(depends_on=[test_assign_configuration_to_valid_instance])
@time_out(30)
def test_get_configuration_details_from_instance_validation(self):
"""validate the configuration after attaching"""
print("instance_info.id: %s" % instance_info.id)
inst = instance_info.dbaas.instances.get(instance_info.id)
configuration_id = inst.configuration['id']
print("configuration_info: %s" % configuration_id)
assert_not_equal(None, configuration_id)
_test_configuration_is_applied_to_instance(instance_info,
configuration_id)
@test(depends_on=[test_get_configuration_details_from_instance_validation])
def test_configurations_get(self):
"""test that the instance shows up on the assigned configuration"""
result = instance_info.dbaas.configurations.get(configuration_info.id)
assert_equal(configuration_info.id, result.id)
assert_equal(configuration_info.name, result.name)
assert_equal(configuration_info.description, result.description)
# check the result field types
with TypeCheck("configuration", result) as check:
check.has_field("id", str)
check.has_field("name", str)
check.has_field("description", str)
check.has_field("values", dict)
check.has_field("created", str)
check.has_field("updated", str)
check.has_field("instance_count", int)
print(result.values)
# check for valid timestamps
assert_true(_is_valid_timestamp(result.created))
assert_true(_is_valid_timestamp(result.updated))
# check that created and updated timestamps differ, since
# test_appending_to_existing_configuration should have changed the
# updated timestamp
if not CONFIG.fake_mode:
assert_not_equal(result.created, result.updated)
assert_equal(result.instance_count, 1)
with CollectionCheck("configuration_values", result.values) as check:
# check each item has the correct type according to the rules
for (item_key, item_val) in result.values.items():
print("item_key: %s" % item_key)
print("item_val: %s" % item_val)
dbaas = instance_info.dbaas
param = dbaas.configuration_parameters.get_parameter(
instance_info.dbaas_datastore,
instance_info.dbaas_datastore_version,
item_key)
if param.type == 'integer':
check.has_element(item_key, int)
if param.type == 'string':
check.has_element(item_key, str)
if param.type == 'boolean':
check.has_element(item_key, bool)
# Test to make sure that another user is not able to GET this config
reqs = Requirements(is_admin=False)
test_auth_user = instance_info.user.auth_user
other_user = CONFIG.users.find_user(reqs, black_list=[test_auth_user])
other_user_tenant_id = other_user.tenant_id
client_tenant_id = instance_info.user.tenant_id
if other_user_tenant_id == client_tenant_id:
other_user = CONFIG.users.find_user(
reqs, black_list=[instance_info.user.auth_user,
other_user])
print(other_user)
print(other_user.__dict__)
other_client = create_dbaas_client(other_user)
assert_raises(exceptions.NotFound, other_client.configurations.get,
configuration_info.id)
@test(depends_on_classes=[AfterConfigurationsCreation],
groups=[tests.DBAAS_API_CONFIGURATIONS])
class ListConfigurations(ConfigurationsTestBase):
@test
def test_configurations_list(self):
        # test that created configurations show up in the listing
result = instance_info.dbaas.configurations.list()
for conf in result:
with TypeCheck("Configuration", conf) as check:
check.has_field('id', str)
check.has_field('name', str)
check.has_field('description', str)
check.has_field('datastore_version_id', str)
check.has_field('datastore_version_name', str)
check.has_field('datastore_name', str)
exists = [config for config in result if
config.id == configuration_info.id]
assert_equal(1, len(exists))
configuration = exists[0]
assert_equal(configuration.id, configuration_info.id)
assert_equal(configuration.name, configuration_info.name)
assert_equal(configuration.description, configuration_info.description)
@test
def test_configurations_list_for_instance(self):
        # test that getting an instance shows its assigned configuration
instance = instance_info.dbaas.instances.get(instance_info.id)
assert_equal(instance.configuration['id'], configuration_info.id)
assert_equal(instance.configuration['name'], configuration_info.name)
# expecting two things in links, href and bookmark
assert_equal(2, len(instance.configuration['links']))
link = instance.configuration['links'][0]
global configuration_href
configuration_href = link['href']
@test
def test_get_default_configuration_on_instance(self):
        # test that the api call to get an instance's default configuration
        # template exists
result = instance_info.dbaas.instances.configuration(instance_info.id)
global configuration_default
configuration_default = result
assert_not_equal(None, result.configuration)
@test
def test_changing_configuration_with_nondynamic_parameter(self):
"""test_changing_configuration_with_nondynamic_parameter"""
expected_configs = self.expected_default_datastore_configs()
values = json.dumps(expected_configs.get('nondynamic_parameter'))
instance_info.dbaas.configurations.update(configuration_info.id,
values)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
instance_info.dbaas.configurations.get(configuration_info.id)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 200)
@test(depends_on=[test_changing_configuration_with_nondynamic_parameter])
@time_out(20)
def test_waiting_for_instance_in_restart_required(self):
"""test_waiting_for_instance_in_restart_required"""
def result_is_not_active():
instance = instance_info.dbaas.instances.get(
instance_info.id)
if instance.status in CONFIG.running_status:
return False
else:
return True
poll_until(result_is_not_active)
instance = instance_info.dbaas.instances.get(instance_info.id)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 200)
assert_equal('RESTART_REQUIRED', instance.status)
@test(depends_on=[test_waiting_for_instance_in_restart_required])
def test_restart_service_should_return_active(self):
"""test_restart_service_should_return_active"""
instance_info.dbaas.instances.restart(instance_info.id)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
def result_is_active():
instance = instance_info.dbaas.instances.get(
instance_info.id)
if instance.status in CONFIG.running_status:
return True
else:
assert_true(instance.status in ['REBOOT', 'SHUTDOWN'])
return False
poll_until(result_is_active)
@test(depends_on=[test_restart_service_should_return_active])
@time_out(30)
def test_get_configuration_details_from_instance_validation(self):
"""test_get_configuration_details_from_instance_validation"""
inst = instance_info.dbaas.instances.get(instance_info.id)
configuration_id = inst.configuration['id']
assert_not_equal(None, inst.configuration['id'])
_test_configuration_is_applied_to_instance(instance_info,
configuration_id)
@test(depends_on=[test_configurations_list])
def test_compare_list_and_details_timestamps(self):
# compare config timestamps between list and details calls
result = instance_info.dbaas.configurations.list()
list_config = [config for config in result if
config.id == configuration_info.id]
assert_equal(1, len(list_config))
details_config = instance_info.dbaas.configurations.get(
configuration_info.id)
assert_equal(list_config[0].created, details_config.created)
assert_equal(list_config[0].updated, details_config.updated)
@test(depends_on_classes=[ListConfigurations],
groups=[tests.DBAAS_API_CONFIGURATIONS])
class StartInstanceWithConfiguration(ConfigurationsTestBase):
@test
def test_start_instance_with_configuration(self):
"""test that a new instance will apply the configuration on create"""
global configuration_instance
databases = []
databases.append({"name": "firstdbconfig", "character_set": "latin2",
"collate": "latin2_general_ci"})
databases.append({"name": "db2"})
configuration_instance.databases = databases
users = []
users.append({"name": "liteconf", "password": "liteconfpass",
"databases": [{"name": "firstdbconfig"}]})
configuration_instance.users = users
configuration_instance.name = "TEST_" + str(uuid.uuid4()) + "_config"
flavor_href = instance_info.dbaas_flavor_href
configuration_instance.dbaas_flavor_href = flavor_href
configuration_instance.volume = instance_info.volume
configuration_instance.dbaas_datastore = instance_info.dbaas_datastore
configuration_instance.dbaas_datastore_version = \
instance_info.dbaas_datastore_version
configuration_instance.nics = instance_info.nics
result = instance_info.dbaas.instances.create(
configuration_instance.name,
configuration_instance.dbaas_flavor_href,
configuration_instance.volume,
configuration_instance.databases,
configuration_instance.users,
nics=configuration_instance.nics,
availability_zone="nova",
datastore=configuration_instance.dbaas_datastore,
datastore_version=configuration_instance.dbaas_datastore_version,
configuration=configuration_href)
assert_equal(200, instance_info.dbaas.last_http_code)
assert_equal("BUILD", result.status)
configuration_instance.id = result.id
@test(depends_on_classes=[StartInstanceWithConfiguration],
groups=[tests.DBAAS_API_CONFIGURATIONS])
class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase):
@test
@time_out(TIMEOUT_INSTANCE_CREATE)
def test_instance_with_configuration_active(self):
"""wait for the instance created with configuration"""
def result_is_active():
instance = instance_info.dbaas.instances.get(
configuration_instance.id)
if instance.status in CONFIG.running_status:
return True
else:
assert_equal("BUILD", instance.status)
return False
poll_until(result_is_active)
@test(depends_on=[test_instance_with_configuration_active])
@time_out(30)
def test_get_configuration_details_from_instance_validation(self):
"""Test configuration is applied correctly to the instance."""
inst = instance_info.dbaas.instances.get(configuration_instance.id)
configuration_id = inst.configuration['id']
assert_not_equal(None, configuration_id)
_test_configuration_is_applied_to_instance(configuration_instance,
configuration_id)
@test(depends_on=[WaitForConfigurationInstanceToFinish],
groups=[tests.DBAAS_API_CONFIGURATIONS])
class DeleteConfigurations(ConfigurationsTestBase):
@before_class
def setUp(self):
# need to store the parameter details that will be deleted
config_param_name = sql_variables[1]
instance_info.dbaas.configuration_parameters.get_parameter(
instance_info.dbaas_datastore,
instance_info.dbaas_datastore_version,
config_param_name)
resp, body = instance_info.dbaas.client.last_response
print(resp)
print(body)
self.config_parameter_dict = json.loads(body.decode())
@after_class(always_run=True)
def tearDown(self):
# need to "undelete" the parameter that was deleted from the mgmt call
if instance_info.dbaas:
ds = instance_info.dbaas_datastore
ds_v = instance_info.dbaas_datastore_version
version = instance_info.dbaas.datastore_versions.get(
ds, ds_v)
client = instance_info.dbaas_admin.mgmt_configs
print(self.config_parameter_dict)
client.create(version.id,
self.config_parameter_dict['name'],
self.config_parameter_dict['restart_required'],
self.config_parameter_dict['type'],
self.config_parameter_dict['max'],
self.config_parameter_dict['min'])
@test
def test_delete_invalid_configuration_not_found(self):
        # Deleting a configuration that does not exist should raise NotFound.
invalid_configuration_id = "invalid-config-id"
assert_raises(exceptions.NotFound,
instance_info.dbaas.configurations.delete,
invalid_configuration_id)
@test(depends_on=[test_delete_invalid_configuration_not_found])
def test_delete_configuration_parameter_with_mgmt_api(self):
        # Test that a parameter which is assigned to an instance can be
        # deleted and does not affect a later unassign. We delete a parameter
        # that is used by a test (connect_timeout).
ds = instance_info.dbaas_datastore
ds_v = instance_info.dbaas_datastore_version
version = instance_info.dbaas.datastore_versions.get(
ds, ds_v)
client = instance_info.dbaas_admin.mgmt_configs
config_param_name = self.config_parameter_dict['name']
client.delete(version.id, config_param_name)
assert_raises(
exceptions.NotFound,
instance_info.dbaas.configuration_parameters.get_parameter,
ds,
ds_v,
config_param_name)
@test(depends_on=[test_delete_configuration_parameter_with_mgmt_api])
def test_unable_delete_instance_configurations(self):
# test deleting a configuration that is assigned to
# an instance is not allowed.
assert_raises(exceptions.BadRequest,
instance_info.dbaas.configurations.delete,
configuration_info.id)
@test(depends_on=[test_unable_delete_instance_configurations])
@time_out(30)
def test_unassign_configuration_from_instances(self):
"""test to unassign configuration from instance"""
instance_info.dbaas.instances.update(configuration_instance.id,
remove_configuration=True)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
instance_info.dbaas.instances.update(instance_info.id,
remove_configuration=True)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
instance_info.dbaas.instances.get(instance_info.id)
        def result_has_no_configuration():
            # Polled for both instances; ``inst_info`` is rebound below to the
            # instance currently being checked before each poll_until() call.
            instance = instance_info.dbaas.instances.get(inst_info.id)
            if hasattr(instance, 'configuration'):
                return False
            else:
                return True
        inst_info = instance_info
        poll_until(result_has_no_configuration)
        inst_info = configuration_instance
        poll_until(result_has_no_configuration)
instance = instance_info.dbaas.instances.get(instance_info.id)
assert_equal('RESTART_REQUIRED', instance.status)
@test(depends_on=[test_unassign_configuration_from_instances])
def test_assign_in_wrong_state(self):
        # Assigning a config to an instance in RESTART_REQUIRED state
        # is not allowed.
assert_raises(exceptions.BadRequest,
instance_info.dbaas.instances.modify,
configuration_instance.id,
configuration=configuration_info.id)
@test(depends_on=[test_assign_in_wrong_state])
def test_no_instances_on_configuration(self):
"""test_no_instances_on_configuration"""
result = instance_info.dbaas.configurations.get(configuration_info.id)
assert_equal(configuration_info.id, result.id)
assert_equal(configuration_info.name, result.name)
assert_equal(configuration_info.description, result.description)
assert_equal(result.instance_count, 0)
print(configuration_instance.id)
print(instance_info.id)
@test(depends_on=[test_unassign_configuration_from_instances])
@time_out(120)
def test_restart_service_should_return_active(self):
"""test that after restarting the instance it becomes active"""
instance_info.dbaas.instances.restart(instance_info.id)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
def result_is_active():
instance = instance_info.dbaas.instances.get(
instance_info.id)
if instance.status in CONFIG.running_status:
return True
else:
assert_equal("REBOOT", instance.status)
return False
poll_until(result_is_active)
@test(depends_on=[test_restart_service_should_return_active])
def test_assign_config_and_name_to_instance_using_patch(self):
"""test_assign_config_and_name_to_instance_using_patch"""
new_name = 'new_name'
report = CONFIG.get_report()
report.log("instance_info.id: %s" % instance_info.id)
report.log("configuration_info: %s" % configuration_info)
report.log("configuration_info.id: %s" % configuration_info.id)
report.log("instance name:%s" % instance_info.name)
report.log("instance new name:%s" % new_name)
saved_name = instance_info.name
config_id = configuration_info.id
instance_info.dbaas.instances.update(instance_info.id,
configuration=config_id,
name=new_name)
assert_equal(202, instance_info.dbaas.last_http_code)
check = instance_info.dbaas.instances.get(instance_info.id)
assert_equal(200, instance_info.dbaas.last_http_code)
assert_equal(check.name, new_name)
# restore instance name
instance_info.dbaas.instances.update(instance_info.id,
name=saved_name)
assert_equal(202, instance_info.dbaas.last_http_code)
instance = instance_info.dbaas.instances.get(instance_info.id)
assert_equal('RESTART_REQUIRED', instance.status)
# restart to be sure configuration is applied
instance_info.dbaas.instances.restart(instance_info.id)
assert_equal(202, instance_info.dbaas.last_http_code)
sleep(2)
def result_is_active():
instance = instance_info.dbaas.instances.get(
instance_info.id)
if instance.status in CONFIG.running_status:
return True
else:
assert_equal("REBOOT", instance.status)
return False
poll_until(result_is_active)
# test assigning a configuration to an instance that
# already has an assigned configuration with patch
config_id = configuration_info.id
assert_raises(exceptions.BadRequest,
instance_info.dbaas.instances.update,
instance_info.id, configuration=config_id)
@test(runs_after=[test_assign_config_and_name_to_instance_using_patch])
def test_unassign_configuration_after_patch(self):
"""Remove the configuration from the instance"""
instance_info.dbaas.instances.update(instance_info.id,
remove_configuration=True)
assert_equal(202, instance_info.dbaas.last_http_code)
instance = instance_info.dbaas.instances.get(instance_info.id)
assert_equal('RESTART_REQUIRED', instance.status)
# restart to be sure configuration has been unassigned
instance_info.dbaas.instances.restart(instance_info.id)
assert_equal(202, instance_info.dbaas.last_http_code)
sleep(2)
def result_is_active():
instance = instance_info.dbaas.instances.get(
instance_info.id)
if instance.status in CONFIG.running_status:
return True
else:
assert_equal("REBOOT", instance.status)
return False
poll_until(result_is_active)
result = instance_info.dbaas.configurations.get(configuration_info.id)
assert_equal(result.instance_count, 0)
@test
def test_unassign_configuration_from_invalid_instance_using_patch(self):
# test unassign config group from an invalid instance
invalid_id = "invalid-inst-id"
try:
instance_info.dbaas.instances.update(invalid_id,
remove_configuration=True)
except exceptions.NotFound:
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 404)
@test(runs_after=[test_unassign_configuration_after_patch])
def test_delete_unassigned_configuration(self):
"""test_delete_unassigned_configuration"""
instance_info.dbaas.configurations.delete(configuration_info.id)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
@test(depends_on=[test_delete_unassigned_configuration])
@time_out(TIMEOUT_INSTANCE_DELETE)
def test_delete_configuration_instance(self):
"""test_delete_configuration_instance"""
instance_info.dbaas.instances.delete(configuration_instance.id)
assert_equal(202, instance_info.dbaas.last_http_code)
def instance_is_gone():
try:
instance_info.dbaas.instances.get(configuration_instance.id)
return False
except exceptions.NotFound:
return True
poll_until(instance_is_gone)
assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get,
configuration_instance.id)
|
|
from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
Index,
IntervalIndex,
MultiIndex,
date_range,
)
from pandas.core.indexes.base import InvalidIndexError
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
def test_slice_locs_partial(idx):
sorted_idx, _ = idx.sortlevel(0)
result = sorted_idx.slice_locs(("foo", "two"), ("qux", "one"))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ("qux", "one"))
assert result == (0, 5)
result = sorted_idx.slice_locs(("foo", "two"), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs("bar", "baz")
assert result == (2, 4)
def test_slice_locs():
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(
*idx.slice_locs(
df.index[5] + timedelta(seconds=30), df.index[15] - timedelta(seconds=30)
)
)
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch():
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
with pytest.raises(TypeError, match="^Level type mismatch"):
idx.slice_locs((1, 3))
with pytest.raises(TypeError, match="^Level type mismatch"):
idx.slice_locs(df.index[5] + timedelta(seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with pytest.raises(TypeError, match="^Level type mismatch"):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with pytest.raises(TypeError, match="^Level type mismatch"):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted():
index = MultiIndex(
levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
)
msg = "[Kk]ey length.*greater than MultiIndex lexsort depth"
with pytest.raises(KeyError, match=msg):
index.slice_locs((1, 0, 1), (2, 1, 0))
    # After sorting by level 0 the same slice resolves without raising; this
    # is just a smoke test (the return value is not checked).
    sorted_index, _ = index.sortlevel(0)
    sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_not_contained():
    # keys that are not present fall back to searchsorted insertion positions
index = MultiIndex(
levels=[[0, 2, 4, 6], [0, 2, 4]],
codes=[[0, 0, 0, 1, 1, 2, 3, 3, 3], [0, 1, 2, 1, 2, 2, 0, 1, 2]],
sortorder=0,
)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_putmask_with_wrong_mask(idx):
# GH18368
msg = "putmask: mask and data must be the same size"
with pytest.raises(ValueError, match=msg):
idx.putmask(np.ones(len(idx) + 1, np.bool), 1)
with pytest.raises(ValueError, match=msg):
idx.putmask(np.ones(len(idx) - 1, np.bool), 1)
with pytest.raises(ValueError, match=msg):
idx.putmask("foo", 1)
def test_get_indexer():
major_axis = Index(np.arange(4))
minor_axis = Index(np.arange(2))
major_codes = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_codes = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
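    # The index built above therefore contains the tuples
    # [(0, 0), (0, 1), (1, 0), (2, 0), (2, 1), (3, 0), (3, 1)],
    # which the expected indexer arrays below are written against.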
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method="pad")
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method="pad")
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method="ffill")
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method="backfill")
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method="backfill")
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method="bfill")
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(list(range(10)) + list(range(10)))
idx2 = Index(list(range(20)))
msg = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest():
midx = MultiIndex.from_tuples([("a", 1), ("b", 2)])
msg = "method='nearest' not implemented yet for MultiIndex; see GitHub issue 9365"
with pytest.raises(NotImplementedError, match=msg):
midx.get_indexer(["a"], method="nearest")
msg = "tolerance not implemented yet for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
midx.get_indexer(["a"], method="pad", tolerance=2)
def test_getitem(idx):
# scalar
assert idx[2] == ("bar", "one")
# slice
result = idx[2:5]
expected = idx[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = idx[[True, False, True, False, True, True]]
result2 = idx[np.array([True, False, True, False, True, True])]
expected = idx[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(idx):
sorted_idx, _ = idx.sortlevel(0)
assert sorted_idx.get_loc("baz") == slice(3, 4)
assert sorted_idx.get_loc("foo") == slice(0, 2)
def test_get_indexer_consistency(idx):
# See GH 16819
if isinstance(idx, IntervalIndex):
pass
if idx.is_unique or isinstance(idx, CategoricalIndex):
indexer = idx.get_indexer(idx[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
e = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=e):
idx.get_indexer(idx[0:2])
indexer, _ = idx.get_indexer_non_unique(idx[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
@pytest.mark.parametrize("ind1", [[True] * 5, pd.Index([True] * 5)])
@pytest.mark.parametrize(
"ind2",
[[True, False, True, False, False], pd.Index([True, False, True, False, False])],
)
def test_getitem_bool_index_all(ind1, ind2):
# GH#22533
idx = MultiIndex.from_tuples([(10, 1), (20, 2), (30, 3), (40, 4), (50, 5)])
tm.assert_index_equal(idx[ind1], idx)
expected = MultiIndex.from_tuples([(10, 1), (30, 3)])
tm.assert_index_equal(idx[ind2], expected)
@pytest.mark.parametrize("ind1", [[True], pd.Index([True])])
@pytest.mark.parametrize("ind2", [[False], pd.Index([False])])
def test_getitem_bool_index_single(ind1, ind2):
# GH#22533
idx = MultiIndex.from_tuples([(10, 1)])
tm.assert_index_equal(idx[ind1], idx)
expected = pd.MultiIndex(
levels=[np.array([], dtype=np.int64), np.array([], dtype=np.int64)],
codes=[[], []],
)
tm.assert_index_equal(idx[ind2], expected)
def test_get_loc(idx):
assert idx.get_loc(("foo", "two")) == 1
assert idx.get_loc(("baz", "two")) == 3
with pytest.raises(KeyError, match=r"^10$"):
idx.get_loc(("bar", "two"))
with pytest.raises(KeyError, match=r"^'quux'$"):
idx.get_loc("quux")
msg = "only the default get_loc method is currently supported for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
idx.get_loc("foo", method="nearest")
# 3 levels
index = MultiIndex(
levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
)
with pytest.raises(KeyError, match=r"^\(1, 1\)$"):
index.get_loc((1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates():
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(["c", "a", "a", "b", "b"])
rs = index.get_loc("c")
xp = 0
assert rs == xp
def test_get_loc_level():
index = MultiIndex(
levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
)
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
with pytest.raises(KeyError, match=r"^\(2, 2\)$"):
index.get_loc_level((2, 2))
# GH 22221: unused label
with pytest.raises(KeyError, match=r"^2$"):
index.drop(2).get_loc_level(2)
# Unused label on unsorted level:
with pytest.raises(KeyError, match=r"^2$"):
index.drop(1, level=2).get_loc_level(2, level=2)
index = MultiIndex(
levels=[[2000], list(range(4))],
codes=[np.array([0, 0, 0, 0]), np.array([0, 1, 2, 3])],
)
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize("dtype1", [int, float, bool, str])
@pytest.mark.parametrize("dtype2", [int, float, bool, str])
def test_get_loc_multiple_dtypes(dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1), np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize("level", [0, 1])
@pytest.mark.parametrize("dtypes", [[int, float], [float, int]])
def test_get_loc_implicit_cast(level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [["a", "b"], ["c", "d"]]
key = ["b", "d"]
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool():
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype="int64")]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
with pytest.raises(KeyError, match=r"^\(False, True\)$"):
idx.get_loc((False, True))
with pytest.raises(KeyError, match=r"^\(True, False\)$"):
idx.get_loc((True, False))
@pytest.mark.parametrize("level", [0, 1])
def test_get_loc_nan(level, nulls_fixture):
# GH 18485 : NaN in MultiIndex
levels = [["a", "b"], ["c", "d"]]
key = ["b", "d"]
levels[level] = np.array([0, nulls_fixture], dtype=type(nulls_fixture))
key[level] = nulls_fixture
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan():
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
with pytest.raises(KeyError, match=r"^3\.0$"):
idx.get_loc(3)
with pytest.raises(KeyError, match=r"^nan$"):
idx.get_loc(np.nan)
with pytest.raises(KeyError, match=r"^\[nan\]$"):
idx.get_loc([np.nan])
def test_get_indexer_categorical_time():
# https://github.com/pandas-dev/pandas/issues/21390
midx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(date_range("2012-01-01", periods=3, freq="H")),
]
)
result = midx.get_indexer(midx)
tm.assert_numpy_array_equal(result, np.arange(9, dtype=np.intp))
def test_timestamp_multiindex_indexer():
# https://github.com/pandas-dev/pandas/issues/26944
idx = pd.MultiIndex.from_product(
[
pd.date_range("2019-01-01T00:15:33", periods=100, freq="H", name="date"),
["x"],
[3],
]
)
df = pd.DataFrame({"foo": np.arange(len(idx))}, idx)
result = df.loc[pd.IndexSlice["2019-1-2":, "x", :], "foo"]
qidx = pd.MultiIndex.from_product(
[
pd.date_range(
start="2019-01-02T00:15:33",
end="2019-01-05T02:15:33",
freq="H",
name="date",
),
["x"],
[3],
]
)
should_be = pd.Series(data=np.arange(24, len(qidx) + 24), index=qidx, name="foo")
tm.assert_series_equal(result, should_be)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.learning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from numpy import testing as np_testing
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.slim.python.slim import learning
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import saver as saver_lib
class ClipGradientNormsTest(test.TestCase):
  def clip_values(self, arr):
    # Reference clipping: rescale the vector to L2 norm ``self._max_norm``
    # whenever its norm exceeds that maximum, otherwise leave it untouched.
    norm = np.sqrt(np.sum(arr**2))
    if norm > self._max_norm:
      return self._max_norm * arr / np.sqrt(np.sum(arr**2))
    return arr
def setUp(self):
np.random.seed(0)
self._max_norm = 1.0
self._grad_vec = np.array([1., 2., 3.])
self._clipped_grad_vec = self.clip_values(self._grad_vec)
self._zero_vec = np.zeros(self._grad_vec.size)
def testOrdinaryGradIsClippedCorrectly(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(self._zero_vec, dtype=dtypes.float32)
gradients_to_variables = (gradient, variable)
[gradients_to_variables] = learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)
# Ensure the variable passed through.
self.assertEqual(gradients_to_variables[1], variable)
with self.test_session() as sess:
actual_gradient = sess.run(gradients_to_variables[0])
np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)
def testNoneGradPassesThroughCorrectly(self):
gradient = None
variable = variables_lib.Variable(self._zero_vec, dtype=dtypes.float32)
gradients_to_variables = (gradient, variable)
[gradients_to_variables] = learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)
self.assertEqual(gradients_to_variables[0], None)
self.assertEqual(gradients_to_variables[1], variable)
def testIndexedSlicesGradIsClippedCorrectly(self):
sparse_grad_indices = np.array([0, 1, 4])
sparse_grad_dense_shape = [self._grad_vec.size]
values = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
indices = constant_op.constant(sparse_grad_indices, dtype=dtypes.int32)
dense_shape = constant_op.constant(
sparse_grad_dense_shape, dtype=dtypes.int32)
gradient = ops.IndexedSlices(values, indices, dense_shape)
variable = variables_lib.Variable(self._zero_vec, dtype=dtypes.float32)
gradients_to_variables = (gradient, variable)
gradients_to_variables = learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)[0]
    # Ensure the built IndexedSlices has the right form.
self.assertEqual(gradients_to_variables[1], variable)
self.assertEqual(gradients_to_variables[0].indices, indices)
self.assertEqual(gradients_to_variables[0].dense_shape, dense_shape)
with session.Session() as sess:
actual_gradient = sess.run(gradients_to_variables[0].values)
np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)
class MultiplyGradientsTest(test.TestCase):
def setUp(self):
np.random.seed(0)
self._multiplier = 3.7
self._grad_vec = np.array([1., 2., 3.])
self._multiplied_grad_vec = np.multiply(self._grad_vec, self._multiplier)
def testNonListGradsRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
with self.assertRaises(ValueError):
learning.multiply_gradients(grad_to_var, gradient_multipliers)
def testEmptyMultiplesRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
with self.assertRaises(ValueError):
learning.multiply_gradients([grad_to_var], {})
def testNonDictMultiplierRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
with self.assertRaises(ValueError):
learning.multiply_gradients([grad_to_var], 3)
def testMultipleOfNoneGradRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (None, variable)
gradient_multipliers = {variable: self._multiplier}
with self.assertRaises(ValueError):
learning.multiply_gradients(grad_to_var, gradient_multipliers)
def testMultipleGradientsWithVariables(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
[grad_to_var] = learning.multiply_gradients([grad_to_var],
gradient_multipliers)
# Ensure the variable passed through.
self.assertEqual(grad_to_var[1], variable)
with self.test_session() as sess:
actual_gradient = sess.run(grad_to_var[0])
np_testing.assert_almost_equal(actual_gradient, self._multiplied_grad_vec,
5)
def testIndexedSlicesGradIsMultiplied(self):
values = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
indices = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
dense_shape = constant_op.constant(
[self._grad_vec.size], dtype=dtypes.int32)
gradient = ops.IndexedSlices(values, indices, dense_shape)
variable = variables_lib.Variable(array_ops.zeros((1, 3)))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
[grad_to_var] = learning.multiply_gradients([grad_to_var],
gradient_multipliers)
    # Ensure the built IndexedSlices has the right form.
self.assertEqual(grad_to_var[1], variable)
self.assertEqual(grad_to_var[0].indices, indices)
self.assertEqual(grad_to_var[0].dense_shape, dense_shape)
with self.test_session() as sess:
actual_gradient = sess.run(grad_to_var[0].values)
np_testing.assert_almost_equal(actual_gradient, self._multiplied_grad_vec,
5)
def LogisticClassifier(inputs):
  """Toy single-layer logistic regression model used by the tests below."""
  return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
def BatchNormClassifier(inputs):
  """Same toy model with a batch norm layer (decay=0.1) in front."""
  inputs = layers.batch_norm(inputs, decay=0.1)
  return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
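# NOTE: every test below follows the same recipe: build one of the toy models
# above, attach a log loss, and hand the total loss to the slim training
# helpers. A minimal sketch of that recurring pattern (using only the modules
# imported at the top of this file) looks like:
#
#   tf_predictions = LogisticClassifier(tf_inputs)
#   loss_ops.log_loss(tf_predictions, tf_labels)
#   total_loss = loss_ops.get_total_loss()
#   optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
#   train_op = learning.create_train_op(total_loss, optimizer)
#   loss = learning.train(train_op, logdir, number_of_steps=300)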
class TrainBNClassifierTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
g = ops.Graph()
with g.as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertLess(loss, .1)
class CreateTrainOpTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.random.rand(16, 4).astype(np.float32)
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
def testUseUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
expected_mean = np.mean(self._inputs, axis=(0))
expected_var = np.var(self._inputs, axis=(0))
tf_predictions = BatchNormClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
moving_mean = variables_lib2.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib2.get_variables_by_name('moving_variance')[
0]
with session.Session() as sess:
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEmptyUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer, update_ops=[])
moving_mean = variables_lib2.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib2.get_variables_by_name('moving_variance')[
0]
with session.Session() as sess:
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# Since we skip update_ops the moving_vars are not updated.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
def testUseGlobalStep(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
global_step = variables_lib2.get_or_create_global_step()
with session.Session() as sess:
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
for _ in range(10):
sess.run([train_op])
global_step = global_step.eval()
# After 10 updates global_step should be 10.
self.assertAllClose(global_step, 10)
def testNoneGlobalStep(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(
total_loss, optimizer, global_step=None)
global_step = variables_lib2.get_or_create_global_step()
with session.Session() as sess:
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
for _ in range(10):
sess.run([train_op])
global_step = global_step.eval()
        # Since the train_op doesn't use global_step, it shouldn't change.
self.assertAllClose(global_step, 0)
def testRecordTrainOpInCollection(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
# Make sure the training op was recorded in the proper collection
self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
class TrainTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNonDefaultGraph(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
g = ops.Graph()
with g.as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10, graph=g)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithNoneAsLogdir(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, None, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithSessionConfig(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
loss = learning.train(
train_op,
None,
number_of_steps=300,
log_every_n_steps=10,
session_config=session_config)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithTrace(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
summary.scalar('total_loss', total_loss)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op,
logdir,
number_of_steps=300,
log_every_n_steps=10,
trace_every_n_steps=100)
self.assertIsNotNone(loss)
for trace_step in [1, 101, 201]:
trace_filename = 'tf_trace-%d.json' % trace_step
self.assertTrue(os.path.isfile(os.path.join(logdir, trace_filename)))
def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
summary.scalar('total_loss', total_loss)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
summary_op = summary.merge_all()
with self.assertRaises(ValueError):
learning.train(
train_op, None, number_of_steps=300, summary_op=summary_op)
def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
with self.assertRaises(ValueError):
learning.train(
train_op, None, number_of_steps=300, trace_every_n_steps=10)
def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
with self.assertRaises(ValueError):
learning.train(
train_op, None, init_op=None, number_of_steps=300, saver=saver)
def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
with self.assertRaises(RuntimeError):
learning.train(train_op, logdir, init_op=None, number_of_steps=300)
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithLocalVariable(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
local_multiplier = variables_lib2.local_variable(1.0)
tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testResumeTrainAchievesRoughlyTheSameLoss(self):
logdir = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
number_of_steps = [300, 301, 305]
for i in range(len(number_of_steps)):
with ops.Graph().as_default():
random_seed.set_random_seed(i)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op,
logdir,
number_of_steps=number_of_steps[i],
log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
total_loss = loss_ops.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=learning_rate)
if gradient_multiplier != 1.0:
variables = variables_lib.trainable_variables()
gradient_multipliers = {var: gradient_multiplier for var in variables}
else:
gradient_multipliers = None
return learning.create_train_op(
total_loss, optimizer, gradient_multipliers=gradient_multipliers)
def testTrainWithInitFromCheckpoint(self):
logdir1 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs1')
logdir2 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs2')
# First, train the model one step (make sure the error is high).
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op()
loss = learning.train(train_op, logdir1, number_of_steps=1)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
train_op = self.create_train_op()
loss = learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
# Finally, advance the model a single step and validate that the loss is
# still low.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
train_op = self.create_train_op()
model_variables = variables_lib.global_variables()
model_path = os.path.join(logdir1, 'model.ckpt-300')
init_op = variables_lib.global_variables_initializer()
op, init_feed_dict = variables_lib2.assign_from_checkpoint(
model_path, model_variables)
def InitAssignFn(sess):
sess.run(op, init_feed_dict)
loss = learning.train(
train_op,
logdir2,
number_of_steps=1,
init_op=init_op,
init_fn=InitAssignFn)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
def testTrainWithInitFromFn(self):
logdir1 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs1')
logdir2 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs2')
# First, train the model one step (make sure the error is high).
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op()
loss = learning.train(train_op, logdir1, number_of_steps=1)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
train_op = self.create_train_op()
loss = learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
# Finally, advance the model a single step and validate that the loss is
# still low.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
train_op = self.create_train_op()
model_variables = variables_lib.global_variables()
model_path = os.path.join(logdir1, 'model.ckpt-300')
saver = saver_lib.Saver(model_variables)
def RestoreFn(sess):
saver.restore(sess, model_path)
loss = learning.train(
train_op, logdir2, number_of_steps=1, init_fn=RestoreFn)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def ModelLoss(self):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = LogisticClassifier(tf_inputs)
loss_ops.log_loss(tf_predictions, tf_labels)
return loss_ops.get_total_loss()
def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
logdir1 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs1')
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights = variables_lib2.get_variables_by_name('weights')
train_op = learning.create_train_op(
total_loss, optimizer, variables_to_train=weights)
loss = learning.train(
train_op, logdir1, number_of_steps=200, log_every_n_steps=10)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Next, train the biases of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
biases = variables_lib2.get_variables_by_name('biases')
train_op = learning.create_train_op(
total_loss, optimizer, variables_to_train=biases)
loss = learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Finally, train both weights and bias to get lower loss.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = learning.create_train_op(total_loss, optimizer)
loss = learning.train(
train_op, logdir1, number_of_steps=400, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights, biases = variables_lib2.get_variables()
train_op = learning.create_train_op(total_loss, optimizer)
train_weights = learning.create_train_op(
total_loss, optimizer, variables_to_train=[weights])
train_biases = learning.create_train_op(
total_loss, optimizer, variables_to_train=[biases])
with session.Session() as sess:
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
        # Get the initial weights and biases values.
weights_values, biases_values = sess.run([weights, biases])
self.assertGreater(np.linalg.norm(weights_values), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
# Update weights and biases.
loss = sess.run(train_op)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the weights and biases have been updated.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
weights_values, biases_values = new_weights, new_biases
# Update only weights.
loss = sess.run(train_weights)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the weights have been updated, but biases have not.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
weights_values = new_weights
# Update only biases.
loss = sess.run(train_biases)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the biases have been updated, but weights have not.
self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
def testTrainWithAlteredGradients(self):
# Use the same learning rate but different gradient multipliers
# to train two models. Model with equivalently larger learning
# rate (i.e., learning_rate * gradient_multiplier) has smaller
# training loss.
logdir1 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs1')
logdir2 = os.path.join(
tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs2')
multipliers = [1., 1000.]
number_of_steps = 10
losses = []
learning_rate = 0.001
# First, train the model with equivalently smaller learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[0])
loss = learning.train(train_op, logdir1, number_of_steps=number_of_steps)
losses.append(loss)
self.assertGreater(loss, .5)
# Second, train the model with equivalently larger learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[1])
loss = learning.train(train_op, logdir2, number_of_steps=number_of_steps)
losses.append(loss)
self.assertIsNotNone(loss)
self.assertLess(loss, .5)
# The loss of the model trained with larger learning rate should
# be smaller.
self.assertGreater(losses[0], losses[1])
def testTrainWithEpochLimit(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
      tf_inputs_limited = input_lib.limit_epochs(tf_inputs, num_epochs=300)
      tf_labels_limited = input_lib.limit_epochs(tf_labels, num_epochs=300)
      tf_predictions = LogisticClassifier(tf_inputs_limited)
      loss_ops.log_loss(tf_predictions, tf_labels_limited)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = learning.create_train_op(total_loss, optimizer)
      loss = learning.train(train_op, logdir, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
self.assertTrue(os.path.isfile('{}/model.ckpt-300.index'.format(logdir)))
self.assertTrue(os.path.isfile('{}/model.ckpt-300.data-00000-of-00001'.format(logdir)))
if __name__ == '__main__':
test.main()
|
|
"""Local node REST api.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import flask
import flask_restplus as restplus
from flask_restplus import inputs
from treadmill import webutils
_LOGGER = logging.getLogger(__name__)
def init(api, cors, impl):
"""Configures REST handlers for allocation resource."""
app_ns = api.namespace(
'local-app', description='Local app REST operations'
)
req_parser = api.parser()
req_parser.add_argument('start',
default=0, help='The index (inclusive) of'
' the first line from the log file to return.'
' Index is zero based.',
location='args', required=False, type=int)
    req_parser.add_argument('limit',
                            help='The number of lines to return. '
                            '-1 (the default) means no limit, i.e. return '
                            'all the lines in the file from "start".',
                            location='args', required=False, type=int,
                            default=-1)
req_parser.add_argument('order',
choices=('asc', 'desc'), default='asc',
help='The order of the log lines to return. "asc":'
' chronological, "desc": reverse chronological',
location='args', required=False, type=str)
    req_parser.add_argument('all',
                            default=False, help='Whether to return all the '
                            'log entries or just the latest ones.',
                            location='args', required=False,
                            type=inputs.boolean)
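    # Example request against the log endpoints below (hypothetical app,
    # uniq and service names; the namespace is mounted under 'local-app'):
    #   GET /local-app/proid.myapp/0000000123/service/web?start=0&limit=100&order=desc
    # The query string is parsed by req_parser above and forwarded to
    # impl.log.get() as keyword arguments.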
@app_ns.route('/', defaults={'app': None})
@app_ns.route('/<app>')
class _AppList(restplus.Resource):
"""Local app list resource."""
@webutils.get_api(api, cors)
def get(self, app):
"""Returns listof local instances."""
return impl.list(
state=flask.request.args.get('state'),
app_name=app,
)
@app_ns.route('/<app>/<uniq>',)
class _AppDetails(restplus.Resource):
"""Local app details resource."""
@webutils.get_api(api, cors)
def get(self, app, uniq):
"""Returns list of local instances."""
return impl.get('/'.join([app, uniq]))
@app_ns.route('/<app>/<uniq>/sys/<component>',)
class _AppSystemLog(restplus.Resource):
"""Local app details resource."""
def _to_rsrc_id(self, app, uniq, component):
"""Returns the log resource id based on the args."""
return '/'.join([app, uniq, 'sys', component])
@webutils.opt_gzip
@webutils.raw_get_api(api, cors, parser=req_parser)
def get(self, app, uniq, component):
"""Return content of system component log.."""
kwargs = req_parser.parse_args()
if kwargs.get('all'):
return flask.Response(
impl.log.get_all(self._to_rsrc_id(app, uniq, component)),
mimetype='text/plain')
del kwargs['all'] # 'all' is always in kwargs...
return flask.Response(
impl.log.get(self._to_rsrc_id(app, uniq, component), **kwargs),
mimetype='text/plain')
@app_ns.route('/<app>/<uniq>/service/<service>',)
class _AppServiceLog(restplus.Resource):
"""Local app details resource."""
def _to_rsrc_id(self, app, uniq, service):
"""Returns the log resource id based on the args."""
return '/'.join([app, uniq, 'app', service])
@webutils.opt_gzip
@webutils.raw_get_api(api, cors, parser=req_parser)
def get(self, app, uniq, service):
"""Return content of system component log.."""
kwargs = req_parser.parse_args()
if kwargs.get('all'):
return flask.Response(
impl.log.get_all(self._to_rsrc_id(app, uniq, service)),
mimetype='text/plain')
del kwargs['all'] # 'all' is always in kwargs...
return flask.Response(
impl.log.get(self._to_rsrc_id(app, uniq, service), **kwargs),
mimetype='text/plain')
archive_ns = api.namespace('archive',
description='Local archive REST operations')
@archive_ns.route('/<app>/<uniq>/sys')
class _SysArchiveAsAttachment(restplus.Resource):
"""Download sys archive as attachment."""
@webutils.raw_get_api(api, cors)
def get(self, app, uniq):
"""Return content of sys archived file.."""
fname = impl.archive.get('/'.join([app, uniq, 'sys']))
return flask.send_file(
fname,
as_attachment=True,
attachment_filename=os.path.basename(fname)
)
@archive_ns.route('/<app>/<uniq>/app')
class _AppArchiveAsAttachment(restplus.Resource):
"""Download app archive as attachment."""
@webutils.raw_get_api(api, cors)
def get(self, app, uniq):
"""Return content of app archived file.."""
fname = impl.archive.get('/'.join([app, uniq, 'app']))
return flask.send_file(
fname,
as_attachment=True,
attachment_filename=os.path.basename(fname)
)
metrics_ns = api.namespace('metrics', description='Local metrics '
'REST operations')
metrics_req_parser = api.parser()
metrics_req_parser.add_argument(
'timeframe',
choices=('short', 'long'),
default='short',
help='Whether to query the metrics for shorter or longer timeframe.',
location='args',
required=False,
type=str)
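    # Example (hypothetical names):
    #   GET /metrics/proid.myapp/0000000123?timeframe=long
    # returns the metrics as JSON when the client asks for a JSON response
    # (see webutils.wants_json_resp below), otherwise as a file attachment.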
@metrics_ns.route('/',)
class _MetricsList(restplus.Resource):
"""Local metrics list resource."""
@webutils.get_api(api, cors)
def get(self):
"""Returns list of locally available metrics."""
return impl.list(flask.request.args.get('state'), inc_svc=True)
@metrics_ns.route('/<app>/<uniq>')
@metrics_ns.route('/<service>')
class _Metrics(restplus.Resource):
"""Download metrics."""
@webutils.raw_get_api(api, cors, parser=metrics_req_parser)
def get(self, **id_parts):
"""
Return metrics either as an attachment or as json.
"""
args = metrics_req_parser.parse_args()
if webutils.wants_json_resp(flask.request):
return self._get(self._to_rsrc_id(**id_parts),
args.get('timeframe'))
else:
return self._get_as_attach(self._to_rsrc_id(**id_parts),
args.get('timeframe'))
def _to_rsrc_id(self, **id_parts):
"""
Return the metrics resource id based on the keyword args.
"""
try:
rsrc_id = '/'.join([id_parts['app'], id_parts['uniq']])
except KeyError:
rsrc_id = id_parts['service']
return rsrc_id
def _get(self, rsrc_id, timeframe):
"""Return the metrics file as json."""
return flask.Response(impl.metrics.get(rsrc_id, timeframe,
as_json=True),
mimetype='application/json')
def _get_as_attach(self, rsrc_id, timeframe):
"""Return the metrics file as attachment."""
return flask.send_file(
impl.metrics.get(rsrc_id, timeframe),
as_attachment=True,
mimetype='application/octet-stream',
attachment_filename=os.path.basename(
impl.metrics.file_path(rsrc_id)
)
)
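# Illustrative sketch (not part of the original module): the _Metrics resource
# above serves either '<app>/<uniq>' or '<service>' and returns JSON or an
# attachment depending on the Accept header. The standalone helper below
# restates the rsrc_id resolution performed by _Metrics._to_rsrc_id; the
# _example_ name and the sample values are additions for illustration only.
def _example_metrics_rsrc_id(**id_parts):
    """Return '<app>/<uniq>' when both are given, otherwise the service name."""
    try:
        return '/'.join([id_parts['app'], id_parts['uniq']])
    except KeyError:
        return id_parts['service']
# e.g. _example_metrics_rsrc_id(app='proid.myapp', uniq='0000000001') returns
#      'proid.myapp/0000000001', while _example_metrics_rsrc_id(service='mysvc')
#      returns 'mysvc'.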
|
|
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
)
import numpy as np
from pandas._config import get_option
from pandas._libs import (
lib,
missing as libmissing,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._typing import (
Dtype,
Scalar,
type_t,
)
from pandas.compat import pa_version_under1p0
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.base import (
ExtensionDtype,
register_extension_dtype,
)
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_dtype_equal,
is_integer_dtype,
is_object_dtype,
is_string_dtype,
pandas_dtype,
)
from pandas.core import ops
from pandas.core.array_algos import masked_reductions
from pandas.core.arrays import (
FloatingArray,
IntegerArray,
PandasArray,
)
from pandas.core.arrays.base import ExtensionArray
from pandas.core.arrays.floating import FloatingDtype
from pandas.core.arrays.integer import _IntegerDtype
from pandas.core.construction import extract_array
from pandas.core.indexers import check_array_indexer
from pandas.core.missing import isna
if TYPE_CHECKING:
import pyarrow
@register_extension_dtype
class StringDtype(ExtensionDtype):
"""
Extension dtype for string data.
.. versionadded:: 1.0.0
.. warning::
StringDtype is considered experimental. The implementation and
parts of the API may change without warning.
In particular, StringDtype.na_value may change to no longer be
``numpy.nan``.
Parameters
----------
storage : {"python", "pyarrow"}, optional
If not given, the value of ``pd.options.mode.string_storage``.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> pd.StringDtype()
string[python]
>>> pd.StringDtype(storage="pyarrow")
string[pyarrow]
"""
name = "string"
#: StringDtype.na_value uses pandas.NA
na_value = libmissing.NA
_metadata = ("storage",)
def __init__(self, storage=None):
if storage is None:
storage = get_option("mode.string_storage")
if storage not in {"python", "pyarrow"}:
raise ValueError(
f"Storage must be 'python' or 'pyarrow'. Got {storage} instead."
)
if storage == "pyarrow" and pa_version_under1p0:
raise ImportError(
"pyarrow>=1.0.0 is required for PyArrow backed StringArray."
)
self.storage = storage
@property
def type(self) -> type[str]:
return str
@classmethod
def construct_from_string(cls, string):
"""
Construct a StringDtype from a string.
Parameters
----------
string : str
            The type of the name. The storage type will be taken from `string`.
Valid options and their storage types are
========================== ==============================================
string result storage
========================== ==============================================
``'string'`` pd.options.mode.string_storage, default python
``'string[python]'`` python
``'string[pyarrow]'`` pyarrow
========================== ==============================================
Returns
-------
StringDtype
        Raises
        ------
TypeError
If the string is not a valid option.
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
if string == "string":
return cls()
elif string == "string[python]":
return cls(storage="python")
elif string == "string[pyarrow]":
return cls(storage="pyarrow")
else:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
def __eq__(self, other: Any) -> bool:
if isinstance(other, str) and other == "string":
return True
return super().__eq__(other)
def __hash__(self) -> int:
# custom __eq__ so have to override __hash__
return super().__hash__()
# https://github.com/pandas-dev/pandas/issues/36126
# error: Signature of "construct_array_type" incompatible with supertype
# "ExtensionDtype"
def construct_array_type( # type: ignore[override]
self,
) -> type_t[BaseStringArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
from pandas.core.arrays.string_arrow import ArrowStringArray
if self.storage == "python":
return StringArray
else:
return ArrowStringArray
def __repr__(self):
return f"string[{self.storage}]"
def __str__(self):
return self.name
def __from_arrow__(
self, array: pyarrow.Array | pyarrow.ChunkedArray
) -> BaseStringArray:
"""
Construct StringArray from pyarrow Array/ChunkedArray.
"""
if self.storage == "pyarrow":
from pandas.core.arrays.string_arrow import ArrowStringArray
return ArrowStringArray(array)
else:
import pyarrow
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
# using _from_sequence to ensure None is converted to NA
str_arr = StringArray._from_sequence(np.array(arr))
results.append(str_arr)
if results:
return StringArray._concat_same_type(results)
else:
return StringArray(np.array([], dtype="object"))
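# Illustrative sketch (not an original part of this module): a minimal check of
# how StringDtype.construct_from_string maps the accepted dtype strings onto
# storage backends, per the table in its docstring. Only the "python" storage
# is exercised so the sketch does not depend on pyarrow.
def _example_stringdtype_from_string():
    """Demonstrate the string -> StringDtype mapping (illustration only)."""
    dt = StringDtype.construct_from_string("string")
    assert dt.storage in ("python", "pyarrow")  # follows mode.string_storage
    dt_py = StringDtype.construct_from_string("string[python]")
    assert dt_py.storage == "python"
    assert repr(dt_py) == "string[python]"
    assert dt_py == "string"  # __eq__ above treats the plain "string" as equal
    try:
        StringDtype.construct_from_string("str")
    except TypeError:
        pass  # any other spelling raises TypeError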
class BaseStringArray(ExtensionArray):
pass
class StringArray(BaseStringArray, PandasArray):
"""
Extension array for string data.
.. versionadded:: 1.0.0
.. warning::
StringArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : array-like
The array of data.
.. warning::
Currently, this expects an object-dtype ndarray
where the elements are Python strings or :attr:`pandas.NA`.
This may change without warning in the future. Use
:meth:`pandas.array` with ``dtype="string"`` for a stable way of
creating a `StringArray` from any sequence.
copy : bool, default False
Whether to copy the array of data.
Attributes
----------
None
Methods
-------
None
See Also
--------
array
The recommended function for creating a StringArray.
Series.str
The string methods are available on Series backed by
a StringArray.
Notes
-----
StringArray returns a BooleanArray for comparison methods.
Examples
--------
>>> pd.array(['This is', 'some text', None, 'data.'], dtype="string")
<StringArray>
['This is', 'some text', <NA>, 'data.']
Length: 4, dtype: string
Unlike arrays instantiated with ``dtype="object"``, ``StringArray``
will convert the values to strings.
>>> pd.array(['1', 1], dtype="object")
<PandasArray>
['1', 1]
Length: 2, dtype: object
>>> pd.array(['1', 1], dtype="string")
<StringArray>
['1', '1']
Length: 2, dtype: string
However, instantiating StringArrays directly with non-strings will raise an error.
For comparison methods, `StringArray` returns a :class:`pandas.BooleanArray`:
>>> pd.array(["a", None, "c"], dtype="string") == "a"
<BooleanArray>
[True, <NA>, False]
Length: 3, dtype: boolean
"""
# undo the PandasArray hack
_typ = "extension"
def __init__(self, values, copy=False):
values = extract_array(values)
super().__init__(values, copy=copy)
# error: Incompatible types in assignment (expression has type "StringDtype",
# variable has type "PandasDtype")
NDArrayBacked.__init__(self, self._ndarray, StringDtype(storage="python"))
if not isinstance(values, type(self)):
self._validate()
def _validate(self):
"""Validate that we only store NA or strings."""
if len(self._ndarray) and not lib.is_string_array(
self._ndarray.ravel("K"), skipna=True
):
raise ValueError("StringArray requires a sequence of strings or pandas.NA")
if self._ndarray.dtype != "object":
raise ValueError(
"StringArray requires a sequence of strings or pandas.NA. Got "
f"'{self._ndarray.dtype}' dtype instead."
)
@classmethod
def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False):
if dtype and not (isinstance(dtype, str) and dtype == "string"):
dtype = pandas_dtype(dtype)
assert isinstance(dtype, StringDtype) and dtype.storage == "python"
from pandas.core.arrays.masked import BaseMaskedArray
if isinstance(scalars, BaseMaskedArray):
# avoid costly conversion to object dtype
na_values = scalars._mask
result = scalars._data
result = lib.ensure_string_array(result, copy=copy, convert_na_value=False)
result[na_values] = StringDtype.na_value
else:
# convert non-na-likes to str, and nan-likes to StringDtype.na_value
result = lib.ensure_string_array(
scalars, na_value=StringDtype.na_value, copy=copy
)
# Manually creating new array avoids the validation step in the __init__, so is
# faster. Refactor need for validation?
new_string_array = cls.__new__(cls)
NDArrayBacked.__init__(new_string_array, result, StringDtype(storage="python"))
return new_string_array
@classmethod
def _from_sequence_of_strings(
cls, strings, *, dtype: Dtype | None = None, copy=False
):
return cls._from_sequence(strings, dtype=dtype, copy=copy)
@classmethod
def _empty(cls, shape, dtype) -> StringArray:
values = np.empty(shape, dtype=object)
values[:] = libmissing.NA
return cls(values).astype(dtype, copy=False)
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow as pa
if type is None:
type = pa.string()
values = self._ndarray.copy()
values[self.isna()] = None
return pa.array(values, type=type, from_pandas=True)
def _values_for_factorize(self):
arr = self._ndarray.copy()
mask = self.isna()
arr[mask] = -1
return arr, -1
def __setitem__(self, key, value):
value = extract_array(value, extract_numpy=True)
if isinstance(value, type(self)):
# extract_array doesn't extract PandasArray subclasses
value = value._ndarray
key = check_array_indexer(self, key)
scalar_key = lib.is_scalar(key)
scalar_value = lib.is_scalar(value)
if scalar_key and not scalar_value:
raise ValueError("setting an array element with a sequence.")
# validate new items
if scalar_value:
if isna(value):
value = StringDtype.na_value
elif not isinstance(value, str):
raise ValueError(
f"Cannot set non-string value '{value}' into a StringArray."
)
else:
if not is_array_like(value):
value = np.asarray(value, dtype=object)
if len(value) and not lib.is_string_array(value, skipna=True):
raise ValueError("Must provide strings.")
super().__setitem__(key, value)
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
if copy:
return self.copy()
return self
elif isinstance(dtype, _IntegerDtype):
arr = self._ndarray.copy()
mask = self.isna()
arr[mask] = 0
values = arr.astype(dtype.numpy_dtype)
return IntegerArray(values, mask, copy=False)
elif isinstance(dtype, FloatingDtype):
arr = self.copy()
mask = self.isna()
arr[mask] = "0"
values = arr.astype(dtype.numpy_dtype)
return FloatingArray(values, mask, copy=False)
elif isinstance(dtype, ExtensionDtype):
return super().astype(dtype, copy=copy)
elif np.issubdtype(dtype, np.floating):
arr = self._ndarray.copy()
mask = self.isna()
arr[mask] = 0
values = arr.astype(dtype)
values[mask] = np.nan
return values
return super().astype(dtype, copy)
def _reduce(
self, name: str, *, skipna: bool = True, axis: int | None = 0, **kwargs
):
if name in ["min", "max"]:
return getattr(self, name)(skipna=skipna, axis=axis)
raise TypeError(f"Cannot perform reduction '{name}' with string dtype")
def min(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
nv.validate_min((), kwargs)
result = masked_reductions.min(
values=self.to_numpy(), mask=self.isna(), skipna=skipna
)
return self._wrap_reduction_result(axis, result)
def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
nv.validate_max((), kwargs)
result = masked_reductions.max(
values=self.to_numpy(), mask=self.isna(), skipna=skipna
)
return self._wrap_reduction_result(axis, result)
def value_counts(self, dropna: bool = True):
from pandas import value_counts
return value_counts(self._ndarray, dropna=dropna).astype("Int64")
def memory_usage(self, deep: bool = False) -> int:
result = self._ndarray.nbytes
if deep:
return result + lib.memory_usage_of_objects(self._ndarray)
return result
def _cmp_method(self, other, op):
from pandas.arrays import BooleanArray
if isinstance(other, StringArray):
other = other._ndarray
mask = isna(self) | isna(other)
valid = ~mask
if not lib.is_scalar(other):
if len(other) != len(self):
# prevent improper broadcasting when other is 2D
raise ValueError(
f"Lengths of operands do not match: {len(self)} != {len(other)}"
)
other = np.asarray(other)
other = other[valid]
if op.__name__ in ops.ARITHMETIC_BINOPS:
result = np.empty_like(self._ndarray, dtype="object")
result[mask] = StringDtype.na_value
result[valid] = op(self._ndarray[valid], other)
return StringArray(result)
else:
# logical
result = np.zeros(len(self._ndarray), dtype="bool")
result[valid] = op(self._ndarray[valid], other)
return BooleanArray(result, mask)
_arith_method = _cmp_method
# ------------------------------------------------------------------------
# String methods interface
_str_na_value = StringDtype.na_value
def _str_map(
self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
):
from pandas.arrays import BooleanArray
if dtype is None:
dtype = StringDtype(storage="python")
if na_value is None:
na_value = self.dtype.na_value
mask = isna(self)
arr = np.asarray(self)
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
constructor: type[IntegerArray] | type[BooleanArray]
if is_integer_dtype(dtype):
constructor = IntegerArray
else:
constructor = BooleanArray
na_value_is_na = isna(na_value)
if na_value_is_na:
na_value = 1
result = lib.map_infer_mask(
arr,
f,
mask.view("uint8"),
convert=False,
na_value=na_value,
# error: Value of type variable "_DTypeScalar" of "dtype" cannot be
# "object"
# error: Argument 1 to "dtype" has incompatible type
# "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected
# "Type[object]"
dtype=np.dtype(dtype), # type: ignore[type-var,arg-type]
)
if not na_value_is_na:
mask[:] = False
return constructor(result, mask)
elif is_string_dtype(dtype) and not is_object_dtype(dtype):
# i.e. StringDtype
result = lib.map_infer_mask(
arr, f, mask.view("uint8"), convert=False, na_value=na_value
)
return StringArray(result)
else:
# This is when the result type is object. We reach this when
# -> We know the result type is truly object (e.g. .encode returns bytes
# or .findall returns a list).
# -> We don't know the result type. E.g. `.get` can return anything.
return lib.map_infer_mask(arr, f, mask.view("uint8"))
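# Illustrative sketch (not an original part of this module): a short usage
# example of the python-storage StringArray defined above, relying only on
# names already imported in this module (np, libmissing).
def _example_stringarray_usage():
    """Show construction, NA handling and __setitem__ validation."""
    values = np.array(["this", "is", libmissing.NA, "text"], dtype=object)
    arr = StringArray(values)
    assert list(arr.isna()) == [False, False, True, False]
    # Comparisons go through _cmp_method and return a BooleanArray with NA
    # propagated from the string array's mask.
    mask = arr == "is"
    assert mask.dtype.name == "boolean"
    # __setitem__ only accepts strings or NA-like scalars.
    try:
        arr[0] = 1
    except ValueError:
        pass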
|
|
#!/usr/bin/env python
from __future__ import print_function
import commandLineErrors
from commandLineErrors import *
import dataConsistency
import graph as gr
import stringOperations as strOps
import json
import os
import sys
# Define a class for handling the command line and operations around it.
class commandLine:
def __init__(self):
# Define errors.
self.errors = commandLineErrors()
# Store all of the arguments with their values. These will be broken up into arguments that
# are for individual tasks within the main pipeline, gkno specific arguments and pipeline
# arguments.
self.arguments = {}
self.gknoArguments = {}
self.pipelineArguments = {}
# For tasks supplied on the command line, store the task and the list of arguments. Also, store
# the broken out arguments (e.g. after the string associated with the task is parsed).
self.tasksAsArguments = {}
self.taskArguments = {}
# Store commands. These could be instructions on the mode of usage etc.
self.commands = []
# Store the values for lists that have been reordered.
self.reorderedLists = []
# Parse the command line and store all the arguments with their values.
argument = None
isTaskArguments = False
for entry in sys.argv[1:]:
isArgument = entry.startswith('-')
# If this is a continuation of arguments for a specific task, append the arguments to
# the task arguments. If the entry terminates with ']', the task arguments are complete.
if isTaskArguments:
if entry.endswith(']'):
taskArguments += ' ' + entry[:-1]
isTaskArguments = False
self.arguments[argument].append(taskArguments)
# Reset the argument and the taskArguments now the task arguments have been handled.
taskArguments = ''
argument = ''
else: taskArguments += ' ' + entry
      # If the entry starts with a '[', this is the start of a set of arguments to be applied to
      # a task within the pipeline. Find all the commands in the square brackets and associate
# with the task argument.
elif argument and entry.startswith('['):
isTaskArguments = True
taskArguments = ''
taskArguments += entry[1:]
# Only process the entry if not part of task arguments contained in square brackets.
else:
# If this and the previous entry were not arguments, this should be a command and
# is stored.
if not argument and not isArgument: self.commands.append(entry)
# If the previous command line entry was an argument, check if this entry is an argument or
# not. If not, store this as the value for the previous argument.
elif argument and not isArgument:
self.arguments[argument].append(entry)
# If this entry is an argument, set the value of 'argument' to this entry and create the key in
# self.arguments.
elif isArgument:
argument = entry
if argument not in self.arguments: self.arguments[argument] = []
# If the end of the command line is reached and argument is still populated, this is assumed
# to be a flag and should be added.
if argument and not self.arguments[argument]: self.arguments[argument] = [None]
    # If a flag was set on the command line, its value will be empty. Loop over self.arguments and replace
# all empty fields with None.
for argument in self.arguments:
if len(self.arguments[argument]) == 0: self.arguments[argument].append(None)
# If the mode of operation is to provide information on help categories, store the help category.
self.category = None
# Determine if gkno is being run in admin mode.
def isAdmin(self, modes):
try:
if self.commands[0] in modes: return True, self.commands[0]
except: return False, None
return False, None
# Determine the mode in which gkno is being run. This is either in 'admin' mode for general gkno
# admin (resource management, building, updating etc.), 'help' for different categories of help
# or 'run' for running tools or pipelines.
def determineMode(self, isAdmin, gkno):
if isAdmin: return 'admin'
# Check if json files for the web page were requested.
if gkno.getGknoArgument('GKNO-WEB', self.arguments): return 'web'
# Check if help for gkno specific arguments was requested.
if gkno.getGknoArgument('GKNO-ARGUMENTS', self.arguments): return 'gkno help'
# If help is requested, return the mode 'help'.
if gkno.getGknoArgument('GKNO-HELP', self.arguments): return 'help'
# Check if help categories were requested.
category = gkno.getGknoArgument('GKNO-CATEGORIES', self.arguments)
if category:
self.category = None if category == True else category
return 'categories'
    # Check if a list of all pipelines was requested.
if gkno.getGknoArgument('GKNO-ALL-PIPELINES', self.arguments): return 'list-all'
# If no information is provided (e.g. no admin, tool or pipeline), return 'help' as the mode.
if len(self.commands) == 0 and len(self.arguments) == 0: return 'help'
    # If none of the above, return 'run'.
return 'run'
# Determine the path to the configuration files, if set.
def getConfigurationFilePath(self, options):
longFormArgument = options['GKNO-CONFIGURATION-PATH'].longFormArgument
shortFormArgument = options['GKNO-CONFIGURATION-PATH'].shortFormArgument
# If the path is defined, get the path.
path = None
if longFormArgument in self.arguments: path = self.arguments[longFormArgument][0]
elif shortFormArgument in self.arguments: path = self.arguments[shortFormArgument][0]
# If the path is defined, check that the path exists.
# TODO ERROR
if path and not os.path.isdir(path): print('commandLine.getConfigurationFilePath - PATH DOESN\'T EXIST'); exit(1)
# Remove trailing '/'.
if path and path.endswith('/'): path = path.rstrip('/')
# Return the path.
return path
# Determine the name of the pipeline being run (tools are considered pipelines of a single task).
def determinePipeline(self):
# If no pipeline name was supplied return None. The mode will be help, so a general help message
# will be provided to the user.
if not self.commands: return None
# If there are multiple commands from the pipeline, the command line is invalid. If not in admin
# mode, the command line can only include a single entry that is not an argument or accompanying
# value.
# TODO ERROR
if len(self.commands) != 1: self.errors.invalidCommandLine()
# Return the pipeline name.
return self.commands[0]
# Process the command line arguments.
def processArguments(self, superpipeline, args, gkno):
# Get the name of the top level pipeline.
pipeline = superpipeline.pipeline
# Get the names of all the contained pipelines.
containedPipelines = superpipeline.tiersByPipeline.keys()
# Get a list of all the allowable long and short form arguments. In this first pass, any arguments
# being pulled from a contained pipeline are stored.
containedPipelineArguments = []
longFormArguments = {}
shortFormArguments = {}
for argument in args.arguments.keys():
      # Check if the argument name is prefixed with the name of a task, followed by a '.'. If so, this
# argument is pulled in from a contained pipeline.
if '.' in argument:
prefix = argument.split('.')[0]
if prefix in containedPipelines: containedPipelineArguments.append(argument)
# Handle defined arguments in the configuration file.
else:
longFormArguments[argument] = argument
shortFormArguments[args.arguments[argument].shortFormArgument] = argument
# Loop over the contained pipeline arguments.
for argument in containedPipelineArguments:
longFormArgument = args.arguments[argument].longFormArgument
shortFormArgument = args.arguments[argument].shortFormArgument
if longFormArgument not in longFormArguments and shortFormArgument not in shortFormArguments:
longFormArguments[argument.split('.')[1]] = argument
shortFormArguments[args.arguments[argument].shortFormArgument] = argument
# Loop over all of the supplied command line arguments, ensure that they are in their long form
# versions and consolidate. Check that all of the arguments are valid for the pipeline being run
# or are gkno specific arguments.
for argument in self.arguments:
values = self.arguments[argument]
pipelineShortFormArguments = superpipeline.pipelineConfigurationData[pipeline].shortFormArguments
# First check if the argument is the name of a task in the superpipeline.
if argument.strip('-') in superpipeline.tasks:
if argument.strip('-') in self.tasksAsArguments:
for value in values: self.tasksAsArguments[argument.strip('-')].append(value)
else: self.tasksAsArguments[argument.strip('-')] = values
# If this is a long form argument, check to see if it is a gkno specific argument or a valid pipeline
# argument.
elif argument in gkno.arguments:
shortFormArgument = gkno.arguments[argument].shortFormArgument
dataType = gkno.arguments[argument].dataType
if argument not in self.gknoArguments: self.gknoArguments[argument] = []
for value in values:
if not dataConsistency.isCorrectDataType(value, dataType): self.errors.invalidValue(argument, shortFormArgument, value, dataType, True)
self.gknoArguments[argument].append(value)
# Check if this is a valid gkno short form argument.
elif argument in gkno.shortForms:
longFormArgument = gkno.shortForms[argument]
dataType = gkno.arguments[longFormArgument].dataType
if longFormArgument not in self.gknoArguments: self.gknoArguments[longFormArgument] = []
for value in values:
if not dataConsistency.isCorrectDataType(value, dataType): self.errors.invalidValue(longFormArgument, argument, value, dataType, True)
self.gknoArguments[longFormArgument].append(value)
# Check if this argument is valid for the pipeline.
elif argument in longFormArguments:
argumentName = longFormArguments[argument]
shortFormArgument = args.arguments[argumentName].shortFormArgument
dataType = args.arguments[argumentName].dataType
if argumentName not in self.pipelineArguments: self.pipelineArguments[argumentName] = []
# If the data type is flag, there are no values, so include 'set' as the value.
if dataType == 'flag': self.pipelineArguments[argumentName] = ['set']
# Handle non-flags.
else:
for value in values:
if not dataConsistency.isCorrectDataType(value, dataType): self.errors.invalidValue(argumentName, shortFormArgument, value, dataType, False)
self.pipelineArguments[argumentName].append(value)
# Check if this is a valid short form pipeline argument.
elif argument in shortFormArguments:
longFormArgument = shortFormArguments[argument]
dataType = args.arguments[longFormArgument].dataType
if longFormArgument not in self.pipelineArguments: self.pipelineArguments[longFormArgument] = []
if dataType == 'flag': self.pipelineArguments[longFormArgument] = ['set']
else:
for value in values:
if not dataConsistency.isCorrectDataType(value, dataType): self.errors.invalidValue(longFormArgument, argument, value, dataType, False)
self.pipelineArguments[longFormArgument].append(value)
# If the argument is invalid.
else: self.errors.invalidArgument(argument)
# Add more new arguments.
def addGknoArguments(self, gknoArguments):
# Loop over the set of gkno arguments to add.
for argument in gknoArguments:
# If the gkno argument is already set, use the value already supplied, otherwise add the
# argument to the gkno arguments.
if argument not in self.gknoArguments:
self.gknoArguments[argument] = []
for value in gknoArguments[argument]:
if isinstance(value, unicode): self.gknoArguments[argument].append(str(value))
else: self.gknoArguments[argument].append(value)
# Check if any of the arguments are linked and if so, check if multiple values have been provided to them.
# If so, ensure that the order in which the values are sorted is such that the first value for each argument
# is most similar to the first value for the other etc.
def linkedArguments(self, graph, superpipeline, args):
for task in graph.workflow:
tool = graph.getGraphNodeAttribute(task, 'tool')
toolData = superpipeline.getToolData(tool)
# Check if any of the arguments for this task have been given multiple values.
linkedArguments = {}
for nodeId in graph.graph.predecessors(task):
if len(graph.getGraphNodeAttribute(nodeId, 'values')) > 1:
argument = graph.getArgumentAttribute(nodeId, task, 'longFormArgument')
linkedArgument = toolData.getArgumentAttribute(argument, 'linkedArgument')
if linkedArgument != None: linkedArguments[nodeId] = linkedArgument
# If there are any linked arguments with multiple values, check the order of the values.
for nodeId in linkedArguments:
argumentValues = graph.getGraphNodeAttribute(nodeId, 'values')
# Determine the nodeId of the linked argument.
for linkedNodeId in graph.graph.predecessors(task):
if linkedArguments[nodeId] == graph.getArgumentAttribute(linkedNodeId, task, 'longFormArgument'): break
# Use the linkedArgumentValues as a reference list and order the values in argumentValues to be most
        # similar to the reference list.
referenceList = graph.getGraphNodeAttribute(linkedNodeId, 'values')
queryList = []
for value in referenceList: queryList.append(strOps.findMostSimilar(argumentValues, value))
# If reordering has taken place, store the values so that the user can be warned. Update the graph to
# include the reordered list.
if queryList != argumentValues:
self.reorderedLists.append((task, nodeId, linkedNodeId, argumentValues, queryList, referenceList))
graph.setGraphNodeAttribute(nodeId, 'values', queryList)
# Determine the name (if any) of the requested parameter set.
def getParameterSetName(self, arguments, gkno):
    # Loop over the gkno specific arguments looking for the --parameter-set argument.
for argument in arguments:
if argument == gkno.options['GKNO-PARAMETER-SET'].longFormArgument:
# Only one parameter set can be defined.
if len(arguments[argument]) != 1: self.errors.multipleParameterSets()
# Return the name of the parameter set.
return arguments[argument][0]
# If no parameter set was defined, return None.
return None
# For all tasks inputted on the command line, extract all the arguments supplied to the tasks.
def parseTasksAsArguments(self, superpipeline):
    # Parse tasksAsArguments. Each entry is a task in the pipeline and has associated with it a list of
# arguments to apply to that task. Parse all of these arguments and identify the graph node that
# they point to. If there is no associated node, add the argument to a list of nodes that require
# creating.
for task in self.tasksAsArguments:
arguments = {}
# Get the tool associated with this task.
tool = superpipeline.tasks[task]
      # Loop over all arguments supplied to this task (it is allowed that the same task is supplied
      # on the command line multiple times).
for string in self.tasksAsArguments[task]:
# Break the string associated with the task into a list.
argumentList = string.split(' ')
# Parse all the commands supplied for this task.
argument = None
for counter, entry in enumerate(argumentList):
if entry.startswith('-'):
            # If this entry starts with a '-' and an argument is already pending, then the previous entry also
            # started with a '-'. This implies that the previous entry was a flag argument.
if argument:
arguments[argument] = ['set']
# Check that the argument is a valid argument for this tool and convert to the long form
# version if necessary.
argument = superpipeline.toolConfigurationData[tool].getLongFormArgument(entry)
#TODO ERROR
if argument not in superpipeline.toolConfigurationData[tool].arguments.keys():
print('ERROR - parseTasksAsArguments - 1', tool, entry); exit(0)
arguments[argument] = []
            # If no argument is currently pending, then this is a new argument (either it is the first argument
            # in the list, or the previous entry was a value associated with a different argument).
else:
# Check that the argument is a valid argument for this tool and convert to the long form
# version if necessary.
#TODO ERROR
argument = superpipeline.toolConfigurationData[tool].getLongFormArgument(entry)
if argument not in superpipeline.toolConfigurationData[tool].arguments.keys():
print('ERROR - parseTasksAsArguments - 2', tool, entry); exit(0)
if argument not in arguments: arguments[argument] = []
# If this entry does not begin with a dash and there is no defined argument, then the previous
# entry also did not start with a '-' and so there is a problem with the supplied arguments.
elif not argument: print('ERROR - command.associateArgumentsWithGraphNodes - 1', argument); exit(0)
          # If this entry does not begin with a '-', but the argument is set, this is a value for the argument,
# so associate the value with the argument.
elif argument:
arguments[argument].append(entry)
argument = None
# If the previous list ended on a flag, the value will not have been set. Set it here.
if argument: arguments[argument] = ['set']
# Store the list of arguments for each task.
self.taskArguments[task] = arguments
# Associate the command line arguments with the graph nodes.
def associateArgumentsWithGraphNodes(self, graph, superpipeline):
associatedNodes = []
# Loop over all the arguments supplied to individual tasks.
for taskAddress in self.taskArguments:
for argument in self.taskArguments[taskAddress]:
values = self.taskArguments[taskAddress][argument]
# Get the tool associated with this task.
tool = superpipeline.tasks[taskAddress]
toolData = superpipeline.toolConfigurationData[tool]
# Search the successor and predecessor nodes for this task for the argument supplied.
foundArgument = False
associatedNodeId = None
for nodeId in gr.pipelineGraph.CM_getInputNodes(graph, taskAddress):
longFormArgument = gr.pipelineGraph.CM_getArgumentAttribute(graph, nodeId, taskAddress, 'longFormArgument')
if longFormArgument == argument:
foundArgument = True
associatedNodeId = nodeId
break
# Only check the output nodes if the argument has not already been associated with an input node.
if not foundArgument:
for nodeId in gr.pipelineGraph.CM_getOutputNodes(graph, taskAddress):
longFormArgument = gr.pipelineGraph.CM_getArgumentAttribute(graph.graph, nodeId, taskAddress, 'longFormArgument')
if longFormArgument == argument:
foundArgument = True
associatedNodeId = nodeId
break
# Add the node to the list.
if associatedNodeId: associatedNodes.append((taskAddress, associatedNodeId, tool, argument, values, False))
else: associatedNodes.append((taskAddress, str(taskAddress + '.' + argument), tool, argument, values, True))
# Return the list with information on the nodes to create.
return associatedNodes
# Check if multiple makefiles were requested and if an id is to be added to any makefiles.
def checkMakefiles(self, gknoArguments):
multiple = gknoArguments['GKNO-MULTIPLE-MAKEFILES'].longFormArgument
idArgument = gknoArguments['GKNO-MAKEFILE-ID'].longFormArgument
isMultipleMakefiles = True if multiple in self.gknoArguments else False
makefileId = self.gknoArguments[idArgument][0] if idArgument in self.gknoArguments else None
return isMultipleMakefiles, makefileId
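# Illustrative sketch (not part of the original gkno source). The __init__
# parser above groups everything between '[' and ']' that follows an argument
# into a single string attached to that argument (per-task arguments). The
# standalone helper below restates that grouping logic in a simplified form
# for illustration; it is not used by the commandLine class.
def _exampleGroupTaskArguments(argv):
  # Return a dict of argument -> values, grouping '[ ... ]' blocks into one string.
  arguments = {}
  argument = None
  taskArguments = ''
  isTaskArguments = False
  for entry in argv:
    if isTaskArguments:
      if entry.endswith(']'):
        taskArguments += ' ' + entry[:-1]
        arguments[argument].append(taskArguments.strip())
        taskArguments, argument, isTaskArguments = '', None, False
      else: taskArguments += ' ' + entry
    elif argument and entry.startswith('['):
      isTaskArguments = True
      taskArguments = entry[1:]
    elif entry.startswith('-'):
      argument = entry
      if argument not in arguments: arguments[argument] = []
    elif argument:
      arguments[argument].append(entry)
  return arguments
# e.g. _exampleGroupTaskArguments(['--task', '[--in', 'a.txt]']) returns
# {'--task': ['--in a.txt']}.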
|
|
#!/usr/bin/env python
import logging
import sys
import unittest
import mango.unittest
import scipy as sp
import numpy as np
import numpy.random
import random
import mango.mpi as mpi
import mango.fmm
import mango.data
import mango.io
import os, os.path
logger, rootLogger = mpi.getLoggers(__name__)
class PValueTest(mango.unittest.TestCase):
def testGaussianPValue(self):
for typePair in [(None, "float32"), ("tomo", None)]:
mtype = typePair[0]
dtype = typePair[1]
mean = 32000.0
stdd = 1000.0
noisDds = mango.data.gaussian_noise(shape=(105,223,240), mean=mean, stdd=stdd, mtype=mtype, dtype=dtype)
pvalDds = \
mango.fmm.gaussian_pvalue(
noisDds,
mean=mean,
stdd=stdd,
sidedness=mango.fmm.PValueSidednessType.RIGHT_SIDEDNESS
)
alpha = 0.05
count = sp.sum(sp.where(pvalDds.asarray() <= alpha, 1, 0))
if (pvalDds.mpi.comm != None):
count = pvalDds.mpi.comm.allreduce(count)
expCount = sp.product(noisDds.shape)*alpha
count = float(count)
relErr = sp.absolute(expCount-float(count))/sp.absolute(max(expCount,count))
rootLogger.info("relErr = %s" % relErr)
self.assertTrue(relErr < 0.10)
def testConvolvedGaussianPValue(self):
import matplotlib
import matplotlib.pyplot as plt
for typePair in [("float64", None), ("tomo_float", None)]:
plt.clf()
mtype = typePair[0]
dtype = typePair[1]
mean = 32000.0
stdd = 1000.0
noisDds = mango.data.gaussian_noise(shape=(100,101,102), mean=mean, stdd=stdd, mtype=mtype, dtype=dtype)
kernelSigma = 0.6
kernel = mango.image.discrete_gaussian_kernel(kernelSigma)
rootLogger.info("kernal.shape=%s" % (kernel.shape,))
rootLogger.info("sp.sum(kernal)=%s" % (sp.sum(kernel),))
rootLogger.info("kernal (min,max)=(%s,%s)" % (np.min(kernel), np.max(kernel),))
convMeanDds, convStddDds = mango.image.discrete_gaussian_mean_stdd(noisDds, kernelSigma)
convStddDds = mango.image.discrete_gaussian_stdd(noisDds, kernelSigma, mean=mean)
estMean = sp.sum(noisDds.asarray())
if (noisDds.mpi.comm != None):
estMean = noisDds.mpi.comm.allreduce(estMean)
estMean /= float(sp.product(noisDds.shape))
d = noisDds.asarray()-estMean
estStdd = sp.sum(d*d)
if (noisDds.mpi.comm != None):
estStdd = noisDds.mpi.comm.allreduce(estStdd)
estStdd = sp.sqrt(estStdd/float(sp.product(noisDds.shape)-1))
convAvgStdd = sp.sum(convStddDds.asarray())
if (convStddDds.mpi.comm != None):
convAvgStdd = convStddDds.mpi.comm.allreduce(convAvgStdd)
convAvgStdd /= float(sp.product(convStddDds.shape))
convolveMean = sp.sum(kernel*mean)
convolveStdd = sp.sqrt(sp.sum((kernel*kernel)*(stdd*stdd)))
if ((convStddDds.mpi.comm == None) or (convStddDds.mpi.comm.Get_size() == 1)):
rootLogger.info("Calculating histogram...")
hst,edg = sp.histogram((convStddDds.asarray()*convStddDds.asarray()).flatten(), bins=1024)
rootLogger.info("Done calculating histogram...edge (min,max)=(%s,%s)" % (np.min(edg), np.max(edg)))
hst = sp.array(hst, copy=True, dtype="float64")
hst /= sp.sum(hst)
hst = hst[0:hst.size//2]
edg = edg[0:hst.size + 1]
xx = (edg[1:]+edg[0:-1])*0.5
plt.plot(xx, hst)
rootLogger.info("Calculating chi2...")
chi2xx0 = ((sp.product(kernel.shape)-1)/(stdd*stdd))*edg
df = sp.product(kernel.shape)-1
chi2cdf0 = sp.stats.chi2.cdf(chi2xx0, df)
y0 = chi2cdf0[1:]-chi2cdf0[0:-1]
dof = ((sp.sum(kernel)**2)-(sp.sum(kernel*kernel)))/(sp.sum(kernel))
coeffs = kernel.flatten()
rootLogger.info("Calculating gchi2...")
gchi2xx1 = (dof/(stdd*stdd))*edg
gchi2cdf1 = mango.fmm.gchisqrd(coeffs, sp.zeros_like(coeffs)).cdf(gchi2xx1)
y1 = gchi2cdf1[1:]-gchi2cdf1[0:-1]
rootLogger.info("Done calculating gchi2.")
#plt.plot(xx, y0)
plt.plot(xx, y1)
plt.title("sp.sum(hst)=%s, sp.sum(chi2)=%s" % (sp.sum(hst), sp.sum(y0)))
dir = self.createTmpDir("testConvolvedGaussianPValue")
plt.savefig(os.path.join(dir,"convStddDds_%s.png" % mtype), dpi=128)
plt.figure()
hst,edg = sp.histogram(convMeanDds.asarray().flatten(), bins=256)
hst = sp.array(hst, copy=True, dtype="float64")
####hst /= sp.array(edg[1:]-edg[0:-1], dtype="float64")
hst /= sp.sum(hst)
xx = (edg[1:]+edg[0:-1])*0.5
plt.plot(xx, hst)
nd0 = sp.stats.norm.cdf(edg, loc = mean, scale = sp.sqrt(sp.sum(kernel*kernel)*stdd*stdd))
y0 = nd0[1:]-nd0[0:-1]
plt.plot(xx,y0)
plt.savefig(os.path.join(dir,"convMeanDds_%s.png" % mtype), dpi=128)
rootLogger.info("%14s=%10.5f, %14s=%10.5f" % ("mean", mean, "stdd", stdd))
rootLogger.info("%14s=%10.5f, %14s=%10.5f" % ("convolve-mean", convolveMean, "convolve-stdd", convolveStdd))
rootLogger.info("%14s=%10.5f, %14s=%10.5f" % ("est-stdd", estStdd, "conv-avg-stdd", convAvgStdd))
rootLogger.info("Calculating gaussian_pvalue image for from mean-image...")
meanPValDds = \
mango.fmm.gaussian_pvalue(
convMeanDds,
mean=convolveMean,
stdd=convolveStdd,
sidedness=mango.fmm.PValueSidednessType.RIGHT_SIDEDNESS
)
alpha = 0.05
count = sp.sum(sp.where(meanPValDds.asarray() <= alpha, 1, 0))
if (meanPValDds.mpi.comm != None):
count = meanPValDds.mpi.comm.allreduce(count)
expCount = sp.product(noisDds.shape)*alpha
count = float(count)
relErr = sp.absolute(expCount-float(count))/sp.absolute(max(expCount,count))
rootLogger.info("relErr = %s" % relErr)
self.assertTrue(relErr < 0.10)
convVarDds = mango.copy(convStddDds)
convVarDds.asarray()[...] *= convVarDds.asarray()*((sp.product(kernel.shape)-1)/(stdd*stdd))
rootLogger.info("Calculating generalised_chi_squared_pvalue image from variance-image...")
###
### Takes too long to compute pvalue image!!!!
###
# stddPValDds = \
# mango.fmm.generalised_chi_squared_pvalue(
# convVarDds,
# coefficients=coeffs,
# noncentralities=sp.zeros_like(coeffs),
# sidedness=mango.fmm.PValueSidednessType.RIGHT_SIDEDNESS
# )
#
# alpha = 0.05
# count = sp.sum(sp.where(stddPValDds.asarray() <= alpha, 1, 0))
# if (stddPValDds.mpi.comm != None):
# count = stddPValDds.mpi.comm.allreduce(count)
#
# expCount = sp.product(stddPValDds.shape)*alpha
# count = float(count)
# relErr = sp.absolute(expCount-float(count))/sp.absolute(max(expCount,count))
# rootLogger.info("relErr = %s" % relErr)
# self.assertTrue(relErr < 0.10)
def testStructuringElementPValue(self):
import matplotlib
import matplotlib.pyplot as plt
dir = self.createTmpDir("testStructuringElementPValue")
for typePair in [("float64", None), ("tomo_float", None)]:
plt.clf()
mtype = typePair[0]
dtype = typePair[1]
mean = 32000.0
stdd = 1000.0
noisDds = mango.data.gaussian_noise(shape=(100,101,102), mean=mean, stdd=stdd, mtype=mtype, dtype=dtype)
se = mango.image.sphere_se(radius=2.0)
seNumElems = sp.sum(sp.where(se.toFootprint(), 1, 0))
rootLogger.info("se.shape=%s" % (se.toFootprint().shape,))
rootLogger.info("sp.sum(se.toFootprint())=%s" % (seNumElems,))
seMeanDds = mango.image.mean_filter(noisDds, se)
seStddDds = mango.image.stdd_filter(noisDds, se)
estMean = sp.sum(noisDds.asarray())
if (noisDds.mpi.comm != None):
estMean = noisDds.mpi.comm.allreduce(estMean)
estMean /= float(sp.product(noisDds.shape))
d = noisDds.asarray()-estMean
estStdd = sp.sum(d*d)
if (noisDds.mpi.comm != None):
estStdd = noisDds.mpi.comm.allreduce(estStdd)
estStdd = sp.sqrt(estStdd/float(sp.product(noisDds.shape)-1))
seAvgStdd = sp.sum(seStddDds.asarray())
if (seStddDds.mpi.comm != None):
seAvgStdd = seStddDds.mpi.comm.allreduce(seAvgStdd)
seAvgStdd /= float(sp.product(seStddDds.shape))
seTheoryMean = mean
seTheoryStdd = sp.sqrt(seNumElems*((1.0/seNumElems)**2)*(stdd*stdd))
if ((seStddDds.mpi.comm == None) or (seStddDds.mpi.comm.Get_size() == 1)):
rootLogger.info("Calculating histogram...")
hst,edg = sp.histogram((seStddDds.asarray()*seStddDds.asarray()).flatten(), bins=1024)
rootLogger.info("Done calculating histogram...edge (min,max)=(%s,%s)" % (np.min(edg), np.max(edg)))
hst = sp.array(hst, copy=True, dtype="float64")
hst /= sp.sum(hst)
hst = hst[0:hst.size//2]
edg = edg[0:hst.size + 1]
xx = (edg[1:]+edg[0:-1])*0.5
plt.plot(xx, hst)
rootLogger.info("Calculating chi2...")
chi2xx0 = ((seNumElems-1)/(stdd*stdd))*edg
df = seNumElems-1
chi2cdf0 = sp.stats.chi2.cdf(chi2xx0, df)
y0 = chi2cdf0[1:]-chi2cdf0[0:-1]
rootLogger.info("Done calculating chi2.")
plt.plot(xx, y0)
plt.title("sp.sum(hst)=%s, sp.sum(chi2)=%s" % (sp.sum(hst), sp.sum(y0)))
plt.savefig(os.path.join(dir,"seStddDds_%s.png" % mtype), dpi=128)
plt.figure()
hst,edg = sp.histogram(seMeanDds.asarray().flatten(), bins=256)
hst = sp.array(hst, copy=True, dtype="float64")
####hst /= sp.array(edg[1:]-edg[0:-1], dtype="float64")
hst /= sp.sum(hst)
xx = (edg[1:]+edg[0:-1])*0.5
plt.plot(xx, hst)
nd0 = sp.stats.norm.cdf(edg, loc = mean, scale = sp.sqrt(seNumElems*((1.0/seNumElems)**2)*stdd*stdd))
y0 = nd0[1:]-nd0[0:-1]
plt.plot(xx,y0)
plt.savefig(os.path.join(dir,"seMeanDds_%s.png" % mtype), dpi=128)
rootLogger.info("%14s=%10.5f, %14s=%10.5f" % ("mean", mean, "stdd", stdd))
rootLogger.info("%14s=%10.5f, %14s=%10.5f" % ("seTheory-mean", seTheoryMean, "seTheory-stdd", seTheoryStdd))
rootLogger.info("%14s=%10.5f, %14s=%10.5f" % ("est-stdd", estStdd, "se-avg-stdd", seAvgStdd))
rootLogger.info("Calculating gaussian_pvalue image for from mean-image...")
meanPValDds = \
mango.fmm.gaussian_pvalue(
seMeanDds,
mean=seTheoryMean,
stdd=seTheoryStdd,
sidedness=mango.fmm.PValueSidednessType.RIGHT_SIDEDNESS
)
alpha = 0.05
count = sp.sum(sp.where(meanPValDds.asarray() <= alpha, 1, 0))
if (meanPValDds.mpi.comm != None):
count = meanPValDds.mpi.comm.allreduce(count)
expCount = sp.product(noisDds.shape)*alpha
count = float(count)
relErr = sp.absolute(expCount-float(count))/sp.absolute(max(expCount,count))
rootLogger.info("relErr = %s" % relErr)
self.assertTrue(relErr < 0.10)
seVarDds = mango.copy(seStddDds)
seVarDds.asarray()[...] *= seVarDds.asarray()*((seNumElems)-1)/(stdd*stdd)
rootLogger.info("Calculating chi_squared_pvalue image from variance-image...")
###
### Takes too long to compute pvalue image!!!!
###
stddPValDds = \
mango.fmm.chi_squared_pvalue(
seVarDds,
dof=float(seNumElems-1),
sidedness=mango.fmm.PValueSidednessType.RIGHT_SIDEDNESS
)
alpha = 0.05
count = sp.sum(sp.where(stddPValDds.asarray() <= alpha, 1, 0))
if (stddPValDds.mpi.comm != None):
count = stddPValDds.mpi.comm.allreduce(count)
expCount = sp.product(stddPValDds.shape)*alpha
count = float(count)
relErr = sp.absolute(expCount-float(count))/sp.absolute(max(expCount,count))
rootLogger.info("relErr = %s" % relErr)
self.assertTrue(relErr < 0.10)
def testChiSquaredPValue(self):
for typePair in [(None, "float32"), ("float64", None)]:
mtype = typePair[0]
dtype = typePair[1]
dof = 12.0
noisDds = mango.data.chi_squared_noise(shape=(105,223,240), dof=dof, mtype=mtype, dtype=dtype)
pvalDds = \
mango.fmm.chi_squared_pvalue(
noisDds,
dof=dof,
sidedness=mango.fmm.PValueSidednessType.RIGHT_SIDEDNESS
)
alpha = 0.05
count = sp.sum(sp.where(pvalDds.asarray() <= alpha, 1, 0))
if (pvalDds.mpi.comm != None):
count = pvalDds.mpi.comm.allreduce(count)
expCount = sp.product(noisDds.shape)*alpha
count = float(count)
relErr = sp.absolute(expCount-float(count))/sp.absolute(max(expCount,count))
rootLogger.info("relErr = %s" % relErr)
self.assertTrue(relErr < 0.10)
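# Illustrative sketch (not part of the original test module): every assertion
# above relies on the fact that p-values computed under the correct null
# distribution are uniform on [0, 1], so the expected number of voxels with
# p <= alpha is N*alpha. The helper below restates that check with plain
# numpy, independent of mango.
def _examplePValueAlphaCheck(shape=(105, 223, 240), alpha=0.05):
    """Return the relative error between observed and expected rejection counts."""
    pvals = np.random.uniform(0.0, 1.0, size=shape)
    count = float(np.sum(pvals <= alpha))
    expCount = float(np.prod(shape)) * alpha
    return abs(expCount - count) / max(expCount, count)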
class GradientChiSquaredPValueTest(mango.unittest.TestCase):
def testDiscreteGaussianGradientChiSquaredPValue(self):
dir = self.createTmpDir("testDiscreteGaussianGradientChiSquaredPValue")
gSigma = 0.5
grdKernelZ = mango.image.discrete_gaussian_gradient_kernel(axis=0, sigma=gSigma)
sumSqrd = sp.sum(grdKernelZ*grdKernelZ)
rootLogger.info("grdKernelZ = \n%s" % (grdKernelZ,))
mean = 32000.0
stdd = 4000.0
imgDds = mango.data.gaussian_noise(shape=(100,200,200), mean=mean, stdd=stdd, mtype="tomo_float", halo=(3,3,3))
grdDds = mango.image.discrete_gaussian_gradient_magnitude(input=imgDds, sigma=gSigma)
mango.io.writeDds(os.path.join(dir, "tomo_floatImg.nc"), imgDds)
mango.io.writeDds(os.path.join(dir, "tomo_floatImgGrd.nc"), grdDds)
grdDds.updateHaloRegions()
grdDds.mirrorOuterLayersToBorder(False)
grdDds.subd.asarray()[...] = grdDds.subd.asarray()*grdDds.subd.asarray()
grdDds.subd.asarray()[...] /= (sumSqrd*(stdd*stdd))
if ((grdDds.mpi.comm == None) or (grdDds.mpi.comm.Get_size() <= 1)):
import matplotlib
import matplotlib.pyplot as plt
hst,edg = sp.histogram(grdDds.subd.asarray().flatten(), bins=1024)
rootLogger.info("Done calculating histogram...edge (min,max)=(%s,%s)" % (np.min(edg), np.max(edg)))
hst = sp.array(hst, copy=True, dtype="float64")
hst /= sp.sum(hst)
hst = hst[0:hst.size//2]
edg = edg[0:hst.size + 1]
xx = (edg[1:]+edg[0:-1])*0.5
plt.plot(xx, hst, label="gradient histogram")
rootLogger.info("Calculating chi2...")
chi2xx0 = edg
df = 3
chi2cdf0 = sp.stats.chi2.cdf(chi2xx0, df)
y0 = chi2cdf0[1:]-chi2cdf0[0:-1]
plt.plot(xx, y0, label="Chi-squared distribution")
plt.legend()
plt.savefig(os.path.join(dir,"gradient_hist_and_chi_sqrd_dist.png"), dpi=100)
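# Illustrative sketch (not part of the original test module): rationale for
# df=3 above. The squared gradient magnitude is the sum of three (approximately
# independent) Gaussian components, each with variance sum(kernel**2)*stdd**2,
# so dividing by that variance gives a chi-squared variate with 3 degrees of
# freedom. The sumSqrd value below is a hypothetical placeholder.
def _exampleGradientChiSquaredDof(n=100000, stdd=4000.0, sumSqrd=0.25):
    """Return the sample mean of the normalised squared magnitude (close to 3)."""
    comps = np.random.normal(0.0, stdd * np.sqrt(sumSqrd), size=(n, 3))
    normalised = np.sum(comps * comps, axis=1) / (sumSqrd * stdd * stdd)
    return normalised.mean()  # the mean of chi-squared(df) equals df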
if __name__ == "__main__":
#mango.setLoggingVerbosityLevel("high")
mpi.initialiseLoggers(
[__name__, "mango.unittest", "mango.mpi", "mango.fmm", "mango.fmmTest"],
logLevel=logging.DEBUG
)
random.seed((mpi.rank+1)*23456243)
numpy.random.seed((mpi.rank+1)*23456134)
mango.unittest.main()
|
|
#
# Copyright (C) 2000-2008 greg Landrum and Rational Discovery LLC
#
""" Contains functionality for doing tree pruning
"""
import numpy
from rdkit.ML.DecTree import CrossValidate, DecTree
import copy
_verbose = 0
def MaxCount(examples):
""" given a set of examples, returns the most common result code
**Arguments**
examples: a list of examples to be counted
**Returns**
the most common result code
"""
resList = [x[-1] for x in examples]
maxVal = max(resList)
counts = [None]*(maxVal+1)
for i in xrange(maxVal+1):
counts[i] = sum([x==i for x in resList])
return numpy.argmax(counts)
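# Illustrative sketch (not part of the original RDKit source): MaxCount()
# above returns the most common result code (the last entry of each example).
# The helper below restates the same majority count with collections.Counter,
# purely for comparison; it is not used by the pruner.
def _exampleMaxCount(examples):
  """ illustrative equivalent of MaxCount() using collections.Counter """
  from collections import Counter
  counts = Counter([x[-1] for x in examples])
  return counts.most_common(1)[0][0]
# e.g. _exampleMaxCount([[0, 1], [1, 1], [2, 0]]) returns 1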
def _GetLocalError(node):
nWrong = 0
for example in node.GetExamples():
pred = node.ClassifyExample(example,appendExamples=0)
if pred != example[-1]:
nWrong +=1
#if _verbose: print '------------------>MISS:',example,pred
return nWrong
def _Pruner(node,level=0):
"""Recursively finds and removes the nodes whose removals improve classification
**Arguments**
- node: the tree to be pruned. The pruning data should already be contained
within node (i.e. node.GetExamples() should return the pruning data)
- level: (optional) the level of recursion, used only in _verbose printing
**Returns**
the pruned version of node
**Notes**
- This uses a greedy algorithm which basically does a DFS traversal of the tree,
removing nodes whenever possible.
- If removing a node does not affect the accuracy, it *will be* removed. We
favor smaller trees.
"""
if _verbose: print ' '*level,'<%d> '%level,'>>> Pruner'
children = node.GetChildren()[:]
bestTree = copy.deepcopy(node)
bestErr = 1e6
emptyChildren=[]
#
# Loop over the children of this node, removing them when doing so
# either improves the local error or leaves it unchanged (we're
# introducing a bias for simpler trees).
#
for i in range(len(children)):
child = children[i]
examples = child.GetExamples()
if _verbose:
print ' '*level,'<%d> '%level,' Child:',i,child.GetLabel()
bestTree.Print()
print
if len(examples):
if _verbose: print ' '*level,'<%d> '%level,' Examples',len(examples)
if not child.GetTerminal():
if _verbose: print ' '*level,'<%d> '%level,' Nonterminal'
workTree = copy.deepcopy(bestTree)
#
# First recurse on the child (try removing things below it)
#
newNode = _Pruner(child,level=level+1)
workTree.ReplaceChildIndex(i,newNode)
tempErr = _GetLocalError(workTree)
if tempErr<=bestErr:
bestErr = tempErr
bestTree = copy.deepcopy(workTree)
if _verbose:
print ' '*level,'<%d> '%level,'>->->->->->'
print ' '*level,'<%d> '%level,'replacing:',i,child.GetLabel()
child.Print()
print ' '*level,'<%d> '%level,'with:'
newNode.Print()
print ' '*level,'<%d> '%level,'<-<-<-<-<-<'
else:
workTree.ReplaceChildIndex(i,child)
#
# Now try replacing the child entirely
#
bestGuess = MaxCount(child.GetExamples())
newNode = DecTree.DecTreeNode(workTree,'L:%d'%(bestGuess),
label=bestGuess,isTerminal=1)
newNode.SetExamples(child.GetExamples())
workTree.ReplaceChildIndex(i,newNode)
if _verbose:
print ' '*level,'<%d> '%level,'ATTEMPT:'
workTree.Print()
newErr = _GetLocalError(workTree)
if _verbose: print ' '*level,'<%d> '%level,'---> ',newErr,bestErr
if newErr <= bestErr:
bestErr = newErr
bestTree = copy.deepcopy(workTree)
if _verbose:
print ' '*level,'<%d> '%level,'PRUNING:'
workTree.Print()
else:
if _verbose: print ' '*level,'<%d> '%level,'FAIL'
# whoops... put the child back in:
workTree.ReplaceChildIndex(i,child)
else:
if _verbose: print ' '*level,'<%d> '%level,' Terminal'
else:
if _verbose: print ' '*level,'<%d> '%level,' No Examples',len(examples)
#
# FIX: we need to figure out what to do here (nodes that contain
# no examples in the testing set). I can concoct arguments for
# leaving them in and for removing them. At the moment they are
# left intact.
#
pass
if _verbose: print ' '*level,'<%d> '%level,'<<< out'
return bestTree
def PruneTree(tree,trainExamples,testExamples,minimizeTestErrorOnly=1):
""" implements a reduced-error pruning of decision trees
This algorithm is described on page 69 of Mitchell's book.
Pruning can be done using just the set of testExamples (the validation set)
or both the testExamples and the trainExamples by setting minimizeTestErrorOnly
to 0.
**Arguments**
- tree: the initial tree to be pruned
- trainExamples: the examples used to train the tree
- testExamples: the examples held out for testing the tree
    - minimizeTestErrorOnly: if this toggle is zero, all examples (i.e.
      _trainExamples_ + _testExamples_) will be used to evaluate the error.
**Returns**
a 2-tuple containing:
1) the best tree
2) the best error (the one which corresponds to that tree)
"""
if minimizeTestErrorOnly:
testSet = testExamples
else:
testSet = trainExamples + testExamples
# remove any stored examples the tree may have
tree.ClearExamples()
#
# screen the test data through the tree so that we end up with the
# appropriate points stored at each node of the tree
#
totErr,badEx = CrossValidate.CrossValidate(tree,testSet,appendExamples=1)
#
# Prune
#
newTree = _Pruner(tree)
#
# And recalculate the errors
#
totErr,badEx = CrossValidate.CrossValidate(newTree,testSet)
newTree.SetBadExamples(badEx)
return newTree,totErr
# -------
# testing code
# -------
def _testRandom():
from rdkit.ML.DecTree import randomtest
#examples,attrs,nPossibleVals = randomtest.GenRandomExamples(nVars=20,randScale=0.25,nExamples = 200)
examples,attrs,nPossibleVals = randomtest.GenRandomExamples(nVars=10,randScale=0.5,nExamples = 200)
tree,frac = CrossValidate.CrossValidationDriver(examples,attrs,nPossibleVals)
tree.Print()
tree.Pickle('orig.pkl')
print 'original error is:', frac
print '----Pruning'
newTree,frac2 = PruneTree(tree,tree.GetTrainingExamples(),tree.GetTestExamples())
newTree.Print()
print 'pruned error is:',frac2
newTree.Pickle('prune.pkl')
def _testSpecific():
from rdkit.ML.DecTree import ID3
oPts= [ \
[0,0,1,0],
[0,1,1,1],
[1,0,1,1],
[1,1,0,0],
[1,1,1,1],
]
tPts = oPts+[[0,1,1,0],[0,1,1,0]]
tree = ID3.ID3Boot(oPts,attrs=range(3),nPossibleVals=[2]*4)
tree.Print()
err,badEx = CrossValidate.CrossValidate(tree,oPts)
print 'original error:',err
err,badEx = CrossValidate.CrossValidate(tree,tPts)
print 'original holdout error:',err
newTree,frac2 = PruneTree(tree,oPts,tPts)
newTree.Print()
err,badEx = CrossValidate.CrossValidate(newTree,tPts)
print 'pruned holdout error is:',err
print badEx
print len(tree),len(newTree)
def _testChain():
from rdkit.ML.DecTree import ID3
oPts= [ \
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[0,0,1,1,0],
[0,0,1,1,0],
[0,0,1,1,1],
[0,1,0,1,0],
[0,1,0,1,0],
[0,1,0,0,1],
]
tPts = oPts
tree = ID3.ID3Boot(oPts,attrs=range(len(oPts[0])-1),nPossibleVals=[2]*len(oPts[0]))
tree.Print()
err,badEx = CrossValidate.CrossValidate(tree,oPts)
print 'original error:',err
err,badEx = CrossValidate.CrossValidate(tree,tPts)
print 'original holdout error:',err
newTree,frac2 = PruneTree(tree,oPts,tPts)
newTree.Print()
err,badEx = CrossValidate.CrossValidate(newTree,tPts)
print 'pruned holdout error is:',err
print badEx
if __name__ == '__main__':
_verbose=1
#_testRandom()
_testChain()
|
|
#!/usr/bin/env python
# Copyright 2013 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Client tool to trigger tasks or retrieve results from a Swarming server."""
__version__ = '0.6.1'
import collections
import json
import logging
import os
import re
import shutil
import StringIO
import subprocess
import sys
import threading
import time
import urllib
import urlparse
import zipfile
from third_party import colorama
from third_party.depot_tools import fix_encoding
from third_party.depot_tools import subcommand
from utils import file_path
from third_party.chromium import natsort
from utils import net
from utils import on_error
from utils import threading_utils
from utils import tools
from utils import zip_package
import auth
import isolated_format
import isolateserver
import run_isolated
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class Failure(Exception):
"""Generic failure."""
pass
### Isolated file handling.
def isolated_upload_zip_bundle(isolate_server, bundle):
"""Uploads a zip package to Isolate Server and returns raw fetch URL.
Args:
isolate_server: URL of an Isolate Server.
bundle: instance of ZipPackage to upload.
Returns:
URL to get the file from.
"""
# Swarming bot needs to be able to grab the file from the Isolate Server using
# a simple HTTPS GET. Use 'default' namespace so that the raw data returned to
# a bot is not zipped, since the swarming_bot doesn't understand compressed
  # data. This namespace has nothing to do with |namespace| passed to
  # run_isolated.py that is used to store files for the isolated task.
logging.info('Zipping up and uploading files...')
start_time = time.time()
isolate_item = isolateserver.BufferItem(bundle.zip_into_buffer())
with isolateserver.get_storage(isolate_server, 'default') as storage:
uploaded = storage.upload_items([isolate_item])
bundle_url = storage.get_fetch_url(isolate_item)
elapsed = time.time() - start_time
if isolate_item in uploaded:
logging.info('Upload complete, time elapsed: %f', elapsed)
else:
logging.info('Zip file already on server, time elapsed: %f', elapsed)
return bundle_url
def isolated_get_data(isolate_server):
"""Returns the 'data' section with all files necessary to bootstrap a task
execution running an isolated task.
It's mainly zipping run_isolated.zip over and over again.
  TODO(maruel): Get rid of this with
https://code.google.com/p/swarming/issues/detail?id=173
"""
bundle = zip_package.ZipPackage(ROOT_DIR)
bundle.add_buffer(
'run_isolated.zip',
run_isolated.get_as_zip_package().zip_into_buffer(compress=False))
bundle_url = isolated_upload_zip_bundle(isolate_server, bundle)
return [(bundle_url, 'swarm_data.zip')]
def isolated_get_run_commands(
isolate_server, namespace, isolated_hash, extra_args, verbose):
"""Returns the 'commands' to run an isolated task via run_isolated.zip.
Returns:
commands list to be added to the request.
"""
run_cmd = [
'python', 'run_isolated.zip',
'--isolated', isolated_hash,
'--isolate-server', isolate_server,
'--namespace', namespace,
]
if verbose:
run_cmd.append('--verbose')
# Pass all extra args for run_isolated.py, it will pass them to the command.
if extra_args:
run_cmd.append('--')
run_cmd.extend(extra_args)
return run_cmd
def isolated_archive(isolate_server, namespace, isolated, algo, verbose):
"""Archives a .isolated and all the dependencies on the Isolate Server."""
logging.info(
'isolated_archive(%s, %s, %s)', isolate_server, namespace, isolated)
print('Archiving: %s' % isolated)
cmd = [
sys.executable,
os.path.join(ROOT_DIR, 'isolate.py'),
'archive',
'--isolate-server', isolate_server,
'--namespace', namespace,
'--isolated', isolated,
]
cmd.extend(['--verbose'] * verbose)
logging.info(' '.join(cmd))
  if subprocess.call(cmd):
return None
return isolated_format.hash_file(isolated, algo)
def isolated_to_hash(isolate_server, namespace, arg, algo, verbose):
"""Archives a .isolated file if needed.
Returns the file hash to trigger and a bool specifying if it was a file (True)
or a hash (False).
"""
if arg.endswith('.isolated'):
file_hash = isolated_archive(isolate_server, namespace, arg, algo, verbose)
if not file_hash:
on_error.report('Archival failure %s' % arg)
return None, True
return file_hash, True
elif isolated_format.is_valid_hash(arg, algo):
return arg, False
else:
on_error.report('Invalid hash %s' % arg)
return None, False
def isolated_handle_options(options, args):
"""Handles '--isolated <isolated>', '<isolated>' and '-- <args...>' arguments.
Returns:
tuple(command, data).
"""
isolated_cmd_args = []
if not options.isolated:
if '--' in args:
index = args.index('--')
isolated_cmd_args = args[index+1:]
args = args[:index]
else:
# optparse eats '--' sometimes.
isolated_cmd_args = args[1:]
args = args[:1]
if len(args) != 1:
raise ValueError(
'Use --isolated, --raw-cmd or \'--\' to pass arguments to the called '
'process.')
# Old code. To be removed eventually.
options.isolated, is_file = isolated_to_hash(
options.isolate_server, options.namespace, args[0],
isolated_format.get_hash_algo(options.namespace), options.verbose)
if not options.isolated:
raise ValueError('Invalid argument %s' % args[0])
elif args:
is_file = False
if '--' in args:
index = args.index('--')
isolated_cmd_args = args[index+1:]
if index != 0:
raise ValueError('Unexpected arguments.')
else:
# optparse eats '--' sometimes.
isolated_cmd_args = args
command = isolated_get_run_commands(
options.isolate_server, options.namespace, options.isolated,
isolated_cmd_args, options.verbose)
  # If a file name was passed, use its base name instead of the isolated hash.
  # Otherwise, use the user name as an approximation of a task name.
if not options.task_name:
if is_file:
key = os.path.splitext(os.path.basename(args[0]))[0]
else:
key = options.user
options.task_name = u'%s/%s/%s' % (
key,
'_'.join(
'%s=%s' % (k, v)
for k, v in sorted(options.dimensions.iteritems())),
options.isolated)
try:
data = isolated_get_data(options.isolate_server)
except (IOError, OSError):
on_error.report('Failed to upload the zip file')
raise ValueError('Failed to upload the zip file')
return command, data
### Triggering.
TaskRequest = collections.namedtuple(
'TaskRequest',
[
'command',
'data',
'dimensions',
'env',
'expiration',
'hard_timeout',
'idempotent',
'io_timeout',
'name',
'priority',
'tags',
'user',
'verbose',
])
def task_request_to_raw_request(task_request):
"""Returns the json dict expected by the Swarming server for new request.
This is for the v1 client Swarming API.
"""
return {
'name': task_request.name,
'parent_task_id': os.environ.get('SWARMING_TASK_ID', ''),
'priority': task_request.priority,
'properties': {
'commands': [task_request.command],
'data': task_request.data,
'dimensions': task_request.dimensions,
'env': task_request.env,
'execution_timeout_secs': task_request.hard_timeout,
'io_timeout_secs': task_request.io_timeout,
'idempotent': task_request.idempotent,
},
'scheduling_expiration_secs': task_request.expiration,
'tags': task_request.tags,
'user': task_request.user,
}
def swarming_handshake(swarming):
"""Initiates the connection to the Swarming server."""
headers = {'X-XSRF-Token-Request': '1'}
response = net.url_read_json(
swarming + '/swarming/api/v1/client/handshake',
headers=headers,
data={})
if not response:
logging.error('Failed to handshake with server')
return None
logging.info('Connected to server version: %s', response['server_version'])
return response['xsrf_token']
def swarming_trigger(swarming, raw_request, xsrf_token):
"""Triggers a request on the Swarming server and returns the json data.
It's the low-level function.
Returns:
{
'request': {
'created_ts': u'2010-01-02 03:04:05',
'name': ..
},
'task_id': '12300',
}
"""
logging.info('Triggering: %s', raw_request['name'])
headers = {'X-XSRF-Token': xsrf_token}
result = net.url_read_json(
swarming + '/swarming/api/v1/client/request',
data=raw_request,
headers=headers)
if not result:
on_error.report('Failed to trigger task %s' % raw_request['name'])
return None
return result
def setup_googletest(env, shards, index):
"""Sets googletest specific environment variables."""
if shards > 1:
env = env.copy()
env['GTEST_SHARD_INDEX'] = str(index)
env['GTEST_TOTAL_SHARDS'] = str(shards)
return env
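# For example (illustrative values), setup_googletest(env, shards=3, index=1)
# returns a copy of env with GTEST_SHARD_INDEX='1' and GTEST_TOTAL_SHARDS='3'.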
def trigger_task_shards(swarming, task_request, shards):
"""Triggers one or many subtasks of a sharded task.
Returns:
Dict with task details, returned to caller as part of --dump-json output.
None in case of failure.
"""
def convert(index):
req = task_request
if shards > 1:
req = req._replace(
env=setup_googletest(req.env, shards, index),
name='%s:%s:%s' % (req.name, index, shards))
return task_request_to_raw_request(req)
requests = [convert(index) for index in xrange(shards)]
xsrf_token = swarming_handshake(swarming)
if not xsrf_token:
return None
tasks = {}
priority_warning = False
for index, request in enumerate(requests):
task = swarming_trigger(swarming, request, xsrf_token)
if not task:
break
logging.info('Request result: %s', task)
if (not priority_warning and
task['request']['priority'] != task_request.priority):
priority_warning = True
print >> sys.stderr, (
'Priority was reset to %s' % task['request']['priority'])
tasks[request['name']] = {
'shard_index': index,
'task_id': task['task_id'],
'view_url': '%s/user/task/%s' % (swarming, task['task_id']),
}
# Some shards weren't triggered. Abort everything.
if len(tasks) != len(requests):
if tasks:
print >> sys.stderr, 'Only %d shard(s) out of %d were triggered' % (
len(tasks), len(requests))
for task_dict in tasks.itervalues():
abort_task(swarming, task_dict['task_id'])
return None
return tasks
### Collection.
# How often to print status updates to stdout in 'collect'.
STATUS_UPDATE_INTERVAL = 15 * 60.
class State(object):
"""States in which a task can be.
WARNING: Copy-pasted from appengine/swarming/server/task_result.py. These
values are part of the API so if they change, the API changed.
It's in fact an enum. Values should be in decreasing order of importance.
"""
RUNNING = 0x10
PENDING = 0x20
EXPIRED = 0x30
TIMED_OUT = 0x40
BOT_DIED = 0x50
CANCELED = 0x60
COMPLETED = 0x70
STATES = (RUNNING, PENDING, EXPIRED, TIMED_OUT, BOT_DIED, CANCELED, COMPLETED)
STATES_RUNNING = (RUNNING, PENDING)
STATES_NOT_RUNNING = (EXPIRED, TIMED_OUT, BOT_DIED, CANCELED, COMPLETED)
STATES_DONE = (TIMED_OUT, COMPLETED)
STATES_ABANDONED = (EXPIRED, BOT_DIED, CANCELED)
_NAMES = {
RUNNING: 'Running',
PENDING: 'Pending',
EXPIRED: 'Expired',
TIMED_OUT: 'Execution timed out',
BOT_DIED: 'Bot died',
CANCELED: 'User canceled',
COMPLETED: 'Completed',
}
@classmethod
def to_string(cls, state):
"""Returns a user-readable string representing a State."""
if state not in cls._NAMES:
raise ValueError('Invalid state %s' % state)
return cls._NAMES[state]
class TaskOutputCollector(object):
"""Assembles task execution summary (for --task-summary-json output).
Optionally fetches task outputs from isolate server to local disk (used when
--task-output-dir is passed).
This object is shared among multiple threads running 'retrieve_results'
function, in particular they call 'process_shard_result' method in parallel.
"""
def __init__(self, task_output_dir, task_name, shard_count):
"""Initializes TaskOutputCollector, ensures |task_output_dir| exists.
Args:
task_output_dir: (optional) local directory to put fetched files to.
task_name: name of the swarming task results belong to.
shard_count: expected number of task shards.
"""
self.task_output_dir = task_output_dir
self.task_name = task_name
self.shard_count = shard_count
self._lock = threading.Lock()
self._per_shard_results = {}
self._storage = None
if self.task_output_dir and not os.path.isdir(self.task_output_dir):
os.makedirs(self.task_output_dir)
def process_shard_result(self, shard_index, result):
"""Stores results of a single task shard, fetches output files if necessary.
Modifies |result| in place.
Called concurrently from multiple threads.
"""
# Sanity check index is in expected range.
assert isinstance(shard_index, int)
if shard_index < 0 or shard_index >= self.shard_count:
logging.warning(
'Shard index %d is outside of expected range: [0; %d]',
shard_index, self.shard_count - 1)
return
assert not 'isolated_out' in result
result['isolated_out'] = None
for output in result['outputs']:
isolated_files_location = extract_output_files_location(output)
if isolated_files_location:
if result['isolated_out']:
          raise ValueError('Unexpected second isolated output in the task log')
result['isolated_out'] = isolated_files_location
# Store result dict of that shard, ignore results we've already seen.
with self._lock:
if shard_index in self._per_shard_results:
logging.warning('Ignoring duplicate shard index %d', shard_index)
return
self._per_shard_results[shard_index] = result
# Fetch output files if necessary.
if self.task_output_dir and result['isolated_out']:
storage = self._get_storage(
result['isolated_out']['server'],
result['isolated_out']['namespace'])
if storage:
# Output files are supposed to be small and they are not reused across
# tasks. So use MemoryCache for them instead of on-disk cache. Make
# files writable, so that calling script can delete them.
isolateserver.fetch_isolated(
result['isolated_out']['hash'],
storage,
isolateserver.MemoryCache(file_mode_mask=0700),
os.path.join(self.task_output_dir, str(shard_index)),
False)
def finalize(self):
"""Assembles and returns task summary JSON, shutdowns underlying Storage."""
with self._lock:
# Write an array of shard results with None for missing shards.
summary = {
'shards': [
self._per_shard_results.get(i) for i in xrange(self.shard_count)
],
}
# Write summary.json to task_output_dir as well.
if self.task_output_dir:
tools.write_json(
os.path.join(self.task_output_dir, 'summary.json'),
summary,
False)
if self._storage:
self._storage.close()
self._storage = None
return summary
def _get_storage(self, isolate_server, namespace):
"""Returns isolateserver.Storage to use to fetch files."""
assert self.task_output_dir
with self._lock:
if not self._storage:
self._storage = isolateserver.get_storage(isolate_server, namespace)
else:
# Shards must all use exact same isolate server and namespace.
if self._storage.location != isolate_server:
logging.error(
'Task shards are using multiple isolate servers: %s and %s',
self._storage.location, isolate_server)
return None
if self._storage.namespace != namespace:
logging.error(
'Task shards are using multiple namespaces: %s and %s',
self._storage.namespace, namespace)
return None
return self._storage
def extract_output_files_location(task_log):
"""Task log -> location of task output files to fetch.
TODO(vadimsh,maruel): Use side-channel to get this information.
See 'run_tha_test' in run_isolated.py for where the data is generated.
Returns:
Tuple (isolate server URL, namespace, isolated hash) on success.
None if information is missing or can not be parsed.
"""
if not task_log:
return None
match = re.search(
r'\[run_isolated_out_hack\](.*)\[/run_isolated_out_hack\]',
task_log,
re.DOTALL)
if not match:
return None
def to_ascii(val):
if not isinstance(val, basestring):
raise ValueError()
return val.encode('ascii')
try:
data = json.loads(match.group(1))
if not isinstance(data, dict):
raise ValueError()
isolated_hash = to_ascii(data['hash'])
namespace = to_ascii(data['namespace'])
isolate_server = to_ascii(data['storage'])
if not file_path.is_url(isolate_server):
raise ValueError()
data = {
'hash': isolated_hash,
'namespace': namespace,
'server': isolate_server,
'view_url': '%s/browse?%s' % (isolate_server, urllib.urlencode(
[('namespace', namespace), ('hash', isolated_hash)])),
}
return data
except (KeyError, ValueError):
logging.warning(
'Unexpected value of run_isolated_out_hack: %s', match.group(1))
return None
def now():
"""Exists so it can be mocked easily."""
return time.time()
def retrieve_results(
base_url, shard_index, task_id, timeout, should_stop, output_collector):
"""Retrieves results for a single task ID.
Returns:
<result dict> on success.
None on failure.
"""
assert isinstance(timeout, float), timeout
result_url = '%s/swarming/api/v1/client/task/%s' % (base_url, task_id)
output_url = '%s/swarming/api/v1/client/task/%s/output/all' % (
base_url, task_id)
started = now()
deadline = started + timeout if timeout else None
attempt = 0
while not should_stop.is_set():
attempt += 1
# Waiting for too long -> give up.
current_time = now()
if deadline and current_time >= deadline:
logging.error('retrieve_results(%s) timed out on attempt %d',
base_url, attempt)
return None
# Do not spin too fast. Spin faster at the beginning though.
# Start with 1 sec delay and for each 30 sec of waiting add another second
# of delay, until hitting 15 sec ceiling.
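    # For example, ~60 sec after starting: max_delay = min(15, 1 + 60 / 30.) = 3.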
if attempt > 1:
max_delay = min(15, 1 + (current_time - started) / 30.0)
delay = min(max_delay, deadline - current_time) if deadline else max_delay
if delay > 0:
logging.debug('Waiting %.1f sec before retrying', delay)
should_stop.wait(delay)
if should_stop.is_set():
return None
# Disable internal retries in net.url_read_json, since we are doing retries
# ourselves.
# TODO(maruel): We'd need to know if it's a 404 and not retry at all.
result = net.url_read_json(result_url, retry_50x=False)
if not result:
continue
if result['state'] in State.STATES_NOT_RUNNING:
out = net.url_read_json(output_url)
result['outputs'] = (out or {}).get('outputs', [])
if not result['outputs']:
logging.error('No output found for task %s', task_id)
# Record the result, try to fetch attached output files (if any).
if output_collector:
# TODO(vadimsh): Respect |should_stop| and |deadline| when fetching.
output_collector.process_shard_result(shard_index, result)
return result
def yield_results(
swarm_base_url, task_ids, timeout, max_threads, print_status_updates,
output_collector):
"""Yields swarming task results from the swarming server as (index, result).
Duplicate shards are ignored. Shards are yielded in order of completion.
Timed out shards are NOT yielded at all. Caller can compare number of yielded
shards with len(task_keys) to verify all shards completed.
  max_threads is optional and is used to limit the number of parallel fetches
  done. Since the number of task_keys is generally <= 10, it's normally not
  worth limiting the number of threads. Mostly used for testing purposes.
output_collector is an optional instance of TaskOutputCollector that will be
used to fetch files produced by a task from isolate server to the local disk.
Yields:
(index, result). In particular, 'result' is defined as the
GetRunnerResults() function in services/swarming/server/test_runner.py.
"""
number_threads = (
min(max_threads, len(task_ids)) if max_threads else len(task_ids))
should_stop = threading.Event()
results_channel = threading_utils.TaskChannel()
with threading_utils.ThreadPool(number_threads, number_threads, 0) as pool:
try:
# Adds a task to the thread pool to call 'retrieve_results' and return
# the results together with shard_index that produced them (as a tuple).
def enqueue_retrieve_results(shard_index, task_id):
task_fn = lambda *args: (shard_index, retrieve_results(*args))
pool.add_task(
0, results_channel.wrap_task(task_fn), swarm_base_url, shard_index,
task_id, timeout, should_stop, output_collector)
# Enqueue 'retrieve_results' calls for each shard key to run in parallel.
for shard_index, task_id in enumerate(task_ids):
enqueue_retrieve_results(shard_index, task_id)
# Wait for all of them to finish.
shards_remaining = range(len(task_ids))
active_task_count = len(task_ids)
while active_task_count:
shard_index, result = None, None
try:
shard_index, result = results_channel.pull(
timeout=STATUS_UPDATE_INTERVAL)
except threading_utils.TaskChannel.Timeout:
if print_status_updates:
print(
'Waiting for results from the following shards: %s' %
', '.join(map(str, shards_remaining)))
sys.stdout.flush()
continue
except Exception:
logging.exception('Unexpected exception in retrieve_results')
# A call to 'retrieve_results' finished (successfully or not).
active_task_count -= 1
if not result:
logging.error('Failed to retrieve the results for a swarming key')
continue
# Yield back results to the caller.
assert shard_index in shards_remaining
shards_remaining.remove(shard_index)
yield shard_index, result
finally:
# Done or aborted with Ctrl+C, kill the remaining threads.
should_stop.set()
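# decorate_shard_output() below wraps a shard's output in a simple ASCII box,
# roughly like this (illustrative values; both boxes are padded to the width
# of the longer tag line):
#   +--------------------------------------------------------+
#   | Shard 0 https://server/user/task/10100                  |
#   +--------------------------------------------------------+
#   ...task output...
#   +--------------------------------------------------------+
#   | End of shard 0 Duration: 12.3s Bot: swarm1 Exit code 0  |
#   +--------------------------------------------------------+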
def decorate_shard_output(
swarming, shard_index, result, shard_exit_code, shard_duration):
"""Returns wrapped output for swarming task shard."""
url = '%s/user/task/%s' % (swarming, result['id'])
tag_header = 'Shard %d %s' % (shard_index, url)
tag_footer = 'End of shard %d Duration: %.1fs Bot: %s Exit code %s' % (
shard_index, shard_duration, result['bot_id'], shard_exit_code)
tag_len = max(len(tag_header), len(tag_footer))
dash_pad = '+-%s-+\n' % ('-' * tag_len)
tag_header = '| %s |\n' % tag_header.ljust(tag_len)
tag_footer = '| %s |\n' % tag_footer.ljust(tag_len)
header = dash_pad + tag_header + dash_pad
footer = dash_pad + tag_footer + dash_pad[:-1]
output = '\n'.join(o for o in result['outputs'] if o).rstrip() + '\n'
return header + output + footer
def collect(
swarming, task_name, task_ids, timeout, decorate, print_status_updates,
task_summary_json, task_output_dir):
"""Retrieves results of a Swarming task."""
# Collect summary JSON and output files (if task_output_dir is not None).
output_collector = TaskOutputCollector(
task_output_dir, task_name, len(task_ids))
seen_shards = set()
exit_code = 0
total_duration = 0
try:
for index, output in yield_results(
swarming, task_ids, timeout, None, print_status_updates,
output_collector):
seen_shards.add(index)
# Grab first non-zero exit code as an overall shard exit code. Default to
# failure if there was no process that even started.
shard_exit_code = 1
shard_exit_codes = sorted(output['exit_codes'], key=lambda x: not x)
if shard_exit_codes:
shard_exit_code = shard_exit_codes[0]
if shard_exit_code:
exit_code = shard_exit_code
shard_duration = sum(i for i in output['durations'] if i)
total_duration += shard_duration
if decorate:
print(decorate_shard_output(
swarming, index, output, shard_exit_code, shard_duration))
if len(seen_shards) < len(task_ids):
print('')
else:
print('%s: %s %d' % (output['bot_id'], output['id'], shard_exit_code))
for output in output['outputs']:
if not output:
continue
output = output.rstrip()
if output:
print(''.join(' %s\n' % l for l in output.splitlines()))
finally:
summary = output_collector.finalize()
if task_summary_json:
tools.write_json(task_summary_json, summary, False)
if decorate and total_duration:
print('Total duration: %.1fs' % total_duration)
if len(seen_shards) != len(task_ids):
missing_shards = [x for x in range(len(task_ids)) if x not in seen_shards]
print >> sys.stderr, ('Results from some shards are missing: %s' %
', '.join(map(str, missing_shards)))
return 1
return exit_code
### Commands.
def abort_task(_swarming, _manifest):
"""Given a task manifest that was triggered, aborts its execution."""
  # TODO(vadimsh): Not supported by the server yet.
def add_filter_options(parser):
parser.filter_group = tools.optparse.OptionGroup(parser, 'Filtering slaves')
parser.filter_group.add_option(
'-d', '--dimension', default=[], action='append', nargs=2,
dest='dimensions', metavar='FOO bar',
help='dimension to filter on')
parser.add_option_group(parser.filter_group)
def add_sharding_options(parser):
parser.sharding_group = tools.optparse.OptionGroup(parser, 'Sharding options')
parser.sharding_group.add_option(
'--shards', type='int', default=1,
help='Number of shards to trigger and collect.')
parser.add_option_group(parser.sharding_group)
def add_trigger_options(parser):
"""Adds all options to trigger a task on Swarming."""
isolateserver.add_isolate_server_options(parser)
add_filter_options(parser)
parser.task_group = tools.optparse.OptionGroup(parser, 'Task properties')
parser.task_group.add_option(
'-s', '--isolated',
help='Hash of the .isolated to grab from the isolate server')
parser.task_group.add_option(
'-e', '--env', default=[], action='append', nargs=2, metavar='FOO bar',
help='Environment variables to set')
parser.task_group.add_option(
'--priority', type='int', default=100,
      help='The lower the value, the more important the task is')
parser.task_group.add_option(
'-T', '--task-name',
help='Display name of the task. Defaults to '
'<base_name>/<dimensions>/<isolated hash>/<timestamp> if an '
'isolated file is provided, if a hash is provided, it defaults to '
'<user>/<dimensions>/<isolated hash>/<timestamp>')
parser.task_group.add_option(
'--tags', action='append', default=[],
help='Tags to assign to the task.')
parser.task_group.add_option(
'--user', default='',
help='User associated with the task. Defaults to authenticated user on '
'the server.')
parser.task_group.add_option(
'--idempotent', action='store_true', default=False,
help='When set, the server will actively try to find a previous task '
           'with the same parameters and return that result instead if possible')
parser.task_group.add_option(
'--expiration', type='int', default=6*60*60,
help='Seconds to allow the task to be pending for a bot to run before '
'this task request expires.')
parser.task_group.add_option(
'--deadline', type='int', dest='expiration',
help=tools.optparse.SUPPRESS_HELP)
parser.task_group.add_option(
'--hard-timeout', type='int', default=60*60,
help='Seconds to allow the task to complete.')
parser.task_group.add_option(
'--io-timeout', type='int', default=20*60,
help='Seconds to allow the task to be silent.')
parser.task_group.add_option(
'--raw-cmd', action='store_true', default=False,
help='When set, the command after -- is used as-is without run_isolated. '
'In this case, no .isolated file is expected.')
parser.add_option_group(parser.task_group)
def process_trigger_options(parser, options, args):
"""Processes trigger options and uploads files to isolate server if necessary.
"""
options.dimensions = dict(options.dimensions)
options.env = dict(options.env)
data = []
if not options.dimensions:
parser.error('Please at least specify one --dimension')
if options.raw_cmd:
if not args:
parser.error(
'Arguments with --raw-cmd should be passed after -- as command '
'delimiter.')
if options.isolate_server:
parser.error('Can\'t use both --raw-cmd and --isolate-server.')
command = args
if not options.task_name:
options.task_name = u'%s/%s' % (
options.user,
'_'.join(
'%s=%s' % (k, v)
for k, v in sorted(options.dimensions.iteritems())))
else:
isolateserver.process_isolate_server_options(parser, options, False)
try:
command, data = isolated_handle_options(options, args)
except ValueError as e:
parser.error(str(e))
return TaskRequest(
command=command,
data=data,
dimensions=options.dimensions,
env=options.env,
expiration=options.expiration,
hard_timeout=options.hard_timeout,
idempotent=options.idempotent,
io_timeout=options.io_timeout,
name=options.task_name,
priority=options.priority,
tags=options.tags,
user=options.user,
verbose=options.verbose)
def add_collect_options(parser):
parser.server_group.add_option(
'-t', '--timeout',
type='float',
default=80*60.,
help='Timeout to wait for result, set to 0 for no timeout; default: '
'%default s')
parser.group_logging.add_option(
'--decorate', action='store_true', help='Decorate output')
parser.group_logging.add_option(
'--print-status-updates', action='store_true',
help='Print periodic status updates')
parser.task_output_group = tools.optparse.OptionGroup(parser, 'Task output')
parser.task_output_group.add_option(
'--task-summary-json',
metavar='FILE',
      help='Dump a summary of task results to this file as json. It contains '
           'only the shard statuses as known to the server. Any output files '
           'emitted by the task can be collected by using --task-output-dir')
parser.task_output_group.add_option(
'--task-output-dir',
metavar='DIR',
help='Directory to put task results into. When the task finishes, this '
'directory contains per-shard directory with output files produced '
'by shards: <task-output-dir>/<zero-based-shard-index>/.')
parser.add_option_group(parser.task_output_group)
def CMDbots(parser, args):
"""Returns information about the bots connected to the Swarming server."""
add_filter_options(parser)
parser.filter_group.add_option(
'--dead-only', action='store_true',
help='Only print dead bots, useful to reap them and reimage broken bots')
parser.filter_group.add_option(
'-k', '--keep-dead', action='store_true',
help='Do not filter out dead bots')
parser.filter_group.add_option(
'-b', '--bare', action='store_true',
help='Do not print out dimensions')
options, args = parser.parse_args(args)
if options.keep_dead and options.dead_only:
parser.error('Use only one of --keep-dead and --dead-only')
bots = []
cursor = None
limit = 250
# Iterate via cursors.
base_url = options.swarming + '/swarming/api/v1/client/bots?limit=%d' % limit
while True:
url = base_url
if cursor:
url += '&cursor=%s' % urllib.quote(cursor)
data = net.url_read_json(url)
if data is None:
print >> sys.stderr, 'Failed to access %s' % options.swarming
return 1
bots.extend(data['items'])
cursor = data['cursor']
if not cursor:
break
for bot in natsort.natsorted(bots, key=lambda x: x['id']):
if options.dead_only:
if not bot['is_dead']:
continue
elif not options.keep_dead and bot['is_dead']:
continue
# If the user requested to filter on dimensions, ensure the bot has all the
# dimensions requested.
dimensions = bot['dimensions']
for key, value in options.dimensions:
if key not in dimensions:
break
      # A bot can have multiple values for a key; for example,
      # {'os': ['Windows', 'Windows-6.1']}, so that --dimension os=Windows will
      # be accepted.
if isinstance(dimensions[key], list):
if value not in dimensions[key]:
break
else:
if value != dimensions[key]:
break
else:
print bot['id']
if not options.bare:
print ' %s' % json.dumps(dimensions, sort_keys=True)
if bot.get('task_id'):
print ' task: %s' % bot['task_id']
return 0
@subcommand.usage('--json file | task_id...')
def CMDcollect(parser, args):
"""Retrieves results of one or multiple Swarming task by its ID.
The result can be in multiple part if the execution was sharded. It can
potentially have retries.
"""
add_collect_options(parser)
parser.add_option(
'-j', '--json',
help='Load the task ids from .json as saved by trigger --dump-json')
(options, args) = parser.parse_args(args)
if not args and not options.json:
parser.error('Must specify at least one task id or --json.')
if args and options.json:
parser.error('Only use one of task id or --json.')
if options.json:
with open(options.json) as f:
tasks = sorted(
json.load(f)['tasks'].itervalues(), key=lambda x: x['shard_index'])
args = [t['task_id'] for t in tasks]
else:
valid = frozenset('0123456789abcdef')
if any(not valid.issuperset(task_id) for task_id in args):
parser.error('Task ids are 0-9a-f.')
try:
return collect(
options.swarming,
None,
args,
options.timeout,
options.decorate,
options.print_status_updates,
options.task_summary_json,
options.task_output_dir)
except Failure:
on_error.report(None)
return 1
@subcommand.usage('[resource name]')
def CMDquery(parser, args):
"""Returns raw JSON information via an URL endpoint. Use 'list' to gather the
list of valid values from the server.
Examples:
Printing the list of known URLs:
swarming.py query -S https://server-url list
Listing last 50 tasks on a specific bot named 'swarm1'
swarming.py query -S https://server-url --limit 50 bot/swarm1/tasks
"""
CHUNK_SIZE = 250
parser.add_option(
'-L', '--limit', type='int', default=200,
help='Limit to enforce on limitless items (like number of tasks); '
'default=%default')
(options, args) = parser.parse_args(args)
if len(args) != 1:
parser.error('Must specify only one resource name.')
base_url = options.swarming + '/swarming/api/v1/client/' + args[0]
url = base_url
if options.limit:
    # Double-check this; change if it doesn't work out.
merge_char = '&' if '?' in url else '?'
url += '%slimit=%d' % (merge_char, min(CHUNK_SIZE, options.limit))
data = net.url_read_json(url)
if data is None:
print >> sys.stderr, 'Failed to access %s' % options.swarming
return 1
# Some items support cursors. Try to get automatically if cursors are needed
# by looking at the 'cursor' items.
while (
data.get('cursor') and
(not options.limit or len(data['items']) < options.limit)):
url = base_url + '?cursor=%s' % urllib.quote(data['cursor'])
if options.limit:
url += '&limit=%d' % min(CHUNK_SIZE, options.limit - len(data['items']))
new = net.url_read_json(url)
if new is None:
print >> sys.stderr, 'Failed to access %s' % options.swarming
return 1
data['items'].extend(new['items'])
data['cursor'] = new['cursor']
if options.limit and len(data.get('items', [])) > options.limit:
data['items'] = data['items'][:options.limit]
data.pop('cursor', None)
json.dump(data, sys.stdout, indent=2, sort_keys=True)
sys.stdout.write('\n')
return 0
@subcommand.usage('(hash|isolated) [-- extra_args]')
def CMDrun(parser, args):
"""Triggers a task and wait for the results.
Basically, does everything to run a command remotely.
"""
add_trigger_options(parser)
add_collect_options(parser)
add_sharding_options(parser)
options, args = parser.parse_args(args)
task_request = process_trigger_options(parser, options, args)
try:
tasks = trigger_task_shards(
options.swarming, task_request, options.shards)
except Failure as e:
on_error.report(
'Failed to trigger %s(%s): %s' %
(options.task_name, args[0], e.args[0]))
return 1
if not tasks:
on_error.report('Failed to trigger the task.')
return 1
print('Triggered task: %s' % options.task_name)
task_ids = [
t['task_id']
for t in sorted(tasks.itervalues(), key=lambda x: x['shard_index'])
]
try:
return collect(
options.swarming,
options.task_name,
task_ids,
options.timeout,
options.decorate,
options.print_status_updates,
options.task_summary_json,
options.task_output_dir)
except Failure:
on_error.report(None)
return 1
@subcommand.usage('task_id')
def CMDreproduce(parser, args):
"""Runs a task locally that was triggered on the server.
  This runs locally the same commands that were run on the bot. The data
  downloaded will be in a subdirectory named 'work' of the current working
  directory.
"""
options, args = parser.parse_args(args)
if len(args) != 1:
parser.error('Must specify exactly one task id.')
url = options.swarming + '/swarming/api/v1/client/task/%s/request' % args[0]
request = net.url_read_json(url)
if not request:
print >> sys.stderr, 'Failed to retrieve request data for the task'
return 1
if not os.path.isdir('work'):
os.mkdir('work')
swarming_host = urlparse.urlparse(options.swarming).netloc
properties = request['properties']
for data_url, _ in properties['data']:
assert data_url.startswith('https://'), data_url
data_host = urlparse.urlparse(data_url).netloc
if data_host != swarming_host:
auth.ensure_logged_in('https://' + data_host)
content = net.url_read(data_url)
if content is None:
print >> sys.stderr, 'Failed to download %s' % data_url
return 1
with zipfile.ZipFile(StringIO.StringIO(content)) as zip_file:
zip_file.extractall('work')
env = None
if properties['env']:
env = os.environ.copy()
logging.info('env: %r', properties['env'])
env.update(
(k.encode('utf-8'), v.encode('utf-8'))
for k, v in properties['env'].iteritems())
exit_code = 0
for cmd in properties['commands']:
try:
c = subprocess.call(cmd, env=env, cwd='work')
except OSError as e:
print >> sys.stderr, 'Failed to run: %s' % ' '.join(cmd)
print >> sys.stderr, str(e)
c = 1
if not exit_code:
exit_code = c
return exit_code
@subcommand.usage("(hash|isolated) [-- extra_args|raw command]")
def CMDtrigger(parser, args):
"""Triggers a Swarming task.
Accepts either the hash (sha1) of a .isolated file already uploaded or the
path to an .isolated file to archive.
  If an .isolated file is specified instead of a hash, it is first archived.
Passes all extra arguments provided after '--' as additional command line
arguments for an isolated command specified in *.isolate file.
"""
add_trigger_options(parser)
add_sharding_options(parser)
parser.add_option(
'--dump-json',
metavar='FILE',
help='Dump details about the triggered task(s) to this file as json')
options, args = parser.parse_args(args)
task_request = process_trigger_options(parser, options, args)
try:
tasks = trigger_task_shards(
options.swarming, task_request, options.shards)
if tasks:
print('Triggered task: %s' % options.task_name)
tasks_sorted = sorted(
tasks.itervalues(), key=lambda x: x['shard_index'])
if options.dump_json:
data = {
'base_task_name': options.task_name,
'tasks': tasks,
}
tools.write_json(options.dump_json, data, True)
print('To collect results, use:')
print(' swarming.py collect -S %s --json %s' %
(options.swarming, options.dump_json))
else:
print('To collect results, use:')
print(' swarming.py collect -S %s %s' %
(options.swarming, ' '.join(t['task_id'] for t in tasks_sorted)))
print('Or visit:')
for t in tasks_sorted:
print(' ' + t['view_url'])
return int(not tasks)
except Failure:
on_error.report(None)
return 1
class OptionParserSwarming(tools.OptionParserWithLogging):
def __init__(self, **kwargs):
tools.OptionParserWithLogging.__init__(
self, prog='swarming.py', **kwargs)
self.server_group = tools.optparse.OptionGroup(self, 'Server')
self.server_group.add_option(
'-S', '--swarming',
metavar='URL', default=os.environ.get('SWARMING_SERVER', ''),
help='Swarming server to use')
self.add_option_group(self.server_group)
auth.add_auth_options(self)
def parse_args(self, *args, **kwargs):
options, args = tools.OptionParserWithLogging.parse_args(
self, *args, **kwargs)
auth.process_auth_options(self, options)
user = self._process_swarming(options)
if hasattr(options, 'user') and not options.user:
options.user = user
return options, args
def _process_swarming(self, options):
"""Processes the --swarming option and aborts if not specified.
Returns the identity as determined by the server.
"""
if not options.swarming:
self.error('--swarming is required.')
try:
options.swarming = net.fix_url(options.swarming)
except ValueError as e:
self.error('--swarming %s' % e)
on_error.report_on_exception_exit(options.swarming)
try:
user = auth.ensure_logged_in(options.swarming)
except ValueError as e:
self.error(str(e))
return user
def main(args):
dispatcher = subcommand.CommandDispatcher(__name__)
return dispatcher.execute(OptionParserSwarming(version=__version__), args)
if __name__ == '__main__':
fix_encoding.fix_encoding()
tools.disable_buffering()
colorama.init()
sys.exit(main(sys.argv[1:]))
|
|
# coding: utf-8
from functools import partial
import sublime
from sublime_plugin import WindowCommand
from ..util import noop, StatusSpinner
from ..cmd import GitFlowCmd
enabled = True
__all__ = ['GitFlowInitCommand', 'GitFlowFeatureCommand', 'GitFlowFeatureStartCommand', 'GitFlowFeatureFinishCommand',
'GitFlowReleaseCommand', 'GitFlowReleaseStartCommand', 'GitFlowReleaseFinishCommand', 'GitFlowHotfixStartCommand',
'GitFlowHotfixFinishCommand']
class GitFlowWindowCmd(GitFlowCmd):
def is_visible(self):
return enabled
def is_enabled(self):
return enabled
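    # get_branch_choices() parses the `git flow <kind>` listing output, where
    # the current branch is marked in the first column, e.g. (illustrative):
    #     feature/login
    #   * feature/search
    # and returns the branch names plus quick-panel rows sorted current-first.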
def get_branch_choices(self, repo, kind):
lines = self.git_flow_lines([kind], cwd=repo)
branches, choices = [], []
lines = [l for l in lines if l.strip()]
for l in sorted(lines, key=lambda x: (0 if x[0] == '*' else 1, x[2:])):
current = l[0:2]
name = l[2:]
choices.append(['%s%s' % (current, name.strip())])
branches.append(name)
return branches, choices
def show_branches_panel(self, repo, on_selection, *args, **kwargs):
branches, choices = self.get_branch_choices(repo, *args, **kwargs)
def on_done(idx):
if idx != -1:
branch = branches[idx]
on_selection(branch)
self.window.show_quick_panel(choices, on_done, sublime.MONOSPACE_FONT)
def run_async_gitflow_with_panel(self, repo, cmd, progress, panel_name):
self.panel = self.window.get_output_panel(panel_name)
self.panel_name = panel_name
self.panel_shown = False
thread = self.git_flow_async(cmd, cwd=repo, on_data=self.on_data)
runner = StatusSpinner(thread, progress)
runner.start()
def on_data(self, d):
if not self.panel_shown:
self.window.run_command('show_panel', {'panel': 'output.%s' % self.panel_name})
self.panel.run_command('git_panel_append', {'content': d, 'scroll': True})
self.window.run_command('git_status', {'refresh_only': True})
def run_sync_gitflow_with_panel(self, repo, cmd, panel_name):
out = self.git_flow_string(cmd, cwd=repo)
panel = self.window.get_output_panel(panel_name)
panel.run_command('git_panel_write', {'content': out})
self.window.run_command('show_panel', {'panel': 'output.%s' % panel_name})
self.window.run_command('git_status', {'refresh_only': True})
class GitFlowInitCommand(GitFlowWindowCmd, WindowCommand):
def run(self, defaults=True):
repo = self.get_repo()
if not repo:
return
self.run_async_gitflow_with_panel(repo, ['init', '-d'], "Initializing git-flow", "git-flow-init")
# Generic
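# GitFlowStartCommand/GitFlowFinishCommand below drive the git-flow CLI
# (presumably prefixed with `git flow` by the GitFlowCmd helpers): start
# prompts for a name (and optionally a base) and runs e.g.
# `<kind> start <name> [<base>]` synchronously; finish shows a branch picker
# and runs `<kind> finish <branch>` asynchronously with a status spinner.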
class GitFlowStartCommand(GitFlowWindowCmd):
def start(self, kind, base=False):
repo = self.get_repo()
if not repo:
return
self.kind = kind
self.base = base
self.window.show_input_panel('%s:' % self.kind.capitalize(), '', partial(self.on_select, repo), noop, noop)
def on_select(self, repo, selection):
selection = selection.strip()
if not selection:
return
if self.base:
self.window.show_input_panel('Base:', '', partial(self.on_complete, repo, selection), noop, noop)
else:
self.on_complete(repo, selection)
def on_complete(self, repo, selection, base=None):
cmd = [self.kind, 'start', selection]
if base and base.strip():
cmd.append(base.strip())
self.run_sync_gitflow_with_panel(repo, cmd, 'git-flow-%s-start' % self.kind)
self.window.run_command('git_status', {'refresh_only': True})
class GitFlowFinishCommand(GitFlowWindowCmd):
def finish(self, kind):
repo = self.get_repo()
if not repo:
return
self.kind = kind
self.show_branches_panel(repo, partial(self.on_complete, repo), self.kind)
def on_complete(self, repo, selection):
progress = "Finishing %s: %s" % (self.kind, selection)
panel_name = 'git-flow-%s-finish' % self.kind
self.run_async_gitflow_with_panel(repo, [self.kind, 'finish', selection], progress, panel_name)
# Start commands
class GitFlowFeatureStartCommand(GitFlowStartCommand, WindowCommand):
"""
Documentation coming soon.
"""
def run(self, base=False):
self.start('feature', base)
class GitFlowReleaseStartCommand(GitFlowStartCommand, WindowCommand):
"""
Documentation coming soon.
"""
def run(self, base=False):
self.start('release', base)
class GitFlowHotfixStartCommand(GitFlowStartCommand, WindowCommand):
"""
Documentation coming soon.
"""
def run(self, base=False):
self.start('hotfix', base)
# Finish commands
class GitFlowFeatureFinishCommand(GitFlowFinishCommand, WindowCommand):
"""
Documentation coming soon.
"""
def run(self):
self.finish('feature')
class GitFlowReleaseFinishCommand(GitFlowFinishCommand, WindowCommand):
"""
Documentation coming soon.
"""
def run(self):
self.finish('release')
class GitFlowHotfixFinishCommand(GitFlowFinishCommand, WindowCommand):
"""
Documentation coming soon.
"""
def run(self):
self.finish('hotfix')
# Features
class GitFlowFeatureCommand(GitFlowWindowCmd, WindowCommand):
"""
Documentation coming soon.
"""
def run(self):
repo = self.get_repo()
if not repo:
return
self.show_branches_panel(repo, noop, 'feature')
class GitFlowFeaturePublishCommand(GitFlowWindowCmd, WindowCommand):
"""
Documentation coming soon.
"""
def run(self):
pass
class GitFlowFeaturePullCommand(GitFlowWindowCmd, WindowCommand):
"""
Documentation coming soon.
"""
def run(self):
pass
# Releases
class GitFlowReleaseCommand(GitFlowWindowCmd, WindowCommand):
"""
Documentation coming soon.
"""
def run(self):
repo = self.get_repo()
if not repo:
return
self.show_branches_panel(repo, noop, 'release')
|
|
#!/usr/bin/env python
#
# The purpose of the script is to parse the DeepLearning.java file and emit
# code related to parameters.
#
# Currently pieces of R code get emitted and need to be pasted manually into the R file.
#
import sys
import os
import shutil
import signal
import time
import random
import getpass
import re
import subprocess
class Blob:
def __init__(self, n, help):
self.n = n
self.help = help
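# Illustrative example of the kind of Java this parser looks for and the R
# stub it emits (the Java snippet below is made up, not taken from
# DeepLearning.java):
#
#   @API(help = "Learning rate")
#   public double rate = 0.005;
#
# would print:
#
#   parms = .addDoubleParm(parms, k="rate", v=rate)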
def read_deeplearning_file(deeplearning_file):
"""
Read deep learning file and generate R parameter stub stuff.
@param deeplearning_file: Java source code file
@return: none
"""
try:
nlist = []
in_api = False
help = None
f = open(deeplearning_file, "r")
s = f.readline()
lineno = 0
while (len(s) != 0):
lineno = lineno + 1
stripped = s.strip()
if (len(stripped) == 0):
s = f.readline()
continue
if (stripped.startswith("@API")):
# print("")
if (in_api):
assert(False)
in_api = True
match_groups = re.search("help\s*=\s*\"([^\"]*)\"", stripped)
                if (match_groups is None):
print("Missing help")
sys.exit(1)
help = match_groups.group(1)
# print(help)
s = f.readline()
continue
if (in_api):
skip = False
if "checkpoint" in stripped:
skip = True
if "expert_mode" in stripped:
skip = True
# if "activation" in stripped:
# skip = True
# if "initial_weight_distribution" in stripped:
# skip = True
# if "loss" in stripped:
# skip = True
# if "score_validation_sampling" in stripped:
# skip = True
if (skip):
in_api = False
s = f.readline()
continue
match_groups = re.search("public boolean (\S+) = (\S+);", s)
if (match_groups is not None):
t = "boolean"
n = match_groups.group(1)
v = match_groups.group(2)
print(" parms = .addBooleanParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, n, v)
in_api = False
s = f.readline()
continue
match_groups = re.search("public Activation (\S+) = (\S+);", s)
if (match_groups is not None):
t = "string"
n = match_groups.group(1)
v = match_groups.group(2)
print(" parms = .addStringParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, n, v)
in_api = False
s = f.readline()
continue
match_groups = re.search("public int\[\] (\S+) = .*;", s)
if (match_groups is not None):
t = "int array"
n = match_groups.group(1)
print(" parms = .addIntArrayParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, n)
in_api = False
s = f.readline()
continue
match_groups = re.search("public int (\S+) = .*;", s)
if (match_groups is not None):
t = "int"
n = match_groups.group(1)
print(" parms = .addIntParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, n)
in_api = False
s = f.readline()
continue
match_groups = re.search("public double (\S+) = (\S+);", s)
if (match_groups is not None):
t = "double"
n = match_groups.group(1)
v = match_groups.group(2)
print(" parms = .addDoubleParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, n, v)
in_api = False
s = f.readline()
continue
match_groups = re.search("public float (\S+) = (\S+);", s)
if (match_groups is not None):
t = "float"
n = match_groups.group(1)
v = match_groups.group(2)
print(" parms = .addFloatParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, n, v)
in_api = False
s = f.readline()
continue
match_groups = re.search("public double\[\] (\S+);", s)
if (match_groups is not None):
t = "double array"
n = match_groups.group(1)
print(" parms = .addDoubleArrayParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, n)
in_api = False
s = f.readline()
continue
match_groups = re.search("public long (\S+) = new Random.*;", s)
if (match_groups is not None):
t = "long"
n = match_groups.group(1)
v = -1
print(" parms = .addLongParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, n, v)
in_api = False
s = f.readline()
continue
match_groups = re.search("public long (\S+) = (\S+);", s)
if (match_groups is not None):
t = "long"
n = match_groups.group(1)
v = match_groups.group(2)
print(" parms = .addLongParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, n, v)
in_api = False
s = f.readline()
continue
if (stripped == "public InitialWeightDistribution initial_weight_distribution = InitialWeightDistribution.UniformAdaptive;"):
t = "string"
n = "initial_weight_distribution"
print(" parms = .addStringParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, "initial_weight_distribution", "UniformAdaptive")
in_api = False
s = f.readline()
continue
if (stripped == "public Loss loss = Loss.CrossEntropy;"):
t = "string"
n = "loss"
print(" parms = .addStringParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, "loss", "CrossEntropy")
in_api = False
s = f.readline()
continue
if (stripped == "public ClassSamplingMethod score_validation_sampling = ClassSamplingMethod.Uniform;"):
t = "string"
n = "score_validation_sampling"
print(" parms = .addStringParm(parms, k=\"{}\", v={})".format(n,n))
nlist.append(Blob(n, help))
# print(t, "score_validation_sampling", "Uniform")
in_api = False
s = f.readline()
continue
print("ERROR: No match group found on line ", lineno)
sys.exit(1)
s = f.readline()
f.close()
print("")
print("")
for blob in nlist:
print(" {},".format(blob.n))
print("")
print("")
for blob in nlist:
print(" \item{\code{" + blob.n + "}: " + blob.help + "}")
except IOError as e:
print("")
print("ERROR: Failure reading test list: " + deeplearning_file)
print(" (errno {0}): {1}".format(e.errno, e.strerror))
print("")
sys.exit(1)
def main(argv):
read_deeplearning_file("./src/main/java/hex/deeplearning/DeepLearning.java")
if __name__ == "__main__":
main(sys.argv)
|
|
import logging
from voluptuous import *
from ...adjust import adjust as adjustmodule
from ... import pull as pullmodule
from ... import repo
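# mode_b_ify() takes a "mode A" raw schema dict, drops its "name" key and adds
# a required "internal_url" block with "readwrite"/"readonly" URLs; it is used
# to build the *_modeb schema variants below.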
def mode_b_ify(raw):
clone = raw.copy()
del clone["name"]
clone["internal_url"] = {
"readwrite": Url(), #pylint: disable=no-value-for-parameter
"readonly": Url(), #pylint: disable=no-value-for-parameter
}
return clone
#
# Primitives
#
nonempty_str = All(str, Length(min=1))
nonempty_noblank_str = All(str, Match(r'^\S+$'))
port_num = All(int, Range(min=1, max=65535))
name_str = Match(r'^[a-zA-Z0-9_.][a-zA-Z0-9_.-]*(?<!\.git)$')
#
# Callback
#
callback_raw = {
"url": Url(), #pylint: disable=no-value-for-parameter
Optional("method"): Any("PUT", "POST"),
}
callback = Schema(
{"callback": callback_raw},
required=True,
extra=True,
)
#
# Adjust
#
adjust_raw = {
"name": name_str,
"ref": nonempty_str,
Optional("adjustParameters"): All(dict),
Optional("callback"): callback_raw,
}
adjust = Schema(
adjust_raw,
required=True,
extra=False,
)
adjust_modeb = Schema(
mode_b_ify(adjust_raw),
required=True,
extra=False,
)
#
# Pull
#
pull_scm_raw = {
"name": name_str,
"type": Any(*pullmodule.scm_types),
Optional("ref"): nonempty_str,
"url": Url(), #pylint: disable=no-value-for-parameter
Optional("adjust"): bool,
Optional("callback"): callback_raw,
}
pull_scm = Schema(
pull_scm_raw,
required=True,
extra=False,
)
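# A minimal payload accepted by pull_scm might look like this (illustrative;
# the valid "type" values come from pullmodule.scm_types, "git" is an
# assumption here):
#   {"name": "my-repo", "type": "git", "ref": "master",
#    "url": "https://example.com/my-repo.git"}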
pull_archive_raw = {
"name": name_str,
"type": pullmodule.archive_type,
"url": Url(), #pylint: disable=no-value-for-parameter
Optional("adjust"): bool,
Optional("callback"): callback_raw,
}
pull_archive = Schema(
pull_archive_raw,
required=True,
extra=False,
)
pull_raw = Any(pull_scm, pull_archive)
pull = Schema(
pull_raw,
required=True,
extra=False,
)
pull_scm_modeb = mode_b_ify(pull_scm_raw)
pull_archive_modeb = mode_b_ify(pull_archive_raw)
pull_modeb = Schema(
Any(pull_scm_modeb, pull_archive_modeb),
required=True,
extra=False,
)
#
# Clone
#
clone_raw = {
#"name": name_str,
"type": "git", # only git supported for now
"ref": nonempty_str,
"originRepoUrl": Url(),
"targetRepoUrl": Url(),
Optional("callback"): callback_raw,
}
clone = Schema(
clone_raw,
required = True,
extra = False,
)
#
# Returns
#
error_validation = Schema(
[{
"error_message": str,
"path": [str],
"error_type": str,
}],
required=True,
extra=False,
)
error_described = Schema(
{
"error_type": nonempty_str,
"error_traceback": nonempty_str,
str: object,
},
required=True,
extra=False,
)
error_other = Schema(
{
"error_type": nonempty_str,
"error_traceback": nonempty_str,
},
required=True,
extra=False,
)
success_pull = Schema(
{
"branch": nonempty_str,
"tag": nonempty_str,
"url": {
"readonly": Url(), #pylint: disable=no-value-for-parameter
"readwrite": Url(), #pylint: disable=no-value-for-parameter
},
},
required=True,
extra=False,
)
success_adjust = success_pull
success_pull_adjust = Schema(
{
"branch": nonempty_str,
"tag": nonempty_str,
"url": {
"readonly": Url(), #pylint: disable=no-value-for-parameter
"readwrite": Url(), #pylint: disable=no-value-for-parameter
},
"pull": success_pull,
},
required=True,
extra=False,
)
#
# Server configuration
#
server_config_raw = {
"log": {
"path": nonempty_str,
"level": Any(*logging._nameToLevel.keys()),
},
"bind": {
"address": Any(nonempty_str, None),
"port": port_num,
},
"adjust_provider": {
"type": Any(nonempty_str, None),
"params": {Extra: object},
},
"repo_provider": {
"type": Any(*repo.provider_types.keys()),
"params": {Extra: object},
},
}
server_config = Schema(
server_config_raw,
required=True,
extra=False,
)
|
|
from __future__ import print_function, absolute_import, division
import abc
import uuid
import warnings
import tempfile
from six.moves import zip
import numpy as np
from numpy.lib.stride_tricks import as_strided
import dask.array as da
from astropy.wcs import InconsistentAxisTypesError
from astropy.io import fits
from . import wcs_utils
from .utils import WCSWarning
__all__ = ['MaskBase', 'InvertedMask', 'CompositeMask', 'BooleanArrayMask',
'LazyMask', 'LazyComparisonMask', 'FunctionMask']
# Global version of the with_spectral_unit docs to avoid duplicating them
with_spectral_unit_docs = """
Parameters
----------
unit : u.Unit
Any valid spectral unit: velocity, (wave)length, or frequency.
Only vacuum units are supported.
velocity_convention : u.doppler_relativistic, u.doppler_radio, or u.doppler_optical
The velocity convention to use for the output velocity axis.
Required if the output type is velocity.
rest_value : u.Quantity
A rest wavelength or frequency with appropriate units. Required if
output type is velocity. The cube's WCS should include this
already if the *input* type is velocity, but the WCS's rest
wavelength/frequency can be overridden with this parameter.
"""
def is_broadcastable_and_smaller(shp1, shp2):
"""
Test if shape 1 can be broadcast to shape 2, not allowing the case
where shape 2 has a dimension length 1
"""
for a, b in zip(shp1[::-1], shp2[::-1]):
# b==1 is broadcastable but not desired
if a == 1 or a == b:
pass
else:
return False
return True
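# For example: is_broadcastable_and_smaller((1, 5, 6), (2, 5, 6)) is True,
# while is_broadcastable_and_smaller((5, 6), (5, 1)) is False because the
# trailing length-1 dimension of shape 2 is rejected.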
def dims_to_skip(shp1, shp2):
"""
    For a shape `shp1` that is broadcastable to shape `shp2`, return the
    indices (in `shp2`) of the dimensions along which `shp1` is length 1 or
    missing, i.e. the dimensions to skip.
"""
if not is_broadcastable_and_smaller(shp1, shp2):
raise ValueError("Cannot broadcast {0} to {1}".format(shp1,shp2))
dims = []
for ii,(a, b) in enumerate(zip(shp1[::-1], shp2[::-1])):
# b==1 is broadcastable but not desired
if a == 1:
dims.append(len(shp2) - ii - 1)
elif a == b:
pass
else:
raise ValueError("This should not be possible")
if len(shp1) < len(shp2):
dims += list(range(len(shp2)-len(shp1)))
return dims
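# For example, dims_to_skip((1, 5, 6), (2, 5, 6)) returns [0]: only the
# leading length-1 axis of shp1 needs to be skipped.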
def view_of_subset(shp1, shp2, view):
"""
Given two shapes and a view, assuming that shape 1 can be broadcast
to shape 2, return the sub-view that applies to shape 1
"""
# if the view is 1-dimensional, we can't subset it
if not hasattr(view, '__len__'):
return view
dts = dims_to_skip(shp1, shp2)
if view:
cv_view = [x for ii,x in enumerate(view) if ii not in dts]
else:
# if no view is specified, still need to slice
cv_view = [x for ii,x in enumerate([slice(None)]*3)
if ii not in dts]
# return type matters
# array[[0,0]] = [array[0], array[0]]
# array[(0,0)] = array[0,0]
return tuple(cv_view)
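# For example, view_of_subset((1, 5, 6), (2, 5, 6),
#                             (slice(0, 1), slice(None), slice(2, 4)))
# drops the entry for the skipped leading axis and returns
# (slice(None), slice(2, 4)).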
class MaskBase(object):
__metaclass__ = abc.ABCMeta
def include(self, data=None, wcs=None, view=(), **kwargs):
"""
Return a boolean array indicating which values should be included.
If ``view`` is passed, only the sliced mask will be returned, which
avoids having to load the whole mask in memory. Otherwise, the whole
mask is returned in-memory.
kwargs are passed to _validate_wcs
"""
self._validate_wcs(data, wcs, **kwargs)
return self._include(data=data, wcs=wcs, view=view)
# Commented out, but left as a possibility, because including this didn't fix any
# of the problems we encountered with matplotlib plotting
def view(self, view=()):
"""
Compatibility tool: if a numpy.ma.ufunc is run on the mask, it will try
to grab a view of the mask, which needs to appear to numpy as a true
array. This can be important for, e.g., plotting.
Numpy's convention is that masked=True means "masked out"
.. note::
I don't know if there are broader concerns or consequences from
including this 'view' tool here.
"""
return self.exclude(view=view)
def _validate_wcs(self, new_data=None, new_wcs=None, **kwargs):
"""
This method can be overridden in cases where the data and WCS have to
conform to some rules. This gets called automatically when
``include`` or ``exclude`` are called.
"""
pass
@abc.abstractmethod
def _include(self, data=None, wcs=None, view=()):
pass
def exclude(self, data=None, wcs=None, view=(), **kwargs):
"""
Return a boolean array indicating which values should be excluded.
If ``view`` is passed, only the sliced mask will be returned, which
avoids having to load the whole mask in memory. Otherwise, the whole
mask is returned in-memory.
kwargs are passed to _validate_wcs
"""
self._validate_wcs(data, wcs, **kwargs)
return self._exclude(data=data, wcs=wcs, view=view)
def _exclude(self, data=None, wcs=None, view=()):
return np.logical_not(self._include(data=data, wcs=wcs, view=view))
def any(self):
return np.any(self.exclude())
def _flattened(self, data, wcs=None, view=()):
"""
Return a flattened array of the included elements of cube
Parameters
----------
data : array-like
The data array to flatten
view : tuple, optional
Any slicing to apply to the data before flattening
Returns
-------
flat_array : `~numpy.ndarray`
A 1-D ndarray containing the flattened output
Notes
-----
This is an internal method used by :class:`SpectralCube`.
"""
mask = self.include(data=data, wcs=wcs, view=view)
# Workaround for https://github.com/dask/dask/issues/6089
if isinstance(data, da.Array) and not isinstance(mask, da.Array):
mask = da.asarray(mask, name=str(uuid.uuid4()))
# if not isinstance(data, da.Array) and isinstance(mask, da.Array):
# mask = mask.compute()
return data[view][mask]
def _filled(self, data, wcs=None, fill=np.nan, view=(), use_memmap=False,
**kwargs):
"""
Replace the excluded elements of *array* with *fill*.
Parameters
----------
data : array-like
Input array
fill : number
Replacement value
view : tuple, optional
Any slicing to apply to the data before flattening
use_memmap : bool
Use a memory map to store the output data?
Returns
-------
        filled_array : `~numpy.ndarray`
            An ndarray with the same shape as ``data[view]``, with the
            excluded elements replaced by ``fill``
Notes
-----
This is an internal method used by :class:`SpectralCube`.
Users should use the property :meth:`MaskBase.filled_data`
"""
# Must convert to floating point, but should not change from inherited
# type otherwise
dt = np.find_common_type([data.dtype], [float])
if use_memmap and data.size > 0:
ntf = tempfile.NamedTemporaryFile()
sliced_data = np.memmap(ntf, mode='w+', shape=data[view].shape,
dtype=dt)
sliced_data[:] = data[view]
else:
sliced_data = data[view].astype(dt)
ex = self.exclude(data=data, wcs=wcs, view=view, **kwargs)
return np.ma.masked_array(sliced_data, mask=ex).filled(fill)
def __and__(self, other):
return CompositeMask(self, other, operation='and')
def __or__(self, other):
return CompositeMask(self, other, operation='or')
def __xor__(self, other):
return CompositeMask(self, other, operation='xor')
def __invert__(self):
return InvertedMask(self)
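    # These operators make masks composable, e.g. (illustrative):
    #   combined = mask_a & ~mask_b
    # builds CompositeMask(mask_a, InvertedMask(mask_b), operation='and').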
@property
def shape(self):
raise NotImplementedError("{0} mask classes do not have shape attributes."
.format(self.__class__.__name__))
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
return np.product(self.shape)
@property
def dtype(self):
return np.dtype('bool')
def __getitem__(self):
raise NotImplementedError("Slicing not supported by mask class {0}"
.format(self.__class__.__name__))
def quicklook(self, view, wcs=None, filename=None, use_aplpy=True,
aplpy_kwargs={}):
'''
View a 2D slice of the mask, specified by view.
Parameters
----------
view : tuple
Slicing to apply to the mask. Must return a 2D slice.
wcs : astropy.wcs.WCS, optional
WCS object to use in plotting the mask slice.
filename : str, optional
Filename of the output image. Enables saving of the plot.
use_aplpy : bool, optional
Try plotting with the aplpy package
aplpy_kwargs : dict, optional
kwargs passed to `~aplpy.FITSFigure`.
'''
view_twod = self.include(view=view, wcs=wcs)
if use_aplpy:
if wcs is not None:
hdu = fits.PrimaryHDU(view_twod.astype(int), wcs.to_header())
else:
hdu = fits.PrimaryHDU(view_twod.astype(int))
try:
import aplpy
FITSFigure = aplpy.FITSFigure(hdu,
**aplpy_kwargs)
FITSFigure.show_grayscale()
FITSFigure.add_colorbar()
if filename is not None:
FITSFigure.save(filename)
except (InconsistentAxisTypesError, ImportError):
                use_aplpy = False
if not use_aplpy:
from matplotlib import pyplot
figure = pyplot.imshow(view_twod)
if filename is not None:
figure.savefig(filename)
def _get_new_wcs(self, unit, velocity_convention=None, rest_value=None):
"""
Returns a new WCS with a different Spectral Axis unit
"""
from .spectral_axis import convert_spectral_axis,determine_ctype_from_vconv
out_ctype = determine_ctype_from_vconv(self._wcs.wcs.ctype[self._wcs.wcs.spec],
unit,
velocity_convention=velocity_convention)
newwcs = convert_spectral_axis(self._wcs, unit, out_ctype,
rest_value=rest_value)
newwcs.wcs.set()
return newwcs
_get_new_wcs.__doc__ += with_spectral_unit_docs
class InvertedMask(MaskBase):
def __init__(self, mask):
self._mask = mask
@property
def shape(self):
return self._mask.shape
def _include(self, data=None, wcs=None, view=()):
return np.logical_not(self._mask.include(data=data, wcs=wcs, view=view))
def __getitem__(self, view):
return InvertedMask(self._mask[view])
def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None):
"""
Get an InvertedMask copy with a WCS in the modified unit
"""
newmask = self._mask.with_spectral_unit(unit,
velocity_convention=velocity_convention,
rest_value=rest_value)
return InvertedMask(newmask)
with_spectral_unit.__doc__ += with_spectral_unit_docs
class CompositeMask(MaskBase):
"""
A combination of several masks. The included masks are treated with the specified
operation.
Parameters
----------
mask1, mask2 : Masks
The two masks to composite
operation : str
Either 'and' or 'or'; the operation used to combine the masks
"""
def __init__(self, mask1, mask2, operation='and'):
if isinstance(mask1, np.ndarray) and isinstance(mask2, MaskBase) and hasattr(mask2, 'shape'):
if not is_broadcastable_and_smaller(mask1.shape, mask2.shape):
raise ValueError("Mask1 shape is not broadcastable to Mask2 shape: "
"%s vs %s" % (mask1.shape, mask2.shape))
mask1 = BooleanArrayMask(mask1, mask2._wcs, shape=mask2.shape)
elif isinstance(mask2, np.ndarray) and isinstance(mask1, MaskBase) and hasattr(mask1, 'shape'):
if not is_broadcastable_and_smaller(mask2.shape, mask1.shape):
raise ValueError("Mask2 shape is not broadcastable to Mask1 shape: "
"%s vs %s" % (mask2.shape, mask1.shape))
mask2 = BooleanArrayMask(mask2, mask1._wcs, shape=mask1.shape)
        # both entries must have compatible (which effectively means
        # equal) WCSes, unless one is a function.
        if hasattr(mask1, '_wcs') and hasattr(mask2, '_wcs'):
            mask1._validate_wcs(new_data=None, new_wcs=mask2._wcs)
# In order to composite composites, they must have a _wcs defined.
# (maybe this should be a property?)
self._wcs = mask1._wcs
elif hasattr(mask1, '_wcs'):
# if one mask doesn't have a WCS, but the other does, the
# compositemask should have the same WCS as the one that does
self._wcs = mask1._wcs
elif hasattr(mask2, '_wcs'):
self._wcs = mask2._wcs
self._mask1 = mask1
self._mask2 = mask2
self._operation = operation
def _validate_wcs(self, new_data=None, new_wcs=None, **kwargs):
self._mask1._validate_wcs(new_data=new_data, new_wcs=new_wcs, **kwargs)
self._mask2._validate_wcs(new_data=new_data, new_wcs=new_wcs, **kwargs)
@property
def shape(self):
try:
assert self._mask1.shape == self._mask2.shape
return self._mask1.shape
except AssertionError:
raise ValueError("The composite mask does not have a well-defined "
"shape; its two components have shapes {0} and "
"{1}.".format(self._mask1.shape,
self._mask2.shape))
except NotImplementedError:
raise ValueError("The composite mask contains at least one "
"component with no defined shape.")
def _include(self, data=None, wcs=None, view=()):
result_mask_1 = self._mask1._include(data=data, wcs=wcs, view=view)
result_mask_2 = self._mask2._include(data=data, wcs=wcs, view=view)
if self._operation == 'and':
return np.bitwise_and(result_mask_1, result_mask_2)
elif self._operation == 'or':
return np.bitwise_or(result_mask_1, result_mask_2)
elif self._operation == 'xor':
return np.bitwise_xor(result_mask_1, result_mask_2)
else:
raise ValueError("Operation '{0}' not supported".format(self._operation))
def __getitem__(self, view):
return CompositeMask(self._mask1[view], self._mask2[view],
operation=self._operation)
def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None):
"""
Get a CompositeMask copy in which each component has a WCS in the
modified unit
"""
newmask1 = self._mask1.with_spectral_unit(unit,
velocity_convention=velocity_convention,
rest_value=rest_value)
newmask2 = self._mask2.with_spectral_unit(unit,
velocity_convention=velocity_convention,
rest_value=rest_value)
return CompositeMask(newmask1, newmask2, self._operation)
with_spectral_unit.__doc__ += with_spectral_unit_docs
class BooleanArrayMask(MaskBase):
"""
A mask defined as an array on a spectral cube WCS
Parameters
----------
mask: `numpy.ndarray`
A boolean numpy ndarray
wcs: `astropy.wcs.WCS`
The WCS object
shape: tuple
The shape of the region the array is masking. This is *required* if
``mask.ndim != data.ndim`` to provide rules for how to broadcast the
mask
"""
def __init__(self, mask, wcs, shape=None, include=True):
self._mask_type = 'include' if include else 'exclude'
self._wcs = wcs
self._wcs_whitelist = set()
#if mask.ndim != 3 and (shape is None or len(shape) != 3):
# raise ValueError("When creating a BooleanArrayMask with <3 dimensions, "
# "the shape of the 3D array must be specified.")
if shape is not None and not is_broadcastable_and_smaller(mask.shape, shape):
raise ValueError("Mask cannot be broadcast to the specified shape.")
self._shape = shape or mask.shape
self._mask = mask
"""
Developer note (AG):
        The logic in the following section seems overly complicated. All
        of it is added to make sure that a 1D boolean array along the
        spectral axis can be created. I thought this was possible
        previously, but experienced many errors in my latest attempt to
        use one.
"""
# If a shape is given, we may need to broadcast to that shape
if shape is not None:
# these are dimensions that simply don't exist
n_empty_dims = (len(self._shape)-mask.ndim)
# these are dimensions of shape 1 that would be squeezed away but may
# be needed to make the arrays broadcastable (e.g., mask[:,None,None])
# Need to add n_empty_dims because (1,2) will broadcast to (3,1,2)
# and there will be no extra dims.
extra_dims = [ii
for ii,(sh1,sh2) in
enumerate(zip((0,)*n_empty_dims + mask.shape, shape))
if sh1 == 1 and sh1 != sh2]
            # Add the [None,]'s and the nonexistent dimensions
n_extra_dims = n_empty_dims + len(extra_dims)
# if there are no extra dims, we're done, the original shape is fine
if n_extra_dims > 0:
strides = (0,)*n_empty_dims + mask.strides
for ed in extra_dims:
# all of the [None,] dims should have 0 stride
assert strides[ed] == 0,"Stride shape failure"
self._mask = as_strided(mask, shape=self.shape,
strides=strides)
# Make sure the mask shape matches the Mask object shape
assert self._mask.shape == self.shape,"Shape initialization failure"
def _validate_wcs(self, new_data=None, new_wcs=None, **kwargs):
"""
Check that the new WCS matches the current one
Parameters
----------
kwargs : dict
Passed to `wcs_utils.check_equality`
"""
if new_data is not None and not is_broadcastable_and_smaller(self._mask.shape,
new_data.shape):
raise ValueError("data shape cannot be broadcast to match mask shape")
if new_wcs is not None:
if new_wcs not in self._wcs_whitelist:
try:
if not wcs_utils.check_equality(new_wcs, self._wcs,
warn_missing=True,
**kwargs):
raise ValueError("WCS does not match mask WCS")
else:
self._wcs_whitelist.add(new_wcs)
except InconsistentAxisTypesError:
warnings.warn("Inconsistent axis type encountered; WCS is "
"invalid and therefore will not be checked "
"against other WCSes.",
WCSWarning
)
self._wcs_whitelist.add(new_wcs)
def _include(self, data=None, wcs=None, view=()):
result_mask = self._mask[view]
return result_mask if self._mask_type == 'include' else np.logical_not(result_mask)
def _exclude(self, data=None, wcs=None, view=()):
result_mask = self._mask[view]
return result_mask if self._mask_type == 'exclude' else np.logical_not(result_mask)
@property
def shape(self):
return self._shape
def __getitem__(self, view):
return BooleanArrayMask(self._mask[view],
wcs_utils.slice_wcs(self._wcs, view,
shape=self.shape,
drop_degenerate=True),
shape=self._mask[view].shape)
def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None):
"""
Get a BooleanArrayMask copy with a WCS in the modified unit
"""
newwcs = self._get_new_wcs(unit, velocity_convention, rest_value)
newmask = BooleanArrayMask(self._mask, newwcs,
include=self._mask_type=='include')
return newmask
with_spectral_unit.__doc__ += with_spectral_unit_docs
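# A minimal usage sketch (illustration only, not used by the library): build a
# BooleanArrayMask from a boolean array plus a bare 3-axis WCS, then combine
# masks with the operators defined on MaskBase.  The array shape and WCS
# settings are arbitrary assumptions made for the example.
def _example_boolean_array_mask():
    import numpy as np
    from astropy.wcs import WCS

    wcs = WCS(naxis=3)
    arr = np.zeros((2, 3, 4), dtype=bool)
    arr[:, 1, :] = True

    mask_a = BooleanArrayMask(arr, wcs)
    mask_b = BooleanArrayMask(~arr, wcs)

    # `&`, `|` and `^` build CompositeMask instances; `~` builds InvertedMask.
    both = mask_a & mask_b
    either = mask_a | mask_b

    # Evaluate against a data array of matching shape via MaskBase.include.
    data = np.arange(24.).reshape(2, 3, 4)
    return both.include(data=data, wcs=wcs), either.include(data=data, wcs=wcs)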
class LazyMask(MaskBase):
"""
A boolean mask defined by the evaluation of a function on a fixed dataset.
This is conceptually identical to a fixed boolean mask as in
:class:`BooleanArrayMask` but defers the
evaluation of the mask until it is needed.
Parameters
----------
function : callable
The function to apply to ``data``. This method should accept
a numpy array, which will be a subset of the data array passed
to __init__. It should return a boolean array, where `True` values
        indicate which pixels are valid/unaffected by masking.
data : array-like
The array to evaluate ``function`` on. This should support Numpy-like
slicing syntax.
wcs : `~astropy.wcs.WCS`
The WCS of the input data, which is used to define the coordinates
for which the boolean mask is defined.
"""
def __init__(self, function, cube=None, data=None, wcs=None):
self._function = function
if cube is not None and (data is not None or wcs is not None):
raise ValueError("Pass only cube or (data & wcs)")
elif cube is not None:
self._data = cube._data
self._wcs = cube._wcs
elif data is not None and wcs is not None:
self._data = data
self._wcs = wcs
else:
raise ValueError("Either a cube or (data & wcs) is required.")
self._wcs_whitelist = set()
@property
def shape(self):
return self._data.shape
def _validate_wcs(self, new_data=None, new_wcs=None, **kwargs):
"""
Check that the new WCS matches the current one
Parameters
----------
kwargs : dict
Passed to `wcs_utils.check_equality`
"""
if new_data is not None:
if not is_broadcastable_and_smaller(new_data.shape, self._data.shape):
raise ValueError("data shape cannot be broadcast to match mask shape")
if new_wcs is not None:
if new_wcs not in self._wcs_whitelist:
if not wcs_utils.check_equality(new_wcs, self._wcs,
warn_missing=True, **kwargs):
raise ValueError("WCS does not match mask WCS")
else:
self._wcs_whitelist.add(new_wcs)
def _include(self, data=None, wcs=None, view=()):
self._validate_wcs(data, wcs)
return self._function(self._data[view])
def __getitem__(self, view):
return LazyMask(self._function, data=self._data[view],
wcs=wcs_utils.slice_wcs(self._wcs, view,
shape=self._data.shape,
drop_degenerate=True))
def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None):
"""
Get a LazyMask copy with a WCS in the modified unit
"""
newwcs = self._get_new_wcs(unit, velocity_convention, rest_value)
newmask = LazyMask(self._function, data=self._data, wcs=newwcs)
return newmask
with_spectral_unit.__doc__ += with_spectral_unit_docs
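# A minimal usage sketch (illustration only): LazyMask stores the data and a
# criterion function, and only runs the function when the mask is evaluated.
# The "finite and positive" criterion below is an arbitrary example.
def _example_lazy_mask():
    import numpy as np
    from astropy.wcs import WCS

    data = np.random.randn(2, 3, 4)
    wcs = WCS(naxis=3)
    lazy = LazyMask(lambda values: np.isfinite(values) & (values > 0),
                    data=data, wcs=wcs)

    # The lambda is evaluated here, on the (optionally sliced) data array.
    return lazy.include(data=data, wcs=wcs)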
class LazyComparisonMask(LazyMask):
"""
A boolean mask defined by the evaluation of a comparison function between a
fixed dataset and some other value.
This is conceptually similar to the :class:`LazyMask` but it will ensure
that the comparison value can be compared to the data
Parameters
----------
function : callable
The function to apply to ``data``. This method should accept
a numpy array, which will be the data array passed to __init__, and a
second argument also passed to __init__. It should return a boolean
        array, where `True` values indicate which pixels are
valid/unaffected by masking.
comparison_value : float or array
The comparison value for the array
data : array-like
The array to evaluate ``function`` on. This should support Numpy-like
slicing syntax.
wcs : `~astropy.wcs.WCS`
The WCS of the input data, which is used to define the coordinates
for which the boolean mask is defined.
"""
def __init__(self, function, comparison_value, cube=None, data=None,
wcs=None):
self._function = function
if cube is not None and (data is not None or wcs is not None):
raise ValueError("Pass only cube or (data & wcs)")
elif cube is not None:
self._data = cube._data
self._wcs = cube._wcs
elif data is not None and wcs is not None:
self._data = data
self._wcs = wcs
else:
raise ValueError("Either a cube or (data & wcs) is required.")
if (hasattr(comparison_value, 'shape') and not
is_broadcastable_and_smaller(self._data.shape,
comparison_value.shape)):
raise ValueError("The data and the comparison value cannot "
"be broadcast to match shape")
self._comparison_value = comparison_value
self._wcs_whitelist = set()
def _include(self, data=None, wcs=None, view=()):
self._validate_wcs(data, wcs)
if hasattr(self._comparison_value, 'shape') and self._comparison_value.shape:
cv_view = view_of_subset(self._comparison_value.shape,
self._data.shape, view)
return self._function(self._data[view],
self._comparison_value[cv_view])
else:
return self._function(self._data[view],
self._comparison_value)
def __getitem__(self, view):
if hasattr(self._comparison_value, 'shape') and self._comparison_value.shape:
cv_view = view_of_subset(self._comparison_value.shape,
self._data.shape, view)
return LazyComparisonMask(self._function, data=self._data[view],
comparison_value=self._comparison_value[cv_view],
wcs=wcs_utils.slice_wcs(self._wcs, view,
drop_degenerate=True))
else:
return LazyComparisonMask(self._function, data=self._data[view],
comparison_value=self._comparison_value,
wcs=wcs_utils.slice_wcs(self._wcs, view,
drop_degenerate=True))
def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None):
"""
Get a LazyComparisonMask copy with a WCS in the modified unit
"""
newwcs = self._get_new_wcs(unit, velocity_convention, rest_value)
newmask = LazyComparisonMask(self._function, data=self._data,
comparison_value=self._comparison_value,
wcs=newwcs)
return newmask
class FunctionMask(MaskBase):
"""
A mask defined by a function that is evaluated at run-time using the data
passed to the mask.
    This class differs from :class:`LazyMask` in the arguments that
    are passed to the function. FunctionMasks receive an array,
wcs object, and view, whereas LazyMasks receive pre-sliced views
into an array specified at mask-creation time.
Parameters
----------
function : callable
The function to evaluate the mask. The call signature should be
``function(data, wcs, slice)`` where ``data`` and ``wcs`` are the
arguments that get passed to e.g. ``include``, ``exclude``,
``_filled``, and ``_flattened``. The function should return
        a boolean array, where `True` values indicate which pixels
are valid / unaffected by masking.
"""
def __init__(self, function):
self._function = function
def _validate_wcs(self, new_data=None, new_wcs=None, **kwargs):
pass
def _include(self, data=None, wcs=None, view=()):
result = self._function(data, wcs, view)
if result.shape != data[view].shape:
raise ValueError("Function did not return mask with correct shape - expected {0}, got {1}".format(data[view].shape, result.shape))
return result
def __getitem__(self, slice):
return self
def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None):
"""
Functional masks do not have WCS defined, so this simply returns a copy
of the current mask in order to be consistent with
``with_spectral_unit`` from other Masks
"""
return FunctionMask(self._function)
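# A minimal usage sketch (illustration only): FunctionMask defers everything to
# call time, so the stored function receives the data array, the WCS and the
# current view and must return a boolean array with the shape of the viewed
# data.  Because no WCS or shape is stored, it composes freely with the other
# mask classes; the two criteria below are arbitrary examples.
def _example_function_mask():
    import numpy as np

    positive = FunctionMask(lambda data, wcs, view: data[view] > 0)
    finite = FunctionMask(lambda data, wcs, view: np.isfinite(data[view]))

    data = np.array([[1.0, -1.0], [np.nan, 2.0]])
    combined = positive & finite            # CompositeMask with 'and'
    return combined.include(data=data)      # [[True, False], [False, True]]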
|
|
"""
The :mod:`sklearn.grid_search` module includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import _check_cv as check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
samples = []
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# get complete grid and yield from it
param_grid = list(ParameterGrid(self.param_distributions))
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
while len(samples) < self.n_iter:
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
samples.append(params)
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
    # A raw namedtuple is very memory efficient, as it packs the attributes
    # into a struct and gets rid of the per-instance __dict__; in particular
    # it does not copy the key strings onto each instance.
    # Deriving a namedtuple subclass just to introduce the __repr__ method
    # would reintroduce the __dict__ on the instance, so we tell the Python
    # interpreter that this subclass uses static __slots__ instead of
    # dynamic attributes. We don't need any additional slot in the subclass,
    # so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
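# A small sketch (illustration only) of what the empty __slots__ above buys:
# instances keep the compact tuple layout and never grow a per-instance
# __dict__.
def _example_cvscore_tuple_slots():
    point = _CVScoreTuple({'C': 1}, 0.8, np.array([0.7, 0.9]))
    assert not hasattr(point, '__dict__')   # no per-instance dict
    return repr(point)   # "mean: 0.80000, std: 0.10000, params: {'C': 1}"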
class ChangedBehaviorWarning(UserWarning):
pass
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a flat list with one entry per fit; each entry unpacks as
        # (score, n_test_samples, _, parameters) in the loop below
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
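# A small sketch (illustration only) of how ``_fit`` reduces the flat ``out``
# list (one entry per parameter setting and fold, unpacked above as
# (score, n_test_samples, _, parameters)) to one mean score per parameter
# setting: with ``iid=True`` fold scores are weighted by the number of test
# samples, otherwise they are averaged over the folds.
def _example_aggregate_scores(out, n_folds, iid=True):
    aggregated = []
    for start in range(0, len(out), n_folds):
        chunk = out[start:start + n_folds]
        parameters = chunk[0][3]
        if iid:
            weighted = sum(score * n_test for score, n_test, _, _ in chunk)
            mean = weighted / float(sum(n_test for _, n_test, _, _ in chunk))
        else:
            mean = sum(score for score, _, _, _ in chunk) / float(n_folds)
        aggregated.append((mean, parameters))
    return aggregated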
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
        Dictionary with parameter names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
degree=..., gamma=..., kernel='rbf', max_iter=-1,
probability=False, random_state=None, shrinking=True,
tol=..., verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, loss_func=None,
score_func=None, fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
        Dictionary with parameter names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
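# A usage sketch (illustration only), mirroring the GridSearchCV doctest above:
# list-valued parameters are sampled uniformly, while objects exposing ``rvs``
# (here scipy's ``expon``) are treated as distributions.  The estimator, data
# set and parameter choices are arbitrary assumptions for the example.
def _example_randomized_search():
    from scipy.stats import expon
    from sklearn import svm, datasets

    iris = datasets.load_iris()
    param_distributions = {'C': expon(scale=10), 'kernel': ['linear', 'rbf']}
    search = RandomizedSearchCV(svm.SVC(), param_distributions,
                                n_iter=8, random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_, search.best_score_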
|
|
#!/usr/bin/python
"""
sc_video.py
This file includes functions to:
Initialise the camera
Initialise the output video
Image size is held in the smart_camera.cnf
"""
import sys
from os.path import expanduser
import time
import math
import multiprocessing
import cv2
import numpy as np
import sc_config
from sc_logger import sc_logger
class SmartCameraVideo:
def __init__(self):
# get which camera we will use
self.camera_index = sc_config.config.get_integer('camera','index',0)
# get image resolution
self.img_width = sc_config.config.get_integer('camera','width',640)
self.img_height = sc_config.config.get_integer('camera','height',480)
# get image center
self.img_center_x = self.img_width / 2
self.img_center_y = self.img_height / 2
# define field of view
self.cam_hfov = sc_config.config.get_float('camera','horizontal-fov',70.42)
self.cam_vfov = sc_config.config.get_float('camera','vertical-fov',43.3)
#get camera distortion matrix and intrinsics. Defaults: logitech c920
mtx = np.array([[ 614.01269552,0,315.00073982],
[0,614.43556296,237.14926858],
[0,0,1.0]])
dist = np.array([0.12269303, -0.26618881,0.00129035, 0.00081791,0.17005303])
self.matrix = sc_config.config.get_array('camera','matrix', mtx)
self.distortion = sc_config.config.get_array('camera', 'distortion', dist)
self.newcameramtx, self.roi=cv2.getOptimalNewCameraMatrix(self.matrix,self.distortion,(self.img_width,self.img_height),1,(self.img_width,self.img_height))
#create a camera object
self.camera = None
#number of cores available for use
desiredCores = sc_config.config.get_integer('processing', 'desired_cores', 4)
self.cores_available = min(desiredCores, multiprocessing.cpu_count())
#does the user want to capture images in the background
self.background_capture = sc_config.config.get_boolean('processing','background_capture', True)
# background image processing variables
self.proc = None # background process object
        self.parent_conn = None # parent end of communication pipe
self.img_counter = 0 # num images requested so far
self.is_backgroundCap = False #state variable for background capture
# __str__ - print position vector as string
def __str__(self):
return "SmartCameraVideo Object W:%d H:%d" % (self.img_width, self.img_height)
# get_camera - initialises camera and returns VideoCapture object
def get_camera(self,index):
sc_logger.text(sc_logger.GENERAL, 'Starting Camera....')
# setup video capture
self.camera = cv2.VideoCapture(index)
self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH,self.img_width)
self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT,self.img_height)
# check we can connect to camera
if not self.camera.isOpened():
sc_logger.text(sc_logger.GENERAL,"failed to open camera, exiting!")
sys.exit(0)
sc_logger.text(sc_logger.GENERAL, 'Camera Open!')
return self.camera
#
# background image processing routines
#
    # image_capture_background - captures images from the camera in the background and returns the latest image via the pipe when the parent requests it
def image_capture_background(self, imgcap_connection):
# exit immediately if imgcap_connection is invalid
if imgcap_connection is None:
sc_logger.text(sc_logger.GENERAL, "image_capture failed because pipe is uninitialised")
return
# clear latest image
latest_image = None
while True:
# constantly get the image from the webcam
success_flag, image=self.camera.read()
# if successful overwrite our latest image
if success_flag:
latest_image = image
# check if the parent wants the image
if imgcap_connection.poll():
recv_obj = imgcap_connection.recv()
# if -1 is received we exit
if recv_obj == -1:
break
# otherwise we return the latest image
imgcap_connection.send(latest_image)
# release camera when exiting
self.camera.release()
def stop_capture(self):
        #Clean up when exiting background capture
if(self.is_backgroundCap):
# send exit command to image capture process
self.parent_conn.send(-1)
# join process
self.proc.join()
#no clean up required with regular capture
def start_capture(self,index = 0):
        #make sure a camera is initialized
if self.camera is None:
self.get_camera(index)
#background capture is desired
if self.background_capture:
#if we have more than one core available, then start background capture
if(self.cores_available > 1):
# create pipe
self.parent_conn, imgcap_conn = multiprocessing.Pipe()
                # create and start the sub process and pass it its end of the pipe
self.proc = multiprocessing.Process(target=self.image_capture_background, args=(imgcap_conn,))
self.proc.daemon = True
self.proc.start()
#Mark that we are in background capture mode
self.is_backgroundCap = True
else:
#Not enough cores for background capture or just doing regular capture
self.is_backgroundCap = False
# get_image - returns latest image from the camera captured from the background process
def get_image(self):
#grab image from pipe of background capture
if(self.is_backgroundCap):
# return immediately if pipe is not initialised
if self.parent_conn == None:
return None
# send request to image capture for image
self.parent_conn.send(self.img_counter)
            # increment counter for next iteration
self.img_counter = self.img_counter + 1
# wait endlessly until image is returned
img = self.parent_conn.recv()
#use standard image cap
else:
#Grab an image
success_flag, img= self.camera.read()
# return image to caller
return img
    #undisort_image - removes any distortion caused by the camera lens
def undisort_image(self,frame):
#undistort
dst = cv2.undistort(frame, self.matrix, self.distortion, None, self.newcameramtx)
# crop the image
x,y,w,h = self.roi
dst = dst[y:y+h, x:x+w]
return dst
# main - tests SmartCameraVideo class
def main(self):
#open a camera
#self.get_camera(0)
# start background process
self.start_capture(self.camera_index)
#did we start background capture
print 'Background capture {0}'.format(self.is_backgroundCap)
while True:
# send request to image capture for image
img = self.get_image()
#undistort image
img = self.undisort_image(img)
# check image is valid
if not img is None:
# display image
cv2.imshow ('image_display', img)
else:
print "no image"
# check for ESC key being pressed
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
# take a rest for a bit
time.sleep(0.1)
# send exit command to image capture process
self.stop_capture()
print "a2p 10 = %f" % self.angle_to_pixels_x(10)
print "p2a 10 = %f" % self.pixels_to_angle_x(10)
# create a single global object
sc_video = SmartCameraVideo()
if __name__ == "__main__":
sc_video.main()
|
|
#! /usr/bin/env python
"""Static analysis tool for checking docstring conventions and style.
Implemented checks cover PEP257:
http://www.python.org/dev/peps/pep-0257/
Other checks can be added, e.g. NumPy docstring conventions:
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
The repository is located at:
http://github.com/GreenSteam/pep257
"""
from __future__ import with_statement
import os
import sys
import logging
import tokenize as tk
from itertools import takewhile, dropwhile, chain
from optparse import OptionParser
from re import compile as re
import itertools
try: # Python 2.x
    from ConfigParser import RawConfigParser
except ImportError: # Python 3.x
    from configparser import RawConfigParser
log = logging.getLogger(__name__)
try:
from StringIO import StringIO
except ImportError: # Python 3.0 and later
from io import StringIO
try:
next
except NameError: # Python 2.5 and earlier
nothing = object()
def next(obj, default=nothing):
if default == nothing:
return obj.next()
else:
try:
return obj.next()
except StopIteration:
return default
# If possible (python >= 3.2) use tokenize.open to open files, so PEP 263
# encoding markers are interpreted.
try:
tokenize_open = tk.open
except AttributeError:
tokenize_open = open
__version__ = '0.6.1-alpha'
__all__ = ('check', 'collect')
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep257')
NO_VIOLATIONS_RETURN_CODE = 0
VIOLATIONS_RETURN_CODE = 1
INVALID_OPTIONS_RETURN_CODE = 2
def humanize(string):
return re(r'(.)([A-Z]+)').sub(r'\1 \2', string).lower()
def is_magic(name):
return name.startswith('__') and name.endswith('__')
def is_ascii(string):
return all(ord(char) < 128 for char in string)
def is_blank(string):
return not string.strip()
def leading_space(string):
    return re(r'\s*').match(string).group()
class Value(object):
def __init__(self, *args):
vars(self).update(zip(self._fields, args))
def __hash__(self):
return hash(repr(self))
def __eq__(self, other):
return other and vars(self) == vars(other)
def __repr__(self):
kwargs = ', '.join('{}={!r}'.format(field, getattr(self, field))
for field in self._fields)
return '{}({})'.format(self.__class__.__name__, kwargs)
class Definition(Value):
_fields = ('name', '_source', 'start', 'end', 'decorators', 'docstring',
'children', 'parent')
_human = property(lambda self: humanize(type(self).__name__))
kind = property(lambda self: self._human.split()[-1])
module = property(lambda self: self.parent.module)
all = property(lambda self: self.module.all)
_slice = property(lambda self: slice(self.start - 1, self.end))
source = property(lambda self: ''.join(self._source[self._slice]))
def __iter__(self):
return chain([self], *self.children)
@property
def _publicity(self):
return {True: 'public', False: 'private'}[self.is_public]
def __str__(self):
return 'in %s %s `%s`' % (self._publicity, self._human, self.name)
class Module(Definition):
_fields = ('name', '_source', 'start', 'end', 'decorators', 'docstring',
'children', 'parent', '_all')
is_public = True
_nest = staticmethod(lambda s: {'def': Function, 'class': Class}[s])
module = property(lambda self: self)
all = property(lambda self: self._all)
def __str__(self):
return 'at module level'
class Package(Module):
"""A package is a __init__.py module."""
class Function(Definition):
_nest = staticmethod(lambda s: {'def': NestedFunction,
'class': NestedClass}[s])
@property
def is_public(self):
if self.all is not None:
return self.name in self.all
else: # TODO: are there any magic functions? not methods
return not self.name.startswith('_') or is_magic(self.name)
class NestedFunction(Function):
is_public = False
class Method(Function):
@property
def is_public(self):
# Check if we are a setter/deleter method, and mark as private if so.
for decorator in self.decorators:
# Given 'foo', match 'foo.bar' but not 'foobar' or 'sfoo'
if re(r"^{0}\.".format(self.name)).match(decorator.name):
return False
name_is_public = not self.name.startswith('_') or is_magic(self.name)
return self.parent.is_public and name_is_public
class Class(Definition):
_nest = staticmethod(lambda s: {'def': Method, 'class': NestedClass}[s])
is_public = Function.is_public
class NestedClass(Class):
is_public = False
class Decorator(Value):
"""A decorator for function, method or class."""
_fields = 'name arguments'.split()
class TokenKind(int):
def __repr__(self):
return "tk.{}".format(tk.tok_name[self])
class Token(Value):
_fields = 'kind value start end source'.split()
def __init__(self, *args):
super(Token, self).__init__(*args)
self.kind = TokenKind(self.kind)
class TokenStream(object):
def __init__(self, filelike):
self._generator = tk.generate_tokens(filelike.readline)
self.current = Token(*next(self._generator, None))
self.line = self.current.start[0]
def move(self):
previous = self.current
current = next(self._generator, None)
self.current = None if current is None else Token(*current)
self.line = self.current.start[0] if self.current else self.line
return previous
def __iter__(self):
while True:
if self.current is not None:
yield self.current
else:
return
self.move()
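# A small sketch (illustration only) of how the parser walks tokens through
# TokenStream: ``current`` always holds the next unconsumed token, ``move``
# returns it and advances, and ``line`` tracks the current physical line.
def _example_token_stream():
    stream = TokenStream(StringIO('x = 1\n'))
    kinds = []
    while stream.current is not None:
        token = stream.move()
        kinds.append(tk.tok_name[token.kind])
    # e.g. ['NAME', 'OP', 'NUMBER', 'NEWLINE', 'ENDMARKER']
    return kinds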
class AllError(Exception):
def __init__(self, message):
Exception.__init__(
self, message +
'That means pep257 cannot decide which definitions are public. '
'Variable __all__ should be present at most once in each file, '
"in form `__all__ = ('a_public_function', 'APublicClass', ...)`. "
'More info on __all__: http://stackoverflow.com/q/44834/. ')
class Parser(object):
def __call__(self, filelike, filename):
self.source = filelike.readlines()
src = ''.join(self.source)
self.stream = TokenStream(StringIO(src))
self.filename = filename
self.all = None
self._accumulated_decorators = []
return self.parse_module()
current = property(lambda self: self.stream.current)
line = property(lambda self: self.stream.line)
def consume(self, kind):
assert self.stream.move().kind == kind
def leapfrog(self, kind, value=None):
"""Skip tokens in the stream until a certain token kind is reached.
If `value` is specified, tokens whose values are different will also
be skipped.
"""
while self.current is not None:
if (self.current.kind == kind and
(value is None or self.current.value == value)):
self.consume(kind)
return
self.stream.move()
def parse_docstring(self):
"""Parse a single docstring and return its value."""
log.debug("parsing docstring, token is %r (%s)",
self.current.kind, self.current.value)
while self.current.kind in (tk.COMMENT, tk.NEWLINE, tk.NL):
self.stream.move()
log.debug("parsing docstring, token is %r (%s)",
self.current.kind, self.current.value)
if self.current.kind == tk.STRING:
docstring = self.current.value
self.stream.move()
return docstring
return None
def parse_decorators(self):
"""Called after first @ is found.
Parse decorators into self._accumulated_decorators.
Continue to do so until encountering the 'def' or 'class' start token.
"""
name = []
arguments = []
at_arguments = False
while self.current is not None:
if (self.current.kind == tk.NAME and
self.current.value in ['def', 'class']):
# Done with decorators - found function or class proper
break
elif self.current.kind == tk.OP and self.current.value == '@':
# New decorator found. Store the decorator accumulated so far:
self._accumulated_decorators.append(
Decorator(''.join(name), ''.join(arguments)))
# Now reset to begin accumulating the new decorator:
name = []
arguments = []
at_arguments = False
elif self.current.kind == tk.OP and self.current.value == '(':
at_arguments = True
elif self.current.kind == tk.OP and self.current.value == ')':
# Ignore close parenthesis
pass
elif self.current.kind == tk.NEWLINE or self.current.kind == tk.NL:
# Ignore newlines
pass
else:
# Keep accumulating current decorator's name or argument.
if not at_arguments:
name.append(self.current.value)
else:
arguments.append(self.current.value)
self.stream.move()
# Add decorator accumulated so far
self._accumulated_decorators.append(
Decorator(''.join(name), ''.join(arguments)))
def parse_definitions(self, class_, all=False):
"""Parse multiple defintions and yield them."""
while self.current is not None:
log.debug("parsing defintion list, current token is %r (%s)",
self.current.kind, self.current.value)
if all and self.current.value == '__all__':
self.parse_all()
elif self.current.kind == tk.OP and self.current.value == '@':
self.consume(tk.OP)
self.parse_decorators()
elif self.current.value in ['def', 'class']:
yield self.parse_definition(class_._nest(self.current.value))
elif self.current.kind == tk.INDENT:
self.consume(tk.INDENT)
for definition in self.parse_definitions(class_):
yield definition
elif self.current.kind == tk.DEDENT:
self.consume(tk.DEDENT)
return
else:
self.stream.move()
def parse_all(self):
"""Parse the __all__ definition in a module."""
assert self.current.value == '__all__'
self.consume(tk.NAME)
if self.current.value != '=':
raise AllError('Could not evaluate contents of __all__. ')
self.consume(tk.OP)
if self.current.value not in '([':
raise AllError('Could not evaluate contents of __all__. ')
if self.current.value == '[':
msg = ("%s WARNING: __all__ is defined as a list, this means "
"pep257 cannot reliably detect contents of the __all__ "
"variable, because it can be mutated. Change __all__ to be "
"an (immutable) tuple, to remove this warning. Note, "
"pep257 uses __all__ to detect which definitions are "
"public, to warn if public definitions are missing "
"docstrings. If __all__ is a (mutable) list, pep257 cannot "
"reliably assume its contents. pep257 will proceed "
"assuming __all__ is not mutated.\n" % self.filename)
sys.stderr.write(msg)
self.consume(tk.OP)
self.all = []
all_content = "("
while self.current.kind != tk.OP or self.current.value not in ")]":
if self.current.kind in (tk.NL, tk.COMMENT):
pass
elif (self.current.kind == tk.STRING or
self.current.value == ','):
all_content += self.current.value
else:
                kind = tk.tok_name[self.current.kind]
raise AllError('Unexpected token kind in __all__: %s' % kind)
self.stream.move()
self.consume(tk.OP)
all_content += ")"
try:
self.all = eval(all_content, {})
except BaseException as e:
raise AllError('Could not evaluate contents of __all__.'
                           '\nThe value was %s. The exception was:\n%s'
% (all_content, e))
def parse_module(self):
"""Parse a module (and its children) and return a Module object."""
log.debug("parsing module.")
start = self.line
docstring = self.parse_docstring()
children = list(self.parse_definitions(Module, all=True))
assert self.current is None, self.current
end = self.line
cls = Module
if self.filename.endswith('__init__.py'):
cls = Package
module = cls(self.filename, self.source, start, end,
[], docstring, children, None, self.all)
for child in module.children:
child.parent = module
log.debug("finished parsing module.")
return module
def parse_definition(self, class_):
"""Parse a defintion and return its value in a `class_` object."""
start = self.line
self.consume(tk.NAME)
name = self.current.value
log.debug("parsing %s '%s'", class_.__name__, name)
self.stream.move()
if self.current.kind == tk.OP and self.current.value == '(':
parenthesis_level = 0
while True:
if self.current.kind == tk.OP:
if self.current.value == '(':
parenthesis_level += 1
elif self.current.value == ')':
parenthesis_level -= 1
if parenthesis_level == 0:
break
self.stream.move()
if self.current.kind != tk.OP or self.current.value != ':':
self.leapfrog(tk.OP, value=":")
else:
self.consume(tk.OP)
if self.current.kind in (tk.NEWLINE, tk.COMMENT):
self.leapfrog(tk.INDENT)
assert self.current.kind != tk.INDENT
docstring = self.parse_docstring()
decorators = self._accumulated_decorators
self._accumulated_decorators = []
log.debug("parsing nested defintions.")
children = list(self.parse_definitions(class_))
log.debug("finished parsing nested defintions for '%s'", name)
end = self.line - 1
else: # one-liner definition
docstring = self.parse_docstring()
decorators = [] # TODO
children = []
end = self.line
self.leapfrog(tk.NEWLINE)
definition = class_(name, self.source, start, end,
decorators, docstring, children, None)
for child in definition.children:
child.parent = definition
log.debug("finished parsing %s '%s'. Next token is %r (%s)",
class_.__name__, name, self.current.kind,
self.current.value)
return definition
class Error(object):
"""Error in docstring style."""
# should be overridden by inheriting classes
code = None
short_desc = None
context = None
# Options that define how errors are printed:
explain = False
source = False
def __init__(self, *parameters):
self.parameters = parameters
self.definition = None
self.explanation = None
def set_context(self, definition, explanation):
self.definition = definition
self.explanation = explanation
filename = property(lambda self: self.definition.module.name)
line = property(lambda self: self.definition.start)
@property
def message(self):
ret = '%s: %s' % (self.code, self.short_desc)
if self.context is not None:
ret += ' (' + self.context % self.parameters + ')'
return ret
@property
def lines(self):
source = ''
lines = self.definition._source[self.definition._slice]
offset = self.definition.start
lines_stripped = list(reversed(list(dropwhile(is_blank,
reversed(lines)))))
numbers_width = 0
for n, line in enumerate(lines_stripped):
numbers_width = max(numbers_width, n + offset)
numbers_width = len(str(numbers_width))
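        # NOTE: the width computed above is overridden by a fixed width of 6 below.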
numbers_width = 6
for n, line in enumerate(lines_stripped):
source += '%*d: %s' % (numbers_width, n + offset, line)
if n > 5:
source += ' ...\n'
break
return source
def __str__(self):
self.explanation = '\n'.join(l for l in self.explanation.split('\n')
if not is_blank(l))
template = '%(filename)s:%(line)s %(definition)s:\n %(message)s'
if self.source and self.explain:
template += '\n\n%(explanation)s\n\n%(lines)s\n'
elif self.source and not self.explain:
template += '\n\n%(lines)s\n'
elif self.explain and not self.source:
template += '\n\n%(explanation)s\n\n'
return template % dict((name, getattr(self, name)) for name in
['filename', 'line', 'definition', 'message',
'explanation', 'lines'])
__repr__ = __str__
def __lt__(self, other):
return (self.filename, self.line) < (other.filename, other.line)
class ErrorRegistry(object):
groups = []
class ErrorGroup(object):
def __init__(self, prefix, name):
self.prefix = prefix
self.name = name
self.errors = []
def create_error(self, error_code, error_desc, error_context=None):
# TODO: check prefix
class _Error(Error):
code = error_code
short_desc = error_desc
context = error_context
self.errors.append(_Error)
return _Error
@classmethod
def create_group(cls, prefix, name):
group = cls.ErrorGroup(prefix, name)
cls.groups.append(group)
return group
@classmethod
def get_error_codes(cls):
for group in cls.groups:
for error in group.errors:
yield error.code
@classmethod
def to_rst(cls):
sep_line = '+' + 6 * '-' + '+' + '-' * 71 + '+\n'
blank_line = '|' + 78 * ' ' + '|\n'
table = ''
for group in cls.groups:
table += sep_line
table += blank_line
table += '|' + ('**%s**' % group.name).center(78) + '|\n'
table += blank_line
for error in group.errors:
table += sep_line
table += ('|' + error.code.center(6) + '| ' +
error.short_desc.ljust(70) + '|\n')
table += sep_line
return table
D1xx = ErrorRegistry.create_group('D1', 'Missing Docstrings')
D100 = D1xx.create_error('D100', 'Missing docstring in public module')
D101 = D1xx.create_error('D101', 'Missing docstring in public class')
D102 = D1xx.create_error('D102', 'Missing docstring in public method')
D103 = D1xx.create_error('D103', 'Missing docstring in public function')
D104 = D1xx.create_error('D104', 'Missing docstring in public package')
D2xx = ErrorRegistry.create_group('D2', 'Whitespace Issues')
D200 = D2xx.create_error('D200', 'One-line docstring should fit on one line '
'with quotes', 'found %s')
D201 = D2xx.create_error('D201', 'No blank lines allowed before function '
'docstring', 'found %s')
D202 = D2xx.create_error('D202', 'No blank lines allowed after function '
'docstring', 'found %s')
D203 = D2xx.create_error('D203', '1 blank line required before class '
'docstring', 'found %s')
D204 = D2xx.create_error('D204', '1 blank line required after class '
'docstring', 'found %s')
D205 = D2xx.create_error('D205', '1 blank line required between summary line '
'and description', 'found %s')
D206 = D2xx.create_error('D206', 'Docstring should be indented with spaces, '
'not tabs')
D207 = D2xx.create_error('D207', 'Docstring is under-indented')
D208 = D2xx.create_error('D208', 'Docstring is over-indented')
D209 = D2xx.create_error('D209', 'Multi-line docstring closing quotes should '
'be on a separate line')
D210 = D2xx.create_error('D210', 'No whitespaces allowed surrounding '
'docstring text')
D3xx = ErrorRegistry.create_group('D3', 'Quotes Issues')
D300 = D3xx.create_error('D300', 'Use """triple double quotes"""',
'found %s-quotes')
D301 = D3xx.create_error('D301', 'Use r""" if any backslashes in a docstring')
D302 = D3xx.create_error('D302', 'Use u""" for Unicode docstrings')
D4xx = ErrorRegistry.create_group('D4', 'Docstring Content Issues')
D400 = D4xx.create_error('D400', 'First line should end with a period',
'not %r')
D401 = D4xx.create_error('D401', 'First line should be in imperative mood',
'%r, not %r')
D402 = D4xx.create_error('D402', 'First line should not be the function\'s '
'"signature"')
class Conventions(object):
pep257 = set(ErrorRegistry.get_error_codes())
def get_option_parser():
parser = OptionParser(version=__version__,
usage='Usage: pep257 [options] [<file|dir>...]')
parser.config_options = ('explain', 'source', 'ignore', 'match', 'select',
'match-dir', 'debug', 'verbose', 'count',
'convention')
option = parser.add_option
option('-e', '--explain', action='store_true',
help='show explanation of each error')
option('-s', '--source', action='store_true',
help='show source for each error')
option('--select', metavar='<codes>', default='',
help='choose the basic list of checked errors by specifying which '
'errors to check for (with a list of comma-separated error '
'codes). for example: --select=D101,D202')
option('--ignore', metavar='<codes>', default='',
help='choose the basic list of checked errors by specifying which '
'errors to ignore (with a list of comma-separated error '
'codes). for example: --ignore=D101,D202')
option('--convention', metavar='<name>', default='',
help='choose the basic list of checked errors by specifying an '
'existing convention. for example: --convention=pep257')
option('--add-select', metavar='<codes>', default='',
help='amend the list of errors to check for by specifying more '
'error codes to check.')
option('--add-ignore', metavar='<codes>', default='',
help='amend the list of errors to check for by specifying more '
'error codes to ignore.')
option('--match', metavar='<pattern>', default='(?!test_).*\.py',
help="check only files that exactly match <pattern> regular "
"expression; default is --match='(?!test_).*\.py' which "
"matches files that don't start with 'test_' but end with "
"'.py'")
option('--match-dir', metavar='<pattern>', default='[^\.].*',
help="search only dirs that exactly match <pattern> regular "
"expression; default is --match-dir='[^\.].*', which matches "
"all dirs that don't start with a dot")
option('-d', '--debug', action='store_true',
help='print debug information')
option('-v', '--verbose', action='store_true',
help='print status information')
option('--count', action='store_true',
help='print total number of errors to stdout')
return parser
def collect(names, match=lambda name: True, match_dir=lambda name: True):
"""Walk dir trees under `names` and generate filnames that `match`.
Example
-------
>>> sorted(collect(['non-dir.txt', './'],
... match=lambda name: name.endswith('.py')))
['non-dir.txt', './pep257.py', './setup.py', './test_pep257.py']
"""
for name in names: # map(expanduser, names):
if os.path.isdir(name):
for root, dirs, filenames in os.walk(name):
# Skip any dirs that do not match match_dir
dirs[:] = [dir for dir in dirs if match_dir(dir)]
for filename in filenames:
if match(filename):
yield os.path.join(root, filename)
else:
yield name
def check(filenames, select=None, ignore=None):
"""Generate PEP 257 errors that exist in `filenames` iterable.
    Only yield errors whose codes are selected via `select`, or not excluded
    via `ignore`; if neither is given, the default pep257 convention is used.
    Example
    -------
    >>> check(['pep257.py'], select=['D100'])
<generator object check at 0x...>
"""
if select and ignore:
raise ValueError('Cannot pass both select and ignore. They are '
'mutually exclusive.')
elif select or ignore:
checked_codes = (select or
set(ErrorRegistry.get_error_codes()) - set(ignore))
else:
checked_codes = Conventions.pep257
for filename in filenames:
log.info('Checking file %s.', filename)
try:
with tokenize_open(filename) as file:
source = file.read()
for error in PEP257Checker().check_source(source, filename):
code = getattr(error, 'code', None)
if code in checked_codes:
yield error
except (EnvironmentError, AllError):
yield sys.exc_info()[1]
except tk.TokenError:
yield SyntaxError('invalid syntax in file %s' % filename)
def get_options(args, opt_parser):
config = RawConfigParser()
parent = tail = os.path.abspath(os.path.commonprefix(args))
config_found = False
while tail and not config_found:
log.info(tail)
for fn in PROJECT_CONFIG:
full_path = os.path.join(parent, fn)
if config.read(full_path):
log.info('local configuration: in %s.', full_path)
config_found = True
break
parent, tail = os.path.split(parent)
new_options = None
if config.has_section('pep257'):
option_list = dict([(o.dest, o.type or o.action)
for o in opt_parser.option_list])
# First, read the default values
new_options, _ = opt_parser.parse_args([])
# Second, parse the configuration
pep257_section = 'pep257'
for opt in config.options(pep257_section):
if opt.replace('_', '-') not in opt_parser.config_options:
log.warning("Unknown option '{}' ignored".format(opt))
continue
normalized_opt = opt.replace('-', '_')
opt_type = option_list[normalized_opt]
if opt_type in ('int', 'count'):
value = config.getint(pep257_section, opt)
elif opt_type == 'string':
value = config.get(pep257_section, opt)
else:
assert opt_type in ('store_true', 'store_false')
value = config.getboolean(pep257_section, opt)
setattr(new_options, normalized_opt, value)
# Third, overwrite with the command-line options
options, _ = opt_parser.parse_args(values=new_options)
log.debug("options: %s", options)
return options
def setup_stream_handlers(options):
"""Setup logging stream handlers according to the options."""
class StdoutFilter(logging.Filter):
def filter(self, record):
return record.levelno in (logging.DEBUG, logging.INFO)
if log.handlers:
for handler in log.handlers:
log.removeHandler(handler)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.WARNING)
stdout_handler.addFilter(StdoutFilter())
if options.debug:
stdout_handler.setLevel(logging.DEBUG)
elif options.verbose:
stdout_handler.setLevel(logging.INFO)
else:
stdout_handler.setLevel(logging.WARNING)
log.addHandler(stdout_handler)
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.WARNING)
log.addHandler(stderr_handler)
def get_checked_error_codes(options):
codes = set(ErrorRegistry.get_error_codes())
if options.ignore:
checked_codes = codes - set(options.ignore.split(','))
elif options.select:
checked_codes = set(options.select.split(','))
elif options.convention:
checked_codes = getattr(Conventions, options.convention)
else:
checked_codes = Conventions.pep257
checked_codes -= set(options.add_ignore.split(','))
checked_codes |= set(options.add_select.split(','))
return checked_codes - set('')
def validate_options(options):
mutually_exclusive = ('ignore', 'select', 'convention')
for opt1, opt2 in itertools.permutations(mutually_exclusive, 2):
if getattr(options, opt1) and getattr(options, opt2):
log.error('Cannot pass both {0} and {1}. They are '
'mutually exclusive.'.format(opt1, opt2))
return False
if options.convention and not hasattr(Conventions, options.convention):
return False
return True
def run_pep257():
log.setLevel(logging.DEBUG)
opt_parser = get_option_parser()
# setup the logger before parsing the config file, so that command line
# arguments for debug / verbose will be printed.
options, arguments = opt_parser.parse_args()
setup_stream_handlers(options)
    # We look at the file arguments before reading the config file, since they
    # determine where we look for it.
options = get_options(arguments, opt_parser)
if not validate_options(options):
return INVALID_OPTIONS_RETURN_CODE
# Setup the handler again with values from the config file.
setup_stream_handlers(options)
collected = collect(arguments or ['.'],
match=re(options.match + '$').match,
match_dir=re(options.match_dir + '$').match)
log.debug("starting pep257 in debug mode.")
Error.explain = options.explain
Error.source = options.source
collected = list(collected)
checked_codes = get_checked_error_codes(options)
errors = check(collected, select=checked_codes)
code = NO_VIOLATIONS_RETURN_CODE
count = 0
for error in errors:
sys.stderr.write('%s\n' % error)
code = VIOLATIONS_RETURN_CODE
count += 1
if options.count:
print(count)
return code
parse = Parser()
def check_for(kind, terminal=False):
def decorator(f):
f._check_for = kind
f._terminal = terminal
return f
return decorator
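# Editor's note: `check_for` tags a checker method with the Definition subclass it
# applies to; PEP257Checker.checks (below) collects every tagged method and runs the
# terminal checks first.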
class PEP257Checker(object):
"""Checker for PEP 257.
D10x: Missing docstrings
D20x: Whitespace issues
D30x: Docstring formatting
D40x: Docstring content issues
"""
def check_source(self, source, filename):
module = parse(StringIO(source), filename)
for definition in module:
for check in self.checks:
terminate = False
if isinstance(definition, check._check_for):
error = check(None, definition, definition.docstring)
errors = error if hasattr(error, '__iter__') else [error]
for error in errors:
if error is not None:
partition = check.__doc__.partition('.\n')
message, _, explanation = partition
error.set_context(explanation=explanation,
definition=definition)
yield error
if check._terminal:
terminate = True
break
if terminate:
break
@property
def checks(self):
all = [check for check in vars(type(self)).values()
if hasattr(check, '_check_for')]
return sorted(all, key=lambda check: not check._terminal)
@check_for(Definition, terminal=True)
def check_docstring_missing(self, definition, docstring):
"""D10{0,1,2,3}: Public definitions should have docstrings.
All modules should normally have docstrings. [...] all functions and
classes exported by a module should also have docstrings. Public
methods (including the __init__ constructor) should also have
docstrings.
Note: Public (exported) definitions are either those with names listed
in __all__ variable (if present), or those that do not start
with a single underscore.
"""
if (not docstring and definition.is_public or
docstring and is_blank(eval(docstring))):
codes = {Module: D100, Class: D101, NestedClass: D101,
Method: D102, Function: D103, NestedFunction: D103,
Package: D104}
return codes[type(definition)]()
@check_for(Definition)
def check_one_liners(self, definition, docstring):
"""D200: One-liner docstrings should fit on one line with quotes.
The closing quotes are on the same line as the opening quotes.
This looks better for one-liners.
"""
if docstring:
lines = eval(docstring).split('\n')
if len(lines) > 1:
non_empty_lines = sum(1 for l in lines if not is_blank(l))
if non_empty_lines == 1:
return D200(len(lines))
@check_for(Function)
def check_no_blank_before(self, function, docstring): # def
"""D20{1,2}: No blank lines allowed around function/method docstring.
There's no blank line either before or after the docstring.
"""
# NOTE: This does not take comments into account.
# NOTE: This does not take into account functions with groups of code.
if docstring:
before, _, after = function.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 0:
yield D201(blanks_before_count)
if not all(blanks_after) and blanks_after_count != 0:
yield D202(blanks_after_count)
@check_for(Class)
    def check_blank_before_after_class(self, class_, docstring):
        """D20{3,4}: Class docstrings should have 1 blank line around them.
Insert a blank line before and after all docstrings (one-line or
multi-line) that document a class -- generally speaking, the class's
methods are separated from each other by a single blank line, and the
docstring needs to be offset from the first method by a blank line;
for symmetry, put a blank line between the class header and the
docstring.
"""
# NOTE: this gives false-positive in this case
# class Foo:
#
# """Docstring."""
#
#
# # comment here
# def foo(): pass
if docstring:
before, _, after = class_.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 1:
yield D203(blanks_before_count)
if not all(blanks_after) and blanks_after_count != 1:
yield D204(blanks_after_count)
@check_for(Definition)
def check_blank_after_summary(self, definition, docstring):
"""D205: Put one blank line between summary line and description.
Multi-line docstrings consist of a summary line just like a one-line
docstring, followed by a blank line, followed by a more elaborate
description. The summary line may be used by automatic indexing tools;
it is important that it fits on one line and is separated from the
rest of the docstring by a blank line.
"""
if docstring:
lines = eval(docstring).strip().split('\n')
if len(lines) > 1:
post_summary_blanks = list(map(is_blank, lines[1:]))
blanks_count = sum(takewhile(bool, post_summary_blanks))
if blanks_count != 1:
return D205(blanks_count)
@check_for(Definition)
def check_indent(self, definition, docstring):
"""D20{6,7,8}: The entire docstring should be indented same as code.
The entire docstring is indented the same as the quotes at its
first line.
"""
if docstring:
before_docstring, _, _ = definition.source.partition(docstring)
_, _, indent = before_docstring.rpartition('\n')
lines = docstring.split('\n')
if len(lines) > 1:
lines = lines[1:] # First line does not need indent.
indents = [leading_space(l) for l in lines if not is_blank(l)]
if set(' \t') == set(''.join(indents) + indent):
yield D206()
if (len(indents) > 1 and min(indents[:-1]) > indent or
indents[-1] > indent):
yield D208()
if min(indents) < indent:
yield D207()
@check_for(Definition)
def check_newline_after_last_paragraph(self, definition, docstring):
"""D209: Put multi-line docstring closing quotes on separate line.
Unless the entire docstring fits on a line, place the closing
quotes on a line by themselves.
"""
if docstring:
lines = [l for l in eval(docstring).split('\n') if not is_blank(l)]
if len(lines) > 1:
if docstring.split("\n")[-1].strip() not in ['"""', "'''"]:
return D209()
@check_for(Definition)
def check_surrounding_whitespaces(self, definition, docstring):
"""D210: No whitespaces allowed surrounding docstring text."""
if docstring:
lines = eval(docstring).split('\n')
if lines[0].startswith(' ') or \
len(lines) == 1 and lines[0].endswith(' '):
return D210()
@check_for(Definition)
def check_triple_double_quotes(self, definition, docstring):
r'''D300: Use """triple double quotes""".
For consistency, always use """triple double quotes""" around
docstrings. Use r"""raw triple double quotes""" if you use any
backslashes in your docstrings. For Unicode docstrings, use
u"""Unicode triple-quoted strings""".
Note: Exception to this is made if the docstring contains
""" quotes in its body.
'''
if docstring and '"""' in eval(docstring) and docstring.startswith(
("'''", "r'''", "u'''", "ur'''")):
# Allow ''' quotes if docstring contains """, because otherwise """
# quotes could not be expressed inside docstring. Not in PEP 257.
return
if docstring and not docstring.startswith(
('"""', 'r"""', 'u"""', 'ur"""')):
quotes = "'''" if "'''" in docstring[:4] else "'"
return D300(quotes)
@check_for(Definition)
def check_backslashes(self, definition, docstring):
r'''D301: Use r""" if any backslashes in a docstring.
Use r"""raw triple double quotes""" if you use any backslashes
(\) in your docstrings.
'''
# Just check that docstring is raw, check_triple_double_quotes
# ensures the correct quotes.
if docstring and '\\' in docstring and not docstring.startswith(
('r', 'ur')):
return D301()
@check_for(Definition)
def check_unicode_docstring(self, definition, docstring):
r'''D302: Use u""" for docstrings with Unicode.
For Unicode docstrings, use u"""Unicode triple-quoted strings""".
'''
# Just check that docstring is unicode, check_triple_double_quotes
# ensures the correct quotes.
if docstring and sys.version_info[0] <= 2:
if not is_ascii(docstring) and not docstring.startswith(
('u', 'ur')):
return D302()
@check_for(Definition)
def check_ends_with_period(self, definition, docstring):
"""D400: First line should end with a period.
The [first line of a] docstring is a phrase ending in a period.
"""
if docstring:
summary_line = eval(docstring).strip().split('\n')[0]
if not summary_line.endswith('.'):
return D400(summary_line[-1])
@check_for(Function)
def check_imperative_mood(self, function, docstring): # def context
"""D401: First line should be in imperative mood: 'Do', not 'Does'.
[Docstring] prescribes the function or method's effect as a command:
("Do this", "Return that"), not as a description; e.g. don't write
"Returns the pathname ...".
"""
if docstring:
stripped = eval(docstring).strip()
if stripped:
first_word = stripped.split()[0]
if first_word.endswith('s') and not first_word.endswith('ss'):
return D401(first_word[:-1], first_word)
@check_for(Function)
def check_no_signature(self, function, docstring): # def context
"""D402: First line should not be function's or method's "signature".
The one-line docstring should NOT be a "signature" reiterating the
function/method parameters (which can be obtained by introspection).
"""
if docstring:
first_line = eval(docstring).strip().split('\n')[0]
if function.name + '(' in first_line.replace(' ', ''):
return D402()
# Somewhat hard to determine if return value is mentioned.
# @check(Function)
def SKIP_check_return_type(self, function, docstring):
"""D40x: Return value type should be mentioned.
[T]he nature of the return value cannot be determined by
introspection, so it should be mentioned.
"""
if docstring and function.returns_value:
if 'return' not in docstring.lower():
return Error()
def main():
try:
sys.exit(run_pep257())
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
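# Editor's note: illustrative programmatic use of `check` (error codes chosen
# arbitrarily):
#   for error in check(['pep257.py'], select=set(['D100', 'D103'])):
#       print(error)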
|
|
from panda3d.core import *
from panda3d.physics import PhysicalNode
from panda3d.physics import ParticleSystem
from panda3d.physics import PointParticleFactory
from panda3d.physics import ZSpinParticleFactory
#from panda3d.physics import OrientedParticleFactory
from panda3d.physics import BaseParticleRenderer
from panda3d.physics import PointParticleRenderer
from panda3d.physics import LineParticleRenderer
from panda3d.physics import GeomParticleRenderer
from panda3d.physics import SparkleParticleRenderer
#from panda3d.physics import SpriteParticleRenderer
from panda3d.physics import BaseParticleEmitter
from panda3d.physics import ArcEmitter
from panda3d.physics import BoxEmitter
from panda3d.physics import DiscEmitter
from panda3d.physics import LineEmitter
from panda3d.physics import PointEmitter
from panda3d.physics import RectangleEmitter
from panda3d.physics import RingEmitter
from panda3d.physics import SphereSurfaceEmitter
from panda3d.physics import SphereVolumeEmitter
from panda3d.physics import TangentRingEmitter
from . import SpriteParticleRendererExt
from direct.directnotify.DirectNotifyGlobal import directNotify
import sys
class Particles(ParticleSystem):
notify = directNotify.newCategory('Particles')
id = 1
def __init__(self, name=None, poolSize=1024):
if (name == None):
self.name = 'particles-%d' % Particles.id
Particles.id += 1
else:
self.name = name
ParticleSystem.__init__(self, poolSize)
# self.setBirthRate(0.02)
# self.setLitterSize(10)
# self.setLitterSpread(0)
# Set up a physical node
self.node = PhysicalNode(self.name)
self.nodePath = NodePath(self.node)
self.setRenderParent(self.node)
self.node.addPhysical(self)
self.factory = None
self.factoryType = "undefined"
# self.setFactory("PointParticleFactory")
self.renderer = None
self.rendererType = "undefined"
# self.setRenderer("PointParticleRenderer")
self.emitter = None
self.emitterType = "undefined"
# self.setEmitter("SphereVolumeEmitter")
# Enable particles by default
self.fEnabled = 0
#self.enable()
self.geomReference = ""
def cleanup(self):
self.disable()
self.clearLinearForces()
self.clearAngularForces()
self.setRenderParent(self.node)
self.node.removePhysical(self)
self.nodePath.removeNode()
del self.node
del self.nodePath
del self.factory
del self.renderer
del self.emitter
def enable(self):
if (self.fEnabled == 0):
base.physicsMgr.attachPhysical(self)
base.particleMgr.attachParticlesystem(self)
self.fEnabled = 1
def disable(self):
if (self.fEnabled == 1):
base.physicsMgr.removePhysical(self)
base.particleMgr.removeParticlesystem(self)
self.fEnabled = 0
def isEnabled(self):
return self.fEnabled
def getNode(self):
return self.node
def setFactory(self, type):
if (self.factoryType == type):
return None
if (self.factory):
self.factory = None
self.factoryType = type
if (type == "PointParticleFactory"):
self.factory = PointParticleFactory()
elif (type == "ZSpinParticleFactory"):
self.factory = ZSpinParticleFactory()
elif (type == "OrientedParticleFactory"):
self.factory = OrientedParticleFactory()
else:
print("unknown factory type: %s" % type)
return None
self.factory.setLifespanBase(0.5)
ParticleSystem.setFactory(self, self.factory)
def setRenderer(self, type):
if (self.rendererType == type):
return None
if (self.renderer):
self.renderer = None
self.rendererType = type
if (type == "PointParticleRenderer"):
self.renderer = PointParticleRenderer()
self.renderer.setPointSize(1.0)
elif (type == "LineParticleRenderer"):
self.renderer = LineParticleRenderer()
elif (type == "GeomParticleRenderer"):
self.renderer = GeomParticleRenderer()
# This was moved here because we do not want to download
# the direct tools with toontown.
if __dev__:
from direct.directtools import DirectSelection
npath = NodePath('default-geom')
bbox = DirectSelection.DirectBoundingBox(npath)
self.renderer.setGeomNode(bbox.lines.node())
elif (type == "SparkleParticleRenderer"):
self.renderer = SparkleParticleRenderer()
elif (type == "SpriteParticleRenderer"):
self.renderer = SpriteParticleRendererExt.SpriteParticleRendererExt()
# self.renderer.setTextureFromFile()
else:
print("unknown renderer type: %s" % type)
return None
ParticleSystem.setRenderer(self, self.renderer)
def setEmitter(self, type):
if (self.emitterType == type):
return None
if (self.emitter):
self.emitter = None
self.emitterType = type
if (type == "ArcEmitter"):
self.emitter = ArcEmitter()
elif (type == "BoxEmitter"):
self.emitter = BoxEmitter()
elif (type == "DiscEmitter"):
self.emitter = DiscEmitter()
elif (type == "LineEmitter"):
self.emitter = LineEmitter()
elif (type == "PointEmitter"):
self.emitter = PointEmitter()
elif (type == "RectangleEmitter"):
self.emitter = RectangleEmitter()
elif (type == "RingEmitter"):
self.emitter = RingEmitter()
elif (type == "SphereSurfaceEmitter"):
self.emitter = SphereSurfaceEmitter()
elif (type == "SphereVolumeEmitter"):
self.emitter = SphereVolumeEmitter()
self.emitter.setRadius(1.0)
elif (type == "TangentRingEmitter"):
self.emitter = TangentRingEmitter()
else:
print("unknown emitter type: %s" % type)
return None
ParticleSystem.setEmitter(self, self.emitter)
def addForce(self, force):
if (force.isLinear()):
self.addLinearForce(force)
else:
self.addAngularForce(force)
def removeForce(self, force):
if (force == None):
self.notify.warning('removeForce() - force == None!')
return
if (force.isLinear()):
self.removeLinearForce(force)
else:
self.removeAngularForce(force)
def setRenderNodePath(self, nodePath):
self.setRenderParent(nodePath.node())
## Getters ##
def getName(self):
return self.name
def getFactory(self):
return self.factory
def getEmitter(self):
return self.emitter
def getRenderer(self):
return self.renderer
def printParams(self, file = sys.stdout, targ = 'self'):
file.write('# Particles parameters\n')
file.write(targ + '.setFactory(\"' + self.factoryType + '\")\n')
file.write(targ + '.setRenderer(\"' + self.rendererType + '\")\n')
file.write(targ + '.setEmitter(\"' + self.emitterType + '\")\n')
# System parameters
file.write(targ + ('.setPoolSize(%d)\n' %
int(self.getPoolSize())))
file.write(targ + ('.setBirthRate(%.4f)\n' %
self.getBirthRate()))
file.write(targ + ('.setLitterSize(%d)\n' %
int(self.getLitterSize())))
file.write(targ + ('.setLitterSpread(%d)\n' %
self.getLitterSpread()))
file.write(targ + ('.setSystemLifespan(%.4f)\n' %
self.getSystemLifespan()))
file.write(targ + ('.setLocalVelocityFlag(%d)\n' %
self.getLocalVelocityFlag()))
file.write(targ + ('.setSystemGrowsOlderFlag(%d)\n' %
self.getSystemGrowsOlderFlag()))
file.write('# Factory parameters\n')
file.write(targ + ('.factory.setLifespanBase(%.4f)\n' %
self.factory.getLifespanBase()))
file.write(targ + '.factory.setLifespanSpread(%.4f)\n' % \
self.factory.getLifespanSpread())
file.write(targ + '.factory.setMassBase(%.4f)\n' % \
self.factory.getMassBase())
file.write(targ + '.factory.setMassSpread(%.4f)\n' % \
self.factory.getMassSpread())
file.write(targ + '.factory.setTerminalVelocityBase(%.4f)\n' % \
self.factory.getTerminalVelocityBase())
file.write(targ + '.factory.setTerminalVelocitySpread(%.4f)\n' % \
self.factory.getTerminalVelocitySpread())
if (self.factoryType == "PointParticleFactory"):
file.write('# Point factory parameters\n')
elif (self.factoryType == "ZSpinParticleFactory"):
file.write('# Z Spin factory parameters\n')
file.write(targ + '.factory.setInitialAngle(%.4f)\n' % \
self.factory.getInitialAngle())
file.write(targ + '.factory.setInitialAngleSpread(%.4f)\n' % \
self.factory.getInitialAngleSpread())
file.write(targ + '.factory.enableAngularVelocity(%d)\n' % \
self.factory.getAngularVelocityEnabled())
if(self.factory.getAngularVelocityEnabled()):
file.write(targ + '.factory.setAngularVelocity(%.4f)\n' % \
self.factory.getAngularVelocity())
file.write(targ + '.factory.setAngularVelocitySpread(%.4f)\n' % \
self.factory.getAngularVelocitySpread())
else:
file.write(targ + '.factory.setFinalAngle(%.4f)\n' % \
self.factory.getFinalAngle())
file.write(targ + '.factory.setFinalAngleSpread(%.4f)\n' % \
self.factory.getFinalAngleSpread())
elif (self.factoryType == "OrientedParticleFactory"):
file.write('# Oriented factory parameters\n')
file.write(targ + '.factory.setInitialOrientation(%.4f)\n' % \
self.factory.getInitialOrientation())
file.write(targ + '.factory.setFinalOrientation(%.4f)\n' % \
self.factory.getFinalOrientation())
file.write('# Renderer parameters\n')
alphaMode = self.renderer.getAlphaMode()
aMode = "PRALPHANONE"
if (alphaMode == BaseParticleRenderer.PRALPHANONE):
aMode = "PRALPHANONE"
elif (alphaMode == BaseParticleRenderer.PRALPHAOUT):
aMode = "PRALPHAOUT"
elif (alphaMode == BaseParticleRenderer.PRALPHAIN):
aMode = "PRALPHAIN"
elif (alphaMode == BaseParticleRenderer.PRALPHAINOUT):
aMode = "PRALPHAINOUT"
elif (alphaMode == BaseParticleRenderer.PRALPHAUSER):
aMode = "PRALPHAUSER"
file.write(targ + '.renderer.setAlphaMode(BaseParticleRenderer.' + aMode + ')\n')
file.write(targ + '.renderer.setUserAlpha(%.2f)\n' % \
self.renderer.getUserAlpha())
if (self.rendererType == "PointParticleRenderer"):
file.write('# Point parameters\n')
file.write(targ + '.renderer.setPointSize(%.2f)\n' % \
self.renderer.getPointSize())
sColor = self.renderer.getStartColor()
file.write((targ + '.renderer.setStartColor(Vec4(%.2f, %.2f, %.2f, %.2f))\n' % (sColor[0], sColor[1], sColor[2], sColor[3])))
sColor = self.renderer.getEndColor()
file.write((targ + '.renderer.setEndColor(Vec4(%.2f, %.2f, %.2f, %.2f))\n' % (sColor[0], sColor[1], sColor[2], sColor[3])))
blendType = self.renderer.getBlendType()
bType = "PPONECOLOR"
if (blendType == PointParticleRenderer.PPONECOLOR):
bType = "PPONECOLOR"
elif (blendType == PointParticleRenderer.PPBLENDLIFE):
bType = "PPBLENDLIFE"
elif (blendType == PointParticleRenderer.PPBLENDVEL):
bType = "PPBLENDVEL"
file.write(targ + '.renderer.setBlendType(PointParticleRenderer.' + bType + ')\n')
blendMethod = self.renderer.getBlendMethod()
bMethod = "PPNOBLEND"
if (blendMethod == BaseParticleRenderer.PPNOBLEND):
bMethod = "PPNOBLEND"
elif (blendMethod == BaseParticleRenderer.PPBLENDLINEAR):
bMethod = "PPBLENDLINEAR"
elif (blendMethod == BaseParticleRenderer.PPBLENDCUBIC):
bMethod = "PPBLENDCUBIC"
file.write(targ + '.renderer.setBlendMethod(BaseParticleRenderer.' + bMethod + ')\n')
elif (self.rendererType == "LineParticleRenderer"):
file.write('# Line parameters\n')
sColor = self.renderer.getHeadColor()
file.write((targ + '.renderer.setHeadColor(Vec4(%.2f, %.2f, %.2f, %.2f))\n' % (sColor[0], sColor[1], sColor[2], sColor[3])))
sColor = self.renderer.getTailColor()
file.write((targ + '.renderer.setTailColor(Vec4(%.2f, %.2f, %.2f, %.2f))\n' % (sColor[0], sColor[1], sColor[2], sColor[3])))
sf = self.renderer.getLineScaleFactor()
file.write((targ + '.renderer.setLineScaleFactor(%.2f)\n' % (sf)))
elif (self.rendererType == "GeomParticleRenderer"):
file.write('# Geom parameters\n')
node = self.renderer.getGeomNode()
file.write('geomRef = loader.loadModel("' + self.geomReference + '")\n')
file.write(targ + '.renderer.setGeomNode(geomRef.node())\n')
            file.write(targ + '.geomReference = "' + self.geomReference + '"\n')
cbmLut = ('MNone','MAdd','MSubtract','MInvSubtract','MMin','MMax')
cboLut = ('OZero','OOne','OIncomingColor','OOneMinusIncomingColor','OFbufferColor',
'OOneMinusFbufferColor','OIncomingAlpha','OOneMinusIncomingAlpha',
'OFbufferAlpha','OOneMinusFbufferAlpha','OConstantColor',
'OOneMinusConstantColor','OConstantAlpha','OOneMinusConstantAlpha',
'OIncomingColorSaturate')
file.write(targ + '.renderer.setXScaleFlag(%d)\n' % self.renderer.getXScaleFlag())
file.write(targ + '.renderer.setYScaleFlag(%d)\n' % self.renderer.getYScaleFlag())
file.write(targ + '.renderer.setZScaleFlag(%d)\n' % self.renderer.getZScaleFlag())
file.write(targ + '.renderer.setInitialXScale(%.4f)\n' % self.renderer.getInitialXScale())
file.write(targ + '.renderer.setFinalXScale(%.4f)\n' % self.renderer.getFinalXScale())
file.write(targ + '.renderer.setInitialYScale(%.4f)\n' % self.renderer.getInitialYScale())
file.write(targ + '.renderer.setFinalYScale(%.4f)\n' % self.renderer.getFinalYScale())
file.write(targ + '.renderer.setInitialZScale(%.4f)\n' % self.renderer.getInitialZScale())
file.write(targ + '.renderer.setFinalZScale(%.4f)\n' % self.renderer.getFinalZScale())
cbAttrib = self.renderer.getRenderNode().getAttrib(ColorBlendAttrib.getClassType())
if(cbAttrib):
cbMode = cbAttrib.getMode()
if(cbMode > 0):
if(cbMode in (ColorBlendAttrib.MAdd, ColorBlendAttrib.MSubtract, ColorBlendAttrib.MInvSubtract)):
cboa = cbAttrib.getOperandA()
cbob = cbAttrib.getOperandB()
file.write(targ+'.renderer.setColorBlendMode(ColorBlendAttrib.%s, ColorBlendAttrib.%s, ColorBlendAttrib.%s)\n' %
(cbmLut[cbMode], cboLut[cboa], cboLut[cbob]))
else:
file.write(targ+'.renderer.setColorBlendMode(ColorBlendAttrib.%s)\n' % cbmLut[cbMode])
cim = self.renderer.getColorInterpolationManager()
segIdList = [int(seg) for seg in cim.getSegmentIdList().split()]
for sid in segIdList:
seg = cim.getSegment(sid)
if seg.isEnabled():
t_b = seg.getTimeBegin()
t_e = seg.getTimeEnd()
mod = seg.isModulated()
fun = seg.getFunction()
typ = type(fun).__name__
if typ == 'ColorInterpolationFunctionConstant':
c_a = fun.getColorA()
file.write(targ+'.renderer.getColorInterpolationManager().addConstant('+repr(t_b)+','+repr(t_e)+','+ \
'Vec4('+repr(c_a[0])+','+repr(c_a[1])+','+repr(c_a[2])+','+repr(c_a[3])+'),'+repr(mod)+')\n')
elif typ == 'ColorInterpolationFunctionLinear':
c_a = fun.getColorA()
c_b = fun.getColorB()
file.write(targ+'.renderer.getColorInterpolationManager().addLinear('+repr(t_b)+','+repr(t_e)+','+ \
'Vec4('+repr(c_a[0])+','+repr(c_a[1])+','+repr(c_a[2])+','+repr(c_a[3])+'),' + \
'Vec4('+repr(c_b[0])+','+repr(c_b[1])+','+repr(c_b[2])+','+repr(c_b[3])+'),'+repr(mod)+')\n')
elif typ == 'ColorInterpolationFunctionStepwave':
c_a = fun.getColorA()
c_b = fun.getColorB()
w_a = fun.getWidthA()
w_b = fun.getWidthB()
file.write(targ+'.renderer.getColorInterpolationManager().addStepwave('+repr(t_b)+','+repr(t_e)+','+ \
'Vec4('+repr(c_a[0])+','+repr(c_a[1])+','+repr(c_a[2])+','+repr(c_a[3])+'),' + \
'Vec4('+repr(c_b[0])+','+repr(c_b[1])+','+repr(c_b[2])+','+repr(c_b[3])+'),' + \
repr(w_a)+','+repr(w_b)+','+repr(mod)+')\n')
elif typ == 'ColorInterpolationFunctionSinusoid':
c_a = fun.getColorA()
c_b = fun.getColorB()
per = fun.getPeriod()
file.write(targ+'.renderer.getColorInterpolationManager().addSinusoid('+repr(t_b)+','+repr(t_e)+','+ \
'Vec4('+repr(c_a[0])+','+repr(c_a[1])+','+repr(c_a[2])+','+repr(c_a[3])+'),' + \
'Vec4('+repr(c_b[0])+','+repr(c_b[1])+','+repr(c_b[2])+','+repr(c_b[3])+'),' + \
repr(per)+','+repr(mod)+')\n')
elif (self.rendererType == "SparkleParticleRenderer"):
file.write('# Sparkle parameters\n')
sColor = self.renderer.getCenterColor()
file.write((targ + '.renderer.setCenterColor(Vec4(%.2f, %.2f, %.2f, %.2f))\n' % (sColor[0], sColor[1], sColor[2], sColor[3])))
sColor = self.renderer.getEdgeColor()
file.write((targ + '.renderer.setEdgeColor(Vec4(%.2f, %.2f, %.2f, %.2f))\n' % (sColor[0], sColor[1], sColor[2], sColor[3])))
file.write(targ + '.renderer.setBirthRadius(%.4f)\n' % self.renderer.getBirthRadius())
file.write(targ + '.renderer.setDeathRadius(%.4f)\n' % self.renderer.getDeathRadius())
lifeScale = self.renderer.getLifeScale()
lScale = "SPNOSCALE"
if (lifeScale == SparkleParticleRenderer.SPSCALE):
lScale = "SPSCALE"
file.write(targ + '.renderer.setLifeScale(SparkleParticleRenderer.' + lScale + ')\n')
elif (self.rendererType == "SpriteParticleRenderer"):
file.write('# Sprite parameters\n')
if (self.renderer.getAnimateFramesEnable()):
file.write(targ + '.renderer.setAnimateFramesEnable(True)\n')
rate = self.renderer.getAnimateFramesRate()
if(rate):
file.write(targ + '.renderer.setAnimateFramesRate(%.3f)\n'%rate)
animCount = self.renderer.getNumAnims()
for x in range(animCount):
anim = self.renderer.getAnim(x)
if(anim.getSourceType() == SpriteAnim.STTexture):
file.write(targ + '.renderer.addTextureFromFile(\'%s\')\n' % (anim.getTexSource(),))
else:
file.write(targ + '.renderer.addTextureFromNode(\'%s\',\'%s\')\n' % (anim.getModelSource(), anim.getNodeSource()))
sColor = self.renderer.getColor()
file.write((targ + '.renderer.setColor(Vec4(%.2f, %.2f, %.2f, %.2f))\n' % (sColor[0], sColor[1], sColor[2], sColor[3])))
file.write(targ + '.renderer.setXScaleFlag(%d)\n' % self.renderer.getXScaleFlag())
file.write(targ + '.renderer.setYScaleFlag(%d)\n' % self.renderer.getYScaleFlag())
file.write(targ + '.renderer.setAnimAngleFlag(%d)\n' % self.renderer.getAnimAngleFlag())
file.write(targ + '.renderer.setInitialXScale(%.4f)\n' % self.renderer.getInitialXScale())
file.write(targ + '.renderer.setFinalXScale(%.4f)\n' % self.renderer.getFinalXScale())
file.write(targ + '.renderer.setInitialYScale(%.4f)\n' % self.renderer.getInitialYScale())
file.write(targ + '.renderer.setFinalYScale(%.4f)\n' % self.renderer.getFinalYScale())
file.write(targ + '.renderer.setNonanimatedTheta(%.4f)\n' % self.renderer.getNonanimatedTheta())
blendMethod = self.renderer.getAlphaBlendMethod()
bMethod = "PPNOBLEND"
if (blendMethod == BaseParticleRenderer.PPNOBLEND):
bMethod = "PPNOBLEND"
elif (blendMethod == BaseParticleRenderer.PPBLENDLINEAR):
bMethod = "PPBLENDLINEAR"
elif (blendMethod == BaseParticleRenderer.PPBLENDCUBIC):
bMethod = "PPBLENDCUBIC"
file.write(targ + '.renderer.setAlphaBlendMethod(BaseParticleRenderer.' + bMethod + ')\n')
file.write(targ + '.renderer.setAlphaDisable(%d)\n' % self.renderer.getAlphaDisable())
# Save the color blending to file
cbmLut = ('MNone','MAdd','MSubtract','MInvSubtract','MMin','MMax')
cboLut = ('OZero','OOne','OIncomingColor','OOneMinusIncomingColor','OFbufferColor',
'OOneMinusFbufferColor','OIncomingAlpha','OOneMinusIncomingAlpha',
'OFbufferAlpha','OOneMinusFbufferAlpha','OConstantColor',
'OOneMinusConstantColor','OConstantAlpha','OOneMinusConstantAlpha',
'OIncomingColorSaturate')
cbAttrib = self.renderer.getRenderNode().getAttrib(ColorBlendAttrib.getClassType())
if(cbAttrib):
cbMode = cbAttrib.getMode()
if(cbMode > 0):
if(cbMode in (ColorBlendAttrib.MAdd, ColorBlendAttrib.MSubtract, ColorBlendAttrib.MInvSubtract)):
cboa = cbAttrib.getOperandA()
cbob = cbAttrib.getOperandB()
file.write(targ+'.renderer.setColorBlendMode(ColorBlendAttrib.%s, ColorBlendAttrib.%s, ColorBlendAttrib.%s)\n' %
(cbmLut[cbMode], cboLut[cboa], cboLut[cbob]))
else:
file.write(targ+'.renderer.setColorBlendMode(ColorBlendAttrib.%s)\n' % cbmLut[cbMode])
cim = self.renderer.getColorInterpolationManager()
segIdList = [int(seg) for seg in cim.getSegmentIdList().split()]
for sid in segIdList:
seg = cim.getSegment(sid)
if seg.isEnabled():
t_b = seg.getTimeBegin()
t_e = seg.getTimeEnd()
mod = seg.isModulated()
fun = seg.getFunction()
typ = type(fun).__name__
if typ == 'ColorInterpolationFunctionConstant':
c_a = fun.getColorA()
file.write(targ+'.renderer.getColorInterpolationManager().addConstant('+repr(t_b)+','+repr(t_e)+','+ \
'Vec4('+repr(c_a[0])+','+repr(c_a[1])+','+repr(c_a[2])+','+repr(c_a[3])+'),'+repr(mod)+')\n')
elif typ == 'ColorInterpolationFunctionLinear':
c_a = fun.getColorA()
c_b = fun.getColorB()
file.write(targ+'.renderer.getColorInterpolationManager().addLinear('+repr(t_b)+','+repr(t_e)+','+ \
'Vec4('+repr(c_a[0])+','+repr(c_a[1])+','+repr(c_a[2])+','+repr(c_a[3])+'),' + \
'Vec4('+repr(c_b[0])+','+repr(c_b[1])+','+repr(c_b[2])+','+repr(c_b[3])+'),'+repr(mod)+')\n')
elif typ == 'ColorInterpolationFunctionStepwave':
c_a = fun.getColorA()
c_b = fun.getColorB()
w_a = fun.getWidthA()
w_b = fun.getWidthB()
file.write(targ+'.renderer.getColorInterpolationManager().addStepwave('+repr(t_b)+','+repr(t_e)+','+ \
'Vec4('+repr(c_a[0])+','+repr(c_a[1])+','+repr(c_a[2])+','+repr(c_a[3])+'),' + \
'Vec4('+repr(c_b[0])+','+repr(c_b[1])+','+repr(c_b[2])+','+repr(c_b[3])+'),' + \
repr(w_a)+','+repr(w_b)+','+repr(mod)+')\n')
elif typ == 'ColorInterpolationFunctionSinusoid':
c_a = fun.getColorA()
c_b = fun.getColorB()
per = fun.getPeriod()
file.write(targ+'.renderer.getColorInterpolationManager().addSinusoid('+repr(t_b)+','+repr(t_e)+','+ \
'Vec4('+repr(c_a[0])+','+repr(c_a[1])+','+repr(c_a[2])+','+repr(c_a[3])+'),' + \
'Vec4('+repr(c_b[0])+','+repr(c_b[1])+','+repr(c_b[2])+','+repr(c_b[3])+'),' + \
repr(per)+','+repr(mod)+')\n')
file.write('# Emitter parameters\n')
emissionType = self.emitter.getEmissionType()
eType = "ETEXPLICIT"
if (emissionType == BaseParticleEmitter.ETEXPLICIT):
eType = "ETEXPLICIT"
elif (emissionType == BaseParticleEmitter.ETRADIATE):
eType = "ETRADIATE"
elif (emissionType == BaseParticleEmitter.ETCUSTOM):
eType = "ETCUSTOM"
file.write(targ + '.emitter.setEmissionType(BaseParticleEmitter.' + eType + ')\n')
file.write(targ + '.emitter.setAmplitude(%.4f)\n' % self.emitter.getAmplitude())
file.write(targ + '.emitter.setAmplitudeSpread(%.4f)\n' % self.emitter.getAmplitudeSpread())
oForce = self.emitter.getOffsetForce()
file.write((targ + '.emitter.setOffsetForce(Vec3(%.4f, %.4f, %.4f))\n' % (oForce[0], oForce[1], oForce[2])))
oForce = self.emitter.getExplicitLaunchVector()
file.write((targ + '.emitter.setExplicitLaunchVector(Vec3(%.4f, %.4f, %.4f))\n' % (oForce[0], oForce[1], oForce[2])))
orig = self.emitter.getRadiateOrigin()
file.write((targ + '.emitter.setRadiateOrigin(Point3(%.4f, %.4f, %.4f))\n' % (orig[0], orig[1], orig[2])))
if (self.emitterType == "BoxEmitter"):
file.write('# Box parameters\n')
bound = self.emitter.getMinBound()
file.write((targ + '.emitter.setMinBound(Point3(%.4f, %.4f, %.4f))\n' % (bound[0], bound[1], bound[2])))
bound = self.emitter.getMaxBound()
file.write((targ + '.emitter.setMaxBound(Point3(%.4f, %.4f, %.4f))\n' % (bound[0], bound[1], bound[2])))
elif (self.emitterType == "DiscEmitter"):
file.write('# Disc parameters\n')
file.write(targ + '.emitter.setRadius(%.4f)\n' % self.emitter.getRadius())
if (eType == "ETCUSTOM"):
file.write(targ + '.emitter.setOuterAngle(%.4f)\n' % self.emitter.getOuterAngle())
file.write(targ + '.emitter.setInnerAngle(%.4f)\n' % self.emitter.getInnerAngle())
file.write(targ + '.emitter.setOuterMagnitude(%.4f)\n' % self.emitter.getOuterMagnitude())
file.write(targ + '.emitter.setInnerMagnitude(%.4f)\n' % self.emitter.getInnerMagnitude())
file.write(targ + '.emitter.setCubicLerping(%d)\n' % self.emitter.getCubicLerping())
elif (self.emitterType == "LineEmitter"):
file.write('# Line parameters\n')
point = self.emitter.getEndpoint1()
file.write((targ + '.emitter.setEndpoint1(Point3(%.4f, %.4f, %.4f))\n' % (point[0], point[1], point[2])))
point = self.emitter.getEndpoint2()
file.write((targ + '.emitter.setEndpoint2(Point3(%.4f, %.4f, %.4f))\n' % (point[0], point[1], point[2])))
elif (self.emitterType == "PointEmitter"):
file.write('# Point parameters\n')
point = self.emitter.getLocation()
file.write((targ + '.emitter.setLocation(Point3(%.4f, %.4f, %.4f))\n' % (point[0], point[1], point[2])))
elif (self.emitterType == "RectangleEmitter"):
file.write('# Rectangle parameters\n')
point = self.emitter.getMinBound()
file.write((targ + '.emitter.setMinBound(Point2(%.4f, %.4f))\n' % (point[0], point[1])))
point = self.emitter.getMaxBound()
file.write((targ + '.emitter.setMaxBound(Point2(%.4f, %.4f))\n' % (point[0], point[1])))
elif (self.emitterType == "RingEmitter"):
file.write('# Ring parameters\n')
file.write(targ + '.emitter.setRadius(%.4f)\n' % self.emitter.getRadius())
file.write(targ + '.emitter.setRadiusSpread(%.4f)\n' % self.emitter.getRadiusSpread())
if (eType == "ETCUSTOM"):
file.write(targ + '.emitter.setAngle(%.4f)\n' % self.emitter.getAngle())
elif (self.emitterType == "SphereSurfaceEmitter"):
file.write('# Sphere Surface parameters\n')
file.write(targ + '.emitter.setRadius(%.4f)\n' % self.emitter.getRadius())
elif (self.emitterType == "SphereVolumeEmitter"):
file.write('# Sphere Volume parameters\n')
file.write(targ + '.emitter.setRadius(%.4f)\n' % self.emitter.getRadius())
elif (self.emitterType == "TangentRingEmitter"):
file.write('# Tangent Ring parameters\n')
file.write(targ + '.emitter.setRadius(%.4f)\n' % self.emitter.getRadius())
file.write(targ + '.emitter.setRadiusSpread(%.4f)\n' % self.emitter.getRadiusSpread())
def getPoolSizeRanges(self):
litterRange = [max(1,self.getLitterSize()-self.getLitterSpread()),
self.getLitterSize(),
self.getLitterSize()+self.getLitterSpread()]
lifespanRange = [self.factory.getLifespanBase()-self.factory.getLifespanSpread(),
self.factory.getLifespanBase(),
self.factory.getLifespanBase()+self.factory.getLifespanSpread()]
birthRateRange = [self.getBirthRate()] * 3
print('Litter Ranges: %s' % litterRange)
print('LifeSpan Ranges: %s' % lifespanRange)
print('BirthRate Ranges: %s' % birthRateRange)
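        # Editor's note: each estimate below is litterSize * lifespan / birthRate,
        # i.e. particles born per litter times the number of litters alive at once.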
return dict(zip(('min','median','max'),[l*s/b for l,s,b in zip(litterRange,lifespanRange,birthRateRange)]))
def accelerate(self,time,stepCount = 1,stepTime=0.0):
if time > 0.0:
if stepTime == 0.0:
stepTime = float(time)/stepCount
remainder = 0.0
else:
stepCount = int(float(time)/stepTime)
remainder = time-stepCount*stepTime
for step in range(stepCount):
base.particleMgr.doParticles(stepTime,self,False)
base.physicsMgr.doPhysics(stepTime,self)
if(remainder):
base.particleMgr.doParticles(remainder,self,False)
base.physicsMgr.doPhysics(remainder,self)
self.render()
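# Editor's note: minimal usage sketch (requires a running ShowBase so that the globals
# `base` and `render` exist; the chosen types mirror the commented defaults in
# __init__):
#   p = Particles('demo', poolSize=256)
#   p.setFactory("PointParticleFactory")
#   p.setRenderer("PointParticleRenderer")
#   p.setEmitter("SphereVolumeEmitter")
#   p.nodePath.reparentTo(render)
#   p.enable()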
|
|
"""Support for Automation Device Specification (ADS)."""
import threading
import struct
import logging
import ctypes
from collections import namedtuple
import voluptuous as vol
from homeassistant.const import (
CONF_DEVICE, CONF_IP_ADDRESS, CONF_PORT, EVENT_HOMEASSISTANT_STOP)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pyads==3.0.7']
_LOGGER = logging.getLogger(__name__)
DATA_ADS = 'data_ads'
# Supported Types
ADSTYPE_BOOL = 'bool'
ADSTYPE_BYTE = 'byte'
ADSTYPE_DINT = 'dint'
ADSTYPE_INT = 'int'
ADSTYPE_UDINT = 'udint'
ADSTYPE_UINT = 'uint'
CONF_ADS_FACTOR = 'factor'
CONF_ADS_TYPE = 'adstype'
CONF_ADS_VALUE = 'value'
CONF_ADS_VAR = 'adsvar'
CONF_ADS_VAR_BRIGHTNESS = 'adsvar_brightness'
DOMAIN = 'ads'
SERVICE_WRITE_DATA_BY_NAME = 'write_data_by_name'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_DEVICE): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_IP_ADDRESS): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
SCHEMA_SERVICE_WRITE_DATA_BY_NAME = vol.Schema({
vol.Required(CONF_ADS_TYPE):
vol.In([ADSTYPE_INT, ADSTYPE_UINT, ADSTYPE_BYTE, ADSTYPE_BOOL,
ADSTYPE_DINT, ADSTYPE_UDINT]),
vol.Required(CONF_ADS_VALUE): vol.Coerce(int),
vol.Required(CONF_ADS_VAR): cv.string,
})
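# Editor's note: illustrative configuration.yaml entry matching CONFIG_SCHEMA above
# (the addresses and port are assumptions):
#   ads:
#     device: "192.168.10.120.1.1"   # AMS net id of the PLC
#     port: 851                      # PLC runtime port
#     ip_address: "192.168.10.120"   # optional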
def setup(hass, config):
"""Set up the ADS component."""
import pyads
conf = config[DOMAIN]
net_id = conf.get(CONF_DEVICE)
ip_address = conf.get(CONF_IP_ADDRESS)
port = conf.get(CONF_PORT)
client = pyads.Connection(net_id, port, ip_address)
AdsHub.ADS_TYPEMAP = {
ADSTYPE_BOOL: pyads.PLCTYPE_BOOL,
ADSTYPE_BYTE: pyads.PLCTYPE_BYTE,
ADSTYPE_DINT: pyads.PLCTYPE_DINT,
ADSTYPE_INT: pyads.PLCTYPE_INT,
ADSTYPE_UDINT: pyads.PLCTYPE_UDINT,
ADSTYPE_UINT: pyads.PLCTYPE_UINT,
}
AdsHub.ADSError = pyads.ADSError
AdsHub.PLCTYPE_BOOL = pyads.PLCTYPE_BOOL
AdsHub.PLCTYPE_BYTE = pyads.PLCTYPE_BYTE
AdsHub.PLCTYPE_DINT = pyads.PLCTYPE_DINT
AdsHub.PLCTYPE_INT = pyads.PLCTYPE_INT
AdsHub.PLCTYPE_UDINT = pyads.PLCTYPE_UDINT
AdsHub.PLCTYPE_UINT = pyads.PLCTYPE_UINT
try:
ads = AdsHub(client)
except pyads.ADSError:
_LOGGER.error(
"Could not connect to ADS host (netid=%s, ip=%s, port=%s)",
net_id, ip_address, port)
return False
hass.data[DATA_ADS] = ads
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, ads.shutdown)
def handle_write_data_by_name(call):
"""Write a value to the connected ADS device."""
ads_var = call.data.get(CONF_ADS_VAR)
ads_type = call.data.get(CONF_ADS_TYPE)
value = call.data.get(CONF_ADS_VALUE)
try:
ads.write_by_name(ads_var, value, ads.ADS_TYPEMAP[ads_type])
except pyads.ADSError as err:
_LOGGER.error(err)
hass.services.register(
DOMAIN, SERVICE_WRITE_DATA_BY_NAME, handle_write_data_by_name,
schema=SCHEMA_SERVICE_WRITE_DATA_BY_NAME)
return True
# Tuple to hold data needed for notification
NotificationItem = namedtuple(
'NotificationItem', 'hnotify huser name plc_datatype callback'
)
class AdsHub:
"""Representation of an ADS connection."""
def __init__(self, ads_client):
"""Initialize the ADS hub."""
self._client = ads_client
self._client.open()
# All ADS devices are registered here
self._devices = []
self._notification_items = {}
self._lock = threading.Lock()
def shutdown(self, *args, **kwargs):
"""Shutdown ADS connection."""
import pyads
_LOGGER.debug("Shutting down ADS")
for notification_item in self._notification_items.values():
_LOGGER.debug(
"Deleting device notification %d, %d",
notification_item.hnotify, notification_item.huser)
try:
self._client.del_device_notification(
notification_item.hnotify,
notification_item.huser
)
except pyads.ADSError as err:
_LOGGER.error(err)
try:
self._client.close()
except pyads.ADSError as err:
_LOGGER.error(err)
def register_device(self, device):
"""Register a new device."""
self._devices.append(device)
def write_by_name(self, name, value, plc_datatype):
"""Write a value to the device."""
with self._lock:
return self._client.write_by_name(name, value, plc_datatype)
def read_by_name(self, name, plc_datatype):
"""Read a value from the device."""
with self._lock:
return self._client.read_by_name(name, plc_datatype)
def add_device_notification(self, name, plc_datatype, callback):
"""Add a notification to the ADS devices."""
from pyads import NotificationAttrib
attr = NotificationAttrib(ctypes.sizeof(plc_datatype))
with self._lock:
hnotify, huser = self._client.add_device_notification(
name, attr, self._device_notification_callback)
hnotify = int(hnotify)
self._notification_items[hnotify] = NotificationItem(
hnotify, huser, name, plc_datatype, callback)
_LOGGER.debug(
"Added device notification %d for variable %s", hnotify, name)
def _device_notification_callback(self, notification, name):
"""Handle device notifications."""
contents = notification.contents
hnotify = int(contents.hNotification)
_LOGGER.debug("Received notification %d", hnotify)
data = contents.data
try:
with self._lock:
notification_item = self._notification_items[hnotify]
except KeyError:
_LOGGER.error("Unknown device notification handle: %d", hnotify)
return
# Parse data to desired datatype
if notification_item.plc_datatype == self.PLCTYPE_BOOL:
value = bool(struct.unpack('<?', bytearray(data)[:1])[0])
elif notification_item.plc_datatype == self.PLCTYPE_INT:
value = struct.unpack('<h', bytearray(data)[:2])[0]
elif notification_item.plc_datatype == self.PLCTYPE_BYTE:
value = struct.unpack('<B', bytearray(data)[:1])[0]
elif notification_item.plc_datatype == self.PLCTYPE_UINT:
value = struct.unpack('<H', bytearray(data)[:2])[0]
elif notification_item.plc_datatype == self.PLCTYPE_DINT:
value = struct.unpack('<i', bytearray(data)[:4])[0]
elif notification_item.plc_datatype == self.PLCTYPE_UDINT:
value = struct.unpack('<I', bytearray(data)[:4])[0]
else:
value = bytearray(data)
_LOGGER.warning("No callback available for this datatype")
notification_item.callback(notification_item.name, value)
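# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original component): how a platform
# might subscribe to PLC value changes through the hub stored in
# hass.data[DATA_ADS]. The variable name below is hypothetical.
# ---------------------------------------------------------------------------
def _example_subscribe_to_plc_variable(hass):
    """Minimal sketch: register a change callback for an INT PLC variable."""
    hub = hass.data[DATA_ADS]

    def _on_change(name, value):
        # `value` arrives already unpacked by _device_notification_callback
        # (e.g. a Python int for PLCTYPE_INT).
        _LOGGER.debug("ADS variable %s changed to %s", name, value)

    # PLCTYPE_INT is attached to AdsHub during setup() from pyads.
    hub.add_device_notification('GVL.room_temperature', hub.PLCTYPE_INT, _on_change)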
|
|
from lace.serialization.obj.objutils import LoadObjError # lint isn't able to find the definition in a C++ module pylint: disable=no-name-in-module
EXTENSION = '.obj'
def load(f, existing_mesh=None):
from baiji.serialization.util.openlib import ensure_file_open_and_call
return ensure_file_open_and_call(f, _load, mode='r', mesh=existing_mesh)
def dump(obj, f, flip_faces=False, ungroup=False, comments=None,
copyright=False, split_normals=False, write_mtl=True): # pylint: disable=redefined-outer-name, redefined-builtin, unused-argument
from baiji.serialization.util.openlib import ensure_file_open_and_call
if comments is None:
comments = []
return ensure_file_open_and_call(f, _dump, mode='w', obj=obj, flip_faces=flip_faces,
ungroup=ungroup, comments=comments,
split_normals=split_normals, write_mtl=write_mtl)
def _load(fd, mesh=None):
from collections import OrderedDict
from baiji import s3
from lace.mesh import Mesh
import lace.serialization.obj.objutils as objutils # pylint: disable=no-name-in-module
v, vt, vn, vc, f, ft, fn, mtl_path, landm, segm = objutils.read(fd.name)
if not mesh:
mesh = Mesh()
if v.size != 0:
mesh.v = v
if f.size != 0:
mesh.f = f
if vn.size != 0:
mesh.vn = vn
if vt.size != 0:
mesh.vt = vt
if vc.size != 0:
mesh.vc = vc
if fn.size != 0:
mesh.fn = fn
if ft.size != 0:
mesh.ft = ft
if segm:
mesh.segm = OrderedDict([(k, v if isinstance(v, list) else v.tolist()) for k, v in segm.items()])
def path_relative_to_mesh(filename):
# The OBJ file we're loading may have come from a local path, or an s3
# url. Since OBJ defines materials and texture files with paths
# relative to the OBJ itself, we need to cope with the various
# possibilities and, if it's a cached file, make sure that the material
# and texture have been downloaded as well.
#
# If an absolute path is given and the file is missing, try looking in
# the same directory as the mesh; this covers the most common intent when
# an absolute path is used.
#
# NB: We do not support loading material & texture info from objs read
# from filelike objects without a location on the filesystem; what would
# the relative file names mean in that case, anyway? (unless we're given
# a valid absolute path, in which case go for it)
import os
import re
# The second term here lets us detect Windows absolute paths when we're running on POSIX
if filename == os.path.abspath(filename) or re.match(r'^.\:(\\|/)', filename):
if s3.exists(filename):
return filename
else:
filename = s3.path.basename(filename)
if hasattr(fd, 'remotename'):
mesh_path = fd.remotename
elif hasattr(fd, 'name'):
mesh_path = fd.name
else:
return None
path = s3.path.join(s3.path.dirname(mesh_path), filename)
return path
mesh.materials_filepath = None
if mtl_path:
materials_filepath = path_relative_to_mesh(mtl_path.strip())
if materials_filepath and s3.exists(materials_filepath):
with s3.open(materials_filepath, 'r') as f:
mesh.materials_file = f.readlines()
mesh.materials_filepath = materials_filepath
if hasattr(mesh, 'materials_file'):
mesh.texture_filepaths = {
line.split(None, 1)[0].strip(): path_relative_to_mesh(line.split(None, 1)[1].strip())
for line in mesh.materials_file if line.startswith('map_K')
}
if 'map_Ka' in mesh.texture_filepaths:
mesh.texture_filepath = mesh.texture_filepaths['map_Ka']
elif 'map_Kd' in mesh.texture_filepaths:
mesh.texture_filepath = mesh.texture_filepaths['map_Kd']
if landm:
mesh.landm = landm
return mesh
def _dump(f, obj, flip_faces=False, ungroup=False, comments=None, split_normals=False, write_mtl=True): # pylint: disable=redefined-outer-name
'''
write_mtl: When True and mesh has a texture, includes a mtllib
reference in the .obj and writes a .mtl alongside.
'''
import six
import os
import numpy as np
from baiji import s3
ff = -1 if flip_faces else 1
def write_face_to_obj_file(obj, faces, face_index, obj_file):
vertex_indices = faces[face_index][::ff] + 1
write_normals = obj.fn is not None or (obj.vn is not None and obj.vn.shape == obj.v.shape)
write_texture = obj.ft is not None and obj.vt is not None
if write_normals and obj.fn is not None:
normal_indices = obj.fn[face_index][::ff] + 1
assert len(normal_indices) == len(vertex_indices)
elif write_normals: # unspecified fn but per-vertex normals, assume ordering is same as for v
normal_indices = faces[face_index][::ff] + 1
if write_texture:
texture_indices = obj.ft[face_index][::ff] + 1
assert len(texture_indices) == len(vertex_indices)
# Valid obj face lines are: v, v/vt, v//vn, v/vt/vn
if write_normals and write_texture:
pattern = '%d/%d/%d'
value = tuple(np.array([vertex_indices, texture_indices, normal_indices]).T.flatten())
elif write_normals:
pattern = '%d//%d'
value = tuple(np.array([vertex_indices, normal_indices]).T.flatten())
elif write_texture:
pattern = '%d/%d'
value = tuple(np.array([vertex_indices, texture_indices]).T.flatten())
else:
pattern = '%d'
value = tuple(vertex_indices)
obj_file.write(('f ' + ' '.join([pattern]*len(vertex_indices)) + '\n') % value)
if comments is not None:
if isinstance(comments, six.string_types):
comments = [comments]
for comment in comments:
for line in comment.split("\n"):
f.write("# %s\n" % line)
if write_mtl and hasattr(obj, 'texture_filepath') and obj.texture_filepath is not None:
save_to = s3.path.dirname(f.name)
mtl_name = os.path.splitext(s3.path.basename(f.name))[0]
mtl_filename = mtl_name + '.mtl'
f.write('mtllib %s\n' % mtl_filename)
f.write('usemtl %s\n' % mtl_name)
texture_filename = mtl_name + os.path.splitext(obj.texture_filepath)[1]
if not s3.exists(s3.path.join(save_to, texture_filename)):
s3.cp(obj.texture_filepath, s3.path.join(save_to, texture_filename))
obj.write_mtl(s3.path.join(save_to, mtl_filename), mtl_name, texture_filename)
if obj.vc is not None:
for r, c in zip(obj.v, obj.vc):
f.write('v %f %f %f %f %f %f\n' % (r[0], r[1], r[2], c[0], c[1], c[2]))
elif obj.v is not None:
for r in obj.v:
f.write('v %f %f %f\n' % (r[0], r[1], r[2]))
if obj.vn is not None:
if split_normals:
for vn_idx in obj.fn:
r = obj.vn[vn_idx[0]]
f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
r = obj.vn[vn_idx[1]]
f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
r = obj.vn[vn_idx[2]]
f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
else:
for r in obj.vn:
f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
if obj.ft is not None and obj.vt is not None:
for r in obj.vt:
if len(r) == 3:
f.write('vt %f %f %f\n' % (r[0], r[1], r[2]))
else:
f.write('vt %f %f\n' % (r[0], r[1]))
if obj.f4 is not None:
faces = obj.f4
elif obj.f is not None:
faces = obj.f
else:
faces = None
if obj.segm is not None and not ungroup:
if faces is not None:
# An array of strings.
group_names = np.array(obj.segm.keys())
# A 2d array of booleans indicating which face is in which group.
group_mask = np.zeros((len(group_names), len(faces)), dtype=bool)
for i, segm_faces in enumerate(obj.segm.itervalues()):
group_mask[i][segm_faces] = True
# In an OBJ file, "g" changes the current state. This is a slice of
# group_mask that represents the current state.
current_group_mask = np.zeros((len(group_names),), dtype=bool)
for face_index in range(len(faces)):
# If the group has changed from the previous face, write the
# group entry.
this_group_mask = group_mask[:, face_index]
if any(current_group_mask != this_group_mask):
current_group_mask = this_group_mask
f.write('g %s\n' % ' '.join(group_names[current_group_mask]))
write_face_to_obj_file(obj, faces, face_index, f)
else:
if faces is not None:
for face_index in range(len(faces)):
write_face_to_obj_file(obj, faces, face_index, f)
def write_mtl(path, material_name, texture_name):
from baiji import s3
with s3.open(path, 'w') as f:
f.write('newmtl %s\n' % material_name)
# Ambient/diffuse/specular constants copied from another OBJ's MTL file; the exact values are arbitrary here
f.write('ka 0.329412 0.223529 0.027451\n')
f.write('kd 0.780392 0.568627 0.113725\n')
f.write('ks 0.992157 0.941176 0.807843\n')
f.write('illum 0\n')
f.write('map_Ka %s\n'%texture_name)
f.write('map_Kd %s\n'%texture_name)
f.write('map_Ks %s\n'%texture_name)
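# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal round trip
# through dump() and load(). It assumes lace.mesh.Mesh leaves unset attributes
# (vn, vt, vc, ft, f4, segm, texture_filepath, ...) as None, which is what
# _dump checks for; the output path is hypothetical.
# ---------------------------------------------------------------------------
def _example_roundtrip(path='/tmp/triangle.obj'):
    import numpy as np
    from lace.mesh import Mesh
    mesh = Mesh()
    mesh.v = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])  # three vertices
    mesh.f = np.array([[0, 1, 2]])                                 # one triangular face
    dump(mesh, path)     # writes 'v ...' and 'f ...' lines via _dump
    return load(path)    # parses the file back into a Mesh via _load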
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from copy import deepcopy
from unittest import mock
from airflow.providers.amazon.aws.hooks.glue_crawler import AwsGlueCrawlerHook
mock_crawler_name = 'test-crawler'
mock_role_name = 'test-role'
mock_config = {
'Name': mock_crawler_name,
'Description': 'Test glue crawler from Airflow',
'DatabaseName': 'test_db',
'Role': mock_role_name,
'Targets': {
'S3Targets': [
{
'Path': 's3://test-glue-crawler/foo/',
'Exclusions': [
's3://test-glue-crawler/bar/',
],
'ConnectionName': 'test-s3-conn',
}
],
'JdbcTargets': [
{
'ConnectionName': 'test-jdbc-conn',
'Path': 'test_db/test_table',
'Exclusions': [
'string',
],
}
],
'MongoDBTargets': [
{'ConnectionName': 'test-mongo-conn', 'Path': 'test_db/test_collection', 'ScanAll': True}
],
'DynamoDBTargets': [{'Path': 'test_db/test_table', 'scanAll': True, 'scanRate': 123.0}],
'CatalogTargets': [
{
'DatabaseName': 'test_glue_db',
'Tables': [
'test',
],
}
],
},
'Classifiers': ['test-classifier'],
'TablePrefix': 'test',
'SchemaChangePolicy': {
'UpdateBehavior': 'UPDATE_IN_DATABASE',
'DeleteBehavior': 'DEPRECATE_IN_DATABASE',
},
'RecrawlPolicy': {'RecrawlBehavior': 'CRAWL_EVERYTHING'},
'LineageConfiguration': 'ENABLE',
'Configuration': """
{
"Version": 1.0,
"CrawlerOutput": {
"Partitions": { "AddOrUpdateBehavior": "InheritFromTable" }
}
}
""",
'SecurityConfiguration': 'test',
'Tags': {'test': 'foo'},
}
class TestAwsGlueCrawlerHook(unittest.TestCase):
@classmethod
def setUp(cls):
cls.hook = AwsGlueCrawlerHook(aws_conn_id="aws_default")
def test_init(self):
self.assertEqual(self.hook.aws_conn_id, "aws_default")
@mock.patch.object(AwsGlueCrawlerHook, "get_conn")
def test_has_crawler(self, mock_get_conn):
response = self.hook.has_crawler(mock_crawler_name)
self.assertEqual(response, True)
mock_get_conn.return_value.get_crawler.assert_called_once_with(Name=mock_crawler_name)
@mock.patch.object(AwsGlueCrawlerHook, "get_conn")
def test_has_crawler_crawler_doesnt_exist(self, mock_get_conn):
class MockException(Exception):
pass
mock_get_conn.return_value.exceptions.EntityNotFoundException = MockException
mock_get_conn.return_value.get_crawler.side_effect = MockException("AAA")
response = self.hook.has_crawler(mock_crawler_name)
self.assertEqual(response, False)
mock_get_conn.return_value.get_crawler.assert_called_once_with(Name=mock_crawler_name)
@mock.patch.object(AwsGlueCrawlerHook, "get_conn")
def test_update_crawler_needed(self, mock_get_conn):
mock_get_conn.return_value.get_crawler.return_value = {'Crawler': mock_config}
mock_config_two = deepcopy(mock_config)
mock_config_two['Role'] = 'test-2-role'
response = self.hook.update_crawler(**mock_config_two)
self.assertEqual(response, True)
mock_get_conn.return_value.get_crawler.assert_called_once_with(Name=mock_crawler_name)
mock_get_conn.return_value.update_crawler.assert_called_once_with(**mock_config_two)
@mock.patch.object(AwsGlueCrawlerHook, "get_conn")
def test_update_crawler_not_needed(self, mock_get_conn):
mock_get_conn.return_value.get_crawler.return_value = {'Crawler': mock_config}
response = self.hook.update_crawler(**mock_config)
self.assertEqual(response, False)
mock_get_conn.return_value.get_crawler.assert_called_once_with(Name=mock_crawler_name)
@mock.patch.object(AwsGlueCrawlerHook, "get_conn")
def test_create_crawler(self, mock_get_conn):
mock_get_conn.return_value.create_crawler.return_value = {'Crawler': {'Name': mock_crawler_name}}
glue_crawler = self.hook.create_crawler(**mock_config)
self.assertIn("Crawler", glue_crawler)
self.assertIn("Name", glue_crawler["Crawler"])
self.assertEqual(glue_crawler["Crawler"]["Name"], mock_crawler_name)
@mock.patch.object(AwsGlueCrawlerHook, "get_conn")
def test_start_crawler(self, mock_get_conn):
result = self.hook.start_crawler(mock_crawler_name)
self.assertEqual(result, mock_get_conn.return_value.start_crawler.return_value)
mock_get_conn.return_value.start_crawler.assert_called_once_with(Name=mock_crawler_name)
@mock.patch.object(AwsGlueCrawlerHook, "get_crawler")
@mock.patch.object(AwsGlueCrawlerHook, "get_conn")
def test_wait_for_crawler_completion_instant_ready(self, mock_get_conn, mock_get_crawler):
mock_get_crawler.side_effect = [
{'State': 'READY', 'LastCrawl': {'Status': 'MOCK_STATUS'}},
]
mock_get_conn.return_value.get_crawler_metrics.return_value = {
'CrawlerMetricsList': [
{
'LastRuntimeSeconds': 'TEST-A',
'MedianRuntimeSeconds': 'TEST-B',
'TablesCreated': 'TEST-C',
'TablesUpdated': 'TEST-D',
'TablesDeleted': 'TEST-E',
}
]
}
result = self.hook.wait_for_crawler_completion(mock_crawler_name)
self.assertEqual(result, 'MOCK_STATUS')
mock_get_conn.assert_has_calls(
[
mock.call(),
mock.call().get_crawler_metrics(CrawlerNameList=[mock_crawler_name]),
]
)
mock_get_crawler.assert_has_calls(
[
mock.call(mock_crawler_name),
]
)
@mock.patch.object(AwsGlueCrawlerHook, "get_conn")
@mock.patch.object(AwsGlueCrawlerHook, "get_crawler")
@mock.patch('airflow.providers.amazon.aws.hooks.glue_crawler.sleep')
def test_wait_for_crawler_completion_retry_two_times(self, mock_sleep, mock_get_crawler, mock_get_conn):
mock_get_crawler.side_effect = [
{'State': 'RUNNING'},
{'State': 'READY', 'LastCrawl': {'Status': 'MOCK_STATUS'}},
]
mock_get_conn.return_value.get_crawler_metrics.side_effect = [
{'CrawlerMetricsList': [{'TimeLeftSeconds': 12}]},
{
'CrawlerMetricsList': [
{
'LastRuntimeSeconds': 'TEST-A',
'MedianRuntimeSeconds': 'TEST-B',
'TablesCreated': 'TEST-C',
'TablesUpdated': 'TEST-D',
'TablesDeleted': 'TEST-E',
}
]
},
]
result = self.hook.wait_for_crawler_completion(mock_crawler_name)
self.assertEqual(result, 'MOCK_STATUS')
mock_get_conn.assert_has_calls(
[
mock.call(),
mock.call().get_crawler_metrics(CrawlerNameList=[mock_crawler_name]),
]
)
mock_get_crawler.assert_has_calls(
[
mock.call(mock_crawler_name),
mock.call(mock_crawler_name),
]
)
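# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): the create-or-update
# flow that the tests above exercise, using only hook methods mocked here.
# The connection id and config reuse the mock values defined above.
# ---------------------------------------------------------------------------
def _example_sync_and_run_crawler(config=mock_config):
    hook = AwsGlueCrawlerHook(aws_conn_id='aws_default')
    if hook.has_crawler(config['Name']):
        hook.update_crawler(**config)   # returns True only when an update was needed
    else:
        hook.create_crawler(**config)
    hook.start_crawler(config['Name'])
    # Blocks until the crawler reaches READY and returns the last crawl status.
    return hook.wait_for_crawler_completion(config['Name'])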
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import division
import unittest
from math import sqrt, pi
from ..path import CubicBezier, QuadraticBezier, Line, Arc, Path
# Most of these test points are not calculated separately, as that would
# take too long and be too error prone. Instead the curves have been verified
# to be correct visually, by drawing them with the turtle module, with code
# like this:
#
# import turtle
# t = turtle.Turtle()
# t.penup()
#
# for arc in (path1, path2):
# p = arc.point(0)
# t.goto(p.real - 500, -p.imag + 300)
# t.dot(3, 'black')
# t.pendown()
# for x in range(1, 101):
# p = arc.point(x * 0.01)
# t.goto(p.real - 500, -p.imag + 300)
# t.penup()
# t.dot(3, 'black')
#
# raw_input()
#
# After the paths have been verified to be correct this way, the testing of
# points along the paths has been added as regression tests, to make sure
# nobody changes the way curves are drawn by mistake. Therefore, do not take
# these points religiously. They might be subtly wrong, unless otherwise
# noted.
class LineTest(unittest.TestCase):
def test_lines(self):
# These points are calculated, and not just regression tests.
line1 = Line(0j, 400 + 0j)
self.assertAlmostEqual(line1.point(0), (0j))
self.assertAlmostEqual(line1.point(0.3), (120 + 0j))
self.assertAlmostEqual(line1.point(0.5), (200 + 0j))
self.assertAlmostEqual(line1.point(0.9), (360 + 0j))
self.assertAlmostEqual(line1.point(1), (400 + 0j))
self.assertAlmostEqual(line1.length(), 400)
line2 = Line(400 + 0j, 400 + 300j)
self.assertAlmostEqual(line2.point(0), (400 + 0j))
self.assertAlmostEqual(line2.point(0.3), (400 + 90j))
self.assertAlmostEqual(line2.point(0.5), (400 + 150j))
self.assertAlmostEqual(line2.point(0.9), (400 + 270j))
self.assertAlmostEqual(line2.point(1), (400 + 300j))
self.assertAlmostEqual(line2.length(), 300)
line3 = Line(400 + 300j, 0j)
self.assertAlmostEqual(line3.point(0), (400 + 300j))
self.assertAlmostEqual(line3.point(0.3), (280 + 210j))
self.assertAlmostEqual(line3.point(0.5), (200 + 150j))
self.assertAlmostEqual(line3.point(0.9), (40 + 30j))
self.assertAlmostEqual(line3.point(1), (0j))
self.assertAlmostEqual(line3.length(), 500)
def test_equality(self):
# This is to test the __eq__ and __ne__ methods, so we can't use
# assertEqual and assertNotEqual
line = Line(0j, 400 + 0j)
self.assertTrue(line == Line(0, 400))
self.assertTrue(line != Line(100, 400))
self.assertFalse(line == str(line))
self.assertTrue(line != str(line))
self.assertFalse(CubicBezier(600 + 500j, 600 + 350j, 900 + 650j, 900 + 500j) ==
line)
class CubicBezierTest(unittest.TestCase):
def test_approx_circle(self):
"""This is a approximate circle drawn in Inkscape"""
arc1 = CubicBezier(
complex(0, 0),
complex(0, 109.66797),
complex(-88.90345, 198.57142),
complex(-198.57142, 198.57142)
)
self.assertAlmostEqual(arc1.point(0), (0j))
self.assertAlmostEqual(arc1.point(0.1), (-2.59896457 + 32.20931647j))
self.assertAlmostEqual(arc1.point(0.2), (-10.12330256 + 62.76392816j))
self.assertAlmostEqual(arc1.point(0.3), (-22.16418039 + 91.25500149j))
self.assertAlmostEqual(arc1.point(0.4), (-38.31276448 + 117.27370288j))
self.assertAlmostEqual(arc1.point(0.5), (-58.16022125 + 140.41119875j))
self.assertAlmostEqual(arc1.point(0.6), (-81.29771712 + 160.25865552j))
self.assertAlmostEqual(arc1.point(0.7), (-107.31641851 + 176.40723961j))
self.assertAlmostEqual(arc1.point(0.8), (-135.80749184 + 188.44811744j))
self.assertAlmostEqual(arc1.point(0.9), (-166.36210353 + 195.97245543j))
self.assertAlmostEqual(arc1.point(1), (-198.57142 + 198.57142j))
arc2 = CubicBezier(
complex(-198.57142, 198.57142),
complex(-109.66797 - 198.57142, 0 + 198.57142),
complex(-198.57143 - 198.57142, -88.90345 + 198.57142),
complex(-198.57143 - 198.57142, 0),
)
self.assertAlmostEqual(arc2.point(0), (-198.57142 + 198.57142j))
self.assertAlmostEqual(arc2.point(0.1), (-230.78073675 + 195.97245543j))
self.assertAlmostEqual(arc2.point(0.2), (-261.3353492 + 188.44811744j))
self.assertAlmostEqual(arc2.point(0.3), (-289.82642365 + 176.40723961j))
self.assertAlmostEqual(arc2.point(0.4), (-315.8451264 + 160.25865552j))
self.assertAlmostEqual(arc2.point(0.5), (-338.98262375 + 140.41119875j))
self.assertAlmostEqual(arc2.point(0.6), (-358.830082 + 117.27370288j))
self.assertAlmostEqual(arc2.point(0.7), (-374.97866745 + 91.25500149j))
self.assertAlmostEqual(arc2.point(0.8), (-387.0195464 + 62.76392816j))
self.assertAlmostEqual(arc2.point(0.9), (-394.54388515 + 32.20931647j))
self.assertAlmostEqual(arc2.point(1), (-397.14285 + 0j))
arc3 = CubicBezier(
complex(-198.57143 - 198.57142, 0),
complex(0 - 198.57143 - 198.57142, -109.66797),
complex(88.90346 - 198.57143 - 198.57142, -198.57143),
complex(-198.57142, -198.57143)
)
self.assertAlmostEqual(arc3.point(0), (-397.14285 + 0j))
self.assertAlmostEqual(arc3.point(0.1), (-394.54388515 - 32.20931675j))
self.assertAlmostEqual(arc3.point(0.2), (-387.0195464 - 62.7639292j))
self.assertAlmostEqual(arc3.point(0.3), (-374.97866745 - 91.25500365j))
self.assertAlmostEqual(arc3.point(0.4), (-358.830082 - 117.2737064j))
self.assertAlmostEqual(arc3.point(0.5), (-338.98262375 - 140.41120375j))
self.assertAlmostEqual(arc3.point(0.6), (-315.8451264 - 160.258662j))
self.assertAlmostEqual(arc3.point(0.7), (-289.82642365 - 176.40724745j))
self.assertAlmostEqual(arc3.point(0.8), (-261.3353492 - 188.4481264j))
self.assertAlmostEqual(arc3.point(0.9), (-230.78073675 - 195.97246515j))
self.assertAlmostEqual(arc3.point(1), (-198.57142 - 198.57143j))
arc4 = CubicBezier(
complex(-198.57142, -198.57143),
complex(109.66797 - 198.57142, 0 - 198.57143),
complex(0, 88.90346 - 198.57143),
complex(0, 0),
)
self.assertAlmostEqual(arc4.point(0), (-198.57142 - 198.57143j))
self.assertAlmostEqual(arc4.point(0.1), (-166.36210353 - 195.97246515j))
self.assertAlmostEqual(arc4.point(0.2), (-135.80749184 - 188.4481264j))
self.assertAlmostEqual(arc4.point(0.3), (-107.31641851 - 176.40724745j))
self.assertAlmostEqual(arc4.point(0.4), (-81.29771712 - 160.258662j))
self.assertAlmostEqual(arc4.point(0.5), (-58.16022125 - 140.41120375j))
self.assertAlmostEqual(arc4.point(0.6), (-38.31276448 - 117.2737064j))
self.assertAlmostEqual(arc4.point(0.7), (-22.16418039 - 91.25500365j))
self.assertAlmostEqual(arc4.point(0.8), (-10.12330256 - 62.7639292j))
self.assertAlmostEqual(arc4.point(0.9), (-2.59896457 - 32.20931675j))
self.assertAlmostEqual(arc4.point(1), (0j))
def test_svg_examples(self):
# M100,200 C100,100 250,100 250,200
path1 = CubicBezier(100 + 200j, 100 + 100j, 250 + 100j, 250 + 200j)
self.assertAlmostEqual(path1.point(0), (100 + 200j))
self.assertAlmostEqual(path1.point(0.3), (132.4 + 137j))
self.assertAlmostEqual(path1.point(0.5), (175 + 125j))
self.assertAlmostEqual(path1.point(0.9), (245.8 + 173j))
self.assertAlmostEqual(path1.point(1), (250 + 200j))
# S400,300 400,200
path2 = CubicBezier(250 + 200j, 250 + 300j, 400 + 300j, 400 + 200j)
self.assertAlmostEqual(path2.point(0), (250 + 200j))
self.assertAlmostEqual(path2.point(0.3), (282.4 + 263j))
self.assertAlmostEqual(path2.point(0.5), (325 + 275j))
self.assertAlmostEqual(path2.point(0.9), (395.8 + 227j))
self.assertAlmostEqual(path2.point(1), (400 + 200j))
# M100,200 C100,100 400,100 400,200
path3 = CubicBezier(100 + 200j, 100 + 100j, 400 + 100j, 400 + 200j)
self.assertAlmostEqual(path3.point(0), (100 + 200j))
self.assertAlmostEqual(path3.point(0.3), (164.8 + 137j))
self.assertAlmostEqual(path3.point(0.5), (250 + 125j))
self.assertAlmostEqual(path3.point(0.9), (391.6 + 173j))
self.assertAlmostEqual(path3.point(1), (400 + 200j))
# M100,500 C25,400 475,400 400,500
path4 = CubicBezier(100 + 500j, 25 + 400j, 475 + 400j, 400 + 500j)
self.assertAlmostEqual(path4.point(0), (100 + 500j))
self.assertAlmostEqual(path4.point(0.3), (145.9 + 437j))
self.assertAlmostEqual(path4.point(0.5), (250 + 425j))
self.assertAlmostEqual(path4.point(0.9), (407.8 + 473j))
self.assertAlmostEqual(path4.point(1), (400 + 500j))
# M100,800 C175,700 325,700 400,800
path5 = CubicBezier(100 + 800j, 175 + 700j, 325 + 700j, 400 + 800j)
self.assertAlmostEqual(path5.point(0), (100 + 800j))
self.assertAlmostEqual(path5.point(0.3), (183.7 + 737j))
self.assertAlmostEqual(path5.point(0.5), (250 + 725j))
self.assertAlmostEqual(path5.point(0.9), (375.4 + 773j))
self.assertAlmostEqual(path5.point(1), (400 + 800j))
# M600,200 C675,100 975,100 900,200
path6 = CubicBezier(600 + 200j, 675 + 100j, 975 + 100j, 900 + 200j)
self.assertAlmostEqual(path6.point(0), (600 + 200j))
self.assertAlmostEqual(path6.point(0.3), (712.05 + 137j))
self.assertAlmostEqual(path6.point(0.5), (806.25 + 125j))
self.assertAlmostEqual(path6.point(0.9), (911.85 + 173j))
self.assertAlmostEqual(path6.point(1), (900 + 200j))
# M600,500 C600,350 900,650 900,500
path7 = CubicBezier(600 + 500j, 600 + 350j, 900 + 650j, 900 + 500j)
self.assertAlmostEqual(path7.point(0), (600 + 500j))
self.assertAlmostEqual(path7.point(0.3), (664.8 + 462.2j))
self.assertAlmostEqual(path7.point(0.5), (750 + 500j))
self.assertAlmostEqual(path7.point(0.9), (891.6 + 532.4j))
self.assertAlmostEqual(path7.point(1), (900 + 500j))
# M600,800 C625,700 725,700 750,800
path8 = CubicBezier(600 + 800j, 625 + 700j, 725 + 700j, 750 + 800j)
self.assertAlmostEqual(path8.point(0), (600 + 800j))
self.assertAlmostEqual(path8.point(0.3), (638.7 + 737j))
self.assertAlmostEqual(path8.point(0.5), (675 + 725j))
self.assertAlmostEqual(path8.point(0.9), (740.4 + 773j))
self.assertAlmostEqual(path8.point(1), (750 + 800j))
# S875,900 900,800
inversion = (750 + 800j) + (750 + 800j) - (725 + 700j)
path9 = CubicBezier(750 + 800j, inversion, 875 + 900j, 900 + 800j)
self.assertAlmostEqual(path9.point(0), (750 + 800j))
self.assertAlmostEqual(path9.point(0.3), (788.7 + 863j))
self.assertAlmostEqual(path9.point(0.5), (825 + 875j))
self.assertAlmostEqual(path9.point(0.9), (890.4 + 827j))
self.assertAlmostEqual(path9.point(1), (900 + 800j))
def test_length(self):
# A straight line:
arc = CubicBezier(
complex(0, 0),
complex(0, 0),
complex(0, 100),
complex(0, 100)
)
self.assertAlmostEqual(arc.length(), 100)
# A diagonal line:
arc = CubicBezier(
complex(0, 0),
complex(0, 0),
complex(100, 100),
complex(100, 100)
)
self.assertAlmostEqual(arc.length(), sqrt(2 * 100 * 100))
# A quarter circle arc with radius 100:
kappa = 4 * (sqrt(2) - 1) / 3 # http://www.whizkidtech.redprince.net/bezier/circle/
arc = CubicBezier(
complex(0, 0),
complex(0, kappa * 100),
complex(100 - kappa * 100, 100),
complex(100, 100)
)
# We can't compare with pi*50 here, because this is just an
# approximation of a circle arc. pi*50 is 157.079632679
# So this is just yet another "warn if this changes" test.
# This value is not verified to be correct.
self.assertAlmostEqual(arc.length(), 157.1016698)
# A recursive solution has also been suggested, but for CubicBezier
# curves it could get a false solution on curves where the midpoint is on a
# straight line between the start and end. For example, the following
# curve would get solved as a straight line and get the length 300.
# Make sure this is not the case.
arc = CubicBezier(
complex(600, 500),
complex(600, 350),
complex(900, 650),
complex(900, 500)
)
self.assertTrue(arc.length() > 300.0)
def test_equality(self):
# This is to test the __eq__ and __ne__ methods, so we can't use
# assertEqual and assertNotEqual
segment = CubicBezier(complex(600, 500), complex(600, 350),
complex(900, 650), complex(900, 500))
self.assertTrue(segment ==
CubicBezier(600 + 500j, 600 + 350j, 900 + 650j, 900 + 500j))
self.assertTrue(segment !=
CubicBezier(600 + 501j, 600 + 350j, 900 + 650j, 900 + 500j))
self.assertTrue(segment != Line(0, 400))
class QuadraticBezierTest(unittest.TestCase):
def test_svg_examples(self):
"""These is the path in the SVG specs"""
# M200,300 Q400,50 600,300 T1000,300
path1 = QuadraticBezier(200 + 300j, 400 + 50j, 600 + 300j)
self.assertAlmostEqual(path1.point(0), (200 + 300j))
self.assertAlmostEqual(path1.point(0.3), (320 + 195j))
self.assertAlmostEqual(path1.point(0.5), (400 + 175j))
self.assertAlmostEqual(path1.point(0.9), (560 + 255j))
self.assertAlmostEqual(path1.point(1), (600 + 300j))
# T1000, 300
inversion = (600 + 300j) + (600 + 300j) - (400 + 50j)
path2 = QuadraticBezier(600 + 300j, inversion, 1000 + 300j)
self.assertAlmostEqual(path2.point(0), (600 + 300j))
self.assertAlmostEqual(path2.point(0.3), (720 + 405j))
self.assertAlmostEqual(path2.point(0.5), (800 + 425j))
self.assertAlmostEqual(path2.point(0.9), (960 + 345j))
self.assertAlmostEqual(path2.point(1), (1000 + 300j))
def test_length(self):
# expected results calculated with
# svg.path.segment_length(q, 0, 1, q.start, q.end, 1e-14, 20, 0)
q1 = QuadraticBezier(200 + 300j, 400 + 50j, 600 + 300j)
q2 = QuadraticBezier(200 + 300j, 400 + 50j, 500 + 200j)
closedq = QuadraticBezier(6+2j, 5-1j, 6+2j)
linq1 = QuadraticBezier(1, 2, 3)
linq2 = QuadraticBezier(1+3j, 2+5j, -9 - 17j)
nodalq = QuadraticBezier(1, 1, 1)
tests = [(q1, 487.77109389525975),
(q2, 379.90458193489155),
(closedq, 3.1622776601683795),
(linq1, 2),
(linq2, 22.73335777124786),
(nodalq, 0)]
for q, exp_res in tests:
self.assertAlmostEqual(q.length(), exp_res)
def test_equality(self):
# This is to test the __eq__ and __ne__ methods, so we can't use
# assertEqual and assertNotEqual
segment = QuadraticBezier(200 + 300j, 400 + 50j, 600 + 300j)
self.assertTrue(segment == QuadraticBezier(200 + 300j, 400 + 50j, 600 + 300j))
self.assertTrue(segment != QuadraticBezier(200 + 301j, 400 + 50j, 600 + 300j))
self.assertFalse(segment == Arc(0j, 100 + 50j, 0, 0, 0, 100 + 50j))
self.assertTrue(Arc(0j, 100 + 50j, 0, 0, 0, 100 + 50j) != segment)
class ArcTest(unittest.TestCase):
def test_points(self):
arc1 = Arc(0j, 100 + 50j, 0, 0, 0, 100 + 50j)
self.assertAlmostEqual(arc1.center, 100 + 0j)
self.assertAlmostEqual(arc1.theta, 180.0)
self.assertAlmostEqual(arc1.delta, -90.0)
self.assertAlmostEqual(arc1.point(0.0), (0j))
self.assertAlmostEqual(arc1.point(0.1), (1.23116594049 + 7.82172325201j))
self.assertAlmostEqual(arc1.point(0.2), (4.89434837048 + 15.4508497187j))
self.assertAlmostEqual(arc1.point(0.3), (10.8993475812 + 22.699524987j))
self.assertAlmostEqual(arc1.point(0.4), (19.0983005625 + 29.3892626146j))
self.assertAlmostEqual(arc1.point(0.5), (29.2893218813 + 35.3553390593j))
self.assertAlmostEqual(arc1.point(0.6), (41.2214747708 + 40.4508497187j))
self.assertAlmostEqual(arc1.point(0.7), (54.6009500260 + 44.5503262094j))
self.assertAlmostEqual(arc1.point(0.8), (69.0983005625 + 47.5528258148j))
self.assertAlmostEqual(arc1.point(0.9), (84.3565534960 + 49.3844170298j))
self.assertAlmostEqual(arc1.point(1.0), (100 + 50j))
arc2 = Arc(0j, 100 + 50j, 0, 1, 0, 100 + 50j)
self.assertAlmostEqual(arc2.center, 50j)
self.assertAlmostEqual(arc2.theta, 270.0)
self.assertAlmostEqual(arc2.delta, -270.0)
self.assertAlmostEqual(arc2.point(0.0), (0j))
self.assertAlmostEqual(arc2.point(0.1), (-45.399049974 + 5.44967379058j))
self.assertAlmostEqual(arc2.point(0.2), (-80.9016994375 + 20.6107373854j))
self.assertAlmostEqual(arc2.point(0.3), (-98.7688340595 + 42.178276748j))
self.assertAlmostEqual(arc2.point(0.4), (-95.1056516295 + 65.4508497187j))
self.assertAlmostEqual(arc2.point(0.5), (-70.7106781187 + 85.3553390593j))
self.assertAlmostEqual(arc2.point(0.6), (-30.9016994375 + 97.5528258148j))
self.assertAlmostEqual(arc2.point(0.7), (15.643446504 + 99.3844170298j))
self.assertAlmostEqual(arc2.point(0.8), (58.7785252292 + 90.4508497187j))
self.assertAlmostEqual(arc2.point(0.9), (89.1006524188 + 72.699524987j))
self.assertAlmostEqual(arc2.point(1.0), (100 + 50j))
arc3 = Arc(0j, 100 + 50j, 0, 0, 1, 100 + 50j)
self.assertAlmostEqual(arc3.center, 50j)
self.assertAlmostEqual(arc3.theta, 270.0)
self.assertAlmostEqual(arc3.delta, 90.0)
self.assertAlmostEqual(arc3.point(0.0), (0j))
self.assertAlmostEqual(arc3.point(0.1), (15.643446504 + 0.615582970243j))
self.assertAlmostEqual(arc3.point(0.2), (30.9016994375 + 2.44717418524j))
self.assertAlmostEqual(arc3.point(0.3), (45.399049974 + 5.44967379058j))
self.assertAlmostEqual(arc3.point(0.4), (58.7785252292 + 9.54915028125j))
self.assertAlmostEqual(arc3.point(0.5), (70.7106781187 + 14.6446609407j))
self.assertAlmostEqual(arc3.point(0.6), (80.9016994375 + 20.6107373854j))
self.assertAlmostEqual(arc3.point(0.7), (89.1006524188 + 27.300475013j))
self.assertAlmostEqual(arc3.point(0.8), (95.1056516295 + 34.5491502813j))
self.assertAlmostEqual(arc3.point(0.9), (98.7688340595 + 42.178276748j))
self.assertAlmostEqual(arc3.point(1.0), (100 + 50j))
arc4 = Arc(0j, 100 + 50j, 0, 1, 1, 100 + 50j)
self.assertAlmostEqual(arc4.center, 100 + 0j)
self.assertAlmostEqual(arc4.theta, 180.0)
self.assertAlmostEqual(arc4.delta, 270.0)
self.assertAlmostEqual(arc4.point(0.0), (0j))
self.assertAlmostEqual(arc4.point(0.1), (10.8993475812 - 22.699524987j))
self.assertAlmostEqual(arc4.point(0.2), (41.2214747708 - 40.4508497187j))
self.assertAlmostEqual(arc4.point(0.3), (84.3565534960 - 49.3844170298j))
self.assertAlmostEqual(arc4.point(0.4), (130.901699437 - 47.5528258148j))
self.assertAlmostEqual(arc4.point(0.5), (170.710678119 - 35.3553390593j))
self.assertAlmostEqual(arc4.point(0.6), (195.105651630 - 15.4508497187j))
self.assertAlmostEqual(arc4.point(0.7), (198.768834060 + 7.82172325201j))
self.assertAlmostEqual(arc4.point(0.8), (180.901699437 + 29.3892626146j))
self.assertAlmostEqual(arc4.point(0.9), (145.399049974 + 44.5503262094j))
self.assertAlmostEqual(arc4.point(1.0), (100 + 50j))
def test_length(self):
# I'll test the length calculations by making a circle, in two parts.
arc1 = Arc(0j, 100 + 100j, 0, 0, 0, 200 + 0j)
arc2 = Arc(200 + 0j, 100 + 100j, 0, 0, 0, 0j)
self.assertAlmostEqual(arc1.length(), pi * 100)
self.assertAlmostEqual(arc2.length(), pi * 100)
def test_equality(self):
# This is to test the __eq__ and __ne__ methods, so we can't use
# assertEqual and assertNotEqual
segment = Arc(0j, 100 + 50j, 0, 0, 0, 100 + 50j)
self.assertTrue(segment == Arc(0j, 100 + 50j, 0, 0, 0, 100 + 50j))
self.assertTrue(segment != Arc(0j, 100 + 50j, 0, 1, 0, 100 + 50j))
class TestPath(unittest.TestCase):
def test_circle(self):
arc1 = Arc(0j, 100 + 100j, 0, 0, 0, 200 + 0j)
arc2 = Arc(200 + 0j, 100 + 100j, 0, 0, 0, 0j)
path = Path(arc1, arc2)
self.assertAlmostEqual(path.point(0.0), (0j))
self.assertAlmostEqual(path.point(0.25), (100 + 100j))
self.assertAlmostEqual(path.point(0.5), (200 + 0j))
self.assertAlmostEqual(path.point(0.75), (100 - 100j))
self.assertAlmostEqual(path.point(1.0), (0j))
self.assertAlmostEqual(path.length(), pi * 200)
def test_svg_specs(self):
"""The paths that are in the SVG specs"""
# Big pie: M300,200 h-150 a150,150 0 1,0 150,-150 z
path = Path(Line(300 + 200j, 150 + 200j),
Arc(150 + 200j, 150 + 150j, 0, 1, 0, 300 + 50j),
Line(300 + 50j, 300 + 200j))
# The points and length for this path are calculated and not regression tests.
self.assertAlmostEqual(path.point(0.0), (300 + 200j))
self.assertAlmostEqual(path.point(0.14897825542), (150 + 200j))
self.assertAlmostEqual(path.point(0.5), (406.066017177 + 306.066017177j))
self.assertAlmostEqual(path.point(1 - 0.14897825542), (300 + 50j))
self.assertAlmostEqual(path.point(1.0), (300 + 200j))
# The errors seem to accumulate. Still 6 decimal places is more than good enough.
self.assertAlmostEqual(path.length(), pi * 225 + 300, places=6)
# Little pie: M275,175 v-150 a150,150 0 0,0 -150,150 z
path = Path(Line(275 + 175j, 275 + 25j),
Arc(275 + 25j, 150 + 150j, 0, 0, 0, 125 + 175j),
Line(125 + 175j, 275 + 175j))
# The points and length for this path are calculated and not regression tests.
self.assertAlmostEqual(path.point(0.0), (275 + 175j))
self.assertAlmostEqual(path.point(0.2800495767557787), (275 + 25j))
self.assertAlmostEqual(path.point(0.5), (168.93398282201787 + 68.93398282201787j))
self.assertAlmostEqual(path.point(1 - 0.2800495767557787), (125 + 175j))
self.assertAlmostEqual(path.point(1.0), (275 + 175j))
# The errors seem to accumulate. Still 6 decimal places is more than good enough.
self.assertAlmostEqual(path.length(), pi * 75 + 300, places=6)
# Bumpy path: M600,350 l 50,-25
# a25,25 -30 0,1 50,-25 l 50,-25
# a25,50 -30 0,1 50,-25 l 50,-25
# a25,75 -30 0,1 50,-25 l 50,-25
# a25,100 -30 0,1 50,-25 l 50,-25
path = Path(Line(600 + 350j, 650 + 325j),
Arc(650 + 325j, 25 + 25j, -30, 0, 1, 700 + 300j),
Line(700 + 300j, 750 + 275j),
Arc(750 + 275j, 25 + 50j, -30, 0, 1, 800 + 250j),
Line(800 + 250j, 850 + 225j),
Arc(850 + 225j, 25 + 75j, -30, 0, 1, 900 + 200j),
Line(900 + 200j, 950 + 175j),
Arc(950 + 175j, 25 + 100j, -30, 0, 1, 1000 + 150j),
Line(1000 + 150j, 1050 + 125j),
)
# These are *not* calculated, but just regression tests. Be skeptical.
self.assertAlmostEqual(path.point(0.0), (600 + 350j))
self.assertAlmostEqual(path.point(0.3), (755.31526434 + 217.51578768j))
self.assertAlmostEqual(path.point(0.5), (832.23324151 + 156.33454892j))
self.assertAlmostEqual(path.point(0.9), (974.00559321 + 115.26473532j))
self.assertAlmostEqual(path.point(1.0), (1050 + 125j))
# The errors seem to accumulate. Still 6 decimal places is more than good enough.
self.assertAlmostEqual(path.length(), 860.6756221710)
def test_repr(self):
path = Path(
Line(start=600 + 350j, end=650 + 325j),
Arc(start=650 + 325j, radius=25 + 25j, rotation=-30, arc=0, sweep=1, end=700 + 300j),
CubicBezier(start=700 + 300j, control1=800 + 400j, control2=750 + 200j, end=600 + 100j),
QuadraticBezier(start=600 + 100j, control=600, end=600 + 300j))
self.assertEqual(eval(repr(path)), path)
def test_reverse(self):
# Currently you can't reverse paths.
self.assertRaises(NotImplementedError, Path().reverse)
def test_equality(self):
# This is to test the __eq__ and __ne__ methods, so we can't use
# assertEqual and assertNotEqual
path1 = Path(
Line(start=600 + 350j, end=650 + 325j),
Arc(start=650 + 325j, radius=25 + 25j, rotation=-30, arc=0, sweep=1, end=700 + 300j),
CubicBezier(start=700 + 300j, control1=800 + 400j, control2=750 + 200j, end=600 + 100j),
QuadraticBezier(start=600 + 100j, control=600, end=600 + 300j))
path2 = Path(
Line(start=600 + 350j, end=650 + 325j),
Arc(start=650 + 325j, radius=25 + 25j, rotation=-30, arc=0, sweep=1, end=700 + 300j),
CubicBezier(start=700 + 300j, control1=800 + 400j, control2=750 + 200j, end=600 + 100j),
QuadraticBezier(start=600 + 100j, control=600, end=600 + 300j))
self.assertTrue(path1 == path2)
# Modify path2:
path2[0].start = 601 + 350j
self.assertTrue(path1 != path2)
# Modify back:
path2[0].start = 600 + 350j
self.assertFalse(path1 != path2)
# Get rid of the last segment:
del path2[-1]
self.assertFalse(path1 == path2)
# It's not equal to a list of its segments
self.assertTrue(path1 != path1[:])
self.assertFalse(path1 == path1[:])
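# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): sampling a composite Path
# at evenly spaced parameter values, the same way the tests above probe
# point() and length(). The geometry below is arbitrary.
# ---------------------------------------------------------------------------
def _example_sample_path(steps=10):
    path = Path(Line(0j, 100 + 0j),
                Arc(100 + 0j, 50 + 50j, 0, 0, 0, 200 + 0j))
    # point() takes a parameter in [0, 1] spanning the whole path.
    return [path.point(i / steps) for i in range(steps + 1)], path.length()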
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for the gcloud dataproc tool."""
import time
import urlparse
import uuid
from apitools.base.py import encoding
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.dataproc import constants
from googlecloudsdk.api_lib.dataproc import exceptions
from googlecloudsdk.api_lib.dataproc import storage_helpers
from googlecloudsdk.core import log
from googlecloudsdk.core.console import progress_tracker
def FormatRpcError(error):
"""Returns a printable representation of a failed Google API's status.proto.
Args:
error: the failed Status to print.
Returns:
A ready-to-print string representation of the error.
"""
log.debug('Error:\n' + encoding.MessageToJson(error))
formatted_error = error.message
# Only display details if the log level is INFO or finer.
if error.details and log.GetVerbosity() <= log.info:
formatted_error += (
'\nDetails:\n' + encoding.MessageToJson(error.details))
return formatted_error
# TODO(user): Create a common wait_utils class to reuse common code.
def WaitForOperation(
operation, context, message, timeout_s=2100, poll_period_s=5):
"""Poll dataproc Operation until its status is done or timeout reached.
Args:
operation: Operation, message of the operation to be polled.
context: dict, dataproc Command context.
message: str, message to display to user while polling.
timeout_s: number, seconds to poll with retries before timing out.
poll_period_s: number, delay in seconds between requests.
Returns:
Operation: the return value of the last successful operations.get
request.
Raises:
OperationError: if the operation times out or finishes with an error.
"""
client = context['dataproc_client']
messages = context['dataproc_messages']
request = messages.DataprocProjectsRegionsOperationsGetRequest(
name=operation.name)
log.status.Print('Waiting on operation [{0}].'.format(operation.name))
start_time = time.time()
with progress_tracker.ProgressTracker(message, autotick=True):
while timeout_s > (time.time() - start_time):
try:
operation = client.projects_regions_operations.Get(request)
if operation.done:
break
except apitools_exceptions.HttpError:
# Keep trying until we timeout in case error is transient.
pass
time.sleep(poll_period_s)
# TODO(user): Parse operation metadata.
log.debug('Operation:\n' + encoding.MessageToJson(operation))
if not operation.done:
raise exceptions.OperationTimeoutError(
'Operation [{0}] timed out.'.format(operation.name))
elif operation.error:
raise exceptions.OperationError(
'Operation [{0}] failed: {1}.'.format(
operation.name, FormatRpcError(operation.error)))
log.info('Operation [%s] finished after %.3f seconds',
operation.name, (time.time() - start_time))
return operation
def WaitForResourceDeletion(
request_method,
resource_ref,
message,
timeout_s=60,
poll_period_s=5):
"""Poll Dataproc resource until it no longer exists."""
with progress_tracker.ProgressTracker(message, autotick=True):
start_time = time.time()
while timeout_s > (time.time() - start_time):
try:
request_method(resource_ref)
except apitools_exceptions.HttpError as error:
if error.status_code == 404:
# Object deleted
return
log.debug('Get request for [%s] failed:\n%s', resource_ref, error)
# Keep trying until we timeout in case error is transient.
time.sleep(poll_period_s)
raise exceptions.OperationTimeoutError(
'Deleting resource [{0}] timed out.'.format(resource_ref))
class NoOpProgressDisplay(object):
"""For use in place of a ProgressTracker in a 'with' block."""
def __enter__(self):
pass
def __exit__(self, *unused_args):
pass
def WaitForJobTermination(
job,
context,
message,
goal_state,
stream_driver_log=False,
log_poll_period_s=1,
dataproc_poll_period_s=10,
timeout_s=None):
"""Poll dataproc Job until its status is terminal or timeout reached.
Args:
job: The job to wait to finish.
context: dict, dataproc Command context.
message: str, message to display to user while polling.
goal_state: JobStatus.StateValueValuesEnum, the state to define success
stream_driver_log: bool, Whether to show the Job's driver's output.
log_poll_period_s: number, delay in seconds between checking on the log.
dataproc_poll_period_s: number, delay in seconds between requests to
the Dataproc API.
timeout_s: number, time out for job completion. None means no timeout.
Returns:
Job: the final job resource, once it has reached goal_state.
Raises:
OperationError: if the operation times out or finishes with an error.
"""
client = context['dataproc_client']
job_ref = ParseJob(job.reference.jobId, context)
request = client.MESSAGES_MODULE.DataprocProjectsRegionsJobsGetRequest(
projectId=job_ref.projectId,
region=job_ref.region,
jobId=job_ref.jobId)
driver_log_stream = None
last_job_poll_time = 0
job_complete = False
wait_display = None
def ReadDriverLogIfPresent():
if driver_log_stream and driver_log_stream.open:
# TODO(user): Don't read all output.
driver_log_stream.ReadIntoWritable(log.err)
if stream_driver_log:
log.status.Print('Waiting for job output...')
wait_display = NoOpProgressDisplay()
else:
wait_display = progress_tracker.ProgressTracker(message, autotick=True)
start_time = now = time.time()
with wait_display:
while not timeout_s or timeout_s > (now - start_time):
# Poll logs first to see if it closed.
ReadDriverLogIfPresent()
log_stream_closed = driver_log_stream and not driver_log_stream.open
if not job_complete and job.status.state in constants.TERMINAL_JOB_STATES:
job_complete = True
# Wait an extra 10s to pick up trailing output.
timeout_s = now - start_time + 10
if job_complete and (not stream_driver_log or log_stream_closed):
# Nothing left to wait for
break
regular_job_poll = (
not job_complete
# Poll less frequently on dataproc API
and now >= last_job_poll_time + dataproc_poll_period_s)
# Poll at regular frequency before output has streamed and after it has
# finished.
expecting_output_stream = stream_driver_log and not driver_log_stream
expecting_job_done = not job_complete and log_stream_closed
if regular_job_poll or expecting_output_stream or expecting_job_done:
last_job_poll_time = now
try:
job = client.projects_regions_jobs.Get(request)
if (stream_driver_log
and not driver_log_stream
and job.driverOutputResourceUri):
driver_log_stream = storage_helpers.StorageObjectSeriesStream(
job.driverOutputResourceUri)
except apitools_exceptions.HttpError as error:
log.warn('GetJob failed:\n%s', error)
# Keep trying until we timeout in case error is transient.
time.sleep(log_poll_period_s)
now = time.time()
state = job.status.state
if state is not goal_state and job.status.details:
# Just log details, because the state will be in the error message.
log.info(job.status.details)
if state in constants.TERMINAL_JOB_STATES:
if stream_driver_log:
if not driver_log_stream:
log.warn('Expected job output not found.')
elif driver_log_stream.open:
log.warn('Job terminated, but output did not finish streaming.')
if state is goal_state:
return job
raise exceptions.JobError(
'Job [{0}] entered state [{1}] while waiting for [{2}].'.format(
job_ref.jobId, state, goal_state))
raise exceptions.JobTimeoutError(
'Job [{0}] timed out while in state [{1}].'.format(
job_ref.jobId, state))
def ParseCluster(name, context):
resources = context['resources']
ref = resources.Parse(name, collection='dataproc.projects.regions.clusters')
return ref
def ParseJob(job_id, context):
resources = context['resources']
ref = resources.Parse(job_id, collection='dataproc.projects.regions.jobs')
return ref
def ParseOperation(operation, context):
resources = context['resources']
collection = 'dataproc.projects.regions.operations'
# Dataproc usually refers to Operations by relative name, which must be
# parsed explicitly until resources.Parse supports it.
# TODO(user): Remove once Parse delegates to ParseRelativeName.
url = urlparse.urlparse(operation)
if not url.scheme and '/' in url.path and not url.path.startswith('/'):
return resources.ParseRelativeName(operation, collection=collection)
return resources.Parse(operation, collection=collection)
def GetJobId(job_id=None):
if job_id:
return job_id
return str(uuid.uuid4())
class Bunch(object):
"""Class that converts a dictionary to javascript like object.
For example:
Bunch({'a': {'b': {'c': 0}}}).a.b.c == 0
"""
def __init__(self, dictionary):
for key, value in dictionary.iteritems():
if isinstance(value, dict):
value = Bunch(value)
self.__dict__[key] = value
def AddJvmDriverFlags(parser):
parser.add_argument(
'--jar',
dest='main_jar',
help='The HCFS URI of the jar file containing the driver.')
parser.add_argument(
'--class',
dest='main_class',
help=('The class containing the main method of the driver. Must be in a'
' provided jar or a jar that is already on the classpath.'))
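# Illustrative sketch (not part of the original module): attribute-style
# access to a nested dict via Bunch, plus the job-id helper. The field names
# below are made up for the example.
def _ExampleBunchUsage():
  response = Bunch({'config': {'worker': {'count': 2}}})
  num_workers = response.config.worker.count   # == 2
  job_id = GetJobId()                          # random UUID when no id is given
  return num_workers, job_id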
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CERN@school - Make Plots
See the README.md file for more information.
"""
# Import the code needed to manage files.
import os, glob
#...for parsing the arguments.
import argparse
#...for the logging.
import logging as lg
#...for file manipulation.
from shutil import rmtree
# Import the JSON library.
import json
#...for the histograms.
from plotting.histograms import Hist, Hist2D
if __name__ == "__main__":
print("*")
print("*==============================*")
print("* CERN@school - make the plots *")
print("*==============================*")
# Get the datafile path from the command line.
parser = argparse.ArgumentParser()
parser.add_argument("inputPath", help="Path to the input dataset.")
parser.add_argument("outputPath", help="The path for the output files.")
parser.add_argument("-v", "--verbose", help="Increase output verbosity", action="store_true")
args = parser.parse_args()
## The path to the data file.
datapath = args.inputPath
## The output path.
outputpath = args.outputPath
# Set the logging level.
if args.verbose:
level=lg.DEBUG
else:
level=lg.INFO
# Configure the logging.
lg.basicConfig(filename=outputpath + '/log_make-plots.log', filemode='w', level=level)
print("*")
print("* Input path : '%s'" % (datapath))
print("* Output path : '%s'" % (outputpath))
print("*")
# Set up the directories
#------------------------
# Check if the output directory exists. If it doesn't, quit.
if not os.path.isdir(outputpath):
raise IOError("* ERROR: '%s' output directory does not exist!" % (outputpath))
# Create the subdirectories.
## The path to the frame plots.
fppath = os.path.join(outputpath, "frameplots")
#
if os.path.isdir(fppath):
rmtree(fppath)
lg.info(" * Removing directory '%s'..." % (fppath))
os.mkdir(fppath)
lg.info(" * Creating directory '%s'..." % (fppath))
lg.info("")
## The path to the cluster plots.
kppath = os.path.join(outputpath, "clusterplots")
#
if os.path.isdir(kppath):
rmtree(kppath)
lg.info(" * Removing directory '%s'..." % (kppath))
os.mkdir(kppath)
lg.info(" * Creating directory '%s'..." % (kppath))
lg.info("")
## The frame properties JSON file - FIXME: check it exists...
ff = open(datapath + "/frames.json", "r")
#
fd = json.load(ff)
ff.close()
## The cluster properties JSON file - FIXME: check it exists...
kf = open(datapath + "/klusters.json", "r")
#
kd = json.load(kf)
kf.close()
# The frames
#------------
## The number of clusters per frame.
ncs = []
## The number of non-gamma clusters per frame.
nlcs = []
## The number of gamma candidates per frame.
ngs = []
# Loop over the frames.
for f in fd:
# Add to the frame property dictionaries.
ncs.append( f["n_kluster"])
nlcs.append(f["n_non_gamma"])
ngs.append( f["n_gamma"])
## The number of clusters plot.
ncsplot = Hist("ncs", 101, ncs, -1, "Number of clusters", "Number of frames", fppath)
## The number of non-gamma clusters plot.
nlcsplot = Hist("nls", 102, nlcs, -1, "Number of non-gamma clusters", "Number of frames", fppath)
## The number of gamma clusters plot.
ngsplot = Hist("ngs", 103, ngs, -1, "Number of gamma clusters", "Number of frames", fppath)
# Make the plot display page.
fp = ""
fp += "<!DOCTYPE html>\n"
fp += "<html>\n"
fp += " <head>\n"
fp += " <link rel=\"stylesheet\" type=\"text/css\" "
fp += "href=\"assets/css/style.css\">\n"
fp += " </head>\n"
fp += " <body>\n"
fp += " <h1>Cluster Sorting: Frame Properties</h1>\n"
fp += " <h2>Dataset summary</h2>\n"
fp += " <p>\n"
fp += " <ul>\n"
fp += " <li>Dataset path = '%s'</li>\n" % (datapath)
fp += " <li>Number of frames = %d</li>\n" % (len(fd))
fp += " <li>Number of clusters (dat.) = %d</li>\n" % (len(kd))
fp += " </ul>\n"
fp += " </p>\n"
fp += " <h2>Frame properties</h2>\n"
fp += " <table>\n"
fp += " <caption>Fig. 1: Clusters per frame.</caption>\n"
fp += " <tr><td><img src=\"ncs.png\" /></td></tr>\n"
fp += " </table>\n"
fp += " <table>\n"
fp += " <caption>Fig. 2: Non-gamma clusters per frame.</caption>\n"
fp += " <tr><td><img src=\"nls.png\" /></td></tr>\n"
fp += " </table>\n"
fp += " <table>\n"
fp += " <caption>Fig. 3: Gamma clusters per frame.</caption>\n"
fp += " <tr><td><img src=\"ngs.png\" /></td></tr>\n"
fp += " </table>\n"
fp += " </body>\n"
fp += "</html>"
# Write out the frame property index page.
with open("%s/index.html" % (fppath), "w") as framepage:
framepage.write(fp)
# Clusters
#----------
## A list of clusters.
klusters = []
# Create container lists for the cluster properties.
cluster_size = []
cluster_counts = []
cluster_maxcounts = []
cluster_radius_u = []
cluster_density_u = []
cluster_linearity = []
cluster_innerfrac = []
# Loop over the klusters.
for k in kd:
# Add to the cluster property dictionaries.
if not k["isedgekluster"]:
cluster_size.append( k["size"])
cluster_radius_u.append( k["radius_uw"])
cluster_density_u.append(k["density_uw"])
cluster_linearity.append(k["lin_linearity"])
cluster_innerfrac.append(k["innerfrac"])
cluster_counts.append( k["totalcounts"])
cluster_maxcounts.append(k["maxcounts"])
# Cluster plots
#---------------
ksplot = Hist("kls", 1001, cluster_size, -1, "$N_{h}$", "Number of clusters", kppath)
kcplot = Hist("klc", 1002, cluster_counts, 100, "$N_{C}$", "Number of clusters", kppath)
krplot = Hist("klr", 1003, cluster_radius_u, 100, "$r$", "Number of clusters", kppath)
kdplot = Hist("kld", 1004, cluster_density_u, 100, "$\\rho$", "Number of clusters", kppath)
klplot = Hist("kll", 1005, cluster_linearity, 100, "Linearity", "Number of clusters", kppath)
kiplot = Hist("kli", 1006, cluster_innerfrac, 100, "Inner frac.", "Number of clusters", kppath)
kmplot = Hist("klm", 1007, cluster_maxcounts, 100, "Max. Count", "Number of clusters", kppath)
# Figure - hits vs radius.
hits_vs_rad = Hist2D(201, "hvr", cluster_size, "$N_h$", max(cluster_size), \
cluster_radius_u, "$r$", 100, \
kppath)
# Figure - hits vs counts.
hits_vs_counts = Hist2D(202, "hvc", cluster_size, "$N_h$", max(cluster_size), \
cluster_counts, "$N_c$", 100, \
kppath)
# Figure - hits vs linearity.
hits_vs_lin = Hist2D(203, "hvl", cluster_size, "$N_h$", max(cluster_size), \
cluster_linearity, "Linearity", 100, \
kppath)
# Figure - radius vs linearity.
rad_vs_lin = Hist2D(204, "rvl", cluster_radius_u, "$r$", 100, \
cluster_linearity, "Linearity", 100, \
kppath)
# Figure - density vs linearity.
rho_vs_lin = Hist2D(205, "dvl", cluster_density_u, "$\\rho$", 100, \
cluster_linearity, "Linearity", 100, \
kppath)
# Make the plot display page.
kp = ""
kp += "<!DOCTYPE html>\n"
kp += "<html>\n"
kp += " <head>\n"
kp += " <link rel=\"stylesheet\" type=\"text/css\" "
kp += "href=\"assets/css/style.css\">\n"
kp += " </head>\n"
kp += " <body>\n"
kp += " <h1>Cluster Sorting: Cluster Properties</h1>\n"
kp += " <h2>Dataset summary</h2>\n"
kp += " <p>\n"
kp += " <ul>\n"
kp += " <li>Dataset path = '%s'</li>\n" % (datapath)
kp += " <li>Number of frames = %d</li>\n" % (len(fd))
kp += " <li>Number of clusters (dat.) = %d</li>\n" % (len(cluster_size))
kp += " </ul>\n"
kp += " </p>\n"
kp += " <h2>Cluster properties</h2>\n"
kp += " <h3>Individual plots</h3>\n"
kp += " <table>\n"
kp += " <caption>Fig. 1.1: Cluster size.</caption>\n"
kp += " <tr><td><img src=\"kls.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " <table>\n"
kp += " <caption>Fig. 1.2: Total counts per cluster.</caption>\n"
kp += " <tr><td><img src=\"klc.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " <table>\n"
kp += " <caption>Fig. 1.3: Max. count value in the cluster.</caption>\n"
kp += " <tr><td><img src=\"klm.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " <table>\n"
kp += " <caption>Fig. 1.4: Cluster radii.</caption>\n"
kp += " <tr><td><img src=\"klr.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " <table>\n"
kp += " <caption>Fig. 1.5: Cluster density ρ.</caption>\n"
kp += " <tr><td><img src=\"kld.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " <table>\n"
kp += " <caption>Fig. 1.6: Fraction of inner pixels.</caption>\n"
kp += " <tr><td><img src=\"kli.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " <table>\n"
kp += " <caption>Fig. 1.7: Cluster linearity.</caption>\n"
kp += " <tr><td><img src=\"kll.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " <h3>Comparison plots</h3>\n"
kp += " <table>\n"
kp += " <caption>Fig. 2.1: Cluster size vs radius.</caption>\n"
kp += " <tr><td><img src=\"hvr.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " <table>\n"
kp += " <caption>Fig. 2.2: Cluster size vs counts.</caption>\n"
kp += " <tr><td><img src=\"hvc.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " <table>\n"
kp += " <caption>Fig. 2.3: Cluster size vs linearity.</caption>\n"
kp += " <tr><td><img src=\"hvl.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " <table>\n"
kp += " <caption>Fig. 2.4: Cluster radius vs linearity.</caption>\n"
kp += " <tr><td><img src=\"rvl.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " <table>\n"
kp += " <caption>Fig. 2.5: Cluster density vs linearity.</caption>\n"
kp += " <tr><td><img src=\"dvl.png\" /></td></tr>\n"
kp += " </table>\n"
kp += " </body>\n"
kp += "</html>"
# Write out the cluster property index page.
with open("%s/index.html" % (kppath), "w") as clusterpage:
clusterpage.write(kp)
# Now you can view the "index.html" files to see the results!
print("*")
print("* Plotting complete.")
print("* View your results by opening '%s' or '%s' in a browser, e.g." % (fppath, kppath))
print("* $ firefox %s/index.html &" % (fppath))
print("* $ firefox %s/index.html &" % (kppath))
|
|
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils import http
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
class AddPoolLink(tables.LinkAction):
name = "addpool"
verbose_name = _("Add Pool")
url = "horizon:project:lbaas:addpool"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool"),)
class AddVipLink(tables.LinkAction):
name = "addvip"
verbose_name = _("Add VIP")
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:lbaas:addvip",
kwargs={'pool_id': pool.id})
return base_url
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class AddMemberLink(tables.LinkAction):
name = "addmember"
verbose_name = _("Add Member")
url = "horizon:project:lbaas:addmember"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_member"),)
class AddMonitorLink(tables.LinkAction):
name = "addmonitor"
verbose_name = _("Add Monitor")
url = "horizon:project:lbaas:addmonitor"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_health_monitor"),)
class DeleteVipLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletevip"
action_present = _("Delete")
action_past = _("Scheduled deletion of %(data_type)s")
data_type_singular = _("VIP")
data_type_plural = _("VIPs")
policy_rules = (("network", "delete_vip"),)
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class DeletePoolLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletepool"
action_present = _("Delete")
action_past = _("Scheduled deletion of %(data_type)s")
data_type_singular = _("Pool")
data_type_plural = _("Pools")
policy_rules = (("network", "delete_pool"),)
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class DeleteMonitorLink(policy.PolicyTargetMixin,
tables.DeleteAction):
name = "deletemonitor"
action_present = _("Delete")
action_past = _("Scheduled deletion of %(data_type)s")
data_type_singular = _("Monitor")
data_type_plural = _("Monitors")
policy_rules = (("network", "delete_health_monitor"),)
class DeleteMemberLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletemember"
action_present = _("Delete")
action_past = _("Scheduled deletion of %(data_type)s")
data_type_singular = _("Member")
data_type_plural = _("Members")
policy_rules = (("network", "delete_member"),)
class UpdatePoolLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatepool"
verbose_name = _("Edit Pool")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_pool"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:lbaas:updatepool",
kwargs={'pool_id': pool.id})
return base_url
class UpdateVipLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatevip"
verbose_name = _("Edit VIP")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:lbaas:updatevip",
kwargs={'vip_id': pool.vip_id})
return base_url
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class UpdateMemberLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemember"
verbose_name = _("Edit Member")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_member"),)
def get_link_url(self, member):
base_url = reverse("horizon:project:lbaas:updatemember",
kwargs={'member_id': member.id})
return base_url
class UpdateMonitorLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemonitor"
verbose_name = _("Edit Monitor")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_health_monitor"),)
def get_link_url(self, monitor):
base_url = reverse("horizon:project:lbaas:updatemonitor",
kwargs={'monitor_id': monitor.id})
return base_url
def get_vip_link(pool):
if pool.vip_id:
return reverse("horizon:project:lbaas:vipdetails",
args=(http.urlquote(pool.vip_id),))
else:
return None
class AddPMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "addassociation"
verbose_name = _("Associate Monitor")
url = "horizon:project:lbaas:addassociation"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool_health_monitor"),)
def allowed(self, request, datum=None):
try:
tenant_id = request.user.tenant_id
monitors = api.lbaas.pool_health_monitor_list(request,
tenant_id=tenant_id)
for m in monitors:
if m.id not in datum['health_monitors']:
return True
except Exception:
exceptions.handle(request,
_('Failed to retrieve health monitors.'))
return False
class DeletePMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "deleteassociation"
verbose_name = _("Disassociate Monitor")
url = "horizon:project:lbaas:deleteassociation"
classes = ("ajax-modal", "btn-danger")
icon = "remove"
policy_rules = (("network", "delete_pool_health_monitor"),)
def allowed(self, request, datum=None):
if datum and not datum['health_monitors']:
return False
return True
class PoolsTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:lbaas:pooldetails")
description = tables.Column('description', verbose_name=_("Description"))
provider = tables.Column('provider', verbose_name=_("Provider"),
filters=(lambda v: filters.default(v, _('N/A')),))
subnet_name = tables.Column('subnet_name', verbose_name=_("Subnet"))
protocol = tables.Column('protocol', verbose_name=_("Protocol"))
status = tables.Column('status', verbose_name=_("Status"))
vip_name = tables.Column('vip_name', verbose_name=_("VIP"),
link=get_vip_link)
class Meta:
name = "poolstable"
verbose_name = _("Pools")
table_actions = (AddPoolLink, DeletePoolLink)
row_actions = (UpdatePoolLink, AddVipLink, UpdateVipLink,
DeleteVipLink, AddPMAssociationLink,
DeletePMAssociationLink, DeletePoolLink)
def get_pool_link(member):
return reverse("horizon:project:lbaas:pooldetails",
args=(http.urlquote(member.pool_id),))
def get_member_link(member):
return reverse("horizon:project:lbaas:memberdetails",
args=(http.urlquote(member.id),))
class MembersTable(tables.DataTable):
address = tables.Column('address',
verbose_name=_("IP Address"),
link=get_member_link,
attrs={'data-type': "ip"})
protocol_port = tables.Column('protocol_port',
verbose_name=_("Protocol Port"))
weight = tables.Column('weight',
verbose_name=_("Weight"))
pool_name = tables.Column('pool_name',
verbose_name=_("Pool"), link=get_pool_link)
status = tables.Column('status', verbose_name=_("Status"))
class Meta:
name = "memberstable"
verbose_name = _("Members")
table_actions = (AddMemberLink, DeleteMemberLink)
row_actions = (UpdateMemberLink, DeleteMemberLink)
def get_monitor_details(monitor):
if monitor.type in ('HTTP', 'HTTPS'):
return ("%(http_method)s %(url_path)s => %(codes)s" %
{'http_method': monitor.http_method,
'url_path': monitor.url_path,
'codes': monitor.expected_codes})
else:
return _("-")
class MonitorsTable(tables.DataTable):
monitor_type = tables.Column(
"type", verbose_name=_("Monitor Type"),
link="horizon:project:lbaas:monitordetails")
delay = tables.Column("delay", verbose_name=_("Delay"))
timeout = tables.Column("timeout", verbose_name=_("Timeout"))
max_retries = tables.Column("max_retries", verbose_name=_("Max Retries"))
details = tables.Column(get_monitor_details, verbose_name=_("Details"))
class Meta:
name = "monitorstable"
verbose_name = _("Monitors")
table_actions = (AddMonitorLink, DeleteMonitorLink)
row_actions = (UpdateMonitorLink, DeleteMonitorLink)
|
|
"""Tests launch, teardown, and update of multiple Ray clusters using Kubernetes
operator. Also tests submission of jobs via Ray client."""
import copy
import sys
import os
import subprocess
import tempfile
import time
import unittest
from contextlib import contextmanager
import kubernetes
import pytest
import yaml
import ray
from ray.autoscaler._private._kubernetes.node_provider import\
KubernetesNodeProvider
IMAGE_ENV = "KUBERNETES_OPERATOR_TEST_IMAGE"
IMAGE = os.getenv(IMAGE_ENV, "rayproject/ray:nightly")
NAMESPACE_ENV = "KUBERNETES_OPERATOR_TEST_NAMESPACE"
NAMESPACE = os.getenv(NAMESPACE_ENV, "test-k8s-operator")
PULL_POLICY_ENV = "KUBERNETES_OPERATOR_TEST_PULL_POLICY"
PULL_POLICY = os.getenv(PULL_POLICY_ENV, "Always")
RAY_PATH = os.path.abspath(
os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))))))
@contextmanager
def client_connect_to_k8s(port="10001"):
command = f"kubectl -n {NAMESPACE}"\
f" port-forward service/example-cluster-ray-head {port}:{port}"
command = command.split()
print(">>>Port-forwarding head service.")
proc = subprocess.Popen(command)
# Wait a bit for the port-forwarding connection to be
# established.
time.sleep(10)
ray.client(f"127.0.0.1:{port}").connect()
try:
yield proc
finally:
ray.shutdown()
proc.kill()
def retry_until_true(f):
# Keep retrying for 8 minutes with 10 seconds between attempts.
def f_with_retries(*args, **kwargs):
for _ in range(49):
if f(*args, **kwargs):
return
else:
time.sleep(10)
pytest.fail("The condition wasn't met before the timeout expired.")
return f_with_retries
@retry_until_true
def wait_for_pods(n, namespace=NAMESPACE, name_filter=""):
client = kubernetes.client.CoreV1Api()
pods = client.list_namespaced_pod(namespace=namespace).items
count = 0
for pod in pods:
if name_filter in pod.metadata.name:
count += 1
            # Double-check that the correct image is used.
assert pod.spec.containers[0].image == IMAGE,\
pod.spec.containers[0].image
return count == n
@retry_until_true
def wait_for_logs(operator_pod):
"""Check if logs indicate presence of nodes of types "head-node" and
"worker-nodes" in the "example-cluster" cluster."""
cmd = f"kubectl -n {NAMESPACE} logs {operator_pod}"\
f"| grep ^example-cluster,{NAMESPACE}: | tail -n 100"
log_tail = subprocess.check_output(cmd, shell=True).decode()
return ("head-node" in log_tail) and ("worker-node" in log_tail)
@retry_until_true
def wait_for_job(job_pod):
print(">>>Checking job logs.")
cmd = f"kubectl -n {NAMESPACE} logs {job_pod}"
try:
out = subprocess.check_output(
cmd, shell=True, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as e:
print(">>>Failed to check job logs.")
print(e.output.decode())
return False
success = "success" in out.lower()
if success:
print(">>>Job submission succeeded.")
else:
print(">>>Job logs do not indicate job sucess:")
print(out)
return success
@retry_until_true
def wait_for_command_to_succeed(cmd):
try:
subprocess.check_call(cmd, shell=True)
return True
except subprocess.CalledProcessError:
return False
@retry_until_true
def wait_for_pod_status(pod_name, status):
client = kubernetes.client.CoreV1Api()
pod = client.read_namespaced_pod(namespace=NAMESPACE, name=pod_name)
return pod.status.phase == status
@retry_until_true
def wait_for_status(cluster_name, status):
client = kubernetes.client.CustomObjectsApi()
cluster_cr = client.get_namespaced_custom_object(
namespace=NAMESPACE,
group="cluster.ray.io",
version="v1",
plural="rayclusters",
name=cluster_name)
return cluster_cr["status"]["phase"] == status
@retry_until_true
def wait_for_services(n):
return num_services() == n
def kubernetes_configs_directory():
relative_path = "deploy"
return os.path.join(RAY_PATH, relative_path)
def get_kubernetes_config_path(name):
return os.path.join(kubernetes_configs_directory(), name)
def get_component_config_path(file_name):
operator_configs = get_kubernetes_config_path("components")
return os.path.join(operator_configs, file_name)
def get_crd_path():
return get_kubernetes_config_path("charts/ray/crds/cluster_crd.yaml")
def pods():
client = kubernetes.client.CoreV1Api()
pod_items = client.list_namespaced_pod(namespace=NAMESPACE).items
return [
pod.metadata.name for pod in pod_items
if pod.status.phase in ["Running", "Pending"]
and pod.metadata.deletion_timestamp is None
]
def num_services():
cmd = f"kubectl -n {NAMESPACE} get services --no-headers -o"\
" custom-columns=\":metadata.name\""
service_list = subprocess.check_output(cmd, shell=True).decode().split()
return len(service_list)
class KubernetesOperatorTest(unittest.TestCase):
def test_basic(self):
# Validate terminate_node error handling
provider = KubernetesNodeProvider({
"namespace": NAMESPACE
}, "default_cluster_name")
# 404 caught, no error
provider.terminate_node("no-such-node")
with tempfile.NamedTemporaryFile("w+") as example_cluster_file, \
tempfile.NamedTemporaryFile("w+") as example_cluster2_file,\
tempfile.NamedTemporaryFile("w+") as operator_file,\
tempfile.NamedTemporaryFile("w+") as job_file:
# Get paths to operator configs
example_cluster_config_path = get_component_config_path(
"example_cluster.yaml")
operator_config_path = get_component_config_path(
"operator_namespaced.yaml")
job_path = os.path.join(RAY_PATH,
"doc/kubernetes/job-example.yaml")
# Load operator configs
example_cluster_config = yaml.safe_load(
open(example_cluster_config_path).read())
example_cluster2_config = copy.deepcopy(example_cluster_config)
# One worker for the second config
example_cluster2_config["spec"]["podTypes"][1]["minWorkers"] = 1
example_cluster2_config["metadata"]["name"] = "example-cluster2"
operator_config = list(
yaml.safe_load_all(open(operator_config_path).read()))
job_config = yaml.safe_load(open(job_path).read())
# Fill image fields
podTypes = example_cluster_config["spec"]["podTypes"]
podTypes2 = example_cluster2_config["spec"]["podTypes"]
pod_specs = ([operator_config[-1]["spec"]["template"]["spec"]] + [
job_config["spec"]["template"]["spec"]
] + [podType["podConfig"]["spec"] for podType in podTypes
] + [podType["podConfig"]["spec"] for podType in podTypes2])
for pod_spec in pod_specs:
pod_spec["containers"][0]["image"] = IMAGE
pod_spec["containers"][0]["imagePullPolicy"] = PULL_POLICY
# Use a custom Redis port for one of the clusters.
example_cluster_config["spec"]["headStartRayCommands"][1] += \
" --port 6400"
example_cluster_config["spec"]["workerStartRayCommands"][1] = \
" ulimit -n 65536; ray start --address=$RAY_HEAD_IP:6400"
# Dump to temporary files
yaml.dump(example_cluster_config, example_cluster_file)
yaml.dump(example_cluster2_config, example_cluster2_file)
yaml.dump(job_config, job_file)
yaml.dump_all(operator_config, operator_file)
files = [
example_cluster_file, example_cluster2_file, operator_file
]
for file in files:
file.flush()
# Start operator and two clusters
print("\n>>>Starting operator and two clusters.")
for file in files:
cmd = f"kubectl -n {NAMESPACE} apply -f {file.name}"
subprocess.check_call(cmd, shell=True)
# Check that autoscaling respects minWorkers by waiting for
# six pods in the namespace.
print(">>>Waiting for pods to join clusters.")
wait_for_pods(6)
# Check that head services are present.
print(">>>Checking that head services are present.")
wait_for_services(2)
# Check that logging output looks normal (two workers connected to
# ray cluster example-cluster.)
operator_pod = [pod for pod in pods() if "operator" in pod].pop()
wait_for_logs(operator_pod)
print(">>>Checking that Ray client connection is uninterrupted by"
" operator restart.")
with client_connect_to_k8s():
@ray.remote
class Test:
@ray.method()
def method(self):
return "success"
actor = Test.remote()
print(">>>Restarting operator pod.")
cmd = f"kubectl -n {NAMESPACE} delete pod {operator_pod}"
subprocess.check_call(cmd, shell=True)
wait_for_pods(6)
operator_pod = [pod for pod in pods()
if "operator" in pod].pop()
wait_for_pod_status(operator_pod, "Running")
time.sleep(5)
print(">>>Confirming Ray is uninterrupted.")
assert ray.get(actor.method.remote()) == "success"
# Delete head node of the first cluster. Recovery logic should
# allow the rest of the test to pass.
print(">>>Deleting cluster's head to test recovery.")
head_pod = [pod for pod in pods() if "r-ray-head" in pod].pop()
cd = f"kubectl -n {NAMESPACE} delete pod {head_pod}"
subprocess.check_call(cd, shell=True)
print(">>>Confirming recovery.")
# Status marked "Running".
wait_for_status("example-cluster", "Running")
# Head pod recovered.
wait_for_pods(6)
# Get the new head pod
head_pod = [pod for pod in pods() if "r-ray-head" in pod].pop()
wait_for_pod_status(head_pod, "Running")
stat_cmd = f"kubectl -n {NAMESPACE} exec {head_pod} -- ray status"
print(">>>Waiting for success of `ray status` on recovered head.")
wait_for_command_to_succeed(stat_cmd)
print(">>>Stopping ray on the head node to test recovery.")
stop_cmd = f"kubectl -n {NAMESPACE} exec {head_pod} -- ray stop"
subprocess.check_call(stop_cmd, shell=True)
# ray status should fail when called immediately after ray stop
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(stat_cmd, shell=True)
print(">>>Waiting for success of `ray status` on recovered head.")
wait_for_command_to_succeed(stat_cmd)
# Delete the second cluster
print(">>>Deleting example-cluster2.")
cmd = f"kubectl -n {NAMESPACE} delete -f"\
f"{example_cluster2_file.name}"
subprocess.check_call(cmd, shell=True)
# Four pods remain
print(">>>Checking that example-cluster2 pods are gone.")
wait_for_pods(4)
# Cluster 2 service has been garbage-collected.
print(">>>Checking that deleted cluster's service is gone.")
wait_for_services(1)
# Check job submission
print(">>>Submitting a job to test Ray client connection.")
cmd = f"kubectl -n {NAMESPACE} create -f {job_file.name}"
subprocess.check_call(cmd, shell=True)
wait_for_pods(1, name_filter="job")
job_pod = [pod for pod in pods() if "job" in pod].pop()
time.sleep(10)
wait_for_job(job_pod)
cmd = f"kubectl -n {NAMESPACE} delete jobs --all"
subprocess.check_call(cmd, shell=True)
# Check that cluster updates work: increase minWorkers to 3
# and check that one worker is created.
print(">>>Updating cluster size.")
example_cluster_edit = copy.deepcopy(example_cluster_config)
example_cluster_edit["spec"]["podTypes"][1]["minWorkers"] = 3
yaml.dump(example_cluster_edit, example_cluster_file)
example_cluster_file.flush()
cm = f"kubectl -n {NAMESPACE} apply -f {example_cluster_file.name}"
subprocess.check_call(cm, shell=True)
print(">>>Checking that new cluster size is respected.")
wait_for_pods(5)
# Delete the first cluster
print(">>>Deleting second cluster.")
cmd = f"kubectl -n {NAMESPACE} delete -f"\
f"{example_cluster_file.name}"
subprocess.check_call(cmd, shell=True)
# Only operator pod remains.
print(">>>Checking that all Ray cluster pods are gone.")
wait_for_pods(1)
# Cluster 1 service has been garbage-collected.
print(">>>Checking that all Ray cluster services are gone.")
wait_for_services(0)
# Verify that cluster deletion earlier in this test did not break
# the operator.
print(">>>Checking cluster creation again.")
for file in [example_cluster_file, example_cluster2_file]:
cmd = f"kubectl -n {NAMESPACE} apply -f {file.name}"
subprocess.check_call(cmd, shell=True)
wait_for_pods(7)
print(">>>Checking cluster deletion again.")
for file in [example_cluster_file, example_cluster2_file]:
cmd = f"kubectl -n {NAMESPACE} delete -f {file.name}"
subprocess.check_call(cmd, shell=True)
wait_for_pods(1)
if __name__ == "__main__":
kubernetes.config.load_kube_config()
sys.exit(pytest.main(["-sv", __file__]))
|
|
"""
Supports flushing statsite metrics to Librato
"""
import ast
import sys
import socket
import logging
import ConfigParser
import re
import base64
import urllib2
import json
import os
##
# Librato sink for statsite
# =========================
#
# Use with the following stream command:
#
# stream_cmd = python sinks/librato.py librato.ini
#
# The Librato sink takes an INI format configuration file as a single
# argument. The following is an example configuration:
#
# Configuration example:
# ---------------------
#
# [librato]
# email = john@example.com
# token = 02ac4003c4fcd11bf9cee34e34263155dc7ba1906c322d167db6ab4b2cd2082b
# source_regex = ^([^-]+)--
# floor_time_secs = 60
#
# Options:
# -------
#
# - email / token: Librato account credentials (required).
# - source: Source name to use for samples, defaults to hostname if not set.
# - source_regex: Source name regex extraction see:
# https://github.com/librato/statsd-librato-backend#setting-the-source-per-metric
# - floor_time_secs: Floor samples to this time (should match statsite flush_interval).
# - prefix: Metric name prefix to set for all metrics.
# - extended_counters: true/false, look for statsite extended_counters, default false.
# This should match your statsite config for extended_counters.
#
###
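# Illustrative sketch (not part of the sink; the metric names, tags and values
# below are made up): statsite pipes newline-delimited samples of the form
# "key|value|timestamp" to this script's stdin, e.g.:
#
#   counts.api.hits|42|1420070400
#   timers.api.request.upper|31.0|1420070400           -> Librato field "max"
#   timers.api.request#region=us.sum|12.5|1420070400   -> tag region=us, field "sum"
#
# With source_regex = ^([^-]+)-- a key such as "gauges.web01--api.load" would
# yield source "web01" and metric name "api.load". build() splits each line on
# "|", add_measure() maps the key to a Librato measurement name, tags and
# summary field, and flush() POSTs the batched payload(s).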
class LibratoStore(object):
def __init__(self, conffile="/etc/statsite/librato.ini"):
"""
Implements an interface that allows metrics to be persisted to Librato.
Raises a :class:`ValueError` on bad arguments or `Exception` on missing
configuration section.
:Parameters:
- `conffile`: INI configuration file.
"""
self.logger = logging.getLogger("statsite.librato")
self.api = "https://metrics-api.librato.com"
self.parse_conf(conffile)
self.sink_name = "statsite-librato"
self.sink_version = "1.0.1"
self.flush_timeout_secs = 5
self.gauges = {}
self.measurements = {}
# Limit our payload sizes
self.max_metrics_payload = 500
self.timer_re = re.compile("^timers\.")
self.ex_count_re = re.compile("^counts\.")
self.type_re = re.compile("^(kv|timers|counts|gauges|sets)\.(.+)$")
self.sfx_map = {
'sum': 'sum',
'sum_sq': None,
'count' : 'count',
'stdev' : 'stddev_m2',
'lower' : 'min',
'upper' : 'max',
'mean' : None
}
self.sfx_re = re.compile("(.+)\.(sum|sum_sq|count|stdev|lower|upper|mean)$")
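        # e.g. (after the type prefix is stripped) "api.request.upper" splits
        # into base name "api.request" and Librato summary field "max";
        # suffixes mapped to None above ("sum_sq", "mean") are dropped.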
self.sanitize_re = re.compile("[^-A-Za-z0-9.:_]")
def parse_conf(self, conffile):
"""
Loads configuration from an INI format file.
"""
sect = "librato"
config = ConfigParser.RawConfigParser()
config.read(conffile)
if not config.has_section(sect):
raise Exception("Can not locate config section 'librato'")
if config.has_option(sect, 'email'):
self.email = config.get(sect, 'email')
else:
raise ValueError("email must be set in config")
if config.has_option(sect, 'token'):
self.token = config.get(sect, 'token')
else:
raise ValueError("token must be set in config")
if config.has_option(sect, 'api'):
self.api = config.get(sect, 'api')
if config.has_option(sect, 'source'):
self.source = config.get(sect, 'source')
else:
self.source = None
if config.has_option(sect, 'host'):
self.host = config.get(sect, 'host')
else:
self.host = socket.gethostname()
if config.has_option(sect, 'source_regex'):
reg = config.get(sect, 'source_regex')
# Strip /'s
if len(reg) > 2 and reg[0] == '/' and \
reg[len(reg) - 1] == "/":
reg = reg[1:len(reg)-1]
self.source_re = re.compile(reg)
else:
self.source_re = None
if config.has_option(sect, 'floor_time_secs'):
self.floor_time_secs = config.getint(sect, 'floor_time_secs')
else:
self.floor_time_secs = None
if config.has_option(sect, "prefix"):
self.prefix = config.get(sect, "prefix")
else:
self.prefix = None
if config.has_option(sect, "source_prefix"):
self.source_prefix = config.get(sect, "source_prefix")
else:
self.source_prefix = None
if config.has_option(sect, "extended_counters"):
self.extended_counters = config.getboolean(sect, "extended_counters")
else:
self.extended_counters = False
# Check if we want to also send measurements to legacy Librato API
if config.has_option(sect, "write_to_legacy"):
self.write_to_legacy = config.getboolean(sect, "write_to_legacy")
else:
self.write_to_legacy = False
# Global Tags
if config.has_option(sect, "tags"):
self.tags = ast.literal_eval(config.get(sect, "tags"))
else:
self.tags = {}
def split_multipart_metric(self, name):
m = self.sfx_re.match(name)
if m != None:
if self.sfx_map[m.group(2)] != None:
return m.group(1), self.sfx_map[m.group(2)]
else:
# These we drop
return None, None
else:
return name, None
def sanitize(self, name):
return self.sanitize_re.sub("_", name)
def parse_tags(self, name, multipart=False):
# Find and parse the tags from the name using the syntax name#tag1=value,tag2=value
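        # e.g. "api.requests#region=us,host=web1" -> ("api.requests",
        #      {"region": "us", "host": "web1"}) when multipart is False.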
s = name.split("#")
tags = {}
raw_tags = []
if len(s) > 1:
name = s.pop(0)
raw_tags = s.pop().split(",")
if multipart:
# Timers will append .p90, .p99 etc to the end of the "metric name"
# Parse the suffix out and append to the metric name
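                # e.g. with multipart, "api.request#region=us.p90" becomes
                # name "api.request.p90" with raw tag "region=us".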
last_tag = raw_tags.pop()
last_tag_split = last_tag.split('.')
                # Store the proper name. The suffix is the last element of the split of the last tag.
name = name + "." + last_tag_split.pop()
# Put the tag, without the suffix, back in the list of raw tags
if len(last_tag_split) > 1:
t = ".".join(last_tag_split)
                    # We had periods in the tag value...
raw_tags.append(t)
# raw_tags.extend(last_tag_split)
else:
raw_tags.extend(last_tag_split)
# Parse the tags out
for raw_tag in raw_tags:
# Get the key and value from tag=value
tag_key, tag_value = raw_tag.split("=")
tags[tag_key] = tag_value
return name, tags
def add_measure(self, key, value, time):
ts = int(time)
if self.floor_time_secs != None:
ts = (ts / self.floor_time_secs) * self.floor_time_secs
value = float(value)
source = self.source
name = self.type_re.match(key).group(2)
ismultipart = False
if self.timer_re.match(key) != None:
ismultipart = True
if self.extended_counters and \
self.ex_count_re.match(key) != None:
ismultipart = True
# Match the source regex
if self.source_re != None:
m = self.source_re.search(name)
if m != None:
source = m.group(1)
name = name[0:m.start(0)] + name[m.end(0):]
# Add a source prefix
if self.source_prefix:
source = "%s.%s" % (self.source_prefix, source)
# Parse the tags out
name, tags = self.parse_tags(name, ismultipart)
subf = None
if ismultipart:
name, subf = self.split_multipart_metric(name)
if subf == None:
subf = 'value'
# Bail if skipping
if name == None:
return
# Add a metric prefix
if self.prefix:
name = "%s.%s" % (self.prefix, name)
name = self.sanitize(name)
# Add the hostname as a global tag.
self.tags['host'] = self.host
if source:
# Sanitize
source = self.sanitize(source)
# Add a tag of source if not specified by the client
if 'source' not in tags:
tags['source'] = source
# Build a key for the dict that will hold all the measurements to
# submit
k = "%s\t%s" % (name, source)
else:
k = name
if k not in self.measurements:
m = [{'name': name, 'tags' : tags, 'time' : ts, subf: value}]
self.measurements[k] = m
else:
# Build summary statistics
processed = False
# Try to find an existing measurement for this tagset
# so we can add the next summary statistic
for m in self.measurements[k]:
if m['tags'] == tags:
m[subf] = value
processed = True
break
if not processed:
# New tagset
payload = {'name': name, 'tags' : tags, 'time' : ts, subf: value}
self.measurements[k].append(payload)
# Build out the legacy gauges
if self.write_to_legacy:
if k not in self.gauges:
# Truncate metric/source names to 255 for legacy
if len(name) > 255:
name = name[:255]
self.logger.warning(
"Truncating metric %s to 255 characters to avoid failing entire payload" % name
)
if source and len(source) > 255:
source = source[:255]
self.logger.warning(
"Truncating source %s to 255 characters to avoid failing entire payload" % source
)
self.gauges[k] = {
'name': name,
'source': source,
'measure_time': ts
}
self.gauges[k][subf] = value
def build(self, metrics):
"""
Build metric data to send to Librato
:Parameters:
        - `metrics` : A list of "key|value|timestamp" strings, one sample per line.
"""
if not metrics:
return
# Construct the output
for m in metrics:
k, vs, ts = m.split("|")
self.add_measure(k, vs, ts)
def flush_payload(self, headers, m, legacy = False):
"""
POST a payload to Librato.
"""
if legacy:
body = json.dumps({ 'gauges' : m })
url = "%s/v1/metrics" % (self.api)
else:
global_tags = self.tags
body = json.dumps({ 'measurements' : m, 'tags': global_tags })
url = "%s/v1/measurements" % (self.api)
req = urllib2.Request(url, body, headers)
try:
f = urllib2.urlopen(req, timeout = self.flush_timeout_secs)
response = f.read()
f.close()
# The new tags API supports partial payload accept/reject
# So let's show a message if any part fails
if 'errors' in response:
parsed_response = json.loads(response)
# errors could be [], so check that prior to logging anything
if parsed_response['errors']:
self.logger.error(parsed_response)
except urllib2.HTTPError as error:
body = error.read()
self.logger.warning('Failed to send metrics to Librato: Code: %d. Response: %s' % \
(error.code, body))
except IOError as error:
            if hasattr(error, 'reason'):
                self.logger.warning('Error when sending metrics to Librato (%s)' % (error.reason))
            elif hasattr(error, 'code'):
                self.logger.warning('Error when sending metrics to Librato (%s)' % (error.code))
            else:
                self.logger.warning('Error when sending metrics to Librato (unknown reason)')
def flush(self):
"""
        POST the collected measurements (and, if enabled, legacy gauges) to Librato.
"""
# Nothing to do
if len(self.measurements) == 0:
return
headers = {
'Content-Type': 'application/json',
'User-Agent': self.build_user_agent(),
'Authorization': 'Basic %s' % self.build_basic_auth()
}
tagged_metrics = []
legacy_metrics = []
count = 0
for v in self.measurements.values():
for metric in v:
tagged_metrics.append(metric)
count += 1
if count >= self.max_metrics_payload:
self.flush_payload(headers, tagged_metrics)
count = 0
tagged_metrics = []
if count > 0:
self.flush_payload(headers, tagged_metrics)
# If enabled, submit flush metrics to Librato's legacy API
if self.write_to_legacy:
if len(self.gauges) == 0:
return
values = self.gauges.values()
count = 0
for measure in values:
legacy_metrics.append(measure)
count += 1
if count >= self.max_metrics_payload:
self.flush_payload(headers, legacy_metrics, True)
count = 0
legacy_metrics = []
if count > 0:
self.flush_payload(headers, legacy_metrics, True)
def build_basic_auth(self):
base64string = base64.encodestring('%s:%s' % (self.email, self.token))
return base64string.translate(None, '\n')
def build_user_agent(self):
try:
uname = os.uname()
system = "; ".join([uname[0], uname[4]])
except Exception:
            system = os.name
pver = sys.version_info
user_agent = '%s/%s (%s) Python-Urllib2/%d.%d' % \
(self.sink_name, self.sink_version, system, pver[0], pver[1])
return user_agent
if __name__ == "__main__":
# Initialize the logger
logging.basicConfig()
    # Initialize from our arguments
librato = LibratoStore(*sys.argv[1:])
# Get all the inputs
metrics = sys.stdin.read()
# Flush
librato.build(metrics.splitlines())
librato.flush()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A type for representing values that may or may not exist."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.data.util import structure
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("experimental.Optional", "data.experimental.Optional")
@deprecation.deprecated_endpoints("data.experimental.Optional")
@six.add_metaclass(abc.ABCMeta)
class Optional(composite_tensor.CompositeTensor):
"""Represents a value that may or may not be present.
A `tf.experimental.Optional` can represent the result of an operation that may
fail as a value, rather than raising an exception and halting execution. For
example, `tf.data.Iterator.get_next_as_optional()` returns a
`tf.experimental.Optional` that either contains the next element of an
iterator if one exists, or an "empty" value that indicates the end of the
sequence has been reached.
`tf.experimental.Optional` can only be used with values that are convertible
to `tf.Tensor` or `tf.CompositeTensor`.
One can create a `tf.experimental.Optional` from a value using the
`from_value()` method:
>>> optional = tf.experimental.Optional.from_value(42)
>>> print(optional.has_value())
tf.Tensor(True, shape=(), dtype=bool)
>>> print(optional.get_value())
tf.Tensor(42, shape=(), dtype=int32)
or without a value using the `empty()` method:
>>> optional = tf.experimental.Optional.empty(
... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))
>>> print(optional.has_value())
tf.Tensor(False, shape=(), dtype=bool)
"""
@abc.abstractmethod
def has_value(self, name=None):
"""Returns a tensor that evaluates to `True` if this optional has a value.
>>> optional = tf.experimental.Optional.from_value(42)
>>> print(optional.has_value())
tf.Tensor(True, shape=(), dtype=bool)
Args:
name: (Optional.) A name for the created operation.
Returns:
A scalar `tf.Tensor` of type `tf.bool`.
"""
raise NotImplementedError("Optional.has_value()")
@abc.abstractmethod
def get_value(self, name=None):
"""Returns the value wrapped by this optional.
If this optional does not have a value (i.e. `self.has_value()` evaluates to
`False`), this operation will raise `tf.errors.InvalidArgumentError` at
runtime.
>>> optional = tf.experimental.Optional.from_value(42)
>>> print(optional.get_value())
tf.Tensor(42, shape=(), dtype=int32)
Args:
name: (Optional.) A name for the created operation.
Returns:
The wrapped value.
"""
raise NotImplementedError("Optional.get_value()")
@abc.abstractproperty
def element_spec(self):
"""The type specification of an element of this optional.
>>> optional = tf.experimental.Optional.from_value(42)
>>> print(optional.element_spec)
tf.TensorSpec(shape=(), dtype=tf.int32, name=None)
Returns:
A (nested) structure of `tf.TypeSpec` objects matching the structure of an
element of this optional, specifying the type of individual components.
"""
raise NotImplementedError("Optional.element_spec")
@staticmethod
def empty(element_spec):
"""Returns an `Optional` that has no value.
NOTE: This method takes an argument that defines the structure of the value
that would be contained in the returned `Optional` if it had a value.
>>> optional = tf.experimental.Optional.empty(
... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))
>>> print(optional.has_value())
tf.Tensor(False, shape=(), dtype=bool)
Args:
element_spec: A (nested) structure of `tf.TypeSpec` objects matching the
structure of an element of this optional.
Returns:
A `tf.experimental.Optional` with no value.
"""
return _OptionalImpl(gen_dataset_ops.optional_none(), element_spec)
@staticmethod
def from_value(value):
"""Returns a `tf.experimental.Optional` that wraps the given value.
>>> optional = tf.experimental.Optional.from_value(42)
>>> print(optional.has_value())
tf.Tensor(True, shape=(), dtype=bool)
>>> print(optional.get_value())
tf.Tensor(42, shape=(), dtype=int32)
Args:
value: A value to wrap. The value must be convertible to `tf.Tensor` or
`tf.CompositeTensor`.
Returns:
A `tf.experimental.Optional` that wraps `value`.
"""
with ops.name_scope("optional") as scope:
with ops.name_scope("value"):
element_spec = structure.type_spec_from_value(value)
encoded_value = structure.to_tensor_list(element_spec, value)
return _OptionalImpl(
gen_dataset_ops.optional_from_value(encoded_value, name=scope),
element_spec)
class _OptionalImpl(Optional):
"""Concrete implementation of `tf.experimental.Optional`.
NOTE(mrry): This implementation is kept private, to avoid defining
`Optional.__init__()` in the public API.
"""
def __init__(self, variant_tensor, element_spec):
self._variant_tensor = variant_tensor
self._element_spec = element_spec
def has_value(self, name=None):
with ops.colocate_with(self._variant_tensor):
return gen_dataset_ops.optional_has_value(self._variant_tensor, name=name)
def get_value(self, name=None):
# TODO(b/110122868): Consolidate the restructuring logic with similar logic
# in `Iterator.get_next()` and `StructuredFunctionWrapper`.
with ops.name_scope(name, "OptionalGetValue",
[self._variant_tensor]) as scope:
with ops.colocate_with(self._variant_tensor):
result = gen_dataset_ops.optional_get_value(
self._variant_tensor,
name=scope,
output_types=structure.get_flat_tensor_types(self._element_spec),
output_shapes=structure.get_flat_tensor_shapes(self._element_spec))
# NOTE: We do not colocate the deserialization of composite tensors
# because not all ops are guaranteed to have non-GPU kernels.
return structure.from_tensor_list(self._element_spec, result)
@property
def element_spec(self):
return self._element_spec
@property
def _type_spec(self):
return OptionalSpec.from_value(self)
@tf_export(
"OptionalSpec", v1=["OptionalSpec", "data.experimental.OptionalStructure"])
class OptionalSpec(type_spec.TypeSpec):
"""Type specification for `tf.experimental.Optional`.
For instance, `tf.OptionalSpec` can be used to define a tf.function that takes
`tf.experimental.Optional` as an input argument:
>>> @tf.function(input_signature=[tf.OptionalSpec(
... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))])
... def maybe_square(optional):
... if optional.has_value():
... x = optional.get_value()
... return x * x
... return -1
>>> optional = tf.experimental.Optional.from_value(5)
>>> print(maybe_square(optional))
tf.Tensor(25, shape=(), dtype=int32)
Attributes:
element_spec: A (nested) structure of `TypeSpec` objects that represents the
type specification of the optional element.
"""
__slots__ = ["_element_spec"]
def __init__(self, element_spec):
self._element_spec = element_spec
@property
def value_type(self):
return _OptionalImpl
def _serialize(self):
return (self._element_spec,)
@property
def _component_specs(self):
return [tensor_spec.TensorSpec((), dtypes.variant)]
def _to_components(self, value):
return [value._variant_tensor] # pylint: disable=protected-access
def _from_components(self, flat_value):
# pylint: disable=protected-access
return _OptionalImpl(flat_value[0], self._element_spec)
@staticmethod
def from_value(value):
return OptionalSpec(value.element_spec)
def _to_legacy_output_types(self):
return self
def _to_legacy_output_shapes(self):
return self
def _to_legacy_output_classes(self):
return self
|
|
from expects.testing import failure
from expects import *
from datetime import datetime, timedelta
from dateutil import relativedelta
import calendar
import zipfile
from io import BytesIO
import json
import os
from esios import Esios
from esios.archives import Liquicomun, A1_liquicomun, A2_liquicomun, C2_liquicomun
from esios.archives import P48Cierre
def test_expected_to_work(the_class, start, end, expected_versions, next=0):
"""
    Generic "expected to work" helper: download the archive for the given
    range, check that the resulting zip is valid and that the first entry's
    version prefix is one of `expected_versions`.
"""
res = the_class().download(start, end, next=next)
c = BytesIO(res)
zf = zipfile.ZipFile(c)
assert zf.testzip() is None
assert zf.namelist()[0][:2] in expected_versions
return True
def test_expected_to_break(the_class, start, end, assert_message, next=0):
"""
    Generic "expected to fail" helper: assert that downloading the archive
    for the given range raises an exception.
"""
it_works = True
try:
res = the_class().download(start, end, next=next)
    except Exception:
it_works = False
assert not it_works, assert_message
return True
def validate_P48cierre(xml):
xsd_path = 'esios/data'
xsd_file = 'P48Cierre-esios-MP.xsd'
from lxml import etree, objectify
from lxml.etree import XMLSyntaxError
xmlschema_doc = etree.parse(xsd_path + '/' + xsd_file)
xmlschema = etree.XMLSchema(xmlschema_doc)
parser = objectify.makeparser(schema=xmlschema)
try:
objectify.fromstring(xml, parser)
return True
except XMLSyntaxError as e:
return False
with description('Base Liquicomun'):
with before.all:
ESIOS_TOKEN = os.getenv('ESIOS_TOKEN')
self.token = ESIOS_TOKEN
self.today = datetime.today()
self.e = Esios(self.token)
with context('Instance'):
with it('Returns liquicomun instance'):
liqui = Liquicomun(self.e)
assert isinstance(liqui, Liquicomun)
with it('Gets list'):
today = self.today
start = datetime(today.year, today.month, 1)
last_month_day = calendar.monthrange(today.year, today.month)[1]
end = datetime(today.year, today.month, last_month_day)
res = self.e.liquicomun().get(start, end)
assert len(res) >= 0
with it('Gets current liquicomun'):
today = self.today
start = datetime(today.year, today.month, 1)
last_month_day = calendar.monthrange(today.year, today.month)[1]
end = datetime(today.year, today.month, last_month_day)
expected_versions = ('A1', 'A2')
assert test_expected_to_work(the_class=self.e.liquicomun, start=start, end=end, expected_versions=expected_versions)
with it('should download C2 or A3 for 3 months ago'):
today = self.today - timedelta(days=93)
start = datetime(today.year, today.month, 1)
last_month_day = calendar.monthrange(start.year, start.month)[1]
end = datetime(start.year, start.month, last_month_day)
expected_versions = ('A3', 'C2')
assert test_expected_to_work(the_class=self.e.liquicomun, start=start, end=end, expected_versions=expected_versions)
        with it('should download C6 or C5 for two years ago'):
today = self.today - timedelta(days=730)
start = datetime(today.year, today.month, 1)
last_month_day = calendar.monthrange(start.year, start.month)[1]
end = datetime(start.year, start.month, last_month_day)
expected_versions = ('A5', 'A6', 'C6', 'C5')
assert test_expected_to_work(the_class=self.e.liquicomun, start=start, end=end, expected_versions=expected_versions)
with it('should download C7,A7,C6,A6,C5 or A5 for a long time ago'):
today = self.today - timedelta(days=730)
start = datetime(today.year, today.month, 1)
last_month_day = calendar.monthrange(start.year, start.month)[1]
end = datetime(start.year, start.month, last_month_day)
expected_versions = ('A7', 'C7', 'A6', 'C6', 'C5', 'A5')
assert test_expected_to_work(the_class=self.e.liquicomun, start=start, end=end, expected_versions=expected_versions)
with context('A1 instance'):
with it('can get the A1 for a valid date'):
today = self.today
start = datetime(today.year, today.month, 1) + relativedelta.relativedelta(months=1)
end = start + relativedelta.relativedelta(months=1) - relativedelta.relativedelta(days=1)
expected_versions = ('A1')
assert test_expected_to_work(the_class=self.e.A1_liquicomun, start=start, end=end, expected_versions=expected_versions)
with it('can\'t get the A1 for an invalid date'):
today = self.today
# Previous month
start = datetime(today.year, today.month, 1) - relativedelta.relativedelta(months=1)
end = start + relativedelta.relativedelta(months=1) - relativedelta.relativedelta(days=1)
error_message = "A1 for previous month must not work! This must be an A2"
assert test_expected_to_break(the_class=self.e.A1_liquicomun, start=start, end=end, assert_message=error_message)
with context('A2 instance'):
with it('can get the related A2 for a valid date'):
today = self.today
# today
start = end = datetime(today.year, today.month, today.day)
expected_versions = ('A2')
assert test_expected_to_work(the_class=self.e.A2_liquicomun, start=start, end=end, expected_versions=expected_versions)
with it('can\'t get the related A2 for an invalid date'):
today = self.today
# This month
start = datetime(today.year, today.month, 1)
end = start + relativedelta.relativedelta(months=1) - relativedelta.relativedelta(days=1)
error_message = "A2 for this month must not work! This must be an A1"
assert test_expected_to_break(the_class=self.e.A2_liquicomun, start=start, end=end, assert_message=error_message)
with context('C2 instance'):
with it('can get the C2 for a valid date'):
today = self.today
            # Two months ago
start = datetime(today.year, today.month, 1) - relativedelta.relativedelta(months=2)
end = start + relativedelta.relativedelta(months=1) - relativedelta.relativedelta(days=1)
expected_versions = ('C2')
assert test_expected_to_work(the_class=self.e.C2_liquicomun, start=start, end=end, expected_versions=expected_versions)
with it('can\'t get the C2 for an invalid date'):
today = self.today
# This month
start = datetime(today.year, today.month, 1)
end = start + relativedelta.relativedelta(months=1) - relativedelta.relativedelta(days=1)
error_message = "C2 for this month must not work! This must be an A1"
assert test_expected_to_break(the_class=self.e.C2_liquicomun, start=start, end=end, assert_message=error_message)
with context('Instance with next'):
with before.all:
# Expected C5, C4, C3, C2
self.expected_version_list = ["C5", "C4", "C3", "C2"]
today = self.today
# A year ago
self.start = datetime(today.year, today.month, 1) - relativedelta.relativedelta(months=12)
self.end = self.start + relativedelta.relativedelta(months=1) - relativedelta.relativedelta(days=1)
with it('can get the n=0 //a C5'):
next = 0
expected_versions = self.expected_version_list[next] # "C5"
assert test_expected_to_work(the_class=self.e.liquicomun, start=self.start, end=self.end, expected_versions=expected_versions, next=next)
with it('can get the n=1 //a C4'):
next = 1
expected_versions = self.expected_version_list[next] # "C4"
assert test_expected_to_work(the_class=self.e.liquicomun, start=self.start, end=self.end, expected_versions=expected_versions, next=next)
with it('can get the n=2 //a C3'):
            next = 2
            expected_versions = self.expected_version_list[next] # "C3"
assert test_expected_to_work(the_class=self.e.liquicomun, start=self.start, end=self.end, expected_versions=expected_versions, next=next)
with it('can get the n=3 //a C2'):
next = 3
            expected_versions = self.expected_version_list[next] # "C2"
assert test_expected_to_work(the_class=self.e.liquicomun, start=self.start, end=self.end, expected_versions=expected_versions, next=next)
with it('can\'t get the n=4 //last is n=3 C2!'):
next = 4
expected = "irreal"
error_message = "The next 4 version for one year ago must not exist. Available '{}'".format(self.expected_version_list)
assert test_expected_to_break(the_class=self.e.liquicomun, start=self.start, end=self.end, assert_message=error_message, next=next)
with it('can\'t get the n=-1'):
next = -1
expected = "irreal"
error_message = "Negative next version must break"
assert test_expected_to_break(the_class=self.e.liquicomun, start=self.start, end=self.end, assert_message=error_message, next=next)
with description('P48Cierre'):
with before.all:
ESIOS_TOKEN = os.getenv('ESIOS_TOKEN')
self.token = ESIOS_TOKEN
self.today = datetime.today()
self.e = Esios(self.token)
with context('Instance'):
with it('Returns P48Cierre instance'):
liqui = P48Cierre(self.e)
assert isinstance(liqui, P48Cierre)
with it('Gets list'):
today = self.today
start = datetime(today.year, today.month, 1)
last_month_day = calendar.monthrange(today.year, today.month)[1]
end = datetime(today.year, today.month, last_month_day)
res = self.e.p48cierre().get(start, end)
assert len(res) >= 0
with it('Gets Yesterday p48'):
today = self.today
start = today.replace(hour=0, minute=0, second=0, microsecond=0) - relativedelta.relativedelta(days=1)
end = today.replace(hour=23, minute=59, second=59, microsecond=0) - relativedelta.relativedelta(days=1)
res = P48Cierre(self.e).download(start, end)
assert validate_P48cierre(res)
assert not validate_P48cierre(res + b'ERROR')
with it('Gets today and yesterday p48'):
today = self.today
start = today.replace(hour=0, minute=0, second=0, microsecond=0) - relativedelta.relativedelta(days=1)
end = today.replace(hour=23, minute=59, second=59, microsecond=0)
res = P48Cierre(self.e).download(start, end)
c = BytesIO(res)
zf = zipfile.ZipFile(c)
assert zf.testzip() is None
expected_filenames = [
'p48cierre_{}.xml'.format(start.strftime('%Y%m%d')),
'p48cierre_{}.xml'.format(end.strftime('%Y%m%d')),
'p48_{}'.format(today.strftime('%Y%m%d'))]
assert len(zf.namelist()) == 2
for filename in zf.namelist():
if len(filename) == 22:
assert filename in expected_filenames
else:
assert filename[:12] in expected_filenames
with it('Gets current month p48'):
today = self.today
start = datetime(today.year, today.month, 1)
last_month_day = calendar.monthrange(today.year, today.month)[1]
end = datetime(today.year, today.month, today.day > 1 and today.day - 1 or 1)
res = P48Cierre(self.e).download(start, end)
c = BytesIO(res)
zf = zipfile.ZipFile(c)
assert zf.testzip() is None
expected_filenames = []
for day in range(0, today.day):
p48_day = start + relativedelta.relativedelta(days=day)
expected_filenames.append('p48cierre_{}.xml'.format(p48_day.strftime('%Y%m%d')))
assert len(zf.namelist()) == today.day - 1
for filename in zf.namelist():
assert filename in expected_filenames
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
from datetime import datetime
import doctest
from StringIO import StringIO
import unittest
import sys
from babel.messages.catalog import Catalog, Message
from babel.messages import pofile
from babel.util import FixedOffsetTimezone, LOCALTZ
class ReadPoTestCase(unittest.TestCase):
def test_preserve_locale(self):
buf = StringIO(r'''msgid "foo"
msgstr "Voh"''')
catalog = pofile.read_po(buf, locale='en_US')
self.assertEqual('en_US', catalog.locale)
def test_preserve_domain(self):
buf = StringIO(r'''msgid "foo"
msgstr "Voh"''')
catalog = pofile.read_po(buf, domain='mydomain')
self.assertEqual('mydomain', catalog.domain)
def test_read_multiline(self):
buf = StringIO(r'''msgid ""
"Here's some text that\n"
"includesareallylongwordthatmightbutshouldnt"
" throw us into an infinite "
"loop\n"
msgstr ""''')
catalog = pofile.read_po(buf)
self.assertEqual(1, len(catalog))
message = list(catalog)[1]
self.assertEqual("Here's some text that\nincludesareallylongwordthat"
"mightbutshouldnt throw us into an infinite loop\n",
message.id)
def test_fuzzy_header(self):
buf = StringIO(r'''\
# Translations template for AReallyReallyLongNameForAProject.
# Copyright (C) 2007 ORGANIZATION
# This file is distributed under the same license as the
# AReallyReallyLongNameForAProject project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
#, fuzzy
''')
catalog = pofile.read_po(buf)
self.assertEqual(1, len(list(catalog)))
self.assertEqual(True, list(catalog)[0].fuzzy)
def test_not_fuzzy_header(self):
buf = StringIO(r'''\
# Translations template for AReallyReallyLongNameForAProject.
# Copyright (C) 2007 ORGANIZATION
# This file is distributed under the same license as the
# AReallyReallyLongNameForAProject project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
''')
catalog = pofile.read_po(buf)
self.assertEqual(1, len(list(catalog)))
self.assertEqual(False, list(catalog)[0].fuzzy)
def test_header_entry(self):
buf = StringIO(r'''\
# SOME DESCRIPTIVE TITLE.
# Copyright (C) 2007 THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: 3.15\n"
"Report-Msgid-Bugs-To: Fliegender Zirkus <fliegender@zirkus.de>\n"
"POT-Creation-Date: 2007-09-27 11:19+0700\n"
"PO-Revision-Date: 2007-09-27 21:42-0700\n"
"Last-Translator: John <cleese@bavaria.de>\n"
"Language-Team: German Lang <de@babel.org>\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=iso-8859-2\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 1.0dev-r313\n"
''')
catalog = pofile.read_po(buf)
self.assertEqual(1, len(list(catalog)))
self.assertEqual(u'3.15', catalog.version)
self.assertEqual(u'Fliegender Zirkus <fliegender@zirkus.de>',
catalog.msgid_bugs_address)
self.assertEqual(datetime(2007, 9, 27, 11, 19,
tzinfo=FixedOffsetTimezone(7 * 60)),
catalog.creation_date)
self.assertEqual(u'John <cleese@bavaria.de>', catalog.last_translator)
self.assertEqual(u'German Lang <de@babel.org>', catalog.language_team)
self.assertEqual(u'iso-8859-2', catalog.charset)
self.assertEqual(True, list(catalog)[0].fuzzy)
def test_obsolete_message(self):
buf = StringIO(r'''# This is an obsolete message
#~ msgid "foo"
#~ msgstr "Voh"
# This message is not obsolete
#: main.py:1
msgid "bar"
msgstr "Bahr"
''')
catalog = pofile.read_po(buf)
self.assertEqual(1, len(catalog))
self.assertEqual(1, len(catalog.obsolete))
message = catalog.obsolete[u'foo']
self.assertEqual(u'foo', message.id)
self.assertEqual(u'Voh', message.string)
self.assertEqual(['This is an obsolete message'], message.user_comments)
def test_obsolete_message_ignored(self):
buf = StringIO(r'''# This is an obsolete message
#~ msgid "foo"
#~ msgstr "Voh"
# This message is not obsolete
#: main.py:1
msgid "bar"
msgstr "Bahr"
''')
catalog = pofile.read_po(buf, ignore_obsolete=True)
self.assertEqual(1, len(catalog))
self.assertEqual(0, len(catalog.obsolete))
def test_single_plural_form(self):
buf = StringIO(r'''msgid "foo"
msgid_plural "foos"
msgstr[0] "Voh"''')
catalog = pofile.read_po(buf, locale='ja_JP')
self.assertEqual(1, len(catalog))
self.assertEqual(1, catalog.num_plurals)
message = catalog['foo']
self.assertEqual(1, len(message.string))
def test_singular_plural_form(self):
buf = StringIO(r'''msgid "foo"
msgid_plural "foos"
msgstr[0] "Voh"
msgstr[1] "Vohs"''')
catalog = pofile.read_po(buf, locale='nl_NL')
self.assertEqual(1, len(catalog))
self.assertEqual(2, catalog.num_plurals)
message = catalog['foo']
self.assertEqual(2, len(message.string))
def test_more_than_two_plural_forms(self):
buf = StringIO(r'''msgid "foo"
msgid_plural "foos"
msgstr[0] "Voh"
msgstr[1] "Vohs"
msgstr[2] "Vohss"''')
catalog = pofile.read_po(buf, locale='lv_LV')
self.assertEqual(1, len(catalog))
self.assertEqual(3, catalog.num_plurals)
message = catalog['foo']
self.assertEqual(3, len(message.string))
self.assertEqual(u'Vohss', message.string[2])
def test_plural_with_square_brackets(self):
buf = StringIO(r'''msgid "foo"
msgid_plural "foos"
msgstr[0] "Voh [text]"
msgstr[1] "Vohs [text]"''')
catalog = pofile.read_po(buf, locale='nb_NO')
self.assertEqual(1, len(catalog))
self.assertEqual(2, catalog.num_plurals)
message = catalog['foo']
self.assertEqual(2, len(message.string))
class WritePoTestCase(unittest.TestCase):
def test_join_locations(self):
catalog = Catalog()
catalog.add(u'foo', locations=[('main.py', 1)])
catalog.add(u'foo', locations=[('utils.py', 3)])
buf = StringIO()
pofile.write_po(buf, catalog, omit_header=True)
self.assertEqual('''#: main.py:1 utils.py:3
msgid "foo"
msgstr ""''', buf.getvalue().strip())
def test_duplicate_comments(self):
catalog = Catalog()
catalog.add(u'foo', auto_comments=['A comment'])
catalog.add(u'foo', auto_comments=['A comment'])
buf = StringIO()
pofile.write_po(buf, catalog, omit_header=True)
self.assertEqual('''#. A comment
msgid "foo"
msgstr ""''', buf.getvalue().strip())
def test_wrap_long_lines(self):
text = """Here's some text where
white space and line breaks matter, and should
not be removed
"""
catalog = Catalog()
catalog.add(text, locations=[('main.py', 1)])
buf = StringIO()
pofile.write_po(buf, catalog, no_location=True, omit_header=True,
width=42)
self.assertEqual(r'''msgid ""
"Here's some text where \n"
"white space and line breaks matter, and"
" should\n"
"\n"
"not be removed\n"
"\n"
msgstr ""''', buf.getvalue().strip())
def test_wrap_long_lines_with_long_word(self):
text = """Here's some text that
includesareallylongwordthatmightbutshouldnt throw us into an infinite loop
"""
catalog = Catalog()
catalog.add(text, locations=[('main.py', 1)])
buf = StringIO()
pofile.write_po(buf, catalog, no_location=True, omit_header=True,
width=32)
self.assertEqual(r'''msgid ""
"Here's some text that\n"
"includesareallylongwordthatmightbutshouldnt"
" throw us into an infinite "
"loop\n"
msgstr ""''', buf.getvalue().strip())
def test_wrap_long_lines_in_header(self):
"""
Verify that long lines in the header comment are wrapped correctly.
"""
catalog = Catalog(project='AReallyReallyLongNameForAProject',
revision_date=datetime(2007, 4, 1))
buf = StringIO()
pofile.write_po(buf, catalog)
self.assertEqual('''\
# Translations template for AReallyReallyLongNameForAProject.
# Copyright (C) 2007 ORGANIZATION
# This file is distributed under the same license as the
# AReallyReallyLongNameForAProject project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2007.
#
#, fuzzy''', '\n'.join(buf.getvalue().splitlines()[:7]))
def test_wrap_locations_with_hyphens(self):
catalog = Catalog()
catalog.add(u'foo', locations=[
('doupy/templates/base/navmenu.inc.html.py', 60)
])
catalog.add(u'foo', locations=[
('doupy/templates/job-offers/helpers.html', 22)
])
buf = StringIO()
pofile.write_po(buf, catalog, omit_header=True)
self.assertEqual('''#: doupy/templates/base/navmenu.inc.html.py:60
#: doupy/templates/job-offers/helpers.html:22
msgid "foo"
msgstr ""''', buf.getvalue().strip())
def test_no_wrap_and_width_behaviour_on_comments(self):
catalog = Catalog()
catalog.add("Pretty dam long message id, which must really be big "
"to test this wrap behaviour, if not it won't work.",
locations=[("fake.py", n) for n in range(1, 30)])
buf = StringIO()
pofile.write_po(buf, catalog, width=None, omit_header=True)
self.assertEqual("""\
#: fake.py:1 fake.py:2 fake.py:3 fake.py:4 fake.py:5 fake.py:6 fake.py:7
#: fake.py:8 fake.py:9 fake.py:10 fake.py:11 fake.py:12 fake.py:13 fake.py:14
#: fake.py:15 fake.py:16 fake.py:17 fake.py:18 fake.py:19 fake.py:20 fake.py:21
#: fake.py:22 fake.py:23 fake.py:24 fake.py:25 fake.py:26 fake.py:27 fake.py:28
#: fake.py:29
msgid "pretty dam long message id, which must really be big to test this wrap behaviour, if not it won't work."
msgstr ""
""", buf.getvalue().lower())
buf = StringIO()
pofile.write_po(buf, catalog, width=100, omit_header=True)
self.assertEqual("""\
#: fake.py:1 fake.py:2 fake.py:3 fake.py:4 fake.py:5 fake.py:6 fake.py:7 fake.py:8 fake.py:9 fake.py:10
#: fake.py:11 fake.py:12 fake.py:13 fake.py:14 fake.py:15 fake.py:16 fake.py:17 fake.py:18 fake.py:19
#: fake.py:20 fake.py:21 fake.py:22 fake.py:23 fake.py:24 fake.py:25 fake.py:26 fake.py:27 fake.py:28
#: fake.py:29
msgid ""
"pretty dam long message id, which must really be big to test this wrap behaviour, if not it won't"
" work."
msgstr ""
""", buf.getvalue().lower())
def test_pot_with_translator_comments(self):
catalog = Catalog()
catalog.add(u'foo', locations=[('main.py', 1)],
auto_comments=['Comment About `foo`'])
catalog.add(u'bar', locations=[('utils.py', 3)],
user_comments=['Comment About `bar` with',
'multiple lines.'])
buf = StringIO()
pofile.write_po(buf, catalog, omit_header=True)
self.assertEqual('''#. Comment About `foo`
#: main.py:1
msgid "foo"
msgstr ""
# Comment About `bar` with
# multiple lines.
#: utils.py:3
msgid "bar"
msgstr ""''', buf.getvalue().strip())
def test_po_with_obsolete_message(self):
catalog = Catalog()
catalog.add(u'foo', u'Voh', locations=[('main.py', 1)])
catalog.obsolete['bar'] = Message(u'bar', u'Bahr',
locations=[('utils.py', 3)],
user_comments=['User comment'])
buf = StringIO()
pofile.write_po(buf, catalog, omit_header=True)
self.assertEqual('''#: main.py:1
msgid "foo"
msgstr "Voh"
# User comment
#~ msgid "bar"
#~ msgstr "Bahr"''', buf.getvalue().strip())
def test_po_with_multiline_obsolete_message(self):
catalog = Catalog()
catalog.add(u'foo', u'Voh', locations=[('main.py', 1)])
msgid = r"""Here's a message that covers
multiple lines, and should still be handled
correctly.
"""
msgstr = r"""Here's a message that covers
multiple lines, and should still be handled
correctly.
"""
catalog.obsolete[msgid] = Message(msgid, msgstr,
locations=[('utils.py', 3)])
buf = StringIO()
pofile.write_po(buf, catalog, omit_header=True)
self.assertEqual(r'''#: main.py:1
msgid "foo"
msgstr "Voh"
#~ msgid ""
#~ "Here's a message that covers\n"
#~ "multiple lines, and should still be handled\n"
#~ "correctly.\n"
#~ msgstr ""
#~ "Here's a message that covers\n"
#~ "multiple lines, and should still be handled\n"
#~ "correctly.\n"''', buf.getvalue().strip())
def test_po_with_obsolete_message_ignored(self):
catalog = Catalog()
catalog.add(u'foo', u'Voh', locations=[('main.py', 1)])
catalog.obsolete['bar'] = Message(u'bar', u'Bahr',
locations=[('utils.py', 3)],
user_comments=['User comment'])
buf = StringIO()
pofile.write_po(buf, catalog, omit_header=True, ignore_obsolete=True)
self.assertEqual('''#: main.py:1
msgid "foo"
msgstr "Voh"''', buf.getvalue().strip())
def test_po_with_previous_msgid(self):
catalog = Catalog()
catalog.add(u'foo', u'Voh', locations=[('main.py', 1)],
previous_id=u'fo')
buf = StringIO()
pofile.write_po(buf, catalog, omit_header=True, include_previous=True)
self.assertEqual('''#: main.py:1
#| msgid "fo"
msgid "foo"
msgstr "Voh"''', buf.getvalue().strip())
def test_po_with_previous_msgid_plural(self):
catalog = Catalog()
catalog.add((u'foo', u'foos'), (u'Voh', u'Voeh'),
locations=[('main.py', 1)], previous_id=(u'fo', u'fos'))
buf = StringIO()
pofile.write_po(buf, catalog, omit_header=True, include_previous=True)
self.assertEqual('''#: main.py:1
#| msgid "fo"
#| msgid_plural "fos"
msgid "foo"
msgid_plural "foos"
msgstr[0] "Voh"
msgstr[1] "Voeh"''', buf.getvalue().strip())
def test_sorted_po(self):
catalog = Catalog()
catalog.add(u'bar', locations=[('utils.py', 3)],
user_comments=['Comment About `bar` with',
'multiple lines.'])
catalog.add((u'foo', u'foos'), (u'Voh', u'Voeh'),
locations=[('main.py', 1)])
buf = StringIO()
pofile.write_po(buf, catalog, sort_output=True)
value = buf.getvalue().strip()
assert '''\
# Comment About `bar` with
# multiple lines.
#: utils.py:3
msgid "bar"
msgstr ""
#: main.py:1
msgid "foo"
msgid_plural "foos"
msgstr[0] "Voh"
msgstr[1] "Voeh"''' in value
assert value.find('msgid ""') < value.find('msgid "bar"') < value.find('msgid "foo"')
def test_silent_location_fallback(self):
buf = StringIO('''\
#: broken_file.py
msgid "missing line number"
msgstr ""
#: broken_file.py:broken_line_number
msgid "broken line number"
msgstr ""''')
catalog = pofile.read_po(buf)
self.assertEqual(catalog['missing line number'].locations, [])
self.assertEqual(catalog['broken line number'].locations, [])
def suite():
suite = unittest.TestSuite()
is_py23 = sys.version_info[0:2] == (2, 3)
if not is_py23:
suite.addTest(doctest.DocTestSuite(pofile))
suite.addTest(unittest.makeSuite(ReadPoTestCase))
suite.addTest(unittest.makeSuite(WritePoTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
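# A small, hedged round-trip sketch (illustrative only, not part of the test
# suite above): it exercises the read_po/write_po API used throughout these
# tests. Assumes the Python 2 style StringIO used by the rest of this module.
def _example_po_roundtrip():
    from StringIO import StringIO
    from babel.messages import pofile
    from babel.messages.catalog import Catalog
    catalog = Catalog()
    catalog.add(u'foo', u'Voh', locations=[('main.py', 1)])
    out = StringIO()
    pofile.write_po(out, catalog, omit_header=True)
    # Parse the serialized catalog back and check the translation survived.
    parsed = pofile.read_po(StringIO(out.getvalue()))
    assert parsed[u'foo'].string == u'Voh'
    return parsed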
|
|
import os.path as op
from functools import wraps
from flask import Blueprint, current_app, render_template, abort, g, url_for
from flask_admin import babel
from flask_admin._compat import with_metaclass, as_unicode
from flask_admin import helpers as h
# For compatibility reasons import MenuLink
from flask_admin.menu import MenuCategory, MenuView, MenuLink
def expose(url='/', methods=('GET',)):
"""
Use this decorator to expose views in your view classes.
:param url:
Relative URL for the view
:param methods:
Allowed HTTP methods. By default only GET is allowed.
"""
def wrap(f):
if not hasattr(f, '_urls'):
f._urls = []
f._urls.append((url, methods))
return f
return wrap
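# A brief sketch of what expose() records (hypothetical handler, not part of
# Flask-Admin itself): the decorator only appends (url, methods) tuples to a
# `_urls` attribute on the function; AdminViewMeta below turns those entries
# into blueprint URL rules when a view class is created.
def _example_expose_metadata():
    @expose('/stats/', methods=('GET', 'POST'))
    def stats(self):
        return 'ok'
    return stats._urls  # -> [('/stats/', ('GET', 'POST'))]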
def expose_plugview(url='/'):
"""
Decorator to expose Flask's pluggable view classes
(``flask.views.View`` or ``flask.views.MethodView``).
:param url:
Relative URL for the view
.. versionadded:: 1.0.4
"""
def wrap(v):
handler = expose(url, v.methods)
if hasattr(v, 'as_view'):
return handler(v.as_view(v.__name__))
else:
return handler(v)
return wrap
# Base views
def _wrap_view(f):
# Avoid wrapping view method twice
if hasattr(f, '_wrapped'):
return f
@wraps(f)
def inner(self, *args, **kwargs):
# Store current admin view
h.set_current_view(self)
# Check if administrative piece is accessible
abort = self._handle_view(f.__name__, **kwargs)
if abort is not None:
return abort
return self._run_view(f, *args, **kwargs)
inner._wrapped = True
return inner
class AdminViewMeta(type):
"""
View metaclass.
    Does some precalculations (like getting the list of view methods from the class) to avoid
    recalculating them for each view class instance.
"""
def __init__(cls, classname, bases, fields):
type.__init__(cls, classname, bases, fields)
# Gather exposed views
cls._urls = []
cls._default_view = None
for p in dir(cls):
attr = getattr(cls, p)
if hasattr(attr, '_urls'):
# Collect methods
for url, methods in attr._urls:
cls._urls.append((url, p, methods))
if url == '/':
cls._default_view = p
# Wrap views
setattr(cls, p, _wrap_view(attr))
class BaseViewClass(object):
pass
class BaseView(with_metaclass(AdminViewMeta, BaseViewClass)):
"""
Base administrative view.
Derive from this class to implement your administrative interface piece. For example::
from flask_admin import BaseView, expose
class MyView(BaseView):
@expose('/')
def index(self):
return 'Hello World!'
Icons can be added to the menu by using `menu_icon_type` and `menu_icon_value`. For example::
admin.add_view(MyView(name='My View', menu_icon_type='glyph', menu_icon_value='glyphicon-home'))
"""
@property
def _template_args(self):
"""
Extra template arguments.
If you need to pass some extra parameters to the template,
        you can override a particular view function, contribute the
        arguments you want to pass to the template and call the parent view.
These arguments are local for this request and will be discarded
in the next request.
        Any value passed through ``_template_args`` will override whatever
        the parent view function passed to the template.
For example::
class MyAdmin(ModelView):
@expose('/')
def index(self):
self._template_args['name'] = 'foobar'
self._template_args['code'] = '12345'
super(MyAdmin, self).index()
"""
args = getattr(g, '_admin_template_args', None)
if args is None:
args = g._admin_template_args = dict()
return args
def __init__(self, name=None, category=None, endpoint=None, url=None,
static_folder=None, static_url_path=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor.
:param name:
Name of this view. If not provided, will default to the class name.
:param category:
View category. If not provided, this view will be shown as a top-level menu item. Otherwise, it will
be in a submenu.
:param endpoint:
Base endpoint name for the view. For example, if there's a view method called "index" and
endpoint is set to "myadmin", you can use `url_for('myadmin.index')` to get the URL to the
view method. Defaults to the class name in lower case.
:param url:
Base URL. If provided, affects how URLs are generated. For example, if the url parameter
is "test", the resulting URL will look like "/admin/test/". If not provided, will
use endpoint as a base url. However, if URL starts with '/', absolute path is assumed
and '/admin/' prefix won't be applied.
:param static_url_path:
Static URL Path. If provided, this specifies the path to the static url directory.
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self.name = name
self.category = category
self.endpoint = self._get_endpoint(endpoint)
self.url = url
self.static_folder = static_folder
self.static_url_path = static_url_path
self.menu = None
self.menu_class_name = menu_class_name
self.menu_icon_type = menu_icon_type
self.menu_icon_value = menu_icon_value
# Initialized from create_blueprint
self.admin = None
self.blueprint = None
# Default view
if self._default_view is None:
raise Exception(u'Attempted to instantiate admin view %s without default view' % self.__class__.__name__)
def _get_endpoint(self, endpoint):
"""
Generate Flask endpoint name. By default converts class name to lower case if endpoint is
not explicitly provided.
"""
if endpoint:
return endpoint
return self.__class__.__name__.lower()
def _get_view_url(self, admin, url):
"""
Generate URL for the view. Override to change default behavior.
"""
if url is None:
if admin.url != '/':
url = '%s/%s' % (admin.url, self.endpoint)
else:
if self == admin.index_view:
url = '/'
else:
url = '/%s' % self.endpoint
else:
if not url.startswith('/'):
url = '%s/%s' % (admin.url, url)
return url
def create_blueprint(self, admin):
"""
Create Flask blueprint.
"""
# Store admin instance
self.admin = admin
# If the static_url_path is not provided, use the admin's
if not self.static_url_path:
self.static_url_path = admin.static_url_path
# Generate URL
self.url = self._get_view_url(admin, self.url)
# If we're working from the root of the site, set prefix to None
if self.url == '/':
self.url = None
# prevent admin static files from conflicting with flask static files
if not self.static_url_path:
self.static_folder = 'static'
self.static_url_path = '/static/admin'
        # If name is not provided, use the prettified class name
if self.name is None:
self.name = self._prettify_class_name(self.__class__.__name__)
# Create blueprint and register rules
self.blueprint = Blueprint(self.endpoint, __name__,
url_prefix=self.url,
subdomain=self.admin.subdomain,
template_folder=op.join('templates', self.admin.template_mode),
static_folder=self.static_folder,
static_url_path=self.static_url_path)
for url, name, methods in self._urls:
self.blueprint.add_url_rule(url,
name,
getattr(self, name),
methods=methods)
return self.blueprint
def render(self, template, **kwargs):
"""
Render template
:param template:
Template path to render
:param kwargs:
Template arguments
"""
# Store self as admin_view
kwargs['admin_view'] = self
kwargs['admin_base_template'] = self.admin.base_template
# Provide i18n support even if flask-babel is not installed
# or enabled.
kwargs['_gettext'] = babel.gettext
kwargs['_ngettext'] = babel.ngettext
kwargs['h'] = h
# Expose get_url helper
kwargs['get_url'] = self.get_url
# Expose config info
kwargs['config'] = current_app.config
# Contribute extra arguments
kwargs.update(self._template_args)
return render_template(template, **kwargs)
def _prettify_class_name(self, name):
"""
Split words in PascalCase string into separate words.
:param name:
String to prettify
"""
return h.prettify_class_name(name)
def is_visible(self):
"""
        Override this method if you want to dynamically hide or show administrative views
        from the Flask-Admin menu structure.
        By default, the item is visible in the menu.
        Please note that an item must be both visible and accessible to be displayed in the menu.
"""
return True
def is_accessible(self):
"""
Override this method to add permission checks.
Flask-Admin does not make any assumptions about the authentication system used in your application, so it is
up to you to implement it.
By default, it will allow access for everyone.
"""
return True
def _handle_view(self, name, **kwargs):
"""
This method will be executed before calling any view method.
It will execute the ``inaccessible_callback`` if the view is not
accessible.
:param name:
View function name
:param kwargs:
View function arguments
"""
if not self.is_accessible():
return self.inaccessible_callback(name, **kwargs)
def _run_view(self, fn, *args, **kwargs):
"""
        This method will run the actual view function.
        While it is similar to _handle_view, it can be used to change the
        arguments that are passed to the view.
:param fn:
View function
:param kwargs:
Arguments
"""
return fn(self, *args, **kwargs)
def inaccessible_callback(self, name, **kwargs):
"""
Handle the response to inaccessible views.
        By default, it throws an HTTP 403 error. Override this method to
customize the behaviour.
"""
return abort(403)
def get_url(self, endpoint, **kwargs):
"""
Generate URL for the endpoint. If you want to customize URL generation
        logic (persist some query string argument, for example), this is the
        right place to do it.
:param endpoint:
Flask endpoint name
:param kwargs:
Arguments for `url_for`
"""
return url_for(endpoint, **kwargs)
@property
def _debug(self):
if not self.admin or not self.admin.app:
return False
return self.admin.app.debug
class AdminIndexView(BaseView):
"""
Default administrative interface index page when visiting the ``/admin/`` URL.
It can be overridden by passing your own view class to the ``Admin`` constructor::
class MyHomeView(AdminIndexView):
@expose('/')
def index(self):
arg1 = 'Hello'
return self.render('admin/myhome.html', arg1=arg1)
admin = Admin(index_view=MyHomeView())
Also, you can change the root url from /admin to / with the following::
admin = Admin(
app,
index_view=AdminIndexView(
name='Home',
template='admin/myhome.html',
url='/'
)
)
Default values for the index page are:
* If a name is not provided, 'Home' will be used.
* If an endpoint is not provided, will default to ``admin``
* Default URL route is ``/admin``.
* Automatically associates with static folder.
* Default template is ``admin/index.html``
"""
def __init__(self, name=None, category=None,
endpoint=None, url=None,
template='admin/index.html',
menu_class_name=None,
menu_icon_type=None,
menu_icon_value=None):
super(AdminIndexView, self).__init__(name or babel.lazy_gettext('Home'),
category,
endpoint or 'admin',
url or '/admin',
'static',
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
self._template = template
@expose()
def index(self):
return self.render(self._template)
class Admin(object):
"""
Collection of the admin views. Also manages menu structure.
"""
def __init__(self, app=None, name=None,
url=None, subdomain=None,
index_view=None,
translations_path=None,
endpoint=None,
static_url_path=None,
base_template=None,
template_mode=None,
category_icon_classes=None):
"""
Constructor.
:param app:
Flask application object
:param name:
Application name. Will be displayed in the main menu and as a page title. Defaults to "Admin"
:param url:
Base URL
:param subdomain:
Subdomain to use
:param index_view:
Home page view to use. Defaults to `AdminIndexView`.
:param translations_path:
Location of the translation message catalogs. By default will use the translations
shipped with Flask-Admin.
:param endpoint:
Base endpoint name for index view. If you use multiple instances of the `Admin` class with
a single Flask application, you have to set a unique endpoint name for each instance.
:param static_url_path:
Static URL Path. If provided, this specifies the default path to the static url directory for
all its views. Can be overridden in view configuration.
:param base_template:
Override base HTML template for all static views. Defaults to `admin/base.html`.
:param template_mode:
Base template path. Defaults to `bootstrap2`. If you want to use
Bootstrap 3 integration, change it to `bootstrap3`.
:param category_icon_classes:
A dict of category names as keys and html classes as values to be added to menu category icons.
Example: {'Favorites': 'glyphicon glyphicon-star'}
"""
self.app = app
self.translations_path = translations_path
self._views = []
self._menu = []
self._menu_categories = dict()
self._menu_links = []
if name is None:
name = 'Admin'
self.name = name
self.index_view = index_view or AdminIndexView(endpoint=endpoint, url=url)
self.endpoint = endpoint or self.index_view.endpoint
self.url = url or self.index_view.url
self.static_url_path = static_url_path
self.subdomain = subdomain
self.base_template = base_template or 'admin/base.html'
self.template_mode = template_mode or 'bootstrap2'
self.category_icon_classes = category_icon_classes or dict()
# Add predefined index view
self.add_view(self.index_view)
# Register with application
if app is not None:
self._init_extension()
def add_view(self, view):
"""
Add a view to the collection.
:param view:
View to add.
"""
# Add to views
self._views.append(view)
# If app was provided in constructor, register view with Flask app
if self.app is not None:
self.app.register_blueprint(view.create_blueprint(self))
self._add_view_to_menu(view)
def add_views(self, *args):
"""
Add one or more views to the collection.
Examples::
admin.add_views(view1)
admin.add_views(view1, view2, view3, view4)
admin.add_views(*my_list)
:param args:
Argument list including the views to add.
"""
for view in args:
self.add_view(view)
def add_link(self, link):
"""
Add link to menu links collection.
:param link:
Link to add.
"""
if link.category:
self._add_menu_item(link, link.category)
else:
self._menu_links.append(link)
def add_links(self, *args):
"""
Add one or more links to the menu links collection.
Examples::
admin.add_links(link1)
admin.add_links(link1, link2, link3, link4)
admin.add_links(*my_list)
:param args:
Argument list including the links to add.
"""
for link in args:
self.add_link(link)
def _add_menu_item(self, menu_item, target_category):
if target_category:
cat_text = as_unicode(target_category)
category = self._menu_categories.get(cat_text)
# create a new menu category if one does not exist already
if category is None:
category = MenuCategory(target_category)
category.class_name = self.category_icon_classes.get(cat_text)
self._menu_categories[cat_text] = category
self._menu.append(category)
category.add_child(menu_item)
else:
self._menu.append(menu_item)
def _add_view_to_menu(self, view):
"""
Add a view to the menu tree
:param view:
View to add
"""
self._add_menu_item(MenuView(view.name, view), view.category)
def get_category_menu_item(self, name):
return self._menu_categories.get(name)
def init_app(self, app):
"""
Register all views with the Flask application.
:param app:
Flask application instance
"""
self.app = app
self._init_extension()
# Register views
for view in self._views:
app.register_blueprint(view.create_blueprint(self))
def _init_extension(self):
if not hasattr(self.app, 'extensions'):
self.app.extensions = dict()
admins = self.app.extensions.get('admin', [])
for p in admins:
if p.endpoint == self.endpoint:
raise Exception(u'Cannot have two Admin() instances with same'
u' endpoint name.')
if p.url == self.url and p.subdomain == self.subdomain:
raise Exception(u'Cannot assign two Admin() instances with same'
u' URL and subdomain to the same application.')
admins.append(self)
self.app.extensions['admin'] = admins
def menu(self):
"""
Return the menu hierarchy.
"""
return self._menu
def menu_links(self):
"""
Return menu links.
"""
return self._menu_links
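# A hedged usage sketch (names are made up, not part of Flask-Admin): wires
# the classes above into a minimal application. With the defaults documented
# in BaseView and Admin, the exposed view ends up at /admin/notesview/ and
# appears in the menu under the "Content" category.
def _example_admin_app():
    from flask import Flask
    class NotesView(BaseView):
        @expose('/')
        def index(self):
            # Render the stock admin template shipped with Flask-Admin.
            return self.render('admin/index.html')
    app = Flask(__name__)
    admin = Admin(app, name='Example Admin', template_mode='bootstrap3')
    admin.add_view(NotesView(name='Notes', category='Content'))
    return app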
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Automatic quantization toolkit."""
import tvm.ir
import tvm
from tvm.runtime import Object
from . import _quantize
from ._calibrate import calibrate
from ._partition_conversions import partition_conversions
from .. import expr as _expr
from .. import transform as _transform
class QAnnotateKind(object):
"""Denote the kind of annotation field, corresponding
    to different nbit configurations."""
IDENTITY = 0
INPUT = 1
WEIGHT = 2
ACTIVATION = 3
def kind2str(kind):
"""Convert a `QAnnotateKind` to string"""
str_map = {
QAnnotateKind.INPUT: "input",
QAnnotateKind.WEIGHT: "weight",
QAnnotateKind.ACTIVATION: "activation",
QAnnotateKind.IDENTITY: "identity",
}
assert kind in str_map
return str_map[kind]
def _forward_op(ref_call, args):
"""forward the operator of ref_call with provided arguments"""
return _expr.Call(ref_call.op, args, ref_call.attrs, ref_call.type_args, ref_call.span)
@tvm._ffi.register_object("relay.quantize.QConfig")
class QConfig(Object):
"""Configure the quantization behavior by setting config variables.
Note
----
This object is backed by node system in C++, with arguments that can be
exchanged between python and C++.
Do not construct directly, use qconfig instead.
The fields that are backed by the C++ node are immutable once an instance
is constructed. See _node_defaults for the fields.
"""
_node_defaults = {
"nbit_input": 8,
"nbit_weight": 8,
"nbit_activation": 32,
"dtype_input": "int8",
"dtype_weight": "int8",
"dtype_activation": "int32",
"calibrate_mode": "global_scale",
"global_scale": 8.0,
"weight_scale": "power2",
"skip_dense_layer": True,
"skip_conv_layers": [0],
"do_simulation": False,
"round_for_shift": True,
"debug_enabled_ops": None,
"rounding": "UPWARD",
"calibrate_chunk_by": -1,
"partition_conversions": "disabled",
}
# pylint: disable=no-member
def __init__(self, handle):
"""Initialize the function with handle
Parameters
----------
handle : SymbolHandle
the handle to the underlying C++ Symbol
"""
super(QConfig, self).__init__(handle)
self.handle = handle
def guard(self, ref_call):
"""Return true if op is enabled, otherwise return false"""
op_name = ref_call.op.name
if self.debug_enabled_ops is not None:
name_list = [x.value for x in self.debug_enabled_ops]
if op_name not in name_list:
return False
return True
def get_nbit_by_kind(self, kind):
name = kind2str(kind)
return getattr(self, "nbit_" + name)
def get_dtype_by_kind(self, kind):
name = kind2str(kind)
return getattr(self, "dtype_" + name)
def __enter__(self):
# pylint: disable=protected-access
_quantize._EnterQConfigScope(self)
return self
def __exit__(self, ptype, value, trace):
_quantize._ExitQConfigScope()
def __setattr__(self, name, value):
if name in QConfig._node_defaults:
raise AttributeError("'%s' object cannot set attribute '%s'" % (str(type(self)), name))
return super(QConfig, self).__setattr__(name, value)
def current_qconfig():
"""Get the current quantization configuration."""
return _quantize._GetCurrentQConfig()
def qconfig(**kwargs):
"""Configure the quantization behavior by setting config variables.
Parameters
    ----------
nbit_dict: dict of QAnnotateKind -> int
Number of bit for every kind of annotate field.
calibrate_mode: str
The calibration mode. 'global_scale' or 'kl_divergence'.
global_scale: use global scale
kl_divergence: find scales by kl divergence on the dataset.
global_scale: float
The global scale for calibration.
weight_scale: str
The way to calculate scales for weights (annotated with QAnnotateKind.WEIGHT).
power2: Find the maximum of the absolute value of the tensor, and then round up to power
of two.
max: Find the maximum of the absolute value of the tensor
skip_dense_layer: boolean
        Whether to skip all nn.dense layers. By default, they are skipped.
skip_conv_layers: list
        Specifies which conv2d layers to skip. Provide a list of indices
        that indicate which conv2d layers to leave untouched. Indices start from 0.
do_simulation: boolean
Whether to do simulation with float operation only.
round_for_shift: boolean
Whether to add bias for rounding during shift.
debug_enabled_ops: None or list of str
Partially quantize specified operators for debugging. The default value
        is None, which means it will try to call all operators' annotate rewrite
function.
rounding: "UPWARD" or "TONEAREST"
Rounding direction for fixed point multiplications.
partition_conversions: 'disabled', 'enabled', or 'fully_integral'
If set to 'enabled' or 'fully_integral', partitions a quantized
result into a module containing
a prefix function (consisting of input conversion into the quantized data space),
a middle function (consisting of the core quantized network),
a suffix function (consisting of output dequantization),
and a main function (that calls the prefix, middle, and suffix functions in succession).
If set to 'fully_integral' and there are unquantized operators in the result,
an exception is raised.
The default value is 'disabled'.
Returns
-------
config: QConfig
The quantization configuration
"""
node_args = {k: v if k not in kwargs else kwargs[k] for k, v in QConfig._node_defaults.items()}
return tvm.ir.make_node("relay.quantize.QConfig", **node_args)
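# A hedged usage sketch: the QConfig node returned by qconfig() is meant to be
# used as a context manager (its __enter__/__exit__ push and pop the C++ config
# scope), so passes run inside the `with` block observe these settings.
def _example_qconfig_scope():
    with qconfig(nbit_input=8, nbit_weight=8, global_scale=8.0,
                 skip_conv_layers=[0]):
        # Inside the scope, current_qconfig() reflects the values above.
        return current_qconfig()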
class QuantizeContext(object):
"""An internal used global context object for annotation,
for putting some state variables like `conv2d_counter`."""
Current = None
def __init__(self):
self.qnode_map = dict()
self._conv2d_counter = 0
self._stop_quantize = False
def check_to_skip(self, ref_call):
"""Check the index of conv2d layer to decide whether to
skip the current operator."""
if self._stop_quantize:
return True
if current_qconfig().skip_conv_layers is not None:
# check skip conv layers
skipped_indices = [int(x) for x in current_qconfig().skip_conv_layers]
if self._conv2d_counter in skipped_indices and ref_call.op.name == "nn.conv2d":
self._conv2d_counter += 1
return True
if ref_call.op.name == "nn.conv2d":
self._conv2d_counter += 1
return False
def stop_quantize(self):
self._stop_quantize = True
def reset(self):
self._conv2d_counter = 0
self._stop_quantize = False
def __enter__(self):
self.reset()
return self
def __exit__(self, ptype, value, traceback):
pass
def quantize_context():
"""Get the global singleton scope"""
if QuantizeContext.Current is None:
QuantizeContext.Current = QuantizeContext()
return QuantizeContext.Current
def partition():
"""Partition graph into small low-precision sections by `cast_hint` and
`stop_fusion`.
Returns
-------
ret: tvm.transform.Pass
The registered pass for VTA rewrite.
"""
return _quantize.QuantizePartition()
def annotate():
"""Given a float32 graph, this pass will rewrite the graph and return
a graph which simulates the error brought by the current quantization
scheme.
Returns
-------
ret: tvm.transform.Pass
The registered pass for quantization annotation.
"""
return _quantize.QuantizeAnnotate()
def realize():
"""The realize pass will transform the simulated quantized graph, which
actually computes with float32, to a real low-bit integer graph. It will
replace the `simulated_quantize` with several fine-grained operators like
add, multiply, and shift as much as possible for better performance.
Returns
-------
ret: tvm.transform.Pass
The registered pass for quantization realization.
"""
return _quantize.QuantizeRealize()
def _bind_params(func, params):
"""Bind the params to the expression."""
name_dict = {}
for arg in func.params:
name = arg.name_hint
if name in name_dict:
name_dict[name] = None
else:
name_dict[name] = arg
bind_dict = {}
for k, v in params.items():
if k not in name_dict:
continue
arg = name_dict[k]
if arg is None:
raise ValueError("Multiple args in the function have name %s" % k)
bind_dict[arg] = _expr.const(v)
return _expr.bind(func, bind_dict)
def prerequisite_optimize(mod, params=None):
"""Prerequisite optimization passes for quantization. Perform
"SimplifyInference", "FoldScaleAxis", "FoldConstant", and
"CanonicalizeOps" optimization before quantization."""
optimize = tvm.transform.Sequential(
[
_transform.SimplifyInference(),
_transform.FoldConstant(),
_transform.FoldScaleAxis(),
_transform.CanonicalizeOps(),
_transform.FoldConstant(),
]
)
if params:
mod["main"] = _bind_params(mod["main"], params)
mod = optimize(mod)
return mod
def quantize(mod, params=None, dataset=None):
"""The quantization procedure. Before running the three main
    procedures of quantization, "annotate", "calibrate" and "realize",
    we need to run "SimplifyInference", "FoldScaleAxis" and "FoldConstant"
    first to optimize the module.
Parameters
    ----------
mod: Module
The original module.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
dataset: list of dict of Var -> NDArray
The calibration dataset.
Returns
-------
ret: Function
The graph after quantization
"""
mod = prerequisite_optimize(mod, params)
calibrate_pass = tvm.transform.module_pass(
calibrate(dataset), opt_level=1, name="QuantizeCalibrate"
)
quant_passes = [partition(), annotate(), calibrate_pass, tvm.relay.transform.InferType()]
if not current_qconfig().do_simulation:
quant_passes.append(realize())
quant_passes.append(_transform.FoldConstant())
quantize_seq = tvm.transform.Sequential(quant_passes)
with tvm.transform.PassContext(
opt_level=3, required_pass=["QuantizeAnnotate", "QuantizeCalibrate", "QuantizeRealize"]
):
with quantize_context():
mod = quantize_seq(mod)
q_cfg = current_qconfig()
assert q_cfg.partition_conversions in ["disabled", "enabled", "fully_integral"]
if q_cfg.partition_conversions != "disabled":
quantized_dtypes = {q_cfg.dtype_input, q_cfg.dtype_weight, q_cfg.dtype_activation}
ensure_fully_integral = q_cfg.partition_conversions == "fully_integral"
return partition_conversions(mod, quantized_dtypes, ensure_fully_integral)
return mod
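# A hedged end-to-end sketch (illustrative only; shapes and names are made
# up): builds a tiny conv2d model with tvm.relay, binds random parameters,
# and quantizes it under a global-scale calibration configuration.
def _example_quantize_conv():
    import numpy as np
    from tvm import relay
    data = relay.var("data", shape=(1, 3, 32, 32), dtype="float32")
    weight = relay.var("weight", shape=(8, 3, 3, 3), dtype="float32")
    conv = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))
    mod = tvm.IRModule.from_expr(relay.Function([data, weight], conv))
    params = {"weight": np.random.uniform(-1, 1, (8, 3, 3, 3)).astype("float32")}
    with qconfig(calibrate_mode="global_scale", global_scale=8.0,
                 skip_conv_layers=[]):
        return quantize(mod, params=params)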
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from struct import pack, unpack
from TProtocol import *
class TBinaryProtocol(TProtocolBase):
"""Binary implementation of the Thrift protocol driver."""
# NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be
# positive, converting this into a long. If we hardcode the int value
# instead it'll stay in 32 bit-land.
# VERSION_MASK = 0xffff0000
VERSION_MASK = -65536
# VERSION_1 = 0x80010000
VERSION_1 = -2147418112
TYPE_MASK = 0x000000ff
def __init__(self, trans, strictRead=False, strictWrite=True):
TProtocolBase.__init__(self, trans)
self.strictRead = strictRead
self.strictWrite = strictWrite
def writeMessageBegin(self, name, type, seqid):
if self.strictWrite:
self.writeI32(TBinaryProtocol.VERSION_1 | type)
self.writeString(name)
self.writeI32(seqid)
else:
self.writeString(name)
self.writeByte(type)
self.writeI32(seqid)
def writeMessageEnd(self):
pass
def writeStructBegin(self, name):
pass
def writeStructEnd(self):
pass
def writeFieldBegin(self, name, type, id):
self.writeByte(type)
self.writeI16(id)
def writeFieldEnd(self):
pass
def writeFieldStop(self):
self.writeByte(TType.STOP)
def writeMapBegin(self, ktype, vtype, size):
self.writeByte(ktype)
self.writeByte(vtype)
self.writeI32(size)
def writeMapEnd(self):
pass
def writeListBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeListEnd(self):
pass
def writeSetBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeSetEnd(self):
pass
def writeBool(self, bool):
if bool:
self.writeByte(1)
else:
self.writeByte(0)
def writeByte(self, byte):
buff = pack("!b", byte)
self.trans.write(buff)
def writeI16(self, i16):
buff = pack("!h", i16)
self.trans.write(buff)
def writeI32(self, i32):
buff = pack("!i", i32)
self.trans.write(buff)
def writeI64(self, i64):
buff = pack("!q", i64)
self.trans.write(buff)
def writeDouble(self, dub):
buff = pack("!d", dub)
self.trans.write(buff)
def writeString(self, str):
self.writeI32(len(str))
self.trans.write(str)
def readMessageBegin(self):
sz = self.readI32()
if sz < 0:
version = sz & TBinaryProtocol.VERSION_MASK
if version != TBinaryProtocol.VERSION_1:
raise TProtocolException(
type=TProtocolException.BAD_VERSION,
message='Bad version in readMessageBegin: %d' % (sz))
type = sz & TBinaryProtocol.TYPE_MASK
name = self.readString()
seqid = self.readI32()
else:
if self.strictRead:
raise TProtocolException(type=TProtocolException.BAD_VERSION,
message='No protocol version header')
name = self.trans.readAll(sz)
type = self.readByte()
seqid = self.readI32()
return (name, type, seqid)
def readMessageEnd(self):
pass
def readStructBegin(self):
pass
def readStructEnd(self):
pass
def readFieldBegin(self):
type = self.readByte()
if type == TType.STOP:
return (None, type, 0)
id = self.readI16()
return (None, type, id)
def readFieldEnd(self):
pass
def readMapBegin(self):
ktype = self.readByte()
vtype = self.readByte()
size = self.readI32()
return (ktype, vtype, size)
def readMapEnd(self):
pass
def readListBegin(self):
etype = self.readByte()
size = self.readI32()
return (etype, size)
def readListEnd(self):
pass
def readSetBegin(self):
etype = self.readByte()
size = self.readI32()
return (etype, size)
def readSetEnd(self):
pass
def readBool(self):
byte = self.readByte()
if byte == 0:
return False
return True
def readByte(self):
buff = self.trans.readAll(1)
val, = unpack('!b', buff)
return val
def readI16(self):
buff = self.trans.readAll(2)
val, = unpack('!h', buff)
return val
def readI32(self):
buff = self.trans.readAll(4)
val, = unpack('!i', buff)
return val
def readI64(self):
buff = self.trans.readAll(8)
val, = unpack('!q', buff)
return val
def readDouble(self):
buff = self.trans.readAll(8)
val, = unpack('!d', buff)
return val
  def readString(self):
    # Use non-builtin names for the length and payload.
    size = self.readI32()
    return self.trans.readAll(size)
class TBinaryProtocolFactory:
def __init__(self, strictRead=False, strictWrite=True):
self.strictRead = strictRead
self.strictWrite = strictWrite
def getProtocol(self, trans):
prot = TBinaryProtocol(trans, self.strictRead, self.strictWrite)
return prot
class TBinaryProtocolAccelerated(TBinaryProtocol):
"""C-Accelerated version of TBinaryProtocol.
This class does not override any of TBinaryProtocol's methods,
but the generated code recognizes it directly and will call into
our C module to do the encoding, bypassing this object entirely.
We inherit from TBinaryProtocol so that the normal TBinaryProtocol
encoding can happen if the fastbinary module doesn't work for some
reason. (TODO(dreiss): Make this happen sanely in more cases.)
In order to take advantage of the C module, just use
TBinaryProtocolAccelerated instead of TBinaryProtocol.
NOTE: This code was contributed by an external developer.
The internal Thrift team has reviewed and tested it,
but we cannot guarantee that it is production-ready.
Please feel free to report bugs and/or success stories
to the public mailing list.
"""
pass
class TBinaryProtocolAcceleratedFactory:
def getProtocol(self, trans):
return TBinaryProtocolAccelerated(trans)
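# A hedged round-trip sketch (not part of the protocol implementation):
# serializes a few primitive values into an in-memory transport and reads
# them back. Assumes the sibling TTransport module of this same-era Thrift
# library, which provides TMemoryBuffer.
def _example_binary_roundtrip():
  from TTransport import TMemoryBuffer
  write_buf = TMemoryBuffer()
  proto = TBinaryProtocolFactory().getProtocol(write_buf)
  proto.writeI32(42)
  proto.writeString("thrift")
  proto.writeDouble(3.5)
  read_buf = TMemoryBuffer(write_buf.getvalue())
  proto = TBinaryProtocol(read_buf)
  assert proto.readI32() == 42
  assert proto.readString() == "thrift"
  assert proto.readDouble() == 3.5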
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations often used for initializing tensors.
All variable initializers returned by functions in this file should have the
following signature:
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
Args:
shape: List of `int` representing the shape of the output `Tensor`. Some
initializers may also be able to accept a `Tensor`.
dtype: (Optional) Type of the output `Tensor`.
partition_info: (Optional) variable_scope._PartitionInfo object holding
additional information about how the variable is partitioned. May be
`None` if the variable is not partitioned.
Returns:
A `Tensor` of type `dtype` and `shape`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
if not dtype.is_floating:
raise ValueError("Expected floating point type, got %s." % dtype)
return dtype
def zeros_initializer(shape, dtype=dtypes.float32, partition_info=None):
"""An adaptor for zeros() to match the Initializer spec."""
return array_ops.zeros(shape, dtype)
def ones_initializer(shape, dtype=dtypes.float32, partition_info=None):
"""An adaptor for ones() to match the Initializer spec."""
return array_ops.ones(shape, dtype)
def constant_initializer(value=0, dtype=dtypes.float32):
"""Returns an initializer that generates tensors with constant values.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` following the desired `shape` of the
new tensor (see examples below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the desired shape of the
tensor. In the case where the total number of elements in `value` is less
than the number of elements required by the tensor shape, the last element
in `value` will be used to fill the remaining entries. If the total number of
elements in `value` is greater than the number of elements required by the
tensor shape, the initializer will raise a `ValueError`.
Args:
value: A Python scalar, list of values, or a N-dimensional numpy array. All
elements of the initialized variable will be set to the corresponding
value in the `value` argument.
dtype: The data type.
Returns:
An initializer that generates tensors with constant values.
Examples:
The following example can be rewritten using a numpy.ndarray instead
of the `value` list, even reshaped, as shown in the two commented lines
below the `value` list initialization.
```python
>>> import numpy as np
>>> import tensorflow as tf
>>> value = [0, 1, 2, 3, 4, 5, 6, 7]
>>> # value = np.array(value)
>>> # value = value.reshape([2, 4])
>>> init = tf.constant_initializer(value)
>>> print('fitting shape:')
>>> tf.reset_default_graph()
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
fitting shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]]
>>> print('larger shape:')
>>> tf.reset_default_graph()
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[3, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
larger shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 7. 7. 7. 7.]]
>>> print('smaller shape:')
>>> tf.reset_default_graph()
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 3], initializer=init)
ValueError: Too many elements provided. Needed at most 6, but received 8
```
"""
def _initializer(shape, dtype=dtype, partition_info=None):
return constant_op.constant(value, dtype=dtype, shape=shape)
return _initializer
def random_uniform_initializer(minval=0, maxval=None, seed=None,
dtype=dtypes.float32):
"""Returns an initializer that generates tensors with a uniform distribution.
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range
of random values to generate.
maxval: A python scalar or a scalar tensor. Upper bound of the range
of random values to generate. Defaults to 1 for float types.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
dtype: The data type.
Returns:
An initializer that generates tensors with a uniform distribution.
"""
def _initializer(shape, dtype=dtype, partition_info=None):
return random_ops.random_uniform(shape, minval, maxval, dtype, seed=seed)
return _initializer
def random_normal_initializer(mean=0.0, stddev=1.0, seed=None,
dtype=dtypes.float32):
"""Returns an initializer that generates tensors with a normal distribution.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer that generates tensors with a normal distribution.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
def _initializer(shape, dtype=_assert_float_dtype(dtype),
partition_info=None):
return random_ops.random_normal(shape, mean, stddev, dtype, seed=seed)
return _initializer
def truncated_normal_initializer(mean=0.0, stddev=1.0, seed=None,
dtype=dtypes.float32):
"""Returns an initializer that generates a truncated normal distribution.
These values are similar to values from a `random_normal_initializer`
except that values more than two standard deviations from the mean
are discarded and re-drawn. This is the recommended initializer for
neural network weights and filters.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer that generates tensors with a truncated normal
distribution.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
def _initializer(shape, dtype=_assert_float_dtype(dtype),
partition_info=None):
return random_ops.truncated_normal(shape, mean, stddev, dtype, seed=seed)
return _initializer
def uniform_unit_scaling_initializer(factor=1.0,
seed=None,
dtype=dtypes.float32):
"""Returns an initializer that generates tensors without scaling variance.
When initializing a deep network, it is in principle advantageous to keep
the scale of the input variance constant, so it does not explode or diminish
by reaching the final layer. If the input is `x` and the operation `x * W`,
and we want to initialize `W` uniformly at random, we need to pick `W` from
[-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]
to keep the scale intact, where `dim = W.shape[0]` (the size of the input).
A similar calculation for convolutional networks gives an analogous result
with `dim` equal to the product of the first 3 dimensions. When
nonlinearities are present, we need to multiply this by a constant `factor`.
See [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)
([pdf](http://arxiv.org/pdf/1412.6558.pdf)) for deeper motivation, experiments
and the calculation of constants. In section 2.3 there, the constants were
numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15.
Args:
factor: Float. A multiplicative factor by which the values will be scaled.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer that generates tensors with unit variance.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
def _initializer(shape, dtype=_assert_float_dtype(dtype),
partition_info=None):
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
input_size = 1.0
# Estimating input size is not possible to do perfectly, but we try.
# The estimate, obtained by multiplying all dimensions but the last one,
# is the right thing for matrix multiply and convolutions (see above).
for dim in scale_shape[:-1]:
input_size *= float(dim)
# Avoid errors when initializing zero-size tensors.
input_size = max(input_size, 1.0)
max_val = math.sqrt(3 / input_size) * factor
return random_ops.random_uniform(shape, -max_val, max_val,
dtype, seed=seed)
return _initializer
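# A brief worked example of the scaling rule above (illustrative only): for a
# dense layer of shape [256, 128] the fan-in is 256, so values are drawn
# uniformly from [-sqrt(3/256) * factor, sqrt(3/256) * factor], which is
# roughly [-0.108, 0.108] for factor=1.0.
def _example_unit_scaling_bound(shape=(256, 128), factor=1.0):
  input_size = 1.0
  for dim in shape[:-1]:
    input_size *= float(dim)
  return math.sqrt(3 / input_size) * factor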
# TODO(vrv): Unhide when we are ready to expose this publicly.
def _random_walk(shape, nonlinearity, dtype=dtypes.float32, seed=None,
name="random_walk"):
"""Create a random tensor such that backprop neither vanishes nor explodes.
Args:
shape: a python array of int or a 1-d tensor. Sizes of the Tensor.
    nonlinearity: the python function that implements the nonlinearity in
      TensorFlow.
dtype: The type of the output.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: string. Optional name for the op.
Returns:
A Tensor of the specified sizes filled with random values.
"""
assert len(shape) == 2, "Random Walk initialization only supports 2D tensors."
num_inputs = shape[0]
if nonlinearity == math_ops.tanh:
# No real formula for this case yet, but this works well for many
# layer widths.
rwg = 1.13
elif nonlinearity == array_ops.identity:
rwg = math.exp(1.0 / (2.0 * num_inputs))
elif nonlinearity == nn_ops.relu:
rwg = math.sqrt(2.0) * math.exp(1.2 / (max(num_inputs, 6) - 2.4))
else:
assert False, "Unsupported nonlinearity for Random Walk initialization."
mean = 0.0
stddev = rwg / math.sqrt(float(num_inputs))
return random_ops.random_normal(shape, mean=mean, stddev=stddev, dtype=dtype,
seed=seed, name=name)
# TODO(vrv): Unhide when we are ready to expose this publicly.
class _RandomWalkInitializer(object):
"""An Initializer that generates a tensor for Random Walk Initialization."""
def __init__(self, nonlinearity, seed=None):
"""Construct a RandomWalkInitializer.
Args:
nonlinearity: the python tensorflow function that computes a nonlinearity
in the graph, typically after a Wx+b type operation.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
"""
self._nonlinearity = nonlinearity
self._seed = seed
def __call__(self, shape, dtype=dtypes.float32, partition_info=None):
"""Generate a tensor used to initialize a variable."""
    # _random_walk is the module-level helper defined above; random_ops does
    # not provide it.
    return _random_walk(shape, self._nonlinearity, dtype,
                        seed=self._seed)
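# A hedged usage sketch: every initializer in this module returns a callable
# with the (shape, dtype, partition_info) signature described in the module
# docstring, so it can be handed to variable-creation helpers or called
# directly to build an initial-value tensor.
def _example_call_initializer():
  init = random_uniform_initializer(-0.05, 0.05, seed=1)
  return init([3, 4], dtype=dtypes.float32)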
|
|
'''
Support
=======
Activate other frameworks/toolkits inside the kivy event loop.
'''
__all__ = ('install_gobject_iteration', 'install_twisted_reactor',
'uninstall_twisted_reactor', 'install_android')
def install_gobject_iteration():
'''Import and install gobject context iteration inside our event loop.
This is used as soon as gobject is used (like gstreamer).
'''
from kivy.clock import Clock
try:
from gi.repository import GObject as gobject
except ImportError:
import gobject
if hasattr(gobject, '_gobject_already_installed'):
# already installed, don't do it twice.
return
gobject._gobject_already_installed = True
# get gobject mainloop / context
loop = gobject.MainLoop()
gobject.threads_init()
context = loop.get_context()
# schedule the iteration each frame
def _gobject_iteration(*largs):
# XXX we need to loop over context here, otherwise, we might have a lag
loop = 0
while context.pending() and loop < 10:
context.iteration(False)
loop += 1
Clock.schedule_interval(_gobject_iteration, 0)
# -----------------------------------------------------------------------------
# Android support
# -----------------------------------------------------------------------------
g_android_redraw_count = 0
_redraw_event = None
def _android_ask_redraw(*largs):
# after wakeup, we need to redraw more than once, otherwise we get a
# black screen
global g_android_redraw_count
from kivy.core.window import Window
Window.canvas.ask_update()
g_android_redraw_count -= 1
if g_android_redraw_count < 0:
return False
def install_android():
'''Install hooks for the android platform.
* Automatically sleep when the device is paused.
* Automatically kill the application when the return key is pressed.
'''
try:
import android
except ImportError:
print('Android lib is missing, cannot install android hooks')
return
from kivy.clock import Clock
from kivy.logger import Logger
import pygame
Logger.info('Support: Android install hooks')
# Init the library
android.init()
android.map_key(android.KEYCODE_MENU, pygame.K_MENU)
android.map_key(android.KEYCODE_BACK, pygame.K_ESCAPE)
# Check if android should be paused or not.
# If pause is requested, just leave the app.
def android_check_pause(*largs):
# do nothing until android asks for it.
if not android.check_pause():
return
from kivy.app import App
from kivy.base import stopTouchApp
from kivy.logger import Logger
from kivy.core.window import Window
global g_android_redraw_count, _redraw_event
# try to get the current running application
Logger.info('Android: Must go into sleep mode, check the app')
app = App.get_running_app()
# no running application, stop our loop.
if app is None:
Logger.info('Android: No app running, stop everything.')
stopTouchApp()
return
# try to go to pause mode
if app.dispatch('on_pause'):
Logger.info('Android: App paused, now wait for resume.')
# app goes in pause mode, wait.
android.wait_for_resume()
# is it a stop or resume ?
if android.check_stop():
# app must stop
Logger.info('Android: Android wants to close our app.')
stopTouchApp()
else:
# app resuming now !
Logger.info('Android: Android has resumed, resume the app.')
app.dispatch('on_resume')
Window.canvas.ask_update()
g_android_redraw_count = 25 # 5 frames/seconds for 5 seconds
if _redraw_event is None:
_redraw_event = Clock.schedule_interval(
_android_ask_redraw, 1 / 5)
else:
_redraw_event.cancel()
_redraw_event()
Logger.info('Android: App resume completed.')
# app doesn't support pause mode, just stop it.
else:
Logger.info('Android: App doesn\'t support pause mode, stop.')
stopTouchApp()
Clock.schedule_interval(android_check_pause, 0)
_twisted_reactor_stopper = None
_twisted_reactor_work = None
def install_twisted_reactor(**kwargs):
'''Installs a threaded twisted reactor, which will schedule one
reactor iteration before the next frame only when twisted needs
to do some work.
Any arguments or keyword arguments passed to this function will be
    passed on to the threadedselect reactor's interleave function. These
are the arguments one would usually pass to twisted's reactor.startRunning.
Unlike the default twisted reactor, the installed reactor will not handle
any signals unless you set the 'installSignalHandlers' keyword argument
to 1 explicitly. This is done to allow kivy to handle the signals as
usual unless you specifically want the twisted reactor to handle the
signals (e.g. SIGINT).
.. note::
Twisted is not included in iOS build by default. To use it on iOS,
put the twisted distribution (and zope.interface dependency) in your
application directory.
'''
import twisted
# prevent installing more than once
if hasattr(twisted, '_kivy_twisted_reactor_installed'):
return
twisted._kivy_twisted_reactor_installed = True
# don't let twisted handle signals, unless specifically requested
kwargs.setdefault('installSignalHandlers', 0)
# install threaded-select reactor, to use with own event loop
from twisted.internet import _threadedselect
_threadedselect.install()
# now we can import twisted reactor as usual
from twisted.internet import reactor
from twisted.internet.error import ReactorNotRunning
from collections import deque
from kivy.base import EventLoop
from kivy.logger import Logger
from kivy.clock import Clock
# will hold callbacks to twisted callbacks
q = deque()
# twisted will call the wake function when it needs to do work
def reactor_wake(twisted_loop_next):
'''Wakeup the twisted reactor to start processing the task queue
'''
Logger.trace("Support: twisted wakeup call to schedule task")
q.append(twisted_loop_next)
# called every frame, to process the reactors work in main thread
def reactor_work(*args):
'''Process the twisted reactor task queue
'''
Logger.trace("Support: processing twisted task queue")
while len(q):
q.popleft()()
global _twisted_reactor_work
_twisted_reactor_work = reactor_work
# start the reactor, by telling twisted how to wake, and process
def reactor_start(*args):
'''Start the twisted reactor main loop
'''
Logger.info("Support: Starting twisted reactor")
reactor.interleave(reactor_wake, **kwargs)
Clock.schedule_interval(reactor_work, 0)
# make sure twisted reactor is shutdown if eventloop exists
def reactor_stop(*args):
'''Shutdown the twisted reactor main loop
'''
if reactor.threadpool:
Logger.info("Support: Stopping twisted threads")
reactor.threadpool.stop()
Logger.info("Support: Shutting down twisted reactor")
reactor._mainLoopShutdown()
try:
reactor.stop()
except ReactorNotRunning:
pass
import sys
sys.modules.pop('twisted.internet.reactor', None)
global _twisted_reactor_stopper
_twisted_reactor_stopper = reactor_stop
# start and stop the reactor along with kivy EventLoop
Clock.schedule_once(reactor_start, 0)
EventLoop.bind(on_stop=reactor_stop)
def uninstall_twisted_reactor():
'''Uninstalls Kivy's threaded Twisted reactor. No more Twisted
tasks will run after this has been called. Use this to clean up
the `twisted.internet.reactor`.
.. versionadded:: 1.9.0
'''
import twisted
# prevent uninstalling more than once
if not hasattr(twisted, '_kivy_twisted_reactor_installed'):
return
from kivy.base import EventLoop
global _twisted_reactor_stopper
_twisted_reactor_stopper()
EventLoop.unbind(on_stop=_twisted_reactor_stopper)
del twisted._kivy_twisted_reactor_installed
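# Usage sketch (illustrative only, not executed here). install_twisted_reactor()
# must run before `twisted.internet.reactor` is imported anywhere else, and
# uninstall_twisted_reactor() undoes it. This assumes the module is importable
# as kivy.support; the EchoClientFactory class is a hypothetical placeholder.
#
#     from kivy.support import install_twisted_reactor
#     install_twisted_reactor()
#     from twisted.internet import reactor
#     reactor.connectTCP('localhost', 8000, EchoClientFactory())
#     # ... run the Kivy app; Twisted work is interleaved with Kivy's Clock ...
#     uninstall_twisted_reactor()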
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Django settings for myshop project.
For more information on this file, see
https://docs.djangoproject.com/en/stable/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/stable/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from decimal import Decimal
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse_lazy
from cmsplugin_cascade.utils import format_lazy
SHOP_APP_LABEL = 'myshop'
BASE_DIR = os.path.dirname(__file__)
SHOP_TUTORIAL = os.environ.get('DJANGO_SHOP_TUTORIAL')
if SHOP_TUTORIAL is None:
raise ImproperlyConfigured("Environment variable DJANGO_SHOP_TUTORIAL is not set")
if SHOP_TUTORIAL not in ['commodity', 'i18n_commodity', 'smartcard', 'i18n_smartcard',
'i18n_polymorphic', 'polymorphic']:
msg = "Environment variable DJANGO_SHOP_TUTORIAL has an invalid value `{}`"
raise ImproperlyConfigured(msg.format(SHOP_TUTORIAL))
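# Example (illustrative): pick one of the tutorial flavours listed above before
# starting Django, e.g. from a POSIX shell (the runserver invocation is shown
# only as an assumption about the usual Django entry point):
#     export DJANGO_SHOP_TUTORIAL=commodity
#     ./manage.py runserver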
# Root directory for this django project
PROJECT_ROOT = os.path.abspath(os.path.join(BASE_DIR, os.path.pardir))
# Directory where working files, such as media and databases are kept
WORK_DIR = os.environ.get('DJANGO_WORKDIR', os.path.abspath(os.path.join(PROJECT_ROOT, os.path.pardir, 'workdir')))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
ADMINS = (("The Merchant", 'the.merchant@example.com'),)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nqniwbt=%@5a(e8%&h#c^0()64(ujs0=4%_nyajn*t6a$ca&at'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('DJANGO_DEBUG'))
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Vienna'
# Application definition
# replace django.contrib.auth.models.User with an implementation
# that allows logging in via an email address
AUTH_USER_MODEL = 'email_auth.User'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
INSTALLED_APPS = [
'django.contrib.auth',
'email_auth',
'polymorphic',
# deprecated: 'djangocms_admin_style',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'djangocms_text_ckeditor',
'django_select2',
'cmsplugin_cascade',
'cmsplugin_cascade.clipboard',
'cmsplugin_cascade.sharable',
'cmsplugin_cascade.extra_fields',
'cmsplugin_cascade.icon',
'cmsplugin_cascade.segmentation',
'cms_bootstrap3',
'adminsortable2',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'django_fsm',
'fsm_admin',
'djng',
'cms',
'menus',
'treebeard',
'compressor',
'sekizai',
'sass_processor',
'django_filters',
'filer',
'easy_thumbnails',
'easy_thumbnails.optimize',
'post_office',
'haystack',
'shop',
'shop_stripe',
'myshop',
]
if SHOP_TUTORIAL in ['i18n_commodity', 'i18n_smartcard', 'i18n_polymorphic']:
INSTALLED_APPS.append('parler')
MIDDLEWARE_CLASSES = [
'djng.middleware.AngularUrlMiddleware',
# 'django.middleware.cache.UpdateCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'shop.middleware.CustomerMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.gzip.GZipMiddleware',
'shop.middleware.MethodOverrideMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.utils.ApphookReloadMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
# 'django.middleware.cache.FetchFromCacheMiddleware',
]
MIGRATION_MODULES = {
'myshop': 'myshop.migrations.{}'.format(SHOP_TUTORIAL)
}
ROOT_URLCONF = 'myshop.urls'
WSGI_APPLICATION = 'wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(WORK_DIR, SHOP_TUTORIAL, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/stable/topics/i18n/
LANGUAGE_CODE = 'en'
if SHOP_TUTORIAL in ['i18n_smartcard', 'i18n_commodity', 'i18n_polymorphic']:
USE_I18N = True
LANGUAGES = (
('en', "English"),
('de', "Deutsch"),
)
PARLER_DEFAULT_LANGUAGE = 'en'
PARLER_LANGUAGES = {
1: (
{'code': 'de'},
{'code': 'en'},
),
'default': {
'fallbacks': ['de', 'en'],
},
}
CMS_LANGUAGES = {
'default': {
'fallbacks': ['en', 'de'],
'redirect_on_fallback': True,
'public': True,
'hide_untranslated': False,
},
1: ({
'public': True,
'code': 'en',
'hide_untranslated': False,
'name': 'English',
'redirect_on_fallback': True,
}, {
'public': True,
'code': 'de',
'hide_untranslated': False,
'name': 'Deutsch',
'redirect_on_fallback': True,
},)
}
else:
USE_I18N = False
USE_L10N = True
USE_TZ = True
USE_X_FORWARDED_HOST = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(WORK_DIR, SHOP_TUTORIAL, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(WORK_DIR, 'static')
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
STATICFILES_FINDERS = [
'myshop.finders.FileSystemFinder', # or 'django.contrib.staticfiles.finders.FileSystemFinder',
'myshop.finders.AppDirectoriesFinder', # or 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'sass_processor.finders.CssFinder',
'compressor.finders.CompressorFinder',
]
STATICFILES_DIRS = [
('node_modules', os.path.join(PROJECT_ROOT, 'node_modules')),
]
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [],
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.csrf',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'sekizai.context_processors.sekizai',
'cms.context_processors.cms_settings',
'shop.context_processors.customer',
'shop.context_processors.ng_model_options',
'shop_stripe.context_processors.public_keys',
)
}
}]
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {'require_debug_false': {'()': 'django.utils.log.RequireDebugFalse'}},
'formatters': {
'simple': {
'format': '[%(asctime)s %(module)s] %(levelname)s: %(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
'post_office': {
'handlers': ['console'],
'level': 'WARNING',
'propagate': True,
},
},
}
SILENCED_SYSTEM_CHECKS = ['auth.W004']
FIXTURE_DIRS = [os.path.join(WORK_DIR, SHOP_TUTORIAL, 'fixtures')]
############################################
# settings for sending mail
EMAIL_HOST = 'smtp.example.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'no-reply@example.com'
EMAIL_HOST_PASSWORD = 'smtp-secret-password'
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'My Shop <no-reply@example.com>'
EMAIL_REPLY_TO = 'info@example.com'
EMAIL_BACKEND = 'post_office.EmailBackend'
############################################
# settings for third party Django apps
NODE_MODULES_URL = STATIC_URL + 'node_modules/'
SASS_PROCESSOR_INCLUDE_DIRS = [
os.path.join(PROJECT_ROOT, 'node_modules'),
]
COERCE_DECIMAL_TO_STRING = True
FSM_ADMIN_FORCE_PERMIT = True
ROBOTS_META_TAGS = ('noindex', 'nofollow')
SERIALIZATION_MODULES = {'json': str('shop.money.serializers')}
############################################
# settings for django-restframework and plugins
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'shop.rest.money.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer', # can be disabled for production environments
),
# 'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.TokenAuthentication',
# ),
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 12,
}
############################################
# settings for storing session data
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
SESSION_SAVE_EVERY_REQUEST = True
############################################
# settings for storing files and images
FILER_ADMIN_ICON_SIZES = ('16', '32', '48', '80', '128')
FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS = True
FILER_DUMP_PAYLOAD = False
FILE_UPLOAD_MAX_MEMORY_SIZE = 5242880
THUMBNAIL_HIGH_RESOLUTION = False
THUMBNAIL_OPTIMIZE_COMMAND = {
'gif': '/usr/bin/optipng {filename}',
'jpeg': '/usr/bin/jpegoptim {filename}',
'png': '/usr/bin/optipng {filename}'
}
THUMBNAIL_PRESERVE_EXTENSIONS = True
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters',
)
############################################
# settings for django-cms and its plugins
CMS_TEMPLATES = [
('myshop/pages/default.html', _("Default Page")),
('myshop/pages/test.html', _("Test Page")), # to show strides rendering via {% render_cascade ... %}
]
CMS_CACHE_DURATIONS = {
'content': 600,
'menus': 3600,
'permissions': 86400,
}
CMS_PERMISSION = False
cascade_workarea_glossary = {
'breakpoints': ['xs', 'sm', 'md', 'lg'],
'container_max_widths': {'xs': 750, 'sm': 750, 'md': 970, 'lg': 1170},
'fluid': False,
'media_queries': {
'xs': ['(max-width: 768px)'],
'sm': ['(min-width: 768px)', '(max-width: 992px)'],
'md': ['(min-width: 992px)', '(max-width: 1200px)'],
'lg': ['(min-width: 1200px)'],
},
}
CMS_PLACEHOLDER_CONF = {
'Breadcrumb': {
'plugins': ['BreadcrumbPlugin'],
'parent_classes': {'BreadcrumbPlugin': None},
'glossary': cascade_workarea_glossary,
},
'Commodity Details': {
'plugins': ['BootstrapContainerPlugin', 'BootstrapJumbotronPlugin'],
'parent_classes': {
'BootstrapContainerPlugin': None,
'BootstrapJumbotronPlugin': None,
},
'glossary': cascade_workarea_glossary,
},
'Main Content': {
'plugins': ['BootstrapContainerPlugin', 'BootstrapJumbotronPlugin'],
'parent_classes': {
'BootstrapContainerPlugin': None,
'BootstrapJumbotronPlugin': None,
'TextLinkPlugin': ['TextPlugin', 'AcceptConditionPlugin'],
},
'glossary': cascade_workarea_glossary,
},
'Static Footer': {
'plugins': ['BootstrapContainerPlugin', ],
'parent_classes': {
'BootstrapContainerPlugin': None,
},
'glossary': cascade_workarea_glossary,
},
}
CMSPLUGIN_CASCADE_PLUGINS = [
'cmsplugin_cascade.segmentation',
'cmsplugin_cascade.generic',
'cmsplugin_cascade.icon',
'cmsplugin_cascade.leaflet',
'cmsplugin_cascade.link',
'shop.cascade',
'cmsplugin_cascade.bootstrap3',
]
CMSPLUGIN_CASCADE = {
'link_plugin_classes': [
'shop.cascade.plugin_base.CatalogLinkPluginBase',
'cmsplugin_cascade.link.plugin_base.LinkElementMixin',
'shop.cascade.plugin_base.CatalogLinkForm',
],
'alien_plugins': ['TextPlugin', 'TextLinkPlugin', 'AcceptConditionPlugin'],
'bootstrap3': {
'template_basedir': 'angular-ui',
},
'plugins_with_extra_render_templates': {
'CustomSnippetPlugin': [
('shop/catalog/product-heading.html', _("Product Heading")),
('myshop/catalog/manufacturer-filter.html', _("Manufacturer Filter")),
],
},
'plugins_with_sharables': {
'BootstrapImagePlugin': ['image_shapes', 'image_width_responsive', 'image_width_fixed',
'image_height', 'resize_options'],
'BootstrapPicturePlugin': ['image_shapes', 'responsive_heights', 'image_size', 'resize_options'],
},
'leaflet': {
'tilesURL': 'https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={accessToken}',
'accessToken': 'pk.eyJ1IjoibWFwYm94IiwiYSI6ImNpejY4NXVycTA2emYycXBndHRqcmZ3N3gifQ.rJcFIG214AriISLbB6B5aw',
'apiKey': 'AIzaSyD71sHrtkZMnLqTbgRmY_NsO0A9l9BQmv4',
},
'bookmark_prefix': '/',
'segmentation_mixins': [
('shop.cascade.segmentation.EmulateCustomerModelMixin', 'shop.cascade.segmentation.EmulateCustomerAdminMixin'),
],
'allow_plugin_hiding': True,
}
CKEDITOR_SETTINGS = {
'language': '{{ language }}',
'skin': 'moono',
'toolbar': 'CMS',
'toolbar_HTMLField': [
['Undo', 'Redo'],
['cmsplugins', '-', 'ShowBlocks'],
['Format', 'Styles'],
['TextColor', 'BGColor', '-', 'PasteText', 'PasteFromWord'],
['Maximize', ''],
'/',
['Bold', 'Italic', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat'],
['JustifyLeft', 'JustifyCenter', 'JustifyRight'],
['HorizontalRule'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Table'],
['Source']
],
'stylesSet': format_lazy('default:{}', reverse_lazy('admin:cascade_texticon_wysiwig_config')),
}
CKEDITOR_SETTINGS_CAPTION = {
'language': '{{ language }}',
'skin': 'moono',
'height': 70,
'toolbar_HTMLField': [
['Undo', 'Redo'],
['Format', 'Styles'],
['Bold', 'Italic', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat'],
['Source']
],
}
CKEDITOR_SETTINGS_DESCRIPTION = {
'language': '{{ language }}',
'skin': 'moono',
'height': 250,
'toolbar_HTMLField': [
['Undo', 'Redo'],
['cmsplugins', '-', 'ShowBlocks'],
['Format', 'Styles'],
['TextColor', 'BGColor', '-', 'PasteText', 'PasteFromWord'],
['Maximize', ''],
'/',
['Bold', 'Italic', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat'],
['JustifyLeft', 'JustifyCenter', 'JustifyRight'],
['HorizontalRule'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Table'],
['Source']
],
}
SELECT2_CSS = 'node_modules/select2/dist/css/select2.min.css'
SELECT2_JS = 'node_modules/select2/dist/js/select2.min.js'
#############################################
# settings for full index text search (Haystack)
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://localhost:9200/',
'INDEX_NAME': 'myshop-{}-en'.format(SHOP_TUTORIAL),
},
}
if USE_I18N:
HAYSTACK_CONNECTIONS['de'] = {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://localhost:9200/',
'INDEX_NAME': 'myshop-{}-de'.format(SHOP_TUTORIAL),
}
HAYSTACK_ROUTERS = [
'shop.search.routers.LanguageRouter',
]
############################################
# settings for django-shop and its plugins
SHOP_VALUE_ADDED_TAX = Decimal(19)
SHOP_DEFAULT_CURRENCY = 'EUR'
SHOP_PRODUCT_SUMMARY_SERIALIZER = 'myshop.serializers.ProductSummarySerializer'
if SHOP_TUTORIAL in ['i18n_polymorphic', 'polymorphic']:
SHOP_CART_MODIFIERS = ['myshop.polymorphic_modifiers.MyShopCartModifier']
else:
SHOP_CART_MODIFIERS = ['shop.modifiers.defaults.DefaultCartModifier']
SHOP_CART_MODIFIERS.extend([
'shop.modifiers.taxes.CartExcludedTaxModifier',
'myshop.modifiers.PostalShippingModifier',
'myshop.modifiers.CustomerPickupModifier',
'shop.modifiers.defaults.PayInAdvanceModifier',
])
if 'shop_stripe' in INSTALLED_APPS:
SHOP_CART_MODIFIERS.append('myshop.modifiers.StripePaymentModifier')
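# For the non-polymorphic tutorials with 'shop_stripe' installed, the statements
# above compose the modifier chain in this evaluation order (illustration only):
#     ['shop.modifiers.defaults.DefaultCartModifier',
#      'shop.modifiers.taxes.CartExcludedTaxModifier',
#      'myshop.modifiers.PostalShippingModifier',
#      'myshop.modifiers.CustomerPickupModifier',
#      'shop.modifiers.defaults.PayInAdvanceModifier',
#      'myshop.modifiers.StripePaymentModifier']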
SHOP_EDITCART_NG_MODEL_OPTIONS = "{updateOn: 'default blur', debounce: {'default': 2500, 'blur': 0}}"
SHOP_ORDER_WORKFLOWS = [
'shop.payment.defaults.ManualPaymentWorkflowMixin',
'shop.payment.defaults.CancelOrderWorkflowMixin',
'shop_stripe.payment.OrderWorkflowMixin',
]
if SHOP_TUTORIAL in ['i18n_polymorphic', 'polymorphic']:
SHOP_ORDER_WORKFLOWS.append('shop.shipping.delivery.PartialDeliveryWorkflowMixin')
else:
SHOP_ORDER_WORKFLOWS.append('shop.shipping.defaults.CommissionGoodsWorkflowMixin')
SHOP_STRIPE = {
'PUBKEY': 'pk_test_HlEp5oZyPonE21svenqowhXp',
'APIKEY': 'sk_test_xUdHLeFasmOUDvmke4DHGRDP',
'PURCHASE_DESCRIPTION': _("Thanks for purchasing at MyShop"),
}
try:
from .private_settings import * # NOQA
except ImportError:
pass
|
|
#!/usr/bin/env python
# Copyright (c) 2010-2016 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Runs a RAMCloud.
Used to exercise a RAMCloud cluster (e.g., for performance measurements)
by running a collection of servers and clients.
"""
from __future__ import division, print_function
from common import *
import config
import itertools
import log
import os
import random
import pprint
import re
import subprocess
import sys
import time
from optparse import OptionParser
# Locations of various RAMCloud executables.
coordinator_binary = '%s/coordinator' % config.hooks.get_remote_obj_path()
server_binary = '%s/server' % config.hooks.get_remote_obj_path()
ensure_servers_bin = '%s/apps/ensureServers' % config.hooks.get_remote_obj_path()
# valgrind
valgrind_command = ''
# Info used to construct service locators for each of the transports
# supported by RAMCloud. In some cases the locator for the coordinator
# needs to be different from that for the servers.
server_locator_templates = {
'tcp': 'tcp:host=%(host)s,port=%(port)d',
'tcp-1g': 'tcp:host=%(host1g)s,port=%(port)d',
'basic+udp': 'basic+udp:host=%(host)s,port=%(port)d',
'basic+udp-1g': 'basic+udp:host=%(host1g)s,port=%(port)d',
'unreliable+udp': 'unreliable+udp:host=%(host)s,port=%(port)d',
'infrc': 'infrc:host=%(host)s,port=%(port)d',
'basic+infud': 'basic+infud:host=%(host1g)s',
'unreliable+infud': 'unreliable+infud:host=%(host1g)s',
'unreliable+infeth': 'unreliable+infeth:mac=00:11:22:33:44:%(id)02x',
'basic+dpdk': 'basic+dpdk:',
}
coord_locator_templates = {
'tcp': 'tcp:host=%(host)s,port=%(port)d',
'tcp-1g': 'tcp:host=%(host1g)s,port=%(port)d',
'basic+udp': 'basic+udp:host=%(host)s,port=%(port)d',
'basic+udp-1g': 'basic+udp:host=%(host1g)s,port=%(port)d',
'unreliable+udp': 'unreliable+udp:host=%(host)s,port=%(port)d',
'infrc': 'infrc:host=%(host)s,port=%(port)d',
# Coordinator uses udp even when rest of cluster uses infud
# or infeth.
'basic+infud': 'basic+udp:host=%(host)s,port=%(port)d',
'basic+dpdk': 'basic+udp:host=%(host)s,port=%(port)d',
}
def server_locator(transport, host, port=server_port):
"""Generate a service locator for a master/backup process.
@param transport: A transport name (e.g. infrc, basic+infud, tcp, ...)
@type transport: C{str}
@param host: A 3-tuple of (hostname, ip, id).
@type host: C{(str, str, int)}
@param port: Port which should be part of the locator (if any).
Allows multiple services to be started on the same host.
@type port: C{int}
@return: A service locator.
@rtype: C{str}
"""
locator = (server_locator_templates[transport] %
{'host': host[1],
'host1g': host[0],
'port': port,
'id': host[2]})
return locator
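# Example (illustrative): for a host tuple such as ('rc01', '192.168.1.101', 1),
# server_locator('tcp', host) fills the 'tcp' template with the host's IP
# (host[1]) and the default server_port, yielding something like
# 'tcp:host=192.168.1.101,port=<server_port>', whereas the '*-1g' and infud
# templates substitute the hostname (host[0]) instead.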
def coord_locator(transport, host):
"""Generate a service locator for a coordinator process.
@param transport: A transport name (e.g. infrc, basic+infud, tcp, ...)
@type transport: C{str}
@param host: A 3-tuple of (hostname, ip, id).
@type host: C{(str, str, int)}
@return: A service locator.
@rtype: C{str}
"""
locator = (coord_locator_templates[transport] %
{'host': host[1],
'host1g': host[0],
'port': coordinator_port,
'id': host[2]})
return locator
class Cluster(object):
"""Helper context manager for scripting and coordinating RAMCloud on a
cluster. Useful for configuring and running experiments. See run() for
a simpler interface useful for running single-shot experiments where the
cluster configuration is (mostly) static throughout the experiment.
=== Configuration/Defaults ===
The fields below control various aspects of the run as well as some
of the defaults that are used when creating processes in the cluster.
Most users of this class will want to override some of these after
an instance of Cluster is created but before any operations are
performed with it.
@ivar log_level: Log level to use for spawned servers. (default: NOTICE)
@ivar verbose: If True then print progress of starting clients/servers.
(default: False)
@ivar transport: Transport name to use for servers
(see server_locator_templates) (default: basic+infud).
@ivar replicas: Replication factor to use for each log segment. (default: 3)
@ivar disk: Server args for specifying the storage device to use for
backups (default: default_disk1 taken from {,local}config.py).
@ivar disjunct: Disjunct (not collocate) entities on each server.
=== Other Stuff ===
@ivar coordinator: None until start_coordinator() is run, then a
Sandbox.Process corresponding to the coordinator process.
@ivar servers: List of Sandbox.Process corresponding to each of the
server processes started with start_server().
@ivar masters_started: Number of masters started via start_server(). Note that
ensure_servers() uses this, so if you kill processes
in the cluster, keep this count consistent.
@ivar backups_started: Number of backups started via start_server(). Note that
ensure_servers() uses this, so if you kill processes
in the cluster, keep this count consistent.
@ivar log_subdir: Specific directory where all the processes created by
this cluster will log.
@ivar sandbox: Nested context manager that cleans up processes when the
context of this cluster is exited.
"""
def __init__(self, log_dir='logs', log_exists=False,
cluster_name_exists=False):
"""
@param log_dir: Top-level directory in which to write log files.
A separate subdirectory will be created in this
directory for the log files from this run. This can
only be overridden by passing it to __init__() since
that method creates the subdirectory.
(default: logs)
@param log_exists:
Indicates whether the log directory already exists.
This will be true for cluster objects that are
created after starting the clusterperf test.
(default: False)
@param cluster_name_exists:
Indicates whether a cluster name already exists as
part of this test. Backups that are started/restarted
using the same cluster name will read data from the
replicas.
(default: False)
"""
self.log_level = 'NOTICE'
self.verbose = False
self.transport = 'basic+infud'
self.replicas = 3
self.disk = default_disk1
self.disjunct = False
# logcabin is disabled by default; run() overrides this when requested
self.enable_logcabin = False
if cluster_name_exists: # do nothing if it exists
self.cluster_name = None
if self.verbose:
print ('Cluster name exists')
else:
self.cluster_name = 'cluster_' + ''.join([chr(random.choice(
range(ord('a'), ord('z'))))
for c in range(20)])
if self.verbose:
print ('Cluster name is %s' % (self.cluster_name))
self.coordinator = None
self.next_server_id = 1
self.next_client_id = 1
self.masters_started = 0
self.backups_started = 0
self.coordinator_host = getHosts()[0]
self.coordinator_locator = coord_locator(self.transport,
self.coordinator_host)
self.log_subdir = log.createDir(log_dir, log_exists)
# Create a perfcounters directory under the log directory.
os.mkdir(self.log_subdir + '/perfcounters')
if not log_exists:
self.sandbox = Sandbox()
else:
self.sandbox = Sandbox(cleanup=False)
# create the shm directory to store shared files
try:
os.mkdir('%s/logs/shm' % os.getcwd())
except:
pass
f = open('%s/logs/shm/README' % os.getcwd(), 'w+')
f.write('This directory contains files that correspond to '
'different server processes that were started during '
'the last run of clusterperf. Filename is\n'
'"<hostname>_<pid>". Each of these files stores '
'the service locator of the respective server, which is '
'used to give information to the client.\nThe existence '
'of this file at the end of a clusterperf run means '
'that processes were not cleaned up properly the last '
'time. So one can use these pids during manual clean up.')
if not cluster_name_exists:
# store the name of the cluster by creating an empty file with
# the appropriate file name in shm so that new backups when
# created using a different cluster object can use it to read
# data from their disks
f = open('%s/logs/shm/%s' % (os.getcwd(), self.cluster_name),
'w+')
def start_coordinator(self, host, args=''):
"""Start a coordinator on a node.
@param host: (hostname, ip, id) tuple describing the node on which
to start the RAMCloud coordinator.
@param args: Additional command-line args to pass to the coordinator.
(default: '')
@return: Sandbox.Process representing the coordinator process.
"""
if self.coordinator:
raise Exception('Coordinator already started')
self.coordinator_host = host
self.coordinator_locator = coord_locator(self.transport,
self.coordinator_host)
if not self.enable_logcabin:
command = (
'%s %s -C %s -l %s --logFile %s/coordinator.%s.log %s' %
(valgrind_command,
coordinator_binary, self.coordinator_locator,
self.log_level, self.log_subdir,
self.coordinator_host[0], args))
self.coordinator = self.sandbox.rsh(self.coordinator_host[0],
command, bg=True, stderr=subprocess.STDOUT)
else:
# currently hardcoding logcabin server because ankita's logcabin
# scripts are not on git.
command = (
'%s %s -C %s -z logcabin21:61023 -l %s '
'--logFile %s/coordinator.%s.log %s' %
(valgrind_command,
coordinator_binary, self.coordinator_locator,
self.log_level, self.log_subdir,
self.coordinator_host[0], args))
self.coordinator = self.sandbox.rsh(self.coordinator_host[0],
command, bg=True, stderr=subprocess.STDOUT)
# just wait for coordinator to start
time.sleep(1)
# invoke the script that restarts the coordinator if it dies
restart_command = ('%s/restart_coordinator %s/coordinator.%s.log'
' %s %s logcabin21:61023' %
(local_scripts_path, self.log_subdir,
self.coordinator_host[0],
obj_path, self.coordinator_locator))
restarted_coord = self.sandbox.rsh(self.coordinator_host[0],
restart_command, kill_on_exit=True, bg=True,
stderr=subprocess.STDOUT)
self.ensure_servers(0, 0)
if self.verbose:
print('Coordinator started on %s at %s' %
(self.coordinator_host[0], self.coordinator_locator))
print('Coordinator command line arguments %s' %
(command))
return self.coordinator
def start_server(self,
host,
args='',
master=True,
backup=True,
disk=None,
port=server_port,
kill_on_exit=True
):
"""Start a server on a node.
@param host: (hostname, ip, id) tuple describing the node on which
to start the RAMCloud server.
@param args: Additional command-line args to pass to the server.
(default: '')
@param master: If True then the started server provides a master
service. (default: True)
@param backup: If True then the started server provides a backup
service. (default: True)
@param disk: If backup is True then the started server passes these
additional arguments to select the storage type and
location. (default: self.disk)
@param port: The port the server should listen on.
(default: see server_locator())
@param kill_on_exit:
If False, this server process is not reaped at the end
of the clusterperf test.
(default: True)
@return: Sandbox.Process representing the server process.
"""
log_prefix = '%s/server%d.%s' % (
self.log_subdir, self.next_server_id, host[0])
command = ('%s %s -C %s -L %s -r %d -l %s --clusterName __unnamed__ '
'--logFile %s.log --preferredIndex %d %s' %
(valgrind_command,
server_binary, self.coordinator_locator,
server_locator(self.transport, host, port),
self.replicas,
self.log_level,
log_prefix,
self.next_server_id,
args))
self.next_server_id += 1
if master and backup:
pass
elif master:
command += ' --masterOnly'
elif backup:
command += ' --backupOnly'
else:
raise Exception('Cannot start a server that is neither a master '
'nor backup')
if backup:
if not disk:
disk = self.disk
command += ' %s' % disk
self.backups_started += 1
if master:
self.masters_started += 1
# Adding redirection for stdout and stderr.
stdout = open(log_prefix + '.out', 'w')
stderr = open(log_prefix + '.err', 'w')
if not kill_on_exit:
server = self.sandbox.rsh(host[0], command, is_server=True,
locator=server_locator(self.transport,
host, port),
kill_on_exit=False, bg=True,
stdout=stdout,
stderr=stderr)
else:
server = self.sandbox.rsh(host[0], command, is_server=True,
locator=server_locator(self.transport,
host, port),
bg=True,
stdout=stdout,
stderr=stderr)
if self.verbose:
print('Server started on %s at %s: %s' %
(host[0],
server_locator(self.transport, host, port), command))
return server
def kill_server(self, locator):
"""Kill a running server.
@param locator: service locator for the server that needs to be
killed.
"""
path = '%s/logs/shm' % os.getcwd()
files = sorted([f for f in os.listdir(path)
if os.path.isfile( os.path.join(path, f) )])
for file in files:
f = open('%s/logs/shm/%s' % (os.getcwd(), file),'r')
service_locator = f.read()
if (locator in service_locator):
to_kill = '1'
mhost = file
subprocess.Popen(['ssh', mhost.split('_')[0],
'%s/killserver' % config.hooks.get_remote_scripts_path(),
to_kill, os.getcwd(), mhost])
f.close()
try:
os.remove('%s/logs/shm/%s' % (os.getcwd(), file))
except:
pass
else:
f.close()
def ensure_servers(self, numMasters=None, numBackups=None, timeout=30):
"""Poll the coordinator and block until the specified number of
masters and backups have enlisted. Useful for ensuring that the
cluster is in the expected state before experiments begin.
If the expected state isn't achieved within the timeout, the call
will throw an exception.
@param numMasters: Number of masters that must be part of the
cluster before this call returns successfully.
If unspecified then wait until all the masters
started with start_server() have enlisted.
@param numBackups: Number of backups that must be part of the
cluster before this call returns successfully.
If unspecified then wait until all the backups
started with start_server() have enlisted.
"""
if not numMasters:
numMasters = self.masters_started
if not numBackups:
numBackups = self.backups_started
self.sandbox.checkFailures()
try:
ensureCommand = ('%s -C %s -m %d -b %d -l 1 --wait %d '
'--logFile %s/ensureServers.log' %
(ensure_servers_bin, self.coordinator_locator,
numMasters, numBackups, timeout,
self.log_subdir))
if self.verbose:
print("ensureServers command: %s" % ensureCommand)
self.sandbox.rsh(self.coordinator_host[0], ensureCommand)
except:
# prefer exceptions from dead processes to timeout error
self.sandbox.checkFailures()
raise
def start_clients(self, hosts, client):
"""Start a client binary on a set of nodes.
@param hosts: List of (hostname, ip, id) tuples describing the
nodes on which to start the client binary.
Each binary is launched with --numClients and
--clientIndex arguments.
@param client: Path to the client binary to run along with any
args to pass to each client.
@return: List of Sandbox.Process instances representing the client processes.
"""
num_clients = len(hosts)
args = client.split(' ')
client_bin = args[0]
client_args = ' '.join(args[1:])
clients = []
for i, client_host in enumerate(hosts):
command = ('%s %s -C %s --numClients %d --clientIndex %d '
'--logFile %s/client%d.%s.log %s' %
(valgrind_command,
client_bin, self.coordinator_locator, num_clients,
i, self.log_subdir, self.next_client_id,
client_host[0], client_args))
self.next_client_id += 1
clients.append(self.sandbox.rsh(client_host[0], command, bg=True))
if self.verbose:
print('Client %d started on %s: %s' % (i, client_host[0],
command))
return clients
def wait(self, processes, timeout=30):
"""Wait for a set of processes to exit.
@param processes: List of Sandbox.Process instances as returned by
start_coordinator, start_server, and start_clients
whose exit should be waited on.
@param timeout: Seconds to wait for exit before giving up and throwing
an exception. (default: 30)
"""
start = time.time()
for i, p in enumerate(processes):
while p.proc.returncode is None:
self.sandbox.checkFailures()
time.sleep(.1)
if time.time() - start > timeout:
raise Exception('timeout exceeded %s' % self.log_subdir)
if self.verbose:
print('%s finished' % p.sonce)
def remove_empty_files(self):
"""Remove blank files and empty directories within the log directory.
"""
root = self.log_subdir
for item in os.listdir(root):
path = os.path.join(root, item)
if os.path.isfile(path):
if os.path.getsize(path) == 0:
os.remove(path)
elif os.path.isdir(path):
try:
os.rmdir(path)
except:
pass
def shutdown(self):
"""Kill all remaining processes started as part of this cluster and
wait for their exit. Usually called implicitly if 'with' keyword is
used with the cluster."""
self.__exit__(None, None, None)
def __enter__(self):
self.sandbox.__enter__()
config.hooks.cluster_enter(self)
return self
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
config.hooks.cluster_exit()
self.sandbox.__exit__(exc_type, exc_value, exc_tb)
self.remove_empty_files()
return False # rethrow exception, if any
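# Minimal usage sketch of the Cluster context manager (illustrative only, not
# executed here). Hosts come from getHosts() as elsewhere in this script; the
# client binary path is a hypothetical placeholder.
#
#     with Cluster(log_dir='logs') as cluster:
#         cluster.transport = 'tcp'
#         cluster.replicas = 1
#         hosts = getHosts()
#         cluster.start_coordinator(hosts[-1])
#         for host in hosts[:2]:
#             cluster.start_server(host)
#         cluster.ensure_servers()
#         clients = cluster.start_clients(hosts[2:3], 'obj/apps/someClient')
#         cluster.wait(clients, timeout=60)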
def run(
num_servers=4, # Number of hosts on which to start
# servers (not including coordinator).
backup_disks_per_server=2, # Number of backup disks to use on each
# server host (0, 1, or 2).
replicas=3, # Replication factor to use for each
# log segment.
disk1=default_disk1, # Server arguments specifying the
# backing device when one backup disk
# is used on each server.
disk2=default_disk2, # Server arguments specifying the
# backing devices when two backup disks
# are used on each server
# (if backup_disks_per_server == 2).
timeout=20, # How many seconds to wait for the
# clients to complete.
coordinator_args='', # Additional arguments for the
# coordinator.
master_args='', # Additional arguments for each server
# that runs a master
backup_args='', # Additional arguments for each server
# that runs a backup.
log_level='NOTICE', # Log level to use for all servers.
log_dir='logs', # Top-level directory in which to write
# log files. A separate subdirectory
# will be created in this directory
# for the log files from this run.
client=None, # Command-line to invoke for each client
# additional arguments will be prepended
# with configuration information such as
# -C.
num_clients=0, # Number of client processes to run.
# They will all run on separate
# machines, if possible, but if there
# aren't enough available machines then
# multiple clients will run on some
# machines.
client_hosts=None, # An explicit list of hosts (in
# host, ip, id triples) on which clients
# should be run. If this is set and
# share_hosts is set then share_hosts is
# ignored.
share_hosts=False, # True means clients can be run on
# machines running servers, if needed.
transport='basic+infud', # Name of transport to use for servers.
verbose=False, # Print information about progress in
# starting clients and servers
debug=False, # If True, pause after starting all
# servers to allow for debugging setup,
# such as attaching gdb.
old_master_host=None, # Pass a (hostname, ip, id) tuple to
# construct a large master on that host
# before the others are started. Useful
# for creating the old master for
# recoveries.
old_master_args='', # Additional arguments to run on the
# old master (e.g. total RAM).
enable_logcabin=False, # Do not enable logcabin.
valgrind=False, # Do not run under valgrind
valgrind_args='', # Additional arguments for valgrind
disjunct=False, # Disjunct entities on a server
coordinator_host=None
):
"""
Start a coordinator and servers, as indicated by the arguments. If a
client is specified, then start one or more client processes and wait for
them to complete. Otherwise leave the cluster running.
@return: string indicating the path to the log files for this run.
"""
# client_hosts = [('rc52', '192.168.1.152', 52)]
if client:
if num_clients == 0:
num_clients = 1
if verbose:
print('num_servers=(%d), available hosts=(%d) defined in config.py'
% (num_servers, len(getHosts())))
print ('disjunct=', disjunct)
# When disjunct=True, disjuncts Coordinator and Clients on Server nodes.
if disjunct:
if num_servers + num_clients + 1 > len(getHosts()):
raise Exception('num_servers (%d)+num_clients (%d)+1(coord) exceeds the available hosts (%d)'
% (num_servers, num_clients, len(getHosts())))
else:
if num_servers > len(getHosts()):
raise Exception('num_servers (%d) exceeds the available hosts (%d)'
% (num_servers, len(getHosts())))
if not share_hosts and not client_hosts:
if (len(getHosts()) - num_servers) < 1:
raise Exception('Asked for %d servers without sharing hosts with %d '
'clients, but only %d hosts were available'
% (num_servers, num_clients, len(getHosts())))
masters_started = 0
backups_started = 0
global valgrind_command
if valgrind:
valgrind_command = ('valgrind %s' % valgrind_args)
with Cluster(log_dir) as cluster:
cluster.log_level = log_level
cluster.verbose = verbose
cluster.transport = transport
cluster.replicas = replicas
cluster.timeout = timeout
cluster.disk = disk1
cluster.enable_logcabin = enable_logcabin
cluster.disjunct = disjunct
cluster.hosts = getHosts()
if not coordinator_host:
coordinator_host = cluster.hosts[len(cluster.hosts)-1]
coordinator = cluster.start_coordinator(coordinator_host,
coordinator_args)
if disjunct:
cluster.hosts.pop(0)
if old_master_host:
oldMaster = cluster.start_server(old_master_host,
old_master_args,
backup=False)
oldMaster.ignoreFailures = True
masters_started += 1
cluster.ensure_servers(timeout=60)
for host in cluster.hosts[:num_servers]:
backup = False
args = master_args
disk_args = None
if backup_disks_per_server > 0:
backup = True
args += ' %s' % backup_args
backups_started += 1
disk_args = disk1 if backup_disks_per_server == 1 else disk2
cluster.start_server(host, args, backup=backup, disk=disk_args)
masters_started += 1
if disjunct:
cluster.hosts = cluster.hosts[num_servers:]
if masters_started > 0 or backups_started > 0:
cluster.ensure_servers()
if verbose:
print('All servers running')
if not client:
print('Servers started.')
raw_input('Type <Enter> to shutdown servers: ')
elif debug:
print('Servers started; pausing for debug setup.')
raw_input('Type <Enter> to continue: ')
if client:
# Note: even if it's OK to share hosts between clients and servers,
# don't do it unless necessary.
if not client_hosts:
if disjunct:
host_list = cluster.hosts[:]
else:
host_list = cluster.hosts[num_servers:]
if share_hosts:
host_list.extend(cluster.hosts[:num_servers])
client_hosts = [host_list[i % len(host_list)]
for i in range(num_clients)]
assert(len(client_hosts) == num_clients)
clients = cluster.start_clients(client_hosts, client)
cluster.wait(clients, timeout)
return cluster.log_subdir
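# Example (illustrative): run() is what clusterperf-style scripts typically call
# directly; the client command line below is a hypothetical placeholder.
#
#     log_subdir = run(num_servers=4,
#                      backup_disks_per_server=1,
#                      replicas=3,
#                      transport='basic+infud',
#                      client='obj/apps/someClient --count 1000',
#                      num_clients=2,
#                      verbose=True)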
if __name__ == '__main__':
parser = OptionParser(description=
'Start RAMCloud servers and run a client application.',
conflict_handler='resolve')
parser.add_option('--backupArgs', metavar='ARGS', default='',
dest='backup_args',
help='Additional command-line arguments to pass to '
'each backup')
parser.add_option('-b', '--backupDisks', type=int, default=1,
metavar='N', dest='backup_disks_per_server',
help='Number of backup disks to run on each server host '
'(0, 1, or 2)')
parser.add_option('--client', metavar='ARGS',
help='Command line to invoke the client application '
'(additional arguments will be inserted at the beginning '
'of the argument list)')
parser.add_option('-n', '--clients', type=int, default=1,
metavar='N', dest='num_clients',
help='Number of instances of the client application '
'to run')
parser.add_option('--coordinatorArgs', metavar='ARGS', default='',
dest='coordinator_args',
help='Additional command-line arguments to pass to the '
'cluster coordinator')
parser.add_option('--debug', action='store_true', default=False,
help='Pause after starting servers but before running '
'clients to enable debugging setup')
parser.add_option('--disk1', default=default_disk1,
help='Server arguments to specify disk for first backup')
parser.add_option('--disk2', default=default_disk2,
help='Server arguments to specify disk for second backup')
parser.add_option('-l', '--logLevel', default='NOTICE',
choices=['DEBUG', 'NOTICE', 'WARNING', 'ERROR', 'SILENT'],
metavar='L', dest='log_level',
help='Controls degree of logging in servers')
parser.add_option('-d', '--logDir', default='logs',
metavar='DIR',
dest='log_dir',
help='Top level directory for log files; the files for '
'each invocation will go in a subdirectory.')
parser.add_option('--masterArgs', metavar='ARGS', default='',
dest='master_args',
help='Additional command-line arguments to pass to '
'each master')
parser.add_option('-r', '--replicas', type=int, default=3,
metavar='N',
help='Number of disk backup copies for each segment')
parser.add_option('-s', '--servers', type=int, default=4,
metavar='N', dest='num_servers',
help='Number of hosts on which to run servers')
parser.add_option('--shareHosts', action='store_true', default=False,
dest='share_hosts',
help='Allow clients to run on machines running servers '
'(by default clients run on different machines than '
'the servers, though multiple clients may run on a '
'single machine)')
parser.add_option('-t', '--timeout', type=int, default=20,
metavar='SECS',
help="Abort if the client application doesn't finish within "
'SECS seconds')
parser.add_option('-T', '--transport', default='basic+infud',
help='Transport to use for communication with servers')
parser.add_option('-v', '--verbose', action='store_true', default=False,
help='Print progress messages')
parser.add_option('--valgrind', action='store_true', default=False,
help='Run all the processes under valgrind')
parser.add_option('--valgrindArgs', metavar='ARGS', default='',
dest='valgrind_args',
help='Arguments to pass to valgrind')
parser.add_option('--disjunct', action='store_true', default=False,
help='Disjunct entities (disable collocation) on each server')
(options, args) = parser.parse_args()
status = 0
try:
run(**vars(options))
finally:
logInfo = log.scan("logs/latest", ["WARNING", "ERROR"])
if len(logInfo) > 0:
print(logInfo, file=sys.stderr)
status = 1
quit(status)
|
|
"""The tests for numeric state automation."""
from datetime import timedelta
import logging
from unittest.mock import patch
import pytest
import voluptuous as vol
import homeassistant.components.automation as automation
from homeassistant.components.homeassistant.triggers import (
numeric_state as numeric_state_trigger,
)
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.core import Context
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
assert_setup_component,
async_fire_time_changed,
async_mock_service,
mock_component,
)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
async def setup_comp(hass):
"""Initialize components."""
mock_component(hass, "group")
await async_setup_component(
hass,
"input_number",
{
"input_number": {
"value_3": {"min": 0, "max": 255, "initial": 3},
"value_5": {"min": 0, "max": 255, "initial": 5},
"value_8": {"min": 0, "max": 255, "initial": 8},
"value_10": {"min": 0, "max": 255, "initial": 10},
"value_12": {"min": 0, "max": 255, "initial": 12},
"value_100": {"min": 0, "max": 255, "initial": 100},
}
},
)
hass.states.async_set("number.value_10", 10)
hass.states.async_set("sensor.value_10", 10)
@pytest.mark.parametrize(
"below", (10, "input_number.value_10", "number.value_10", "sensor.value_10")
)
async def test_if_not_fires_on_entity_removal(hass, calls, below):
"""Test the firing with removed entity."""
hass.states.async_set("test.entity", 11)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# Entity disappears
hass.states.async_remove("test.entity")
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"below", (10, "input_number.value_10", "number.value_10", "sensor.value_10")
)
async def test_if_fires_on_entity_change_below(hass, calls, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
context = Context()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {
"service": "test.automation",
"data_template": {"id": "{{ trigger.id}}"},
},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9, context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
# Set above 12 so the automation will fire again
hass.states.async_set("test.entity", 12)
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["id"] == 0
@pytest.mark.parametrize(
"below", (10, "input_number.value_10", "number.value_10", "sensor.value_10")
)
async def test_if_fires_on_entity_change_below_uuid(hass, calls, below):
"""Test the firing with changed entity specified by registry entry id."""
registry = er.async_get(hass)
entry = registry.async_get_or_create(
"test", "hue", "1234", suggested_object_id="entity"
)
assert entry.entity_id == "test.entity"
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
context = Context()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": entry.id,
"below": below,
},
"action": {
"service": "test.automation",
"data_template": {"id": "{{ trigger.id}}"},
},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9, context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
# Set above 12 so the automation will fire again
hass.states.async_set("test.entity", 12)
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["id"] == 0
@pytest.mark.parametrize(
"below", (10, "input_number.value_10", "number.value_10", "sensor.value_10")
)
async def test_if_fires_on_entity_change_over_to_below(hass, calls, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"below", (10, "input_number.value_10", "number.value_10", "sensor.value_10")
)
async def test_if_fires_on_entities_change_over_to_below(hass, calls, below):
"""Test the firing with changed entities."""
hass.states.async_set("test.entity_1", 11)
hass.states.async_set("test.entity_2", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 2
@pytest.mark.parametrize(
"below", (10, "input_number.value_10", "number.value_10", "sensor.value_10")
)
async def test_if_not_fires_on_entity_change_below_to_below(hass, calls, below):
"""Test the firing with changed entity."""
context = Context()
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10 so this should fire
hass.states.async_set("test.entity", 9, context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
# already below so should not fire again
hass.states.async_set("test.entity", 5)
await hass.async_block_till_done()
assert len(calls) == 1
# still below so should not fire again
hass.states.async_set("test.entity", 3)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"below", (10, "input_number.value_10", "number.value_10", "sensor.value_10")
)
async def test_if_not_below_fires_on_entity_change_to_equal(hass, calls, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 10 is not below 10 so this should not fire again
hass.states.async_set("test.entity", 10)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"below", (10, "input_number.value_10", "number.value_10", "sensor.value_10")
)
async def test_if_not_fires_on_initial_entity_below(hass, calls, below):
"""Test the firing when starting with a match."""
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# Do not fire on first update when initial state was already below
hass.states.async_set("test.entity", 8)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above", (10, "input_number.value_10", "number.value_10", "sensor.value_10")
)
async def test_if_not_fires_on_initial_entity_above(hass, calls, above):
"""Test the firing when starting with a match."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# Do not fire on first update when initial state was already above
hass.states.async_set("test.entity", 12)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above", (10, "input_number.value_10", "number.value_10", "sensor.value_10")
)
async def test_if_fires_on_entity_change_above(hass, calls, above):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is above 10
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_entity_unavailable_at_startup(hass, calls):
"""Test the firing with changed entity at startup."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": 10,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is above 10
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_below_to_above(hass, calls, above):
"""Test the firing with changed entity."""
# set initial state
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is above 10 and 9 is below
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_above_to_above(hass, calls, above):
"""Test the firing with changed entity."""
# set initial state
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 12 is above 10 so this should fire
hass.states.async_set("test.entity", 12)
await hass.async_block_till_done()
assert len(calls) == 1
# already above, should not fire again
hass.states.async_set("test.entity", 15)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_not_above_fires_on_entity_change_to_equal(hass, calls, above):
"""Test the firing with changed entity."""
# set initial state
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 10 is not above 10 so this should not fire again
hass.states.async_set("test.entity", 10)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_below_range(hass, calls, above, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_below_above_range(hass, calls, above, below):
"""Test the firing with changed entity."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 4 is below the lower bound of 5, so this should not fire
hass.states.async_set("test.entity", 4)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_over_to_below_range(hass, calls, above, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_over_to_below_above_range(
hass, calls, above, below
):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": above,
"above": below,
},
"action": {"service": "test.automation"},
}
},
)
# 4 is below 5 so it should not fire
hass.states.async_set("test.entity", 4)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (100, "input_number.value_100"))
async def test_if_not_fires_if_entity_not_match(hass, calls, below):
"""Test if not fired with non matching entity."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.another_entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_not_fires_and_warns_if_below_entity_unknown(hass, caplog, calls):
"""Test if warns with unknown below entity."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": "input_number.unknown",
},
"action": {"service": "test.automation"},
}
},
)
caplog.clear()
caplog.set_level(logging.WARNING)
hass.states.async_set("test.entity", 1)
await hass.async_block_till_done()
assert len(calls) == 0
assert len(caplog.record_tuples) == 1
assert caplog.record_tuples[0][1] == logging.WARNING
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_below_with_attribute(hass, calls, below):
"""Test attributes change."""
hass.states.async_set("test.entity", 11, {"test_attribute": 11})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9, {"test_attribute": 11})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_not_below_with_attribute(
hass, calls, below
):
"""Test attributes."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10
hass.states.async_set("test.entity", 11, {"test_attribute": 9})
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_attribute_change_with_attribute_below(hass, calls, below):
"""Test attributes change."""
hass.states.async_set("test.entity", "entity", {"test_attribute": 11})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", "entity", {"test_attribute": 9})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_attribute_change_with_attribute_not_below(
hass, calls, below
):
"""Test attributes change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10
hass.states.async_set("test.entity", "entity", {"test_attribute": 11})
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_with_attribute_below(hass, calls, below):
"""Test attributes change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10, entity state value should not be tested
hass.states.async_set("test.entity", "9", {"test_attribute": 11})
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_with_not_attribute_below(
hass, calls, below
):
"""Test attributes change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# there is no test_attribute, so the template cannot render a number and the trigger should not fire
hass.states.async_set("test.entity", "entity")
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_fires_on_attr_change_with_attribute_below_and_multiple_attr(
hass, calls, below
):
"""Test attributes change."""
hass.states.async_set(
"test.entity", "entity", {"test_attribute": 11, "not_test_attribute": 11}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set(
"test.entity", "entity", {"test_attribute": 9, "not_test_attribute": 11}
)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_template_list(hass, calls, below):
"""Test template list."""
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 11]})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute[2] }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 3 is below 10
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 3]})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10.0, "input_number.value_10"))
async def test_template_string(hass, calls, below):
"""Test template string."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute | multiply(10) }}",
"below": below,
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"below",
"above",
"from_state.state",
"to_state.state",
)
)
},
},
}
},
)
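# The joined string above expands (roughly) to:
#   {{ trigger.platform }} - {{ trigger.entity_id }} - {{ trigger.below }} - {{ trigger.above }} - {{ trigger.from_state.state }} - {{ trigger.to_state.state }}
# which is the rendered value checked by the assertion at the end of this test.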
hass.states.async_set("test.entity", "test state 1", {"test_attribute": "1.2"})
await hass.async_block_till_done()
hass.states.async_set("test.entity", "test state 2", {"test_attribute": "0.9"})
await hass.async_block_till_done()
assert len(calls) == 1
assert (
calls[0].data["some"]
== f"numeric_state - test.entity - {below} - None - test state 1 - test state 2"
)
async def test_not_fires_on_attr_change_with_attr_not_below_multiple_attr(hass, calls):
"""Test if not fired changed attributes."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": 10,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10
hass.states.async_set(
"test.entity", "entity", {"test_attribute": 11, "not_test_attribute": 9}
)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_action(hass, calls, above, below):
"""Test if action."""
entity_id = "domain.test_entity"
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "numeric_state",
"entity_id": entity_id,
"above": above,
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set(entity_id, 10)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(entity_id, 8)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(entity_id, 9)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fails_setup_bad_for(hass, calls, above, below):
"""Test for setup failure for bad for."""
hass.states.async_set("test.entity", 5)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"invalid": 5},
},
"action": {"service": "homeassistant.turn_on"},
}
},
)
with patch.object(numeric_state_trigger, "_LOGGER") as mock_logger:
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert mock_logger.error.called
async def test_if_fails_setup_for_without_above_below(hass, calls):
"""Test for setup failures for missing above or below."""
with assert_setup_component(0, automation.DOMAIN):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"for": {"seconds": 5},
},
"action": {"service": "homeassistant.turn_on"},
}
},
)
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_not_fires_on_entity_change_with_for(hass, calls, above, below):
"""Test for not firing on entity change with for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
hass.states.async_set("test.entity", 15)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_not_fires_on_entities_change_with_for_after_stop(
hass, calls, above, below
):
"""Test for not firing on entities change with for after stop."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity_1", 9)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity_1", 15)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
hass.states.async_set("test.entity_1", 9)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entity_change_with_for_attribute_change(
hass, calls, above, below
):
"""Test for firing on entity change with for and attribute change."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=4)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity", 9, attributes={"mock_attr": "attr_change"})
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=4)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entity_change_with_for(hass, calls, above, below):
"""Test for firing on entity change with for."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_wait_template_with_trigger(hass, calls, above):
"""Test using wait template with 'trigger.entity_id'."""
hass.states.async_set("test.entity", "0")
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": [
{"wait_template": "{{ states(trigger.entity_id) | int < 10 }}"},
{
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
("platform", "entity_id", "to_state.state")
)
},
},
],
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "12")
hass.states.async_set("test.entity", "8")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "numeric_state - test.entity - 12"
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entities_change_no_overlap(hass, calls, above, below):
"""Test for firing on entities change with no overlap."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {"some": "{{ trigger.entity_id }}"},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=10)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1"
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=10)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2"
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entities_change_overlap(hass, calls, above, below):
"""Test for firing on entities change with overlap."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {"some": "{{ trigger.entity_id }}"},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1"
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2"
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_change_with_for_template_1(hass, calls, above, below):
"""Test for firing on change with for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": "{{ 5 }}"},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_change_with_for_template_2(hass, calls, above, below):
"""Test for firing on change with for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": "{{ 5 }}",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_change_with_for_template_3(hass, calls, above, below):
"""Test for firing on change with for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": "00:00:{{ 5 }}",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_on_error_with_for_template(hass, calls):
"""Test for not firing on error with for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": 100,
"for": "00:00:05",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 101)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=3))
hass.states.async_set("test.entity", "unavailable")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=3))
hass.states.async_set("test.entity", 101)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_invalid_for_template(hass, calls, above, below):
"""Test for invalid for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": "{{ five }}",
},
"action": {"service": "test.automation"},
}
},
)
with patch.object(numeric_state_trigger, "_LOGGER") as mock_logger:
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert mock_logger.error.called
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entities_change_overlap_for_template(
hass, calls, above, below
):
"""Test for firing on entities change with overlap and for template."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": '{{ 5 if trigger.entity_id == "test.entity_1"'
" else 10 }}",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.entity_id }} - {{ trigger.for }}"
},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1 - 0:00:05"
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
mock_utcnow.return_value += timedelta(seconds=5)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2 - 0:00:10"
async def test_below_above(hass):
"""Test above cannot be above below."""
with pytest.raises(vol.Invalid):
await numeric_state_trigger.async_validate_trigger_config(
hass, {"platform": "numeric_state", "above": 1200, "below": 1000}
)
async def test_schema_unacceptable_entities(hass):
"""Test input_number, number & sensor only is accepted for above/below."""
with pytest.raises(vol.Invalid):
await numeric_state_trigger.async_validate_trigger_config(
hass,
{
"platform": "numeric_state",
"above": "input_datetime.some_input",
"below": 1000,
},
)
with pytest.raises(vol.Invalid):
await numeric_state_trigger.async_validate_trigger_config(
hass,
{
"platform": "numeric_state",
"below": "input_datetime.some_input",
"above": 1200,
},
)
@pytest.mark.parametrize("above", (3, "input_number.value_3"))
async def test_attribute_if_fires_on_entity_change_with_both_filters(
hass, calls, above
):
"""Test for firing if both filters are match attribute."""
hass.states.async_set("test.entity", "bla", {"test-measurement": 1})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"attribute": "test-measurement",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "bla", {"test-measurement": 4})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (3, "input_number.value_3"))
async def test_attribute_if_not_fires_on_entities_change_with_for_after_stop(
hass, calls, above
):
"""Test for not firing on entity change with for after stop trigger."""
hass.states.async_set("test.entity", "bla", {"test-measurement": 1})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"attribute": "test-measurement",
"for": 5,
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "bla", {"test-measurement": 4})
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
((8, 12),),
)
async def test_variables_priority(hass, calls, above, below):
"""Test an externally defined trigger variable is overridden."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger_variables": {"trigger": "illegal"},
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": '{{ 5 if trigger.entity_id == "test.entity_1"'
" else 10 }}",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.entity_id }} - {{ trigger.for }}"
},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1 - 0:00:05"
@pytest.mark.parametrize("multiplier", (1, 5))
async def test_template_variable(hass, calls, multiplier):
"""Test template variable."""
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 11]})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger_variables": {"multiplier": multiplier},
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute[2] * multiplier}}",
"below": 10,
},
"action": {"service": "test.automation"},
}
},
)
# 3 is below 10
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 3]})
await hass.async_block_till_done()
if multiplier * 3 < 10:
assert len(calls) == 1
else:
assert len(calls) == 0
|
|
#dice guideline
from random import randint
#FOR program module
moduleName="Dice Module"
#moduleName="The Lusty Orc Dice Module"
#FOR dice rolls
#mulligan_yes_or_no=True not implemented
the_lowest_possible_roll=1
the_number_of_sides_on_a_die=6
the_number_of_rolls_in_a_set=4
reroll_if_equal_or_less=0
number_of_lowest_rolls_to_drop_in_a_set=1
number_of_highest_rolls_to_drop_in_a_set=1
#Rules for attribute rolls, Do not alter anything past this line
'''
def dicerollbatch():
WorkingBatch=[]
for setNumber in range(self.dieSets):
rolls=[]
dropped=[]
#loops die_number_per_set times, adding each random roll result to both the rolls and dropped lists; each roll is between lowest_possible_roll and number_of_sides_on_die
for roll in range(self.setDieNumber):
r=(randint(lowest_possible_roll, self.dieSides))
rolls.append(r)
dropped.append(r)
#after the rolls are done (normally 4 of them), the set is added to the rollBatch container and a copy is added to the dropped-sets container
self.rollBatch.append(rolls)
dropped.remove(min(dropped))
self.droppedBatch.append(dropped)
#after the roll sets have been added to the batch, the batch count is incremented by one
self.batch+=1
#after the numbers have been generated and appended to the batch the sets are printed out vertically
print("number of batch attempts:"+str(self.batch)+"\nStat Rolls")
for batchSets in range(len(self.rollBatch)):
at=0
for batchRolls in range(len(self.droppedBatch[batchSets])):at+=self.droppedBatch[batchSets][batchRolls]
self.attributeResults.append(at)
print((self.rollBatch[batchSets]), (self.attributeResults[batchSets]))
'''
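#A minimal standalone sketch (not part of the original module) of how the
#guideline constants above combine into a single attribute roll: roll a set,
#reroll anything at or below the reroll threshold, drop the configured number
#of lowest and highest rolls, and sum what remains.
def roll_attribute_sketch():
    rolls=[]
    for _ in range(the_number_of_rolls_in_a_set):
        result=randint(the_lowest_possible_roll, the_number_of_sides_on_a_die)
        while result<=reroll_if_equal_or_less:
            result=randint(the_lowest_possible_roll, the_number_of_sides_on_a_die)
        rolls.append(result)
    kept=sorted(rolls)
    if number_of_lowest_rolls_to_drop_in_a_set>0:
        kept=kept[number_of_lowest_rolls_to_drop_in_a_set:]
    if number_of_highest_rolls_to_drop_in_a_set>0:
        kept=kept[:-number_of_highest_rolls_to_drop_in_a_set]
    return rolls, sum(kept)
#print(roll_attribute_sketch())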
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#test_4_rolls_print
from random import randint
The_number_of_rolls_in_a_set=4
The_number_of_sides_on_a_die=6
the_lowest_possible_roll=1
#the following function rolls 4 numbers between 1 and 6 and then prints them out vertically
def roll_set_of_dice():
for roll in range(The_number_of_rolls_in_a_set):
roll_result=(randint(the_lowest_possible_roll, The_number_of_sides_on_a_die))
print("%s" % roll_result)
return
#roll_set_of_dice()
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#test_4_rolls_output_to_list_print_list
from random import randint
the_number_of_rolls_in_a_set=4
the_number_of_sides_on_a_die=6
the_lowest_possible_roll=1
#the following function rolls four 6-sided dice, copies the results to a list, and then prints the list
def roll_set_of_dice():
set_of_dice_rolls=[]
for roll in range(the_number_of_rolls_in_a_set):
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
set_of_dice_rolls.append(roll_result)
print(set_of_dice_rolls)
return
#roll_set_of_dice()
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#test_4_rolls with reroll and output
from random import randint
the_number_of_rolls_in_a_set=4
the_number_of_sides_on_a_die=6
the_lowest_possible_roll=1
reroll_if_equal_or_less=5
#rolls 4 dice between 1 and 6, rerolls any result that is 5 or less, and then prints the resulting set
def roll_set_of_dice():
set_of_dice_rolls=[]
for roll in range(the_number_of_rolls_in_a_set):
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
while roll_result<=reroll_if_equal_or_less:
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
print("reroll %s" %roll_result)
else:set_of_dice_rolls.append(roll_result)
print(set_of_dice_rolls)
return
#roll_set_of_dice()
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#test_4_rolls: if drop lowest or highest is greater than zero, copy set_of_dice_rolls to set_of_dice_rolls_adjusted
from random import randint
the_number_of_rolls_in_a_set=4
the_number_of_sides_on_a_die=6
the_lowest_possible_roll=1
reroll_if_equal_or_less=0
number_of_lowest_rolls_to_drop_in_a_set=1
number_of_highest_rolls_to_drop_in_a_set=0
#rolls 4 dice between 1 and 6, rerolls any result at or below reroll_if_equal_or_less, and, if any drop setting is non-zero, copies the set to set_of_dice_rolls_adjusted before printing both lists
def roll_set_of_dice():
set_of_dice_rolls=[]
set_of_dice_rolls_adjusted=[]
for roll in range(the_number_of_rolls_in_a_set):
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
while roll_result<=reroll_if_equal_or_less:
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
print("reroll %s" %roll_result)
else:set_of_dice_rolls.append(roll_result)
if (number_of_lowest_rolls_to_drop_in_a_set>0) or (number_of_highest_rolls_to_drop_in_a_set>0):
for roll_results in range(len(set_of_dice_rolls)):
set_of_dice_rolls_adjusted.append(set_of_dice_rolls[roll_results])
print(set_of_dice_rolls_adjusted)
print(set_of_dice_rolls)
return
#roll_set_of_dice()
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#test_4_rolls drop highest and lowest
from random import randint
the_number_of_rolls_in_a_set=4
the_number_of_sides_on_a_die=6
the_lowest_possible_roll=1
reroll_if_equal_or_less=0
number_of_lowest_rolls_to_drop_in_a_set=1
number_of_highest_rolls_to_drop_in_a_set=1
#rolls 4 dice between 1 and 6, rerolls any result at or below reroll_if_equal_or_less, then drops the configured number of lowest and highest rolls from the adjusted copy and prints both lists
def roll_set_of_dice():
set_of_dice_rolls=[]
set_of_dice_rolls_adjusted=[]
for roll in range(the_number_of_rolls_in_a_set):
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
while roll_result<=reroll_if_equal_or_less:
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
print("reroll %s" %roll_result)
else:set_of_dice_rolls.append(roll_result)
for roll_results in range(len(set_of_dice_rolls)):
set_of_dice_rolls_adjusted.append(set_of_dice_rolls[roll_results])
print("\n////Break////\n%s\n%s\n////Break////\n" % (set_of_dice_rolls, set_of_dice_rolls_adjusted))
if (number_of_lowest_rolls_to_drop_in_a_set>0) or (number_of_highest_rolls_to_drop_in_a_set>0):
if number_of_lowest_rolls_to_drop_in_a_set>0:
drop_counter=0
drop_counter+=number_of_lowest_rolls_to_drop_in_a_set
#print(set_of_dice_rolls_adjusted)
#print(drop_counter)
while drop_counter>0:
set_of_dice_rolls_adjusted.remove(min(set_of_dice_rolls_adjusted))
#print(set_of_dice_rolls_adjusted)
drop_counter-=1
#print(drop_counter)
if number_of_highest_rolls_to_drop_in_a_set>0:
drop_counter=0
drop_counter+=number_of_highest_rolls_to_drop_in_a_set
#print(set_of_dice_rolls_adjusted)
#print(drop_counter)
while drop_counter>0:
set_of_dice_rolls_adjusted.remove(max(set_of_dice_rolls_adjusted))
#print(set_of_dice_rolls_adjusted)
drop_counter-=1
#print(drop_counter)
print("\n////Break////\n%s\n%s\n////Break////\n" % (set_of_dice_rolls, set_of_dice_rolls_adjusted))
return
roll_set_of_dice()
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
|
import theano.sandbox.cuda.basic_ops as sbcuda
import numpy as np
import load_data
import realtime_augmentation as ra
import time
import sys
import json
from custom_for_keras import input_generator
from datetime import datetime, timedelta
from keras.optimizers import Adam
from custom_keras_model_x_cat_x_maxout import kaggle_x_cat_x_maxout\
as kaggle_winsol
start_time = time.time()
copy_to_ram_beforehand = False
debug = True
predict = False # not implemented
continueAnalysis = True
saveAtEveryValidation = True
import_conv_weights = False
get_winsol_weights = False
# only relevant if the analysis is not continued and winsol weights are not loaded,
# see http://arxiv.org/abs/1511.06422 for a description
# for this to work, the batch size has to be something like 128, 256, 512,
# ... reason not found
DO_LSUV_INIT = True
BATCH_SIZE = 256 # keep in mind
NUM_INPUT_FEATURES = 3
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
EPOCHS = 200
VALIDATE_EVERY = 5 # 20 # 12 # 6 # 6 # 6 # 5 #
NUM_EPOCHS_NONORM = 0.
# this should be only a few epochs; 0.1 hopefully suffices.
INCLUDE_FLIP = True
TRAIN_LOSS_SF_PATH = "trainingNmbrs_started_adam_lsuv.txt"
# TARGET_PATH = "predictions/final/try_convnet.csv"
WEIGHTS_PATH='analysis/final/try_lsuv_adam_next.h5'
CONV_WEIGHT_PATH = ""#analysis/final/try_started_with_3cat_noMaxout_next_next.h5"
LEARNING_RATE_SCHEDULE = {
0: 0.005,
# 2: 0.1,
# 10: 0.05,
# 40: 0.01,
# 80: 0.005,
120: 0.001
# 500: 0.04,
# 0: 0.01,
# 1800: 0.004,
# 2300: 0.0004,
# 0: 0.08,
# 50: 0.04,
# 2000: 0.008,
# 3200: 0.0008,
# 4600: 0.0004,
}
if continueAnalysis or get_winsol_weights:
LEARNING_RATE_SCHEDULE = {
0: 0.005,
120: 0.001,
#40: 0.01,
#80: 0.005
# 0: 0.0001,
# 500: 0.002,
# 800: 0.0004,
# 3200: 0.0002,
# 4600: 0.0001,
}
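# Hypothetical helper (not used below) illustrating how an epoch-keyed schedule
# such as LEARNING_RATE_SCHEDULE is typically resolved: the rate attached to the
# most recent epoch key <= the current epoch applies. The actual lookup is done
# inside the winsol class and may differ.
def lr_for_epoch(epoch, schedule=LEARNING_RATE_SCHEDULE):
    applicable = [e for e in schedule if e <= epoch]
    return schedule[max(applicable)] if applicable else schedule[min(schedule)]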
optimizer = None#Adam(lr=LEARNING_RATE_SCHEDULE[0])
input_sizes = [(69, 69), (69, 69)]
PART_SIZE = 45
N_INPUT_VARIATION = 2
GEN_BUFFER_SIZE = 2
y_train = np.load("data/solutions_train.npy")
ra.y_train = y_train
# split training data into training + a small validation set
ra.num_train = y_train.shape[0]
# integer division, defines the validation size
ra.num_valid = ra.num_train // 10
ra.num_train -= ra.num_valid
# training num check for EV usage
if ra.num_train != 55420:
print "num_train = %s not %s" % (ra.num_train, 55420)
ra.y_valid = ra.y_train[ra.num_train:]
ra.y_train = ra.y_train[:ra.num_train]
load_data.num_train = y_train.shape[0]
load_data.train_ids = np.load("data/train_ids.npy")
ra.load_data.num_train = load_data.num_train
ra.load_data.train_ids = load_data.train_ids
ra.valid_ids = load_data.train_ids[ra.num_train:]
ra.train_ids = load_data.train_ids[:ra.num_train]
train_ids = load_data.train_ids
test_ids = load_data.test_ids
num_train = ra.num_train
num_test = len(test_ids)
num_valid = ra.num_valid
y_valid = ra.y_valid
y_train = ra.y_train
valid_ids = ra.valid_ids
train_ids = ra.train_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
N_TRAIN = num_train
N_VALID = num_valid
print("The training sample contains %s , the validation sample contains %s images. \n" %
(ra.num_train, ra.num_valid))
# train without normalisation for this fraction of the training sample, to get the weights in
# the right 'zone'.
# maybe put into class
with open(TRAIN_LOSS_SF_PATH, 'a') as f:
f.write('#now with ADAM optimisation')
if continueAnalysis:
f.write('#continuing from ')
f.write(WEIGHTS_PATH)
# f.write("#wRandFlip \n")
f.write("#The training is running for %s epochs, each with %s images. The validation sample contains %s images. \n" % (
EPOCHS, N_TRAIN, ra.num_valid))
f.write("#validation is done every %s epochs\n" % VALIDATE_EVERY)
f.write("the learning rate schedule is ")
json.dump(LEARNING_RATE_SCHEDULE, f)
f.write('\n')
print 'initiate winsol class'
winsol = kaggle_winsol(BATCH_SIZE=BATCH_SIZE,
NUM_INPUT_FEATURES=NUM_INPUT_FEATURES,
PART_SIZE=PART_SIZE,
input_sizes=input_sizes,
LEARNING_RATE_SCHEDULE=LEARNING_RATE_SCHEDULE,
MOMENTUM=MOMENTUM,
LOSS_PATH=TRAIN_LOSS_SF_PATH,
WEIGHTS_PATH=WEIGHTS_PATH, include_flip=INCLUDE_FLIP)
print "Build model"
if debug:
print("input size: %s x %s x %s x %s" %
(input_sizes[0][0],
input_sizes[0][1],
NUM_INPUT_FEATURES,
BATCH_SIZE))
winsol.init_models(optimizer=Adam(lr=LEARNING_RATE_SCHEDULE[0]))
if debug:
winsol.print_summary()
print "Set up data loading"
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(
3.0, target_size=input_sizes[1])
+ ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
def create_data_gen():
augmented_data_gen = ra.realtime_augmented_data_gen(
num_chunks=N_TRAIN / BATCH_SIZE * (EPOCHS + 1),
chunk_size=BATCH_SIZE,
augmentation_params=augmentation_params,
ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(
augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(
post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
input_gen = input_generator(train_gen)
return input_gen
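# Note on the chain built above (descriptive only): chunks are augmented in real
# time, brightness-jittered by post_augment_brightness_gen, buffered (presumably
# in a separate process, given the _mp suffix) by load_data.buffered_gen_mp, and
# finally adapted by input_generator (from custom_for_keras) into the batches
# the winsol models expect.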
# may need to double the generator; can be done with
# itertools.tee(iterable, n=2)
input_gen = create_data_gen()
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(
valid_indices,
'train',
ds_transforms=ds_transforms,
chunk_size=N_VALID,
target_sizes=input_sizes)
# load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
return data_gen_valid
print "Preprocess validation data upfront"
start_time_val1 = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
# move the colour dimension up
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid]
validation_data = (
[xs_valid[0], xs_valid[1]], y_valid)
t_val = (time.time() - start_time_val1)
print " took %.2f seconds" % (t_val)
if continueAnalysis:
print "Load model weights"
winsol.load_weights(path=WEIGHTS_PATH)
winsol.WEIGHTS_PATH = ((WEIGHTS_PATH.split('.', 1)[0] + '_next.h5'))
elif get_winsol_weights:
print "import weights from run with original kaggle winner solution"
winsol.load_weights()
elif import_conv_weights:
print 'Import convnet weights from training with geometric forms'
winsol.load_conv_layers(path=CONV_WEIGHT_PATH)
elif DO_LSUV_INIT:
start_time_lsuv = time.time()
print 'Starting LSUV initialisation'
# TODO: check the influence of this .next() call on the data generation for
# the first epoch
train_batch = input_gen.next()[0]
if debug:
print type(train_batch)
print np.shape(train_batch)
winsol.LSUV_init(train_batch)
print " took %.2f seconds" % (time.time() - start_time_lsuv)
if debug:
print("Free GPU Mem before first step %s MiB " %
(sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0] / 1024. / 1024.))
def save_exit():
print "\nsaving..."
winsol.save()
print "Done!"
print ' run for %s' % timedelta(seconds=(time.time() - start_time))
sys.exit(0)
try:
print ''
print "losses without training on validation sample up front"
evalHist = winsol.evaluate([xs_valid[0], xs_valid[1]], y_valid=y_valid)
if debug:
print("Free GPU Mem after validation check %s MiB " %
(sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0]
/ 1024. / 1024.))
print ''
print "Train %s epoch without norm" % NUM_EPOCHS_NONORM
time1 = time.time()
no_norm_events = int(NUM_EPOCHS_NONORM * N_TRAIN)
if no_norm_events:
hist = winsol.fit_gen(modelname='model_noNorm',
data_generator=input_gen,
validation=validation_data,
samples_per_epoch=no_norm_events)
if debug:
print("\nFree GPU Mem before train loop %s MiB " %
(sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0]
/ 1024. / 1024.))
print 'starting main training'
if no_norm_events:
eta = (time.time() - time1) / NUM_EPOCHS_NONORM * EPOCHS
print 'rough ETA %s sec. -> finishes at %s' % (
int(eta), datetime.now() + timedelta(seconds=eta))
winsol.full_fit(data_gen=input_gen,
validation=validation_data,
samples_per_epoch=N_TRAIN,
validate_every=VALIDATE_EVERY,
nb_epochs=EPOCHS,
data_gen_creator=create_data_gen)
except KeyboardInterrupt:
print "\ngot keyboard interuption"
save_exit()
except ValueError, e:
print "\ngot value error, could be the end of the generator in the fit"
print e
save_exit()
save_exit()
|
|
import os
import signal
import threading
import docker
import dockerpty
from subprocess import Popen, STDOUT, PIPE, SubprocessError, CalledProcessError
import spython
from spython.main.parse.parsers import DockerParser
from spython.main.parse.writers import SingularityWriter
from popper import utils as pu
from popper.cli import log as log
from popper.runner import StepRunner as StepRunner
from popper.utils import assert_executable_exists
class HostRunner(StepRunner):
"""Run a step directly on the host machine."""
def __init__(self, **kw):
super(HostRunner, self).__init__(**kw)
self._spawned_pids = set()
if self._config.reuse:
log.warning("Reuse not supported for HostRunner.")
def __enter__(self):
return self
def __exit__(self, exc_type, exc, traceback):
pass
def run(self, step):
step_env = self._prepare_environment(step, env=dict(os.environ))
if not step.runs:
raise AttributeError("Expecting 'runs' attribute in step.")
cmd = step.runs + tuple(step.args)
log.info(f"[{step.id}] {cmd}")
if self._config.dry_run:
return 0
log.debug(f"Environment:\n{pu.prettystr(step_env)}")
pid, ecode, _ = HostRunner._exec_cmd(
cmd, env=step_env, cwd=self._config.workspace_dir, pids=self._spawned_pids
)
if pid != 0:
self._spawned_pids.remove(pid)
return ecode
def stop_running_tasks(self):
for pid in self._spawned_pids:
log.info(f"Stopping proces {pid}")
os.kill(pid, signal.SIGKILL)
@staticmethod
def _exec_cmd(cmd, env=None, cwd=os.getcwd(), pids=set(), logging=True):
pid = 0
ecode = None
try:
with Popen(
cmd,
stdout=PIPE,
stderr=STDOUT,
universal_newlines=True,
preexec_fn=os.setsid,
env=env,
cwd=cwd,
) as p:
pid = p.pid
pids.add(p.pid)
log.debug("Reading process output")
output = []
for line in iter(p.stdout.readline, ""):
if logging:
log.step_info(line.rstrip())
else:
output.append(line.rstrip())
p.wait()
ecode = p.poll()
log.debug(f"Code returned by process: {ecode}")
except SubprocessError as ex:
output = ""
if not ecode:
ecode = 1
log.step_info(f"Command '{cmd[0]}' failed with: {ex}")
except Exception as ex:
output = ""
ecode = 1
log.step_info(f"Command raised non-SubprocessError error: {ex}")
return pid, ecode, "\n".join(output)
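# Example (hypothetical values) of the _exec_cmd contract that the container
# runners below rely on: it returns (pid, exit_code, output), and the combined
# stdout/stderr is only captured in `output` when logging=False (otherwise it is
# streamed to the step log and `output` is empty).
#
#   pid, ecode, out = HostRunner._exec_cmd(["podman", "version"], logging=False)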
class DockerRunner(StepRunner):
"""Runs steps in docker on the local machine."""
def __init__(self, init_docker_client=True, **kw):
super(DockerRunner, self).__init__(**kw)
self._spawned_containers = set()
self._d = None
if not init_docker_client:
return
try:
self._d = docker.from_env()
self._d.version()
except Exception as e:
log.debug(f"Docker error: {e}")
log.fail("Unable to connect to the docker daemon.")
log.debug(f"Docker info: {pu.prettystr(self._d.info())}")
def __exit__(self, exc_type, exc_value, exc_traceback):
if self._d:
self._d.close()
def run(self, step):
"""Execute the given step in docker."""
cid = pu.sanitized_name(step.id, self._config.wid)
container = self._find_container(cid)
if not container and self._config.reuse:
log.fail(
f"Cannot find an existing container for step '{step.id}' to be reused"
)
if container and not self._config.reuse and not self._config.dry_run:
container.remove(force=True)
container = None
if not container and not self._config.reuse:
container = self._create_container(cid, step)
log.info(f"[{step.id}] docker start")
if self._config.dry_run:
return 0
self._spawned_containers.add(container)
try:
container.start()
if self._config.pty:
dockerpty.start(self._d.api, container.id)
else:
cout = container.logs(stream=True)
for line in cout:
log.step_info(line.decode().rstrip())
e = container.wait()["StatusCode"]
except Exception as exc:
log.fail(exc)
return e
def stop_running_tasks(self):
for c in self._spawned_containers:
log.info(f"Stopping container {c.name}")
c.stop()
def _create_container(self, cid, step):
build, _, img, tag, build_ctx_path = self._get_build_info(step)
if build:
log.info(f"[{step.id}] docker build {img}:{tag} {build_ctx_path}")
if not self._config.dry_run:
streamer = self._d.api.build(
decode=True, path=build_ctx_path, tag=f"{img}:{tag}", rm=True,
)
for chunk in streamer:
if self._config.quiet:
continue
if "stream" in chunk:
lines = [line for line in chunk["stream"].splitlines() if line]
for line in lines:
log.step_info(line.strip())
elif not self._config.skip_pull and not step.skip_pull:
log.info(f"[{step.id}] docker pull {img}:{tag}")
if not self._config.dry_run:
streamer = self._d.api.pull(repository=f"{img}:{tag}", decode=True,)
for chunk in streamer.splitlines():
if self._config.quiet:
continue
chunk = chunk.strip()
if chunk:
import json
chunk = json.loads(chunk)
if "id" in chunk:
log.step_info(chunk["id"] + ": " + chunk["status"])
else:
log.step_info(chunk["status"])
if self._config.dry_run:
return
container_args = self._get_container_kwargs(step, f"{img}:{tag}", cid)
if "volumes" not in container_args:
container_args["volumes"] = []
else:
container_args["volumes"] = list(container_args["volumes"])
container_args["volumes"].append("/var/run/docker.sock:/var/run/docker.sock")
log.debug(f"Container args: {container_args}")
msg = f"[{step.id}] docker create name={cid}"
msg += f' image={container_args["image"]}'
if container_args["entrypoint"]:
msg += f' entrypoint={container_args["entrypoint"]}'
if container_args["command"]:
msg += f' command={container_args["command"]}'
log.info(msg)
container = self._d.containers.create(**container_args)
return container
def _find_container(self, cid):
"""Check whether the container exists."""
containers = self._d.containers.list(all=True, filters={"name": cid})
filtered_containers = [c for c in containers if c.name == cid]
if len(filtered_containers):
return filtered_containers[0]
return None
class PodmanRunner(StepRunner):
"""Runs steps in podman on the local machine."""
def __init__(self, init_podman_client=True, **kw):
super(PodmanRunner, self).__init__(**kw)
self._spawned_containers = set()
if not init_podman_client:
return
podman_executables = ["podman"]
for exe in podman_executables:
assert_executable_exists(exe)
try:
_, _, self._p_info = HostRunner._exec_cmd(["podman", "info"], logging=False)
self._p_version = HostRunner._exec_cmd(["podman", "version"], logging=False)
except Exception as e:
log.debug(f"Podman error: {e}")
log.fail("Unable to connect to podman, is it installed?")
log.debug(f"Podman info: {pu.prettystr(self._p_info)}")
def run(self, step):
"""Executes the given step in podman."""
cid = pu.sanitized_name(step.id, self._config.wid)
container = self._find_container(cid)
if not container and self._config.reuse:
log.fail(
f"Cannot find an existing container for step '{step.id}' to be reused"
)
if container and not self._config.reuse and not self._config.dry_run:
cmd = ["podman", "rm", "-f", container]
HostRunner._exec_cmd(cmd, logging=False)
container = None
if not container and not self._config.reuse:
container = self._create_container(cid, step)
log.info(f"[{step.id}] podman start")
if self._config.dry_run:
return 0
self._spawned_containers.add(container)
cmd = ["podman", "start", "-a", container]
_, e, _ = HostRunner._exec_cmd(cmd)
return e
def stop_running_tasks(self):
"""Stop containers started by Popper."""
for c in self._spawned_containers:
log.info(f"Stopping container {c}")
_, ecode, _ = HostRunner._exec_cmd(["podman", "stop", c], logging=False)
if ecode != 0:
log.warning(f"Failed to stop the {c} container")
def _find_container(self, cid):
"""Checks whether the container exists."""
cmd = ["podman", "inspect", "-f", "{{.Id}}", cid]
_, ecode, containers = HostRunner._exec_cmd(cmd, logging=False)
if ecode == 125:
return None
if ecode != 0:
log.fail(f"podman inspect fail: {containers}")
return containers.strip()
def _create_container(self, cid, step):
build, _, img, tag, build_ctx_path = self._get_build_info(step)
if build:
log.info(f"[{step.id}] podman build {img}:{tag} {build_ctx_path}")
if not self._config.dry_run:
cmd = [
"podman",
"build",
"--tag",
f"{img}:{tag}",
"--rm",
"--file",
build_ctx_path,
]
HostRunner._exec_cmd(cmd)
elif not self._config.skip_pull and not step.skip_pull:
log.info(f"[{step.id}] podman pull {img}:{tag}")
if not self._config.dry_run:
cmd = ["podman", "pull", f"{img}:{tag}"]
HostRunner._exec_cmd(cmd, logging=False)
if self._config.dry_run:
return
container_args = self._get_container_kwargs(step, f"{img}:{tag}", cid)
log.debug(f"Container args: {container_args}")
msg = [f"{step.id}", "podman", "create", f"name={cid}"]
msg.append(f"image={container_args.get('image')}")
msg.append(f"entrypoint={container_args.get('entrypoint')}" or "")
msg.append(f"command={container_args.get('command')}" or "")
log.info(msg)
cmd = ["podman", "create"]
cmd.extend(["--name", container_args.get("name") or ""])
cmd.extend(["-v", container_args.get("volumes")[0] or ""])
env = container_args.get("environment")
if env:
for i, j in env.items():
cmd.extend(["-e", f"{i}={j}"])
cmd.extend(["-d" if container_args.get("detach") else ""])
cmd.extend(["-w", container_args.get("working_dir")])
h = container_args.get("hostname", None)
if h:
cmd.extend(["-h", h])
domain_name = container_args.get("domainname", None)
if domain_name:
cmd.extend(["--domainname", domain_name])
tty = container_args.get("tty", None)
if tty:
cmd.extend(["-t", tty])
entrypoint = container_args.get("entrypoint")
if entrypoint:
cmd.extend(["--entrypoint", entrypoint[0]])
entrypoint_rmd = entrypoint[1:]
cmd.append(container_args.get("image"))
if entrypoint:
    cmd.extend(entrypoint_rmd)
if container_args.get("command"):
    cmd.extend(container_args["command"])
_, ecode, container = HostRunner._exec_cmd(cmd, logging=False)
if ecode != 0:
return None
container = container.rsplit()
if len(container) == 0:
return None
return container[-1]
class SingularityRunner(StepRunner):
"""Runs steps in singularity on the local machine."""
lock = threading.Lock()
def __init__(self, init_spython_client=True, **kw):
super(SingularityRunner, self).__init__(**kw)
self._spawned_containers = set()
self._s = None
if SingularityRunner._in_docker():
log.fail(
(
"You seem to be running Popper in a Docker container.\n"
"Singularity cannot be executed this way.\n"
"Either run Popper without Singularity or install Popper "
"through PIP.\n"
"Instructions are available here:\n"
"https://github.com/getpopper/popper/"
"blob/master/docs/installation.md"
)
)
if self._config.reuse:
log.fail("Reuse not supported for SingularityRunner.")
if not init_spython_client:
return
singularity_executables = ["singularity"]
for exe in singularity_executables:
assert_executable_exists(exe)
self._s = spython.main.Client
self._s.quiet = True
def run(self, step):
self._setup_singularity_cache()
cid = pu.sanitized_name(step.id, self._config.wid) + ".sif"
self._container = os.path.join(self._singularity_cache, cid)
exists = os.path.exists(self._container)
if exists and not self._config.dry_run and not self._config.skip_pull:
os.remove(self._container)
self._create_container(step, cid)
ecode = self._singularity_start(step, cid)
return ecode
@staticmethod
def _convert(dockerfile, singularityfile):
parser = DockerParser(dockerfile)
for p in parser.recipe.files:
p[0] = p[0].strip('"')
p[1] = p[1].strip('"')
if os.path.isdir(p[0]):
p[0] += "/."
writer = SingularityWriter(parser.recipe)
recipe = writer.convert()
with open(singularityfile, "w") as sf:
sf.write(recipe)
return singularityfile
@staticmethod
def _get_recipe_file(build_ctx_path, cid):
dockerfile = os.path.join(build_ctx_path, "Dockerfile")
singularityfile = os.path.join(
build_ctx_path, "Singularity.{}".format(cid[:-4])
)
if os.path.isfile(dockerfile):
return SingularityRunner._convert(dockerfile, singularityfile)
else:
log.fail("No Dockerfile was found.")
@staticmethod
def _in_docker():
""" Returns TRUE if we are being executed in a Docker container. """
if os.path.isfile("/proc/1/cgroup"):
with open("/proc/1/cgroup", "r") as f:
return "docker" in f.read() or "lxc" in f.read()
def _build_from_recipe(self, build_ctx_path, build_dest, cid):
# use the lock as a context manager so it is released even if the build fails
with SingularityRunner.lock:
    pwd = os.getcwd()
    os.chdir(build_ctx_path)
    try:
        recipefile = SingularityRunner._get_recipe_file(build_ctx_path, cid)
        self._s.build(
            recipe=recipefile,
            image=cid,
            build_folder=build_dest,
            force=True,
            quiet=self._config.quiet,
        )
    finally:
        os.chdir(pwd)
def _setup_singularity_cache(self):
self._singularity_cache = os.path.join(
self._config.cache_dir, "singularity", self._config.wid
)
os.makedirs(self._singularity_cache, exist_ok=True)
def _get_container_options(self):
container_args = {
"userns": True,
"pwd": "/workspace",
"bind": [f"{self._config.workspace_dir}:/workspace"],
}
self._update_with_engine_config(container_args)
options = []
for k, v in container_args.items():
if isinstance(v, list):
for item in v:
options.append(pu.key_value_to_flag(k, item))
else:
options.append(pu.key_value_to_flag(k, v))
options = " ".join(options).split(" ")
log.debug(f"container options: {options}\n")
return options
def _create_container(self, step, cid):
build, image, _, _, build_ctx_path = self._get_build_info(step)
if "shub://" in step.uses or "library://" in step.uses:
build = False
image = step.uses
build_ctx_path = None
if build:
log.info(f"[{step.id}] singularity build {cid} {build_ctx_path}")
if not self._config.dry_run:
self._build_from_recipe(build_ctx_path, self._singularity_cache, cid)
elif not self._config.skip_pull and not step.skip_pull:
log.info(f"[{step.id}] singularity pull {cid} {image}")
if not self._config.dry_run:
self._s.pull(image=image, name=cid, pull_folder=self._singularity_cache)
def _singularity_start(self, step, cid):
env = self._prepare_environment(step)
# set the environment variables
for k, v in env.items():
os.environ[k] = str(v)
args = list(step.args)
runs = list(step.runs)
ecode = None
if runs:
info = f"[{step.id}] singularity exec {cid} {runs}"
commands = runs
start_fn = self._s.execute
else:
info = f"[{step.id}] singularity run {cid} {args}"
commands = args
start_fn = self._s.run
log.info(info)
if self._config.dry_run:
return 0
options = self._get_container_options()
output = start_fn(self._container, commands, stream=True, options=options)
try:
for line in output:
log.step_info(line.strip("\n"))
ecode = 0
except CalledProcessError as ex:
ecode = ex.returncode
return ecode
def stop_running_tasks(self):
pass
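# Hedged usage sketch (not part of the module above): how a workflow engine might
# drive one of these runners. The constructor keyword arguments (e.g. a config
# object carrying wid/workspace_dir/dry_run) and the shape of `step` are
# assumptions inferred from the attributes referenced in the classes above.
#
#     runner = PodmanRunner(init_podman_client=True, config=config)
#     try:
#         ecode = runner.run(step)        # returns the container's exit code
#     finally:
#         runner.stop_running_tasks()     # stop anything the runner spawned
#     if ecode != 0:
#         log.fail(f"Step '{step.id}' failed with exit code {ecode}")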
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ApplicationSecurityGroupsOperations(object):
"""ApplicationSecurityGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationSecurityGroup"
"""Gets information about the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.ApplicationSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "_models.ApplicationSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationSecurityGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationSecurityGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "_models.ApplicationSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ApplicationSecurityGroup"]
"""Creates or updates an application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:param parameters: Parameters supplied to the create or update ApplicationSecurityGroup
operation.
:type parameters: ~azure.mgmt.network.v2018_11_01.models.ApplicationSecurityGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ApplicationSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_11_01.models.ApplicationSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationSecurityGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ApplicationSecurityGroup"]
"""Updates an application security group's tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:param parameters: Parameters supplied to update application security group tags.
:type parameters: ~azure.mgmt.network.v2018_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ApplicationSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_11_01.models.ApplicationSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ApplicationSecurityGroupListResult"]
"""Gets all application security groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_11_01.models.ApplicationSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationSecurityGroups'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ApplicationSecurityGroupListResult"]
"""Gets all the application security groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_11_01.models.ApplicationSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups'} # type: ignore
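# Example (hedged sketch, not generated code): these operations are normally reached
# through a NetworkManagementClient rather than by instantiating the class directly.
# Assumes the azure-identity and azure-mgmt-network packages are installed; names in
# angle brackets are placeholders.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     network_client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = network_client.application_security_groups.begin_create_or_update(
#         "<resource-group>", "<asg-name>", {"location": "westus"}
#     )
#     asg = poller.result()   # blocks until the long-running operation completes
#     for group in network_client.application_security_groups.list("<resource-group>"):
#         print(group.name)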
|
|
from django.test import TestCase
from django.core.files import File
from django.core.files.uploadedfile import SimpleUploadedFile
from django.conf import settings
import os.path
from validatedfile.fields import FileQuota
from testing.models import TestModel, TestModelNoValidate, TestContainer, TestElement
from testing.forms import TestModelForm, TestModelNoValidateForm, TestElementForm
class ValidatedFileFieldTest(TestCase):
SAMPLE_FILES_PATH = 'testing/sample_files'
def test_create_empty_instance(self):
instance = TestModel.objects.create()
def test_create_instance_with_file(self):
instance = TestModel.objects.create(
the_file = File(self._get_sample_file('image2k.png'), 'the_file.png')
)
self._check_file_url(instance.the_file, 'the_file.png')
instance.the_file.delete()
instance.delete()
def test_form_ok(self):
form = self._create_bound_test_model_form(form_class = TestModelForm,
orig_filename = 'image2k.png',
dest_filename = 'the_file.png',
content_type = 'image/png')
self.assertTrue(form.is_valid())
instance = form.save()
self._check_file_url(instance.the_file, 'the_file.png')
instance.the_file.delete()
instance.delete()
def test_form_invalid_size(self):
form = self._create_bound_test_model_form(form_class = TestModelForm,
orig_filename = 'image15k.png',
dest_filename = 'the_file.png',
content_type = 'image/png')
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(len(form.errors['the_file']), 1)
self.assertEqual(form.errors['the_file'][0], u'Files of size greater than 10.0 KB are not allowed. Your file is 14.2 KB')
def test_form_invalid_filetype(self):
form = self._create_bound_test_model_form(form_class = TestModelForm,
orig_filename = 'document1k.pdf',
dest_filename = 'the_file.pdf',
content_type = 'application/pdf')
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(len(form.errors['the_file']), 1)
self.assertEqual(form.errors['the_file'][0], u'Files of type application/pdf are not supported.')
def test_form_invalid_filetype_and_size(self):
form = self._create_bound_test_model_form(form_class = TestModelForm,
orig_filename = 'document15k.pdf',
dest_filename = 'the_file.pdf',
content_type = 'application/pdf')
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(len(form.errors['the_file']), 1)
self.assertEqual(form.errors['the_file'][0], u'Files of type application/pdf are not supported.')
def test_form_fake_filetype(self):
form = self._create_bound_test_model_form(form_class = TestModelForm,
orig_filename = 'document1k.pdf',
dest_filename = 'the_file.pdf',
content_type = 'image/png')
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(len(form.errors['the_file']), 1)
self.assertEqual(form.errors['the_file'][0], u'Files of type application/pdf are not supported.')
def test_form_no_validate(self):
form = self._create_bound_test_model_form(form_class = TestModelNoValidateForm,
orig_filename = 'document15k.pdf',
dest_filename = 'the_file.pdf',
content_type = 'application/pdf')
self.assertTrue(form.is_valid())
instance = form.save()
self._check_file_url(instance.the_file, 'the_file.pdf')
instance.the_file.delete()
instance.delete()
def test_form_null_file(self):
form = self._create_bound_test_model_form(form_class = TestModelNoValidateForm)
self.assertTrue(form.is_valid())
instance = form.save()
self.assertEqual(instance.the_file, None)
instance.delete()
def test_quota_empty(self):
container = self._create_container(name = 'container1')
quota = FileQuota()
quota.update(container.test_elements.all(), 'the_file')
self.assertEqual(quota.current_usage, 0)
self.assertFalse(quota.exceeds())
container.delete()
def test_quota_one_file(self):
container = self._create_container(name = 'container')
element = self._add_element(container = container,
orig_filename = 'image2k.png',
dest_filename = 'the_file.png')
quota = FileQuota()
quota.update(container.test_elements.all(), 'the_file')
self.assertEqual(quota.current_usage, 2120)
self.assertFalse(quota.exceeds())
element.the_file.delete()
element.delete()
container.delete()
def test_quota_several_files_several_containers(self):
container1 = self._create_container(name = 'container1')
element1 = self._add_element(container = container1,
orig_filename = 'image2k.png',
dest_filename = 'the_file1.png')
element2 = self._add_element(container = container1,
orig_filename = 'image15k.png',
dest_filename = 'the_file2.png')
container2 = self._create_container(name = 'container2')
element3 = self._add_element(container = container2,
orig_filename = 'document15k.pdf',
dest_filename = 'the_file3.pdf')
quota = FileQuota(max_usage = 20000)
quota.update(container1.test_elements.all(), 'the_file')
self.assertEqual(quota.current_usage, 16706)
self.assertFalse(quota.exceeds())
element1.the_file.delete()
element2.the_file.delete()
element3.the_file.delete()
element1.delete()
element2.delete()
element3.delete()
container1.delete()
container2.delete()
def test_quota_exceeds(self):
quota = FileQuota(max_usage = 1000)
container = self._create_container(name = 'container1')
quota.update(container.test_elements.all(), 'the_file')
self.assertEqual(quota.current_usage, 0)
self.assertFalse(quota.exceeds())
self.assertTrue(quota.exceeds(2120))
element = self._add_element(container = container,
orig_filename = 'image2k.png',
dest_filename = 'the_file.png')
quota.update(container.test_elements.all(), 'the_file')
self.assertEqual(quota.current_usage, 2120)
self.assertTrue(quota.exceeds())
element.the_file.delete()
element.delete()
container.delete()
def test_quota_near_limit(self):
quota = FileQuota(max_usage = 6500)
container = self._create_container(name = 'container1')
quota.update(container.test_elements.all(), 'the_file')
self.assertEqual(quota.current_usage, 0)
self.assertFalse(quota.near_limit())
element1 = self._add_element(container = container,
orig_filename = 'image2k.png',
dest_filename = 'the_file.png')
quota.update(container.test_elements.all(), 'the_file')
self.assertEqual(quota.current_usage, 2120)
self.assertFalse(quota.near_limit())
element2 = self._add_element(container = container,
orig_filename = 'image2k.png',
dest_filename = 'the_file.png')
quota.update(container.test_elements.all(), 'the_file')
self.assertEqual(quota.current_usage, 4240)
self.assertFalse(quota.near_limit())
element3 = self._add_element(container = container,
orig_filename = 'image2k.png',
dest_filename = 'the_file.png')
quota.update(container.test_elements.all(), 'the_file')
self.assertEqual(quota.current_usage, 6360)
self.assertTrue(quota.near_limit())
element1.the_file.delete()
element2.the_file.delete()
element3.the_file.delete()
element1.delete()
element2.delete()
element3.delete()
container.delete()
def test_form_quota_check(self):
container = self._create_container(name = 'container1')
form1 = self._create_unbound_test_element_form(container = container)
self.assertFalse(form1.exceeds_quota())
element = self._add_element(container = container,
orig_filename = 'image15k.png',
dest_filename = 'the_file.png')
form2 = self._create_unbound_test_element_form(container = container)
self.assertTrue(form2.exceeds_quota())
element.the_file.delete()
element.delete()
container.delete()
def test_form_quota_ok(self):
container = self._create_container(name = 'container1')
form = self._create_bound_test_element_form(container = container,
orig_filename = 'image2k.png',
dest_filename = 'the_file.png',
content_type = 'image/png')
self.assertTrue(form.is_valid())
container.delete()
def test_form_quota_exceeded(self):
container = self._create_container(name = 'container1')
element = self._add_element(container = container,
orig_filename = 'image2k.png',
dest_filename = 'the_file.png')
form = self._create_bound_test_element_form(container = container,
orig_filename = 'image15k.png',
dest_filename = 'the_file.png',
content_type = 'image/png')
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(len(form.errors['the_file']), 1)
self.assertEqual(form.errors['the_file'][0],
u'Please keep the total uploaded files under 9.8 KB. With this file, the total would be 16.3 KB.')
element.the_file.delete()
element.delete()
container.delete()
# Utilities
def _get_sample_file(self, filename):
path = os.path.join(self.SAMPLE_FILES_PATH, filename)
return open(path, 'rb')
def _check_file_url(self, filefield, filename):
url = os.path.join(settings.MEDIA_URL, filefield.field.upload_to, filename)
self.assertEqual(filefield.url, url)
def _get_file_url(self, filename, prefix=''):
    return os.path.join(settings.MEDIA_ROOT, prefix, filename)
def _create_bound_test_model_form(self, form_class, orig_filename = None,
dest_filename = None, content_type = None):
if orig_filename and dest_filename and content_type:
uploaded_file = SimpleUploadedFile(
name = dest_filename,
content = self._get_sample_file(orig_filename).read(),
content_type = content_type,
)
files = {'the_file': uploaded_file}
else:
files = {}
form = form_class(data = {}, files = files)
return form
def _create_container(self, name):
return TestContainer.objects.create(name = name)
def _add_element(self, container, orig_filename, dest_filename):
return container.test_elements.create(
the_file = File(self._get_sample_file(orig_filename), dest_filename)
)
def _create_unbound_test_element_form(self, container):
return TestElementForm(container = container)
def _create_bound_test_element_form(self, container, orig_filename = None,
dest_filename = None, content_type = None):
if orig_filename and dest_filename and content_type:
uploaded_file = SimpleUploadedFile(
name = dest_filename,
content = self._get_sample_file(orig_filename).read(),
content_type = content_type,
)
files = {'the_file': uploaded_file}
else:
files = {}
form = TestElementForm(container = container, data = {}, files = files)
return form
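# Hedged sketch (not part of the test suite): the FileQuota pattern exercised above,
# as it might be used outside a test. The related-manager name `test_elements`, the
# field name `the_file`, and the helper `warn_user_about_quota` are taken from or
# invented for illustration; a real project would substitute its own.
#
#     quota = FileQuota(max_usage=20000)                       # limit in bytes
#     quota.update(container.test_elements.all(), 'the_file')  # sum current usage
#     if quota.exceeds(new_upload.size):
#         raise ValueError('Upload would exceed the storage quota')
#     elif quota.near_limit():
#         warn_user_about_quota()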
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pipeline options validator.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
import logging
import re
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.options.pipeline_options import WorkerOptions
_LOGGER = logging.getLogger(__name__)
class PipelineOptionsValidator(object):
"""Validates PipelineOptions.
Goes through a list of known PipelineOptions subclasses and calls::
validate(validator)
if one is implemented, and aggregates the validation errors returned by each
subclass into a single list.
"""
# Validator will call validate on these subclasses of PipelineOptions
OPTIONS = [
DebugOptions,
GoogleCloudOptions,
PortableOptions,
SetupOptions,
StandardOptions,
TestOptions,
TypeOptions,
WorkerOptions
]
# Mutually exclusive options for different types of portable environments.
REQUIRED_ENVIRONMENT_OPTIONS = {
'DOCKER': [],
'PROCESS': ['process_command'],
'EXTERNAL': ['external_service_address'],
'LOOPBACK': []
}
OPTIONAL_ENVIRONMENT_OPTIONS = {
'DOCKER': ['docker_container_image'],
'PROCESS': ['process_variables'],
'EXTERNAL': [],
'LOOPBACK': []
}
# Possible validation errors.
ERR_MISSING_OPTION = 'Missing required option: %s.'
ERR_MISSING_GCS_PATH = 'Missing GCS path option: %s.'
ERR_INVALID_GCS_PATH = 'Invalid GCS path (%s), given for the option: %s.'
ERR_INVALID_GCS_BUCKET = (
'Invalid GCS bucket (%s), given for the option: %s. See '
'https://developers.google.com/storage/docs/bucketnaming '
'for more details.')
ERR_INVALID_GCS_OBJECT = 'Invalid GCS object (%s), given for the option: %s.'
ERR_INVALID_JOB_NAME = (
'Invalid job_name (%s); the name must consist of only the characters '
'[-a-z0-9], starting with a letter and ending with a letter or number')
ERR_INVALID_PROJECT_NUMBER = (
'Invalid Project ID (%s). Please make sure you specified the Project ID, '
'not project number.')
ERR_INVALID_PROJECT_ID = (
'Invalid Project ID (%s). Please make sure you specified the Project ID, '
'not project description.')
ERR_INVALID_NOT_POSITIVE = (
'Invalid value (%s) for option: %s. Value needs '
'to be positive.')
ERR_INVALID_TEST_MATCHER_TYPE = (
'Invalid value (%s) for option: %s. Please extend your matcher object '
'from hamcrest.core.base_matcher.BaseMatcher.')
ERR_INVALID_TEST_MATCHER_UNPICKLABLE = (
'Invalid value (%s) for option: %s. Please make sure the test matcher '
'is picklable.')
ERR_INVALID_TRANSFORM_NAME_MAPPING = (
'Invalid transform name mapping format. Please make sure the mapping is '
'string key-value pairs. Invalid pair: (%s:%s)')
ERR_INVALID_ENVIRONMENT = (
'Option %s is not compatible with environment type %s.')
ERR_ENVIRONMENT_CONFIG = (
'Option environment_config is incompatible with option(s) %s.')
ERR_MISSING_REQUIRED_ENVIRONMENT_OPTION = (
'Option %s is required for environment type %s.')
ERR_NUM_WORKERS_TOO_HIGH = (
'num_workers (%s) cannot exceed max_num_workers (%s)')
# GCS path specific patterns.
GCS_URI = '(?P<SCHEME>[^:]+)://(?P<BUCKET>[^/]+)(/(?P<OBJECT>.*))?'
GCS_BUCKET = '^[a-z0-9][-_a-z0-9.]+[a-z0-9]$'
GCS_SCHEME = 'gs'
# GoogleCloudOptions specific patterns.
JOB_PATTERN = '[a-z]([-a-z0-9]*[a-z0-9])?'
PROJECT_ID_PATTERN = '[a-z][-a-z0-9:.]+[a-z0-9]'
PROJECT_NUMBER_PATTERN = '[0-9]*'
ENDPOINT_PATTERN = r'https://[\S]*googleapis\.com[/]?'
def __init__(self, options, runner):
self.options = options
self.runner = runner
def validate(self):
"""Calls validate on subclassess and returns a list of errors.
validate will call validate method on subclasses, accumulate the returned
list of errors, and returns the aggregate list.
Returns:
Aggregate list of errors after all calling all possible validate methods.
"""
errors = []
for cls in self.OPTIONS:
if 'validate' in cls.__dict__ and callable(cls.__dict__['validate']):
errors.extend(self.options.view_as(cls).validate(self))
return errors
def is_service_runner(self):
"""True if pipeline will execute on the Google Cloud Dataflow service."""
is_service_runner = (
self.runner is not None and
type(self.runner).__name__ in ['DataflowRunner', 'TestDataflowRunner'])
dataflow_endpoint = (
self.options.view_as(GoogleCloudOptions).dataflow_endpoint)
is_service_endpoint = (
dataflow_endpoint is not None and
self.is_full_string_match(self.ENDPOINT_PATTERN, dataflow_endpoint))
return is_service_runner and is_service_endpoint
def is_full_string_match(self, pattern, string):
"""Returns True if the pattern matches the whole string."""
pattern = '^%s$' % pattern
return re.search(pattern, string) is not None
def _validate_error(self, err, *args):
return [err % args]
def validate_gcs_path(self, view, arg_name):
"""Validates a GCS path against gs://bucket/object URI format."""
arg = getattr(view, arg_name, None)
if arg is None:
return self._validate_error(self.ERR_MISSING_GCS_PATH, arg_name)
match = re.match(self.GCS_URI, arg, re.DOTALL)
if match is None:
return self._validate_error(self.ERR_INVALID_GCS_PATH, arg, arg_name)
scheme = match.group('SCHEME')
bucket = match.group('BUCKET')
gcs_object = match.group('OBJECT')
if ((scheme is None) or (scheme.lower() != self.GCS_SCHEME) or
(bucket is None)):
return self._validate_error(self.ERR_INVALID_GCS_PATH, arg, arg_name)
if not self.is_full_string_match(self.GCS_BUCKET, bucket):
return self._validate_error(self.ERR_INVALID_GCS_BUCKET, arg, arg_name)
if gcs_object is None or '\n' in gcs_object or '\r' in gcs_object:
return self._validate_error(self.ERR_INVALID_GCS_OBJECT, arg, arg_name)
return []
def validate_cloud_options(self, view):
"""Validates job_name and project arguments."""
errors = []
if (view.job_name and
not self.is_full_string_match(self.JOB_PATTERN, view.job_name)):
errors.extend(
self._validate_error(self.ERR_INVALID_JOB_NAME, view.job_name))
project = view.project
if project is None:
errors.extend(self._validate_error(self.ERR_MISSING_OPTION, 'project'))
else:
if self.is_full_string_match(self.PROJECT_NUMBER_PATTERN, project):
errors.extend(
self._validate_error(self.ERR_INVALID_PROJECT_NUMBER, project))
elif not self.is_full_string_match(self.PROJECT_ID_PATTERN, project):
errors.extend(
self._validate_error(self.ERR_INVALID_PROJECT_ID, project))
if view.update:
if not view.job_name:
errors.extend(
self._validate_error(
'Existing job name must be provided when updating a pipeline.'))
if view.transform_name_mapping:
if not view.update or not self.options.view_as(StandardOptions).streaming:
errors.append(
'Transform name mapping option is only useful when '
'--update and --streaming are specified')
for _, (key, value) in enumerate(view.transform_name_mapping.items()):
if not isinstance(key, str) or not isinstance(value, str):
errors.extend(
self._validate_error(
self.ERR_INVALID_TRANSFORM_NAME_MAPPING, key, value))
break
if view.region is None and self.is_service_runner():
default_region = self.runner.get_default_gcp_region()
if default_region is None:
errors.extend(self._validate_error(self.ERR_MISSING_OPTION, 'region'))
else:
view.region = default_region
return errors
def validate_sdk_container_image_options(self, view):
errors = []
if view.sdk_container_image and view.worker_harness_container_image:
# To be fully backwards-compatible, the two flags are kept in sync. If both
# were set explicitly they must agree; report an error if they differ.
if view.sdk_container_image != view.worker_harness_container_image:
errors.extend(
self._validate_error(
'Cannot use legacy flag --worker_harness_container_image along '
'with --sdk_container_image.'))
elif view.worker_harness_container_image:
# Warn about legacy flag and set new flag to value of old flag.
_LOGGER.warning(
'Setting sdk_container_image to value of legacy flag '
'worker_harness_container_image.')
view.sdk_container_image = view.worker_harness_container_image
elif view.sdk_container_image:
# Set legacy option to value of new option.
view.worker_harness_container_image = view.sdk_container_image
return errors
def validate_num_workers(self, view):
"""Validates that Dataflow worker number is valid."""
errors = self.validate_optional_argument_positive(view, 'num_workers')
errors.extend(
self.validate_optional_argument_positive(view, 'max_num_workers'))
num_workers = view.num_workers
max_num_workers = view.max_num_workers
if (num_workers is not None and max_num_workers is not None and
num_workers > max_num_workers):
errors.extend(
self._validate_error(
self.ERR_NUM_WORKERS_TOO_HIGH, num_workers, max_num_workers))
return errors
def validate_worker_region_zone(self, view):
"""Validates Dataflow worker region and zone arguments are consistent."""
errors = []
if view.zone and (view.worker_region or view.worker_zone):
errors.extend(
self._validate_error(
'Cannot use deprecated flag --zone along with worker_region or '
'worker_zone.'))
if self.options.view_as(DebugOptions).lookup_experiment('worker_region')\
and (view.worker_region or view.worker_zone):
errors.extend(
self._validate_error(
'Cannot use deprecated experiment worker_region along with '
'worker_region or worker_zone.'))
if view.worker_region and view.worker_zone:
errors.extend(
self._validate_error(
'worker_region and worker_zone are mutually exclusive.'))
if view.zone:
_LOGGER.warning(
'Option --zone is deprecated. Please use --worker_zone instead.')
view.worker_zone = view.zone
view.zone = None
return errors
def validate_optional_argument_positive(self, view, arg_name):
"""Validates that an optional argument (if set) has a positive value."""
arg = getattr(view, arg_name, None)
if arg is not None and int(arg) <= 0:
return self._validate_error(self.ERR_INVALID_NOT_POSITIVE, arg, arg_name)
return []
def validate_test_matcher(self, view, arg_name):
"""Validates that on_success_matcher argument if set.
Validates that on_success_matcher is unpicklable and is instance
of `hamcrest.core.base_matcher.BaseMatcher`.
"""
# This is a test only method and requires hamcrest
from hamcrest.core.base_matcher import BaseMatcher
pickled_matcher = view.on_success_matcher
errors = []
try:
matcher = pickler.loads(pickled_matcher)
if not isinstance(matcher, BaseMatcher):
errors.extend(
self._validate_error(
self.ERR_INVALID_TEST_MATCHER_TYPE, matcher, arg_name))
except: # pylint: disable=bare-except
errors.extend(
self._validate_error(
self.ERR_INVALID_TEST_MATCHER_UNPICKLABLE,
pickled_matcher,
arg_name))
return errors
def validate_environment_options(self, view):
"""Validates portable environment options."""
errors = []
actual_environment_type = (
view.environment_type.upper() if view.environment_type else None)
for environment_type, required in self.REQUIRED_ENVIRONMENT_OPTIONS.items():
found_required_options = [
opt for opt in required
if view.lookup_environment_option(opt) is not None
]
found_optional_options = [
opt for opt in self.OPTIONAL_ENVIRONMENT_OPTIONS[environment_type]
if view.lookup_environment_option(opt) is not None
]
found_options = found_required_options + found_optional_options
if environment_type == actual_environment_type:
if view.environment_config:
if found_options:
errors.extend(
self._validate_error(
self.ERR_ENVIRONMENT_CONFIG, ', '.join(found_options)))
else:
missing_options = set(required).difference(
set(found_required_options))
for opt in missing_options:
errors.extend(
self._validate_error(
self.ERR_MISSING_REQUIRED_ENVIRONMENT_OPTION,
opt,
environment_type))
else:
# Environment options classes are mutually exclusive.
for opt in found_options:
errors.extend(
self._validate_error(
self.ERR_INVALID_ENVIRONMENT, opt, actual_environment_type))
if actual_environment_type == 'LOOPBACK' and view.environment_config:
errors.extend(
self._validate_error(
self.ERR_INVALID_ENVIRONMENT, 'environment_config', 'LOOPBACK'))
return errors
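# Hedged usage sketch (not part of the module above): how the validator is typically
# driven. Passing runner=None skips the Dataflow-service-specific checks; the option
# values shown are placeholders.
#
#     from apache_beam.options.pipeline_options import PipelineOptions
#
#     options = PipelineOptions(['--project', 'my-project', '--job_name', 'my-job'])
#     errors = PipelineOptionsValidator(options, runner=None).validate()
#     if errors:
#         raise ValueError('Pipeline options failed validation:\n' + '\n'.join(errors))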
|
|
import time
import pytest
import datetime
import cronparse
def test_input_cron():
"""
Make sure we can send values to cron
"""
parser = cronparse.CronParse()
with pytest.raises(TypeError):
parser.set_cron(input_cron=1)
with pytest.raises(ValueError):
parser.set_cron(input_cron='invalid string')
parser.set_cron(input_cron = '* * * * *')
for x in parser.cron_parts.values():
assert x == '*'
parser.set_cron(input_cron = '10 * * * *')
assert parser.cron_parts['minute'] == '10'
assert parser.cron_parts['hour'] == '*'
parser.set_cron(input_cron = '*/10 * * * *')
assert parser.cron_parts['minute'] == '*/10'
assert parser.cron_parts['hour'] == '*'
with pytest.raises(ValueError):
parser.set_cron(input_cron='*/70 * * * *')
with pytest.raises(ValueError):
parser.set_cron(input_cron='* 24 * * *')
def test_get_time(monkeypatch):
"""
Test time creation using fixed times.
"""
def fake_time(*args, **kwargs):
return 1411410214.388395
monkeypatch.setattr(cronparse.time, 'time', fake_time)
parser = cronparse.CronParse()
result = parser.get_time()
expected = datetime.datetime(year=2014, month=9, day=22,
hour=11, minute=23, second=34,
microsecond=388395)
assert result == expected
def test_get_day_of_week(monkeypatch):
class fake_weekday(object):
def weekday(self):
return 6
parser = cronparse.CronParse()
result = parser.get_day_of_week(date=fake_weekday())
assert result == 0
def test_validate_dt_part():
dt = datetime.datetime(year=2014, month=8, day=8, hour=8, minute=10)
parser = cronparse.CronParse()
parser.set_cron(input_cron='15 */2 8 * *')
assert parser.validate_dt_part(dt=dt, component='hour')
assert parser.validate_dt_part(dt=dt, component='day')
assert parser.validate_dt_part(dt=dt, component='month')
assert not parser.validate_dt_part(dt=dt, component='minute')
def test_validate_dow():
dt = datetime.datetime(year=2014, month=10, day=3, hour=8, minute=10)
parser = cronparse.CronParse()
parser.set_cron(input_cron='* * * * 5')
assert parser.validate_dow(dt=dt)
parser.set_cron(input_cron='* * * * 4')
assert not parser.validate_dow(dt=dt)
def test_brute_next():
"""
Can we just brute force this thing?
"""
dt = datetime.datetime(year=2014, month=8, day=8, hour=8, minute=8)
parser = cronparse.CronParse()
parser.set_cron(input_cron='* * * * *')
assert parser.brute_next(now=dt) == dt
parser.set_cron(input_cron='10 * * * *')
assert parser.brute_next(now=dt) == datetime.datetime(year=2014, month=8,
day=8, hour=8,
minute=10)
parser.set_cron(input_cron='* 10 * * *')
assert parser.brute_next(now=dt) == datetime.datetime(year=2014, month=8,
day=8, hour=10,
minute=0)
parser.set_cron(input_cron='5 * * * *')
assert parser.brute_next(now=dt) == datetime.datetime(year=2014, month=8,
day=8, hour=9,
minute=5)
parser.set_cron(input_cron='*/10 * * * *')
assert parser.brute_next(now=dt) == datetime.datetime(year=2014, month=8,
day=8, hour=8,
minute=10)
parser.set_cron(input_cron='5 */10 * * *')
assert parser.brute_next(now=dt) == datetime.datetime(year=2014, month=8,
day=8, hour=10,
minute=5)
parser.set_cron(input_cron='5 6 30 1 *')
assert parser.brute_next(now=dt) == datetime.datetime(year=2015, month=1,
day=30, hour=6,
minute=5)
parser.set_cron(input_cron='1 2 * * 3')
assert parser.brute_next(now=dt) == datetime.datetime(year=2014, month=8,
day=13, hour=2,
minute=1)
# Should use dow instead of day as that is closer
parser.set_cron(input_cron='1 2 22 * 3')
assert parser.brute_next(now=dt) == datetime.datetime(year=2014, month=8,
day=13, hour=2,
minute=1)
# Lists
parser.set_cron(input_cron='2,3,4,5 2 22 * 3')
assert parser.brute_next(now=dt) == datetime.datetime(year=2014, month=8,
day=13, hour=2,
minute=2)
# Range
parser.set_cron(input_cron='2-5 2 22 * 3')
assert parser.brute_next(now=dt) == datetime.datetime(year=2014, month=8,
day=13, hour=2,
minute=2)
# Longest test I know of
parser.set_cron(input_cron='59 14-23/23 29 2 *')
start = time.time()
result = parser.brute_next(now=dt)
print 'Timing test took %f' % (time.time() - start)
assert result == datetime.datetime(year=2016, month=2, day=29,
hour=23, minute=59)
def test_profile():
import cProfile, pstats
parser = cronparse.CronParse()
parser.set_cron(input_cron='1-59/59 14-23/23 29 2 *')
def run_test(parser, n):
for x in xrange(n):
dt = datetime.datetime(year=2014, month=8, day=8, hour=8, minute=8)
parser.brute_next(now=dt)
cProfile.runctx("run_test(parser=parser, n=1000)", globals(), locals())
def deprecated_test_pick_minute(monkeypatch):
now = datetime.datetime(year=2014, month=8, day=8, hour=8, minute=20)
parser = cronparse.CronParse()
# Should return right now
parser.set_cron(input_cron='* * * * *')
result = parser.pick_minute(now=now)
assert result == now
# Should return now
parser.set_cron(input_cron = '20 * * * *')
result = parser.pick_minute(now=now)
assert result == now
# Since the tenth minute has already passed, should go to the next hour
parser.set_cron(input_cron = '10 * * * *')
result = parser.pick_minute(now=now)
assert result == datetime.datetime(year=2014, month=8, day=8, hour=9,
minute=10)
parser.set_cron(input_cron = '*/15 * * * *')
result = parser.pick_minute(now=now)
assert result == datetime.datetime(year=2014, month=8, day=8, hour=8,
minute=30)
def deprecated_test_pick_hour():
"""
Grab an hour
"""
now = datetime.datetime(year=2014, month=8, day=8, hour=8, minute=20)
parser = cronparse.CronParse()
# Should return right now
parser.set_cron(input_cron='* * * * *')
result = parser.pick_hour(now=now)
assert result == now
# Should return right now
parser.set_cron(input_cron='* 8 * * *')
result = parser.pick_hour(now=now)
assert result == now
# Should return sometime tomorrow
parser.set_cron(input_cron='* 7 * * *')
result = parser.pick_hour(now=now)
assert result == datetime.datetime(year=2014, month=8, day=9,
hour=7, minute=0)
# Should return next hour
parser.set_cron(input_cron='* */2 * * *')
result = parser.pick_hour(now=now)
assert result == datetime.datetime(year=2014, month=8, day=8,
hour=10, minute=0)
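# Hedged usage sketch (not an automated test): the API exercised above suggests
# that computing the next matching run time is just set_cron() followed by
# brute_next(); cronparse.CronParse is assumed to behave as these tests expect.
def demo_next_run():
    parser = cronparse.CronParse()
    # "minute 30 of every second hour" in standard five-field cron syntax
    parser.set_cron(input_cron='30 */2 * * *')
    now = datetime.datetime(year=2014, month=8, day=8, hour=8, minute=45)
    # brute_next() walks forward until minute, hour, day, month and dow all match
    return parser.brute_next(now=now)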
|
|
#
# Copyright 2012 John Gerlock
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from elementtree.ElementTree import Element, SubElement
from elementtree.ElementTree import dump, tostring, fromstring
import requests
class WebToolsRequest():
def __init__(self, user_id):
self.user_id = user_id
self.api_url = 'https://secure.shippingapis.com/ShippingAPI.dll'
self.test_api_url = 'https://secure.shippingapis.com/ShippingAPITest.dll'
self.address_fields = ('FirmName', 'Address1', 'Address2',
'City', 'State', 'Zip5', 'Zip4')
self.verify_root_tag = 'AddressValidateRequest'
self.zipcode_lookup_root_tag = 'ZipCodeLookupRequest'
self.citystate_lookup_root_tag = 'CityStateLookupRequest'
self.test_data = [
{
'address2':'6406 Ivy Lane',
'city':'Greenbelt',
'state':'MD'
},
{
'address2':'8 Wildwood Drive',
'city':'Old Lyme',
'state':'CT',
'zip5':'06371'
}]
def build_request_xml(self, data, root_tag):
root = Element(root_tag, USERID=self.user_id)
for i, address in enumerate(data):
address_element = SubElement(root, 'Address', ID=str(i))
for field in self.address_fields:
SubElement(
address_element, field).text = address.get(field.lower())
return tostring(root)
def request(self, api_name, xml, test=False):
if test:
response = requests.get(
self.test_api_url, params={'API': api_name, 'XML': xml})
else:
response = requests.get(
self.api_url, params={'API': api_name, 'XML': xml})
return response
def verify(self, data):
api_name = 'Verify'
xml = self.build_request_xml(data, self.verify_root_tag)
response = self.request(api_name, xml)
return Response(response)
def zipcode_lookup(self, data):
api_name = 'ZipCodeLookup'
xml = self.build_request_xml(data, self.zipcode_lookup_root_tag)
response = self.request(api_name, xml)
return Response(response)
def citystate_lookup(self, data):
api_name = 'CityStateLookup'
        xml = self.build_request_xml(data, self.citystate_lookup_root_tag)
response = self.request(api_name, xml)
return Response(response)
def verify_test(self):
api_name = 'Verify'
xml = self.build_request_xml(self.test_data, self.verify_root_tag)
response = self.request(api_name, xml)
print response.content
def zipcode_lookup_test(self):
api_name = 'ZipCodeLookup'
xml = self.build_request_xml(
self.test_data, self.zipcode_lookup_root_tag)
response = self.request(api_name, xml)
print response.content
def citystate_lookup_test(self):
api_name = 'CityStateLookup'
xml = self.build_request_xml(
self.test_data, self.citystate_lookup_root_tag)
response = self.request(api_name, xml)
print response.content
def make_all_test_requests(self):
self.verify_test()
self.zipcode_lookup_test()
self.citystate_lookup_test()
class Response():
def __init__(self, response):
self.address_fields = (
'FirmName',
'Address1',
'Address2',
'City',
'State',
'Zip5',
'Zip4')
self.response = response
self.et = self.response_to_et(self.response)
self.check_et_errors(self.et)
self.dict = self.build_address_dict(self.et)
self.index = self.address_count
def __iter__(self):
return self
def __getitem__(self, key):
if self.dict.get(str(key)):
return self.dict[str(key)]
else:
raise IndexError
    def next(self):
        if self.index == 0:
            raise StopIteration
        self.index = self.index - 1
        return self.dict[str(self.index)]
def dump(self):
print self.response.status_code
print self.response.content
if self.et:
dump(self.et)
    def check_response_errors(self, response):
        if response.status_code != 200:
            self.dump()
            raise Exception
def response_to_et(self, response):
return fromstring(response.content)
def check_et_errors(self, et):
if et.tag == 'Error':
self.dump()
raise Exception
else:
return et
def build_address_dict(self, et):
addresses = {}
for address_element in et.getiterator('Address'):
address = {}
id = address_element.get('ID')
address['id'] = id
for key in self.address_fields:
address[str(key).lower()] = address_element.findtext(key)
addresses[id] = WebToolsAddress(address)
return addresses
@property
def address_count(self):
return len(self.et.getiterator('Address'))
class WebToolsAddress():
    def __init__(self, address):
        self._address = address
def __str__(self):
fields = ('firmname', 'address1', 'address2')
add_string = ''
for field in fields:
if self.address[field]:
add_string += '%s\n' % self.address[field]
add_string += self.last_line
return add_string
    @property
    def address(self):
        return self._address
@property
def address1(self):
return self.address['address1']
@property
def address2(self):
return self.address['address2']
@property
def city(self):
return self.address['city']
@property
def state(self):
return self.address['state']
@property
def zip4(self):
return self.address['zip4']
@property
def zip5(self):
return self.address['zip5']
@property
def address_lines(self):
if self.address1:
return '%s\n%s' % (self.address1, self.address2)
else:
return '%s' % self.address2
@property
def zipcode(self):
return '%s-%s' % (self.zip5, self.zip4)
@property
def citystate(self):
return '%s, %s' % (self.city, self.state)
@property
def last_line(self):
return '%s %s' % (self.citystate, self.zipcode)
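# Hedged usage sketch: the classes above suggest the flow below. The USERID is
# a placeholder and the request would hit the live USPS endpoint, so it is kept
# under a __main__ guard.
if __name__ == '__main__':
    client = WebToolsRequest(user_id='YOUR_USPS_USER_ID')
    result = client.verify([{'address2': '6406 Ivy Lane',
                             'city': 'Greenbelt',
                             'state': 'MD'}])
    # Response objects are indexable by the Address ID assigned in
    # build_request_xml(), and each entry is a WebToolsAddress.
    print result[0].last_line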
|
|
import arrow
from path import Path
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.expression import func
from sqlalchemy import Column, Integer, String, Text
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import twittback
import twittback.config
class NoSuchId(Exception):
def __init__(self, twitter_id):
super().__init__(twitter_id)
self.twitter_id = twitter_id
Base = declarative_base()
class Tweet(Base):
__tablename__ = "tweets"
twitter_id = Column(Integer, primary_key=True)
text = Column(Text)
timestamp = Column(Integer)
def to_tweet(self):
return twittback.Tweet(
twitter_id=self.twitter_id, text=self.text, timestamp=self.timestamp
)
@classmethod
def from_(cls, tweet):
return cls(
twitter_id=tweet.twitter_id, text=tweet.text, timestamp=tweet.timestamp
)
class _UserModel:
screen_name = Column(String, primary_key=True)
name = Column(Text)
description = Column(Text)
location = Column(Text)
@classmethod
def from_(cls, user):
return cls(
screen_name=user.screen_name,
name=user.name,
description=user.description,
location=user.location,
)
def to_user(self):
return twittback.User(
screen_name=self.screen_name,
description=self.description,
location=self.location,
name=self.name,
)
class User(Base, _UserModel):
__tablename__ = "user"
class Following(Base, _UserModel):
__tablename__ = "following"
class Followers(Base, _UserModel):
__tablename__ = "followers"
class Repository:
def __init__(self, db_path):
self.db_path = db_path
connect_string = "sqlite:///" + db_path
self.engine = create_engine(connect_string)
session_maker = sessionmaker(bind=self.engine)
self.session = session_maker()
if self.db_path == ":memory:" or not self.db_path.exists():
self.init_db()
def init_db(self):
Base.metadata.create_all(self.engine)
def query(self, *args, **kwargs):
return self.session.query(*args, **kwargs)
def add(self, *args, **kwargs):
return self.session.add(*args, **kwargs)
def commit(self):
return self.session.commit()
def add_tweets(self, tweets):
for tweet in tweets:
to_add = Tweet.from_(tweet)
self.add(to_add)
self.commit()
def latest_tweet(self):
latest_tweets = self.latest_tweets()
try:
latest_tweet = next(latest_tweets)
return latest_tweet
except StopIteration:
return None
def latest_tweets(self):
query = self.query(Tweet).order_by(Tweet.twitter_id.desc())[0:10]
for entry in query:
yield entry.to_tweet()
def all_tweets(self):
query = self.query(Tweet).order_by(Tweet.twitter_id.asc())
for entry in query:
yield entry.to_tweet()
def num_tweets(self):
return self.query(Tweet).count()
def tweets_for_month(self, year, month_number):
start_date = arrow.Arrow(year, month_number, 1)
end_date = start_date.shift(months=+1)
query = (
self.query(Tweet)
.order_by(Tweet.twitter_id.asc())
.filter(start_date.timestamp < Tweet.timestamp)
.filter(Tweet.timestamp < end_date.timestamp)
)
for entry in query:
yield entry.to_tweet()
def date_range(self):
start_row = self.query(func.min(Tweet.timestamp)).scalar()
end_row = self.query(func.max(Tweet.timestamp)).scalar()
return (start_row, end_row)
def tweet_by_id(self, twitter_id):
entry = self._tweet_entry_by_id(twitter_id)
return entry.to_tweet()
def set_text(self, twitter_id, text):
entry = self._tweet_entry_by_id(twitter_id)
entry.text = text
self.commit()
def search_tweet(self, pattern):
full_pattern = "%" + pattern + "%"
query = self.query(Tweet).filter(Tweet.text.ilike(full_pattern))
for entry in query:
yield entry.to_tweet()
def _tweet_entry_by_id(self, twitter_id):
entry = self.query(Tweet).filter(Tweet.twitter_id == twitter_id).one_or_none()
if not entry:
raise NoSuchId(twitter_id)
return entry
def user(self):
entry = self.query(User).one()
return entry.to_user()
def save_user(self, user):
self.query(User).delete()
entry = User.from_(user)
entry.screen_name = user.screen_name
entry.name = user.name
entry.description = user.description
entry.location = user.location
self.add(entry)
self.commit()
def following(self):
return self._get_related_users(Following)
def save_following(self, following):
return self._set_related_users(Following, following)
def followers(self):
return self._get_related_users(Followers)
def save_followers(self, followers):
return self._set_related_users(Followers, followers)
def _get_related_users(self, userClass):
for entry in self.query(userClass).all():
yield entry.to_user()
def _set_related_users(self, userClass, users):
self.query(userClass).delete()
for user in users:
self.add(userClass.from_(user))
self.commit()
def get_repository():
config = twittback.config.read_config()
db_path = Path(config["db"]["path"])
db_path.parent.makedirs_p()
return Repository(db_path)
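# Hedged usage sketch: the Repository API above suggests this flow. The
# ":memory:" path keeps the example self-contained, and twittback.Tweet is
# assumed to accept the keyword arguments used by Tweet.to_tweet()/from_().
def _demo_repository():
    repository = Repository(":memory:")
    repository.add_tweets([
        twittback.Tweet(twitter_id=1, text="hello", timestamp=1500000000),
        twittback.Tweet(twitter_id=2, text="world", timestamp=1500000060),
    ])
    latest = repository.latest_tweet()
    matches = list(repository.search_tweet("hel"))
    return latest, matches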
|
|
class SkipTest(Exception):
pass
class AssertRaisesContext:
def __init__(self, exc):
self.expected = exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
assert False, "%r not raised" % self.expected
if issubclass(exc_type, self.expected):
return True
return False
class TestCase:
def fail(self, msg=''):
assert False, msg
def assertEqual(self, x, y, msg=''):
if not msg:
msg = "%r vs (expected) %r" % (x, y)
assert x == y, msg
def assertNotEqual(self, x, y, msg=''):
if not msg:
msg = "%r not expected to be equal %r" % (x, y)
assert x != y, msg
def assertAlmostEqual(self, x, y, places=None, msg='', delta=None):
if x == y:
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(x - y) <= delta:
return
if not msg:
msg = '%r != %r within %r delta' % (x, y, delta)
else:
if places is None:
places = 7
if round(abs(y-x), places) == 0:
return
if not msg:
msg = '%r != %r within %r places' % (x, y, places)
assert False, msg
def assertNotAlmostEqual(self, x, y, places=None, msg='', delta=None):
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (x == y) and abs(x - y) > delta:
return
if not msg:
msg = '%r == %r within %r delta' % (x, y, delta)
else:
if places is None:
places = 7
if not (x == y) and round(abs(y-x), places) != 0:
return
if not msg:
msg = '%r == %r within %r places' % (x, y, places)
assert False, msg
def assertIs(self, x, y, msg=''):
if not msg:
msg = "%r is not %r" % (x, y)
assert x is y, msg
def assertIsNot(self, x, y, msg=''):
if not msg:
msg = "%r is %r" % (x, y)
assert x is not y, msg
def assertIsNone(self, x, msg=''):
if not msg:
msg = "%r is not None" % x
assert x is None, msg
def assertIsNotNone(self, x, msg=''):
if not msg:
msg = "%r is None" % x
assert x is not None, msg
def assertTrue(self, x, msg=''):
if not msg:
msg = "Expected %r to be True" % x
assert x, msg
def assertFalse(self, x, msg=''):
if not msg:
msg = "Expected %r to be False" % x
assert not x, msg
def assertIn(self, x, y, msg=''):
if not msg:
msg = "Expected %r to be in %r" % (x, y)
assert x in y, msg
def assertIsInstance(self, x, y, msg=''):
assert isinstance(x, y), msg
    def assertRaises(self, exc, func=None, *args, **kwargs):
        if func is None:
            return AssertRaisesContext(exc)
        try:
            func(*args, **kwargs)
        except Exception as e:
            if isinstance(e, exc):
                return
            raise
        assert False, "%r not raised" % exc
def skip(msg):
def _decor(fun):
# We just replace original fun with _inner
def _inner(self):
raise SkipTest(msg)
return _inner
return _decor
def skipUnless(cond, msg):
if cond:
return lambda x: x
return skip(msg)
class TestSuite:
def __init__(self):
self.tests = []
def addTest(self, cls):
self.tests.append(cls)
class TestRunner:
def run(self, suite):
res = TestResult()
for c in suite.tests:
run_class(c, res)
return res
class TestResult:
def __init__(self):
self.errorsNum = 0
self.failuresNum = 0
self.skippedNum = 0
self.testsRun = 0
def wasSuccessful(self):
return self.errorsNum == 0 and self.failuresNum == 0
# TODO: Uncompliant
def run_class(c, test_result):
o = c()
set_up = getattr(o, "setUp", lambda: None)
tear_down = getattr(o, "tearDown", lambda: None)
for name in dir(o):
if name.startswith("test"):
print(name, end=' ...')
m = getattr(o, name)
try:
set_up()
test_result.testsRun += 1
m()
tear_down()
print(" ok")
except SkipTest as e:
print(" skipped:", e.args[0])
test_result.skippedNum += 1
def main(module="__main__"):
def test_cases(m):
for tn in dir(m):
c = getattr(m, tn)
            if isinstance(c, type) and issubclass(c, TestCase):
yield c
m = __import__(module)
suite = TestSuite()
for c in test_cases(m):
suite.addTest(c)
runner = TestRunner()
result = runner.run(suite)
msg = "Ran %d tests" % result.testsRun
if result.skippedNum > 0:
msg += " (%d skipped)" % result.skippedNum
print(msg)
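# Hedged usage sketch: a minimal test module written against the TestCase,
# skip and main() helpers defined above. Class and method names here are
# purely illustrative.
class ExampleTests(TestCase):
    def setUp(self):
        self.data = [1, 2, 3]
    def test_membership(self):
        self.assertIn(2, self.data)
        self.assertEqual(len(self.data), 3)
    @skip("demonstrates the skip decorator")
    def test_skipped(self):
        self.fail("never runs")
# Calling main() on a module containing tests like these prints each test name
# followed by " ok" or " skipped: <reason>" and a final "Ran N tests" summary.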
|
|
"""
Testing strategies for Hypothesis-based tests.
"""
import keyword
import string
from collections import OrderedDict
from hypothesis import strategies as st
import attr
from .utils import make_class
def gen_attr_names():
"""
Generate names for attributes, 'a'...'z', then 'aa'...'zz'.
~702 different attribute names should be enough in practice.
Some short strings (such as 'as') are keywords, so we skip them.
"""
lc = string.ascii_lowercase
for c in lc:
yield c
for outer in lc:
for inner in lc:
res = outer + inner
if keyword.iskeyword(res):
continue
yield outer + inner
def maybe_underscore_prefix(source):
"""
A generator to sometimes prepend an underscore.
"""
to_underscore = False
for val in source:
yield val if not to_underscore else "_" + val
to_underscore = not to_underscore
def _create_hyp_class(attrs):
"""
A helper function for Hypothesis to generate attrs classes.
"""
return make_class("HypClass", dict(zip(gen_attr_names(), attrs)))
def _create_hyp_nested_strategy(simple_class_strategy):
"""
Create a recursive attrs class.
Given a strategy for building (simpler) classes, create and return
a strategy for building classes that have as an attribute: either just
the simpler class, a list of simpler classes, a tuple of simpler classes,
an ordered dict or a dict mapping the string "cls" to a simpler class.
"""
# Use a tuple strategy to combine simple attributes and an attr class.
def just_class(tup):
combined_attrs = list(tup[0])
combined_attrs.append(attr.ib(default=attr.Factory(tup[1])))
return _create_hyp_class(combined_attrs)
def list_of_class(tup):
default = attr.Factory(lambda: [tup[1]()])
combined_attrs = list(tup[0])
combined_attrs.append(attr.ib(default=default))
return _create_hyp_class(combined_attrs)
def tuple_of_class(tup):
default = attr.Factory(lambda: (tup[1](),))
combined_attrs = list(tup[0])
combined_attrs.append(attr.ib(default=default))
return _create_hyp_class(combined_attrs)
def dict_of_class(tup):
default = attr.Factory(lambda: {"cls": tup[1]()})
combined_attrs = list(tup[0])
combined_attrs.append(attr.ib(default=default))
return _create_hyp_class(combined_attrs)
def ordereddict_of_class(tup):
default = attr.Factory(lambda: OrderedDict([("cls", tup[1]())]))
combined_attrs = list(tup[0])
combined_attrs.append(attr.ib(default=default))
return _create_hyp_class(combined_attrs)
# A strategy producing tuples of the form ([list of attributes], <given
# class strategy>).
attrs_and_classes = st.tuples(list_of_attrs, simple_class_strategy)
return st.one_of(
attrs_and_classes.map(just_class),
attrs_and_classes.map(list_of_class),
attrs_and_classes.map(tuple_of_class),
attrs_and_classes.map(dict_of_class),
attrs_and_classes.map(ordereddict_of_class),
)
bare_attrs = st.builds(attr.ib, default=st.none())
int_attrs = st.integers().map(lambda i: attr.ib(default=i))
str_attrs = st.text().map(lambda s: attr.ib(default=s))
float_attrs = st.floats().map(lambda f: attr.ib(default=f))
dict_attrs = st.dictionaries(keys=st.text(), values=st.integers()).map(
lambda d: attr.ib(default=d)
)
simple_attrs_without_metadata = (
bare_attrs | int_attrs | str_attrs | float_attrs | dict_attrs
)
@st.composite
def simple_attrs_with_metadata(draw):
"""
Create a simple attribute with arbitrary metadata.
"""
c_attr = draw(simple_attrs)
keys = st.booleans() | st.binary() | st.integers() | st.text()
vals = st.booleans() | st.binary() | st.integers() | st.text()
metadata = draw(
st.dictionaries(keys=keys, values=vals, min_size=1, max_size=5)
)
return attr.ib(
default=c_attr._default,
validator=c_attr._validator,
repr=c_attr.repr,
cmp=c_attr.cmp,
hash=c_attr.hash,
init=c_attr.init,
metadata=metadata,
type=None,
converter=c_attr.converter,
)
simple_attrs = simple_attrs_without_metadata | simple_attrs_with_metadata()
# Python functions support up to 255 arguments, so keep the generated
# attribute lists well below that limit.
list_of_attrs = st.lists(simple_attrs, max_size=9)
@st.composite
def simple_classes(
draw, slots=None, frozen=None, weakref_slot=None, private_attrs=None
):
"""
A strategy that generates classes with default non-attr attributes.
For example, this strategy might generate a class such as:
@attr.s(slots=True, frozen=True, weakref_slot=True)
class HypClass:
a = attr.ib(default=1)
_b = attr.ib(default=None)
c = attr.ib(default='text')
_d = attr.ib(default=1.0)
        e = attr.ib(default={'t': 1})
By default, all combinations of slots, frozen, and weakref_slot classes
will be generated. If `slots=True` is passed in, only slotted classes will
be generated, and if `slots=False` is passed in, no slot classes will be
generated. The same applies to `frozen` and `weakref_slot`.
By default, some attributes will be private (i.e. prefixed with an
underscore). If `private_attrs=True` is passed in, all attributes will be
private, and if `private_attrs=False`, no attributes will be private.
"""
attrs = draw(list_of_attrs)
frozen_flag = draw(st.booleans()) if frozen is None else frozen
slots_flag = draw(st.booleans()) if slots is None else slots
weakref_slot_flag = (
draw(st.booleans()) if weakref_slot is None else weakref_slot
)
if private_attrs is None:
attr_names = maybe_underscore_prefix(gen_attr_names())
elif private_attrs is True:
attr_names = ("_" + n for n in gen_attr_names())
elif private_attrs is False:
attr_names = gen_attr_names()
cls_dict = dict(zip(attr_names, attrs))
post_init_flag = draw(st.booleans())
if post_init_flag:
def post_init(self):
pass
cls_dict["__attrs_post_init__"] = post_init
return make_class(
"HypClass",
cls_dict,
slots=slots_flag,
frozen=frozen_flag,
weakref_slot=weakref_slot_flag,
)
# st.recursive works by taking a base strategy (in this case, simple_classes)
# and a special function. This function receives a strategy, and returns
# another strategy (building on top of the base strategy).
nested_classes = st.recursive(
simple_classes(), _create_hyp_nested_strategy, max_leaves=10
)
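# Hedged usage sketch: one way these strategies are consumed. Every generated
# attribute carries a default, so instantiating a drawn class with no
# arguments should succeed; the property checked below is illustrative only.
from hypothesis import given
@given(simple_classes())
def _example_generated_classes_instantiate(cls):
    instance = cls()
    assert repr(instance)
    # list_of_attrs is capped at 9 entries, so the drawn class can never
    # carry more attributes than that.
    assert len(attr.fields(cls)) <= 9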
|
|
from __future__ import print_function, division
from sympy.core import S, sympify, Dummy, Mod
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.logic import fuzzy_and
from sympy.core.numbers import Integer, pi
from sympy.core.relational import Eq
from sympy.ntheory import sieve
from math import sqrt as _sqrt
from sympy.core.compatibility import reduce, range
from sympy.core.cache import cacheit
from sympy.polys.polytools import poly_from_expr
from sympy.polys.polyerrors import PolificationFailed
class CombinatorialFunction(Function):
"""Base class for combinatorial functions. """
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import combsimp
expr = combsimp(self)
if measure(expr) <= ratio*measure(self):
return expr
return self
###############################################################################
######################## FACTORIAL and MULTI-FACTORIAL ########################
###############################################################################
class factorial(CombinatorialFunction):
"""Implementation of factorial function over nonnegative integers.
By convention (consistent with the gamma function and the binomial
coefficients), factorial of a negative integer is complex infinity.
The factorial is very important in combinatorics where it gives
the number of ways in which `n` objects can be permuted. It also
arises in calculus, probability, number theory, etc.
    The factorial is closely related to the gamma function; in fact,
    n! = gamma(n+1) for nonnegative integers. Rewrites of this kind
    are very useful for combinatorial simplification.
    Computation of the factorial is done using two algorithms. For
    small arguments a naive product is evaluated, while for larger
    inputs the Prime-Swing algorithm is used. It is the fastest known
    algorithm and computes n! via the prime factorization of a special
    class of numbers, called here the 'Swing Numbers'.
Examples
========
>>> from sympy import Symbol, factorial, S
>>> n = Symbol('n', integer=True)
>>> factorial(0)
1
>>> factorial(7)
5040
>>> factorial(-2)
zoo
>>> factorial(n)
factorial(n)
>>> factorial(2*n)
factorial(2*n)
>>> factorial(S(1)/2)
factorial(1/2)
See Also
========
factorial2, RisingFactorial, FallingFactorial
"""
def fdiff(self, argindex=1):
from sympy import gamma, polygamma
if argindex == 1:
return gamma(self.args[0] + 1)*polygamma(0, self.args[0] + 1)
else:
raise ArgumentIndexError(self, argindex)
_small_swing = [
1, 1, 1, 3, 3, 15, 5, 35, 35, 315, 63, 693, 231, 3003, 429, 6435, 6435, 109395,
12155, 230945, 46189, 969969, 88179, 2028117, 676039, 16900975, 1300075,
35102025, 5014575, 145422675, 9694845, 300540195, 300540195
]
@classmethod
def _swing(cls, n):
if n < 33:
return cls._small_swing[n]
else:
N, primes = int(_sqrt(n)), []
for prime in sieve.primerange(3, N + 1):
p, q = 1, n
while True:
q //= prime
if q > 0:
if q & 1 == 1:
p *= prime
else:
break
if p > 1:
primes.append(p)
for prime in sieve.primerange(N + 1, n//3 + 1):
if (n // prime) & 1 == 1:
primes.append(prime)
L_product = R_product = 1
for prime in sieve.primerange(n//2 + 1, n + 1):
L_product *= prime
for prime in primes:
R_product *= prime
return L_product*R_product
@classmethod
def _recursive(cls, n):
if n < 2:
return 1
else:
return (cls._recursive(n//2)**2)*cls._swing(n)
@classmethod
def eval(cls, n):
n = sympify(n)
if n.is_Number:
if n is S.Zero:
return S.One
elif n is S.Infinity:
return S.Infinity
elif n.is_Integer:
if n.is_negative:
return S.ComplexInfinity
else:
n, result = n.p, 1
if n < 20:
for i in range(2, n + 1):
result *= i
else:
N, bits = n, 0
while N != 0:
if N & 1 == 1:
bits += 1
N = N >> 1
result = cls._recursive(n)*2**(n - bits)
return Integer(result)
def _eval_rewrite_as_gamma(self, n):
from sympy import gamma
return gamma(n + 1)
def _eval_rewrite_as_Product(self, n):
from sympy import Product
if n.is_nonnegative and n.is_integer:
i = Dummy('i', integer=True)
return Product(i, (i, 1, n))
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_positive(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_composite(self):
x = self.args[0]
if x.is_integer:
return (x - 3).is_nonnegative
def _eval_is_real(self):
x = self.args[0]
if x.is_nonnegative or x.is_noninteger:
return True
class MultiFactorial(CombinatorialFunction):
pass
class subfactorial(CombinatorialFunction):
r"""The subfactorial counts the derangements of n items and is
defined for non-negative integers as::
,
| 1 for n = 0
!n = { 0 for n = 1
| (n - 1)*(!(n - 1) + !(n - 2)) for n > 1
`
It can also be written as int(round(n!/exp(1))) but the recursive
definition with caching is implemented for this function.
An interesting analytic expression is the following [2]_
.. math:: !x = \Gamma(x + 1, -1)/e
    which is valid for non-negative integers x. The above formula
    is not very useful for non-integer arguments. :math:`\Gamma(x + 1, -1)` is
    single-valued only for integral arguments x; elsewhere on the positive real
    axis it has an infinite number of branches, none of which are real.
References
==========
.. [1] http://en.wikipedia.org/wiki/Subfactorial
.. [2] http://mathworld.wolfram.com/Subfactorial.html
Examples
========
>>> from sympy import subfactorial
>>> from sympy.abc import n
>>> subfactorial(n + 1)
subfactorial(n + 1)
>>> subfactorial(5)
44
See Also
========
sympy.functions.combinatorial.factorials.factorial,
sympy.utilities.iterables.generate_derangements,
sympy.functions.special.gamma_functions.uppergamma
"""
@classmethod
@cacheit
def _eval(self, n):
if not n:
return S.One
elif n == 1:
return S.Zero
return (n - 1)*(self._eval(n - 1) + self._eval(n - 2))
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg.is_Integer and arg.is_nonnegative:
return cls._eval(arg)
elif arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
def _eval_is_even(self):
if self.args[0].is_odd and self.args[0].is_nonnegative:
return True
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_rewrite_as_uppergamma(self, arg):
from sympy import uppergamma
return uppergamma(arg + 1, -1)/S.Exp1
def _eval_is_nonnegative(self):
if self.args[0].is_integer and self.args[0].is_nonnegative:
return True
def _eval_is_odd(self):
if self.args[0].is_even and self.args[0].is_nonnegative:
return True
class factorial2(CombinatorialFunction):
"""The double factorial n!!, not to be confused with (n!)!
The double factorial is defined for nonnegative integers and for odd
negative integers as::
,
| n*(n - 2)*(n - 4)* ... * 1 for n positive odd
n!! = { n*(n - 2)*(n - 4)* ... * 2 for n positive even
| 1 for n = 0
| (n+2)!! / (n+2) for n negative odd
`
References
==========
.. [1] https://en.wikipedia.org/wiki/Double_factorial
Examples
========
>>> from sympy import factorial2, var
>>> var('n')
n
>>> factorial2(n + 1)
factorial2(n + 1)
>>> factorial2(5)
15
>>> factorial2(-1)
1
>>> factorial2(-5)
1/3
See Also
========
factorial, RisingFactorial, FallingFactorial
"""
@classmethod
def eval(cls, arg):
# TODO: extend this to complex numbers?
if arg.is_Number:
if not arg.is_Integer:
raise ValueError("argument must be nonnegative integer or negative odd integer")
# This implementation is faster than the recursive one
# It also avoids "maximum recursion depth exceeded" runtime error
if arg.is_nonnegative:
if arg.is_even:
k = arg / 2
return 2 ** k * factorial(k)
return factorial(arg) / factorial2(arg - 1)
if arg.is_odd:
return arg * (S.NegativeOne) ** ((1 - arg) / 2) / factorial2(-arg)
raise ValueError("argument must be nonnegative integer or negative odd integer")
def _eval_is_even(self):
# Double factorial is even for every positive even input
n = self.args[0]
if n.is_integer:
if n.is_odd:
return False
if n.is_even:
if n.is_positive:
return True
if n.is_zero:
return False
def _eval_is_integer(self):
# Double factorial is an integer for every nonnegative input, and for
# -1 and -3
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return (n + 3).is_nonnegative
def _eval_is_odd(self):
# Double factorial is odd for every odd input not smaller than -3, and
# for 0
n = self.args[0]
if n.is_odd:
return (n + 3).is_nonnegative
if n.is_even:
if n.is_positive:
return False
if n.is_zero:
return True
def _eval_is_positive(self):
# Double factorial is positive for every nonnegative input, and for
        # every odd negative input which is of the form -1-4k for a
        # nonnegative integer k
n = self.args[0]
if n.is_integer:
if (n + 1).is_nonnegative:
return True
if n.is_odd:
return ((n + 1) / 2).is_even
def _eval_rewrite_as_gamma(self, n):
from sympy import gamma, Piecewise, sqrt
return 2**(n/2)*gamma(n/2 + 1) * Piecewise((1, Eq(Mod(n, 2), 0)), (sqrt(2/pi), Eq(Mod(n, 2), 1)))
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(CombinatorialFunction):
"""Rising factorial (also called Pochhammer symbol) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by:
rf(x, k) = x * (x + 1) * ... * (x + k - 1)
where 'x' can be arbitrary expression and 'k' is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/RisingFactorial.html page.
When x is a polynomial f of a single variable y of order >= 1,
    rf(x,k) = f(y) * f(y+1) * ... * f(y+k-1) as described in
Peter Paule, "Greatest Factorial Factorization and Symbolic Summation",
Journal of Symbolic Computation, vol. 20, pp. 235-268, 1995.
Examples
========
>>> from sympy import rf, symbols, factorial, ff, binomial
>>> from sympy.abc import x
>>> n, k = symbols('n k', integer=True)
>>> rf(x, 0)
1
>>> rf(1, 5)
120
>>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
True
>>> rf(x**3, 2)
Poly(x**6 + 3*x**5 + 3*x**4 + x**3, x, domain='ZZ')
Rewrite
>>> rf(x, k).rewrite(ff)
FallingFactorial(k + x - 1, k)
>>> rf(x, k).rewrite(binomial)
binomial(k + x - 1, k)*factorial(k)
>>> rf(n, k).rewrite(factorial)
factorial(k + n - 1)/factorial(n - 1)
See Also
========
factorial, factorial2, FallingFactorial
References
==========
.. [1] https://en.wikipedia.org/wiki/Pochhammer_symbol
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN or k is S.NaN:
return S.NaN
elif x is S.One:
return factorial(k)
elif k.is_Integer:
if k is S.Zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
try:
F, opt = poly_from_expr(x)
except PolificationFailed:
return reduce(lambda r, i: r*(x + i), range(0, int(k)), 1)
if len(opt.gens) > 1 or F.degree() <= 1:
return reduce(lambda r, i: r*(x + i), range(0, int(k)), 1)
else:
v = opt.gens[0]
return reduce(lambda r, i:
r*(F.subs(v, v + i).expand()),
range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
try:
F, opt = poly_from_expr(x)
except PolificationFailed:
return 1/reduce(lambda r, i:
r*(x - i),
range(1, abs(int(k)) + 1), 1)
if len(opt.gens) > 1 or F.degree() <= 1:
return 1/reduce(lambda r, i:
r*(x - i),
range(1, abs(int(k)) + 1), 1)
else:
v = opt.gens[0]
return 1/reduce(lambda r, i:
r*(F.subs(v, v - i).expand()),
range(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k):
from sympy import gamma
return gamma(x + k) / gamma(x)
def _eval_rewrite_as_FallingFactorial(self, x, k):
return FallingFactorial(x + k - 1, k)
def _eval_rewrite_as_factorial(self, x, k):
if x.is_integer and k.is_integer:
return factorial(k + x - 1) / factorial(x - 1)
def _eval_rewrite_as_binomial(self, x, k):
if k.is_integer:
return factorial(k) * binomial(x + k - 1, k)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
def _sage_(self):
import sage.all as sage
return sage.rising_factorial(self.args[0]._sage_(), self.args[1]._sage_())
class FallingFactorial(CombinatorialFunction):
"""Falling factorial (related to rising factorial) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by
ff(x, k) = x * (x-1) * ... * (x - k+1)
where 'x' can be arbitrary expression and 'k' is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit http://mathworld.wolfram.com/FallingFactorial.html page.
When x is a polynomial f of a single variable y of order >= 1,
    ff(x,k) = f(y) * f(y-1) * ... * f(y-k+1) as described in
Peter Paule, "Greatest Factorial Factorization and Symbolic Summation",
Journal of Symbolic Computation, vol. 20, pp. 235-268, 1995.
>>> from sympy import ff, factorial, rf, gamma, polygamma, binomial, symbols
>>> from sympy.abc import x, k
>>> n, m = symbols('n m', integer=True)
>>> ff(x, 0)
1
>>> ff(5, 5)
120
>>> ff(x, 5) == x*(x-1)*(x-2)*(x-3)*(x-4)
True
>>> ff(x**2, 2)
Poly(x**4 - 2*x**3 + x**2, x, domain='ZZ')
>>> ff(n, n)
factorial(n)
Rewrite
>>> ff(x, k).rewrite(gamma)
(-1)**k*gamma(k - x)/gamma(-x)
>>> ff(x, k).rewrite(rf)
RisingFactorial(-k + x + 1, k)
>>> ff(x, m).rewrite(binomial)
binomial(x, m)*factorial(m)
>>> ff(n, m).rewrite(factorial)
factorial(n)/factorial(-m + n)
See Also
========
factorial, factorial2, RisingFactorial
References
==========
.. [1] http://mathworld.wolfram.com/FallingFactorial.html
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN or k is S.NaN:
return S.NaN
elif k.is_integer and x == k:
return factorial(x)
elif k.is_Integer:
if k is S.Zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
try:
F, opt = poly_from_expr(x)
except PolificationFailed:
return reduce(lambda r, i: r*(x - i),
range(0, int(k)), 1)
if len(opt.gens) > 1 or F.degree() <= 1:
return reduce(lambda r, i: r*(x - i),
range(0, int(k)), 1)
else:
v = opt.gens[0]
return reduce(lambda r, i:
r*(F.subs(v, v - i).expand()),
range(0, int(k)), 1)
else:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
return S.Infinity
else:
try:
F, opt = poly_from_expr(x)
except PolificationFailed:
return 1/reduce(lambda r, i: r*(x + i),
range(1, abs(int(k)) + 1), 1)
if len(opt.gens) > 1 or F.degree() <= 1:
return 1/reduce(lambda r, i: r*(x + i),
range(1, abs(int(k)) + 1), 1)
else:
v = opt.gens[0]
return 1/reduce(lambda r, i:
r*(F.subs(v, v + i).expand()),
range(1, abs(int(k)) + 1), 1)
def _eval_rewrite_as_gamma(self, x, k):
from sympy import gamma
return (-1)**k*gamma(k - x) / gamma(-x)
def _eval_rewrite_as_RisingFactorial(self, x, k):
return rf(x - k + 1, k)
def _eval_rewrite_as_binomial(self, x, k):
if k.is_integer:
return factorial(k) * binomial(x, k)
def _eval_rewrite_as_factorial(self, x, k):
if x.is_integer and k.is_integer:
return factorial(x) / factorial(x - k)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
def _sage_(self):
import sage.all as sage
return sage.falling_factorial(self.args[0]._sage_(),
self.args[1]._sage_())
rf = RisingFactorial
ff = FallingFactorial
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class binomial(CombinatorialFunction):
"""Implementation of the binomial coefficient. It can be defined
in two ways depending on its desired interpretation:
C(n,k) = n!/(k!(n-k)!) or C(n, k) = ff(n, k)/k!
First, in a strict combinatorial sense it defines the
number of ways we can choose 'k' elements from a set of
'n' elements. In this case both arguments are nonnegative
integers and binomial is computed using an efficient
algorithm based on prime factorization.
    The other definition is a generalization for arbitrary 'n',
    however 'k' must also be nonnegative. This case is very
    useful when evaluating summations.
    For the sake of convenience, for negative 'k' this function
    will return zero no matter what value the other argument has.
To expand the binomial when n is a symbol, use either
expand_func() or expand(func=True). The former will keep the
polynomial in factored form while the latter will expand the
polynomial itself. See examples for details.
Examples
========
>>> from sympy import Symbol, Rational, binomial, expand_func
>>> n = Symbol('n', integer=True, positive=True)
>>> binomial(15, 8)
6435
>>> binomial(n, -1)
0
Rows of Pascal's triangle can be generated with the binomial function:
>>> for N in range(8):
... print([ binomial(N, i) for i in range(N + 1)])
...
[1]
[1, 1]
[1, 2, 1]
[1, 3, 3, 1]
[1, 4, 6, 4, 1]
[1, 5, 10, 10, 5, 1]
[1, 6, 15, 20, 15, 6, 1]
[1, 7, 21, 35, 35, 21, 7, 1]
As can a given diagonal, e.g. the 4th diagonal:
>>> N = -4
>>> [ binomial(N, i) for i in range(1 - N)]
[1, -4, 10, -20, 35]
>>> binomial(Rational(5, 4), 3)
-5/128
>>> binomial(Rational(-5, 4), 3)
-195/128
>>> binomial(n, 3)
binomial(n, 3)
>>> binomial(n, 3).expand(func=True)
n**3/6 - n**2/2 + n/3
>>> expand_func(binomial(n, 3))
n*(n - 2)*(n - 1)/6
"""
def fdiff(self, argindex=1):
from sympy import polygamma
if argindex == 1:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/01/
n, k = self.args
return binomial(n, k)*(polygamma(0, n + 1) - \
polygamma(0, n - k + 1))
elif argindex == 2:
# http://functions.wolfram.com/GammaBetaErf/Binomial/20/01/02/
n, k = self.args
return binomial(n, k)*(polygamma(0, n - k + 1) - \
polygamma(0, k + 1))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def _eval(self, n, k):
# n.is_Number and k.is_Integer and k != 1 and n != k
if k.is_Integer:
if n.is_Integer and n >= 0:
n, k = int(n), int(k)
if k > n:
return S.Zero
elif k > n // 2:
k = n - k
M, result = int(_sqrt(n)), 1
for prime in sieve.primerange(2, n + 1):
if prime > n - k:
result *= prime
elif prime > n // 2:
continue
elif prime > M:
if n % prime < k % prime:
result *= prime
else:
N, K = n, k
exp = a = 0
while N > 0:
a = int((N % prime) < (K % prime + a))
N, K = N // prime, K // prime
exp = a + exp
if exp > 0:
result *= prime**exp
return Integer(result)
else:
d = result = n - k + 1
for i in range(2, k + 1):
d += 1
result *= d
result /= i
return result
@classmethod
def eval(cls, n, k):
n, k = map(sympify, (n, k))
d = n - k
if d.is_zero or k.is_zero:
return S.One
elif d.is_zero is False:
if (k - 1).is_zero:
return n
elif k.is_negative:
return S.Zero
elif n.is_integer and n.is_nonnegative and d.is_negative:
return S.Zero
if k.is_Integer and k > 0 and n.is_Number:
return cls._eval(n, k)
def _eval_expand_func(self, **hints):
"""
Function to expand binomial(n,k) when m is positive integer
Also,
n is self.args[0] and k is self.args[1] while using binomial(n, k)
"""
n = self.args[0]
if n.is_Number:
return binomial(*self.args)
k = self.args[1]
if k.is_Add and n in k.args:
k = n - k
if k.is_Integer:
if k == S.Zero:
return S.One
elif k < 0:
return S.Zero
else:
n = self.args[0]
result = n - k + 1
for i in range(2, k + 1):
result *= n - k + i
result /= i
return result
else:
return binomial(*self.args)
def _eval_rewrite_as_factorial(self, n, k):
return factorial(n)/(factorial(k)*factorial(n - k))
def _eval_rewrite_as_gamma(self, n, k):
from sympy import gamma
return gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
def _eval_rewrite_as_FallingFactorial(self, n, k):
if k.is_integer:
return ff(n, k) / factorial(k)
def _eval_is_integer(self):
n, k = self.args
if n.is_integer and k.is_integer:
return True
elif k.is_integer is False:
return False
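# Hedged usage sketch, kept under a __main__ guard so importing this module is
# unaffected. It exercises the rewrite relations quoted in the docstrings
# above; the expected results in the comments are taken from those docstrings.
if __name__ == "__main__":
    from sympy import Symbol, gamma
    from sympy.abc import x
    n = Symbol("n", integer=True, positive=True)
    k = Symbol("k", integer=True, nonnegative=True)
    print(factorial(n).rewrite(gamma))       # gamma(n + 1)
    print(rf(x, k).rewrite(ff))              # FallingFactorial(k + x - 1, k)
    print(ff(n, k).rewrite(factorial))       # factorial(n)/factorial(n - k)
    print(binomial(n, 3).expand(func=True))  # n**3/6 - n**2/2 + n/3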
|
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_annotation231
except ImportError:
btp_annotation231 = sys.modules["onshape_client.oas.models.btp_annotation231"]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
try:
from onshape_client.oas.models import btp_statement269
except ImportError:
btp_statement269 = sys.modules["onshape_client.oas.models.btp_statement269"]
try:
from onshape_client.oas.models import btp_statement_loop277_all_of
except ImportError:
btp_statement_loop277_all_of = sys.modules[
"onshape_client.oas.models.btp_statement_loop277_all_of"
]
try:
from onshape_client.oas.models import btp_statement_loop_for3278
except ImportError:
btp_statement_loop_for3278 = sys.modules[
"onshape_client.oas.models.btp_statement_loop_for3278"
]
try:
from onshape_client.oas.models import btp_statement_loop_for_in279
except ImportError:
btp_statement_loop_for_in279 = sys.modules[
"onshape_client.oas.models.btp_statement_loop_for_in279"
]
try:
from onshape_client.oas.models import btp_statement_loop_while280
except ImportError:
btp_statement_loop_while280 = sys.modules[
"onshape_client.oas.models.btp_statement_loop_while280"
]
class BTPStatementLoop277(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("documentation_type",): {
"FUNCTION": "FUNCTION",
"PREDICATE": "PREDICATE",
"CONSTANT": "CONSTANT",
"ENUM": "ENUM",
"USER_TYPE": "USER_TYPE",
"FEATURE_DEFINITION": "FEATURE_DEFINITION",
"FILE_HEADER": "FILE_HEADER",
"UNDOCUMENTABLE": "UNDOCUMENTABLE",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"body": (btp_statement269.BTPStatement269,), # noqa: E501
"space_after_loop_type": (btp_space10.BTPSpace10,), # noqa: E501
"atomic": (bool,), # noqa: E501
"documentation_type": (str,), # noqa: E501
"end_source_location": (int,), # noqa: E501
"node_id": (str,), # noqa: E501
"short_descriptor": (str,), # noqa: E501
"space_after": (btp_space10.BTPSpace10,), # noqa: E501
"space_before": (btp_space10.BTPSpace10,), # noqa: E501
"space_default": (bool,), # noqa: E501
"start_source_location": (int,), # noqa: E501
"annotation": (btp_annotation231.BTPAnnotation231,), # noqa: E501
}
@staticmethod
def discriminator():
return {
"bt_type": {
"BTPStatementLoopFor3-278": btp_statement_loop_for3278.BTPStatementLoopFor3278,
"BTPStatementLoopForIn-279": btp_statement_loop_for_in279.BTPStatementLoopForIn279,
"BTPStatementLoopWhile-280": btp_statement_loop_while280.BTPStatementLoopWhile280,
},
}
attribute_map = {
"bt_type": "btType", # noqa: E501
"body": "body", # noqa: E501
"space_after_loop_type": "spaceAfterLoopType", # noqa: E501
"atomic": "atomic", # noqa: E501
"documentation_type": "documentationType", # noqa: E501
"end_source_location": "endSourceLocation", # noqa: E501
"node_id": "nodeId", # noqa: E501
"short_descriptor": "shortDescriptor", # noqa: E501
"space_after": "spaceAfter", # noqa: E501
"space_before": "spaceBefore", # noqa: E501
"space_default": "spaceDefault", # noqa: E501
"start_source_location": "startSourceLocation", # noqa: E501
"annotation": "annotation", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btp_statement_loop277.BTPStatementLoop277 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
body (btp_statement269.BTPStatement269): [optional] # noqa: E501
space_after_loop_type (btp_space10.BTPSpace10): [optional] # noqa: E501
atomic (bool): [optional] # noqa: E501
documentation_type (str): [optional] # noqa: E501
end_source_location (int): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
short_descriptor (str): [optional] # noqa: E501
space_after (btp_space10.BTPSpace10): [optional] # noqa: E501
space_before (btp_space10.BTPSpace10): [optional] # noqa: E501
space_default (bool): [optional] # noqa: E501
start_source_location (int): [optional] # noqa: E501
annotation (btp_annotation231.BTPAnnotation231): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
btp_statement269.BTPStatement269,
btp_statement_loop277_all_of.BTPStatementLoop277AllOf,
],
"oneOf": [],
}
@classmethod
def get_discriminator_class(cls, from_server, data):
"""Returns the child class specified by the discriminator"""
discriminator = cls.discriminator()
discr_propertyname_py = list(discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if from_server:
class_name = data[discr_propertyname_js]
else:
class_name = data[discr_propertyname_py]
class_name_to_discr_class = discriminator[discr_propertyname_py]
return class_name_to_discr_class.get(class_name)
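# Hedged usage sketch: the discriminator mapping above routes deserialization
# to a concrete subclass based on the "btType" field. The payload below is a
# minimal illustrative fragment, not a real API response.
if __name__ == "__main__":
    payload = {"btType": "BTPStatementLoopWhile-280"}
    concrete_class = BTPStatementLoop277.get_discriminator_class(
        from_server=True, data=payload
    )
    print(concrete_class)  # -> BTPStatementLoopWhile280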
|
|
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from decimal import Decimal
from test_framework.blocktools import add_witness_commitment, create_block, create_coinbase, send_to_witness
from test_framework.messages import BIP125_SEQUENCE_NUMBER, CTransaction
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes, sync_mempools
import io
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[
"-deprecatedrpc=addwitnessaddress",
"-walletrbf={}".format(i),
] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
self.start_node(1)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 coins of 0.001 dgb (100,000 satoshis each)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.getaddressinfo(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransactionwithwallet(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransactionwithwallet(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
    # the bumped tx requests totalFee=48,200 (50,000 - 1,800); the 1,800 satoshi change
    # output is below the discard threshold, so it is dropped and the fee becomes 50,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
    # (32-byte p2sh-p2wpkh output size + 148-byte p2pkh spend estimate) * 10k (discard_rate) / 1000 = 1800
# P2SH outputs are slightly "over-discarding" due to the IsDust calculation assuming it will
# be spent as a P2PKH.
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 50000-1800})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) #change output is eliminated
def test_settxfee(rbf_node, dest_address):
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
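    # Illustrative numbers only (not produced by this test): a fee of 0.00003750 on a
    # 150-vbyte transaction gives 0.00003750 * 1000 / 150 = 0.00025000 per kvB, i.e.
    # exactly the requested_feerate and well within the 0.00001000 tolerance above.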
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then invalidate the block so the rbf tx will be put back in the mempool.
# This makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
# Can not abandon conflicted tx
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: rbf_node.abandontransaction(txid=bumpid))
rbf_node.invalidateblock(block.hash)
# Call abandon to make sure the wallet doesn't attempt to resubmit
# the bump tx and hope the wallet does not rebroadcast before we call.
rbf_node.abandontransaction(bumpid)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.00050000"),
node.getrawchangeaddress(): Decimal("0.00049000")})
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
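# Fee arithmetic for spend_one_input: the selected 0.00100000 input funds a
# 0.00050000 payment plus a 0.00049000 change output, leaving 0.00001000
# (1,000 satoshis) as the original fee that bumpfee can later increase.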
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
add_witness_commitment(block)
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
if __name__ == "__main__":
BumpFeeTest().main()
|
|
from __future__ import unicode_literals
import tempfile
import shutil
import os
from compose import config
from compose.project import Project
from compose.const import LABEL_CONFIG_HASH
from .testcases import DockerClientTestCase
class ProjectTestCase(DockerClientTestCase):
def run_up(self, cfg, **kwargs):
kwargs.setdefault('smart_recreate', True)
kwargs.setdefault('timeout', 0.1)
project = self.make_project(cfg)
project.up(**kwargs)
return set(project.containers(stopped=True))
def make_project(self, cfg):
return Project.from_dicts(
name='composetest',
client=self.client,
service_dicts=config.from_dictionary(cfg),
)
class BasicProjectTest(ProjectTestCase):
def setUp(self):
super(BasicProjectTest, self).setUp()
self.cfg = {
'db': {'image': 'busybox:latest'},
'web': {'image': 'busybox:latest'},
}
def test_no_change(self):
old_containers = self.run_up(self.cfg)
self.assertEqual(len(old_containers), 2)
new_containers = self.run_up(self.cfg)
self.assertEqual(len(new_containers), 2)
self.assertEqual(old_containers, new_containers)
def test_partial_change(self):
old_containers = self.run_up(self.cfg)
old_db = [c for c in old_containers if c.name_without_project == 'db_1'][0]
old_web = [c for c in old_containers if c.name_without_project == 'web_1'][0]
self.cfg['web']['command'] = '/bin/true'
new_containers = self.run_up(self.cfg)
self.assertEqual(len(new_containers), 2)
preserved = list(old_containers & new_containers)
self.assertEqual(preserved, [old_db])
removed = list(old_containers - new_containers)
self.assertEqual(removed, [old_web])
created = list(new_containers - old_containers)
self.assertEqual(len(created), 1)
self.assertEqual(created[0].name_without_project, 'web_1')
self.assertEqual(created[0].get('Config.Cmd'), ['/bin/true'])
def test_all_change(self):
old_containers = self.run_up(self.cfg)
self.assertEqual(len(old_containers), 2)
self.cfg['web']['command'] = '/bin/true'
self.cfg['db']['command'] = '/bin/true'
new_containers = self.run_up(self.cfg)
self.assertEqual(len(new_containers), 2)
unchanged = old_containers & new_containers
self.assertEqual(len(unchanged), 0)
new = new_containers - old_containers
self.assertEqual(len(new), 2)
class ProjectWithDependenciesTest(ProjectTestCase):
def setUp(self):
super(ProjectWithDependenciesTest, self).setUp()
self.cfg = {
'db': {
'image': 'busybox:latest',
'command': 'tail -f /dev/null',
},
'web': {
'image': 'busybox:latest',
'command': 'tail -f /dev/null',
'links': ['db'],
},
'nginx': {
'image': 'busybox:latest',
'command': 'tail -f /dev/null',
'links': ['web'],
},
}
def test_up(self):
containers = self.run_up(self.cfg)
self.assertEqual(
set(c.name_without_project for c in containers),
set(['db_1', 'web_1', 'nginx_1']),
)
def test_change_leaf(self):
old_containers = self.run_up(self.cfg)
self.cfg['nginx']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
self.assertEqual(
set(c.name_without_project for c in new_containers - old_containers),
set(['nginx_1']),
)
def test_change_middle(self):
old_containers = self.run_up(self.cfg)
self.cfg['web']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
self.assertEqual(
set(c.name_without_project for c in new_containers - old_containers),
set(['web_1', 'nginx_1']),
)
def test_change_root(self):
old_containers = self.run_up(self.cfg)
self.cfg['db']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
self.assertEqual(
set(c.name_without_project for c in new_containers - old_containers),
set(['db_1', 'web_1', 'nginx_1']),
)
def test_change_root_no_recreate(self):
old_containers = self.run_up(self.cfg)
self.cfg['db']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg, allow_recreate=False)
self.assertEqual(new_containers - old_containers, set())
def converge(service,
allow_recreate=True,
smart_recreate=False,
insecure_registry=False,
do_build=True):
"""
If a container for this service doesn't exist, create and start one. If there are
any, stop them, create+start new ones, and remove the old containers.
"""
plan = service.convergence_plan(
allow_recreate=allow_recreate,
smart_recreate=smart_recreate,
)
return service.execute_convergence_plan(
plan,
insecure_registry=insecure_registry,
do_build=do_build,
timeout=0.1,
)
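# Usage sketch (mirrors the ConfigHashTest cases below): converge(service)
# executes the convergence plan and returns the resulting containers, so
# `container = converge(web)[0]` grabs the (re)created container for `web`.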
class ServiceStateTest(DockerClientTestCase):
"""Test cases for Service.convergence_plan."""
def test_trigger_create(self):
web = self.create_service('web')
self.assertEqual(('create', []), web.convergence_plan(smart_recreate=True))
def test_trigger_noop(self):
web = self.create_service('web')
container = web.create_container()
web.start()
web = self.create_service('web')
self.assertEqual(('noop', [container]), web.convergence_plan(smart_recreate=True))
def test_trigger_start(self):
options = dict(command=["top"])
web = self.create_service('web', **options)
web.scale(2)
containers = web.containers(stopped=True)
containers[0].stop()
containers[0].inspect()
self.assertEqual([c.is_running for c in containers], [False, True])
web = self.create_service('web', **options)
self.assertEqual(
('start', containers[0:1]),
web.convergence_plan(smart_recreate=True),
)
def test_trigger_recreate_with_config_change(self):
web = self.create_service('web', command=["top"])
container = web.create_container()
web = self.create_service('web', command=["top", "-d", "1"])
self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True))
def test_trigger_recreate_with_image_change(self):
repo = 'composetest_myimage'
tag = 'latest'
image = '{}:{}'.format(repo, tag)
image_id = self.client.images(name='busybox')[0]['Id']
self.client.tag(image_id, repository=repo, tag=tag)
try:
web = self.create_service('web', image=image)
container = web.create_container()
# update the image
c = self.client.create_container(image, ['touch', '/hello.txt'])
self.client.commit(c, repository=repo, tag=tag)
self.client.remove_container(c)
web = self.create_service('web', image=image)
self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True))
finally:
self.client.remove_image(image)
def test_trigger_recreate_with_build(self):
context = tempfile.mkdtemp()
base_image = "FROM busybox\nLABEL com.docker.compose.test_image=true\n"
try:
dockerfile = os.path.join(context, 'Dockerfile')
with open(dockerfile, 'w') as f:
f.write(base_image)
web = self.create_service('web', build=context)
container = web.create_container()
with open(dockerfile, 'w') as f:
f.write(base_image + 'CMD echo hello world\n')
web.build()
web = self.create_service('web', build=context)
self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True))
finally:
shutil.rmtree(context)
class ConfigHashTest(DockerClientTestCase):
def test_no_config_hash_when_one_off(self):
web = self.create_service('web')
container = web.create_container(one_off=True)
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
def test_no_config_hash_when_overriding_options(self):
web = self.create_service('web')
container = web.create_container(environment={'FOO': '1'})
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
def test_config_hash_with_custom_labels(self):
web = self.create_service('web', labels={'foo': '1'})
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
self.assertIn('foo', container.labels)
def test_config_hash_sticks_around(self):
web = self.create_service('web', command=["top"])
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
web = self.create_service('web', command=["top", "-d", "1"])
container = converge(web)[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
|
|
"""
Handles all requests relevant to the extraction service of the API.
"""
import base64
import cv2
from hutts_verification.image_processing.sample_extract import TextExtractor
from flask import Blueprint, jsonify, request, make_response
from hutts_verification.utils.image_handling import grab_image
from hutts_verification.image_processing.sample_extract import FaceExtractor
from hutts_verification.utils.hutts_logger import logger
__authors__ = "Nicolai van Niekerk, Stephan Nell"
__copyright__ = "Copyright 2017, Java the Hutts"
__license__ = "BSD"
__maintainer__ = "Nicolai van Niekerk"
__email__ = "nicvaniek@gmail.com"
__status__ = "Development"
extract = Blueprint('extract', __name__)
@extract.route('/extractText', methods=['POST'])
def extract_text():
"""
Sample function to extract text from image received.
URL: http://localhost:5000/extractText.
"""
# Initialize the data dictionary to be returned by the request.
data = {"success": False}
# Check to see if this is a post request.
if request.method == "POST":
# Check to see if an image was uploaded.
if request.get_json().get("idPhoto", None) is not None:
# Grab the uploaded image.
image = grab_image(string=request.get_json()["idPhoto"])
# Otherwise, assume that a URL was passed in.
else:
# Grab the URL from the request.
url = request.get_json().get("url", None)
# If the URL is None, then return an error.
if url is None:
data["error"] = "No URL provided."
return jsonify(data)
# Load the image and convert.
image = grab_image(url=url)
# Grab additional parameters specifying techniques
preferences = {}
if 'blur_technique' in request.get_json():
preferences['blur_method'] = request.get_json()['blur_technique']
if 'threshold_technique' in request.get_json():
preferences['threshold_method'] = request.get_json()['threshold_technique']
if 'remove_face' in request.get_json():
preferences['remove_face'] = request.get_json()['remove_face']
if 'remove_barcode' in request.get_json():
preferences['remove_barcode'] = request.get_json()['remove_barcode']
if 'color' in request.get_json():
preferences['color'] = request.get_json()['color']
if 'id_type' in request.get_json():
preferences['id_type'] = request.get_json()['id_type']
if 'useIO' in request.get_json():
preferences['useIO'] = request.get_json()['useIO'] == 'true'
else:
preferences['useIO'] = False
# Extract text from image
extractor = TextExtractor(preferences)
result = extractor.extract(image)
return jsonify(result)
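# Example client call (a sketch only; the host/port come from the docstring above,
# the payload keys mirror the ones parsed in extract_text, and the payload values
# are placeholders):
#
#   import requests
#   payload = {"url": "http://example.com/id.jpg", "id_type": "idcard", "useIO": "false"}
#   print(requests.post("http://localhost:5000/extractText", json=payload).json())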
@extract.route('/extractFace', methods=['POST'])
def extract_face():
"""
Sample function to extract face from image received.
URL: http://localhost:5000/extractFace.
"""
# initialize the data dictionary to be returned by the request
data = {"success": False}
# check to see if this is a post request
if request.method == "POST":
# check to see if an image was uploaded
if request.get_json().get("idPhoto", None) is not None:
# grab the uploaded image
image = grab_image(string=request.get_json()["idPhoto"])
# otherwise, assume that a URL was passed in
else:
# grab the URL from the request
url = request.get_json().get("url", None)
# if the URL is None, then return an error
if url is None:
data["error"] = "No URL provided."
return jsonify(data)
# load the image and convert
image = grab_image(url=url)
# Add preferences
preferences = {}
        if 'useIO' in request.get_json():
            preferences['useIO'] = request.get_json()['useIO'] == 'true'
        else:
            preferences['useIO'] = False
        # Call OpenCV commands here with the extracted image
        response = face_extraction_response(preferences['useIO'], image)
return response
@extract.route('/extractAll', methods=['POST'])
def extract_all():
"""
Sample function to extract face and text from image received.
URL: http://localhost:5000/extractAll.
"""
# initialize the data dictionary to be returned by the request
data = {"success": False}
# check to see if this is a post request
if request.method == "POST":
# check to see if an image was uploaded
if request.get_json().get("idPhoto", None) is not None:
# grab the uploaded image
image = grab_image(string=request.get_json()["idPhoto"])
# otherwise, assume that a URL was passed in
else:
# grab the URL from the request
url = request.get_json().get("url", None)
# if the URL is None, then return an error
if url is None:
data["error"] = "No URL provided."
return jsonify(data)
# load the image and convert
image = grab_image(url=url)
        # Call OpenCV commands here with the extracted image
# Grab additional parameters specifying techniques
preferences = {}
if 'blur_technique' in request.get_json():
preferences['blur_method'] = request.get_json()['blur_technique']
if 'threshold_technique' in request.get_json():
preferences['threshold_method'] = request.get_json()['threshold_technique']
if 'remove_face' in request.get_json():
preferences['remove_face'] = request.get_json()['remove_face']
if 'remove_barcode' in request.get_json():
preferences['remove_barcode'] = request.get_json()['remove_barcode']
if 'color' in request.get_json():
preferences['color'] = request.get_json()['color']
if 'id_type' in request.get_json():
preferences['id_type'] = request.get_json()['id_type']
if 'useIO' in request.get_json():
preferences['useIO'] = request.get_json()['useIO'] == 'true'
else:
preferences['useIO'] = False
        # Extract text from the image
extractor = TextExtractor(preferences)
result = extractor.extract(image)
response = face_extraction_response(preferences['useIO'], image, result)
return response
def face_extraction_response(use_io, image, text_extract_result=None):
"""
This function converts the extracted cv2 image and converts it
to a jpg image. Furthermore, the jpg image is converted to
Base64 jpg type and returned. If text extraction results are provided
the response will contain the data of text extraction result as well.
:param use_io (boolean): Whether or not images should be written to disk.
:param image (obj): The cv2 (numpy) image that should be converted to jpg.
:param text_extract_result (dict): The extracted text results.
Returns:
- (obj): The response object that contains the information for HTTP transmission.
"""
extractor = FaceExtractor()
result = extractor.extract(image, use_io)
_, buffer = cv2.imencode('.jpg', result)
    # build a base64 data URI: swap the leading 'b' of the bytes repr for ',' and strip the quotes
logger.info("Converting to Base64")
jpg_img = ('data:image/jpg;base64' + str(base64.b64encode(buffer)).replace("b", ",", 1)).replace("'", "")
temp_dict = {"extracted_face": jpg_img}
if text_extract_result:
temp_dict["text_extract_result"] = text_extract_result
data = jsonify(temp_dict)
# prepare response
logger.info("Preparing Response")
response = make_response(data)
response.mimetype = 'multipart/form-data'
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
return response
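# Decoding sketch for the "extracted_face" value built above (assumes the
# 'data:image/jpg;base64,<data>' form produced by face_extraction_response):
#
#   import base64
#   _, _, b64_data = jpg_img.partition(',')
#   with open('face.jpg', 'wb') as fh:
#       fh.write(base64.b64decode(b64_data))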
|
|
import time
import log
from led import LEDMatrix
from led import LEDStrip
from led import LEDCircle
import colors
from util import d
import threading
class animThread(threading.Thread):
def __init__(self, anim, args):
super(animThread, self).__init__()
self.setDaemon(True)
self._anim = anim
self._args = args
def run(self):
log.debug("Starting thread...")
self._anim._run(**self._args)
log.debug("Thread Complete")
class BaseAnimation(object):
def __init__(self, led):
self._led = led
self.animComplete = False
self._step = 0
self._timeRef = 0
self._internalDelay = None
self._sleep = None
self._threaded = False
self._thread = None
self._callback = None
self._stopEvent = threading.Event()
self._stopEvent.clear()
self._led._threadedAnim = False
self._free_run = False
def _msTime(self):
return time.time() * 1000.0
def preRun(self, amt=1):
self._led.all_off()
def preStep(self, amt=1):
pass
def postStep(self, amt=1):
pass
def step(self, amt=1):
raise RuntimeError("Base class step() called. This shouldn't happen")
def stopThread(self, wait=False):
if self._thread:
self._stopEvent.set()
if wait:
self._thread.join()
def __enter__(self):
return self
def _exit(self, type, value, traceback):
pass
def __exit__(self, type, value, traceback):
self._exit(type, value, traceback)
self.stopThread(wait=True)
self._led.all_off()
self._led.update()
self._led.waitForUpdate()
def cleanup(self):
return self.__exit__(None, None, None)
def stopped(self):
return not (self._thread and self._thread.isAlive())
def _run(self, amt, fps, sleep, max_steps, untilComplete, max_cycles, seconds):
self.preRun()
        # calculate sleep time based on the desired frames per second
if fps:
sleep = int(1000 / fps)
if seconds is not None:
max_steps = int((seconds * 1000) / sleep)
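        # e.g. fps=25 -> sleep = int(1000 / 25) = 40ms per frame, and
        # seconds=10 -> max_steps = int(10 * 1000 / 40) = 250 frames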
initSleep = sleep
self._step = 0
cur_step = 0
cycle_count = 0
self.animComplete = False
while (not self._stopEvent.isSet() and
((max_steps == 0 and not untilComplete) or
(max_steps > 0 and cur_step < max_steps) or
(max_steps == 0 and untilComplete and not self.animComplete))):
self._timeRef = self._msTime()
start = self._msTime()
if hasattr(self, "_input_dev"):
self._keys = self._input_dev.getKeys()
self.preStep(amt)
self.step(amt)
self.postStep(amt)
mid = self._msTime()
if self._free_run:
sleep = None
elif self._internalDelay:
sleep = self._internalDelay
elif initSleep:
sleep = initSleep
self._sleep = sleep
self._led._frameGenTime = int(mid - start)
self._led._frameTotalTime = sleep
self._led.update()
now = self._msTime()
if self.animComplete and max_cycles > 0:
if cycle_count < max_cycles - 1:
cycle_count += 1
self.animComplete = False
stepTime = int(mid - start)
if self._led._threadedUpdate:
updateTime = int(self._led.lastThreadedUpdate())
totalTime = updateTime
else:
updateTime = int(now - mid)
totalTime = stepTime + updateTime
if self._led._threadedUpdate:
log.debug(
"Frame: %sms / Update Max: %sms", stepTime, updateTime)
else:
log.debug("%sms/%sfps / Frame: %sms / Update: %sms",
totalTime, int(1000 / max(totalTime, 1)), stepTime, updateTime)
if sleep:
diff = (self._msTime() - self._timeRef)
t = max(0, (sleep - diff) / 1000.0)
if t == 0:
log.warning(
"Frame-time of %dms set, but took %dms!", sleep, diff)
if self._threaded:
self._stopEvent.wait(t)
else:
time.sleep(t)
cur_step += 1
self._exit(None, None, None)
if self._callback:
self._callback(self)
def run(self, amt=1, fps=None, sleep=None, max_steps=0, untilComplete=False, max_cycles=0, threaded=False, joinThread=False, callback=None, seconds=None):
self._led._threadedAnim = self._threaded = threaded
if self._threaded:
self._stopEvent.clear()
self._callback = callback
if self._threaded:
args = {}
l = locals()
run_params = ["amt", "fps", "sleep",
"max_steps", "untilComplete", "max_cycles", "seconds"]
for p in run_params:
if p in l:
args[p] = l[p]
self._thread = animThread(self, args)
self._thread.start()
if joinThread:
self._thread.join()
else:
self._run(amt, fps, sleep, max_steps, untilComplete, max_cycles, seconds)
RUN_PARAMS = [{
"id": "amt",
"label": "Step Amount",
"type": "int",
"min": 1,
"default": 1,
"help": "Amount to step animation by on each frame. May not be used on some animations."
}, {
"id": "fps",
"label": "Framerate",
"type": "int",
"default": 15,
"min": 1,
"help": "Framerate at which to run animation."
}, {
"id": "seconds",
"label": "Run Seconds",
"type": "int",
"default": None,
"min": 0,
"help": "Number of seconds to run animation for, based on framerate."
}, {
"id": "max_steps",
"label": "Max Frames",
"type": "int",
"min": 0,
"default": 0,
"help": "Total frames to run before stopping."
}, {
"id": "untilComplete",
"label": "Until Complete",
"type": "bool",
"default": False,
"help": "Run until animation marks itself as complete. If supported."
}, {
"id": "max_cycles",
"label": "Max Cycles",
"type": "int",
"min": 1,
"default": 1,
"help": "If Until Complete is set, animation will repeat this many times."
}, ]
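    # Usage sketch (assumes `led` is an LEDStrip/LEDMatrix instance and MyAnim is a
    # hypothetical subclass implementing step()); the keyword arguments correspond
    # to the RUN_PARAMS entries above:
    #
    #   anim = MyAnim(led)
    #   anim.run(fps=30, max_steps=300, threaded=True)
    #   ...
    #   anim.stopThread(wait=True)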
class OffAnim(BaseAnimation):
def __init__(self, led, timeout=10):
super(OffAnim, self).__init__(led)
self._internalDelay = timeout * 1000
def step(self, amt=1):
self._led.all_off()
class AnimationQueue(BaseAnimation):
def __init__(self, led, anims=None):
super(AnimationQueue, self).__init__(led)
self.anims = anims or []
self.curAnim = None
self.animIndex = 0
self._internalDelay = 0 # never wait
self.fps = None
self.untilComplete = False
# overriding to handle all the animations
def stopThread(self, wait=False):
for a, r in self.anims:
            # a bit of a hack: the queued animations aren't threaded, but this stops them anyway
a._stopEvent.set()
super(AnimationQueue, self).stopThread(wait)
def addAnim(self, anim, amt=1, fps=None, max_steps=0, untilComplete=False, max_cycles=0, seconds=None):
a = (
anim,
{
"amt": amt,
"fps": fps,
"max_steps": max_steps,
"untilComplete": untilComplete,
"max_cycles": max_cycles,
"seconds": seconds
}
)
self.anims.append(a)
def preRun(self, amt=1):
if len(self.anims) == 0:
raise Exception("Must provide at least one animation.")
self.animIndex = -1
def run(self, amt=1, fps=None, sleep=None, max_steps=0, untilComplete=False, max_cycles=0, threaded=False, joinThread=False, callback=None, seconds=None):
self.fps = fps
self.untilComplete = untilComplete
super(AnimationQueue, self).run(amt=1, fps=None, sleep=None, max_steps=0, untilComplete=untilComplete,
max_cycles=0, threaded=threaded, joinThread=joinThread, callback=callback, seconds=seconds)
def step(self, amt=1):
self.animIndex += 1
if self.animIndex >= len(self.anims):
if self.untilComplete:
self.animComplete = True
else:
self.animIndex = 0
if not self.animComplete:
self.curAnim = self.anims[self.animIndex]
anim, run = self.curAnim
run.update(threaded=False, joinThread=False, callback=None)
run['fps'] = run.get('fps') or self.fps
anim.run(**(run))
RUN_PARAMS = [{
"id": "fps",
"label": "Default Framerate",
"type": "int",
"default": None,
"min": 1,
"help": "Default framerate to run all animations in queue."
}, {
"id": "untilComplete",
"label": "Until Complete",
"type": "bool",
"default": False,
"help": "Run until animation marks itself as complete. If supported."
}]
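    # Queue usage sketch (anim_a and anim_b are hypothetical BaseAnimation
    # instances); addAnim stores per-animation run() arguments and step() plays
    # them back in order:
    #
    #   queue = AnimationQueue(led)
    #   queue.addAnim(anim_a, fps=20, max_steps=100)
    #   queue.addAnim(anim_b, untilComplete=True, max_cycles=2)
    #   queue.run(fps=30, untilComplete=True)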
class BaseStripAnim(BaseAnimation):
def __init__(self, led, start=0, end=-1):
super(BaseStripAnim, self).__init__(led)
if not isinstance(led, LEDStrip):
raise RuntimeError("Must use LEDStrip with Strip Animations!")
self._start = max(start, 0)
self._end = end
if self._end < 0 or self._end > self._led.lastIndex:
self._end = self._led.lastIndex
self._size = self._end - self._start + 1
class BaseMatrixAnim(BaseAnimation):
def __init__(self, led, width=0, height=0, startX=0, startY=0):
super(BaseMatrixAnim, self).__init__(led)
if not isinstance(led, LEDMatrix):
raise RuntimeError("Must use LEDMatrix with Matrix Animations!")
self.width = width or led.width
self.height = height or led.height
self.startX = startX
self.startY = startY
class BaseGameAnim(BaseMatrixAnim):
def __init__(self, led, inputDev):
super(BaseGameAnim, self).__init__(led)
self._input_dev = inputDev
self._keys = None
self._lastKeys = None
self._speedStep = 0
self._speeds = {}
self._keyfuncs = {}
def _exit(self, type, value, traceback):
if hasattr(self._input_dev, 'setLightsOff'):
self._input_dev.setLightsOff(5)
self._input_dev.close()
def setSpeed(self, name, speed):
self._speeds[name] = speed
def getSpeed(self, name):
return self._speeds.get(name)
def _checkSpeed(self, speed):
return not (self._speedStep % speed)
def checkSpeed(self, name):
return name in self._speeds and self._checkSpeed(self._speeds[name])
def addKeyFunc(self, key, func, speed=1, hold=True):
if not isinstance(key, list):
key = [key]
for k in key:
self._keyfuncs[k] = d({
"func": func,
"speed": speed,
"hold": hold,
"last": False,
"inter": False
})
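    # e.g. self.addKeyFunc(["BTN_UP", "BTN_W"], self.move_up, speed=2, hold=False)
    # registers move_up (a hypothetical handler) for both keys; with hold=False it
    # fires once per fresh press, throttled to every 2nd _speedStep by _checkSpeed.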
def handleKeys(self):
kf = self._keyfuncs
for key in self._keys:
val = self._keys[key]
if key in kf:
cfg = kf[key]
speedPass = self._checkSpeed(cfg.speed)
if cfg.hold:
if speedPass:
if (val or cfg.inter):
cfg.func()
else:
cfg.inter = cfg.last = val
elif speedPass:
if (val or cfg.inter) and not cfg.last:
cfg.func()
cfg.inter = cfg.last = val
else:
cfg.inter |= val
self._lastKeys = self._keys
def preStep(self, amt):
pass
def postStep(self, amt):
self._speedStep += 1
class BaseCircleAnim(BaseAnimation):
def __init__(self, led):
super(BaseCircleAnim, self).__init__(led)
if not isinstance(led, LEDCircle):
raise RuntimeError("Must use LEDCircle with Circle Animations!")
self.rings = led.rings
self.ringCount = led.ringCount
self.lastRing = led.lastRing
self.ringSteps = led.ringSteps
class StripChannelTest(BaseStripAnim):
def __init__(self, led):
super(StripChannelTest, self).__init__(led)
self._internalDelay = 500
self.colors = [colors.Red, colors.Green, colors.Blue, colors.White]
def step(self, amt=1):
self._led.set(0, colors.Red)
self._led.set(1, colors.Green)
self._led.set(2, colors.Green)
self._led.set(3, colors.Blue)
self._led.set(4, colors.Blue)
self._led.set(5, colors.Blue)
color = self._step % 4
self._led.fill(self.colors[color], 7, 9)
self._step += 1
class MatrixChannelTest(BaseMatrixAnim):
def __init__(self, led):
super(MatrixChannelTest, self).__init__(led, 0, 0)
self._internalDelay = 500
self.colors = [colors.Red, colors.Green, colors.Blue, colors.White]
def step(self, amt=1):
self._led.drawLine(0, 0, 0, self.height - 1, colors.Red)
self._led.drawLine(1, 0, 1, self.height - 1, colors.Green)
self._led.drawLine(2, 0, 2, self.height - 1, colors.Green)
self._led.drawLine(3, 0, 3, self.height - 1, colors.Blue)
self._led.drawLine(4, 0, 4, self.height - 1, colors.Blue)
self._led.drawLine(5, 0, 5, self.height - 1, colors.Blue)
color = self._step % 4
self._led.fillRect(7, 0, 3, self.height, self.colors[color])
self._step += 1
class MatrixCalibrationTest(BaseMatrixAnim):
def __init__(self, led):
super(MatrixCalibrationTest, self).__init__(led, 0, 0)
self._internalDelay = 500
self.colors = [colors.Red, colors.Green, colors.Green,
colors.Blue, colors.Blue, colors.Blue]
def step(self, amt=1):
self._led.all_off()
i = self._step % self.width
for x in range(i + 1):
c = self.colors[x % len(self.colors)]
self._led.drawLine(x, 0, x, i, c)
self.animComplete = (i == (self.width - 1))
self._step += 1
|
|
"""Test SQLAlchemy form extensions."""
import pytest
import mock
from django import forms
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, Integer, String
from sqlalchemy import create_engine
import forms2
from forms2 import sqlalchemy
from tests import record
def _deep_instance():
ret = record('a', 'c')
ret.a = record('b')
ret.a.b = 'val4'
ret.c = record('d')
ret.c.d = 'val5'
return ret
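# For reference: model_to_dict(_deep_instance(), ['a.b', 'c.d']) is expected to
# follow the dotted attribute paths and return {'a.b': 'val4', 'c.d': 'val5'},
# which is exactly what test_model_to_dict_deep below asserts.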
@pytest.mark.parametrize(
['model', 'result'],
[
(record('x', 'y'), dict(x='val1', y='val2')),
(record('z', 'f'), dict(z='val1', f='val2')),
])
def test_model_to_dict(model, result):
"""Test model to dict conversion."""
assert sqlalchemy.model_to_dict(model(**result), result.keys()) == result
@pytest.mark.parametrize(
['instance', 'result'],
[
(
_deep_instance(),
{
'a.b': 'val4',
'c.d': 'val5',
}
),
]
)
def test_model_to_dict_deep(instance, result):
"""Test model to dict recursive."""
assert sqlalchemy.model_to_dict(instance, result.keys()) == result
@pytest.mark.parametrize(
['model', 'result'],
[
(record('x', 'y'), dict(x='val1', y='val2')),
(record('z', 'f'), dict(z='val3', f='val3')),
])
def test_dict_to_model(model, result):
"""Test dict to model conversion."""
instance = model(**dict(((key, '') for (key, value) in result.items())))
sqlalchemy.dict_to_model(instance, result)
assert instance._asdict() == result
@pytest.mark.parametrize(
['instance', 'result'],
[
(
_deep_instance(),
{
'a.b': 'val4',
'c.d': 'val5'
}
),
]
)
def test_dict_to_model_deep(instance, result):
"""Test dict to model conversion recursive."""
sqlalchemy.dict_to_model(instance, result)
assert sqlalchemy.model_to_dict(instance, result.keys()) == result
@pytest.fixture
def model_class():
"""Create test model class."""
Base = declarative_base()
class TestModel(Base):
__tablename__ = 'test_table'
id = Column(Integer, primary_key=True)
name = Column(String)
fullname = Column(String)
password = Column(String)
        def save(self):
            pass
return TestModel
@pytest.fixture
def model(model_class):
"""Create test model instance."""
return model_class(id=1)
@pytest.fixture
def model_form_class(model_class):
"""Create test model form class."""
class TestModelForm(forms2.SAModelForm):
class Meta:
model = model_class
return TestModelForm
@pytest.fixture
def query(model_class, session):
"""Create SQLAlchemy Query for model_class."""
return session.query(model_class)
@pytest.fixture
def session(request, engine):
"""SQLAlchemy session."""
session = sessionmaker(bind=engine)()
return session
@pytest.fixture
def engine(request, model_class):
"""SQLAlchemy engine."""
engine = create_engine('sqlite:///:memory:')
model_class.metadata.create_all(engine)
return engine
def test_model_form(monkeypatch, model_form_class, model_class, model):
"""Test SAModelForm."""
form = model_form_class(instance=model, data={})
assert form.is_valid()
with mock.patch.object(model_class, 'save') as mocked:
form.save()
mocked.assert_called_once()
def test_model_form_without_instance(model_form_class, model_class, model):
"""Test SAModelForm without an instance."""
form = model_form_class(data={})
assert form.is_valid()
with mock.patch.object(model_class, 'save') as mocked:
form.save()
mocked.assert_called_once()
@pytest.mark.parametrize('lazy', [True, False])
def test_model_choice_field(query, model, session, lazy):
"""Test ModelChoiceField."""
field = sqlalchemy.ModelChoiceField(query)
assert field.label_from_instance(model) == repr(model)
field = sqlalchemy.ModelChoiceField(
query if not lazy else lambda field: query, label_from_instance=lambda x: str(x.id))
assert field.label_from_instance(model) == '1'
assert field.primary_key.name == 'id'
assert field.prepare_value(model) == 1
assert field.to_python(None) is None
session.add(model)
session.commit()
with pytest.raises(forms.ValidationError) as exc:
field.to_python(-1)
assert '%(' not in sqlalchemy.force_text(exc.value.message)
assert field.to_python(1) == model
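# Form usage sketch (PersonForm is hypothetical): ModelChoiceField accepts either
# a Query or a callable returning one, as exercised in test_model_choice_field above.
#
#   class PersonForm(forms.Form):
#       person = sqlalchemy.ModelChoiceField(
#           lambda field: session.query(model_class),
#           label_from_instance=lambda obj: str(obj.id))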
def test_model_multiple_choice_field(query, model, session):
"""Test ModelMultipleChoiceField."""
field = sqlalchemy.ModelMultipleChoiceField(query)
with pytest.raises(forms.ValidationError):
field.clean(None)
with pytest.raises(forms.ValidationError):
field.clean([1])
session.add(model)
session.commit()
assert field.clean([1]) == [model]
assert field.prepare_value(model) == 1
assert field.prepare_value([model]) == [1]
|
|
from __future__ import absolute_import, unicode_literals
from datetime import date
from django import forms
from django.conf import settings
from django.contrib.admin.options import (ModelAdmin, TabularInline,
HORIZONTAL, VERTICAL)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.contrib.admin import (SimpleListFilter,
BooleanFieldListFilter)
from django.core.exceptions import ImproperlyConfigured
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import TestCase
from django.test.utils import str_prefix
from django.utils import unittest, six
from .models import Band, Concert, ValidationTestModel, ValidationTestInlineModel
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'bio', 'sign_date'])
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class BandAdmin(ModelAdmin):
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['name', 'bio']})]
ma = BandAdmin(Band, self.site)
form = ma.get_form(None)
self.assertEqual(form._meta.fields, ['name', 'bio'])
class InlineBandAdmin(TabularInline):
model = Concert
fk_name = 'main_band'
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['day', 'transport']})]
ma = InlineBandAdmin(Band, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['day', 'transport'])
def test_lookup_allowed_allows_nonexistent_lookup(self):
"""
Ensure that a lookup_allowed allows a parameter
whose field lookup doesn't exist.
Refs #21129.
"""
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value'))
def test_field_arguments(self):
        # If we specify the fields argument, fieldsets_add and fieldsets_change should
        # just stick the fields into a formsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
# If we specify fields or fieldsets, it should exclude fields on the Form class
# to the fields specified. This may cause errors to be raised in the db layer if
# required model fields arent in fields/fieldsets, but that's preferable to
# ghost errors where you have a field in your Form class that isn't being
# displayed because you forgot to add it to fields/fieldsets
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields),
['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name'])
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `ModelAdmin.readonly_fields` and when no
`ModelAdmin.exclude` is defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets(request))[0]().forms[0].fields),
['main_band', 'opening_band', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is overridden if
`ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['bio', 'sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets(request))[0]().forms[0].fields),
['main_band', 'opening_band', 'day', 'id', 'DELETE'])
def test_custom_form_validation(self):
# If we specify a form, it should use it allowing custom validation to work
# properly. This won't, however, break any of the admin widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(
type(ma.get_form(request).base_fields['sign_date'].widget),
AdminDateWidget)
def test_form_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `ModelAdmin.get_form()`
overrides all other declarations. Refs #8999.
"""
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date']
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super(BandAdmin, self).get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['name', 'sign_date'])
def test_formset_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
overrides all other declarations. Refs #8999.
"""
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets(request))[0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE'])
def test_queryset_override(self):
# If we need to override the queryset of a ModelChoiceField in our custom form
# make sure that RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
band2.save()
class ConcertAdmin(ModelAdmin):
pass
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<select name="main_band" id="id_main_band">\n'
'<option value="" selected="selected">---------</option>\n'
'<option value="%d">The Beatles</option>\n'
'<option value="%d">The Doors</option>\n'
'</select>' % (band2.id, self.band.id))
class AdminConcertForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AdminConcertForm, self).__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(str(form["main_band"]),
'<select name="main_band" id="id_main_band">\n'
'<option value="" selected="selected">---------</option>\n'
'<option value="%d">The Doors</option>\n'
'</select>' % self.band.id)
def test_regression_for_ticket_15820(self):
"""
Ensure that `obj` is passed from `InlineModelAdmin.get_fieldsets()` to
`InlineModelAdmin.get_formset()`.
"""
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
concert = Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey
# and fields with choices specified ought to be a basic Select widget.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
# they need to be handled properly when type checking. For Select fields, all of
# the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# RadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields),
['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets(request))[0]().forms[0].fields),
['extra', 'transport', 'id', 'DELETE', 'main_band'])
class ValidationTests(unittest.TestCase):
def test_validation_only_runs_in_debug(self):
# Ensure validation only runs when DEBUG = True
try:
settings.DEBUG = True
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
site = AdminSite()
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
site.register,
ValidationTestModel,
ValidationTestModelAdmin,
)
finally:
settings.DEBUG = False
site = AdminSite()
site.register(ValidationTestModel, ValidationTestModelAdmin)
def test_raw_id_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields\[0\]', 'name' must be either a ForeignKey or ManyToManyField.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_fieldsets_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ({},)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ((),)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]' does not have exactly two elements.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", ()),)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]\[1\]' must be a dictionary.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {}),)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'fields' key is required in ValidationTestModelAdmin.fieldsets\[0\]\[1\] field options dict.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("name",)}),)
ValidationTestModelAdmin.validate(ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("name",)}),)
fields = ["name",]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"Both fieldsets and fields are specified in ValidationTestModelAdmin.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"There are duplicate field\(s\) in ValidationTestModelAdmin.fieldsets",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fields = ["name", "name"]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"There are duplicate field\(s\) in ValidationTestModelAdmin.fields",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
def test_form_validation(self):
class FakeForm(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
form = FakeForm
six.assertRaisesRegex(self,
ImproperlyConfigured,
"ValidationTestModelAdmin.form does not inherit from BaseModelForm.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
BandAdmin.validate(Band)
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
BandAdmin.validate(Band)
def test_filter_vertical_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("non_existent_field",)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("name",)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical\[0\]' must be a ManyToManyField.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_filter_horizontal_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("non_existent_field",)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("name",)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal\[0\]' must be a ManyToManyField.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_radio_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = ()
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields' must be a dictionary.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"non_existent_field": None}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"name": None}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields\['name'\]' is neither an instance of ForeignKey nor does have choices set.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields\['state'\]' is neither admin.HORIZONTAL nor admin.VERTICAL.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_prepopulated_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = ()
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields' must be a dictionary.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"non_existent_field": None}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ("non_existent_field",)}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields\['slug'\]\[0\]' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"users": ("name",)}
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields\['users'\]' is either a DateTimeField, ForeignKey or ManyToManyField. This isn't allowed.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_list_display_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
str_prefix("ValidationTestModelAdmin.list_display\[0\], %(_)s'non_existent_field' is not a callable or an attribute of 'ValidationTestModelAdmin' or found in the model 'ValidationTestModel'."),
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('users',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display\[0\]', 'users' is a ManyToManyField which is not supported.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_list_display_links_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'non_existent_field' which is not defined in 'list_display'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('name',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'name' which is not defined in 'list_display'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_list_filter_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]' refers to 'non_existent_field' which does not refer to a Field.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (RandomClass,)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]' is 'RandomClass' which is not a descendant of ListFilter.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', RandomClass),)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]\[1\]' is 'RandomClass' which is not of type FieldListFilter.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', AwesomeFilter),)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]\[1\]' is 'AwesomeFilter' which is not of type FieldListFilter.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (BooleanFieldListFilter,)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]' is 'BooleanFieldListFilter' which is of type FieldListFilter but is not associated with a field name.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
# Valid declarations below -----------
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_list_per_page_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 'hello'
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_per_page' should be a int.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 100
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_max_show_all_allowed_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 'hello'
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_max_show_all' should be a int.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 200
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_search_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
search_fields = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.search_fields' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
def test_date_hierarchy_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.date_hierarchy' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.date_hierarchy is neither an instance of DateField nor DateTimeField.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_ordering_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering\[0\]' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering' has the random ordering marker '\?', but contains other fields as well. Please either remove '\?' or the other fields.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?',)
ValidationTestModelAdmin.validate(ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('band__name',)
ValidationTestModelAdmin.validate(ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('name',)
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_list_select_related_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = 1
six.assertRaisesRegex(
self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_select_related' should be either a "
"bool, a tuple or a list",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = False
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_save_as_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = 1
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.save_as' should be a bool.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
save_as = True
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_save_on_top_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = 1
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.save_on_top' should be a bool.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = True
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_inlines_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
inlines = 10
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestInline(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines\[0\]' does not inherit from BaseModelAdmin.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'model' is a required attribute of 'ValidationTestModelAdmin.inlines\[0\]'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class SomethingBad(object):
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines\[0\].model' does not inherit from models.Model.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_fields_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestInline.fields' must be a list or tuple.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
def test_fk_name_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "non_existent_field"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestInline.fk_name' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestInlineModel'.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "parent"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_extra_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestInline.extra' should be a int.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_max_num_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestInline.max_num' should be a int.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
ValidationTestModelAdmin.validate(ValidationTestModel)
def test_formset_validation(self):
class FakeFormSet(object):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
six.assertRaisesRegex(self,
ImproperlyConfigured,
"'ValidationTestInline.formset' does not inherit from BaseModelFormSet.",
ValidationTestModelAdmin.validate,
ValidationTestModel,
)
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
ValidationTestModelAdmin.validate(ValidationTestModel)
|
|
#!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic API for reading/writing small numbers of records."""
__author__ = 'kpy@google.com (Ka-Ping Yee)'
import calendar
import csv
import logging
import re
import StringIO
import xml.dom.minidom
import django.utils.html
from google.appengine import runtime
import external_search
import importer
import indexing
import model
import pfif
import simplejson
import subscribe
import utils
from model import Person, Note, ApiActionLog
from text_query import TextQuery
from utils import Struct
HARD_MAX_RESULTS = 200 # Clients can ask for more, but won't get more.
class InputFileError(Exception):
pass
def get_requested_formats(path):
"""Returns a list of requested formats.
The possible values are 'persons' and 'notes'."""
format = path.split('/')[-1]
if format in ['persons', 'notes']:
return [format]
return ['persons', 'notes']
def complete_record_ids(record, domain):
"""Ensures that a record's record_id fields are prefixed with a domain."""
def complete(record, field):
id = record.get(field)
if id and '/' not in id:
record[field] = '%s/%s' % (domain, id)
complete(record, 'person_record_id')
complete(record, 'note_record_id')
return record
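# Illustrative sketch, not part of the original module: a bare id gains the
# given domain prefix, while an id that already contains '/' is left alone.
# The domain and id values below are hypothetical.
def _complete_record_ids_example():
    record = {'person_record_id': '123',
              'note_record_id': 'other.example.org/456'}
    return complete_record_ids(record, 'api.example.org')
    # -> {'person_record_id': 'api.example.org/123',
    #     'note_record_id': 'other.example.org/456'}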
def get_tag_params(handler):
"""Return HTML tag parameters used in import.html."""
return {
'begin_notes_template_link':
'<a href="%s/notes-template.xlsx">' %
django.utils.html.escape(handler.env.global_url),
'end_notes_template_link':
'</a>',
'begin_sample_anchor_tag':
'<a href="%s/sample-import.csv" target="_blank">' %
django.utils.html.escape(handler.env.global_url),
'end_sample_anchor_tag':
'</a>',
'begin_document_anchor_tag':
'<a href='
'"https://code.google.com/p/googlepersonfinder/wiki/ImportCSV" '
'target="_blank">',
'end_document_anchor_tag':
'</a>',
}
def generate_note_record_ids(records):
for record in records:
if not record.get('note_record_id', '').strip():
record['note_record_id'] = str(model.UniqueId.create_id())
yield record
def convert_time(text, offset):
"""Converts a textual date and time into an RFC 3339 UTC timestamp."""
if utils.DATETIME_RE.match(text.strip()): # don't apply offset
return text
match = re.search(r'(\d\d\d\d)[/-](\d+)[/-](\d+) *(\d+):(\d+)', text)
if match:
y, l, d, h, m = map(int, match.groups())
timestamp = calendar.timegm((y, l, d, h, m, 0)) - offset*3600
return utils.format_utc_timestamp(timestamp)
return text # keep the original text so it shows up in the error message
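# Illustrative sketch, not part of the original module: the second argument is
# the local offset from UTC in hours, so a UTC+9 local time is shifted back
# nine hours before being formatted by utils.format_utc_timestamp.  Text that
# already matches utils.DATETIME_RE, or that the regex cannot parse at all, is
# returned unchanged.
def _convert_time_example():
    shifted = convert_time('2011-03-11 14:46', 9)  # 05:46 UTC on the same day
    untouched = convert_time('no date here', 9)    # returned as-is
    return shifted, untouched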
def convert_time_fields(rows, default_offset=0):
"""Filters CSV rows, converting time fields to RFC 3339 UTC times.
The first row that contains "person_record_id" is assumed to be the header
row containing field names. Preceding rows are treated as a preamble.
If the text "time_zone_offset" is found in the preamble section, the cell
immediately below it is treated as a time zone offset from UTC in hours.
Otherwise default_offset is used as the time zone offset.
Rows below the header row are returned as dictionaries (as csv.DictReader
would), except that any "*_date" fields are parsed as local times,
converted to UTC according to the specified offset, and reformatted
as RFC 3339 UTC times.
"""
field_names = []
time_fields = []
setting_names = []
settings = {}
offset = default_offset
for row in rows:
if field_names:
record = dict(zip(field_names, row))
for key in time_fields:
record[key] = convert_time(record[key], offset)
yield record
elif 'person_record_id' in row:
field_names = [name.lower().strip() for name in row]
time_fields = [name for name in row if name.endswith('_date')]
if 'time_zone_offset' in settings:
try:
offset = float(settings['time_zone_offset'])
except ValueError:
raise InputFileError('invalid time_zone_offset value')
else:
settings.update(dict(zip(setting_names, row)))
setting_names = [name.lower().strip() for name in row]
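# Illustrative sketch, not part of the original module, of the preamble
# convention described in the docstring above: the cell directly below
# "time_zone_offset" supplies the offset, the row containing
# "person_record_id" is the header, and *_date cells in later rows are
# converted from that local offset to UTC.
def _convert_time_fields_example():
    rows = [
        ['time_zone_offset', ''],                # preamble label row
        ['9', ''],                               # offset value below the label
        ['person_record_id', 'source_date'],     # header row
        ['example.org/p1', '2011-03-11 14:46'],  # data row in local time
    ]
    return list(convert_time_fields(rows))
    # -> [{'person_record_id': 'example.org/p1',
    #      'source_date': <RFC 3339 string for 05:46 UTC>}]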
class Import(utils.BaseHandler):
https_required = True
def get(self):
self.render('import.html',
formats=get_requested_formats(self.env.path),
params=self.params,
**get_tag_params(self))
def post(self):
if not (self.auth and self.auth.domain_write_permission):
# TODO(ryok): i18n
self.error(403, message='Missing or invalid authorization key.')
return
content = self.request.get('content')
if not content:
self.error(400, message='Please specify at least one CSV file.')
return
try:
lines = content.splitlines() # handles \r, \n, or \r\n
if self.request.get('format') == 'notes':
self.import_notes(lines)
else:
self.import_persons(lines)
except InputFileError, e:
self.error(400, message='Problem in the uploaded file: %s' % e)
except runtime.DeadlineExceededError, e:
self.error(400, message=
'Sorry, the uploaded file is too large. Try splitting it into '
'smaller files (keeping the header rows in each file) and '
'uploading each part separately.')
def import_notes(self, lines):
source_domain = self.auth.domain_write_permission
records = importer.utf8_decoder(generate_note_record_ids(
convert_time_fields(csv.reader(lines))))
try:
records = [complete_record_ids(r, source_domain) for r in records]
except csv.Error, e:
self.error(400, message=
'The CSV file is formatted incorrectly. (%s)' % e)
return
notes_written, notes_skipped, notes_total = importer.import_records(
self.repo, source_domain, importer.create_note, records,
believed_dead_permission=self.auth.believed_dead_permission,
omit_duplicate_notes=True)
utils.log_api_action(self, ApiActionLog.WRITE,
0, notes_written, 0, len(notes_skipped))
self.render('import.html',
formats=get_requested_formats(self.env.path),
params=self.params,
stats=[
Struct(type='Note',
written=notes_written,
skipped=notes_skipped,
total=notes_total)],
**get_tag_params(self))
def import_persons(self, lines):
# TODO(ryok): support non-UTF8 encodings.
source_domain = self.auth.domain_write_permission
records = importer.utf8_decoder(convert_time_fields(csv.reader(lines)))
try:
records = [complete_record_ids(r, source_domain) for r in records]
except csv.Error, e:
self.error(400, message=
'The CSV file is formatted incorrectly. (%s)' % e)
return
is_not_empty = lambda x: (x or '').strip()
persons = [r for r in records if is_not_empty(r.get('full_name'))]
notes = [r for r in records if is_not_empty(r.get('note_record_id'))]
people_written, people_skipped, people_total = importer.import_records(
self.repo, source_domain, importer.create_person, persons)
notes_written, notes_skipped, notes_total = importer.import_records(
self.repo, source_domain, importer.create_note, notes,
believed_dead_permission=self.auth.believed_dead_permission)
utils.log_api_action(self, ApiActionLog.WRITE,
people_written, notes_written,
len(people_skipped), len(notes_skipped))
self.render('import.html',
formats=get_requested_formats(self.env.path),
params=self.params,
stats=[
Struct(type='Person',
written=people_written,
skipped=people_skipped,
total=people_total),
Struct(type='Note',
written=notes_written,
skipped=notes_skipped,
total=notes_total)],
**get_tag_params(self))
class Read(utils.BaseHandler):
https_required = True
def get(self):
if self.config.read_auth_key_required and not (
self.auth and self.auth.read_permission):
self.info(
403,
message='Missing or invalid authorization key',
style='plain')
return
pfif_version = self.params.version
# Note that self.request.get can handle multiple IDs at once; we
# can consider adding support for multiple records later.
record_id = self.request.get('id')
if not record_id:
self.info(400, message='Missing id parameter', style='plain')
return
person = model.Person.get(
self.repo, record_id, filter_expired=False)
if not person:
self.info(
400,
message='No person record with ID %s' % record_id,
style='plain')
return
notes = model.Note.get_by_person_record_id(self.repo, record_id)
notes = [note for note in notes if not note.hidden]
self.response.headers['Content-Type'] = 'application/xml'
records = [pfif_version.person_to_dict(person, person.is_expired)]
note_records = map(pfif_version.note_to_dict, notes)
utils.optionally_filter_sensitive_fields(records, self.auth)
utils.optionally_filter_sensitive_fields(note_records, self.auth)
pfif_version.write_file(
self.response.out, records, lambda p: note_records)
utils.log_api_action(
self, ApiActionLog.READ, len(records), len(notes))
class Write(utils.BaseHandler):
https_required = True
def post(self):
if not (self.auth and self.auth.domain_write_permission):
self.info(
403,
message='Missing or invalid authorization key',
style='plain')
return
source_domain = self.auth.domain_write_permission
try:
person_records, note_records = \
pfif.parse_file(self.request.body_file)
except Exception, e:
self.info(400, message='Invalid XML: %s' % e, style='plain')
return
mark_notes_reviewed = bool(self.auth.mark_notes_reviewed)
believed_dead_permission = bool(
self.auth.believed_dead_permission)
self.response.headers['Content-Type'] = 'application/xml'
self.write('<?xml version="1.0"?>\n')
self.write('<status:status>\n')
create_person = importer.create_person
num_people_written, people_skipped, total = importer.import_records(
self.repo, source_domain, create_person, person_records)
self.write_status(
'person', num_people_written, people_skipped, total,
'person_record_id')
create_note = importer.create_note
num_notes_written, notes_skipped, total = importer.import_records(
self.repo, source_domain, create_note, note_records,
mark_notes_reviewed, believed_dead_permission, self)
self.write_status(
'note', num_notes_written, notes_skipped, total, 'note_record_id')
self.write('</status:status>\n')
utils.log_api_action(self, ApiActionLog.WRITE,
num_people_written, num_notes_written,
len(people_skipped), len(notes_skipped))
def write_status(self, type, written, skipped, total, id_field):
"""Emit status information about the results of an attempted write."""
skipped_records = []
for error, record in skipped:
skipped_records.append(
' <pfif:%s>%s</pfif:%s>\n' %
(id_field, record.get(id_field, ''), id_field))
skipped_records.append(
' <status:error>%s</status:error>\n' % error)
self.write('''
<status:write>
<status:record_type>pfif:%s</status:record_type>
<status:parsed>%d</status:parsed>
<status:written>%d</status:written>
<status:skipped>
%s
</status:skipped>
</status:write>
''' % (type, total, written, ''.join(skipped_records).rstrip()))
class Search(utils.BaseHandler):
https_required = False
def get(self):
if self.config.search_auth_key_required and not (
self.auth and self.auth.search_permission):
self.info(
403,
message='Missing or invalid authorization key',
style='plain')
return
pfif_version = self.params.version
# Retrieve parameters and do some sanity checks on them.
record_id = self.request.get('id')
query_string = self.request.get('q')
max_results = min(self.params.max_results or 100, HARD_MAX_RESULTS)
results = []
if record_id:
# Search by record ID (always returns just 1 result or nothing).
person = model.Person.get(self.repo, record_id)
if person:
results = [person]
elif query_string:
# Search by query words.
query = TextQuery(query_string)
if self.config.external_search_backends:
results = external_search.search(self.repo, query, max_results,
self.config.external_search_backends)
# External search backends are not always complete. Fall back to
# the original search when they fail or return no results.
if not results:
results = indexing.search(self.repo, query, max_results)
else:
self.info(
400,
message='Neither id nor q parameter specified',
style='plain')
records = [pfif_version.person_to_dict(result) for result in results]
utils.optionally_filter_sensitive_fields(records, self.auth)
# Define the function to retrieve notes for a person.
def get_notes_for_person(person):
notes = model.Note.get_by_person_record_id(
self.repo, person['person_record_id'])
notes = [note for note in notes if not note.hidden]
records = map(pfif_version.note_to_dict, notes)
utils.optionally_filter_sensitive_fields(records, self.auth)
return records
self.response.headers['Content-Type'] = 'application/xml'
pfif_version.write_file(
self.response.out, records, get_notes_for_person)
utils.log_api_action(self, ApiActionLog.SEARCH, len(records))
class Subscribe(utils.BaseHandler):
https_required = True
def post(self):
if not (self.auth and self.auth.subscribe_permission):
return self.error(403, 'Missing or invalid authorization key')
if not subscribe.is_email_valid(self.params.subscribe_email):
return self.error(400, 'Invalid email address')
person = model.Person.get(self.repo, self.params.id)
if not person:
return self.error(400, 'Invalid person_record_id')
subscription = subscribe.subscribe_to(self, self.repo, person,
self.params.subscribe_email,
self.env.lang)
utils.log_api_action(self, ApiActionLog.SUBSCRIBE)
if not subscription:
return self.info(200, 'Already subscribed')
return self.info(200, 'Successfully subscribed')
class Unsubscribe(utils.BaseHandler):
https_required = True
def post(self):
if not (self.auth and self.auth.subscribe_permission):
return self.error(403, 'Missing or invalid authorization key')
subscription = model.Subscription.get(self.repo, self.params.id,
self.params.subscribe_email)
self.response.set_status(200)
utils.log_api_action(self, ApiActionLog.UNSUBSCRIBE)
if subscription:
subscription.delete()
return self.info(200, 'Successfully unsubscribed')
return self.info(200, 'Not subscribed')
def fetch_all(query):
results = []
batch = query.fetch(500)
while batch:
results += batch
batch = query.with_cursor(query.cursor()).fetch(500)
return results
class Stats(utils.BaseHandler):
def get(self):
if not (self.auth and self.auth.stats_permission):
self.info(
403,
message='Missing or invalid authorization key',
style='plain')
return
person_counts = model.Counter.get_all_counts(self.repo, 'person')
note_counts = model.Counter.get_all_counts(self.repo, 'note')
# unreviewed
note_counts['hidden=FALSE,reviewed=FALSE'] = len(fetch_all(
model.Note.all(keys_only=True
).filter('repo =', self.repo
).filter('reviewed =', False
).filter('hidden =', False
).order('-entry_date')))
# accepted
note_counts['hidden=FALSE,reviewed=TRUE'] = len(fetch_all(
model.Note.all(keys_only=True
).filter('repo =', self.repo
).filter('reviewed =', True
).filter('hidden =', False
).order('-entry_date')))
# flagged
note_counts['hidden=TRUE'] = len(fetch_all(
model.Note.all(keys_only=True
).filter('repo =', self.repo
).filter('hidden =', True
).order('-entry_date')))
self.response.headers['Content-Type'] = 'application/json'
self.write(simplejson.dumps({'person': person_counts,
'note': note_counts}))
class HandleSMS(utils.BaseHandler):
https_required = True
repo_required = False
MAX_RESULTS = 3
def post(self):
if not (self.auth and self.auth.search_permission):
self.info(
403,
message=
'"key" URL parameter is either missing, invalid or '
'lacks required permissions. The key\'s repo must be "*" '
'and search_permission must be True.',
style='plain')
return
body = self.request.body_file.read()
doc = xml.dom.minidom.parseString(body)
message_text = self.get_element_text(doc, 'message_text')
receiver_phone_number = self.get_element_text(
doc, 'receiver_phone_number')
if message_text is None:
self.info(
400,
message='message_text element is required.',
style='plain')
return
if receiver_phone_number is None:
self.info(
400,
message='receiver_phone_number element is required.',
style='plain')
return
repo = (
self.config.sms_number_to_repo and
self.config.sms_number_to_repo.get(receiver_phone_number))
if not repo:
self.info(
400,
message=
'The given receiver_phone_number is not found in '
'sms_number_to_repo config.',
style='plain')
return
responses = []
m = re.search(r'^search\s+(.+)$', message_text.strip(), re.I)
if m:
query_string = m.group(1).strip()
query = TextQuery(query_string)
persons = indexing.search(repo, query, HandleSMS.MAX_RESULTS)
if persons:
for person in persons:
responses.append(self.render_person(person))
else:
responses.append('No results found for: %s' % query_string)
responses.append(
'More at: google.org/personfinder/%s?ui=light' % repo)
responses.append(
'All data entered in Person Finder is available to the public '
'and usable by anyone. Google does not review or verify the '
'accuracy of this data google.org/personfinder/global/tos.html')
else:
responses.append('Usage: Search John')
self.response.headers['Content-Type'] = 'application/xml'
self.write(
'<?xml version="1.0" encoding="utf-8"?>\n'
'<response>\n'
' <message_text>%s</message_text>\n'
'</response>\n'
% django.utils.html.escape(' ## '.join(responses)))
def render_person(self, person):
fields = []
fields.append(person.full_name)
if person.latest_status:
# The result of utils.get_person_status_text() may be a Django's
# proxy object for lazy translation. Use unicode() to convert it
# into a unicode object. We must not specify an encoding for
# unicode() in this case.
fields.append(unicode(
utils.get_person_status_text(person)))
if person.sex: fields.append(person.sex)
if person.age: fields.append(person.age)
if person.home_city or person.home_state:
fields.append(
'From: ' +
' '.join(filter(None, [person.home_city, person.home_state])))
return ' / '.join(fields)
def get_element_text(self, doc, tag_name):
elems = doc.getElementsByTagName(tag_name)
if elems:
text = u''
for node in elems[0].childNodes:
if node.nodeType == node.TEXT_NODE:
text += node.data
return text.encode('utf-8')
else:
return None
|
|
# This file is responsible for setting up the execution environment of a droidbox app.
# Here the execution environment includes:
# 1. Static environments: contacts, call logs, SMS, pre-installed apps, etc.
# 2. Dynamic environments: continuous GPS, accelerometer readings, etc.
# The environment should be determined before the app starts running.
# We don't need to set up every environment for each app;
# instead we select a subset according to the app's static analysis result.
import logging
import json
import time
import os
POLICY_NONE = "none"
POLICY_DUMMY = "dummy"
POLICY_STATIC = "static"
DEFAULT_POLICY = POLICY_NONE
class UnknownEnvException(Exception):
pass
class AppEnv(object):
"""
    This class describes an environment attribute of the device
"""
def to_dict(self):
return self.__dict__
def to_json(self):
return json.dumps(self.to_dict())
def __str__(self):
return self.to_dict().__str__()
def deploy(self, device):
"""
deploy this env to device
:param device: Device
"""
raise NotImplementedError
class StaticAppEnv(AppEnv):
"""
This class describes a static environment attribute of device
"""
def deploy(self, device):
raise NotImplementedError
class DynamicAppEnv(AppEnv):
"""
This class describes a dynamic environment attribute of device
usually we need to start a thread for this
"""
def deploy(self, device):
raise NotImplementedError
class ContactAppEnv(StaticAppEnv):
"""
This class describes a contact inside device
"""
def __init__(self, name='Lynn', phone="1234567890", email="droidbot@honeynet.com", env_dict=None):
if env_dict is not None:
self.__dict__ = env_dict
return
self.name = name
self.phone = phone
self.email = email
self.env_type = 'contact'
def deploy(self, device):
"""
add a contact to the device
"""
contact_data = self.__dict__
contact_data.pop('env_type')
return device.add_contact(contact_data)
class SettingsAppEnv(StaticAppEnv):
"""
This class describes settings of device
"""
def __init__(self, table_name="system", name="screen_brightness", value="50", env_dict=None):
if env_dict is not None:
self.__dict__ = env_dict
return
self.table_name = table_name
self.name = name
self.value = value
self.env_type = 'settings'
def deploy(self, device):
return device.change_settings(self.table_name, self.name, self.value)
class CallLogEnv(StaticAppEnv):
"""
call log
"""
def __init__(self, phone="1234567890", call_in=True, accepted=True, env_dict=None):
"""
a call log
:param phone: str, phone number of contact
:param call_in: bool, True for call in, False for call out
:param accepted: whether the call is accepted
"""
if env_dict is not None:
self.__dict__ = env_dict
return
self.phone = phone
self.call_in = call_in
self.accepted = accepted
self.env_type = 'calllog'
def deploy(self, device):
if self.call_in:
return self.deploy_call_in(device)
else:
return self.deploy_call_out(device)
def deploy_call_in(self, device):
"""
deploy call in log event to device
"""
if not device.receive_call(self.phone):
return False
time.sleep(1)
if self.accepted:
device.accept_call(self.phone)
time.sleep(1)
return device.cancel_call(self.phone)
def deploy_call_out(self, device):
"""
deploy call out log event to device
"""
device.call(self.phone)
time.sleep(2)
return device.cancel_call(self.phone)
class DummyFilesEnv(StaticAppEnv):
"""
push dummy files to device
"""
def __init__(self, dummy_files_dir=None):
"""
        :param dummy_files_dir: directory containing the dummy files to push
"""
if dummy_files_dir is None:
import pkg_resources
dummy_files_dir = pkg_resources.resource_filename("droidbot", "resources/dummy_documents")
self.dummy_files_dir = dummy_files_dir
self.env_type = "dummy_files"
def deploy(self, device):
device.push_file(self.dummy_files_dir)
class SMSLogEnv(StaticAppEnv):
"""
SMS log
"""
def __init__(self, phone="1234567890", sms_in=True, content="Hello world", env_dict=None):
"""
        an SMS log
        :param phone: str, phone number of contact
        :param sms_in: bool, True for an incoming message, False for an outgoing one
        :param content: content of message
"""
if env_dict is not None:
self.__dict__ = env_dict
return
self.phone = phone
self.sms_in = sms_in
self.content = content
self.env_type = 'smslog'
def deploy(self, device):
if self.sms_in:
return device.receive_sms(self.phone, self.content)
else:
return device.send_sms(self.phone, self.content)
class GPSAppEnv(DynamicAppEnv):
"""
This class describes the continuous updating GPS data inside device
"""
def __init__(self, center_x=50, center_y=50, delta_x=1, delta_y=1, env_dict=None):
if env_dict is not None:
self.__dict__ = env_dict
return
self.center_x = center_x
self.center_y = center_y
self.delta_x = delta_x
self.delta_y = delta_y
self.env_type = 'gps'
def deploy(self, device):
return device.set_continuous_gps(self.center_x, self.center_y, self.delta_x, self.delta_y)
ENV_TYPES = {
'contact': ContactAppEnv,
'settings': SettingsAppEnv,
'calllog': CallLogEnv,
'smslog': SMSLogEnv,
'gps': GPSAppEnv
}
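# Illustrative sketch, not part of the original module: the mapping above lets
# a dumped env dict be reconstructed from its 'env_type' key, which is the
# convention FileEnvFactory (defined below) relies on.  The contact values are
# made up for the example.
def _env_type_roundtrip_example():
    dumped = ContactAppEnv(name="Alice", phone="5550100").to_dict()
    EnvClass = ENV_TYPES[dumped['env_type']]  # -> ContactAppEnv
    return EnvClass(env_dict=dumped)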
class AppEnvManager(object):
"""
AppEnvManager manages the environment of device in which an app will run.
"""
def __init__(self, device, app, env_policy):
"""
construct a new AppEnvManager instance
:param device: instance of Device
:param app: instance of App
:param env_policy: policy of setting up environment, string
:return:
"""
self.logger = logging.getLogger('AppEnvManager')
self.device = device
self.app = app
self.policy = env_policy
self.envs = []
self.enabled = True
if not self.policy:
self.policy = POLICY_NONE
if self.policy == POLICY_NONE:
self.env_factory = None
elif self.policy == POLICY_DUMMY:
self.env_factory = DummyEnvFactory()
elif self.policy == POLICY_STATIC:
self.env_factory = StaticEnvFactory(app)
else:
self.env_factory = FileEnvFactory(self.policy)
def add_env(self, env):
"""
        add an env to the envs list
        :param env: an env instance; should be a subclass of AppEnv
:return:
"""
self.envs.append(env)
def deploy(self):
"""
deploy the environments to device (Emulator)
:return:
"""
self.logger.info("Start deploying environment, policy is %s" % self.policy)
if self.env_factory is not None:
self.envs = self.generate_from_factory(self.env_factory)
if self.envs is None:
return
for env in self.envs:
if not self.enabled:
break
self.device.add_env(env)
self.logger.debug("Finish deploying environment")
if self.device.output_dir is not None:
out_file = open(os.path.join(self.device.output_dir, "droidbot_env.json"), "w")
self.dump(out_file)
out_file.close()
self.logger.debug("Environment settings saved to droidbot_env.json")
def dump(self, env_file):
"""
dump the environment information to a file
:param env_file: the file to output the environment
:return:
"""
env_array = []
for env in self.envs:
env_array.append(env.to_dict())
env_json = json.dumps(env_array)
env_file.write(env_json)
def generate_from_factory(self, app_env_factory):
"""
generate the environment of app from factory
:param app_env_factory: the AppEnvFactory instance used to generate
:return:
"""
return app_env_factory.produce_envs()
def stop(self):
self.enabled = False
class AppEnvFactory(object):
"""
    This class is responsible for producing a list of static and dynamic AppEnv instances
"""
def produce_envs(self):
return []
class DummyEnvFactory(AppEnvFactory):
"""
    A dummy factory which generates a default list of app environments with dummy values
"""
def produce_envs(self):
"""
        produce a list of dummy environments
"""
envs = [ContactAppEnv(), SettingsAppEnv(), CallLogEnv(), SMSLogEnv(), GPSAppEnv(), DummyFilesEnv()]
return envs
class StaticEnvFactory(AppEnvFactory):
"""
    A factory which generates app-specific environments based on the app's static analysis result
"""
def __init__(self, app):
"""
create a StaticEnvFactory from app analysis result
"""
self.app = app
def produce_envs(self):
"""
generate app-specific envs
"""
envs = []
permissions = self.app.permissions
if 'android.permission.READ_CONTACTS' in permissions:
envs.append(ContactAppEnv())
if 'android.permission.READ_CALL_LOG' in permissions:
envs.append(CallLogEnv())
envs.append(CallLogEnv(call_in=False))
envs.append(CallLogEnv(accepted=False))
if 'android.permission.ACCESS_FINE_LOCATION' in permissions:
envs.append(GPSAppEnv())
if 'android.permission.READ_SMS' in permissions:
envs.append(SMSLogEnv())
envs.append(SMSLogEnv(sms_in=False))
if 'android.permission.READ_EXTERNAL_STORAGE' in permissions \
or 'android.permission.WRITE_EXTERNAL_STORAGE' in permissions \
or 'android.permission.MOUNT_UNMOUNT_FILESYSTEMS' in permissions:
envs.append(DummyFilesEnv())
# TODO add more app-specific app environment
return envs
class FileEnvFactory(AppEnvFactory):
"""
    A factory which generates environments from a JSON file
"""
def __init__(self, env_file):
"""
create a FileEnvFactory from a json file
:param env_file path string
"""
self.envs = []
self.file = env_file
f = open(env_file, 'r')
env_array = json.load(f)
for env_dict in env_array:
if not isinstance(env_dict, dict):
raise UnknownEnvException
if 'env_type' not in env_dict:
raise UnknownEnvException
env_type = env_dict['env_type']
            if env_type not in ENV_TYPES:
raise UnknownEnvException
EnvType = ENV_TYPES[env_type]
            env = EnvType(env_dict=env_dict)
self.envs.append(env)
    def produce_envs(self):
        """
        return the list of envs loaded from the file
        """
        return self.envs
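# Minimal usage sketch, not part of the original module.  `device` and `app`
# are assumed to be droidbot Device/App instances; with POLICY_STATIC the
# manager selects envs matching the app's declared permissions and writes
# droidbot_env.json to device.output_dir if one is configured.
def _app_env_manager_sketch(device, app):
    manager = AppEnvManager(device, app, POLICY_STATIC)
    manager.deploy()
    return manager.envs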
|
|
import datetime
from datetime import timedelta
from django.test import TestCase
from django.forms.models import model_to_dict
from django.contrib.auth import models as auth_models
from django.core.exceptions import ValidationError
from pyconde.conference.test_utils import ConferenceTestingMixin
from pyconde.conference import models as conference_models
from pyconde.speakers import models as speakers_models
from . import models
from . import forms
from . import validators
from . import utils
class SubmissionTests(ConferenceTestingMixin, TestCase):
def setUp(self):
self.create_test_conference()
self.user = auth_models.User.objects.create_user(
'test', 'test@test.com',
'testpassword')
speakers_models.Speaker.objects.all().delete()
self.speaker = speakers_models.Speaker(user=self.user)
self.speaker.save()
self.now = datetime.datetime.now()
def tearDown(self):
self.destroy_all_test_conferences()
def test_with_open_sessionkind(self):
"""
Tests that a proposal can be submitted with an open sessionkind
"""
proposal = models.Proposal(
conference=self.conference,
title="Proposal",
description="DESCRIPTION",
abstract="ABSTRACT",
speaker=self.speaker,
kind=self.kind,
audience_level=self.audience_level,
duration=self.duration,
track=self.track
)
data = model_to_dict(proposal)
data['agree_to_terms'] = True
form = forms.ProposalSubmissionForm(data=data)
self.assertTrue(form.is_valid(), form.errors)
now = datetime.datetime.now()
self.kind.start_date = now - datetime.timedelta(1)
self.kind.end_date = now + datetime.timedelta(1)
self.kind.save()
data = model_to_dict(proposal)
data['agree_to_terms'] = True
form = forms.ProposalSubmissionForm(data=data)
self.assertTrue(form.is_valid(), form.errors)
def test_with_closed_sessionkind(self):
proposal = models.Proposal(
conference=self.conference,
title="Proposal",
description="DESCRIPTION",
abstract="ABSTRACT",
speaker=self.speaker,
kind=self.kind,
audience_level=self.audience_level,
duration=self.duration,
track=self.track
)
self.kind.start_date = self.now - timedelta(2)
self.kind.end_date = self.now - timedelta(1)
self.kind.closed = None
self.kind.save()
form = forms.ProposalSubmissionForm(data=model_to_dict(proposal))
self.assertFalse(form.is_valid())
self.kind.start_date = None
self.kind.end_date = None
self.kind.closed = True
self.kind.save()
form = forms.ProposalSubmissionForm(data=model_to_dict(proposal))
self.assertFalse(form.is_valid(), form.errors)
class MaxWordsValidatorTests(TestCase):
def test_too_long(self):
v = validators.MaxWordsValidator(3)
self.assertRaises(ValidationError, v, "this is a bit too long")
def test_ok_with_signs(self):
v = validators.MaxWordsValidator(3)
v("hi! hello... world!")
def test_ok(self):
v = validators.MaxWordsValidator(2)
v("hello world!")
def test_ok_with_whitespaces(self):
v = validators.MaxWordsValidator(2)
v("hello \n \t world!")
class ListUserProposalsViewTests(ConferenceTestingMixin, TestCase):
def setUp(self):
self.create_test_conference('')
self.create_test_conference('other_')
self.user = auth_models.User.objects.create_user(
'test', 'test@test.com',
'testpassword'
)
speakers_models.Speaker.objects.all().delete()
self.speaker = speakers_models.Speaker(user=self.user)
self.speaker.save()
self.now = datetime.datetime.now()
def tearDown(self):
self.destroy_all_test_conferences()
self.user.delete()
def test_current_conf_only(self):
"""
This view should only list proposals made for the current conference.
"""
# In this case the user has made two proposals: One for the current
# conference, one for another one also managed within the same
# database. Only the one for the current conference should be listed
# here.
previous_proposal = models.Proposal(
conference=self.other_conference,
title="Proposal",
description="DESCRIPTION",
abstract="ABSTRACT",
speaker=self.speaker,
kind=self.other_kind,
audience_level=self.other_audience_level,
duration=self.other_duration,
track=self.other_track
)
previous_proposal.save()
current_proposal = models.Proposal(
conference=self.conference,
title="Proposal",
description="DESCRIPTION",
abstract="ABSTRACT",
speaker=self.speaker,
kind=self.kind,
audience_level=self.audience_level,
duration=self.duration,
track=self.track
)
current_proposal.save()
with self.settings(CONFERENCE_ID=self.conference.pk):
self.client.login(username=self.user.username,
password='testpassword')
ctx = self.client.get('/proposals/mine/').context
self.assertEqual([current_proposal], list(ctx['proposals']))
with self.settings(CONFERENCE_ID=self.other_conference.pk):
self.client.login(username=self.user.username,
password='testpassword')
ctx = self.client.get('/proposals/mine/').context
self.assertEqual([previous_proposal], list(ctx['proposals']))
def test_login_required(self):
"""
This list should only be available to logged in users.
"""
self.client.logout()
self.assertRedirects(
self.client.get('/proposals/mine/'),
'/accounts/login/?next=/proposals/mine/')
class SubmitProposalViewTests(TestCase):
def test_login_required(self):
self.client.logout()
self.assertRedirects(
self.client.get('/proposals/submit/'),
'/accounts/login/?next=/proposals/submit/')
class SubmitTypedProposalViewTests(TestCase):
def test_login_required(self):
self.client.logout()
self.assertRedirects(
self.client.get('/proposals/submit/testkind/'),
'/accounts/login/?next=/proposals/submit/testkind/')
class EditProposalViewTests(TestCase):
def test_login_required(self):
self.client.logout()
self.assertRedirects(
self.client.get('/proposals/edit/123/'),
'/accounts/login/?next=/proposals/edit/123/')
class CancelProposalViewTests(TestCase):
def test_login_required(self):
self.client.logout()
self.assertRedirects(
self.client.get('/proposals/cancel/123/'),
'/accounts/login/?next=/proposals/cancel/123/')
class LeaveProposalViewTests(TestCase):
def test_login_required(self):
self.client.logout()
self.assertRedirects(
self.client.get('/proposals/leave/123/'),
'/accounts/login/?next=/proposals/leave/123/')
class TimeslotModelTests(ConferenceTestingMixin, TestCase):
def setUp(self):
self.create_test_conference()
def tearDown(self):
self.destroy_all_test_conferences()
def test_uniqueness(self):
"""
Ensure uniqueness of a timeslot per day.
"""
section = conference_models.Section(
conference=self.conference, name="Test section")
section.save()
today = datetime.datetime.now().date()
ts1 = models.TimeSlot(date=today, slot=1, section=section)
ts1.save()
ts2 = models.TimeSlot(date=today, slot=2, section=section)
ts2.save()
ts1_again = models.TimeSlot(date=today, slot=1, section=section)
with self.assertRaises(Exception):
ts1_again.clear()
class DateRangeTests(TestCase):
def test_simple_daterange(self):
start = datetime.date(2013, 3, 15)
end = datetime.date(2013, 3, 18)
range_ = list(utils.get_date_range(start, end))
self.assertEquals(4, len(range_))
self.assertEquals(datetime.date(2013, 3, 15), range_[0])
self.assertEquals(datetime.date(2013, 3, 16), range_[1])
self.assertEquals(datetime.date(2013, 3, 17), range_[2])
self.assertEquals(datetime.date(2013, 3, 18), range_[3])
def test_oneday_daterange(self):
start = datetime.date(2013, 3, 15)
end = datetime.date(2013, 3, 15)
range_ = list(utils.get_date_range(start, end))
self.assertEquals(1, len(range_))
self.assertEquals(datetime.date(2013, 3, 15), range_[0])
def test_invalid_daterange(self):
        start = datetime.date(2013, 3, 15)
        end = datetime.date(2013, 3, 14)
with self.assertRaises(ValueError):
list(utils.get_date_range(start, end))
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finite difference approximations of the Fisher-Rao norm regularizer.
The provided utility routines are used to create Fisher-Rao norm regularizers.
The implementations use finite difference perturbations of the parameters in the
original loss to approximate the necessary gradient-vector products.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class VariableCollector(object):
"""Helper class with custom getter to collect `Variable` objects in a scope.
  When called for the first time, the custom getter stores the corresponding
  `Variable` object in a dictionary.
When called subsequently for the same `Variable` name the getter will return
the object from the dictionary instead of calling the original getter.
"""
def __init__(self):
self.variables = {}
def collector_getter(self, getter, name, *args, **kwargs):
"""Custom getter for `VariableScope` that stores `Variable` in dictionary.
Args:
getter: Function, original getter function
name: String, name of `Variable` in the scope
*args: Additional arguments, currently only passed forward on first
call of the original getter
**kwargs: Additional arguments, currently only passed forward on first
call of the original getter
Returns:
A `Tensor` object that contains the named `Variable` either from calling
the original getter or if available from the dictionary.
"""
if name not in self.variables:
self.variables[name] = getter(name, *args, **kwargs)
# TODO(jonathanjh): Add consistency check for args and kwargs.
return self.variables[name]
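# Minimal sketch, not part of the original module: re-entering a scope with
# the collector's getter returns the cached `Variable` instead of creating a
# new one, which is what lets the regularizers below call make_logits() twice
# over the same parameters.  Assumes a TF1 graph-mode setup.
def _variable_collector_sketch():
  collector = VariableCollector()
  with tf.variable_scope("shared", custom_getter=collector.collector_getter):
    first = tf.get_variable("w", shape=[3, 3])
  with tf.variable_scope("shared", custom_getter=collector.collector_getter):
    second = tf.get_variable("w", shape=[3, 3])
  return first is second  # True: the second lookup is served from the cache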
def make_perturbation_getter(should_regularize, collector, perturbation):
"""Creates custom getter to replace variables in scope by their perturbations.
Args:
should_regularize: Function, takes a variable name as String and returns
Boolean that decides whether the variable should be regularized.
collector: `VariableCollector` object that provides the dictionary to use
for subsequent use of the same `Variable` object for the same name.
perturbation: Float, perturbation value added on top of 1 to use for
variable replacement value.
Returns:
A custom getter function that can be used with `VariableScope`.
"""
def plus_getter(getter, name, *args, **kwargs):
var = collector.collector_getter(getter, name, *args, **kwargs)
if should_regularize(name) and kwargs.get("trainable"):
var = (1. + perturbation) * var
return var
return plus_getter
def make_empirical_fisher_regularizer(make_logits, labels, scope,
should_regularize, perturbation):
"""Creates per-example logits and the per-example empirical Fisher-Rao norm.
This function assumes the model of a categorical distribution generated by a
softmax function.
The empirical Fisher-Rao norm uses the empirical training distribution for
both the input values and the labels to estimate the Fisher information
matrix.
Args:
make_logits: Function, returns `Tensor` representing the per-example logits.
The expected shape of the tensor is such that the number of categories
is the last dimension.
labels: Tensor, encoding of the class labels, compatible in shape with the
return value of the make_logits function.
scope: String, name of `VariableScope` to use for the `Variable` objects
that represent the regularized parameter.
should_regularize: Function, takes a variable name as String and returns
Boolean that decides whether the variable should be regularized.
The passed variable name includes the name of the scope.
perturbation: Float, finite difference perturbation constant.
The choice of perturbation constant represents a tradeoff between rounding
and approximation error and should depend on floating point precision and
parameter norm.
Returns:
A tuple of `Tensor` objects representing the per-example logits and the
scalar empirical Fisher-Rao norm regularization loss.
Raises:
ValueError: if the last dimension of the logits shape is not statically
inferrable.
"""
collector = VariableCollector()
with tf.variable_scope(scope, custom_getter=collector.collector_getter):
logits = make_logits()
if logits.shape[-1].value is None:
raise ValueError("The size of the last dimension of the logits vector must"
" be statically inferrable.")
with tf.variable_scope(
scope,
custom_getter=
make_perturbation_getter(should_regularize, collector, perturbation)):
perturbed_logits = make_logits()
loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
perturbed_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=perturbed_logits)
regularizer = tf.square(
tf.divide(tf.subtract(perturbed_loss, loss), perturbation))
regularizer = tf.reduce_mean(regularizer)
return (logits, regularizer)
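# A minimal usage sketch for the empirical regularizer, assuming a simple
# feed-forward classifier; the architecture, scope name, predicate and
# perturbation value below are illustrative placeholders only.
def _example_empirical_fisher_loss(features, labels, weight=0.1):
  """Returns (logits, regularized loss) for a hypothetical two-layer model."""
  def make_logits():
    hidden = tf.layers.dense(features, 128, activation=tf.nn.relu)
    return tf.layers.dense(hidden, 10)
  logits, regularizer = make_empirical_fisher_regularizer(
      make_logits, labels, scope="example_model",
      should_regularize=lambda name: "kernel" in name,
      perturbation=1e-3)
  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
  return logits, cross_entropy + weight * regularizer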
def make_standard_fisher_regularizer(make_logits, scope, should_regularize,
perturbation, differentiate_probability):
"""Creates per-example logits and the per-example standard Fisher-Rao norm.
This function assumes the model of a categorical distribution generated by a
softmax function.
The standard Fisher-Rao norm uses the model distribution computed from the
logits by the softmax function to estimate the Fisher information matrix.
The empirical training distribution is used for the input values.
Args:
make_logits: Function, returns `Tensor` representing the per-example logits.
The expected shape of the tensor is such that the number of categories
is the last dimension.
scope: String, name of `VariableScope` to use for the `Variable` objects
that represent the regularized parameter.
should_regularize: Function, takes a variable name as String and returns
Boolean that decides whether the variable should be regularized.
The passed variable name includes the name of the scope.
perturbation: Float, finite difference perturbation constant.
The choice of perturbation constant represents a tradeoff between rounding
and approximation error and should depend on floating point precision and
parameter norm.
differentiate_probability: Boolean, determines whether the label probability
distribution should be differentiated.
Returns:
A tuple of `Tensor` objects representing the per-example logits and the
scalar standard Fisher-Rao norm regularization loss.
Raises:
ValueError: if the last dimension of the logits shape is not statically
inferrable.
"""
collector = VariableCollector()
with tf.variable_scope(scope, custom_getter=collector.collector_getter):
logits = make_logits()
if logits.shape[-1].value is None:
raise ValueError("The size of the last dimension of the logits vector must"
" be statically inferrable.")
with tf.variable_scope(
scope,
custom_getter=
make_perturbation_getter(should_regularize, collector, perturbation)):
perturbed_logits = make_logits()
log_probs = tf.nn.log_softmax(logits, axis=-1)
perturbed_log_probs = tf.nn.log_softmax(perturbed_logits, axis=-1)
stop_probs = tf.stop_gradient(tf.exp(log_probs))
log_prob_derivative = (tf.square((perturbed_log_probs - log_probs) /
perturbation))
if differentiate_probability:
prob_regularizer_loss = (log_prob_derivative * stop_probs +
tf.stop_gradient(log_prob_derivative) * log_probs *
stop_probs -
tf.stop_gradient(log_prob_derivative * log_probs *
stop_probs))
else:
prob_regularizer_loss = log_prob_derivative * stop_probs
regularizer = logits.shape[-1].value * tf.reduce_mean(prob_regularizer_loss)
return (logits, regularizer)
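# A minimal usage sketch for the standard (model-distribution) regularizer;
# unlike the empirical variant, no labels are needed to build the regularizer
# itself. The values below are illustrative placeholders only.
def _example_standard_fisher_loss(features, labels, weight=0.1):
  def make_logits():
    return tf.layers.dense(features, 10)
  logits, regularizer = make_standard_fisher_regularizer(
      make_logits, scope="example_model",
      should_regularize=lambda name: "kernel" in name,
      perturbation=1e-3, differentiate_probability=True)
  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
  return logits, cross_entropy + weight * regularizer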
|
|
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing all of the datatypes written to and read from the datastore.
"""
from collections import namedtuple
import copy
import json
import re
from pycalico import netns
from netaddr import IPAddress, IPNetwork
from pycalico.util import generate_cali_interface_name, validate_characters, \
validate_ports, validate_icmp_type
from pycalico.block import BLOCK_PREFIXLEN
from pycalico.datastore_errors import InvalidBlockSizeError
IF_PREFIX = "cali"
"""
Prefix that appears in all Calico interface names in the root namespace, e.g.
cali123456789ab.
"""
class Rules(namedtuple("Rules", ["inbound_rules", "outbound_rules"])):
"""
A set of Calico rules describing inbound and outbound network traffic
policy.
"""
def to_dict(self):
"""
Convert the Rules object to a dictionary.
:return: A dictionary representation of this object.
"""
json_dict = self._asdict()
rules = json_dict["inbound_rules"]
json_dict["inbound_rules"] = [rule.to_json_dict() for rule in rules]
rules = json_dict["outbound_rules"]
json_dict["outbound_rules"] = [rule.to_json_dict() for rule in rules]
return json_dict
def to_json(self, indent=None):
"""
Convert the Rules object to a JSON string.
:param indent: Integer representing the level of indentation for the
returned JSON string. None = no indentation, 0 = newlines only. A value
of 1 is recommended for human-readable strings.
:return: A JSON string representation of this object.
"""
return json.dumps(self.to_dict(), indent=indent)
@classmethod
def from_json(cls, json_str):
"""
Create a Rules object from a JSON string.
:param json_str: A JSON string representation of a Rules object.
:return: A Rules object.
"""
json_dict = json.loads(json_str)
inbound_rules = []
for rule in json_dict["inbound_rules"]:
inbound_rules.append(Rule(**rule))
outbound_rules = []
for rule in json_dict["outbound_rules"]:
outbound_rules.append(Rule(**rule))
rules = cls(inbound_rules=inbound_rules,
outbound_rules=outbound_rules)
return rules
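# A minimal usage sketch: build a Rules object from hypothetical inbound and
# outbound rules and round-trip it through its JSON representation.
def _example_rules_roundtrip():
    inbound = [Rule(action="allow", protocol="tcp", dst_ports=[80, 443])]
    outbound = [Rule(action="allow")]
    rules = Rules(inbound_rules=inbound, outbound_rules=outbound)
    return Rules.from_json(rules.to_json())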
class BGPPeer(object):
"""
Class encapsulating a BGPPeer.
"""
def __init__(self, ip, as_num):
"""
Constructor.
:param ip: The BGPPeer IP address (string or IPAddress)
:param as_num: The AS Number (string or int).
"""
self.ip = IPAddress(ip)
# Store the AS number as a string. This allows dotted notation of
# AS numbers.
self.as_num = str(as_num)
def to_json(self):
"""
Convert the BGPPeer to a JSON string.
:return: A JSON string.
"""
json_dict = {"ip": str(self.ip), "as_num": self.as_num}
return json.dumps(json_dict)
@classmethod
def from_json(cls, json_str):
"""
Convert the json string into a BGPPeer object.
:param json_str: The JSON string representing a BGPPeer.
:return: A BGPPeer object.
"""
json_dict = json.loads(json_str)
return cls(json_dict["ip"], json_dict["as_num"])
def __eq__(self, other):
if not isinstance(other, BGPPeer):
return NotImplemented
return (self.ip == other.ip and
self.as_num == other.as_num)
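# A minimal usage sketch with hypothetical peer values: the AS number is
# stored as a string, so the JSON round trip preserves equality.
def _example_bgppeer_roundtrip():
    peer = BGPPeer("192.0.2.1", 64512)
    return BGPPeer.from_json(peer.to_json()) == peer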
class IPPool(object):
"""
Class encapsulating an IPPool.
"""
def __init__(self, cidr, ipip=False, masquerade=False, ipam=True, disabled=False):
"""
Constructor.
:param cidr: IPNetwork object (or CIDR string) representing the pool.
NOTE: When used by Calico IPAM, an IPPool's cidr prefix must have a
length equal to or smaller than an IPAM block, such as /24 if the
IPAM block size is /26.
:param ipip: Use IP-IP for this pool.
:param masquerade: Enable masquerade (outgoing NAT) for this pool.
:param ipam: Whether this IPPool is used by Calico IPAM.
:param disabled: Whether this IPPool is disabled. If disabled, the pool
is not used by the IPAM client for new allocation blocks.
"""
# Normalize the CIDR (e.g. 1.2.3.4/16 -> 1.2.0.0/16)
self.cidr = IPNetwork(cidr).cidr
self.ipam = bool(ipam)
if self.ipam:
if self.cidr.prefixlen > BLOCK_PREFIXLEN[self.cidr.version]:
raise InvalidBlockSizeError("The CIDR block size for an "
"IPv%s pool when using Calico IPAM must have a prefix "
"length of %s or lower. Given: %s" %
(self.cidr.version,
BLOCK_PREFIXLEN[self.cidr.version],
self.cidr.prefixlen))
self.ipip = bool(ipip)
self.masquerade = bool(masquerade)
self.disabled = bool(disabled)
def to_json(self):
"""
Convert the IPPool to a JSON string.
:return: A JSON string.
"""
json_dict = {"cidr" : str(self.cidr)}
if self.ipip:
json_dict["ipip"] = "tunl0"
if self.masquerade:
json_dict["masquerade"] = True
# Only write "ipam" and "disabled" when they differ from their default
# values. This keeps the interface unchanged between versions when
# these fields are not required.
if not self.ipam:
json_dict["ipam"] = False
if self.disabled:
json_dict["disabled"] = True
return json.dumps(json_dict)
@classmethod
def from_json(cls, json_str):
"""
Convert the JSON string into an IPPool object.
:param json_str: The JSON string representing an IPPool.
:return: An IPPool object.
"""
# The fields "ipam" and "disabled" may not be present in older versions
# of the data, so use default values if not present.
json_dict = json.loads(json_str)
return cls(json_dict["cidr"],
ipip=json_dict.get("ipip"),
masquerade=json_dict.get("masquerade"),
ipam=json_dict.get("ipam", True),
disabled=json_dict.get("disabled", False))
def __eq__(self, other):
if not isinstance(other, IPPool):
return NotImplemented
return (self.cidr == other.cidr and
self.ipip == other.ipip and
self.masquerade == other.masquerade and
self.ipam == other.ipam and
self.disabled == other.disabled)
def __contains__(self, item):
"""
Override __contains__ so that you can check if an IP address is in this
pool.
e.g. IPAddress("1.2.3.4") in IPPool("1.2.3.0/24") is True.
"""
return item in self.cidr
def __str__(self):
"""Return the CIDR of this pool."""
return str(self.cidr)
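# A minimal usage sketch with a hypothetical pool: __contains__ allows direct
# membership tests, and to_json/from_json round-trips the configuration.
def _example_ippool_usage():
    pool = IPPool("192.168.0.0/16", ipip=True, masquerade=True)
    in_pool = IPAddress("192.168.3.4") in pool
    round_tripped = IPPool.from_json(pool.to_json())
    return in_pool and round_tripped == pool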
class Endpoint(object):
"""
Class encapsulating an Endpoint.
This class keeps track of the original JSON representation of the
endpoint to allow atomic updates to be performed.
"""
# Endpoint path match regex
ENDPOINT_KEY_MATCH = re.compile("/calico/v1/host/(?P<hostname>[^/]*)/"
"workload/(?P<orchestrator_id>[^/]*)/"
"(?P<workload_id>[^/]*)/"
"endpoint/(?P<endpoint_id>[^/]*)")
def __init__(self, hostname, orchestrator_id, workload_id, endpoint_id,
state, mac, name=None):
self.hostname = hostname
self.orchestrator_id = orchestrator_id
self.workload_id = workload_id
self.endpoint_id = endpoint_id
self.state = state
self.mac = mac
self.name = name or generate_cali_interface_name(IF_PREFIX,
endpoint_id)
self.ipv4_nets = set()
self.ipv6_nets = set()
self.profile_ids = []
self._original_json = None
self.labels = {}
def to_json(self):
json_dict = {"state": self.state,
"name": self.name,
"mac": self.mac,
"profile_ids": self.profile_ids,
"labels": self.labels,
"ipv4_nets": sorted([str(net) for net in self.ipv4_nets]),
"ipv6_nets": sorted([str(net) for net in self.ipv6_nets])}
return json.dumps(json_dict)
@classmethod
def from_json(cls, endpoint_key, json_str):
"""
Create an Endpoint from the endpoint raw JSON and the endpoint key.
:param endpoint_key: The endpoint key (the etcd path to the endpoint)
:param json_str: The raw endpoint JSON data.
:return: An Endpoint object, or None if the endpoint_key does not
represent an Endpoint.
"""
match = Endpoint.ENDPOINT_KEY_MATCH.match(endpoint_key)
if not match:
return None
hostname = match.group("hostname")
orchestrator_id = match.group("orchestrator_id")
workload_id = match.group("workload_id")
endpoint_id = match.group("endpoint_id")
json_dict = json.loads(json_str)
ep = cls(hostname, orchestrator_id, workload_id, endpoint_id,
json_dict["state"], json_dict["mac"], name=json_dict["name"])
for net in json_dict["ipv4_nets"]:
ep.ipv4_nets.add(IPNetwork(net))
for net in json_dict["ipv6_nets"]:
ep.ipv6_nets.add(IPNetwork(net))
labels = json_dict.get("labels", {})
ep.labels = labels
# Version controlled fields
profile_id = json_dict.get("profile_id", None)
ep.profile_ids = [profile_id] if profile_id else \
json_dict.get("profile_ids", [])
# Store the original JSON representation of this Endpoint.
ep._original_json = json_str
return ep
def matches(self, hostname=None, orchestrator_id=None,
workload_id=None, endpoint_id=None):
"""
A less strict 'equals' function, which compares provided parameters to
the current endpoint object.
:param hostname: The hostname to compare to
:param orchestrator_id: The orchestrator ID to compare to.
:param workload_id: The workload ID to compare to
:param endpoint_id: The endpoint ID to compare to
:return: True if the provided parameters match the Endpoint's
parameters, False if any of the provided parameters are different from
the Endpoint's parameters.
"""
if hostname and hostname != self.hostname:
return False
elif orchestrator_id and orchestrator_id != self.orchestrator_id:
return False
elif workload_id and workload_id != self.workload_id:
return False
elif endpoint_id and endpoint_id != self.endpoint_id:
return False
else:
return True
def provision_veth(self, namespace, veth_name_ns):
"""
Create the veth, move into the container namespace, add the IP and
set up the default routes.
Note, the endpoint will not be updated in etcd. If desired, the user
should update the endpoint mac with the mac address provided
by the function, and then call update_endpoint.
:param self: The endpoint object to provision the veth on
:param namespace: The namespace to operate in
:type namespace: netns.Namespace
:param veth_name_ns: The name of the interface in the namespace
:return: The MAC address of the veth as a string
"""
assert isinstance(namespace, netns.Namespace), \
'Namespace object expected.'
netns.create_veth(self.name, self.temp_interface_name)
netns.move_veth_into_ns(namespace, self.temp_interface_name,
veth_name_ns)
for ip_net in self.ipv4_nets | self.ipv6_nets:
netns.add_ip_to_ns_veth(namespace, ip_net.ip, veth_name_ns)
netns.add_ns_default_route(namespace, self.name, veth_name_ns)
return netns.get_ns_veth_mac(namespace, veth_name_ns)
def __eq__(self, other):
if not isinstance(other, Endpoint):
return NotImplemented
return (self.endpoint_id == other.endpoint_id and
self.state == other.state and
self.mac == other.mac and
self.profile_ids == other.profile_ids and
self.ipv4_nets == other.ipv4_nets and
self.name == other.name and
self.ipv6_nets == other.ipv6_nets)
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def copy(self):
return copy.deepcopy(self)
@property
def temp_interface_name(self):
return generate_cali_interface_name("tmp", self.endpoint_id)
def __repr__(self):
return self.__str__()
def __str__(self):
return "Endpoint(%s)" % self.to_json()
class Profile(object):
"""A Calico policy profile."""
def __init__(self, name):
self.name = name
self.tags = set()
# Default to empty lists of rules.
self.rules = Rules([], [])
class Policy(object):
"""A Calico policy."""
def __init__(self, tier_name, policy_name):
self.tier_name = tier_name
self.policy_name = policy_name
self.order = 0
# Default to empty lists of rules.
self.rules = Rules([], [])
# Default to empty selector.
self.selector = ""
def to_json(self):
"""
Returns a json string representing this Policy
as stored in the data store.
"""
data = {"order": self.order,
"selector": self.selector}
data.update(self.rules.to_dict())
return json.dumps(data)
class Rule(dict):
"""
A Calico inbound or outbound traffic rule.
"""
ALLOWED_KEYS = ["protocol",
"src_tag",
"src_selector",
"src_ports",
"src_net",
"dst_tag",
"dst_selector",
"dst_ports",
"dst_net",
"icmp_type",
"icmp_code",
"action"]
def __init__(self, **kwargs):
super(Rule, self).__init__()
for key, value in kwargs.iteritems():
self[key] = value
def __setitem__(self, key, value):
if key not in Rule.ALLOWED_KEYS:
raise KeyError("Key %s is not allowed on Rule." % key)
# Convert any CIDR strings to netaddr before inserting them.
if key in ("src_net", "dst_net"):
value = IPNetwork(value)
if key == "action" and value not in ("allow", "deny", "next-tier"):
raise ValueError("'%s' is not allowed for key 'action'" % value)
if (key == "protocol" and
value not in ("tcp", "udp", "icmp", "icmpv6", None)):
raise ValueError("'%s' is not allowed for key 'protocol'" % value)
if key in ("src_tag", "dst_tag") and not validate_characters(value):
raise ValueError("'%s' is not allowed for key '%s'" % (value, key))
if key in ("src_ports", "dst_ports") and not validate_ports(value):
raise ValueError("'%s' is not allowed for key '%s'" % (value, key))
if key in ("icmp_type", "icmp_code") and not validate_icmp_type(value):
raise ValueError("'%s' is not allowed for key '%s'" % (value, key))
super(Rule, self).__setitem__(key, value)
def to_json(self):
"""
Convert the Rule object to a JSON string.
:return: A JSON string representation of this object.
"""
return json.dumps(self.to_json_dict())
def to_json_dict(self):
"""
Convert the Rule object to a dict that can be directly converted to
JSON.
:return: A dict containing valid JSON types.
"""
# Convert IPNetworks to strings
json_dict = self.copy()
if "dst_net" in json_dict:
json_dict["dst_net"] = str(json_dict["dst_net"])
if "src_net" in json_dict:
json_dict["src_net"] = str(json_dict["src_net"])
# Copy the port lists so the returned dict does not alias this Rule's lists.
if "dst_ports" in json_dict:
json_dict["dst_ports"] = [p for p in json_dict["dst_ports"]]
if "src_ports" in json_dict:
json_dict["src_ports"] = [p for p in json_dict["src_ports"]]
return json_dict
def pprint(self):
"""Human readable description."""
out = [self["action"]]
if "protocol" in self:
out.append(self["protocol"])
if "icmp_type" in self:
out.extend(["type", str(self["icmp_type"])])
if "icmp_code" in self:
out.extend(["code", str(self["icmp_code"])])
if "src_tag" in self or "src_ports" in self or "src_net" in self:
out.append("from")
if "src_ports" in self:
ports = ",".join(str(p) for p in self["src_ports"])
out.extend(["ports", ports])
if "src_tag" in self:
out.extend(["tag", self["src_tag"]])
if "src_net" in self:
out.extend(["cidr", str(self["src_net"])])
if "dst_tag" in self or "dst_ports" in self or "dst_net" in self:
out.append("to")
if "dst_ports" in self:
ports = ",".join(str(p) for p in self["dst_ports"])
out.extend(["ports", ports])
if "dst_tag" in self:
out.extend(["tag", self["dst_tag"]])
if "dst_net" in self:
out.extend(["cidr", str(self["dst_net"])])
return " ".join(out)
class IPAMConfig(object):
"""
IPAM configuration.
"""
AUTO_ALLOCATE_BLOCKS = "auto_allocate_blocks"
STRICT_AFFINITY = "strict_affinity"
def __init__(self, auto_allocate_blocks=True, strict_affinity=False):
self.auto_allocate_blocks = auto_allocate_blocks
"""
Whether the Calico IPAM module is allowed to auto-allocate affine blocks
when auto-assigning IP addresses.
"""
self.strict_affinity = strict_affinity
"""
Whether strict affinity should be observed for affine blocks.
"""
def to_json(self):
"""
Convert the IPAMConfig object to a JSON string.
:return: A JSON string representation of this object.
"""
return json.dumps(self.to_json_dict())
def to_json_dict(self):
"""
Convert the IPAMConfig object to a dict that can be directly converted to
JSON.
:return: A dict containing valid JSON types.
"""
return {
IPAMConfig.AUTO_ALLOCATE_BLOCKS: self.auto_allocate_blocks,
IPAMConfig.STRICT_AFFINITY: self.strict_affinity
}
@classmethod
def from_json(cls, json_str):
"""
Create an IPAMConfig from the raw JSON.
:param json_str: A JSON string representation of an IPAMConfig
object.
:return: An IPAMConfig object.
"""
json_dict = json.loads(json_str)
return IPAMConfig(
auto_allocate_blocks=json_dict[IPAMConfig.AUTO_ALLOCATE_BLOCKS],
strict_affinity=json_dict[IPAMConfig.STRICT_AFFINITY]
)
def __eq__(self, other):
if not isinstance(other, IPAMConfig):
return NotImplemented
return (self.auto_allocate_blocks == other.auto_allocate_blocks and
self.strict_affinity == other.strict_affinity)
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __repr__(self):
return self.__str__()
def __str__(self):
return "IPAMConfig(%s)" % self.to_json()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from flask_restful import fields
from cairis.core.AcceptEnvironmentProperties import AcceptEnvironmentProperties
from cairis.core.ObjectSummary import ObjectSummary
from cairis.core.Asset import Asset
from cairis.core.AssetEnvironmentProperties import AssetEnvironmentProperties
from cairis.core.Attacker import Attacker
from cairis.core.AttackerEnvironmentProperties import AttackerEnvironmentProperties
from cairis.core.ClassAssociation import ClassAssociation
from cairis.core.GoalAssociation import GoalAssociation
from cairis.core.Dependency import Dependency
from cairis.core.Directory import Directory
from cairis.core.Goal import Goal
from cairis.core.GoalEnvironmentProperties import GoalEnvironmentProperties
from cairis.core.Obstacle import Obstacle
from cairis.core.ObstacleEnvironmentProperties import ObstacleEnvironmentProperties
from cairis.core.DomainProperty import DomainProperty
from cairis.core.MisuseCase import MisuseCase
from cairis.core.MisuseCaseEnvironmentProperties import MisuseCaseEnvironmentProperties
from cairis.core.MitigateEnvironmentProperties import MitigateEnvironmentProperties
from cairis.core.Persona import Persona
from cairis.core.PersonaEnvironmentProperties import PersonaEnvironmentProperties
from cairis.core.Requirement import Requirement
from cairis.core.Risk import Risk
from cairis.core.Role import Role
from cairis.core.SecurityPattern import SecurityPattern
from cairis.core.Target import Target
from cairis.core.Task import Task
from cairis.core.Trace import Trace
from cairis.core.TrustBoundary import TrustBoundary
from cairis.core.UseCase import UseCase
from cairis.core.TaskEnvironmentProperties import TaskEnvironmentProperties
from cairis.core.UseCaseEnvironmentProperties import UseCaseEnvironmentProperties
from cairis.core.ThreatEnvironmentProperties import ThreatEnvironmentProperties
from cairis.core.TransferEnvironmentProperties import TransferEnvironmentProperties
from cairis.core.ValidationResult import ValidationResult
from cairis.core.ValueType import ValueType
from cairis.core.Vulnerability import Vulnerability
from cairis.core.VulnerabilityEnvironmentProperties import VulnerabilityEnvironmentProperties
from cairis.core.Countermeasure import Countermeasure
from cairis.core.CountermeasureEnvironmentProperties import CountermeasureEnvironmentProperties
from cairis.core.ExternalDocument import ExternalDocument
from cairis.core.DocumentReference import DocumentReference
from cairis.core.ReferenceSynopsis import ReferenceSynopsis
from cairis.core.ReferenceContribution import ReferenceContribution
from cairis.core.GoalContribution import GoalContribution
from cairis.core.ConceptReference import ConceptReference
from cairis.core.PersonaCharacteristic import PersonaCharacteristic
from cairis.core.TaskCharacteristic import TaskCharacteristic
from cairis.core.ComponentView import ComponentView
from cairis.core.Component import Component
from cairis.core.TemplateGoal import TemplateGoal
from cairis.core.TemplateAsset import TemplateAsset
from cairis.core.TemplateRequirement import TemplateRequirement
from cairis.core.Location import Location
from cairis.core.Locations import Locations
from cairis.core.WeaknessTarget import WeaknessTarget
from cairis.core.DataFlow import DataFlow
from cairis.tools.PseudoClasses import EnvironmentTensionModel, SecurityAttribute, ValuedRole, RiskRating, CountermeasureTarget, PersonaTaskCharacteristics, StepAttributes, CharacteristicReference, ObjectDependency, CharacteristicReferenceSynopsis, CharacteristicReferenceContribution
__author__ = 'Robin Quetin, Shamal Faily'
obj_id_field = "__python_obj__"
likelihood_metadata = { "enum": ['Incredible', 'Improbable', 'Remote', 'Occasional', 'Probable', 'Frequent'] }
severity_metadata = { "enum": ['Negligible', 'Marginal', 'Critical', 'Catastrophic'] }
value_metadata = { "enum": ['None','Low', 'Medium', 'High'] }
assettype_metadata = { "enum" : ['Information','Systems','Software','Hardware','People']}
def gen_class_metadata(class_ref):
return {
"enum": [class_ref.__module__+'.'+class_ref.__name__]
}
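# A minimal usage sketch: for the Asset class imported above this evaluates
# to {"enum": ["cairis.core.Asset.Asset"]}, pinning the expected value of the
# __python_obj__ field.
def _example_class_metadata():
  return gen_class_metadata(Asset)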
class InterfaceModel(object):
resource_fields = {
obj_id_field: fields.String,
"theInterfaceName": fields.String,
"theInterfaceType": fields.String,
"theAccessRight": fields.String,
"thePrivilege": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
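# A minimal usage sketch, assuming flask_restful.marshal is used to serialise
# objects against these resource_fields dictionaries; the payload below is a
# hypothetical example, including its __python_obj__ value.
def _example_marshal_interface():
  from flask_restful import marshal
  payload = {
    obj_id_field: 'cairis.tools.ModelDefinitions.InterfaceModel',
    'theInterfaceName': 'login',
    'theInterfaceType': 'provided',
    'theAccessRight': 'None',
    'thePrivilege': 'None'}
  return marshal(payload, InterfaceModel.resource_fields)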
class AssetEnvironmentPropertiesModel(object):
def __init__(self, env_name='', associations=None, attributes=None):
self.environment = env_name
# Default to fresh lists so instances do not share mutable default arguments.
self.associations = associations if associations is not None else []
self.attributes = attributes if attributes is not None else []
self.attributesDictionary = {}
def json_prepare(self):
self.attributes = list(self.attributesDictionary.values())
self.attributesDictionary = {}
for idx in range(0, len(self.associations)):
self.associations[idx] = list(self.associations[idx])
resource_fields = {
"__python_obj__": fields.String,
"theAssociations": fields.List(fields.List(fields.String)),
"theProperties": fields.List(fields.Nested(SecurityAttribute.resource_fields)),
"theEnvironmentName": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class AssetModel(object):
resource_fields = {
obj_id_field: fields.String,
"theDescription": fields.String,
"theSignificance": fields.String,
"theTags": fields.List(fields.String),
"theCriticalRationale": fields.String,
"theInterfaces": fields.List(fields.Nested(InterfaceModel.resource_fields)),
"theType": fields.String,
"theName": fields.String,
"isCritical": fields.Integer,
"theShortCode": fields.String,
"theEnvironmentProperties": fields.List(fields.Nested(AssetEnvironmentPropertiesModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ObjectSummaryModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theType": fields.String,
"theDescription": fields.String,
"theOriginator": fields.String,
"theStatus": fields.String,
"theVulnerability": fields.String,
"theThreat": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class CapabilityModel(object):
resource_fields = {
"name": fields.String,
"value": fields.String
}
required = list(resource_fields.keys())
class AttackerEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theMotives': fields.List(fields.String),
'theRoles': fields.List(fields.String),
'theCapabilities': fields.List(fields.Nested(CapabilityModel.resource_fields)),
'theEnvironmentName': fields.String,
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class AttackerModel(object):
resource_fields = {
obj_id_field: fields.String,
'theName': fields.String,
'theImage': fields.String,
'theDescription': fields.String,
'theTags': fields.List(fields.String),
'theEnvironmentProperties': fields.List(fields.Nested(AttackerEnvironmentPropertiesModel.resource_fields)),
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class CImportParams(object):
resource_fields = {
'urlenc_file_contents': fields.String,
'type': fields.String,
'overwrite': fields.Integer
}
required = list(resource_fields.keys())
class CExportParams(object):
resource_fields = {
'theModel': fields.String
}
required = list(resource_fields.keys())
class DocumentationParams(object):
resource_fields = {
'theDocumentType': fields.String,
'theTypeFlags': fields.List(fields.Integer),
'theSectionFlags': fields.List(fields.Integer)
}
required = list(resource_fields.keys())
class DependencyModel(object):
resource_fields = {
obj_id_field: fields.String,
'theDependencyType': fields.String,
'theRationale': fields.String,
'theEnvironmentName': fields.String,
'theDepender': fields.String,
'theDependee': fields.String,
'theDependency': fields.String,
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ClassAssociationModel(object):
resource_fields = {
obj_id_field: fields.String,
'theEnvironmentName': fields.String,
'theHeadAsset': fields.String,
'theHeadNav': fields.String,
'theHeadType': fields.String,
'theHeadMultiplicity': fields.String,
'theHeadRole': fields.String,
'theTailRole': fields.String,
'theTailMultiplicity': fields.String,
'theTailType': fields.String,
'theTailNav': fields.String,
'theTailAsset': fields.String,
'theRationale': fields.String,
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class GoalAssociationModel(object):
resource_fields = {
obj_id_field: fields.String,
'theEnvironmentName': fields.String,
'theGoal': fields.String,
'theGoalDimension': fields.String,
'theAssociation': fields.String,
'theSubGoal': fields.String,
'theSubGoalDimension': fields.String,
'theAlternativeId': fields.String,
'theRationale': fields.String,
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class EnvironmentModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theShortCode": fields.String,
"theDescription": fields.String,
"theEnvironments": fields.List(fields.String),
"theDuplicateProperty": fields.String,
"theOverridingEnvironment": fields.String,
"theTensions": fields.List(fields.Nested(EnvironmentTensionModel.resource_fields)),
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ConcernAssociationModel(object):
resource_fields = {
obj_id_field: fields.String,
'theSource': fields.String,
'theSourceNry': fields.String,
'theLinkVerb': fields.String,
'theTargetNry': fields.String,
'theTarget': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,src,srcNry,linkVerb,targ,targNry):
self.theSource = src
self.theSourceNry = srcNry
self.theLinkVerb = linkVerb
self.theTargetNry = targNry
self.theTarget = targ
class RefinementModel(object):
resource_fields = {
obj_id_field: fields.String,
'theEndName': fields.String,
'theEndType': fields.String,
'theRefType': fields.String,
'isAlternate': fields.String,
'theRationale': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,eName,eType,refType,isAlt,refRat):
self.theEndName = eName
self.theEndType = eType
self.theRefType = refType
self.isAlternate = isAlt
self.theRationale = refRat
class GoalEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
"theCategory": fields.String,
"theConcernAssociations": fields.List(fields.Nested(ConcernAssociationModel.resource_fields)),
"theConcerns": fields.List(fields.String),
"theDefinition": fields.String,
"theEnvironmentName": fields.String,
"theFitCriterion": fields.String,
"theGoalRefinements": fields.List(fields.Nested(RefinementModel.resource_fields)),
"theIssue": fields.String,
"thePriority": fields.String,
"theSubGoalRefinements": fields.List(fields.Nested(RefinementModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class GoalModel(object):
resource_fields = {
obj_id_field: fields.String,
"theEnvironmentProperties": fields.List(fields.Nested(GoalEnvironmentPropertiesModel.resource_fields)),
"theName": fields.String,
"theOriginator": fields.String,
"theTags": fields.List(fields.String)
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ObstacleEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
"theDefinition": fields.String,
"theCategory": fields.String,
"theGoalRefinements": fields.List(fields.Nested(RefinementModel.resource_fields)),
"theSubGoalRefinements": fields.List(fields.Nested(RefinementModel.resource_fields)),
"theConcerns": fields.List(fields.String),
"theProbability": fields.Float,
"theProbabilityRationale": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ObstacleModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theTags": fields.List(fields.String),
"theOriginator": fields.String,
"theEnvironmentProperties": fields.List(fields.Nested(ObstacleEnvironmentPropertiesModel.resource_fields)),
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class DomainPropertyModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theTags": fields.List(fields.String),
"theDescription": fields.String,
"theType": fields.String,
"theOriginator": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class MisuseCaseEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
"theAssets": fields.List(fields.String),
"theAttackers": fields.List(fields.String),
"theDescription": fields.String,
"theEnvironmentName": fields.String,
"theObjective": fields.String,
"theLikelihood": fields.String,
"theRiskRating": fields.Nested(RiskRating.resource_fields),
"theSeverity": fields.String,
}
required = ["theDescription", "theEnvironmentName"]
class MisuseCaseModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theRiskName": fields.String,
"theEnvironmentProperties": fields.List(fields.Nested(MisuseCaseEnvironmentPropertiesModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class RequirementModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theLabel": fields.String,
"theDescription": fields.String,
"thePriority": fields.Integer,
"theOriginator": fields.String,
"theFitCriterion": fields.String,
"theRationale": fields.String,
"theType": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class AcceptEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theCost': fields.String,
'theRationale': fields.String,
'theEnvironmentName': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class MitigateEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theDetectionMechanisms': fields.List(fields.String),
'theDetectionPoint': fields.String,
'theType': fields.String,
'theEnvironmentName': fields.String,
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class TransferEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theRoles': fields.List(fields.Nested(ValuedRole.resource_fields)),
'theRationale': fields.String,
'theEnvironmentName': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ResponseEnvironmentPropertiesModel(object):
resource_fields = {
'accept': fields.List(fields.Nested(AcceptEnvironmentPropertiesModel.resource_fields)),
'mitigate': fields.List(fields.Nested(MitigateEnvironmentPropertiesModel.resource_fields)),
'transfer': fields.List(fields.Nested(TransferEnvironmentPropertiesModel.resource_fields))
}
field_names = list(resource_fields.keys())
class ResponseModel(object):
resource_fields = {
obj_id_field: fields.String,
'theTags': fields.List(fields.String),
'theRisk': fields.String,
'theName': fields.String,
'theEnvironmentProperties': fields.Nested(ResponseEnvironmentPropertiesModel.resource_fields),
'theResponseType': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
required.remove('theTags')
class RiskModel(object):
resource_fields = {
obj_id_field: fields.String,
"theVulnerabilityName": fields.String,
"theMisuseCase": fields.Nested(MisuseCaseModel.resource_fields),
"theTags": fields.List(fields.Nested(fields.String)),
"theThreatName": fields.String,
"theRiskName": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
required.remove("theTags")
class RoleModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theType": fields.String,
"theShortCode": fields.String,
"theDescription": fields.String,
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class RoleEnvironmentPropertiesModel(object):
resource_fields = {
"theEnvironmentName": fields.String,
"theResponses": fields.List(fields.List(fields.String)),
"theCountermeasures": fields.List(fields.String),
"theGoals": fields.List(fields.String),
"theRequirements": fields.List(fields.String)
}
required = list(resource_fields.keys())
class ThreatEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theAssets': fields.List(fields.String),
'theLikelihood': fields.String,
'theEnvironmentName': fields.String,
'theAttackers': fields.List(fields.String),
'theProperties': fields.List(fields.Nested(SecurityAttribute.resource_fields)),
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ThreatModel(object):
resource_fields = {
obj_id_field: fields.String,
'theTags': fields.List(fields.String),
'theName': fields.String,
'theType': fields.String,
'theMethod': fields.String,
'theEnvironmentProperties': fields.List(fields.Nested(ThreatEnvironmentPropertiesModel.resource_fields)),
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class UserConfigModel(object):
resource_fields = {
"user": fields.String,
"passwd": fields.String,
"db": fields.String,
"host": fields.String,
"port": fields.Integer,
"jsonPrettyPrint": fields.String
}
required = list(resource_fields.keys())
required.remove("jsonPrettyPrint")
class VulnerabilityEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
"theAssets": fields.List(fields.String),
"theEnvironmentName": fields.String,
"theSeverity": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class VulnerabilityModel(object):
resource_fields = {
obj_id_field: fields.String,
'theName': fields.String,
'theType': fields.String,
'theTags': fields.List(fields.String),
'theDescription': fields.String,
'theEnvironmentProperties': fields.List(fields.Nested(VulnerabilityEnvironmentPropertiesModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class CountermeasureTask(object):
resource_fields = {
"thePersona": fields.String,
"theTask": fields.String,
"theDuration": fields.String,
"theFrequency": fields.String,
"theDemands": fields.String,
"theGoalConflict": fields.String
}
required = list(resource_fields.keys())
def __init__(self,pName,tName,tDur,tFreq,tDemands,tGoalConflict):
self.thePersona = pName
self.theTask = tName
self.theDuration = tDur
self.theFrequency = tFreq
self.theDemands = tDemands
self.theGoalConflict = tGoalConflict
class CountermeasureEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
"theEnvironmentName": fields.String,
"theRequirements": fields.List(fields.String),
"theTargets": fields.List(fields.Nested(CountermeasureTarget.resource_fields)),
'theProperties': fields.List(fields.Nested(SecurityAttribute.resource_fields)),
"theCost": fields.String,
"theRoles": fields.List(fields.String),
"thePersonas": fields.List(fields.Nested(CountermeasureTask.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class CountermeasureModel(object):
resource_fields = {
obj_id_field: fields.String,
'theName': fields.String,
'theTags': fields.List(fields.String),
'theDescription': fields.String,
'theType': fields.String,
'theEnvironmentProperties': fields.List(fields.Nested(CountermeasureEnvironmentPropertiesModel.resource_fields)),
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
required.remove('theTags')
class PersonaEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theDirectFlag': fields.Integer,
'theNarrative': fields.String,
'theRoles': fields.List(fields.String)
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class PersonaModel(object):
resource_fields = {
obj_id_field: fields.String,
'theName': fields.String,
'theTags': fields.List(fields.String),
'theActivities': fields.String,
'theAttitudes': fields.String,
'theAptitudes': fields.String,
'theMotivations': fields.String,
'theSkills': fields.String,
'theIntrinsic': fields.String,
'theContextual': fields.String,
'theImage': fields.String,
'isAssumption': fields.Integer,
'thePersonaType': fields.String,
'theEnvironmentProperties': fields.List(fields.Nested(PersonaEnvironmentPropertiesModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class TaskEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'thePersonas': fields.List(fields.Nested(PersonaTaskCharacteristics.resource_fields)),
'theAssets': fields.List(fields.String),
'theDependencies': fields.String,
'theNarrative': fields.String,
'theConsequences': fields.String,
'theBenefits': fields.String,
'theConcernAssociations': fields.List(fields.Nested(ConcernAssociationModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class TaskModel(object):
resource_fields = {
obj_id_field: fields.String,
'theName': fields.String,
'theShortCode': fields.String,
'theObjective': fields.String,
'isAssumption': fields.Integer,
'theAuthor': fields.String,
'theTags': fields.List(fields.String),
'theEnvironmentProperties': fields.List(fields.Nested(TaskEnvironmentPropertiesModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class UseCaseEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'thePreCond': fields.String,
'theSteps': fields.List(fields.Nested(StepAttributes.resource_fields)),
'thePostCond': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class UseCaseContributionModel(object):
resource_fields = {
obj_id_field: fields.String,
'theContributionTo': fields.String,
'theReferenceContribution': fields.Nested(CharacteristicReferenceContribution.resource_fields)
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,contTo,rc):
self.theContributionTo = contTo
self.theReferenceContribution = rc
def __getitem__(self,varName):
if (varName == 'theContributionTo'): return self.theContributionTo
elif (varName == 'theReferenceContribution'): return self.theReferenceContribution
else: return None
class UseCaseModel(object):
resource_fields = {
obj_id_field: fields.String,
'theName': fields.String,
'theTags': fields.List(fields.String),
'theAuthor': fields.String,
'theCode': fields.String,
'theActors': fields.List(fields.String),
'theDescription': fields.String,
'theReferenceContributions': fields.List(fields.Nested(UseCaseContributionModel.resource_fields)),
'theEnvironmentProperties': fields.List(fields.Nested(UseCaseEnvironmentPropertiesModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class SearchValuesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theEnvironmentName': fields.String,
'theTypeName': fields.String,
'theObjectName': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class FindModel(object):
resource_fields = {
obj_id_field: fields.String,
'theSearchValues': fields.List(fields.Nested(SearchValuesModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class VersionModel(object):
resource_fields = {
obj_id_field: fields.String,
'theVersion': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ValidationModel(object):
resource_fields = {
obj_id_field: fields.String,
'theLabel': fields.String,
'theMessage': fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class AssetAssociationModel(object):
resource_fields = {
obj_id_field: fields.String,
"theEnvironmentName": fields.String,
"theHeadAsset": fields.String,
"theHeadType": fields.String,
"theHeadNavigation": fields.String,
"theHeadMultiplicity": fields.String,
"theHeadRole": fields.String,
"theTailRole": fields.String,
"theTailMultiplicity": fields.String,
"theTailType": fields.String,
"theTailNavigation": fields.String,
"theTailAsset": fields.String,
"theRationale": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class GoalAssociationModel(object):
resource_fields = {
obj_id_field: fields.String,
"theEnvironmentName": fields.String,
"theGoal": fields.String,
"theGoalDimension": fields.String,
"theAssociationType": fields.String,
"theSubGoal": fields.String,
"theSubGoalDimension": fields.String,
"theAlternativeId": fields.String,
"theRationale": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ExternalDocumentModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theVersion": fields.String,
"thePublicationDate": fields.String,
"theAuthors": fields.String,
"theDescription": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class DocumentReferenceModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theDocName": fields.String,
"theContributor": fields.String,
"theExcerpt": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ReferenceSynopsisModel(object):
resource_fields = {
obj_id_field: fields.String,
"theReference": fields.String,
"theSynopsis": fields.String,
"theDimension": fields.String,
"theActor": fields.String,
"theSynopsisDimension" : fields.String,
"theInitialSatisfaction" : fields.String,
"theRelatedGoals" : fields.List(fields.String)
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ReferenceContributionModel(object):
resource_fields = {
obj_id_field: fields.String,
"theSource": fields.String,
"theDestination": fields.String,
"theMeansEnd": fields.String,
"theContribution": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class GoalContributionModel(object):
resource_fields = {
obj_id_field: fields.String,
"theSource": fields.String,
"theSourceType": fields.String,
"theDestination": fields.String,
"theDestinationType": fields.String,
"theMeansEnd": fields.String,
"theContribution": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ConceptReferenceModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theDimName": fields.String,
"theObjtName": fields.String,
"theDescription": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class PersonaCharacteristicModel(object):
resource_fields = {
obj_id_field: fields.String,
"thePersonaName": fields.String,
"theModQual": fields.String,
"theVariable": fields.String,
"theName": fields.String,
"theCharacteristicSynopsis": fields.Nested(CharacteristicReferenceSynopsis.resource_fields),
"theGrounds": fields.List(fields.Nested(CharacteristicReference.resource_fields)),
"theWarrant": fields.List(fields.Nested(CharacteristicReference.resource_fields)),
"theRebuttal": fields.List(fields.Nested(CharacteristicReference.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class TaskCharacteristicModel(object):
resource_fields = {
obj_id_field: fields.String,
"theTaskName": fields.String,
"theModQual": fields.String,
"theName": fields.String,
"theGrounds": fields.List(fields.Nested(CharacteristicReference.resource_fields)),
"theWarrant": fields.List(fields.Nested(CharacteristicReference.resource_fields)),
"theRebuttal": fields.List(fields.Nested(CharacteristicReference.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ObjectDependencyModel(object):
resource_fields = {
obj_id_field: fields.String,
"theDependencies": fields.List(fields.Nested(ObjectDependency.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self):
self.theDependencies = []
class ComponentStructureModel(object):
resource_fields = {
obj_id_field: fields.String,
"theHeadAsset": fields.String,
"theHeadAdornment": fields.String,
"theHeadNav": fields.String,
"theHeadNry": fields.String,
"theHeadRole": fields.String,
"theTailRole": fields.String,
"theTailNry": fields.String,
"theTailNav": fields.String,
"theTailAdornment": fields.String,
"theTailAsset": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ComponentGoalAssociationModel(object):
resource_fields = {
obj_id_field: fields.String,
"theHeadGoal": fields.String,
"theRefType": fields.String,
"theTailGoal": fields.String,
"theRationale": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ComponentModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theDescription": fields.String,
"theInterfaces" : fields.List(fields.Nested(InterfaceModel.resource_fields)),
"theStructure" : fields.List(fields.Nested(ComponentStructureModel.resource_fields)),
"theRequirements" : fields.List(fields.String),
"theGoals" : fields.List(fields.String),
"theGoalAssociations" : fields.List(fields.Nested(ComponentGoalAssociationModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ConnectorModel(object):
resource_fields = {
obj_id_field: fields.String,
"theConnectorName": fields.String,
"theFromComponent": fields.String,
"theFromRole": fields.String,
"theFromInterface": fields.String,
"theToComponent": fields.String,
"theToInterface": fields.String,
"theToRole": fields.String,
"theAssetName": fields.String,
"theProtocol": fields.String,
"theAccessRight": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ArchitecturalPatternModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theSynopsis": fields.String,
"theComponents": fields.List(fields.Nested(ComponentModel.resource_fields)),
"theConnectors": fields.List(fields.Nested(ConnectorModel.resource_fields)),
"theAttackSurfaceMetric": fields.List(fields.Integer)
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
required.remove('theAttackSurfaceMetric')
class ValueTypeModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theDescription": fields.String,
"theType": fields.String,
"theEnvironmentName": fields.String,
"theScore": fields.Integer,
"theRationale": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class TemplateGoalModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theDefinition": fields.String,
"theRationale": fields.String,
"theConcerns": fields.List(fields.String),
"theResponsibilities": fields.List(fields.String)
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class TemplateAssetModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theShortCode": fields.String,
"theDescription": fields.String,
"theSignificance": fields.String,
"theType": fields.String,
"theSurfaceType": fields.String,
"theAccessRight": fields.String,
"theProperties": fields.List(fields.Nested(SecurityAttribute.resource_fields)),
"theTags": fields.String,
"theInterfaces" : fields.List(fields.Nested(InterfaceModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
required.remove('theTags')
class TemplateRequirementModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theAssetName": fields.String,
"theType": fields.String,
"theDescription": fields.String,
"theRationale": fields.String,
"theFitCriterion": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class LocationModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theAssetInstances": fields.List(fields.String),
"thePersonaInstances": fields.List(fields.String),
"theLinks": fields.List(fields.String),
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class LocationsModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theDiagram": fields.List(fields.String),
"theLocations" : fields.List(fields.Nested(LocationModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class SummaryModel(object):
resource_fields = {
"theLabel": fields.String,
"theValue": fields.String
}
required = list(resource_fields.keys())
def __init__(self,lbl,val):
self.theLabel = lbl
self.theValue = val
class TraceModel(object):
resource_fields = {
obj_id_field: fields.String,
"theFromObject": fields.String,
"theFromName": fields.String,
"theToObject": fields.String,
"theToName": fields.String,
"theLabel" : fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,fromObjt,fromName,toObjt,toName,lbl):
self.theFromObject = fromObjt
self.theFromName = fromName
self.theToObject = toObjt
self.theToName = toName
self.theLabel = lbl
class WeaknessTargetModel(object):
resource_fields = {
obj_id_field: fields.String,
"theTargetName": fields.String,
"theComponents": fields.List(fields.String),
"theTemplateAssets": fields.List(fields.String),
"theAssets": fields.List(fields.String),
"theTreatmentRequirement": fields.String,
"theTreatmentAsset": fields.String,
"theTreatmentEffectiveness": fields.String,
"theTreatmentRationale" : fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self):
self.theTargetName = ''
self.theComponents = []
self.theTemplateAssets = []
self.theAssets = []
self.theTreatmentRequirement = ''
self.theTreatmentAsset = ''
self.theTreatmentEffectiveness = ''
self.theTreatmentRationale = ''
class PersonaImpactModel(object):
resource_fields = {
obj_id_field: fields.String,
"thePersonaName": fields.String,
"theImpactScore": fields.Integer
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,pName,iScore):
self.thePersonaName = pName
self.theImpactScore = iScore
class CandidateGoalObstacleModel(object):
resource_fields = {
obj_id_field: fields.String,
"theGoalName": fields.String,
"theObstacleName": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,gName,oName):
self.theGoalName = gName
self.theObstacleName = oName
class WeaknessAnalysisModel(object):
resource_fields = {
obj_id_field: fields.String,
"theVulnerabilityWeaknesses" : fields.List(fields.Nested(WeaknessTargetModel.resource_fields)),
"theThreatWeaknesses" : fields.List(fields.Nested(WeaknessTargetModel.resource_fields)),
"thePersonaImpact" : fields.List(fields.Nested(PersonaImpactModel.resource_fields)),
"theCandidateGoals" : fields.List(fields.Nested(CandidateGoalObstacleModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self):
self.theVulnerabilityWeaknesses = []
self.theThreatWeaknesses = []
self.thePersonaImpact = []
self.theCandidateGoals = []
class SecurityPatternStructureModel(object):
resource_fields = {
obj_id_field: fields.String,
"theHeadAsset": fields.String,
"theHeadAdornment": fields.String,
"theHeadNry": fields.String,
"theHeadRole": fields.String,
"theTailRole": fields.String,
"theTailNry": fields.String,
"theTailAdornment": fields.String,
"theTailAsset": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class PatternRequirementModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theAsset": fields.String,
"theType": fields.String,
"theDescription": fields.String,
"theRationale": fields.String,
"theFitCriterion": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class SecurityPatternModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theContext": fields.String,
"theProblem": fields.String,
"theSolution": fields.String,
"theRequirements": fields.List(fields.Nested(PatternRequirementModel.resource_fields)),
"theConcernAssociations" : fields.List(fields.Nested(SecurityPatternStructureModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class DataFlowObstacle(object):
resource_fields = {
obj_id_field: fields.String,
"theObstacleName" : fields.String,
"theKeyword" : fields.String,
"theContext" : fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class DataFlowModel(object):
resource_fields = {
obj_id_field: fields.String,
"theName": fields.String,
"theType": fields.String,
"theEnvironmentName": fields.String,
"theFromName": fields.String,
"theFromType": fields.String,
"theToName": fields.String,
"theToType": fields.String,
"theAssets": fields.List(fields.String),
"theObstacles": fields.List(fields.Nested(DataFlowObstacle.resource_fields)),
"theTags": fields.List(fields.String)
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class DirectoryModel(object):
resource_fields = {
obj_id_field: fields.String,
"theLabel": fields.String,
"theName": fields.String,
"theDescription": fields.String,
"theType": fields.String,
"theReference": fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class TrustBoundaryComponent(object):
resource_fields = {
obj_id_field: fields.String,
'theName': fields.String,
'theType': fields.String,
"theTags": fields.List(fields.String)
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,n,t):
self.theName = n
self.theType = t
def name(self): return self.theName
def type(self): return self.theType
class TrustBoundaryEnvironmentModel(object):
resource_fields = {
obj_id_field: fields.String,
'theEnvironmentName': fields.String,
'theComponents': fields.List(fields.Nested(TrustBoundaryComponent.resource_fields)),
'thePrivilege' : fields.String
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
def __init__(self,n,c,p):
self.theEnvironmentName = n
self.theComponents = c
self.thePrivilege = p
class TrustBoundaryModel(object):
resource_fields = {
obj_id_field: fields.String,
'theName': fields.String,
'theType': fields.String,
'theDescription': fields.String,
'theEnvironmentProperties': fields.List(fields.Nested(TrustBoundaryEnvironmentModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ThreatModelPropertyModel(object):
resource_fields = {
obj_id_field: fields.String,
'theProperty': fields.String,
'theThreats': fields.List(fields.String)
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ThreatModelElementModel(object):
resource_fields = {
obj_id_field: fields.String,
'theElement': fields.String,
'theProperties': fields.List(fields.Nested(ThreatModelPropertyModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
class ThreatModelModel(object):
resource_fields = {
obj_id_field: fields.String,
'theEntities': fields.List(fields.Nested(ThreatModelElementModel.resource_fields)),
'theProcesses': fields.List(fields.Nested(ThreatModelElementModel.resource_fields)),
'theDatastores': fields.List(fields.Nested(ThreatModelElementModel.resource_fields)),
'theDataflows': fields.List(fields.Nested(ThreatModelElementModel.resource_fields))
}
required = list(resource_fields.keys())
required.remove(obj_id_field)
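# Usage note (a minimal sketch, not part of this module's code): each model's
# resource_fields map is intended for flask_restful marshalling, e.g.
#
#   from flask_restful import marshal
#   marshal(SummaryModel('Asset count', '12'), SummaryModel.resource_fields)
#   # -> an OrderedDict with 'theLabel' and 'theValue'
#
# The `required` lists (all keys except obj_id_field) are presumably consumed by
# request-validation code elsewhere in the code base.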
|
|
#!/usr/bin/env python2
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Copyright (c) 2015-2018 The PIVX developers
# Copyright (c) 2018 The Ion developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework import BitcoinTestFramework
from util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class WalletBackupTest(BitcoinTestFramework):
def setup_chain(self):
logging.info("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
    # This mirrors how the network was set up in the bash test
def setup_network(self, split=False):
        # nodes 1, 2 and 3 are spenders, let's give them a keypool=100
extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
self.nodes = start_nodes(4, self.options.tmpdir, extra_args)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.is_network_split=False
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].setgenerate(True, 1)
# As above, this mirrors the original bash test.
def start_three(self):
self.nodes[0] = start_node(0, self.options.tmpdir)
self.nodes[1] = start_node(1, self.options.tmpdir)
self.nodes[2] = start_node(2, self.options.tmpdir)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 1)
stop_node(self.nodes[2], 2)
def erase_three(self):
os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
def run_test(self):
logging.info("Generating initial blockchain")
self.nodes[0].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[1].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[2].setgenerate(True, 1)
sync_blocks(self.nodes)
self.nodes[3].setgenerate(True, 100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
logging.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
logging.info("Backing up")
tmpdir = self.options.tmpdir
self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
logging.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].setgenerate(True, 101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
logging.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
# Restore wallets from backup
shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
logging.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
logging.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
        # Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
WalletBackupTest().main()
|
|
import math as python_lib_Math
import math as Math
import builtins as python_lib_Builtins
import inspect as python_lib_Inspect
class Enum:
_hx_class_name = "Enum"
_hx_fields = ["tag", "index", "params"]
_hx_methods = ["__str__"]
def __init__(self,tag,index,params):
self.tag = None
self.index = None
self.params = None
self.tag = tag
self.index = index
self.params = params
def __str__(self):
if (self.params is None):
return self.tag
else:
return (((HxOverrides.stringOrNull(self.tag) + "(") + HxOverrides.stringOrNull(",".join([python_Boot.toString1(x1,'') for x1 in self.params]))) + ")")
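# Illustration of the generated string form (derived from __str__ above):
#   str(Enum("Leaf", 0, None))   -> "Leaf"
#   str(Enum("Node", 1, [1, 2])) -> "Node(1,2)"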
class Script:
_hx_class_name = "Script"
_hx_statics = ["main"]
@staticmethod
def main():
data = sys_io_File.getContent("dq_unisex_names.csv")
data = StringTools.replace(data,"\r","")
data_list = data.split("\n")
first_five = data_list[0:5]
print(str(first_five))
class StringTools:
_hx_class_name = "StringTools"
_hx_statics = ["replace"]
@staticmethod
def replace(s,sub,by):
_this = None
if (sub == ""):
_this = list(s)
else:
_this = s.split(sub)
return by.join([python_Boot.toString1(x1,'') for x1 in _this])
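# Illustration (derived from the code above):
#   StringTools.replace("a-b-c", "-", "+") -> "a+b+c"
#   StringTools.replace("abc", "", "-")    -> "a-b-c"  (an empty sub splits the string into characters)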
class haxe_io_Eof:
_hx_class_name = "haxe.io.Eof"
_hx_methods = ["toString"]
def toString(self):
return "Eof"
class python_Boot:
_hx_class_name = "python.Boot"
_hx_statics = ["keywords", "toString1", "fields", "simpleField", "getInstanceFields", "getSuperClass", "getClassFields", "prefixLength", "unhandleKeywords"]
@staticmethod
def toString1(o,s):
if (o is None):
return "null"
if isinstance(o,str):
return o
if (s is None):
s = ""
if (len(s) >= 5):
return "<...>"
if isinstance(o,bool):
if o:
return "true"
else:
return "false"
if isinstance(o,int):
return str(o)
if isinstance(o,float):
try:
if (o == int(o)):
def _hx_local_1():
def _hx_local_0():
v = o
return Math.floor((v + 0.5))
return str(_hx_local_0())
return _hx_local_1()
else:
return str(o)
except Exception as _hx_e:
_hx_e1 = _hx_e
e = _hx_e1
return str(o)
if isinstance(o,list):
o1 = o
l = len(o1)
st = "["
s = (("null" if s is None else s) + "\t")
_g = 0
while (_g < l):
i = _g
_g = (_g + 1)
prefix = ""
if (i > 0):
prefix = ","
st = (("null" if st is None else st) + HxOverrides.stringOrNull(((("null" if prefix is None else prefix) + HxOverrides.stringOrNull(python_Boot.toString1((o1[i] if i >= 0 and i < len(o1) else None),s))))))
st = (("null" if st is None else st) + "]")
return st
try:
if hasattr(o,"toString"):
return o.toString()
except Exception as _hx_e:
_hx_e1 = _hx_e
pass
if (python_lib_Inspect.isfunction(o) or python_lib_Inspect.ismethod(o)):
return "<function>"
if hasattr(o,"__class__"):
if isinstance(o,_hx_AnonObject):
toStr = None
try:
fields = python_Boot.fields(o)
fieldsStr = None
_g1 = []
_g11 = 0
while (_g11 < len(fields)):
f = (fields[_g11] if _g11 >= 0 and _g11 < len(fields) else None)
_g11 = (_g11 + 1)
x = ((("" + ("null" if f is None else f)) + " : ") + HxOverrides.stringOrNull(python_Boot.toString1(python_Boot.simpleField(o,f),(("null" if s is None else s) + "\t"))))
_g1.append(x)
fieldsStr = _g1
toStr = (("{ " + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr]))) + " }")
except Exception as _hx_e:
_hx_e1 = _hx_e
e2 = _hx_e1
return "{ ... }"
if (toStr is None):
return "{ ... }"
else:
return toStr
if isinstance(o,Enum):
o2 = o
l1 = len(o2.params)
hasParams = (l1 > 0)
if hasParams:
paramsStr = ""
_g2 = 0
while (_g2 < l1):
i1 = _g2
_g2 = (_g2 + 1)
prefix1 = ""
if (i1 > 0):
prefix1 = ","
paramsStr = (("null" if paramsStr is None else paramsStr) + HxOverrides.stringOrNull(((("null" if prefix1 is None else prefix1) + HxOverrides.stringOrNull(python_Boot.toString1((o2.params[i1] if i1 >= 0 and i1 < len(o2.params) else None),s))))))
return (((HxOverrides.stringOrNull(o2.tag) + "(") + ("null" if paramsStr is None else paramsStr)) + ")")
else:
return o2.tag
if hasattr(o,"_hx_class_name"):
if (o.__class__.__name__ != "type"):
fields1 = python_Boot.getInstanceFields(o)
fieldsStr1 = None
_g3 = []
_g12 = 0
while (_g12 < len(fields1)):
f1 = (fields1[_g12] if _g12 >= 0 and _g12 < len(fields1) else None)
_g12 = (_g12 + 1)
x1 = ((("" + ("null" if f1 is None else f1)) + " : ") + HxOverrides.stringOrNull(python_Boot.toString1(python_Boot.simpleField(o,f1),(("null" if s is None else s) + "\t"))))
_g3.append(x1)
fieldsStr1 = _g3
toStr1 = (((HxOverrides.stringOrNull(o._hx_class_name) + "( ") + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr1]))) + " )")
return toStr1
else:
fields2 = python_Boot.getClassFields(o)
fieldsStr2 = None
_g4 = []
_g13 = 0
while (_g13 < len(fields2)):
f2 = (fields2[_g13] if _g13 >= 0 and _g13 < len(fields2) else None)
_g13 = (_g13 + 1)
x2 = ((("" + ("null" if f2 is None else f2)) + " : ") + HxOverrides.stringOrNull(python_Boot.toString1(python_Boot.simpleField(o,f2),(("null" if s is None else s) + "\t"))))
_g4.append(x2)
fieldsStr2 = _g4
toStr2 = (((("#" + HxOverrides.stringOrNull(o._hx_class_name)) + "( ") + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr2]))) + " )")
return toStr2
if (o == str):
return "#String"
if (o == list):
return "#Array"
if callable(o):
return "function"
try:
if hasattr(o,"__repr__"):
return o.__repr__()
except Exception as _hx_e:
_hx_e1 = _hx_e
pass
if hasattr(o,"__str__"):
return o.__str__([])
if hasattr(o,"__name__"):
return o.__name__
return "???"
else:
return str(o)
@staticmethod
def fields(o):
a = []
if (o is not None):
if hasattr(o,"_hx_fields"):
fields = o._hx_fields
return list(fields)
if isinstance(o,_hx_AnonObject):
d = o.__dict__
keys = d.keys()
handler = python_Boot.unhandleKeywords
for k in keys:
a.append(handler(k))
elif hasattr(o,"__dict__"):
a1 = []
d1 = o.__dict__
keys1 = d1.keys()
for k in keys1:
a.append(k)
return a
@staticmethod
def simpleField(o,field):
if (field is None):
return None
field1 = None
if field in python_Boot.keywords:
field1 = ("_hx_" + field)
elif ((((len(field) > 2) and ((ord(field[0]) == 95))) and ((ord(field[1]) == 95))) and ((ord(field[(len(field) - 1)]) != 95))):
field1 = ("_hx_" + field)
else:
field1 = field
if hasattr(o,field1):
return getattr(o,field1)
else:
return None
@staticmethod
def getInstanceFields(c):
f = None
if hasattr(c,"_hx_fields"):
f = c._hx_fields
else:
f = []
if hasattr(c,"_hx_methods"):
a = c._hx_methods
f = (f + a)
sc = python_Boot.getSuperClass(c)
if (sc is None):
return f
else:
scArr = python_Boot.getInstanceFields(sc)
scMap = set(scArr)
res = []
_g = 0
while (_g < len(f)):
f1 = (f[_g] if _g >= 0 and _g < len(f) else None)
_g = (_g + 1)
if (not f1 in scMap):
scArr.append(f1)
return scArr
@staticmethod
def getSuperClass(c):
if (c is None):
return None
try:
if hasattr(c,"_hx_super"):
return c._hx_super
return None
except Exception as _hx_e:
_hx_e1 = _hx_e
pass
return None
@staticmethod
def getClassFields(c):
if hasattr(c,"_hx_statics"):
x = c._hx_statics
return list(x)
else:
return []
@staticmethod
def unhandleKeywords(name):
if (HxString.substr(name,0,python_Boot.prefixLength) == "_hx_"):
real = HxString.substr(name,python_Boot.prefixLength,None)
if real in python_Boot.keywords:
return real
return name
class _hx_AnonObject:
_hx_class_name = "_hx_AnonObject"
class python_internal_ArrayImpl:
_hx_class_name = "python.internal.ArrayImpl"
_hx_statics = ["_get"]
@staticmethod
def _get(x,idx):
if ((idx > -1) and ((idx < len(x)))):
return x[idx]
else:
return None
class HxOverrides:
_hx_class_name = "HxOverrides"
_hx_statics = ["eq", "stringOrNull"]
@staticmethod
def eq(a,b):
if (isinstance(a,list) or isinstance(b,list)):
return a is b
return (a == b)
@staticmethod
def stringOrNull(s):
if (s is None):
return "null"
else:
return s
class HxString:
_hx_class_name = "HxString"
_hx_statics = ["substr"]
@staticmethod
def substr(s,startIndex,_hx_len = None):
if (_hx_len is None):
return s[startIndex:]
else:
if (_hx_len == 0):
return ""
return s[startIndex:(startIndex + _hx_len)]
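# Illustration (derived from the code above):
#   HxString.substr("haxe", 1, 2) -> "ax"
#   HxString.substr("haxe", 1)    -> "axe"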
class sys_io_File:
_hx_class_name = "sys.io.File"
_hx_statics = ["getContent"]
@staticmethod
def getContent(path):
f = python_lib_Builtins.open(path,"r",-1,"utf-8",None,"")
content = f.read(-1)
f.close()
return content
Math.NEGATIVE_INFINITY = float("-inf")
Math.POSITIVE_INFINITY = float("inf")
Math.NaN = float("nan")
Math.PI = python_lib_Math.pi
python_Boot.keywords = set(["and", "del", "from", "not", "with", "as", "elif", "global", "or", "yield", "assert", "else", "if", "pass", "None", "break", "except", "import", "raise", "True", "class", "exec", "in", "return", "False", "continue", "finally", "is", "try", "def", "for", "lambda", "while"])
python_Boot.prefixLength = len("_hx_")
Script.main()
|
|
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import random
from django.db.models import Max, Min, Sum
from django.db.models.query import F
from kolibri.auth.filters import HierarchyRelationsFilter
from kolibri.auth.models import Classroom, Facility, FacilityUser
from kolibri.content.models import ContentNode
from kolibri.logger.models import AttemptLog, ContentSessionLog, ContentSummaryLog, MasteryLog
from le_utils.constants import content_kinds
def get_or_create_facilities(**options):
n_facilities = options['n_facilities']
n_on_device = Facility.objects.all().count()
n_to_create = n_facilities - n_on_device
if n_to_create > 0:
print('Generating {n} facility object(s)'.format(n=n_to_create))
for i in range(0, n_to_create):
Facility.objects.create(name='Test Facility {i}'.format(i=i + 1))
return Facility.objects.all()[0:n_facilities]
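# Example (sketch): get_or_create_facilities(n_facilities=2) tops the database up to
# two Facility objects, creating 'Test Facility 1' and 'Test Facility 2' if needed,
# and returns the first two.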
def get_or_create_classrooms(**options):
n_classes = options['n_classes']
facility = options['facility']
n_on_device = Classroom.objects.filter(parent=facility).count()
n_to_create = n_classes - n_on_device
if n_to_create > 0:
print('Generating {n} classroom object(s) for facility: {name}'.format(
n=n_to_create,
name=facility.name,
))
for i in range(0, n_to_create):
Classroom.objects.create(
parent=facility,
name='Classroom {i}{a}'.format(i=i + 1, a=random.choice('ABCD'))
)
return Classroom.objects.filter(parent=facility)[0:n_classes]
def get_or_create_classroom_users(**options):
classroom = options['classroom']
n_users = options['n_users']
user_data = options['user_data']
facility = options['facility']
# The headers in the user_data.csv file that we use to generate user Full Names
# Note, we randomly pick from these to give deliberately varied (and sometimes idiosyncratic)
# Full names - because we should never assume that users have names like us
user_data_name_fields = ["GivenName", "MiddleInitial", "Surname"]
n_in_classroom = HierarchyRelationsFilter(FacilityUser.objects.all()).filter_by_hierarchy(
ancestor_collection=classroom,
target_user=F("id"),
).count()
# Only generate new users if there are fewer users than requested.
n_to_create = n_users - n_in_classroom
if n_to_create > 0:
print('Generating {n} user object(s) for class: {classroom} in facility: {facility}'.format(
n=n_to_create,
classroom=classroom,
facility=facility,
))
for i in range(0, n_to_create):
# Get the first base data that does not have a matching user already
base_data = user_data[n_in_classroom + i]
# Randomly create the name from 1 to 3 of the three user name fields
name = " ".join([base_data[key] for key in random.sample(user_data_name_fields, random.randint(1, 3))])
user = FacilityUser.objects.create(
facility=facility,
full_name=name,
username=base_data['Username']
)
# Set a dummy password so that if we want to login as this learner later, we can.
user.set_password('password')
user.save()
# Add the user to the current classroom
classroom.add_member(user)
return HierarchyRelationsFilter(FacilityUser.objects.all()).filter_by_hierarchy(
target_user=F("id"),
ancestor_collection=classroom,
)[0:n_users]
def add_channel_activity_for_user(**options): # noqa: max-complexity=16
n_content_items = options['n_content_items']
channel = options['channel']
user = options['user']
now = options['now']
channel_id = channel.id
default_channel_content = ContentNode.objects.exclude(kind=content_kinds.TOPIC).filter(channel_id=channel_id)
print('Generating {i} user interaction(s) for user: {user} for channel: {channel}'.format(
i=n_content_items,
user=user,
channel=channel.name
))
# Generate a content interaction history for this many content items
for i in range(0, n_content_items):
# Use this to randomly select a content node to generate the interaction for
index = random.randint(0, default_channel_content.count() - 1)
random_node = default_channel_content[index]
# We will generate between 1 and 5 content session logs for this content item
session_logs = []
for j in range(0, random.randint(1, 5)):
# How many minutes did they spend in this session? Up to 15
duration = random.random() * 15
# Assume they spent some of this session time not doing anything - the lazy...
idle_time = random.random() * duration
session_logs.append(ContentSessionLog(
user=user,
channel_id=channel_id,
content_id=random_node.content_id,
start_timestamp=now - datetime.timedelta(i + j, 0, duration),
end_timestamp=now - datetime.timedelta(i + j),
# How many seconds did they actually spend doing something?
time_spent=(duration - idle_time) * 60,
progress=random.random(),
kind=random_node.kind,
))
# Assume they have not completed
completion_timestamp = None
cumulative_progress = 0
# Go through all the session logs and add up the progress in each
for session_log in session_logs:
cumulative_progress = min(cumulative_progress + session_log.progress, 1.0)
# If the progress is 1 or more, they have completed! Set the completion timestamp
# For the end of this session, for the sake of argument.
if cumulative_progress >= 1.0:
completion_timestamp = session_log.end_timestamp
session_log.save()
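        # e.g. session progresses of 0.4, 0.5 and 0.3 accumulate to 0.4, 0.9 and 1.0,
        # so the completion timestamp is taken from the third session's end timestamp.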
# Now that we have created all the Session Logs, infer the summary log from them
summary_log, created = ContentSummaryLog.objects.get_or_create(
user=user,
kind=random_node.kind,
content_id=random_node.content_id,
# Use defaults here so that we don't try to create a new Summary Log with the same
# kind/content_id/user combo, as this would violate uniqueness constraints
defaults={
'channel_id': channel_id,
# Start timestamp is the earliest start timestamp of the session logs
'start_timestamp': min(session_logs, key=lambda x: x.start_timestamp).start_timestamp,
# End timestamp is the latest of all the end timestamps
'end_timestamp': max(session_logs, key=lambda x: x.end_timestamp).end_timestamp,
'completion_timestamp': completion_timestamp,
'time_spent': sum(session_log.time_spent for session_log in session_logs),
'progress': min(sum(session_log.progress for session_log in session_logs), 1.0),
}
)
if not created:
# We didn't create the summary log this time, so it probably means it has other session logs
# Aggregate the information from there to update the relevant fields on the summary log
updates = ContentSessionLog.objects.filter(
user=user,
kind=random_node.kind,
content_id=random_node.content_id
).aggregate(
start_timestamp=Min('start_timestamp'),
end_timestamp=Max('end_timestamp'),
progress=Sum('progress')
)
summary_log.start_timestamp = updates['start_timestamp']
summary_log.end_timestamp = updates['end_timestamp']
if summary_log.progress < 1.0 and updates['progress'] >= 1.0:
# If it was not previously completed, and is now, set the completion timestamp to the
# final end timestamp of the session logs.
summary_log.completion_timestamp = updates['end_timestamp']
summary_log.progress = min(1.0, updates['progress'])
summary_log.save()
        # If we are dealing with anything but an assessment (currently only exercises)
        # we are done; if it is an exercise, create the additional mastery/attempt data below
if random_node.kind == content_kinds.EXERCISE:
# Generate a mastery log if needed
mastery_log, created = MasteryLog.objects.get_or_create(
user=user,
mastery_level=1,
summarylog=summary_log,
defaults={
'start_timestamp': summary_log.start_timestamp,
'end_timestamp': summary_log.end_timestamp,
'complete': summary_log.progress >= 1.0,
'completion_timestamp': completion_timestamp,
'mastery_criterion': {
'm': 5,
'n': 5,
'type': 'm_of_n',
},
}
)
if not created:
# Not created, so update relevant fields on it based on new interactions
if not mastery_log.complete and summary_log.progress >= 1.0:
mastery_log.complete = True
mastery_log.completion_timestamp = summary_log.completion_timestamp
mastery_log.end_timestamp = summary_log.end_timestamp
# Get the list of assessment item ids from the assessment meta data
assessment_item_ids = random_node.assessmentmetadata.first().assessment_item_ids
for i, session_log in enumerate(reversed(session_logs)):
# Always make students get 5 attempts correct in the most recent session
# if the exercise is complete
complete = (i == 0 and mastery_log.complete)
if complete:
n = 5
else:
# Otherwise, let them have answered between 1 and 5 questions per session
n = random.randint(1, 5)
# How long did they spend on these n questions?
timespan = session_log.end_timestamp - session_log.start_timestamp
# Index through each individual question
for j in range(0, n):
if complete:
# If this is the session where they completed the exercise, always
# make them get it right
correct = True
else:
# Otherwise only let students get odd indexed questions right,
# ensuring they will always have a mastery breaking sequence
# as zero based indexing means their first attempt will always be wrong!
correct = j % 2 == 1
start_timestamp = session_log.end_timestamp - (timespan / n) * (j + 1)
end_timestamp = session_log.end_timestamp - (timespan / n) * j
# If incorrect, must have made at least two attempts at the question
question_attempts = 1 if correct else random.randint(2, 5)
question_interval = (end_timestamp - start_timestamp) / question_attempts
                # If they got it wrong, give a 20% chance that they took a hint along the way
hinted = random.choice((False, False, False, False, not correct))
if hinted:
first_interaction = {
'correct': False,
'type': 'hint',
}
else:
first_interaction = {
'correct': correct,
'type': 'answer',
}
first_interaction.update({
'answer': {},
'timestamp': start_timestamp + question_interval
})
interaction_history = [first_interaction]
# If it is correct, this can be our only response, otherwise, add more.
if not correct:
for att in range(1, question_attempts - 1):
# Add on additional attempts for intervening incorrect responses
interaction_history += [{
'correct': False,
'type': 'answer',
'answer': {},
'timestamp': start_timestamp + question_interval * (att + 1),
}]
# Finally, add a correct response that allows the user to move onto the next question
interaction_history += [{
'correct': True,
'type': 'answer',
'answer': {},
'timestamp': end_timestamp,
}]
AttemptLog.objects.create(
# Choose a random assessment item id from the exercise
item=random.choice(assessment_item_ids),
# Just let each attempt be a fixed proportion of the total time spent on the exercise
start_timestamp=start_timestamp,
end_timestamp=end_timestamp,
time_spent=timespan.total_seconds(),
                    # Mark all attempts as complete, as we assume that the student gave the correct answer eventually
complete=True,
# Mark as correct or incorrect
correct=correct,
hinted=hinted,
# We can't meaningfully generate fake answer data for Perseus exercises
# (which are currently our only exercise type) - so don't bother.
answer={},
simple_answer='',
interaction_history=interaction_history,
user=user,
masterylog=mastery_log,
sessionlog=session_log,
)
|
|
'''
Copyright Zlatko Minev and Zaki Leghtas
2015, 2016, 2017
'''
import os
import time
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from stat import S_ISREG, ST_CTIME, ST_MODE
from pandas import HDFStore, Series, DataFrame
from pint import UnitRegistry
from hfss import *
from toolbox import *
from config_bbq import root_dir, gseam, th, eps_r, tan_delta_surf, tan_delta_sapp
### Definitions
ureg = UnitRegistry(system='mks')
#==============================================================================
#%% Main computation class & interface with HFSS
#==============================================================================
class Bbq(object):
"""
This class defines a BBQ object which calculates and saves
Hamiltonian parameters from an HFSS simulation
"""
def __init__(self, project, design, verbose=True, append_analysis=False, setup_name = None):
        ''' calculate_H is the single-junction method using UH-Ue '''
self.project = project
self.design = design
self.setup = design.get_setup(name=setup_name)
self.fields = self.setup.get_fields()
self.nmodes = int(self.setup.n_modes)
self.listvariations = design._solutions.ListVariations(str(self.setup.solution_name))
self.nominalvariation = design.get_nominal_variation()
self.nvariations = np.size(self.listvariations)
self.solutions = self.setup.get_solutions()
self.verbose = verbose
self.append_analysis = append_analysis
        self.hfss_variables = {} # container for eBBQ list of variables
self.sols = {} # container for eBBQ solutions; could make a Panel
self.meta_data = {} # container for eBBQ metadata
self.setup_data()
if self.verbose: print ' # Modes: ' + str(self.nmodes), '\n # Variations: ' + str(self.nvariations)
#self.get_latest_h5()
self.latest_h5_path = None
if self.latest_h5_path is not None and self.append_analysis:
latest_bbq_analysis = BbqAnalysis(self.latest_h5_path)
if self.verbose: print 'Varied variables and values : ', latest_bbq_analysis.get_swept_variables(), \
'Variations : ', latest_bbq_analysis.variations
def get_latest_h5(self):
dirpath = self.data_dir
entries1 = (os.path.join(dirpath, fn) for fn in os.listdir(dirpath)) # get all entries in the directory w/ stats
entries2 = ((os.stat(path), path) for path in entries1)
entries3 = ((stat[ST_CTIME], path) # leave only regular files, insert creation date
for stat, path in entries2 if S_ISREG(stat[ST_MODE]) and path[-4:]=='hdf5')
#NOTE: on Windows `ST_CTIME` is a creation date but on Unix it could be something else
#NOTE: use `ST_MTIME` to sort by a modification date
paths_sorted = []
for cdate, path in sorted(entries3):
paths_sorted.append(path)
#print time.ctime(cdate), os.path.basename(path)
if len(paths_sorted) > 0:
self.latest_h5_path = paths_sorted[-1]
            if self.verbose: print 'This simulation has been analyzed, latest data in ' + self.latest_h5_path
else:
self.latest_h5_path = None
if self.verbose: print 'This simulation has never been analyzed'
def setup_data(self):
'''
        Sets up the folder path
'''
data_dir = root_dir + '/' + self.project.name + '/' + self.design.name
if self.verbose: print data_dir
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
self.data_dir = data_dir
self.data_filename = self.data_dir + '/' + self.design.name + '_' + time.strftime('%Y%m%d_%H%M%S', time.localtime()) + '.hdf5'
if len(self.design.name) > 50:
            print_color('WARNING! DESIGN FILENAME MAY BE TOO LONG! ')
if self.verbose:
print "Data will be saved in " + str(data_dir)
@deprecated
def calc_p_j(self, modes=None, variation=None):
'''
Calculates the p_j for all the modes.
Requires a calculator expression called P_J.
'''
lv = self.get_lv(variation)
if modes is None:
modes = range(self.nmodes)
pjs = {}
for ii, m in enumerate(modes):
print 'Calculating p_j for mode ' + str(m) + ' (' + str(ii) + '/' + str(np.size(modes)-1) + ')'
self.solutions.set_mode(m+1, 0)
self.fields = self.setup.get_fields()
P_J = self.fields.P_J
pjs['pj_'+str(m)] = P_J.evaluate(lv=lv)
self.pjs = pjs
if self.verbose: print pjs
return pjs
def get_p_j(self, mode):
pj = {}
pj_val = (self.U_E-self.U_H)/(2*self.U_E)
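        # Single-junction participation as computed here: p_j = |U_E - U_H| / (2*U_E);
        # e.g. U_E = 1.0, U_H = 0.9 gives p_j = 0.05.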
pj['pj_'+str(mode)] = np.abs(pj_val)
print ' p_j_' + str(mode) + ' = ' + str(pj_val)
return pj
def get_freqs_bare(self, variation):
#str(self.get_lv(variation))
freqs_bare_vals = []
freqs_bare_dict = {}
freqs, kappa_over_2pis = self.solutions.eigenmodes(self.get_lv_EM(variation))
for m in range(self.nmodes):
freqs_bare_dict['freq_bare_'+str(m)] = 1e9*freqs[m]
freqs_bare_vals.append(1e9*freqs[m])
if kappa_over_2pis is not None:
freqs_bare_dict['Q_'+str(m)] = freqs[m]/kappa_over_2pis[m]
else:
freqs_bare_dict['Q_'+str(m)] = 0
self.freqs_bare = freqs_bare_dict
self.freqs_bare_vals = freqs_bare_vals
return freqs_bare_dict, freqs_bare_vals
def get_lv(self, variation):
''' variation is a string #; e.g., '0'
returns array of var names and var values '''
if variation is None:
lv = self.nominalvariation
lv = self.parse_listvariations(lv)
else:
lv = self.listvariations[ ureg(variation) ]
lv = self.parse_listvariations(lv)
return lv
def get_lv_EM(self, variation):
if variation is None:
lv = self.nominalvariation
#lv = self.parse_listvariations_EM(lv)
else:
lv = self.listvariations[ ureg(variation) ]
#lv = self.parse_listvariations_EM(lv)
return str(lv)
def parse_listvariations_EM(self,lv):
lv = str(lv)
lv = lv.replace("=",":=,")
lv = lv.replace(' ',',')
lv = lv.replace("'","")
lv = lv.split(",")
return lv
def parse_listvariations(self,lv):
lv = str(lv)
lv = lv.replace("=",":=,")
lv = lv.replace(' ',',')
lv = lv.replace("'","")
lv = lv.split(",")
return lv
def get_variables(self,variation=None):
lv = self.get_lv(variation)
variables={}
for ii in range(len(lv)/2):
variables['_'+lv[2*ii][:-2]]=lv[2*ii+1]
self.variables = variables
return variables
# @deprecated # TODO: delete this
# def save_data(self, data, variation):
# group = self.h5file.create_group(variation)
# for name, val in data.items():
# group[name] = val
def get_Qseam(self, seam, mode, variation):
'''
        calculate the contribution to Q of a seam, by integrating the current in
the seam with finite conductance: set in the config file
ref: http://arxiv.org/pdf/1509.01119.pdf
'''
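        # As implemented below: y_seam = (line integral of the surface-current term
        # over the seam) / (U_H * omega), and Q_seam = g_seam / y_seam, with g_seam
        # taken from the config file.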
lv = self.get_lv(variation)
Qseam = {}
print 'Calculating Qseam_'+ seam +' for mode ' + str(mode) + ' (' + str(mode) + '/' + str(self.nmodes-1) + ')'
j_2_norm = self.fields.Vector_Jsurf.norm_2() # overestimating the loss by taking norm2 of j, rather than jperp**2
int_j_2 = j_2_norm.integrate_line(seam)
int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
yseam = int_j_2_val/self.U_H/self.omega
Qseam['Qseam_'+seam+'_'+str(mode)] = gseam/yseam
print 'Qseam_' + seam + '_' + str(mode) + str(' = ') + str(gseam/yseam)
return Series(Qseam)
def get_Qseam_sweep(self, seam, mode, variation, variable, values, unit, pltresult=True):
# values = ['5mm','6mm','7mm']
# ref: http://arxiv.org/pdf/1509.01119.pdf
self.solutions.set_mode(mode+1, 0)
self.fields = self.setup.get_fields()
freqs_bare_dict, freqs_bare_vals = self.get_freqs_bare(variation)
self.omega = 2*np.pi*freqs_bare_vals[mode]
print variation
print type(variation)
print ureg(variation)
self.U_H = self.calc_U_H(variation)
lv = self.get_lv(variation)
Qseamsweep = []
print 'Calculating Qseam_'+ seam +' for mode ' + str(mode) + ' (' + str(mode) + '/' + str(self.nmodes-1) + ')'
for value in values:
self.design.set_variable(variable, str(value)+unit)
j_2_norm = self.fields.Vector_Jsurf.norm_2() # overestimating the loss by taking norm2 of j, rather than jperp**2
int_j_2 = j_2_norm.integrate_line(seam)
int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
yseam = int_j_2_val/self.U_H/self.omega
Qseamsweep.append(gseam/yseam)
# Qseamsweep['Qseam_sweep_'+seam+'_'+str(mode)] = gseam/yseam
#Cprint 'Qseam_' + seam + '_' + str(mode) + str(' = ') + str(gseam/yseam)
if pltresult:
fig, ax = plt.subplots()
ax.plot(values,Qseamsweep)
ax.set_yscale('log')
ax.set_xlabel(variable+' ('+unit+')')
ax.set_ylabel('Q'+'_'+seam)
return Qseamsweep
def get_Qdielectric(self, dielectric, mode, variation):
Qdielectric = {}
print 'Calculating Qdielectric_'+ dielectric +' for mode ' + str(mode) + ' (' + str(mode) + '/' + str(self.nmodes-1) + ')'
U_dielectric = self.calc_U_E(variation, volume=dielectric)
p_dielectric = U_dielectric/self.U_E
Qdielectric['Qdielectric_'+dielectric+'_'+str(mode)] = 1/(p_dielectric*tan_delta_sapp)
print 'p_dielectric'+'_'+dielectric+'_'+str(mode)+' = ' + str(p_dielectric)
return Series(Qdielectric)
def get_Qsurface(self, mode, variation):
'''
        calculate the contribution to Q of a dielectric layer of dirt on all surfaces
set the dirt thickness and loss tangent in the config file
ref: http://arxiv.org/pdf/1509.01854.pdf
'''
lv = self.get_lv(variation)
Qsurf = {}
print 'Calculating Qsurface for mode ' + str(mode) + ' (' + str(mode) + '/' + str(self.nmodes-1) + ')'
# A = self.fields.Mag_E**2
# A = A.integrate_vol(name='AllObjects')
# U_surf = A.evaluate(lv=lv)
calcobject=CalcObject([],self.setup)
vecE=calcobject.getQty("E")
A=vecE
B=vecE.conj()
A=A.dot(B)
A=A.real()
A=A.integrate_surf(name='AllObjects')
U_surf = A.evaluate(lv=lv)
U_surf *= th*epsilon_0*eps_r
p_surf = U_surf/self.U_E
Qsurf['Qsurf_'+str(mode)] = 1/(p_surf*tan_delta_surf)
print 'p_surf'+'_'+str(mode)+' = ' + str(p_surf)
return Series(Qsurf)
def get_Hparams(self, freqs, pjs, lj):
Hparams = {}
fzpfs = []
# calculate Kerr and fzpf
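        # Reading of the formulas coded below (assuming fact/nck are the factorial and
        # binomial-coefficient helpers, and fluxQ the reduced flux quantum):
        #   E_J     = fluxQ**2 / L_J
        #   f_zpf_m = sqrt(p_m * hbar * omega_m / E_J)
        #   alpha_m = 2*E_J/4! * C(4,2) * f_zpf_m**4 / hbar   (= E_J * f_zpf_m**4 / (2*hbar))
        #   chi_mn  = E_J * (f_zpf_m * f_zpf_n)**2 / hbar      for n < m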
for m in self.modes:
omega = 2*pi*freqs[m]
ej = fluxQ**2/lj
pj = pjs['pj_'+str(m)]
fzpf = np.sqrt(pj*hbar*omega/ej)
fzpfs.append(fzpf)
Hparams['fzpf_'+str(m)] = fzpf
alpha = 2*ej/fact(4)*nck(4,2)*(fzpf**4)/hbar
Hparams['alpha_'+str(m)] = alpha
Hparams['freq_'+str(m)]=(omega-alpha)/2/pi
# calculate chi
for m in self.modes:
for n in self.modes:
if n<m:
chi_mn = ej/hbar*(fzpfs[m]*fzpfs[n])**2
Hparams['chi_'+str(m)+'_'+str(n)] = chi_mn
return Hparams
def calc_U_E(self, variation, volume=None):
        ''' This is 2 * the peak electric energy. (since we do not divide by 2, and use the peak phasors) '''
lv = self.get_lv(variation)
if volume is None:
volume = 'AllObjects'
else:
pass
calcobject=CalcObject([],self.setup)
vecE=calcobject.getQty("E")
A=vecE.times_eps()
B=vecE.conj()
A=A.dot(B)
A=A.real()
A=A.integrate_vol(name=volume)
return A.evaluate(lv=lv)
def calc_U_H(self, variation, volume=None):
lv = self.get_lv(variation)
if volume is None:
volume = 'AllObjects'
else:
pass
calcobject=CalcObject([],self.setup)
vecH=calcobject.getQty("H")
A=vecH.times_mu()
B=vecH.conj()
A=A.dot(B)
A=A.real()
A=A.integrate_vol(name=volume)
return A.evaluate(lv=lv)
def calc_current(self, fields, line ):
'''Function to calculate Current based on line. Not in use
line = integration line between plates - name
'''
self.design.Clear_Field_Clac_Stack()
comp = fields.Vector_H
exp = comp.integrate_line_tangent(line)
I = exp.evaluate(phase = 90)
self.design.Clear_Field_Clac_Stack()
return I
def calc_avg_current_J_surf_mag(self, variation, junc_rect, junc_len):
        ''' Peak current I_max for mode J in junction J
The avg. is over the surface of the junction. I.e., spatial. '''
lv = self.get_lv(variation)
calc = CalcObject([],self.setup)
calc = calc.getQty("Jsurf").mag().integrate_surf(name = junc_rect)
I = calc.evaluate(lv=lv) / junc_len #phase = 90
#self.design.Clear_Field_Clac_Stack()
return I
def calc_line_current(self, variation, junc_line_name):
lv = self.get_lv(variation)
calc = CalcObject([],self.setup)
calc = calc.getQty("H").imag().integrate_line_tangent(name = junc_line_name)
#self.design.Clear_Field_Clac_Stack()
return calc.evaluate(lv=lv)
def calc_Pjs_from_I_for_mode(self,variation, U_H,U_E, LJs, junc_rects,junc_lens, method = 'J_surf_mag' ,
freq = None, calc_sign = None):
''' Expected that you have specified the mode before calling this
        Expected to precalc U_H and U_E for mode, will return a pandas Series object
junc_rect = ['junc_rect1', 'junc_rect2'] name of junc rectangles to integrate H over
junc_len = [0.0001] specify in SI units; i.e., meters
LJs = [8e-09, 8e-09] SI units
calc_sign = ['junc_line1', 'junc_line2'] used to define sign of ZPF
            Potential errors: If you don't have a line or rect by the right name you will probably get an error of the type:
com_error: (-2147352567, 'Exception occurred.', (0, None, None, None, 0, -2147024365), None)
'''
dat = {}
for i, junc_rect in enumerate(junc_rects):
print_NoNewLine(' ' + junc_rect)
            if method == 'J_surf_mag':
I_peak = self.calc_avg_current_J_surf_mag(variation, junc_rect, junc_lens[i])
else:
print 'Not yet implemented.'
if LJs is None: print_color(' -----> ERROR: Why is LJs passed as None!?')
#dat['I_' +junc_rect] = I_peak # stores the phase information as well
dat['pJ_' +junc_rect] = LJs[i] * I_peak**2 / (2*U_E)
if calc_sign is not None:
Idum = self.calc_line_current(variation, calc_sign[i])
dat['sign_'+junc_rect] = +1 if Idum > 0 else -1
print ' %+.5f' %(dat['pJ_' +junc_rect] * dat['sign_'+junc_rect] )
else: print ' %0.5f' %(dat['pJ_' +junc_rect])
return pd.Series(dat)
def do_eBBQ(self, variations= None, plot_fig = False, modes = None,
Pj_from_current = True, junc_rect = [], junc_lines = None, junc_len = [], junc_LJ_var_name = [],
dielectrics = None, seams = None, surface = False,
calc_Hamiltonian = False,pJ_method = 'J_surf_mag',
save_mesh_stats = True):
"""
Pj_from_current:
Multi-junction calculation of energy participation ratio matrix based on <I_J>. Current is integrated average of J_surf by default: (zkm 3/29/16)
            Will calculate the Pj matrix for the selected modes for the given junctions junc_rect array & length of junctions
junc_rect = ['junc_rect1', 'junc_rect2'] name of junc rectangles to integrate H over
            junc_lines = ['junc_line1', 'junc_line2'] used to define the current flow direction, arbitrary, doesn't really matter that much, just need a line there
            junc_len = [0.0001] length of junc = length of junc_line #TODO: could now get rid of this and use the line [specify in SI units; i.e., meters]
junc_LJ_var_name = ['LJ1', 'LJ2']
            pJ_method = 'J_surf_mag' - takes the avg. Jsurf over the rect. Make sure you have seeded lots of tets here. I recommend starting with 4 across the smallest dimension.
Assumptions:
Low dissipation (high-Q).
            Right now, we assume that there are no lumped capacitors to simplify calculations. Not required.
We assume that there are only lumped inductors, so that U_tot = U_E+U_H+U_L and U_C =0, so that U_tot = 2*U_E;
Other parameters:
seams = ['seam1', 'seam2'] (seams needs to be a list of strings)
variations = ['0', '1']
A variation is a combination of project/design variables in an optimetric sweep
"""
self.Pj_from_current = Pj_from_current; meta_data = {}; assert(type(junc_LJ_var_name) == list), "Please pass junc_LJ_var_name as a list "
if Pj_from_current : print_color(' Setup: ' + self.setup.name); self.PJ_multi_sol = {} # this is where the result will go
if seams is not None: self.seams = seams; meta_data['seams'] = seams;
if dielectrics is not None: self.dielectrics = dielectrics; meta_data['dielectrics'] = dielectrics;
if variations is None: variations = (['-1'] if self.listvariations == (u'',) else [str(i) for i in range(self.nvariations)] )
if modes is None: modes = range(self.nmodes)
if self.latest_h5_path is not None and self.append_analysis:shutil.copyfile(self.latest_h5_path, self.data_filename);
self.h5file = hdf = pd.HDFStore(self.data_filename);
self.variations = variations; self.modes = modes; self.njunc = len(junc_rect)
meta_data['junc_rect'] = junc_rect; meta_data['junc_lines'] = junc_lines; meta_data['junc_len'] = junc_len; meta_data['junc_LJ_var_name'] = junc_LJ_var_name; meta_data['pJ_method'] = pJ_method;
mesh_stats = self.mesh_stats = []
for ii, variation in enumerate(variations):
print_color( 'variation : ' + variation + ' / ' + str(self.nvariations-1), bg = 44, newline = False )
self.lv = self.get_lv(variation)
if (variation+'/hfss_variables') in hdf.keys() and self.append_analysis: print_NoNewLine(' previously analyzed ...\n'); \
continue;
print_NoNewLine( ' NOT analyzed\n' ); time.sleep(0.5)
hdf[variation+'/hfss_variables'] = self.hfss_variables[variation] = varz \
= pd.Series(self.get_variables(variation=variation))
freqs_bare_dict, freqs_bare_vals = self.get_freqs_bare(variation) # get bare freqs from HFSS
self.pjs={}; var_sol_accum = []
for mode in modes:
sol = Series({'freq' : freqs_bare_vals[mode]*10**-9, 'modeQ' : freqs_bare_dict['Q_'+str(mode)] })
                self.omega = 2*np.pi*freqs_bare_vals[mode] # this should really be passed as argument to the functions rather than a property of the class I would say
print ' Mode \x1b[0;30;46m ' + str(mode) + ' \x1b[0m / ' + str(self.nmodes-1)+' calculating:'
self.solutions.set_mode(mode+1, 0)
self.fields = self.setup.get_fields()
print_NoNewLine(' U_H ...'); sol['U_H'] = self.U_H = self.calc_U_H(variation)
print_NoNewLine(' U_E'); sol['U_E'] = self.U_E = self.calc_U_E(variation)
print( " => U_L = %.3f%%" %( (self.U_E - self.U_H )/(2*self.U_E)) )
if self.Pj_from_current:
self.LJs = [ ureg.Quantity(varz['_'+LJvar_nm]).to_base_units().magnitude for LJvar_nm in junc_LJ_var_name]
meta_data['LJs'] = dict(zip(junc_LJ_var_name, self.LJs))
print ' I -> p_{mJ} ...'
sol_PJ = self.calc_Pjs_from_I_for_mode(variation, self.U_H, self.U_E, self.LJs, junc_rect, junc_len,
method = pJ_method, freq = freqs_bare_vals[mode]*10**-9,
calc_sign = junc_lines)
sol = sol.append(sol_PJ)
if self.njunc == 1: # Single-junction method using global U_H and U_E;
assert(type(junc_LJ_var_name) == list and len(junc_LJ_var_name) == 1), "Please pass junc_LJ_var_name as array of 1 element for a single junction; e.g., junc_LJ_var_name = ['junc1']"
#lj = 1E-3*ureg.Quantity(varz['_'+junc_LJ_var_name]).to_base_units().magnitude
sol['pj1'] = self.get_p_j(mode)
                    self.pjs.update(sol['pj1']) # convenience function for single junction case
if seams is not None: # get seam Q
for seam in seams: sol = sol.append(self.get_Qseam(seam,mode,variation))
if dielectrics is not None: # get Q dielectric
for dielectric in dielectrics: sol = sol.append(self.get_Qdielectric(dielectric, mode, variation))
if surface is True: # get Q surface
sol = sol.append( self.get_Qsurface(mode, variation) )
var_sol_accum +=[sol]
#TODO: add metadata to the Dataframe & save it
# such as what are the junc_rect names and Lj values etc. (e.g., http://stackoverflow.com/questions/29129095/save-additional-attributes-in-pandas-dataframe/29130146#29130146)
hdf[variation+'/eBBQ_solution'] = self.sols[variation] \
= pd.DataFrame(var_sol_accum, index = modes)
hdf[variation+'/meta_data'] = self.meta_data[variation] \
= Series(meta_data)
if save_mesh_stats:
msh = self.setup.get_mesh_stats(self.listvariations[ureg(variation)])
mesh_stats += [msh]
if msh is not None: hdf[variation+'/mesh_stats'] = msh # returns dataframe
conv = self.setup.get_convergence(self.listvariations[ureg(variation)]) # returns dataframe
#print 'conv.'
if conv is not None: hdf[variation+'/convergence'] = conv
self.h5file.close()
self.bbq_analysis = BbqAnalysis(self.data_filename, variations=self.variations)
#TODO: to be implemented below
# if plot_fig:
# self.bbq_analysis.plot_Hparams(modes=self.modes)
# self.bbq_analysis.print_Hparams(modes=self.modes)
print "\n\nCOMPLETE: do_eBBQ.\n"
return
def eBBQ_ND(freqs, PJ, Om, EJ, LJs, SIGN, cos_trunc = 6, fock_trunc = 7, use_1st_order = False):
'''
    Numerical diagonalization for energy BBQ
fzpfs: reduced zpf ( in units of \phi_0 )
'''
assert(all(freqs<1E6)), "Please input the frequencies in GHz"
assert(all(LJs <1E-3)),"Please input the inductances in Henries"
assert((PJ >0).any()),"ND -- PJs are not all > 0; \n %s" % (PJ)
import bbqNumericalDiagonalization
from bbqNumericalDiagonalization import bbq_hmt, make_dispersive, fqr
fzpfs = np.zeros(PJ.T.shape)
for junc in xrange(fzpfs.shape[0]):
for mode in xrange(fzpfs.shape[1]):
fzpfs[junc, mode] = np.sqrt(PJ[mode,junc] * Om[mode,mode] / EJ[junc,junc] ) #*0.001
fzpfs = fzpfs * SIGN.T
Hs = bbq_hmt(freqs*10**9, LJs.astype(np.float), fqr*fzpfs, cos_trunc, fock_trunc, individual = use_1st_order)
f1s, CHI_ND, fzpfs, f0s = make_dispersive(Hs, fock_trunc, fzpfs, freqs,use_1st_order = use_1st_order) # f0s = freqs
CHI_ND = -1*CHI_ND *1E-6;
return f1s, CHI_ND, fzpfs, f0s;
def eBBQ_Pmj_to_H_params(s,
meta_data,
cos_trunc = None,
fock_trunc = None,
_renorm_pj = True,
use_1st_order = False):
'''
returns the CHIs as MHz with anharmonicity alpha as the diagonal (with - sign)
---------------
f0s [GHz]: Eigenmode frequencies computed by HFSS; i.e., linear freq returned in GHz
    f1s [GHz]: Dressed mode frequencies (by the non-linearity; e.g., Lamb shift, etc.). If numerical diagonalization is run, then we return the numerically diagonalized frequencies, otherwise, use 1st order perturbation theory on the 4th order expansion of the cosine.
CHI_O1 [MHz] : Analytic expression for the chis based on a cos trunc to 4th order, and using 1st order perturbation theory.
CHI_ND [MHz] : Numerically diagonalized chi matrix.
PJ : Participation matrix
    Om [GHz] : Diagonal matrix of linear mode (HFSS) frequencies
EJ [GHz] : Diagonal matrix of junction energies, in GHz.
ask Zlatko for more info.
'''
import scipy
Planck = scipy.constants.Planck
f0s = np.array( s['freq'] )
Qs = s['modeQ']
LJ_nms = meta_data['junc_LJ_var_name'] # ordered
LJs = np.array([meta_data['LJs'][nm] for nm in LJ_nms]) # LJ in Henries, must make sure these are given in the right order
EJs = (fluxQ**2/LJs/Planck*10**-9).astype(np.float) # EJs in GHz
PJ_Jsu = s.loc[:,s.keys().str.contains('pJ')] # EPR from Jsurf avg
PJ_Jsu_sum = PJ_Jsu.apply(sum, axis = 1) # sum of participations as calculated by avg surf current
PJ_glb_sum = (s['U_E'] - s['U_H'])/(2*s['U_E']) # sum of participations as calculated by global UH and UE
diff = (PJ_Jsu_sum-PJ_glb_sum)/PJ_glb_sum*100 # debug
if _renorm_pj: # Renormalize
PJs = PJ_Jsu.divide(PJ_Jsu_sum, axis=0).mul(PJ_glb_sum,axis=0)
else:
PJs = PJ_Jsu
print 'NO renorm'
if (PJs < 0).any().any() == True:
print "\n\n**************\n\n"
        print_color("Warning / error!!! Some PJ was found <= 0. This is probably a numerical error, or a super low-Q mode. We will take the abs value. Otherwise, rerun with more precision, inspect, and do due diligence.")
print PJs
print "\n\n**************\n\n"
PJs = np.abs(PJs)
SIGN = s.loc[:,s.keys().str.contains('sign_')]
PJ = np.mat(PJs.values)
Om = np.mat(np.diagflat(f0s))
EJ = np.mat(np.diagflat(EJs))
CHI_O1= Om * PJ * EJ.I * PJ.T * Om * 1000. # MHz
CHI_O1= divide_diagonal_by_2(CHI_O1) # Make the diagonals alpha
f1s = f0s - np.diag(CHI_O1/1000.) # 1st order PT expect freq to be dressed down by alpha
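    # First-order chi matrix as computed above: CHI_O1[m, n] = f0_m * sum_j(P[m, j] * P[n, j] / EJ_j) * f0_n,
    # scaled to MHz; the diagonal is halved so that CHI_O1[m, m] is the anharmonicity alpha_m,
    # and the dressed frequency is f1_m = f0_m - alpha_m (MHz converted back to GHz).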
if cos_trunc is not None:
f1s, CHI_ND, fzpfs, f0s = eBBQ_ND(f0s, PJ, Om, EJ, LJs, SIGN, cos_trunc = cos_trunc, fock_trunc = fock_trunc, use_1st_order = use_1st_order)
else:
CHI_ND, fzpfs = None, None
return CHI_O1, CHI_ND, PJ, Om, EJ, diff, LJs, SIGN, f0s, f1s, fzpfs, Qs
    # the return could be made cleaner, or a dictionary
#%%
class BbqAnalysis(object):
    ''' Defines an analysis object which loads and plots data from an h5 file.
    This data is obtained using e.g. Bbq.do_eBBQ
'''
def __init__(self, data_filename, variations=None):
#raise('not implemented')
self.data_filename = data_filename
with HDFStore(data_filename, mode = 'r') as hdf: # = h5py.File(data_filename, 'r')
            # I think we should open & close the file here; I don't see why we need to keep it open & keep accessing it. It is small in memory, just load it into the RAM.
# all the data will be stored in 3 objects.
if variations is None:
import re
variations = []
for key in hdf.keys():
if 'hfss_variables' in key:
variations += re.findall(r'\b\d+\b', key)
self.variations = variations
self.hfss_variables = {}
self.sols = {}
self.meta_data = {}
self.mesh_stats = {}
self.convergence = {}
for variation in variations:
try:
self.hfss_variables[variation] = hdf[variation+'/hfss_variables']
self.sols[variation] = hdf[variation+'/eBBQ_solution']
self.meta_data[variation] = hdf[variation+'/meta_data']
self.mesh_stats[variation] = hdf[variation+'/mesh_stats']
self.convergence[variation] = hdf[variation+'/convergence'] # TODO: better way to handle errors
except Exception as e:
print_color('Error in variation ' + str(variation))
print_color(e)
self.nmodes = self.sols[variations[0]].shape[0]
self.meta_data = DataFrame(self.meta_data)
self._renorm_pj = True
def get_variable_vs(self, swpvar):
ret = {}
for key, varz in self.hfss_variables.iteritems():
ret[key] = ureg.Quantity(varz['_'+swpvar]).magnitude
return ret
def get_convergences_max_tets(self):
''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) '''
ret = {}
for key, df in self.convergence.iteritems():
ret[key] = df['Solved Elements'].iloc[-1]
return ret
def get_convergences_Tets_vs_pass(self):
''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) '''
ret = {}
for key, df in self.convergence.iteritems():
s = df['Solved Elements']
#s.index = df['Pass Number']
ret[key] = s
return ret
def get_convergences_MaxDeltaFreq_vs_pass(self):
''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) '''
ret = {}
for key, df in self.convergence.iteritems():
s = df['Max Delta Freq. %']
#s.index = df['Pass Number']
ret[key] = s
return ret
def get_mesh_tot(self):
ret = {}
for key, m in self.mesh_stats.iteritems():
ret[key] = m['Num Tets '].sum()
return ret
def get_solution_column(self, col_name, swp_var, sort = True):
''' sort by variation -- must be numeric '''
Qs, swp = [], []
for key, sol in self.sols.iteritems():
Qs += [ sol[col_name] ]
varz = self.hfss_variables[key]
swp += [ ureg.Quantity(varz['_'+swp_var]).magnitude ]
Qs = DataFrame(Qs, index = swp)
return Qs if not sort else Qs.sort_index()
def get_Qs(self, swp_var, sort = True):
return self.get_solution_column('modeQ', swp_var, sort)
def get_Fs(self, swp_var, sort = True):
''' this returns the linear frequencies that HFSS gives'''
return self.get_solution_column('freq', swp_var, sort)
def get_junc_rect_names(self):
return self.meta_data.loc['junc_rect',:]
def analyze_variation(self,
variation = '0',
cos_trunc = 6,
fock_trunc = 7,
print_results = True,
frmt = "{:7.2f}" ):
'''
Container function to call eBBQ_Pmj_to_H_params
Can also print results neatly.
Returns
----------------------------
f0s [GHz]: Eigenmode frequencies computed by HFSS; i.e., linear freq returned in GHz
        f1s [GHz]: Dressed mode frequencies (by the non-linearity; e.g., Lamb shift, etc.). If numerical diagonalization is run, then we return the numerically diagonalized frequencies, otherwise, use 1st order perturbation theory on the 4th order expansion of the cosine.
CHI_O1 [MHz] : Analytic expression for the chis based on a cos trunc to 4th order, and using 1st order perturbation theory.
CHI_ND [MHz] : Numerically diagonalized chi matrix.
PJ : Participation matrix
        Om [GHz] : Diagonal matrix of linear mode (HFSS) frequencies
EJ [GHz] : Diagonal matrix of junction energies, in GHz.
'''
s = self.sols[variation];
meta_data = self.meta_data[variation]
varz = self.hfss_variables[variation]
CHI_O1, CHI_ND, PJ, Om, EJ, diff, LJs, SIGN, f0s, f1s, fzpfs, Qs = \
eBBQ_Pmj_to_H_params(s,
meta_data,
cos_trunc = cos_trunc,
fock_trunc = fock_trunc,
_renorm_pj = self._renorm_pj)
if print_results: ##TODO: generalize to more modes
print '\nPJ=\t(renorm.)'
print_matrix(PJ*SIGN, frmt = "{:7.4f}")
print "\n","* "*5, "CHI matrix (MHz)", "* "*5
if cos_trunc is not None:
print '\nCHI_ND=\t PJ O(%d) [alpha diag]'%(cos_trunc)
print_matrix(CHI_ND, append_row ="MHz", frmt = frmt)
else:
print '\nCHI_O1=\t [alpha diag]'
print_matrix(CHI_O1, append_row ="MHz", frmt = frmt)
if len(f0s) == 3:
print '\nf0={:6.2f} {:7.2f} {:7.2f} GHz'.format(*f0s)
print '\nf1={:6.2f} {:7.2f} {:7.2f} GHz'.format(*(f1s*1E-9))
print 'Q={:8.1e} {:7.1e} {:6.0f}'.format(*(Qs))
else:
print "\n","* "*5, "Eigen (Linear) vs Dressed Frequencies MHz", "* "*5
print pd.DataFrame(np.array([f0s*1E3,f1s*1E3]).transpose(), columns = ['Linear', 'Dressed'])
#print "\n", "* "*5, "Dressed freqs Frequencies MHz", "* "*5 # these are the ND if ND was used, else it is the O1PT
#print
print "\n","* "*5, "Eigen (linear) Qs ", "* "*5
print pd.Series(Qs) # Q =0 means no dissipation used in sim.
return CHI_O1, CHI_ND, PJ, Om, EJ, diff, LJs, SIGN, f0s, f1s, fzpfs, Qs, varz
@deprecated
def get_swept_variables(self):
        #TODO: needs to be updated to new standard; currently broken
swept_variables_names = []
swept_variables_values = []
for name in self.h5data[self.variations[0]].keys():
if '_'==name[0]: # design variables all start with _
variables = []
for variation in self.variations:
variables.append(self.h5data[variation][name].value)
if len(set(variables))>1:
swept_variables_names.append(name)
swept_variables_values.append(list(set(variables)))
else:
pass
return swept_variables_names, swept_variables_values
@deprecated
def get_variable_variations(self, variablename):
variables = []
for variation in self.variations:
variables.append(self.h5data[variation][variablename].value)
return np.asarray(variables)
@deprecated
def get_float_units(self, variable_name, variation='0'):
variable_value = self.h5data[variation][variable_name].value
n = 1
try:
float(variable_value)
return float(variable_value), ''
except ValueError:
while True:
try:
float(variable_value[:-n])
return float(variable_value[:-n]), variable_value[len(variable_value)-n:]
except:
n+=1
@deprecated
def print_Hparams(self, variation=None, modes=None):
        #TODO: needs to be updated to new standard; currently broken
if modes==None:
modes = range(self.nmodes)
else:
pass
if variation == None:
variation = self.variations[-1]
else:
pass
swept_variables_names, swept_variables_values = self.get_swept_variables()
for vname in swept_variables_names:
print vname + ' = ' + self.h5data[variation][vname].value
for ii, m in enumerate(modes):
freq_m = 'freq_'+str(m)
Kerr_m = 'alpha_'+str(m)
Q_m = 'Q_'+str(m)
if freq_m not in self.h5data[variation].keys():
freq_m = 'freq_bare_'+str(m)
else:
pass
if Kerr_m in self.h5data[variation].keys():
print Kerr_m + ' = ' +str(self.h5data[variation][Kerr_m].value/2/pi/1e6) + ' MHz'
else:
pass
print freq_m +' = ' + str(self.h5data[variation][freq_m].value/1e9) + ' GHz'
if Q_m in self.h5data[variation].keys():
print Q_m + ' = ' + str(self.h5data[variation][Q_m].value)
else:
pass
for n in modes[0:ii]:
chi_m_n = 'chi_'+str(m)+'_'+str(n)
if chi_m_n in self.h5data[variation].keys():
print chi_m_n + ' = ' + str(self.h5data[variation][chi_m_n].value/2/pi/1e6) + ' MHz'
@deprecated
def plot_Hparams(self, variable_name=None, modes=None):
        #TODO: needs to be updated to new standard; currently broken
fig, ax = plt.subplots(2,2, figsize=(24,10))
if variable_name == None:
xaxis = self.variations
else:
xaxis = []
for variation in self.variations:
xaxis.append(self.get_float_units(variable_name, variation)[0])
if modes==None:
modes = range(self.nmodes)
else:
pass
for ii, m in enumerate(modes):
freq_m = 'freq_'+str(m)
Kerr_m = 'alpha_'+str(m)
Q_m = 'Q_'+str(m)
Qsurf_m = 'Qsurf_'+str(m)
if freq_m not in self.h5data[self.variations[0]].keys():
freq_m = 'freq_bare_'+str(m)
else:
pass
if Kerr_m in self.h5data[self.variations[0]].keys():
ax[0][1].plot(xaxis, self.get_variable_variations(Kerr_m)/2/pi/1e6, 'o', label = str(m))
else:
pass
ax[0][0].plot(xaxis, self.get_variable_variations(freq_m)/1e9, 'o', label=str(m))
if Q_m in self.h5data[self.variations[0]].keys():
ax[1][1].plot(xaxis, self.get_variable_variations(Q_m), 'o', label = Q_m)
else:
pass
if Qsurf_m in self.h5data[self.variations[0]].keys():
ax[1][1].plot(xaxis, self.get_variable_variations(Qsurf_m), 'o', label = Qsurf_m)
else:
pass
if 'seams' in self.h5data[self.variations[0]].keys():
for seam in self.h5data[self.variations[0]]['seams'].value:
Qseam_m = 'Qseam_'+seam+'_'+str(m)
if Qseam_m in self.h5data[self.variations[0]].keys():
ax[1][1].plot(xaxis, self.get_variable_variations(Qseam_m), 'o', label = Qseam_m)
else:
pass
if 'dielectrics' in self.h5data[self.variations[0]].keys():
for dielectric in self.h5data[self.variations[0]]['dielectrics'].value:
Qdielectric_m = 'Qdielectric_'+dielectric+'_'+str(m)
if Qdielectric_m in self.h5data[self.variations[0]].keys():
ax[1][1].plot(xaxis, self.get_variable_variations(Qdielectric_m), 'o', label = Qdielectric_m)
else:
pass
for n in modes[0:ii]:
chi_m_n = 'chi_'+str(m)+'_'+str(n)
if chi_m_n in self.h5data[self.variations[0]].keys():
ax[1][0].plot(xaxis, self.get_variable_variations(chi_m_n)/2/pi/1e6, 'o', label=str(m)+','+str(n))
ax[0][0].legend()
ax[0][0].set_ylabel('freq (GHz)')
ax[0][1].legend()
ax[0][1].set_ylabel('Kerr/2pi (MHz)')
ax[0][1].set_yscale('log')
ax[1][0].legend()
ax[1][0].set_ylabel('Chi/2pi (MHz)')
ax[1][0].set_yscale('log')
ax[1][1].legend()
ax[1][1].set_ylabel('Q')
ax[1][1].set_yscale('log')
if variable_name == None:
swept_variables_names, swept_variables_values = self.get_swept_variables()
xticks = []
for variation in xaxis:
xtick = ''
for name in swept_variables_names:
xtick += name[1:] + ' = ' + self.h5data[variation][name].value + '\n'
xticks.append(str(xtick))
ax[1][0].set_xticks([int(v) for v in xaxis])
ax[1][0].set_xticklabels(xticks, rotation='vertical')
ax[1][1].set_xticks([int(v) for v in xaxis])
ax[1][1].set_xticklabels(xticks, rotation='vertical')
ax[0][0].set_xticklabels([])
ax[0][1].set_xticklabels([])
else:
xlabel = variable_name + ' (' + self.get_float_units(variable_name, self.variations[0])[1] + ')'
ax[1][0].set_xlabel(xlabel)
ax[1][1].set_xlabel(xlabel)
fig.subplots_adjust(bottom=0.3)
fig.suptitle(self.data_filename)
fig.savefig(self.data_filename[:-5]+'.jpg')
return fig, ax
# for variable in swept_variables_names:
# fig1 = plt.subplots()
# ax1 = fig1.add_subplot(221)
# ax.scatter()
# return
|
|
import random
import six.moves.urllib.error
import six.moves.urllib.parse
import six.moves.urllib.request
from splunk_eventgen.lib.logging_config import logger
from splunk_eventgen.lib.outputplugin import OutputPlugin
try:
from concurrent.futures import ThreadPoolExecutor
import requests
from requests import Session
from requests_futures.sessions import FuturesSession
except ImportError:
pass
try:
import ujson as json
except ImportError:
import json
class NoServers(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class BadConnection(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class HTTPCoreOutputPlugin(OutputPlugin):
name = "httpcore"
MAXQUEUELENGTH = 1000
useOutputQueue = False
validSettings = [
"httpeventServers",
"httpeventOutputMode",
"httpeventMaxPayloadSize",
]
defaultableSettings = [
"httpeventServers",
"httpeventOutputMode",
"httpeventMaxPayloadSize",
]
jsonSettings = ["httpeventServers"]
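    # Expected shape of the "httpeventServers" setting, sketched from the keys that
    # createConnections() below reads (the values here are placeholders, not defaults):
    #   {"servers": [{"protocol": "https", "address": "localhost",
    #                 "port": "8088", "key": "<HEC token>"}]}
    # Each entry becomes <protocol>://<address>:<port>/services/collector with an
    # "Authorization: Splunk <key>" header.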
def __init__(self, sample, output_counter=None):
OutputPlugin.__init__(self, sample, output_counter)
# TODO: make workers a param that can be set in eventgen.conf
def _setup_REST_workers(self, session=None, workers=20):
# disable any "requests" warnings
requests.packages.urllib3.disable_warnings()
# Bind passed in samples to the outputter.
self.lastsourcetype = None
if not session:
session = Session()
self.session = FuturesSession(
session=session, executor=ThreadPoolExecutor(max_workers=workers)
)
self.active_sessions = []
@staticmethod
def _urlencode(value):
"""
        Takes a value and makes sure everything in the string is URL safe.
:param value: string
:return: urlencoded string
"""
return six.moves.urllib.parse.quote(value)
@staticmethod
def _bg_convert_json(sess, resp):
"""
        Takes a futures session object and sets resp.data to the parsed JSON output. Use this as a background task for the
        session queue. Example: future = session.get('http://httpbin.org/get', background_callback=_bg_convert_json)
        :param sess: futures session object. Passed automatically as an argument to the background_callback.
        :param resp: futures response object. Passed automatically as an argument to the background_callback.
:return:
"""
if resp.status_code == 200:
if getattr(resp, "json", None):
resp.data = resp.json()
else:
if type(resp.data) == str:
resp.data = json.loads(resp.data)
def updateConfig(self, config):
OutputPlugin.updateConfig(self, config)
try:
if hasattr(self.config, "httpeventServers") is False:
if hasattr(self._sample, "httpeventServers"):
self.config.httpeventServers = self._sample.httpeventServers
else:
logger.error(
"outputMode %s but httpeventServers not specified for sample %s"
% (self.name, self._sample.name)
)
raise NoServers(
"outputMode %s but httpeventServers not specified for sample %s"
% (self.name, self._sample.name)
)
# set default output mode to round robin
if (
hasattr(self.config, "httpeventOutputMode")
and self.config.httpeventOutputMode
):
self.httpeventoutputmode = config.httpeventOutputMode
else:
if (
hasattr(self._sample, "httpeventOutputMode")
and self._sample.httpeventOutputMode
):
self.httpeventoutputmode = self._sample.httpeventOutputMode
else:
self.httpeventoutputmode = "roundrobin"
if (
hasattr(self.config, "httpeventMaxPayloadSize")
and self.config.httpeventMaxPayloadSize
):
self.httpeventmaxsize = self.config.httpeventMaxPayloadSize
else:
if (
hasattr(self._sample, "httpeventMaxPayloadSize")
and self._sample.httpeventMaxPayloadSize
):
self.httpeventmaxsize = self._sample.httpeventMaxPayloadSize
else:
self.httpeventmaxsize = 10000
logger.debug("Currentmax size: %s " % self.httpeventmaxsize)
if isinstance(config.httpeventServers, str):
self.httpeventServers = json.loads(config.httpeventServers)
else:
self.httpeventServers = config.httpeventServers
logger.debug(
"Setting up the connection pool for %s in %s"
% (self._sample.name, self._app)
)
self.createConnections()
logger.debug("Pool created.")
logger.debug("Finished init of %s plugin." % self.name)
except Exception as e:
logger.exception(str(e))
def createConnections(self):
self.serverPool = []
if self.httpeventServers:
for server in self.httpeventServers.get("servers"):
if not server.get("address"):
logger.error(
"requested a connection to a httpevent server, but no address specified for sample %s"
% self._sample.name
)
raise ValueError(
"requested a connection to a httpevent server, but no address specified for sample %s"
% self._sample.name
)
if not server.get("port"):
logger.error(
"requested a connection to a httpevent server, but no port specified for server %s"
% server
)
raise ValueError(
"requested a connection to a httpevent server, but no port specified for server %s"
% server
)
if not server.get("key"):
logger.error(
"requested a connection to a httpevent server, but no key specified for server %s"
% server
)
raise ValueError(
"requested a connection to a httpevent server, but no key specified for server %s"
% server
)
if not (
(server.get("protocol") == "http")
or (server.get("protocol") == "https")
):
                    logger.error(
                        "requested a connection to a httpevent server, but no valid protocol (http or https) specified for server %s"
                        % server
                    )
                    raise ValueError(
                        "requested a connection to a httpevent server, but no valid protocol (http or https) specified for server %s"
                        % server
                    )
logger.debug(
"Validation Passed, Creating a requests object for server: %s"
% server.get("address")
)
setserver = {}
setserver["url"] = "%s://%s:%s/services/collector" % (
server.get("protocol"),
server.get("address"),
server.get("port"),
)
setserver["header"] = "Splunk %s" % server.get("key")
logger.debug("Adding server set to pool, server: %s" % setserver)
self.serverPool.append(setserver)
else:
raise NoServers(
"outputMode %s but httpeventServers not specified for sample %s"
% (self.name, self._sample.name)
)
def _sendHTTPEvents(self, payload):
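        # Chunking strategy: serialize each event with json.dumps and append it to the
        # pending payload string until adding the next event would push the payload past
        # self.httpeventmaxsize; at that point flush via _transmitEvents() and start a
        # new chunk. The for/else clause at the bottom flushes whatever remains.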
currentreadsize = 0
stringpayload = ""
totalbytesexpected = 0
totalbytessent = 0
numberevents = len(payload)
logger.debug("Sending %s events to splunk" % numberevents)
for line in payload:
logger.debug("line: %s " % line)
targetline = json.dumps(line)
logger.debug("targetline: %s " % targetline)
targetlinesize = len(targetline)
totalbytesexpected += targetlinesize
if (int(currentreadsize) + int(targetlinesize)) <= int(
self.httpeventmaxsize
):
stringpayload = stringpayload + targetline
currentreadsize = currentreadsize + targetlinesize
logger.debug("stringpayload: %s " % stringpayload)
else:
logger.debug(
"Max size for payload hit, sending to splunk then continuing."
)
try:
self._transmitEvents(stringpayload)
totalbytessent += len(stringpayload)
currentreadsize = targetlinesize
stringpayload = targetline
except Exception as e:
logger.exception(str(e))
raise e
else:
try:
totalbytessent += len(stringpayload)
logger.debug(
"End of for loop hit for sending events to splunk, total bytes sent: %s ---- out of %s -----"
% (totalbytessent, totalbytesexpected)
)
self._transmitEvents(stringpayload)
except Exception as e:
logger.exception(str(e))
raise e
def _transmitEvents(self, payloadstring):
targetServer = []
logger.debug("Transmission called with payloadstring: %s " % payloadstring)
if self.httpeventoutputmode == "mirror":
targetServer = self.serverPool
else:
targetServer.append(random.choice(self.serverPool))
for server in targetServer:
logger.debug("Selected targetServer object: %s" % targetServer)
url = server["url"]
headers = {}
headers["Authorization"] = server["header"]
headers["content-type"] = "application/json"
try:
payloadsize = len(payloadstring)
self.active_sessions.append(
self.session.post(
url=url, data=payloadstring, headers=headers, verify=False
)
)
except Exception as e:
logger.error("Failed for exception: %s" % e)
logger.error(
"Failed sending events to url: %s sourcetype: %s size: %s"
% (url, self.lastsourcetype, payloadsize)
)
logger.debug(
"Failed sending events to url: %s headers: %s payload: %s"
% (url, headers, payloadstring)
)
raise e
def load():
"""Returns an instance of the plugin"""
return HTTPCoreOutputPlugin
|
|
"""fontTools.misc.bezierTools.py -- tools for working with bezier path segments.
Rewritten to eliminate the numpy dependency
"""
__all__ = [
"calcQuadraticBounds",
"calcCubicBounds",
"splitLine",
"splitQuadratic",
"splitCubic",
"splitQuadraticAtT",
"splitCubicAtT",
"solveQuadratic",
"solveCubic",
]
from robofab.misc.arrayTools import calcBounds
epsilon = 1e-12
def calcQuadraticBounds(pt1, pt2, pt3):
"""Return the bounding rectangle for a qudratic bezier segment.
pt1 and pt3 are the "anchor" points, pt2 is the "handle".
>>> calcQuadraticBounds((0, 0), (50, 100), (100, 0))
(0, 0, 100, 50.0)
>>> calcQuadraticBounds((0, 0), (100, 0), (100, 100))
(0.0, 0.0, 100, 100)
"""
(ax, ay), (bx, by), (cx, cy) = calcQuadraticParameters(pt1, pt2, pt3)
ax2 = ax*2.0
ay2 = ay*2.0
roots = []
if ax2 != 0:
roots.append(-bx/ax2)
if ay2 != 0:
roots.append(-by/ay2)
points = [(ax*t*t + bx*t + cx, ay*t*t + by*t + cy) for t in roots if 0 <= t < 1] + [pt1, pt3]
return calcBounds(points)
def calcCubicBounds(pt1, pt2, pt3, pt4):
"""Return the bounding rectangle for a cubic bezier segment.
pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles".
>>> calcCubicBounds((0, 0), (25, 100), (75, 100), (100, 0))
(0, 0, 100, 75.0)
>>> calcCubicBounds((0, 0), (50, 0), (100, 50), (100, 100))
(0.0, 0.0, 100, 100)
>>> calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0))
(35.566243270259356, 0, 64.43375672974068, 75.0)
"""
(ax, ay), (bx, by), (cx, cy), (dx, dy) = calcCubicParameters(pt1, pt2, pt3, pt4)
# calc first derivative
ax3 = ax * 3.0
ay3 = ay * 3.0
bx2 = bx * 2.0
by2 = by * 2.0
xRoots = [t for t in solveQuadratic(ax3, bx2, cx) if 0 <= t < 1]
yRoots = [t for t in solveQuadratic(ay3, by2, cy) if 0 <= t < 1]
roots = xRoots + yRoots
points = [(ax*t*t*t + bx*t*t + cx * t + dx, ay*t*t*t + by*t*t + cy * t + dy) for t in roots] + [pt1, pt4]
return calcBounds(points)
def splitLine(pt1, pt2, where, isHorizontal):
"""Split the line between pt1 and pt2 at position 'where', which
is an x coordinate if isHorizontal is False, a y coordinate if
isHorizontal is True. Return a list of two line segments if the
line was successfully split, or a list containing the original
line.
>>> printSegments(splitLine((0, 0), (100, 100), 50, True))
((0, 0), (50.0, 50.0))
((50.0, 50.0), (100, 100))
>>> printSegments(splitLine((0, 0), (100, 100), 100, True))
((0, 0), (100, 100))
>>> printSegments(splitLine((0, 0), (100, 100), 0, True))
((0, 0), (0.0, 0.0))
((0.0, 0.0), (100, 100))
>>> printSegments(splitLine((0, 0), (100, 100), 0, False))
((0, 0), (0.0, 0.0))
((0.0, 0.0), (100, 100))
"""
pt1x, pt1y = pt1
pt2x, pt2y = pt2
ax = (pt2x - pt1x)
ay = (pt2y - pt1y)
bx = pt1x
by = pt1y
    ax1 = (ax, ay)[isHorizontal]
    # ax1 is the delta along the axis selected by isHorizontal; if it is zero,
    # 'where' cannot be reached at a unique t, so the line is returned unsplit.
    if ax1 == 0:
        return [(pt1, pt2)]
    t = float(where - (bx, by)[isHorizontal]) / ax1
if 0 <= t < 1:
midPt = ax * t + bx, ay * t + by
return [(pt1, midPt), (midPt, pt2)]
else:
return [(pt1, pt2)]
def splitQuadratic(pt1, pt2, pt3, where, isHorizontal):
"""Split the quadratic curve between pt1, pt2 and pt3 at position 'where',
which is an x coordinate if isHorizontal is False, a y coordinate if
isHorizontal is True. Return a list of curve segments.
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 150, False))
((0, 0), (50, 100), (100, 0))
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, False))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, False))
((0.0, 0.0), (12.5, 25.0), (25.0, 37.5))
((25.0, 37.5), (62.5, 75.0), (100.0, 0.0))
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, True))
((0.0, 0.0), (7.32233047034, 14.6446609407), (14.6446609407, 25.0))
((14.6446609407, 25.0), (50.0, 75.0), (85.3553390593, 25.0))
((85.3553390593, 25.0), (92.6776695297, 14.6446609407), (100.0, -7.1054273576e-15))
>>> # XXX I'm not at all sure if the following behavior is desirable:
>>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, True))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (50.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
"""
a, b, c = calcQuadraticParameters(pt1, pt2, pt3)
solutions = solveQuadratic(a[isHorizontal], b[isHorizontal],
c[isHorizontal] - where)
solutions = [t for t in solutions if 0 <= t < 1]
solutions.sort()
if not solutions:
return [(pt1, pt2, pt3)]
return _splitQuadraticAtT(a, b, c, *solutions)
def splitCubic(pt1, pt2, pt3, pt4, where, isHorizontal):
"""Split the cubic curve between pt1, pt2, pt3 and pt4 at position 'where',
which is an x coordinate if isHorizontal is False, a y coordinate if
isHorizontal is True. Return a list of curve segments.
>>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 150, False))
((0, 0), (25, 100), (75, 100), (100, 0))
>>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 50, False))
((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
((50.0, 75.0), (68.75, 75.0), (87.5, 50.0), (100.0, 0.0))
>>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 25, True))
((0.0, 0.0), (2.2937927384, 9.17517095361), (4.79804488188, 17.5085042869), (7.47413641001, 25.0))
((7.47413641001, 25.0), (31.2886200204, 91.6666666667), (68.7113799796, 91.6666666667), (92.52586359, 25.0))
((92.52586359, 25.0), (95.2019551181, 17.5085042869), (97.7062072616, 9.17517095361), (100.0, 1.7763568394e-15))
"""
a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4)
solutions = solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal],
d[isHorizontal] - where)
solutions = [t for t in solutions if 0 <= t < 1]
solutions.sort()
if not solutions:
return [(pt1, pt2, pt3, pt4)]
return _splitCubicAtT(a, b, c, d, *solutions)
def splitQuadraticAtT(pt1, pt2, pt3, *ts):
"""Split the quadratic curve between pt1, pt2 and pt3 at one or more
values of t. Return a list of curve segments.
>>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
>>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5, 0.75))
((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
((50.0, 50.0), (62.5, 50.0), (75.0, 37.5))
((75.0, 37.5), (87.5, 25.0), (100.0, 0.0))
"""
a, b, c = calcQuadraticParameters(pt1, pt2, pt3)
return _splitQuadraticAtT(a, b, c, *ts)
def splitCubicAtT(pt1, pt2, pt3, pt4, *ts):
"""Split the cubic curve between pt1, pt2, pt3 and pt4 at one or more
values of t. Return a list of curve segments.
>>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5))
((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
((50.0, 75.0), (68.75, 75.0), (87.5, 50.0), (100.0, 0.0))
>>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75))
((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
((50.0, 75.0), (59.375, 75.0), (68.75, 68.75), (77.34375, 56.25))
((77.34375, 56.25), (85.9375, 43.75), (93.75, 25.0), (100.0, 0.0))
"""
a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4)
return _splitCubicAtT(a, b, c, d, *ts)
def _splitQuadraticAtT(a, b, c, *ts):
ts = list(ts)
segments = []
ts.insert(0, 0.0)
ts.append(1.0)
ax, ay = a
bx, by = b
cx, cy = c
for i in range(len(ts) - 1):
t1 = ts[i]
t2 = ts[i+1]
delta = (t2 - t1)
# calc new a, b and c
a1x = ax * delta**2
a1y = ay * delta**2
b1x = (2*ax*t1 + bx) * delta
b1y = (2*ay*t1 + by) * delta
c1x = ax*t1**2 + bx*t1 + cx
c1y = ay*t1**2 + by*t1 + cy
pt1, pt2, pt3 = calcQuadraticPoints((a1x, a1y), (b1x, b1y), (c1x, c1y))
segments.append((pt1, pt2, pt3))
return segments
def _splitCubicAtT(a, b, c, d, *ts):
ts = list(ts)
ts.insert(0, 0.0)
ts.append(1.0)
segments = []
ax, ay = a
bx, by = b
cx, cy = c
dx, dy = d
for i in range(len(ts) - 1):
t1 = ts[i]
t2 = ts[i+1]
delta = (t2 - t1)
# calc new a, b, c and d
a1x = ax * delta**3
a1y = ay * delta**3
b1x = (3*ax*t1 + bx) * delta**2
b1y = (3*ay*t1 + by) * delta**2
c1x = (2*bx*t1 + cx + 3*ax*t1**2) * delta
c1y = (2*by*t1 + cy + 3*ay*t1**2) * delta
d1x = ax*t1**3 + bx*t1**2 + cx*t1 + dx
d1y = ay*t1**3 + by*t1**2 + cy*t1 + dy
pt1, pt2, pt3, pt4 = calcCubicPoints((a1x, a1y), (b1x, b1y), (c1x, c1y), (d1x, d1y))
segments.append((pt1, pt2, pt3, pt4))
return segments
#
# Equation solvers.
#
from math import sqrt, acos, cos, pi
def solveQuadratic(a, b, c,
sqrt=sqrt):
"""Solve a quadratic equation where a, b and c are real.
a*x*x + b*x + c = 0
This function returns a list of roots. Note that the returned list
is neither guaranteed to be sorted nor to contain unique values!
"""
if abs(a) < epsilon:
if abs(b) < epsilon:
# We have a non-equation; therefore, we have no valid solution
roots = []
else:
# We have a linear equation with 1 root.
roots = [-c/b]
else:
# We have a true quadratic equation. Apply the quadratic formula to find two roots.
DD = b*b - 4.0*a*c
if DD >= 0.0:
rDD = sqrt(DD)
roots = [(-b+rDD)/2.0/a, (-b-rDD)/2.0/a]
else:
# complex roots, ignore
roots = []
return roots
def solveCubic(a, b, c, d,
abs=abs, pow=pow, sqrt=sqrt, cos=cos, acos=acos, pi=pi):
"""Solve a cubic equation where a, b, c and d are real.
a*x*x*x + b*x*x + c*x + d = 0
This function returns a list of roots. Note that the returned list
is neither guaranteed to be sorted nor to contain unique values!
"""
#
# adapted from:
# CUBIC.C - Solve a cubic polynomial
# public domain by Ross Cottrell
# found at: http://www.strangecreations.com/library/snippets/Cubic.C
#
if abs(a) < epsilon:
# don't just test for zero; for very small values of 'a' solveCubic()
# returns unreliable results, so we fall back to quad.
return solveQuadratic(b, c, d)
a = float(a)
a1 = b/a
a2 = c/a
a3 = d/a
Q = (a1*a1 - 3.0*a2)/9.0
R = (2.0*a1*a1*a1 - 9.0*a1*a2 + 27.0*a3)/54.0
R2_Q3 = R*R - Q*Q*Q
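    # The sign of R*R - Q*Q*Q selects the branch: negative means three distinct real
    # roots (handled by the trigonometric formula below); zero or positive means a
    # single real root is computed (complex conjugate roots are ignored, and repeated
    # real roots collapse to the one value returned).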
if R2_Q3 < 0:
theta = acos(R/sqrt(Q*Q*Q))
rQ2 = -2.0*sqrt(Q)
x0 = rQ2*cos(theta/3.0) - a1/3.0
x1 = rQ2*cos((theta+2.0*pi)/3.0) - a1/3.0
x2 = rQ2*cos((theta+4.0*pi)/3.0) - a1/3.0
return [x0, x1, x2]
else:
if Q == 0 and R == 0:
x = 0
else:
x = pow(sqrt(R2_Q3)+abs(R), 1/3.0)
x = x + Q/x
if R >= 0.0:
x = -x
x = x - a1/3.0
return [x]
#
# Conversion routines for points to parameters and vice versa
#
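# The conversions below go between Bezier control points and power-basis (monomial)
# coefficients.  For the cubic case the mapping implemented by calcCubicParameters is
#     B(t) = a*t**3 + b*t**2 + c*t + d
#     d = pt1
#     c = 3*(pt2 - pt1)
#     b = 3*(pt3 - pt2) - c
#     a = pt4 - pt1 - c - b
# (component-wise for x and y), and calcCubicPoints/calcQuadraticPoints invert it, so
# calcCubicPoints(*calcCubicParameters(p1, p2, p3, p4)) reproduces the control points
# up to floating point error.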
def calcQuadraticParameters(pt1, pt2, pt3):
x2, y2 = pt2
x3, y3 = pt3
cx, cy = pt1
bx = (x2 - cx) * 2.0
by = (y2 - cy) * 2.0
ax = x3 - cx - bx
ay = y3 - cy - by
return (ax, ay), (bx, by), (cx, cy)
def calcCubicParameters(pt1, pt2, pt3, pt4):
x2, y2 = pt2
x3, y3 = pt3
x4, y4 = pt4
dx, dy = pt1
cx = (x2 -dx) * 3.0
cy = (y2 -dy) * 3.0
bx = (x3 - x2) * 3.0 - cx
by = (y3 - y2) * 3.0 - cy
ax = x4 - dx - cx - bx
ay = y4 - dy - cy - by
return (ax, ay), (bx, by), (cx, cy), (dx, dy)
def calcQuadraticPoints(a, b, c):
ax, ay = a
bx, by = b
cx, cy = c
x1 = cx
y1 = cy
x2 = (bx * 0.5) + cx
y2 = (by * 0.5) + cy
x3 = ax + bx + cx
y3 = ay + by + cy
return (x1, y1), (x2, y2), (x3, y3)
def calcCubicPoints(a, b, c, d):
ax, ay = a
bx, by = b
cx, cy = c
dx, dy = d
x1 = dx
y1 = dy
x2 = (cx / 3.0) + dx
y2 = (cy / 3.0) + dy
x3 = (bx + cx) / 3.0 + x2
y3 = (by + cy) / 3.0 + y2
x4 = ax + dx + cx + bx
y4 = ay + dy + cy + by
return (x1, y1), (x2, y2), (x3, y3), (x4, y4)
def _segmentrepr(obj):
"""
>>> _segmentrepr([1, [2, 3], [], [[2, [3, 4], [0.1, 2.2]]]])
'(1, (2, 3), (), ((2, (3, 4), (0.1, 2.2))))'
"""
try:
it = iter(obj)
except TypeError:
return str(obj)
else:
return "(%s)" % ", ".join([_segmentrepr(x) for x in it])
def printSegments(segments):
"""Helper for the doctests, displaying each segment in a list of
segments on a single line as a tuple.
"""
for segment in segments:
print(_segmentrepr(segment))
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Validates all aggregate functions across all datatypes
#
import pytest
from testdata.common import widetable
from tests.common.environ import USING_OLD_AGGS_JOINS
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfOldAggsJoins
from tests.common.test_dimensions import (
create_exec_option_dimension,
create_uncompressed_text_dimension)
from tests.common.test_result_verifier import (
assert_codegen_enabled,
parse_column_types,
parse_column_labels,
QueryTestResult,
parse_result_rows)
from tests.common.test_vector import ImpalaTestDimension
# Test dimensions for TestAggregation.
AGG_FUNCTIONS = ['sum', 'count', 'min', 'max', 'avg', 'ndv']
DATA_TYPES = ['int', 'bool', 'double', 'bigint', 'tinyint',
'smallint', 'float', 'timestamp', 'string']
# Lookup table for TestAggregation results.
result_lut = {
'sum-tinyint': 45000, 'avg-tinyint': 5, 'count-tinyint': 9000,
'min-tinyint': 1, 'max-tinyint': 9, 'ndv-tinyint': 9,
'sum-smallint': 495000, 'avg-smallint': 50, 'count-smallint': 9900,
'min-smallint': 1, 'max-smallint': 99, 'ndv-smallint': 99,
'sum-int': 4995000, 'avg-int': 500, 'count-int': 9990,
'min-int': 1, 'max-int': 999, 'ndv-int': 999,
'sum-bigint': 49950000, 'avg-bigint': 5000, 'count-bigint': 9990,
'min-bigint': 10, 'max-bigint' : 9990, 'ndv-bigint': 999,
'sum-bool': 5000, 'count-bool': 10000, 'min-bool': 'false',
'max-bool': 'true', 'avg-bool': 0.5, 'ndv-bool': 2,
'sum-double': 50449500.0, 'count-double': 9990, 'min-double': 10.1,
'max-double': 10089.9, 'avg-double': 5050.0, 'ndv-double': 999,
'sum-float': 5494500.0, 'count-float': 9990, 'min-float': 1.10,
'max-float': 1098.9, 'avg-float': 550.0, 'ndv-float': 999,
'count-timestamp': 10000, 'min-timestamp': '2010-01-01 00:00:00',
'max-timestamp': '2010-01-10 18:02:05.100000000',
'avg-timestamp': '2010-01-05 20:47:11.705080000', 'ndv-timestamp': 10000,
'count-string': 10000, 'min-string': '0', 'max-string': '999', 'ndv-string': 999,
'sum-distinct-tinyint': 45, 'count-distinct-tinyint': 9, 'min-distinct-tinyint': 1,
'max-distinct-tinyint': 9, 'avg-distinct-tinyint': 5, 'ndv-distinct-tinyint': 9,
'sum-distinct-smallint': 4950, 'count-distinct-smallint': 99,
'min-distinct-smallint': 1, 'max-distinct-smallint': 99,
'avg-distinct-smallint': 50, 'ndv-distinct-smallint': 99,
'sum-distinct-int': 499500, 'count-distinct-int': 999, 'min-distinct-int': 1,
'max-distinct-int': 999, 'avg-distinct-int': 500, 'ndv-distinct-int': 999,
'sum-distinct-bigint': 4995000, 'count-distinct-bigint': 999, 'min-distinct-bigint': 10,
'max-distinct-bigint': 9990, 'avg-distinct-bigint': 5000,
'ndv-distinct-bigint': 999,
'sum-distinct-bool': 1, 'count-distinct-bool': 2, 'min-distinct-bool': 'false',
'max-distinct-bool': 'true', 'avg-distinct-bool': 0.5, 'ndv-distinct-bool': 2,
'sum-distinct-double': 5044950.0, 'count-distinct-double': 999,
'min-distinct-double': 10.1, 'max-distinct-double': 10089.9,
'avg-distinct-double': 5050.0, 'ndv-distinct-double': 999,
'sum-distinct-float': 549450.0, 'count-distinct-float': 999, 'min-distinct-float': 1.1,
'max-distinct-float': 1098.9, 'avg-distinct-float': 550.0,
'ndv-distinct-float': 999,
'count-distinct-timestamp': 10000, 'min-distinct-timestamp': '2010-01-01 00:00:00',
'max-distinct-timestamp': '2010-01-10 18:02:05.100000000',
'avg-distinct-timestamp': '2010-01-05 20:47:11.705080000',
'ndv-distinct-timestamp': 10000,
'count-distinct-string': 1000, 'min-distinct-string': '0',
'max-distinct-string': '999', 'ndv-distinct-string': 999,
}
class TestAggregation(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestAggregation, cls).add_test_dimensions()
# Add two more dimensions
cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('agg_func', *AGG_FUNCTIONS))
cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('data_type', *DATA_TYPES))
cls.ImpalaTestMatrix.add_constraint(lambda v: cls.is_valid_vector(v))
@classmethod
def is_valid_vector(cls, vector):
data_type, agg_func = vector.get_value('data_type'), vector.get_value('agg_func')
file_format = vector.get_value('table_format').file_format
if file_format not in ['parquet']: return False
if cls.exploration_strategy() == 'core':
# Reduce execution time when exploration strategy is 'core'
if vector.get_value('exec_option')['batch_size'] != 0: return False
# Avro doesn't have timestamp type
non_numeric = data_type in ['bool', 'string']
if file_format == 'avro' and data_type == 'timestamp':
return False
elif non_numeric and agg_func not in ['min', 'max', 'count', 'ndv']:
return False
elif agg_func == 'sum' and data_type == 'timestamp':
return False
return True
def test_aggregation(self, vector):
exec_option = vector.get_value('exec_option')
disable_codegen = exec_option['disable_codegen']
# The old aggregation node does not support codegen for all aggregate functions.
check_codegen_enabled = not disable_codegen and not USING_OLD_AGGS_JOINS
data_type, agg_func = (vector.get_value('data_type'), vector.get_value('agg_func'))
query = 'select %s(%s_col) from alltypesagg where day is not null' % (agg_func,
data_type)
result = self.execute_query(query, exec_option,
table_format=vector.get_value('table_format'))
assert len(result.data) == 1
self.verify_agg_result(agg_func, data_type, False, result.data[0]);
if check_codegen_enabled:
# Verify codegen was enabled for the preaggregation.
# It is deliberately disabled for the merge aggregation.
assert_codegen_enabled(result.runtime_profile, [1])
query = 'select %s(DISTINCT(%s_col)) from alltypesagg where day is not null' % (
agg_func, data_type)
result = self.execute_query(query, vector.get_value('exec_option'))
assert len(result.data) == 1
self.verify_agg_result(agg_func, data_type, True, result.data[0]);
if check_codegen_enabled:
# Verify codegen was enabled for all stages of the aggregation.
assert_codegen_enabled(result.runtime_profile, [1, 2, 4, 6])
def verify_agg_result(self, agg_func, data_type, distinct, actual_string):
key = '%s-%s%s' % (agg_func, 'distinct-' if distinct else '', data_type)
if agg_func == 'ndv':
# NDV is inherently approximate. Compare with some tolerance.
err = abs(result_lut[key] - int(actual_string))
rel_err = err / float(result_lut[key])
      print key, result_lut[key], actual_string, abs(result_lut[key] - int(actual_string))
assert err <= 1 or rel_err < 0.05
elif data_type in ('float', 'double') and agg_func != 'count':
# Compare with a margin of error.
delta = 1e6 if data_type == 'double' else 1e3
assert abs(result_lut[key] - float(actual_string)) < delta
elif data_type == 'timestamp' and agg_func != 'count':
# Strip off everything past 10s of microseconds.
ignore_digits = 4
assert result_lut[key][:-ignore_digits] == actual_string[:-ignore_digits]
else:
assert str(result_lut[key]) == actual_string
class TestAggregationQueries(ImpalaTestSuite):
"""Run the aggregation test suite, with codegen enabled and disabled, to exercise our
non-codegen code"""
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestAggregationQueries, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_exec_option_dimension(disable_codegen_options=[False, True]))
if cls.exploration_strategy() == 'core':
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def test_non_codegen_tinyint_grouping(self, vector, unique_database):
# Regression for IMPALA-901. The test includes an INSERT statement, so can only be run
# on INSERT-able formats - text only in this case, since the bug doesn't depend on the
# file format.
if vector.get_value('table_format').file_format == 'text' \
and vector.get_value('table_format').compression_codec == 'none':
self.client.execute("create table %s.imp_901 (col tinyint)" % unique_database)
self.run_test_case('QueryTest/aggregation_no_codegen_only', vector,
unique_database)
def test_aggregation(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail(reason="IMPALA-283 - select count(*) produces inconsistent results")
self.run_test_case('QueryTest/aggregation', vector)
def test_distinct(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail("HBase returns columns in alphabetical order for select distinct *, "
"making the result verication to fail.")
if vector.get_value('table_format').file_format == 'kudu':
pytest.xfail("IMPALA-4042: count(distinct NULL) fails on a view, needed for kudu")
self.run_test_case('QueryTest/distinct', vector)
def test_group_concat(self, vector):
"""group_concat distinct tests
Required to run directly in python because the order in which results will be
merged at the final, single-node aggregation step is non-deterministic (if the
first phase is running on multiple nodes). Need to pull the result apart and
    compare the actual items."""
exec_option = vector.get_value('exec_option')
disable_codegen = exec_option['disable_codegen']
table_format = vector.get_value('table_format')
# Test group_concat distinct with other aggregate function and groupings.
# expected result is the row: 2010,'1, 2, 3, 4','1-2-3-4','2|3|1|4',40,4
query = """select year, group_concat(distinct string_col),
group_concat(distinct string_col, '-'), group_concat(distinct string_col, '|'),
count(string_col), count(distinct string_col)
from alltypesagg where int_col < 5 and year = 2010 group by year"""
result = self.execute_query(query, exec_option, table_format=table_format)
row = (result.data)[0].split("\t")
assert(len(row) == 6)
assert(row[0] == '2010')
delimiter = [', ', '-', '|']
for i in range(1, 4):
assert(set(row[i].split(delimiter[i-1])) == set(['1', '2', '3', '4']))
assert(row[4] == '40')
assert(row[5] == '4')
check_codegen_enabled = not disable_codegen and not USING_OLD_AGGS_JOINS
if check_codegen_enabled:
# Verify codegen was enabled for all three stages of the aggregation.
assert_codegen_enabled(result.runtime_profile, [1, 2, 4])
# Test group_concat distinct with arrow delimiter, with multiple rows
query = """select day, group_concat(distinct string_col, "->")
from (select * from alltypesagg where id % 100 = day order by id limit 99999) a
group by day order by day"""
result = self.execute_query(query, exec_option, table_format=table_format)
string_col = []
string_col.append(set(['1','101','201','301','401','501','601','701','801','901']))
string_col.append(set(['2','102','202','302','402','502','602','702','802','902']))
string_col.append(set(['3','103','203','303','403','503','603','703','803','903']))
string_col.append(set(['4','104','204','304','404','504','604','704','804','904']))
string_col.append(set(['5','105','205','305','405','505','605','705','805','905']))
string_col.append(set(['6','106','206','306','406','506','606','706','806','906']))
string_col.append(set(['7','107','207','307','407','507','607','707','807','907']))
string_col.append(set(['8','108','208','308','408','508','608','708','808','908']))
string_col.append(set(['9','109','209','309','409','509','609','709','809','909']))
string_col.append(set(['10','110','210','310','410','510','610','710','810','910']))
assert(len(result.data) == 10)
for i in range(10):
row = (result.data)[i].split("\t")
assert(len(row) == 2)
assert(row[0] == str(i+1))
assert(set(row[1].split("->")) == string_col[i])
# Test group_concat distinct with merge node
query = """select group_concat(distinct string_col, ' ') from alltypesagg
where int_col < 10"""
result = self.execute_query(query, exec_option, table_format=table_format)
assert(set((result.data)[0].split(" ")) == set(['1','2','3','4','5','6','7','8','9']))
if check_codegen_enabled:
# Verify codegen was enabled for all four stages of the aggregation.
assert_codegen_enabled(result.runtime_profile, [1, 2, 4, 6])
class TestWideAggregationQueries(ImpalaTestSuite):
"""Test that aggregations with many grouping columns work"""
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestWideAggregationQueries, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_exec_option_dimension(disable_codegen_options=[False, True]))
# File format doesn't matter for this test.
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'parquet')
def test_many_grouping_columns(self, vector):
"""Test that an aggregate with many grouping columns works"""
table_format = vector.get_value('table_format')
exec_option = vector.get_value('exec_option')
query = "select distinct * from widetable_1000_cols"
# Ensure codegen is enabled.
result = self.execute_query(query, exec_option, table_format=table_format)
# All rows should be distinct.
expected_result = widetable.get_data(1000, 10, quote_strings=True)
types = parse_column_types(result.schema)
labels = parse_column_labels(result.schema)
expected = QueryTestResult(expected_result, types, labels, order_matters=False)
actual = QueryTestResult(parse_result_rows(result), types, labels,
order_matters=False)
assert expected == actual
class TestTPCHAggregationQueries(ImpalaTestSuite):
# Uses the TPC-H dataset in order to have larger aggregations.
@classmethod
def get_workload(cls):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestTPCHAggregationQueries, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format in ['parquet'])
def test_tpch_aggregations(self, vector):
self.run_test_case('tpch-aggregations', vector)
@SkipIfOldAggsJoins.passthrough_preagg
def test_tpch_passthrough_aggregations(self, vector):
self.run_test_case('tpch-passthrough-aggregations', vector)
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 4 23:00:43 2016
@author: ddboline
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import argparse
import os
from subprocess import call
from movie_collection_app.movie_collection import MovieCollection
from movie_collection_app.util import (play_file, remove_remote_file, HOMEDIR, get_remote_file,
get_remote_files, HOSTNAME)
list_of_commands = ('list', 'get', 'play', 'playyad', 'rm', 'add', 'purge', 'time', 'tvshows',
'master', 'slave', 'fifo', 'test', 'web', 'addcol')
help_text = 'commands=%s,[number]' % ','.join(list_of_commands)
def make_queue(_command='list', _args=None):
    ''' run queue commands; invoked by make_queue_parse() with the parsed command and args '''
if _command == 'test':
testfile = '/home/ddboline/public_html/videos/test.mp4'
play_file(testfile)
remove_remote_file(testfile)
testfile = '/home/ddboline/public_html/videos/temp.mp4'
play_file(testfile)
remove_remote_file(testfile)
return []
mq_ = MovieCollection()
out_list = []
if _command == 'list' or _command == 'time' or _command == 'web':
if not _args:
out_list += mq_.list_entries(None, do_time=(_command == 'time'))
elif len(_args) == 1 and _args[0] == 'head':
out_list += mq_.list_entries(
None, first_entry=0, last_entry=20, do_time=(_command == 'time'))
elif len(_args) == 1 and _args[0] == 'tail':
out_list += mq_.list_entries(
None, first_entry=-20, last_entry=0, do_time=(_command == 'time'))
elif len(_args) == 1 and _args[0] == 'local':
out_list += mq_.list_entries(None, do_local=True, do_time=(_command == 'time'))
elif len(_args) == 1:
out_list += mq_.list_entries(_args[0], do_time=(_command == 'time'))
elif len(_args) == 2 and isinstance(_args[0], int)\
and isinstance(_args[1], int):
out_list += mq_.list_entries(
None, first_entry=_args[0], last_entry=_args[1], do_time=(_command == 'time'))
elif len(_args) >= 2:
for arg in _args:
out_list += mq_.list_entries(arg, do_time=(_command == 'time'))
if _command == 'web':
make_web_page_from_string(
mq_.list_entries(), '/tmp/current_queue.html', do_main_dir=False, subdir='all')
if os.path.exists('/tmp/current_queue.html'):
call(
'mv /tmp/current_queue.html %s/public_html/videos/'
'current_queue.html' % HOMEDIR,
shell=True)
make_web_page_from_string(out_list, do_main_dir=True, subdir='partial')
elif _command == 'get':
if len(_args) == 1 and isinstance(_args[0], int):
out_list += [get_remote_file(mq_.current_queue[_args[0]])]
elif len(_args) == 2 and isinstance(_args[0], int)\
and isinstance(_args[1], int):
out_list += get_remote_files(mq_.current_queue[_args[0]:_args[1]])
elif len(_args) >= 1:
for arg in _args:
out_list += [mq_.get_entry_by_name(arg)]
elif _command == 'tvshows':
if not _args:
out_list += mq_.list_tvshows()
elif len(_args) == 1:
out_list += mq_.list_tvshows(do_random=(_args[0] == 'rand'))
else:
out_list += mq_.list_tvshows(
do_random=(_args[0] == 'rand'), play_random=(_args[1] == 'play'))
elif _command == 'add' and len(_args) > 0:
for arg in _args:
if os.path.exists(arg):
tmp_, _ = mq_.add_entry(arg)
out_list += tmp_
elif _command == 'addcol' and len(_args) > 0:
for arg in _args:
if os.path.exists(arg):
tmp_, _ = mq_.add_entry_to_collection(arg)
out_list += tmp_
elif _command == 'rm' and len(_args) > 0:
_args = sorted(_args)
offset = 0
if len(_args) == 2 and isinstance(_args[0], int)\
and isinstance(_args[1], int) and _args[0] < _args[1]\
and _args[1] < len(mq_.current_queue):
for idx in range(_args[0], _args[1] + 1):
tm_ = mq_.rm_entry(idx - offset)
if tm_:
out_list += tm_
offset += 1
else:
for arg in _args:
if isinstance(arg, int):
tm_ = mq_.rm_entry(arg - offset)
if tm_:
out_list += tm_
offset += 1
else:
out_list += mq_.rm_entry_by_name(arg)
mq_.read_queue_from_db()
elif isinstance(_command, int):
pos = _command
for arg in _args:
if isinstance(arg, int):
continue
if os.path.exists(arg):
tmp_, pos_ = mq_.add_entry(arg, position=pos)
out_list += tmp_
pos += pos_
elif _command[0:4] == 'play':
if len(_args) == 2 and isinstance(_args[0], int)\
and isinstance(_args[1], int) and _args[0] < _args[1]:
for idx in range(_args[0], _args[1]):
play_file(mq_.current_queue[idx], yad=(_command == 'playyad'))
for arg in _args:
if isinstance(arg, int) and arg < len(mq_.current_queue):
print(len(mq_.current_queue))
play_file(mq_.current_queue[arg]['path'], yad=(_command == 'playyad'))
else:
fn_ = mq_.show_entry_by_name(arg)
if fn_:
play_file(fn_, yad=(_command == 'playyad'))
return out_list
def make_web_page_from_string(in_list=None,
queue_file='%s/public_html/videos/' % HOMEDIR + 'video_queue.html',
do_main_dir=False,
subdir=''):
''' write video queue html file '''
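    # Path mapping used below: files on the local mounts are exposed through web
    # subdirectories (sabrent2000 -> movies/television, caviar2000 -> movies2,
    # western2000 -> movies3).  When do_main_dir is set, per-file symlinks are also
    # created under /var/www/html/videos/<subdir> so the page can link to
    # ../videos/<basename>.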
if HOSTNAME != 'dilepton-tower' or not in_list:
return
if do_main_dir and os.path.exists('/var/www/html/videos'):
# os.system('rm -rf %s' % '/'.join(('/var/www/html/videos', subdir)))
os.system('mkdir -p %s' % '/'.join(('/var/www/html/videos', subdir)))
with open(queue_file, 'w') as vidfile:
vidfile.write('<!DOCTYPE HTML>\n')
vidfile.write('<html>\n')
vidfile.write('<body>\n')
vidfname = ''
for line in in_list:
idx = -1
cur = ''
ents = line.split()
idx = int(ents[0])
cur = ents[1]
if 'sabrent2000' in cur:
vidfname = cur.replace('/media/sabrent2000/Documents/movies',
'movies')\
.replace(
'/media/sabrent2000/television/unwatched',
'television')
if 'caviar2000' in cur:
vidfname = cur.replace('/media/caviar2000/Documents/movies', 'movies2')
if 'western2000' in cur:
vidfname = cur.replace('/media/western2000/Documents/movies', 'movies3')
if do_main_dir and os.path.exists('/'.join(('/var/www/html/videos', subdir))):
link_path = '%s/%s' % ('/'.join(('/var/www/html/videos', subdir)),
os.path.basename(cur))
if not os.path.exists(link_path):
os.system('rm -rf %s' % link_path)
os.system('ln -s %s %s' % (cur, link_path))
vidfname = '../videos/%s' % os.path.basename(cur)
vidname = cur.split('/')[-1].replace('_', ' ')
vidfile.write('<H3 align="center">\n<a href="../%s">' % vidfname +
'%03i\t%s</a>\n</H3>\n\n' % (idx, vidname))
vidfile.write('</body>\n')
vidfile.write('</html>\n')
def make_queue_parse():
parser = argparse.ArgumentParser(description='make_queue script')
parser.add_argument('command', nargs='*', help=help_text)
args = parser.parse_args()
_command = 'list'
_args = []
if hasattr(args, 'command'):
if len(args.command) > 0:
_command = args.command[0]
if _command not in list_of_commands:
try:
_command = int(_command)
except ValueError:
print(help_text)
exit(0)
if len(args.command) > 1:
for it_ in args.command[1:]:
try:
it_ = int(it_)
except ValueError:
pass
_args.append(it_)
out_list = make_queue(_command=_command, _args=_args)
if len(out_list) > 0:
out_list = '\n'.join(out_list)
try:
print(out_list)
except IOError:
pass
|
|
#!/usr/bin/env python
# isbn.py
# Code for messing with ISBN numbers
# Especially stuff for converting between ISBN-10 and ISBN-13
# Copyright (C) 2007 Darren J Wilkinson
# Free GPL code
# Last updated: 14/8/2007
import sys,re
__doc__="""Code for messing with ISBN numbers. Stuff for validating ISBN-10 and
ISBN-13 numbers, computing check digits and converting from one format
to the other.
This code doesn't know anything about proper hyphenation of ISBNs. Nor does
it know anything about the real "validity" of ISBNs - it just validates on
the basis of the check-digit.
Some examples:
>>> import isbn
>>> isbn.isValid("1-58488-540-8")
True
>>> isbn.isValid("1-58488-540-5")
False
>>> isbn.isValid("978-158488-540-5")
True
>>> isbn.isI10("978-158488-540-5")
False
>>> isbn.isI13("978-158488-540-5")
True
>>> isbn.convert("1-58488-540-8")
'9781584885405'
>>> isbn.convert("978-158488-540-5")
'1584885408'
>>> isbn.isbn_strip("978-158488-540-5")
'9781584885405'
>>> isbn.check("1-58488-540")
'8'
>>> isbn.toI13("1-58488-540-8")
'9781584885405'
>>> isbn.toI13("978-158488-540-5")
'9781584885405'
>>> isbn.url("amazon","978-158488-540-5")
'http://www.amazon.com/exec/obidos/ASIN/1584885408'
The code is very simple pure python code in a single source file. Please
read the source code file (isbn.py) for further information about how
it works.
Please send bug reports, bug fixes, etc. to:
darrenjwilkinson@btinternet.com
Free GPL code, Copyright (C) 2007 Darren J Wilkinson
http://www.staff.ncl.ac.uk/d.j.wilkinson/
"""
def isbn_strip(isbn):
"""Strip whitespace, hyphens, etc. from an ISBN number and return
the result."""
    short=re.sub(r"\W","",isbn)
    return re.sub(r"\D","X",short)
def convert(isbn):
"""Convert an ISBN-10 to ISBN-13 or vice-versa."""
short=isbn_strip(isbn)
if (isValid(short)==False):
raise "Invalid ISBN"
if len(short)==10:
stem="978"+short[:-1]
return stem+check(stem)
else:
if short[:3]=="978":
stem=short[3:-1]
return stem+check(stem)
else:
raise "ISBN not convertible"
def isValid(isbn):
"""Check the validity of an ISBN. Works for either ISBN-10 or ISBN-13."""
short=isbn_strip(isbn)
if len(short)==10:
return isI10(short)
elif len(short)==13:
return isI13(short)
else:
return False
def check(stem):
"""Compute the check digit for the stem of an ISBN. Works with either
the first 9 digits of an ISBN-10 or the first 12 digits of an ISBN-13."""
short=isbn_strip(stem)
if len(short)==9:
return checkI10(short)
elif len(short)==12:
return checkI13(short)
else:
return False
def checkI10(stem):
"""Computes the ISBN-10 check digit based on the first 9 digits of a
stripped ISBN-10 number."""
chars=list(stem)
sum=0
digit=10
for char in chars:
sum+=digit*int(char)
digit-=1
check=11-(sum%11)
if check==10:
return "X"
elif check==11:
return "0"
else:
return str(check)
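# Worked example (matches the module docstring): for the stem "158488540" the weighted
# sum is 1*10 + 5*9 + 8*8 + 4*7 + 8*6 + 8*5 + 5*4 + 4*3 + 0*2 = 267, 267 % 11 == 3 and
# 11 - 3 == 8, so check("1-58488-540") returns '8'.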
def isI10(isbn):
"""Checks the validity of an ISBN-10 number."""
short=isbn_strip(isbn)
if (len(short)!=10):
return False
chars=list(short)
sum=0
digit=10
for char in chars:
if (char=='X' or char=='x'):
char="10"
sum+=digit*int(char)
digit-=1
remainder=sum%11
if remainder==0:
return True
else:
return False
def checkI13(stem):
"""Compute the ISBN-13 check digit based on the first 12 digits of a
stripped ISBN-13 number. """
chars=list(stem)
sum=0
count=0
for char in chars:
if (count%2==0):
sum+=int(char)
else:
sum+=3*int(char)
count+=1
check=10-(sum%10)
if check==10:
return "0"
else:
return str(check)
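# Worked example: for the stem "978158488540" the alternating 1/3-weighted sum is
# 9*1 + 7*3 + 8*1 + 1*3 + 5*1 + 8*3 + 4*1 + 8*3 + 8*1 + 5*3 + 4*1 + 0*3 = 125,
# 125 % 10 == 5 and 10 - 5 == 5, the final digit of the ISBN-13 9781584885405.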
def isI13(isbn):
"""Checks the validity of an ISBN-13 number."""
short=isbn_strip(isbn)
if (len(short)!=13):
return False
chars=list(short)
sum=0
count=0
for char in chars:
if (count%2==0):
sum+=int(char)
else:
sum+=3*int(char)
count+=1
remainder=sum%10
if remainder==0:
return True
else:
return False
def toI10(isbn):
"""Converts supplied ISBN (either ISBN-10 or ISBN-13) to a stripped
ISBN-10."""
if (isValid(isbn)==False):
raise "Invalid ISBN"
if isI10(isbn):
return isbn_strip(isbn)
else:
return convert(isbn)
def toI13(isbn):
"""Converts supplied ISBN (either ISBN-10 or ISBN-13) to a stripped
ISBN-13."""
if (isValid(isbn)==False):
raise "Invalid ISBN"
if isI13(isbn):
return isbn_strip(isbn)
else:
return convert(isbn)
def url(type,isbn):
"""Returns a URL for a book, corresponding to the "type" and the "isbn"
provided. This function is likely to go out-of-date quickly, and is
provided mainly as an example of a potential use-case for the package.
Currently allowed types are "google-books" (the default if the type is
not recognised), "amazon", "amazon-uk", "blackwells".
"""
short=toI10(isbn)
if type=="amazon":
return "http://www.amazon.com/o/ASIN/"+short
elif type=="amazon-uk":
return "http://www.amazon.co.uk/o/ASIN/"+short
elif type=="blackwells":
return "http://bookshop.blackwell.co.uk/jsp/welcome.jsp?action=search&type=isbn&term="+short
else:
return "http://books.google.com/books?vid="+short
if __name__=='__main__':
isbn="1-58488-540-8"
# isbn="978-158488-540-5"
print isbn
if isValid(isbn):
print "isbn ok"
else:
print "isbn BAD"
print convert(isbn)
print """
For help/information, do "python", "import isbn", "help(isbn)".
"""
# eof
|
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras.layers import Dense, Dropout
from keras.engine.topology import merge, Input
from keras.engine.training import Model
from keras.models import Sequential
from keras import backend as K
from keras.utils.test_utils import keras_test
@keras_test
def test_model_methods():
a = Input(shape=(3,), name='input_a')
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
model = Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
# test train_on_batch
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
# test fit
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np], nb_epoch=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np], nb_epoch=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
nb_epoch=1, batch_size=4)
# test validation_split
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4, validation_split=0.5)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4, validation_split=0.5)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
nb_epoch=1, batch_size=4, validation_split=0.5)
# test validation data
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4,
validation_data=([input_a_np, input_b_np], [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
nb_epoch=1, batch_size=4, validation_split=0.5,
validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
nb_epoch=1, batch_size=4, validation_split=0.5,
validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, {'dense_1': output_a_np, 'dropout': output_b_np}))
# test_on_batch
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
# predict_on_batch
out = model.predict_on_batch([input_a_np, input_b_np])
out = model.predict_on_batch({'input_a': input_a_np, 'input_b': input_b_np})
# predict, evaluate
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
out = model.predict([input_a_np, input_b_np], batch_size=4)
# with sample_weight
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
sample_weight = [None, np.random.random((10,))]
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
sample_weight=sample_weight)
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
sample_weight=sample_weight)
# test accuracy metric
model.compile(optimizer, loss, metrics=['acc'],
sample_weight_mode=None)
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 5
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 5
# this should also work
model.compile(optimizer, loss, metrics={'dense_1': 'acc'},
sample_weight_mode=None)
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 4
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 4
# and this as well
model.compile(optimizer, loss, metrics={'dense_1': ['acc']},
sample_weight_mode=None)
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 4
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 4
# test with a custom metric function
mse = lambda y_true, y_pred: K.mean(K.pow(y_true - y_pred, 2))
def mse_powers(y_true, y_pred):
m = mse(y_true, y_pred)
return {
'mse_squared': K.pow(m, 2),
'mse_cubed': K.pow(m, 3)
}
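# (illustrative note, not part of the original test: a metric function that
# returns a dict contributes one result per key, so each output below reports
# its loss plus mse, mse_squared and mse_cubed, which is what the out_len
# computed before the asserts accounts for)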
model.compile(optimizer, loss, metrics=[mse, mse_powers],
sample_weight_mode=None)
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out_len = 1 + 2 * 4  # total loss, plus per output: loss + 3 metrics
assert len(out) == out_len
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == out_len
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
out = model.fit([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4, nb_epoch=1)
out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
out = model.predict([input_a_np, input_b_np], batch_size=4)
@keras_test
def test_trainable_argument():
x = np.random.random((5, 3))
y = np.random.random((5, 2))
model = Sequential()
model.add(Dense(2, input_dim=3, trainable=False))
model.compile('rmsprop', 'mse')
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
assert_allclose(out, out_2)
# test with nesting
input = Input(shape=(3,))
output = model(input)
model = Model(input, output)
model.compile('rmsprop', 'mse')
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
assert_allclose(out, out_2)
if __name__ == '__main__':
pytest.main([__file__])
|
|
import unittest, random, sys, time, re
sys.path.extend(['.','..','py'])
import h2o, h2o_browse as h2b, h2o_exec as h2e, h2o_hosts, h2o_import as h2i
initList = [
('r.hex', 'r.hex=i.hex'),
]
DO_IFELSE = False
DO_CAN_RETURN_NAN = False
DO_FAIL1 = False
DO_TERNARY = False
DO_APPLY = True
DO_FUNCTION = False
DO_FORCE_LHS_ON_MULTI = True
exprList = [
'x= 3; r.hex[(x > 0) & (x < 4),]', # all x values between 0 and 4
'x= 3; r.hex[,(x > 0) & (x < 4)]', # all x values between 0 and 4
# 'z = if (any(r3.hex == 0) || any(r4.hex == 0)), "zero encountered"',
# FALSE and TRUE don't exist?
# 'x <- c(NA, FALSE, TRUE)',
# 'names(x) <- as.character(x)'
# outer(x, x, "&")## AND table
# outer(x, x, "|")## OR table
"1.23",
"!1.23",
"1.23<2.34",
"!1.23<2.34",
"!1.23<!2.34",
"1.23<!2.34",
"1.23<=2.34",
"1.23>2.34",
"1.23>=2.34",
"1.23==2.34",
"1.23!=2.34",
"r.hex",
"!r.hex",
# Not supported
# "+(1.23,2.34)",
"x=0; x+2",
"x=!0; !x+2",
"x=!0; x+!2",
"x=1",
"x=!1",
"x<-1",
"x<-!1",
"c(1,3,5)",
"!c(1,3,5)",
"!c(!1,3,5)",
"!c(1,!3,5)",
"!c(1,3,!5)",
"a=0; x=0",
"a=!0; x=!0",
"r.hex[2,3]",
# "r.hex[!2,3]",
# no cols selected
# "r.hex[2,!3]",
"r.hex[2+4,-4]",
"r.hex[1,-1]; r.hex[1,-1]; r.hex[1,-1]",
"r.hex[1,]",
"r.hex+1",
"r.hex[,1]",
"r.hex[,1]+1",
"r.hex-r.hex",
"1.23+(r.hex-r.hex)",
"(1.23+r.hex)-r.hex",
"is.na(r.hex)",
"nrow(r.hex)*3",
"r.hex[nrow(r.hex)-1,ncol(r.hex)-1]",
"r.hex[nrow(r.hex),]",
"r.hex[,ncol(r.hex)+1]=4",
"r.hex[,1]=3.3; r.hex",
"r.hex[,1]=r.hex[,1]+1",
# doesn't work
# "cbind(c(1), c(2), c(3))",
# "cbind(c(1,2,3), c(4,5,6))",
# "cbind(c(1,2,3), c(4,5,6), c(7,8,9))",
# "cbind(c(1,2,3,4), c(5,6,7))",
# "cbind(c(1,2,3), c(4,5,6,7))",
"cbind(c(1,2,3,4), c(5,6,7,8))",
"r.hex[c(1,3,5),]",
"a=c(11,22,33,44,55,66); a[c(2,6,1),]",
# fails?
# "a=c(1,2,3); a[a[,1]>10,1]",
"sum(1,2)",
"sum(1,2,3)",
"sum(c(1,3,5))",
"sum(4,c(1,3,5),2,6)",
"sum(1,r.hex,3)",
"min(1,2)",
# doesn't work
# "min(1,2,3)",
# doesn't work. only 2 params?
# "min(c(1,3,5))",
# doesn't work. only 2 params?
# "min(4,c(1,3,5),2,6)",
# doesn't work
# "min(1,r.hex,3)",
"max(1,23)",
# doesn't work
# Passed 3 args but expected 2
# "max(1,2,3)",
# doesn't work
# "max(c(1,3,5))",
# doesn't work
# Passed 4 args but expected 2
# "max(4,c(1,3,5),2,6)",
# doesn't work
# "max(1,r.hex,3)",
"factor(r.hex[,5])",
"r.hex[,1]==1.0",
"runif(r.hex[,1], -1)",
"r.hex[,3]=4",
]
if DO_APPLY:
exprList += [
"apply(r.hex,1,sum)",
"apply(r.hex,2,sum)",
]
if DO_FUNCTION:
exprList += [
# doesn't work
# "crnk=function(x){99}",
# "crk=function(x){99}",
"crunk=function(x){x+99}",
# "function(x){x+99}",
# "crunk=function(x){99}; r.hex[,3]=4",
"function(x,y,z){x[]}(r.hex,1,2)",
"function(x){x+1}(2)",
"function(x){y=x*2; y+1}(2)",
"function(x){y=1+2}(2)",
"function(funy){function(x){funy(x)*funy(x)}}(sgn)(-2)",
"a=1; a=2; function(x){x=a;a=3}",
"a=r.hex; function(x){x=a;a=3;nrow(x)*a}(a)",
# "mean2=function(x){apply(x,1,sum)/nrow(x)};mean2(r.hex)",
# "mean2=function(x){apply(x,2,sum)/nrow(x)};mean2(r.hex)",
"mean2=function(x){99/nrow(x)};mean2(r.hex)",
"mean2=function(x){99/nrow(x)}",
# what happens if you rename a function in a single string
]
# FIX! should add ternary here?
# ifelse does all 3 params
# ? doesn't do the else if true
# we don't support the split if/else
if DO_TERNARY:
exprList += [
# do we really care about this case
# "(0 ? + : *)(1,2)",
# "0 ? + : * (1, 2)",
"1 ? r.hex : (r.hex+1)",
"1 ? (r.hex+1) : r.hex",
# don't do these harder ternary for now
#"(1 ? r.hex : (r.hex+1))[1,2]",
# "apply(r.hex,2, function(x){x==-1 ? 1 : x})",
"0 ? 1 : 2",
"0 ? r.hex+1 : r.hex+2",
"r.hex>3 ? 99 : r.hex",
]
if DO_IFELSE:
exprList += [
"apply(r.hex,2,function(x){ifelse(x==-1,1,x)})",
"ifelse(0,1,2)",
"ifelse(0,r.hex+1,r.hex+2)",
"ifelse(r.hex>3,99,r.hex)",
"ifelse(0,+,*)(1,2)",
]
if DO_CAN_RETURN_NAN:
exprList += [
"r.hex[r.hex[,1]>4,]",
]
if DO_FAIL1:
exprList += [
"a=ncol(r.hex); r.hex[,c(a+1,a+2)]=5",
]
# concatenate some random choices to make life harder
exprBigList = []
for i in range(1000):
# expr = ""
# concatNum = random.randint(1,2)
# expr = "crunk=function(x){x+98};"
expr = ""
# expr = "function(x){x+98};"
concatNum = random.randint(1,3)
for j in range(concatNum):
randExpr = random.choice(exprList)
if DO_FORCE_LHS_ON_MULTI:
# does the expression already contain an assignment ('=' not followed by '=')?
if re.search("=(?!=)", randExpr):
expr += randExpr + ";"
else:
expr += "d=" + randExpr + ";"
else:
expr += randExpr + ";"
assert expr!="r"
exprBigList.append(expr)
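# e.g. with DO_FORCE_LHS_ON_MULTI a generated expression might look like
# (illustrative): "d=1.23<2.34;x=0; x+2;d=r.hex[1,];"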
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
h2o_hosts.build_cloud_with_hosts(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_exec2_operators2(self):
bucket = 'smalldata'
csvPathname = 'iris/iris2.csv'
hexKey = 'i.hex'
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hexKey)
for resultKey, execExpr in initList:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=resultKey, timeoutSecs=4)
start = time.time()
h2e.exec_expr_list_rand(len(h2o.nodes), exprList, None, maxTrials=200, timeoutSecs=10, allowEmptyResult=True)
# now run them, just concatenating each time. We don't do any template substitution, so we don't need
# exec_expr_list_rand()
bigExecExpr = ""
for execExpr in exprBigList:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=4.)
h2o.check_sandbox_for_errors()
print "exec end on ", "operators" , 'took', time.time() - start, 'seconds'
if __name__ == '__main__':
h2o.unit_main()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import contextlib
import datetime
import hashlib
import inspect
import os
import pyclbr
import re
import shutil
import stat
import sys
import tempfile
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader
from xml.sax import saxutils
from os_brick.initiator import connector
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import timeutils
import retrying
import six
from cinder import exception
from cinder.i18n import _, _LE
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
synchronized = lockutils.synchronized_with_prefix('cinder-')
def find_config(config_path):
"""Find a configuration file using the given hint.
:param config_path: Full or relative path to the config.
:returns: Full path of the config, if it exists.
:raises: `cinder.exception.ConfigNotFound`
"""
possible_locations = [
config_path,
os.path.join(CONF.state_path, "etc", "cinder", config_path),
os.path.join(CONF.state_path, "etc", config_path),
os.path.join(CONF.state_path, config_path),
"/etc/cinder/%s" % config_path,
]
for path in possible_locations:
if os.path.exists(path):
return os.path.abspath(path)
raise exception.ConfigNotFound(path=os.path.abspath(config_path))
def as_int(obj, quiet=True):
# Try "2" -> 2
try:
return int(obj)
except (ValueError, TypeError):
pass
# Try "2.5" -> 2
try:
return int(float(obj))
except (ValueError, TypeError):
pass
# Eck, not sure what this is then.
if not quiet:
raise TypeError(_("Can not translate %s to integer.") % (obj))
return obj
def is_int_like(val):
"""Check if a value looks like an int."""
try:
return str(int(val)) == str(val)
except Exception:
return False
def check_exclusive_options(**kwargs):
"""Checks that only one of the provided options is actually not-none.
Iterates over all the kwargs passed in and checks that only one of said
arguments is not-none, if more than one is not-none then an exception will
be raised with the names of those arguments who were not-none.
"""
if not kwargs:
return
pretty_keys = kwargs.pop("pretty_keys", True)
exclusive_options = {}
for (k, v) in kwargs.iteritems():
if v is not None:
exclusive_options[k] = True
if len(exclusive_options) > 1:
# Change the format of the names from pythonic to
# something that is more readable.
#
# Ex: 'the_key' -> 'the key'
if pretty_keys:
names = [k.replace('_', ' ') for k in kwargs.keys()]
else:
names = kwargs.keys()
names = ", ".join(sorted(names))
msg = (_("May specify only one of %s") % (names))
raise exception.InvalidInput(reason=msg)
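# Illustrative note (not part of the original module; the keyword names below
# are hypothetical): check_exclusive_options(snapshot_id=None,
# source_volid='abc') passes silently, while passing two non-None options
# raises InvalidInput listing the option names.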
def execute(*cmd, **kwargs):
"""Convenience wrapper around oslo's execute() method."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = get_root_helper()
return processutils.execute(*cmd, **kwargs)
def check_ssh_injection(cmd_list):
ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
'<']
# Check whether injection attacks exist
for arg in cmd_list:
arg = arg.strip()
# Check for matching quotes on the ends
is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
if is_quoted:
# Check for unescaped quotes within the quoted argument
quoted = is_quoted.group('quoted')
if quoted:
if (re.match('[\'"]', quoted) or
re.search('[^\\\\][\'"]', quoted)):
raise exception.SSHInjectionThreat(command=cmd_list)
else:
# We only allow spaces within quoted arguments, and that
# is the only special character allowed within quotes
if len(arg.split()) > 1:
raise exception.SSHInjectionThreat(command=cmd_list)
# Second, check for dangerous characters inside the argument: a shell
# special operator is only allowed when it is escaped with a backslash.
for c in ssh_injection_pattern:
if c not in arg:
continue
result = arg.find(c)
if not result == -1:
if result == 0 or not arg[result - 1] == '\\':
raise exception.SSHInjectionThreat(command=cmd_list)
def create_channel(client, width, height):
"""Invoke an interactive shell session on server."""
channel = client.invoke_shell()
channel.resize_pty(width, height)
return channel
def cinderdir():
import cinder
return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0]
def last_completed_audit_period(unit=None):
"""This method gives you the most recently *completed* audit period.
arguments:
units: string, one of 'hour', 'day', 'month', 'year'
Periods normally begin at the beginning (UTC) of the
period unit (So a 'day' period begins at midnight UTC,
a 'month' unit on the 1st, a 'year' on Jan, 1)
unit string may be appended with an optional offset
like so: 'day@18' This will begin the period at 18:00
UTC. 'month@15' starts a monthly period on the 15th,
and year@3 begins a yearly one on March 1st.
returns: 2 tuple of datetimes (begin, end)
The begin timestamp of this audit period is the same as the
end of the previous.
"""
if not unit:
unit = CONF.volume_usage_audit_period
offset = 0
if '@' in unit:
unit, offset = unit.split("@", 1)
offset = int(offset)
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
if offset == 0:
offset = 1
end = datetime.datetime(day=offset,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
year = rightnow.year
if 1 >= rightnow.month:
year -= 1
month = 12 + (rightnow.month - 1)
else:
month = rightnow.month - 1
end = datetime.datetime(day=offset,
month=month,
year=year)
year = end.year
if 1 >= end.month:
year -= 1
month = 12 + (end.month - 1)
else:
month = end.month - 1
begin = datetime.datetime(day=offset, month=month, year=year)
elif unit == 'year':
if offset == 0:
offset = 1
end = datetime.datetime(day=1, month=offset, year=rightnow.year)
if end >= rightnow:
end = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 2)
else:
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
elif unit == 'day':
end = datetime.datetime(hour=offset,
day=rightnow.day,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
elif unit == 'hour':
end = rightnow.replace(minute=offset, second=0, microsecond=0)
if end >= rightnow:
end = end - datetime.timedelta(hours=1)
begin = end - datetime.timedelta(hours=1)
return (begin, end)
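# Illustrative usage sketch (assumption, not part of the original module):
# with unit='day@18', each audit period runs from 18:00 UTC on one day to
# 18:00 UTC on the next; the function returns the most recent such period
# that has already finished.
def _example_audit_period():
    begin, end = last_completed_audit_period(unit='day@18')
    return end - begin  # datetime.timedelta(days=1)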
class ProtectedExpatParser(expatreader.ExpatParser):
"""An expat parser which disables DTD's and entities by default."""
def __init__(self, forbid_dtd=True, forbid_entities=True,
*args, **kwargs):
# Python 2.x old style class
expatreader.ExpatParser.__init__(self, *args, **kwargs)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise ValueError("Inline DTD forbidden")
def entity_decl(self, entityName, is_parameter_entity, value, base,
systemId, publicId, notationName):
raise ValueError("<!ENTITY> forbidden")
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise ValueError("<!ENTITY> forbidden")
def reset(self):
expatreader.ExpatParser.reset(self)
if self.forbid_dtd:
self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
if self.forbid_entities:
self._parser.EntityDeclHandler = self.entity_decl
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
def safe_minidom_parse_string(xml_string):
"""Parse an XML string using minidom safely.
"""
try:
return minidom.parseString(xml_string, parser=ProtectedExpatParser())
except sax.SAXParseException:
raise expat.ExpatError()
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
"""
return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})
def get_from_path(items, path):
"""Returns a list of items matching the specified path.
Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
intermediate results are lists it will treat each list item individually.
A 'None' in items or any child expressions will be ignored; this function
will not throw because of a None (anywhere) in items. The returned list
will contain no None values.
"""
if path is None:
raise exception.Error('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.Error('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, list):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, list):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
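# Illustrative worked example (not part of the original module):
def _example_get_from_path():
    items = [{'a': {'b': 1}}, {'a': [{'b': 2}, {'b': None}]}]
    # Intermediate lists are flattened and None children are skipped,
    # so this returns [1, 2].
    return get_from_path(items, 'a/b')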
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not."""
val = str(val).lower()
return (val == 'true' or val == 'false' or
val == 'yes' or val == 'no' or
val == 'y' or val == 'n' or
val == '1' or val == '0')
def is_none_string(val):
"""Check if a string represents a None value."""
if not isinstance(val, six.string_types):
return False
return val.lower() == 'none'
def monkey_patch():
"""If the CONF.monkey_patch set as True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
Example: 'cinder.api.ec2.cloud:' \
cinder.openstack.common.notifier.api.notify_decorator'
Parameters of the decorator is as follows.
(See cinder.openstack.common.notifier.api.notify_decorator)
name - name of the function
function - object of the function
"""
# If CONF.monkey_patch is not True, this function do nothing.
if not CONF.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, inspect.ismethod):
setattr(
clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def generate_glance_url():
"""Generate the URL to glance."""
# TODO(jk0): This will eventually need to take SSL into consideration
# when supported in glance.
return "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
def make_dev_path(dev, partition=None, base='/dev'):
"""Return a path to a particular device.
>>> make_dev_path('xvdc')
/dev/xvdc
>>> make_dev_path('xvdc', 1)
/dev/xvdc1
"""
path = os.path.join(base, dev)
if partition:
path += str(partition)
return path
def sanitize_hostname(hostname):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
if isinstance(hostname, unicode):
hostname = hostname.encode('latin-1', 'ignore')
hostname = re.sub('[ _]', '-', hostname)
hostname = re.sub('[^\w.-]+', '', hostname)
hostname = hostname.lower()
hostname = hostname.strip('.-')
return hostname
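# Worked example (illustrative): sanitize_hostname(u'My Host_Name!') encodes
# to latin-1, replaces spaces/underscores with '-', drops other characters
# outside [\w.-] and lowercases, yielding 'my-host-name'.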
def hash_file(file_like_object):
"""Generate a hash for the contents of a file."""
checksum = hashlib.sha1()
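# Read the file in 32 KiB chunks and feed each chunk to checksum.update();
# the any(map(...)) idiom just forces the whole file to be consumed.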
any(map(checksum.update, iter(lambda: file_like_object.read(32768), '')))
return checksum.hexdigest()
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = (timeutils.utcnow() - last_heartbeat).total_seconds()
return abs(elapsed) <= CONF.service_down_time
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except processutils.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
:param owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.debug('Could not remove tmpdir: %s',
six.text_type(e))
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
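# Illustrative sketch (not part of the original module): for a hierarchy
# A <- B <- C, the most derived class is yielded first.
def _example_walk_class_hierarchy():
    class A(object):
        pass
    class B(A):
        pass
    class C(B):
        pass
    return list(walk_class_hierarchy(A))  # [C, B]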
def get_root_helper():
return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config
def brick_get_connector_properties(multipath=False, enforce_multipath=False):
"""wrapper for the brick calls to automatically set
the root_helper needed for cinder.
:param multipath: A boolean indicating whether the connector can
support multipath.
:param enforce_multipath: If True, it raises exception when multipath=True
is specified but multipathd is not running.
If False, it falls back to multipath=False
when multipathd is not running.
"""
root_helper = get_root_helper()
return connector.get_connector_properties(root_helper,
CONF.my_ip,
multipath,
enforce_multipath)
def brick_get_connector(protocol, driver=None,
execute=processutils.execute,
use_multipath=False,
device_scan_attempts=3,
*args, **kwargs):
"""Wrapper to get a brick connector object.
This automatically populates the required protocol as well
as the root_helper needed to execute commands.
"""
root_helper = get_root_helper()
return connector.InitiatorConnector.factory(protocol, root_helper,
driver=driver,
execute=execute,
use_multipath=use_multipath,
device_scan_attempts=
device_scan_attempts,
*args, **kwargs)
def require_driver_initialized(driver):
"""Verifies if `driver` is initialized
If the driver is not initialized, an exception will be raised.
:param driver: The driver instance.
:raises: `exception.DriverNotInitialized`
"""
# we can't do anything if the driver didn't init
if not driver.initialized:
driver_name = driver.__class__.__name__
LOG.error(_LE("Volume driver %s not initialized"), driver_name)
raise exception.DriverNotInitialized()
def get_file_mode(path):
"""This primarily exists to make unit testing easier."""
return stat.S_IMODE(os.stat(path).st_mode)
def get_file_gid(path):
"""This primarily exists to make unit testing easier."""
return os.stat(path).st_gid
def get_file_size(path):
"""Returns the file size."""
return os.stat(path).st_size
def _get_disk_of_partition(devpath, st=None):
"""Returns a disk device path from a partition device path, and stat for
the device. If devpath is not a partition, devpath is returned as it is.
For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is
for '/dev/disk1p1' ('p' is prepended to the partition number if the disk
name ends with numbers).
"""
diskpath = re.sub('(?:(?<=\d)p)?\d+$', '', devpath)
if diskpath != devpath:
try:
st_disk = os.stat(diskpath)
if stat.S_ISBLK(st_disk.st_mode):
return (diskpath, st_disk)
except OSError:
pass
# devpath is not a partition
if st is None:
st = os.stat(devpath)
return (devpath, st)
def get_blkdev_major_minor(path, lookup_for_file=True):
"""Get the device's "major:minor" number of a block device to control
I/O ratelimit of the specified path.
If lookup_for_file is True and the path is a regular file, lookup a disk
device which the file lies on and returns the result for the device.
"""
st = os.stat(path)
if stat.S_ISBLK(st.st_mode):
path, st = _get_disk_of_partition(path, st)
return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev))
elif stat.S_ISCHR(st.st_mode):
# No I/O ratelimit control is provided for character devices
return None
elif lookup_for_file:
# lookup the mounted disk which the file lies on
out, _err = execute('df', path)
devpath = out.split("\n")[1].split()[0]
if devpath[0] != '/':
# the file is on a network file system
return None
return get_blkdev_major_minor(devpath, False)
else:
msg = _("Unable to get a block device for file \'%s\'") % path
raise exception.Error(msg)
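# Illustrative note (assumption, not part of the original module): for a path
# on a partition such as /dev/sda1, the partition is first mapped to its disk
# (/dev/sda), so the returned value is the disk's "major:minor" pair,
# e.g. '8:0'.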
def check_string_length(value, name, min_length=0, max_length=None):
"""Check the length of specified string
:param value: the value of the string
:param name: the name of the string
:param min_length: the min_length of the string
:param max_length: the max_length of the string
"""
if not isinstance(value, six.string_types):
msg = _("%s is not a string or unicode") % name
raise exception.InvalidInput(message=msg)
if len(value) < min_length:
msg = _("%(name)s has a minimum character requirement of "
"%(min_length)s.") % {'name': name, 'min_length': min_length}
raise exception.InvalidInput(message=msg)
if max_length and len(value) > max_length:
msg = _("%(name)s has more than %(max_length)s "
"characters.") % {'name': name, 'max_length': max_length}
raise exception.InvalidInput(message=msg)
_visible_admin_metadata_keys = ['readonly', 'attached_mode']
def add_visible_admin_metadata(volume):
"""Add user-visible admin metadata to regular metadata.
Extracts the admin metadata keys that are to be made visible to
non-administrators, and adds them to the regular metadata structure for the
passed-in volume.
"""
visible_admin_meta = {}
if volume.get('volume_admin_metadata'):
for item in volume['volume_admin_metadata']:
if item['key'] in _visible_admin_metadata_keys:
visible_admin_meta[item['key']] = item['value']
# avoid circular ref when volume is a Volume instance
elif (volume.get('admin_metadata') and
isinstance(volume.get('admin_metadata'), dict)):
for key in _visible_admin_metadata_keys:
if key in volume['admin_metadata'].keys():
visible_admin_meta[key] = volume['admin_metadata'][key]
if not visible_admin_meta:
return
# NOTE(zhiyan): update visible administration metadata to
# volume metadata, administration metadata will rewrite existing key.
if volume.get('volume_metadata'):
orig_meta = list(volume.get('volume_metadata'))
for item in orig_meta:
if item['key'] in visible_admin_meta.keys():
item['value'] = visible_admin_meta.pop(item['key'])
for key, value in visible_admin_meta.iteritems():
orig_meta.append({'key': key, 'value': value})
volume['volume_metadata'] = orig_meta
# avoid circular ref when vol is a Volume instance
elif (volume.get('metadata') and
isinstance(volume.get('metadata'), dict)):
volume['metadata'].update(visible_admin_meta)
else:
volume['metadata'] = visible_admin_meta
def remove_invalid_filter_options(context, filters,
allowed_search_options):
"""Remove search options that are not valid
for non-admin API/context.
"""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in filters
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
LOG.debug("Removing options '%s' from query.", bad_options)
for opt in unknown_options:
del filters[opt]
def is_blk_device(dev):
try:
if stat.S_ISBLK(os.stat(dev).st_mode):
return True
return False
except Exception:
LOG.debug('Path %s not found in is_blk_device check', dev)
return False
def retry(exceptions, interval=1, retries=3, backoff_rate=2):
def _retry_on_exception(e):
return isinstance(e, exceptions)
def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
exp = backoff_rate ** previous_attempt_number
wait_for = max(0, interval * exp)
LOG.debug("Sleeping for %s seconds", wait_for)
return wait_for * 1000.0
def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
LOG.debug("Failed attempt %s", previous_attempt_number)
LOG.debug("Have been at this for %s seconds",
delay_since_first_attempt)
return previous_attempt_number == retries
if retries < 1:
raise ValueError('Retries must be greater than or '
'equal to 1 (received: %s). ' % retries)
def _decorator(f):
@six.wraps(f)
def _wrapper(*args, **kwargs):
r = retrying.Retrying(retry_on_exception=_retry_on_exception,
wait_func=_backoff_sleep,
stop_func=_print_stop)
return r.call(f, *args, **kwargs)
return _wrapper
return _decorator
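# Illustrative usage sketch (assumption, not part of the original module):
# shows how retry() can wrap a flaky call so that the listed exception
# triggers up to `retries` attempts, with an exponentially growing sleep
# (interval * backoff_rate**attempt) between them.
def _example_retry_usage():
    @retry(processutils.ProcessExecutionError, interval=1, retries=3)
    def _flaky_cat():
        return execute('cat', '/etc/hostname', run_as_root=True)
    return _flaky_cat()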
def convert_version_to_int(version):
try:
if isinstance(version, six.string_types):
version = convert_version_to_tuple(version)
if isinstance(version, tuple):
return reduce(lambda x, y: (x * 1000) + y, version)
except Exception:
msg = _("Version %s is invalid.") % version
raise exception.CinderException(msg)
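# Worked example (illustrative): components are packed base-1000, so
# convert_version_to_int('1.2.3') == 1002003 and
# convert_version_to_str(1002003) == '1.2.3'.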
def convert_version_to_str(version_int):
version_numbers = []
factor = 1000
while version_int != 0:
version_number = version_int - (version_int // factor * factor)
version_numbers.insert(0, six.text_type(version_number))
version_int = version_int / factor
return reduce(lambda x, y: "%s.%s" % (x, y), version_numbers)
def convert_version_to_tuple(version_str):
return tuple(int(part) for part in version_str.split('.'))
|
|
import datetime
import logging
import MySQLdb
import random
import re
import subprocess
from pylons import config
from anygit.backends import common
from anygit.data import exceptions
logger = logging.getLogger(__name__)
connection = None
collection_to_class = {}
sha1_re = re.compile('^[a-f0-9]*')
## Exported functions
def create_schema():
print 'Huhh??'
def _flush(klass, instances):
klass._object_store.insert_all(instance.get_updates() for instance in instances)
common._register_flush(_flush)
def init_model(connection):
"""Call me before using any of the tables or classes in the model."""
db = connection
for obj in common.__dict__.itervalues():
if type(obj) == type and issubclass(obj, common.Model) and hasattr(obj, '__tablename__'):
tablename = getattr(obj, '__tablename__')
obj._object_store = Domain(db, tablename)
collection_to_class[obj._object_store] = obj
def setup():
"""
Sets up the database session
"""
global connection
connection = MySQLdb.connect(host=config.get('mysql.host'),
user=config.get('mysql.user'),
passwd=config.get('mysql.password'),
db=config.get('mysql.db'),
ssl={'ca' : config.get('mysql.cert')})
init_model(connection)
def destroy_session():
global connection
connection = None
## Internal functions
class Query(object):
def __init__(self, domain, query, is_full_query=None):
self.is_full_query = is_full_query
self._limit = None
self._skip = None
self._order = None
self.domain = domain
if isinstance(query, dict):
items = []
for k, v in query.iteritems():
if isinstance(v, list):
items.append('`%s` IN (%s)' % (k, ','.join(self.domain._encode(val) for val in v)))
elif isinstance(v, dict):
if '$lt' in v:
items.append('`%s` < %s' % (k, self.domain._encode(v['$lt'])))
elif '$in' in v:
if v['$in']:
items.append('`%s` IN (%s)' %
(k, ','.join(self.domain._encode(val) for val in v['$in'])))
else:
items.append('1 = 0')
else:
raise ValueError('Unrecognized query modifier %s' % v)
else:
items.append('`%s` = %s' % (k, self.domain._encode(v)))
query = ' and '.join(items)
self.query = query
self._iterator = None
def _get_iterator(self):
if not self._iterator:
self._iterator = iter(self.domain.select(self._get_select()))
return self._iterator
def _get_order(self):
if self._order:
return ' ORDER BY `%s` %s' % self._order
else:
return ''
def _get_select(self):
# TODO: select a subset of attributes
if self.is_full_query:
return self.query
if self.query:
full_query = 'select * from `%s` where %s' % (self.domain.name, self.query)
else:
full_query = 'select * from `%s`' % self.domain.name
return full_query + self._get_order() + self._get_limit()
def _get_count(self):
if self.query:
full_query = 'select count(*) as count from `%s` where %s' % (self.domain.name, self.query)
else:
full_query = 'select count(*) as count from `%s`' % self.domain.name
return full_query
def _get_limit(self):
clause = []
if self._limit is not None:
clause.append('LIMIT %d' % self._limit)
if self._skip is not None:
clause.append('OFFSET %d' % self._skip)
if clause:
return ' %s' % ' '.join(clause)
else:
return ''
def __iter__(self):
return iter(self.transform_outgoing(i) for i in self._get_iterator())
def count(self):
return int(self.domain.select(self._get_count()).next()['count'])
def next(self):
return self.transform_outgoing(self._get_iterator().next())
def limit(self, limit):
self._limit = limit
return self
def skip(self, skip):
self._skip = skip
return self
def order(self, column, type):
"""Order the results. type should be ASC or DESC"""
self._order = (column, type)
return self
def transform_outgoing(self, son):
"""Transform an object retrieved from the database"""
if 'type' in son:
klass = common.classify(son['type'])
return klass.demongofy(son)
else:
try:
return collection_to_class[self.domain].demongofy(son)
except KeyError:
return son
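# Illustrative note (not part of the original module): a Mongo-style dict such
# as {'type': 'commit', 'size': {'$lt': 10}} is rendered roughly as
# "`type` = 'commit' and `size` < '10'" (values are encoded as quoted
# strings), and .limit(5).skip(10) appends " LIMIT 5 OFFSET 10" to the SELECT.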
class Domain(object):
def __init__(self, connection, name):
self.connection = connection
self.name = name
def find(self, kwargs='', is_full_query=None):
return Query(self, kwargs, is_full_query=is_full_query)
def find_one(self, kwargs):
result = self.find(kwargs)
return result.next()
def find_prefix(self, attr, value):
# TODO: Perhaps do actual escaping here
if not sha1_re.search(value):
raise ValueError('Invalid sha1 prefix %s' % value)
return Query(self, '%s LIKE "%s%%"' % (attr, value))
def _encode(self, value):
if isinstance(value, bool):
if value:
return '1'
else:
return '0'
else:
return repr(unicode(value)).lstrip('u')
def _prepare_params(self, id, attributes):
keys = []
values = []
# TODO: escape
if id is not None:
keys.append('`id`')
values.append(self._encode(id))
for k, v in attributes.iteritems():
keys.append('`%s`' % k)
values.append(self._encode(v))
return keys, values
def insert(self, attributes, delayed=True):
keys, values = self._prepare_params(None, attributes)
if delayed:
delayed_statement = ' DELAYED'
else:
delayed_statement = ''
query = 'INSERT%s IGNORE INTO `%s` (%s) VALUES (%s)' % (delayed_statement,
self.name,
', '.join(keys),
', '.join(values))
self._execute(query)
def insert_all(self, attributes_list, delayed=True):
if not attributes_list:
logger.error('Asked me to save nothing...')
return
if delayed:
delayed_statement = ' DELAYED'
else:
delayed_statement = ''
args = []
for attributes in attributes_list:
keys, values = self._prepare_params(None, attributes)
assert keys
assert values
args.append(' (%s)' % ', '.join(values))
query = 'INSERT%s IGNORE INTO `%s` (%s) VALUES %s' % (delayed_statement,
self.name,
', '.join(keys),
', '.join(args))
logger.info('Massive insert: %s' % query)
self._execute(query)
def update(self, id, attributes):
keys, values = self._prepare_params(None, attributes)
# Mutable
args = ', '.join('%s=%s' % (k, v) for k, v in zip(keys, values))
query = 'UPDATE `%s` SET %s WHERE `id` = %s' % (self.name, args, self._encode(id))
self._execute(query)
def select(self, query_string):
cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
self._execute(query_string, cursor=cursor)
return iter(cursor)
def drop(self):
self._execute('DROP TABLE `%s`' % self.name)
def _execute(self, query_string, cursor=None):
if not cursor:
cursor = self.connection.cursor()
return cursor.execute(query_string)
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import os
import sys
import tempfile
import time
import traceback
import zipfile
from django.conf import settings
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.servers.basehttp import FileWrapper
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
import django.views.debug
from desktop.lib import django_mako
from desktop.lib.conf import GLOBAL_CONFIG
from desktop.lib.django_util import login_notrequired, render_json, render
from desktop.lib.i18n import smart_str, force_unicode
from desktop.lib.paths import get_desktop_root
from desktop.log.access import access_log_level, access_warn
from desktop.models import UserPreferences, Settings, Document, DocumentTag
from desktop import appmanager
import desktop.conf
import desktop.log.log_buffer
LOG = logging.getLogger(__name__)
def home(request):
docs = Document.objects.get_docs(request.user).order_by('-last_modified')[:1000]
tags = DocumentTag.objects.get_tags(user=request.user)
apps = appmanager.get_apps_dict(request.user)
return render('home.mako', request, {
'apps': apps,
'documents': docs,
'json_documents': json.dumps(massaged_documents_for_json(docs)),
'tags': tags,
'json_tags': json.dumps(massaged_tags_for_json(tags, request.user))
})
def list_docs(request):
docs = Document.objects.get_docs(request.user).order_by('-last_modified')[:1000]
return HttpResponse(json.dumps(massaged_documents_for_json(docs)), mimetype="application/json")
def list_tags(request):
tags = DocumentTag.objects.get_tags(user=request.user)
return HttpResponse(json.dumps(massaged_tags_for_json(tags, request.user)), mimetype="application/json")
def massaged_documents_for_json(documents):
return [massage_doc_for_json(doc) for doc in documents]
def massage_doc_for_json(doc):
perms = doc.list_permissions()
return {
'id': doc.id,
'contentType': doc.content_type.name,
'icon': doc.icon,
'name': doc.name,
'url': doc.content_object.get_absolute_url(),
'description': doc.description,
'tags': [{'id': tag.id, 'name': tag.tag} for tag in doc.tags.all()],
'perms': {
'read': {
'users': [{'id': user.id, 'username': user.username} for user in perms.users.all()],
'groups': [{'id': group.id, 'name': group.name} for group in perms.groups.all()]
}
},
'owner': doc.owner.username,
'lastModified': doc.last_modified.strftime("%x %X"),
'lastModifiedInMillis': time.mktime(doc.last_modified.timetuple())
}
def massaged_tags_for_json(tags, user):
ts = []
trash = DocumentTag.objects.get_trash_tag(user)
history = DocumentTag.objects.get_history_tag(user)
for tag in tags:
massaged_tag = {
'id': tag.id,
'name': tag.tag,
'isTrash': tag.id == trash.id,
'isHistory': tag.id == history.id,
'isExample': tag.tag == DocumentTag.EXAMPLE
}
ts.append(massaged_tag)
return ts
def add_tag(request):
response = {'status': -1, 'message': ''}
if request.method == 'POST':
try:
tag = DocumentTag.objects.create_tag(request.user, request.POST['name'])
response['tag_id'] = tag.id
response['status'] = 0
except Exception, e:
response['message'] = force_unicode(e)
else:
response['message'] = _('POST request only')
return HttpResponse(json.dumps(response), mimetype="application/json")
def tag(request):
response = {'status': -1, 'message': ''}
if request.method == 'POST':
request_json = json.loads(request.POST['data'])
try:
tag = DocumentTag.objects.tag(request.user, request_json['doc_id'], request_json.get('tag'), request_json.get('tag_id'))
response['tag_id'] = tag.id
response['status'] = 0
except Exception, e:
response['message'] = force_unicode(e)
else:
response['message'] = _('POST request only')
return HttpResponse(json.dumps(response), mimetype="application/json")
def update_tags(request):
response = {'status': -1, 'message': ''}
if request.method == 'POST':
request_json = json.loads(request.POST['data'])
try:
doc = DocumentTag.objects.update_tags(request.user, request_json['doc_id'], request_json['tag_ids'])
response['doc'] = massage_doc_for_json(doc)
response['status'] = 0
except Exception, e:
response['message'] = force_unicode(e)
else:
response['message'] = _('POST request only')
return HttpResponse(json.dumps(response), mimetype="application/json")
def remove_tags(request):
response = {'status': -1, 'message': _('Error')}
if request.method == 'POST':
request_json = json.loads(request.POST['data'])
try:
for tag_id in request_json['tag_ids']:
DocumentTag.objects.delete_tag(tag_id, request.user)
response['message'] = _('Tag(s) removed!')
response['status'] = 0
except Exception, e:
response['message'] = force_unicode(e)
else:
response['message'] = _('POST request only')
return HttpResponse(json.dumps(response), mimetype="application/json")
def update_permissions(request):
response = {'status': -1, 'message': _('Error')}
if request.method == 'POST':
data = json.loads(request.POST['data'])
doc_id = request.POST['doc_id']
try:
doc = Document.objects.get_doc(doc_id, request.user)
# doc.sync_permissions({'read': {'user_ids': [1, 2, 3], 'group_ids': [1, 2, 3]}})
doc.sync_permissions(data)
response['message'] = _('Permissions updated!')
response['status'] = 0
response['doc'] = massage_doc_for_json(doc)
except Exception, e:
response['message'] = force_unicode(e)
else:
response['message'] = _('POST request only')
return HttpResponse(json.dumps(response), mimetype="application/json")
@access_log_level(logging.WARN)
def log_view(request):
"""
We have a log handler that retains the last X characters of log messages.
If it is attached to the root logger, this view will display that history,
otherwise it will report that it can't be found.
"""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
l = logging.getLogger()
for h in l.handlers:
if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
return render('logs.mako', request, dict(log=[l for l in h.buf], query=request.GET.get("q", "")))
return render('logs.mako', request, dict(log=[_("No logs found!")]))
@access_log_level(logging.WARN)
def download_log_view(request):
"""
Zip up the log buffer and then return as a file attachment.
"""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
l = logging.getLogger()
for h in l.handlers:
if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
try:
# We want to avoid doing a '\n'.join of the entire log in memory
# in case it is rather big. So we write it to a file line by line
# and pass that file to zipfile, which might follow a more efficient path.
tmp = tempfile.NamedTemporaryFile()
log_tmp = tempfile.NamedTemporaryFile("w+t")
for l in h.buf:
log_tmp.write(smart_str(l) + '\n')
# This is not just for show - w/out flush, we often get truncated logs
log_tmp.flush()
t = time.time()
zip = zipfile.ZipFile(tmp, "w", zipfile.ZIP_DEFLATED)
zip.write(log_tmp.name, "hue-logs/hue-%s.log" % t)
zip.close()
length = tmp.tell()
# if we don't seek to start of file, no bytes will be written
tmp.seek(0)
wrapper = FileWrapper(tmp)
response = HttpResponse(wrapper, content_type="application/zip")
response['Content-Disposition'] = 'attachment; filename=hue-logs-%s.zip' % t
response['Content-Length'] = length
return response
except Exception, e:
logging.exception("Couldn't construct zip file to write logs to.")
return log_view(request)
return render_to_response("logs.mako", dict(log=[_("No logs found.")]))
@access_log_level(logging.DEBUG)
def prefs(request, key=None):
"""Get or set preferences."""
if key is None:
d = dict( (x.key, x.value) for x in UserPreferences.objects.filter(user=request.user))
return render_json(d)
else:
if "set" in request.REQUEST:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
except UserPreferences.DoesNotExist:
x = UserPreferences(user=request.user, key=key)
x.value = request.REQUEST["set"]
x.save()
return render_json(True)
if "delete" in request.REQUEST:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
x.delete()
return render_json(True)
except UserPreferences.DoesNotExist:
return render_json(False)
else:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
return render_json(x.value)
except UserPreferences.DoesNotExist:
return render_json(None)
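# Illustrative note (assumption about how the view is routed): requesting it
# with ?set=<value> stores the value for the current user, ?delete removes it,
# a plain request for a key returns the stored value as JSON, and a request
# without a key returns all of the user's preferences as a JSON object.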
def bootstrap(request):
"""Concatenates bootstrap.js files from all installed Hue apps."""
# Contains some Nones for apps that don't have bootstrap files.
all_bootstraps = [ (app, app.get_bootstrap_file()) for app in appmanager.DESKTOP_APPS if request.user.has_hue_permission(action="access", app=app.name) ]
# Iterator over the streams.
concatenated = [ "\n/* %s */\n%s" % (app.name, b.read()) for app, b in all_bootstraps if b is not None ]
# HttpResponse can take an iterable as the first argument, which
# is what happens here.
return HttpResponse(concatenated, mimetype='text/javascript')
_status_bar_views = []
def register_status_bar_view(view):
global _status_bar_views
_status_bar_views.append(view)
@access_log_level(logging.DEBUG)
def status_bar(request):
"""
Concatenates multiple views together to build up the "status bar".
These views are registered using register_status_bar_view above.
"""
resp = ""
for view in _status_bar_views:
try:
r = view(request)
if r.status_code == 200:
resp += r.content
else:
LOG.warning("Failed to execute status_bar view %s" % (view,))
except:
LOG.exception("Failed to execute status_bar view %s" % (view,))
return HttpResponse(resp)
def dump_config(request):
# Note that this requires login (as do most apps).
show_private = False
conf_dir = os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf")))
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
if request.GET.get("private"):
show_private = True
apps = sorted(appmanager.DESKTOP_MODULES, key=lambda app: app.name)
apps_names = [app.name for app in apps]
top_level = sorted(GLOBAL_CONFIG.get().values(), key=lambda obj: apps_names.index(obj.config.key))
return render("dump_config.mako", request, dict(
show_private=show_private,
top_level=top_level,
conf_dir=conf_dir,
apps=apps))
if sys.version_info[0:2] <= (2,4):
def _threads():
import threadframe
return threadframe.dict().iteritems()
else:
def _threads():
return sys._current_frames().iteritems()
@access_log_level(logging.WARN)
def threads(request):
"""Dumps out server threads. Useful for debugging."""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
out = []
for thread_id, stack in _threads():
out.append("Thread id: %s" % thread_id)
for filename, lineno, name, line in traceback.extract_stack(stack):
out.append(" %-20s %s(%d)" % (name, filename, lineno))
out.append(" %-80s" % (line))
out.append("")
return HttpResponse("\n".join(out), content_type="text/plain")
def jasmine(request):
return render('jasmine.mako', request, None)
def index(request):
if request.user.is_superuser:
return redirect(reverse('about:index'))
else:
return home(request)
def serve_404_error(request, *args, **kwargs):
"""Registered handler for 404. We just return a simple error"""
access_warn(request, "404 not found")
return render("404.mako", request, dict(uri=request.build_absolute_uri()), status=404)
def serve_500_error(request, *args, **kwargs):
"""Registered handler for 500. We use the debug view to make debugging easier."""
try:
exc_info = sys.exc_info()
if exc_info:
if desktop.conf.HTTP_500_DEBUG_MODE.get() and exc_info[0] and exc_info[1]:
# If (None, None, None), default server error describing why this failed.
return django.views.debug.technical_500_response(request, *exc_info)
else:
# Could have an empty traceback
return render("500.mako", request, {'traceback': traceback.extract_tb(exc_info[2])})
else:
# exc_info could be empty
return render("500.mako", request, {})
finally:
# Fallback to default 500 response if ours fails
# Will end up here:
# - Middleware or authentication backends problems
# - Certain missing imports
# - Packaging and install issues
pass
_LOG_LEVELS = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG
}
_MAX_LOG_FRONTEND_EVENT_LENGTH = 1024
_LOG_FRONTEND_LOGGER = logging.getLogger("desktop.views.log_frontend_event")
@login_notrequired
def log_frontend_event(request):
"""
Logs arguments to server's log. Returns an
empty string.
Parameters (specified via either GET or POST) are
"logname", "level" (one of "debug", "info", "warning",
"error", or "critical"), and "message".
"""
def get(param, default=None):
return request.REQUEST.get(param, default)
level = _LOG_LEVELS.get(get("level"), logging.INFO)
msg = "Untrusted log event from user %s: %s" % (
request.user,
get("message", "")[:_MAX_LOG_FRONTEND_EVENT_LENGTH])
_LOG_FRONTEND_LOGGER.log(level, msg)
return HttpResponse("")
def who_am_i(request):
"""
Returns username and FS username, and optionally sleeps.
"""
try:
sleep = float(request.REQUEST.get("sleep") or 0.0)
except ValueError:
sleep = 0.0
time.sleep(sleep)
return HttpResponse(request.user.username + "\t" + request.fs.user + "\n")
def commonheader(title, section, user, padding="90px"):
"""
Returns the rendered common header
"""
current_app = None
other_apps = []
if user.is_authenticated():
apps = appmanager.get_apps(user)
apps_list = appmanager.get_apps_dict(user)
for app in apps:
if app.display_name not in ['beeswax', 'impala', 'pig', 'jobsub', 'jobbrowser', 'metastore', 'hbase', 'sqoop', 'oozie', 'filebrowser', 'useradmin', 'search', 'help', 'about', 'zookeeper', 'proxy']:
other_apps.append(app)
if section == app.display_name:
current_app = app
else:
apps_list = []
return django_mako.render_to_string("common_header.mako", {
'current_app': current_app,
'apps': apps_list,
'other_apps': other_apps,
'title': title,
'section': section,
'padding': padding,
'user': user
})
def commonfooter(messages=None):
"""
Returns the rendered common footer
"""
if messages is None:
messages = {}
hue_settings = Settings.get_settings()
return django_mako.render_to_string("common_footer.mako", {
'messages': messages,
'version': settings.HUE_DESKTOP_VERSION,
'collect_usage': desktop.conf.COLLECT_USAGE.get(),
'tours_and_tutorials': hue_settings.tours_and_tutorials
})
# If the app's conf.py has a config_validator() method, call it.
CONFIG_VALIDATOR = 'config_validator'
#
# Cache config errors because (1) they mostly don't go away until restart,
# and (2) they can be costly to compute. So don't stress the system just because
# the dock bar wants to refresh every n seconds.
#
# The actual viewing of all errors may choose to disregard the cache.
#
_CONFIG_ERROR_LIST = None
def _get_config_errors(request, cache=True):
"""Returns a list of (confvar, err_msg) tuples."""
global _CONFIG_ERROR_LIST
if not cache or _CONFIG_ERROR_LIST is None:
error_list = [ ]
for module in appmanager.DESKTOP_MODULES:
# Get the config_validator() function
try:
validator = getattr(module.conf, CONFIG_VALIDATOR)
except AttributeError:
continue
if not callable(validator):
LOG.warn("Auto config validation: %s.%s is not a function" %
(module.conf.__name__, CONFIG_VALIDATOR))
continue
try:
error_list.extend(validator(request.user))
except Exception, ex:
LOG.exception("Error in config validation by %s: %s" % (module.nice_name, ex))
_CONFIG_ERROR_LIST = error_list
return _CONFIG_ERROR_LIST
def check_config(request):
"""Check config and view for the list of errors"""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
conf_dir = os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf")))
return render('check_config.mako', request, {
'error_list': _get_config_errors(request, cache=False),
'conf_dir': conf_dir
},
force_template=True)
def check_config_ajax(request):
"""Alert administrators about configuration problems."""
if not request.user.is_superuser:
return HttpResponse('')
error_list = _get_config_errors(request)
if not error_list:
# Return an empty response, rather than using the mako template, for performance.
return HttpResponse('')
return render('config_alert_dock.mako',
request,
dict(error_list=error_list),
force_template=True)
|
|
from __future__ import absolute_import
import os
import six
import logging
import warnings
from django import VERSION
from leonardo.base import leonardo, default
from leonardo.utils.settings import (get_conf_from_module, merge,
get_leonardo_modules, get_loaded_modules,
DJANGO_CONF)
from importlib import import_module # noqa
from django.utils.module_loading import module_has_submodule # noqa
_file_path = os.path.abspath(os.path.dirname(__file__)).split('/')
BASE_DIR = '/'.join(_file_path[0:-2])
from leonardo.conf.default import *
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
if VERSION[:2] >= (1, 8):
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'templates')
],
'OPTIONS': {
'context_processors': default.context_processors,
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'dbtemplates.loader.Loader',
'horizon.loaders.TemplateLoader',
],
'debug': True
},
},
]
else:
TEMPLATE_DIRS = [
os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'templates')
]
TEMPLATE_CONTEXT_PROCESSORS = default.context_processors
TEMPLATE_LOADERS = (
'dbtemplates.loader.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'horizon.loaders.TemplateLoader',
)
try:
# obsolete location since 1.0.3; use `leonardo_site.settings`
from leonardo_site.local.settings import *
warnings.warn(
'leonardo_site.local.settings is obsolete; use the new location')
except ImportError:
pass
try:
# full settings
# TODO support configurable from local_settings
# LEONARDO_PROJECT_NAME = 'leonardo_site'
from leonardo_site.settings import *
except ImportError:
pass
try:
# local settings
from local_settings import *
except ImportError:
warnings.warn(
'local_settings was not found in $PYTHONPATH !')
if not DEBUG:
if VERSION[:2] >= (1, 8):
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'dbtemplates.loader.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'horizon.loaders.TemplateLoader',
])]
TEMPLATES[0]['OPTIONS']['debug'] = False
else:
TEMPLATE_DEBUG = DEBUG
APPS = merge(APPS, default.core)
if 'media' in APPS:
FILER_IMAGE_MODEL = 'leonardo.module.media.models.Image'
try:
from leonardo.conf.horizon import *
from leonardo.conf.static import *
except Exception as e:
pass
if LEONARDO_SYSTEM_MODULE:
APPS = merge(APPS, ['leonardo_system'])
HORIZON_CONFIG['system_module'] = True
else:
HORIZON_CONFIG['system_module'] = False
# load directly specified apps
leonardo.get_app_modules(APPS)
# propagate settings to leonardo
leonardo.MODULES_AUTOLOAD = LEONARDO_MODULE_AUTO_INCLUDE
# load all modules
leonardo.load_modules()
# just propagate all loaded modules to settings
LEONARDO_MODULES = leonardo.get_modules()
# iterate over sorted modules
for mod, mod_cfg in LEONARDO_MODULES:
try:
# import all settings keys from module
if module_has_submodule(mod, "settings"):
try:
settings_mod = import_module(
'{0}.settings'.format(mod.__name__))
for k in dir(settings_mod):
if not k.startswith("_"):
val = getattr(settings_mod, k, None)
globals()[k] = val
locals()[k] = val
except Exception as e:
warnings.warn(
'Exception "{}" raised during loading '
'settings from {}'.format(str(e), mod))
# go through django keys and merge it to main settings
for key in DJANGO_CONF.keys():
updated_value = mod_cfg.get_value(key, globals()[key])
globals()[key] = updated_value
locals()[key] = updated_value
# map value to leonardo but under our internal name
setattr(leonardo, DJANGO_CONF[key], updated_value)
if mod_cfg.urls_conf:
MODULE_URLS[mod_cfg.urls_conf] = {'is_public': mod_cfg.public}
# TODO move to utils.settings
# support for one level nested in config dictionary
for config_key, config_value in six.iteritems(mod_cfg.config):
if isinstance(config_value, dict):
CONSTANCE_CONFIG_GROUPS.update({config_key: config_value})
for c_key, c_value in six.iteritems(config_value):
mod_cfg.config[c_key] = c_value
# remove from main dict
mod_cfg.config.pop(config_key)
else:
if isinstance(mod_cfg.optgroup, six.string_types):
CONSTANCE_CONFIG_GROUPS.update({
mod_cfg.optgroup: mod_cfg.config})
else:
if 'ungrouped' in CONSTANCE_CONFIG_GROUPS:
CONSTANCE_CONFIG_GROUPS['ungrouped'].update(mod_cfg.config)
else:
CONSTANCE_CONFIG_GROUPS['ungrouped'] = \
mod_cfg.config
# import and update absolute overrides
for model, method in six.iteritems(mod_cfg.absolute_url_overrides):
try:
_mod = import_module(".".join(method.split('.')[:-1]))
ABSOLUTE_URL_OVERRIDES[model] = getattr(
_mod, method.split('.')[-1])
except Exception as e:
raise e
for nav_extension in mod_cfg.navigation_extensions:
try:
import_module(nav_extension)
except ImportError:
pass
CONSTANCE_CONFIG.update(mod_cfg.config)
if VERSION[:2] >= (1, 8):
TEMPLATES[0]['DIRS'] = merge(TEMPLATES[0]['DIRS'], mod_cfg.dirs)
cp = TEMPLATES[0]['OPTIONS']['context_processors']
TEMPLATES[0]['OPTIONS']['context_processors'] = merge(
cp, mod_cfg.context_processors)
else:
TEMPLATE_CONTEXT_PROCESSORS = merge(
TEMPLATE_CONTEXT_PROCESSORS, mod_cfg.context_processors)
TEMPLATE_DIRS = merge(TEMPLATE_DIRS, mod_cfg.dirs)
# collect grouped widgets
if isinstance(mod_cfg.optgroup, six.string_types):
if len(mod_cfg.widgets) > 0:
WIDGETS[mod_cfg.optgroup] = merge(
                        WIDGETS.get(mod_cfg.optgroup, []), mod_cfg.widgets)
else:
if len(mod_cfg.widgets) > 0 and DEBUG:
WIDGETS['ungrouped'] = merge(
                            WIDGETS.get('ungrouped', []), mod_cfg.widgets)
                    warnings.warn('You have ungrouped widgets, '
                                  'please specify your ``optgroup`` '
                                  'which categorizes your widgets in %s' % mod)
except Exception as e:
warnings.warn(
'Exception "{}" raised during loading '
'module {}'.format(str(e), mod))
setattr(leonardo, 'widgets', WIDGETS)
from leonardo.module.web.models import Page
from leonardo.module.web.widget import ApplicationWidget
# register external apps
Page.create_content_type(
ApplicationWidget, APPLICATIONS=APPLICATION_CHOICES)
# register widgets
for _optgroup, _widgets in six.iteritems(WIDGETS):
optgroup = _optgroup if _optgroup != 'ungrouped' else None
for widget in _widgets:
Page.create_content_type(widget, optgroup=optgroup)
Page.register_extensions(*PAGE_EXTENSIONS)
Page.register_default_processors(LEONARDO_FRONTEND_EDITING)
# FINALLY OVERRIDE ALL
try:
# local settings
from local_settings import *
except ImportError:
warnings.warn(
        'Missing local_settings!')
try:
# full settings
from leonardo_site.local.settings import *
except ImportError:
pass
try:
# full settings
from leonardo_site.settings import *
except ImportError:
pass
# and again merge core with others
APPS = merge(APPS, default.core)
# go through the django keys and merge them into the main settings
for key in DJANGO_CONF.keys():
# map value to leonardo but under our internal name
setattr(leonardo, DJANGO_CONF[key], globals()[key])
# Add HORIZON_CONFIG to the context information for offline compression
COMPRESS_OFFLINE_CONTEXT = {
'STATIC_URL': STATIC_URL,
'HORIZON_CONFIG': HORIZON_CONFIG,
}
if DEBUG:
try:
import debug_toolbar
INSTALLED_APPS = merge(INSTALLED_APPS, ['debug_toolbar'])
from leonardo.conf.debug import *
except ImportError:
if DEBUG:
            warnings.warn('DEBUG is set to True, but the debug tools '
                          'are not installed; please run '
                          '"pip install django-leonardo[debug]"')
# async messages
try:
import async_messages
INSTALLED_APPS = merge(INSTALLED_APPS, ['async_messages'])
MIDDLEWARE_CLASSES = merge(MIDDLEWARE_CLASSES,
['async_messages.middleware.AsyncMiddleware'])
except ImportError:
pass
# use js files instead of horizon
HORIZON_CONFIG['js_files'] = leonardo.js_files
HORIZON_CONFIG['js_spec_files'] = leonardo.js_spec_files
HORIZON_CONFIG['css_files'] = leonardo.css_files
HORIZON_CONFIG['scss_files'] = leonardo.scss_files
HORIZON_CONFIG['angular_modules'] = leonardo.angular_modules
HORIZON_CONFIG['page_actions'] = leonardo.page_actions
HORIZON_CONFIG['widget_actions'] = leonardo.widget_actions
# patch horizon config
from horizon import conf
conf.HORIZON_CONFIG = HORIZON_CONFIG
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
|
|
from yahoo_finance import Share
from argparse import ArgumentParser
import time
import sys
import datetime
import timeit
mystock = ""
yearhighlist = ""
yearlowlist = ""
volumealert = ""
winnerList = ""
looserList = ""
sp500index = Share('^GSPC')
nasdaqindex = Share('^IXIC')
sp500Price = ""
sp500Change = ""
sp500percentChange = ""
ndqChange = ""
globalTrip = 0
### VolumeHighTest - return volumealert as a list instead of a string, so that it can be referenced more easily elsewhere in the future
def isMarketOpen():
"""determines how many seconds after midnight now is, and compares it with the seconds after midnight in the blackout period. Between 9am-9:45am Eastern Yahoo! sends N/A data for some fields, for which I don't have all of the error handling working"""
newnow = datetime.datetime.now()
midnight = datetime.datetime.combine(newnow.date(), datetime.time())
seconds = (newnow - midnight).seconds
startBlackout = 32400 #9:00am Eastern is 32,400 seconds after midnight, begin blackout period
endBlackout = 35100 #9:45am Eastern is 35,100 seconds after midnight; end blackout period
print seconds
if (seconds >= startBlackout) and (seconds <= endBlackout):
return 0
else:
return 1
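# For example (illustrative): at 10:15am local time `seconds` is 36,900, which is
# outside the 32,400-35,100 blackout window, so isMarketOpen() returns 1. The
# constants above assume the host clock is on Eastern time.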
def ofAverageVolume(self):
    """Compares a stock's current volume against its average volume to determine what percent of the average it is"""
    myAverageVolume = getAvgDailyVolume(self)
    myCurrentVolume = getVolume(self)
    # Both branches return a percentage so callers can print e.g. '85.3%'.
    if myAverageVolume > myCurrentVolume:
        ofAverageVolume = (myCurrentVolume / myAverageVolume) * 100
        return ofAverageVolume
    else:
        ofAverageVolume = (1 - (myAverageVolume - myCurrentVolume) / myAverageVolume) * 100
        return ofAverageVolume
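# For example (illustrative): a current volume of 1,500,000 against an average of
# 1,000,000 returns 150.0, i.e. the stock has traded 150% of its average daily volume.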
def volumeHighTest(self):
""" If the current volume for a stock is greater than the average volume, add the ticker to the global volulumalert string and return yes/no. """
newvolumehightest = ""
if getVolume(self) > getAvgDailyVolume(self):
newvolumehightest = "yes"
global volumealert
volumealert += ticker
else:
newvolumehightest = "no"
return newvolumehightest
def getStock(self):
"""wrapper for the Yahoo-Finance module warpper; if refactoring this could probably be eliminated"""
return Share(self)
def getPrice(self):
return self.get_price()
def percentChange(self):
if (mydaychange is not None) and (myopen is not None):
tickerPercentChange = (mydaychange/myopen)*100
return tickerPercentChange
else:
return 0
def index_price():
global sp500Change, sp500Price
sp500Price = float(sp500index.get_price())
sp500Open = float(sp500index.get_open())
sp500Change = float(sp500index.get_change())
global sp500percentChange
sp500percentChange = ((sp500Change/sp500Open)*100)
return (sp500Price, sp500Open, sp500Change, sp500percentChange)
def getOpen(self):
go = self.get_open()
if go is not None:
return float(self.get_open())
else:
return 1
def getVolume(self):
return float(self.get_volume())
def getAvgDailyVolume(self):
return float(self.get_avg_daily_volume())
def getDayChange(self):
"""Checks to see if the gdc is None (e.g. between ~9am-9:45am Yhaoo Finance returns N/A for change). If Yahoo-Finance returns N/A (none), then return 0 to prevent error. Returning 0 causes winLoss() to return invalid data."""
gdc = mystock.get_change()
if gdc is not None:
#print ticker, gdc
return float(self.get_change())
else:
return 1
def getDayHigh(self):
return self.get_days_high()
def getDayLow(self):
return self.get_days_low()
def getYearHigh(self):
return self.get_year_high()
def getYearLow(self):
return self.get_year_low()
def winList(self):
if getDayChange(self) > 0:
global winnerList
winnerList += ticker
def looseList(self):
if getDayChange(self) <= 0:
global looserList
looserList += ticker
def newHighTest(self):
newhighpricetest = ""
if getDayHigh(self) == getYearHigh(self):
newhighpricetest = "yes"
global yearhighlist
yearhighlist += ticker
else:
newhighpricetest = "no"
return newhighpricetest
def newLowTest(self):
newlowpricetest = ""
if getDayLow(self) == getYearLow(self):
newlowpricetest = "yes"
global yearlowlist
yearlowlist += ticker
else:
newlowpricetest = "no"
return newlowpricetest
def winReport():
print "\n", "Winners: \n", winnerList
def lossReport():
print "\n", "Loosers: \n", looserList
def volumeSummary():
if volumealert:
print "\n", "High Volume alert: \n" + volumealert
else:
print "\n", "High Volume alert:: \n", "No tickers in watchlist flagged for high volume \n"
def yearLowSummary():
if yearlowlist:
print "Hitting 52-week low: \n" + yearlowlist
else:
print "\n", "Hitting 52-week low: \n", "No tickers in watchlist hitting 52-week low \n"
def yearHighSummary():
#global yearhighlist
if yearhighlist:
print "\n", "Hitting 52-week high: \n" + yearhighlist
else:
print "\n", "Hitting 52-week high: \n", "No tickers in watchlist hitting 52-week high \n"
def detailTicker():
"""Primarily for debugging -d and modify this function to return what you're interested printing while looping trough ticker list"""
myofAverageVolume = ""
#debug detail line item; modify so that -d fits the test case.
if args.detail:
#print ticker, myprice, myvolume, myavgdailyvolume, mydaychange, mydayhigh, mydaylow, myyearhigh, myyearlow, mynewhightest, myvolumehightest, mynewlowtest, myopen, mypercentchange
tickerPrint = ticker.strip('\n')
print ticker, myprice, mydayhigh, myyearhigh, mydaylow, myyearlow
#print tickerPrint, myprice, str(round(mypercentchange, 2))+'%'
def noAction():
print "\n", args.filename + ": " + "Watchlist triggered no alerts today."
def index_summary():
print "S&P 500, Price: %s, Change: %s, Percent Change: %s" %(sp500Price, sp500Change, str(round(sp500percentChange,2))+'%'), "\n" #converts to string, and rounds my variable to 2 decimal places when printing
def outperformSP500(self):
#tickerPrint = ticker.strip('\n')
global sp500percentChange
if mypercentchange > sp500percentChange:
print ticker.strip('\n') + ", " + str(round(mypercentchange, 2))+'%' + ", " + str(round(myofAverageVolume, 2))+'%' + ", " + myprice
parser = ArgumentParser(description = 'Watchlist Filtering for Yahoo-Finance')
parser.add_argument("-f", "--file", required=True, dest="filename", help="file name of watchlist; expects plain text file with 1 ticker symbol per line, and no empty lines at the end", metavar="FILE")
parser.add_argument("-v", "--volume", action="store_true", dest="volumeFlag", default=False, help="high volume notification")
parser.add_argument("-nl", "--low", action="store_true", dest="newlowFlag", default=False, help="new 52-week low notification")
parser.add_argument("-d", "--detail", action="store_true", dest="detail", default=False, help="detailed ticker info")
parser.add_argument("-wl", "--wins", action="store_true", dest="winnerlist", default=False, help="outputs today's winners")
parser.add_argument("-ll", "--losses", action="store_true", dest="looserlist", default=False, help="outputs today's loosers")
parser.add_argument("-ip", "--indexprices", action="store_true", dest="indexprices", default=False, help="outputs current index prices")
parser.add_argument("-op", "--outperformance", action="store_true", dest="outperformance", default=False, help="prints tickers symbol and change info if ticker is outperformaning the day's S&P 500 performance. MUST USE WITH -ip")
parser.add_argument("-nh", "--newhighs", action="store_true", dest="newhighs", default=False, help="prints tickers from watchlist hitting new 52 week highs")
args = parser.parse_args()
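# Example invocation (a sketch; the script's file name here is an assumption):
#   python watchlist.py -f tickers.txt -ip -op -v -nh -nl -wl -ll
# This prints the S&P 500 summary, a header plus one line per ticker that is
# outperforming the index, and then the 52-week high/low, volume, winner and
# loser summaries for the watchlist.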
if not isMarketOpen():
print "Between 9:00am - 9:45am Eastern (UTC-0:500) Yahoo Finance input values are not initalized. Please re-run with '-ip / -op' after 9:45am. Other arguments will function normally."
exit()
starttime = timeit.default_timer()
watchList = open(args.filename)
ticker = watchList.readlines()
watchList.close()
print '\n%s\nRunning queries on %s symbols...' % (time.strftime('%Y-%m-%d %H:%M:%S'), len(ticker))
if args.indexprices:
indexinfo = index_price()
index_summary()
for ticker in ticker:
mystock = getStock(ticker)
myprice = getPrice(mystock)
myopen = getOpen(mystock)
myvolume = getVolume(mystock)
myavgdailyvolume = getAvgDailyVolume(mystock)
mydaychange = getDayChange(mystock)
mydayhigh = getDayHigh(mystock)
mydaylow = getDayLow(mystock)
if args.newhighs:
myyearhigh = getYearHigh(mystock)
if args.newlowFlag:
mynewlog = getYearLow(mystock)
myyearlow = getYearLow(mystock)
mynewhightest = newHighTest(mystock)
myvolumehightest = volumeHighTest(mystock)
mynewlowtest = newLowTest(mystock)
mypercentchange = percentChange(mystock)
winList(mystock)
looseList(mystock)
myofAverageVolume = ofAverageVolume(mystock)
detailTicker()
if args.outperformance:
        if globalTrip == 0:
print "Ticker, Percent Change, Percent of Average Volume, Price"
globalTrip = 1
outperformSP500(mystock)
if args.filename:
if args.newhighs:
yearHighSummary()
if args.newlowFlag:
yearLowSummary()
if args.volumeFlag:
volumeSummary()
if args.winnerlist:
winReport()
if args.looserlist:
lossReport()
stoptime = timeit.default_timer()
runtime = stoptime - starttime
print "Completed in: %s" % str(round(runtime, 2)), "seconds"
|
|
#!/usr/bin/env python
"""
This module has functions that generate random values for Django fields.
"""
import datetime
import os
import uuid
from decimal import Decimal
from itertools import ifilter
from itertools import imap
from itertools import islice
from random import choice
from random import randint
from random import shuffle
def generate_integer(bits=32, negative_allowed=True):
length = randint(1, bits - 1) - 1
positive = True
if negative_allowed:
positive = choice([True, False])
if positive:
low = (1 << length)
high = 2 * low - 1
if low == 1:
low = 0
return randint(low, high)
else:
high = -(1 << length) - 1
low = 2 * (high + 1)
if high == -2:
high = -1
return randint(low, high)
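# For example (illustrative): with bits=8 the length above ranges over 0..6, so the
# positive branch with length=5 draws uniformly from [32, 63] and the largest band
# (length=6) tops out at 2**7 - 1 = 127; the sign is picked independently when
# negative_allowed is True.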
def generate_big_integer():
return generate_integer(64)
def generate_int():
return generate_integer(32)
def generate_small_integer():
return generate_integer(16)
def generate_positive_integer():
return generate_integer(32, False)
def generate_positive_small_integer():
return generate_integer(16, False)
def generate_boolean(null_allowed=False):
res = randint(0, 1 + int(null_allowed))
if res < 2:
return bool(res)
def generate_ip():
return str.join('.', [str(randint(0, 255)) for _ in xrange(4)])
def generate_comma_separated_int(max_length):
parts = randint(0, (max_length - 1) / 4)
left = randint(1, min(3, max_length - 4 * parts))
number = [str(randint(int(bool(parts)), 10 ** left - 1))]
number.extend('%.3d' % randint(0, 999) for _ in xrange(parts))
return str.join(',', number)
def generate_string(max_length, lower=True, upper=True, digits=True,
special=True, null_allowed=False, exact_len=False):
vascii = dict([(chr(n), n) for n in xrange(128)])
allowed_characters = []
chars_in_range = lambda beg, end: [chr(n) for n in xrange(vascii[beg],
vascii[end] + 1)]
if lower:
allowed_characters.extend(chars_in_range('a', 'z'))
if upper:
allowed_characters.extend(chars_in_range('A', 'Z'))
if digits:
allowed_characters.extend(chars_in_range('0', '9'))
if special:
if (isinstance(special, list) or isinstance(special, tuple) or
isinstance(special, set)):
allowed_characters.extend(special)
elif special is True:
allowed_characters.extend(chars_in_range('!', '/'))
allowed_characters.extend(chars_in_range(':', '@'))
allowed_characters.extend(chars_in_range('[', '`'))
allowed_characters.extend(chars_in_range('{', '~'))
length = max_length
if not exact_len:
length = randint(1 - null_allowed, max_length)
return str.join('', [choice(allowed_characters) for _ in xrange(length)])
def generate_date_time(auto_now=False):
if auto_now:
return datetime.datetime.now()
else:
year = randint(1900, 2100)
month = randint(1, 12)
long_month = [1, 3, 5, 7, 8, 10, 12]
day = 0
if month in long_month:
day = randint(1, 31)
else:
if month == 2:
x = year
leap_year = int((x % 4 == 0 and not x % 100 == 0)
or x % 400 == 0)
day = randint(1, 28 + leap_year)
else:
day = randint(1, 30)
hour = randint(0, 23)
minute = randint(0, 59)
second = randint(0, 59)
microsecond = randint(0, 999999)
return datetime.datetime(year, month, day, hour, minute,
second, microsecond)
def generate_date(auto_now=False):
return generate_date_time(auto_now).date()
def generate_time(auto_now=False):
return generate_date_time(auto_now).time()
def generate_text(max_length, exact=False):
sentences = randint(1, max((max_length + 39) / 60, (max_length + 39) / 40))
rem_length = max_length
text = []
for idx in xrange(sentences):
length = rem_length / (sentences - idx)
length = min((rem_length) / (sentences - idx) + int(bool(idx)),
randint(2, 7) * 6, rem_length - int(bool(idx)))
if length > 0:
text.append(generate_sentence(length, exact=exact))
rem_length -= len(text[-1]) + int(bool(idx))
return str.join(' ', text)
def generate_sentence(max_length, lower=True, upper=False, digits=False,
seperators=[' '], end_char=['.'], exact=False):
if max_length < 3:
return generate_string(max_length, lower, upper, digits, special=False,
null_allowed=True, exact_len=True)
if not end_char:
end_char = ['']
max_length -= bool(end_char) and bool(any(char for char in end_char))
length = max_length
if not exact and length >= 5:
length = randint(1, max_length)
a = 5.0 / 6.0
no_words = randint(1, int(2 * length * (1 - a) + 1 - 2 * a) + 1)
max_word_length = int((length + 1) / no_words * a)
lengths = [randint(1, max_word_length) for _ in xrange(no_words)]
lengths.sort()
tot = length - no_words + 1 - sum(lengths)
while tot < 0 and lengths:
tot += lengths.pop()
tot += int(bool(lengths))
no_words -= 1
if tot > 1 and (exact or randint(0, 1) == 0 or not lengths):
lengths.append(tot - 1)
no_words += 1
shuffle(lengths)
words = [generate_string(word_length, lower, upper, digits, False,
False, True) for word_length in lengths]
words_endings = [choice(seperators) for _ in xrange(len(lengths) - 1)]
words_endings.append(choice(end_char))
words = map(lambda t: t[0] + t[1], zip(words, words_endings))
return str.join('', words)
def generate_decimal(max_digits, decimal_places):
integer_part_len = max_digits - decimal_places
integer_part = generate_string(integer_part_len, False, False, True,
False, False, False)
integer_part = str(int(integer_part))
decimal_part = generate_string(decimal_places, False, False, True,
False, False, False)
return Decimal('%s.%s' % (integer_part, decimal_part))
def generate_float(max_digits=50, decimal_places=30):
return float(generate_decimal(max_digits, decimal_places))
def generate_email(max_length, exact_len=False):
if max_length < 7:
return ''
dom = ['com', 'de', 'it', 'uk', 'edu', 'es', 'fr', 'eg', 'ru', 'pl', 'org',
'es', 'pk', 'jo', 'fe', 'se', 'tr', 'ch']
tot_length = (max_length - 5) / 2
parts = [generate_string(tot_length, lower=True, upper=False, digits=True,
special=False, null_allowed=False,
exact_len=exact_len) for _ in xrange(2)]
return '%s@%s.%s' % (parts[0], parts[1], choice(dom))
def generate_url(max_length):
if max_length < 16:
return ''
dom = ['com', 'de', 'it', 'uk', 'edu', 'es', 'fr', 'eg', 'ru', 'pl', 'org',
'es', 'pk', 'jo', 'fe', 'se', 'tr', 'ch']
domain = generate_sentence(randint(3, max_length - 11), lower=True,
digits=True, seperators=['.'], end_char=['.'])
domain += choice(dom)
suburl = ''
if len(domain) + 8 < max_length:
suburl = choice(['', '/'])
if randint(1, 6) > 2 and len(domain) + len(suburl) + 10 < max_length:
suburl = '/'
suburl += generate_sentence(max_length - len(domain) - 8 - len(suburl),
digits=True, seperators=[''],
end_char=['/', ''])
return '%s://%s%s' % (choice(['http', 'ftp', 'https']), domain, suburl)
def generate_uuid():
return uuid.uuid4()
def generate_file_path():
walk = os.walk(os.getcwd())
flt = ifilter(lambda path: not any(p.startswith('.')
for p in path[0]), walk)
flt = imap(lambda path: path[0], flt)
flt = islice(flt, 1000)
return choice(list(flt))
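# A minimal usage sketch (illustrative only; every value below is random, so the
# output differs between runs):
if __name__ == '__main__':
    print generate_integer(16)
    print generate_string(12, special=False)
    print generate_email(30)
    print generate_comma_separated_int(12)
    print generate_date_time()
    print generate_url(40)
    print generate_decimal(8, 3)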
|
|
# -*- coding: utf-8 -*-
#
# boltons documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 21 00:34:18 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import sphinx
from pprint import pprint
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_PATH = os.path.abspath(CUR_PATH + '/../')
PACKAGE_PATH = os.path.abspath(CUR_PATH + '/../boltons/')
sys.path.insert(0, PROJECT_PATH)
sys.path.insert(0, PACKAGE_PATH)
pprint(os.environ)
def get_mod_stats():
# TODO: docstring percentage.
import pkgutil
from boltons.funcutils import get_module_callables
mod_count = 0
tot_type_count = 0
tot_func_count = 0
ignore = lambda attr_name: attr_name.startswith('_')
for _, mod_name, _ in pkgutil.iter_modules([PACKAGE_PATH]):
if not mod_name.endswith('utils'):
continue
mod = __import__(mod_name)
types, funcs = get_module_callables(mod, ignore=ignore)
if not len(types) and not len(funcs):
continue
mod_count += 1
tot_type_count += len(types)
tot_func_count += len(funcs)
ret = (mod_count, tot_type_count, tot_func_count)
print ('==== %s modules ==== %s types ==== %s funcs ====' % ret)
return ret
B_MOD_COUNT, B_TYPE_COUNT, B_FUNC_COUNT = get_mod_stats()
rst_epilog = """
.. |b_mod_count| replace:: {mod_count}
.. |b_type_count| replace:: {type_count}
.. |b_func_count| replace:: {func_count}
""".format(mod_count=B_MOD_COUNT,
type_count=B_TYPE_COUNT,
func_count=B_FUNC_COUNT)
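# These substitutions can then be used in the project's .rst sources, e.g. (an
# illustrative sketch):
#
#   boltons currently ships |b_mod_count| "utils" modules containing
#   |b_type_count| types and |b_func_count| functions.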
# -- General configuration ------------------------------------------------
autosummary_generate = True
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Read the Docs is on Sphinx 1.2 as of writing
if sphinx.version_info[:2] < (1, 3):
extensions.append('sphinxcontrib.napoleon')
else:
extensions.append('sphinx.ext.napoleon')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'boltons'
copyright = u'2018, Mahmoud Hashemi'
author = u'Mahmoud Hashemi'
version = '18.0'
release = '18.0.0'
if os.name != 'nt':
today_fmt = '%B %d, %Y'
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/2.7', None)}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['_themes', sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# TEMP: see https://github.com/rtfd/readthedocs.org/issues/1692
# Add RTD Theme Path.
#if 'html_theme_path' in globals():
# html_theme_path.append('/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx')
#else:
# html_theme_path = ['_themes', '/home/docs/checkouts/readthedocs.org/readthedocs/templates/sphinx']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'boltonsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'boltons.tex', u'boltons Documentation',
u'Mahmoud Hashemi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'boltons', u'boltons Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'boltons', u'boltons Documentation',
author, 'boltons', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
|
"""Import tasks for the Harvard Center for Astrophysics."""
import csv
import os
from glob import glob
from math import floor
from astrocats.catalog.photometry import PHOTOMETRY
from astrocats.catalog.utils import (is_number, jd_to_mjd, pbar, pbar_strings,
uniq_cdl)
from astropy.time import Time as astrotime
from decimal import Decimal
from ..supernova import SUPERNOVA
from ..utils import clean_snname
ACKN_CFA = ("This research has made use of the CfA Supernova Archive, "
"which is funded in part by the National Science Foundation "
"through grant AST 0907903.")
def do_cfa_photo(catalog):
"""Import photometry from the CfA archive."""
from html import unescape
import re
task_str = catalog.get_current_task_str()
file_names = glob(
os.path.join(catalog.get_current_task_repo(), 'cfa-input/*.dat'))
for fname in pbar_strings(file_names, task_str):
f = open(fname, 'r')
tsvin = csv.reader(f, delimiter=' ', skipinitialspace=True)
csv_data = []
for r, row in enumerate(tsvin):
new = []
for item in row:
new.extend(item.split('\t'))
csv_data.append(new)
for r, row in enumerate(csv_data):
for c, col in enumerate(row):
csv_data[r][c] = col.strip()
csv_data[r] = [_f for _f in csv_data[r] if _f]
eventname = os.path.basename(os.path.splitext(fname)[0])
eventparts = eventname.split('_')
name = clean_snname(eventparts[0])
name = catalog.add_entry(name)
secondaryname = 'CfA Supernova Archive'
secondaryurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
secondarysource = catalog.entries[name].add_source(
name=secondaryname,
url=secondaryurl,
secondary=True,
acknowledgment=ACKN_CFA)
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name,
secondarysource)
year = re.findall(r'\d+', name)[0]
catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, year,
secondarysource)
eventbands = list(eventparts[1])
tu = 'MJD'
jdoffset = Decimal(0.)
for rc, row in enumerate(csv_data):
if len(row) > 0 and row[0][0] == "#":
if len(row[0]) > 2 and row[0][:3] == '#JD':
tu = 'JD'
rowparts = row[0].split('-')
jdoffset = Decimal(rowparts[1])
elif len(row[0]) > 6 and row[0][:7] == '#Julian':
tu = 'JD'
jdoffset = Decimal(0.)
elif len(row) > 1 and row[1].lower() == 'photometry':
for ci, col in enumerate(row[2:]):
if col[0] == "(":
refstr = ' '.join(row[2 + ci:])
refstr = refstr.replace('(', '').replace(')', '')
bibcode = unescape(refstr)
source = catalog.entries[name].add_source(
bibcode=bibcode)
elif len(row) > 1 and row[1] == 'HJD':
tu = 'HJD'
continue
elif len(row) > 0:
mjd = row[0]
for v, val in enumerate(row):
if v == 0:
if tu == 'JD':
mjd = str(jd_to_mjd(Decimal(val) + jdoffset))
tuout = 'MJD'
elif tu == 'HJD':
mjd = str(jd_to_mjd(Decimal(val)))
tuout = 'MJD'
else:
mjd = val
tuout = tu
elif v % 2 != 0:
if float(row[v]) < 90.0:
src = secondarysource + ',' + source
photodict = {
PHOTOMETRY.U_TIME: tuout,
PHOTOMETRY.TIME: mjd,
PHOTOMETRY.BAND_SET: 'Standard',
PHOTOMETRY.BAND: eventbands[(v - 1) // 2],
PHOTOMETRY.MAGNITUDE: row[v],
PHOTOMETRY.E_MAGNITUDE: row[v + 1],
PHOTOMETRY.SOURCE: src
}
catalog.entries[name].add_photometry(**photodict)
f.close()
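        # For example (illustrative): a data row such as
        #   53370.2  15.31 0.02  15.72 0.03
        # with event bands 'BV' is recorded as B = 15.31 +/- 0.02 and
        # V = 15.72 +/- 0.03 at MJD 53370.2.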
# Hicken 2012
with open(
os.path.join(catalog.get_current_task_repo(),
'hicken-2012-standard.dat'), 'r') as infile:
tsvin = list(csv.reader(infile, delimiter='|', skipinitialspace=True))
for r, row in enumerate(pbar(tsvin, task_str)):
if r <= 47:
continue
if row[0][:2] != 'sn':
name = 'SN' + row[0].strip()
else:
name = row[0].strip()
name = catalog.add_entry(name)
source = catalog.entries[name].add_source(
bibcode='2012ApJS..200...12H')
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, 'Ia',
source)
photodict = {
PHOTOMETRY.U_TIME: 'MJD',
PHOTOMETRY.TIME: row[2].strip(),
PHOTOMETRY.BAND: row[1].strip(),
PHOTOMETRY.BAND_SET: 'Standard',
PHOTOMETRY.MAGNITUDE: row[6].strip(),
PHOTOMETRY.E_MAGNITUDE: row[7].strip(),
PHOTOMETRY.SOURCE: source
}
catalog.entries[name].add_photometry(**photodict)
# Bianco 2014
with open(
os.path.join(catalog.get_current_task_repo(),
'bianco-2014-standard.dat'), 'r') as infile:
tsvin = list(csv.reader(infile, delimiter=' ', skipinitialspace=True))
for row in pbar(tsvin, task_str):
name = 'SN' + row[0]
name = catalog.add_entry(name)
source = catalog.entries[name].add_source(
bibcode='2014ApJS..213...19B')
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
photodict = {
PHOTOMETRY.U_TIME: 'MJD',
PHOTOMETRY.TIME: row[2],
PHOTOMETRY.BAND: row[1],
PHOTOMETRY.MAGNITUDE: row[3],
PHOTOMETRY.E_MAGNITUDE: row[4],
PHOTOMETRY.TELESCOPE: row[5],
PHOTOMETRY.BAND_SET: 'Standard',
PHOTOMETRY.SOURCE: source
}
catalog.entries[name].add_photometry(**photodict)
catalog.journal_entries()
return
def do_cfa_spectra(catalog):
"""Import spectra from the CfA archive."""
task_str = catalog.get_current_task_str()
# II spectra
oldname = ''
file_names = next(
os.walk(os.path.join(catalog.get_current_task_repo(), 'CfA_SNII')))[1]
for ni, name in enumerate(pbar_strings(file_names, task_str)):
fullpath = os.path.join(catalog.get_current_task_repo(),
'CfA_SNII/') + name
origname = name
if name.startswith('sn') and is_number(name[2:6]):
name = 'SN' + name[2:]
name = catalog.get_preferred_name(name)
if oldname and name != oldname:
catalog.journal_entries()
oldname = name
name = catalog.add_entry(name)
reference = 'CfA Supernova Archive'
refurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
source = catalog.entries[name].add_source(
name=reference,
url=refurl,
secondary=True,
acknowledgment=ACKN_CFA)
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
for fi, fname in enumerate(
sorted(
glob(fullpath + '/*'), key=lambda s: s.lower())):
filename = os.path.basename(fname)
fileparts = filename.split('-')
if origname.startswith('sn') and is_number(origname[2:6]):
year = fileparts[1][:4]
month = fileparts[1][4:6]
day = fileparts[1][6:]
instrument = fileparts[2].split('.')[0]
else:
year = fileparts[2][:4]
month = fileparts[2][4:6]
day = fileparts[2][6:]
instrument = fileparts[3].split('.')[0]
time = str(
astrotime(year + '-' + month + '-' + str(floor(float(day)))
.zfill(2)).mjd + float(day) - floor(float(day)))
f = open(fname, 'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
data = [list(i) for i in zip(*data)]
wavelengths = data[0]
fluxes = data[1]
errors = data[2]
sources = uniq_cdl([
source,
(catalog.entries[name]
.add_source(bibcode='2017arXiv170601030H'))
])
catalog.entries[name].add_spectrum(
u_wavelengths='Angstrom',
u_fluxes='erg/s/cm^2/Angstrom',
filename=filename,
wavelengths=wavelengths,
fluxes=fluxes,
u_time='MJD' if time else '',
time=time,
instrument=instrument,
u_errors='ergs/s/cm^2/Angstrom',
errors=errors,
source=sources,
dereddened=False,
deredshifted=False)
if catalog.args.travis and ni >= catalog.TRAVIS_QUERY_LIMIT:
break
catalog.journal_entries()
# Ia spectra
oldname = ''
file_names = next(
os.walk(os.path.join(catalog.get_current_task_repo(), 'CfA_SNIa')))[1]
for ni, name in enumerate(pbar_strings(file_names, task_str)):
fullpath = os.path.join(catalog.get_current_task_repo(),
'CfA_SNIa/') + name
origname = name
if name.startswith('sn') and is_number(name[2:6]):
name = 'SN' + name[2:]
if name.startswith('snf') and is_number(name[3:7]):
name = 'SNF' + name[3:]
name = catalog.get_preferred_name(name)
if oldname and name != oldname:
catalog.journal_entries()
oldname = name
name = catalog.add_entry(name)
reference = 'CfA Supernova Archive'
refurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
source = catalog.entries[name].add_source(
name=reference,
url=refurl,
secondary=True,
acknowledgment=ACKN_CFA)
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
for fi, fname in enumerate(
sorted(
glob(fullpath + '/*'), key=lambda s: s.lower())):
filename = os.path.basename(fname)
fileparts = filename.split('-')
if origname.startswith('sn') and is_number(origname[2:6]):
year = fileparts[1][:4]
month = fileparts[1][4:6]
day = fileparts[1][6:]
instrument = fileparts[2].split('.')[0]
else:
year = fileparts[2][:4]
month = fileparts[2][4:6]
day = fileparts[2][6:]
instrument = fileparts[3].split('.')[0]
time = str(
astrotime(year + '-' + month + '-' + str(floor(float(day)))
.zfill(2)).mjd + float(day) - floor(float(day)))
f = open(fname, 'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
data = [list(i) for i in zip(*data)]
wavelengths = data[0]
fluxes = data[1]
errors = data[2]
sources = uniq_cdl([
source, (catalog.entries[name]
.add_source(bibcode='2012AJ....143..126B')),
(catalog.entries[name]
.add_source(bibcode='2008AJ....135.1598M'))
])
catalog.entries[name].add_spectrum(
u_wavelengths='Angstrom',
u_fluxes='erg/s/cm^2/Angstrom',
filename=filename,
wavelengths=wavelengths,
fluxes=fluxes,
u_time='MJD' if time else '',
time=time,
instrument=instrument,
u_errors='ergs/s/cm^2/Angstrom',
errors=errors,
source=sources,
dereddened=False,
deredshifted=False)
if catalog.args.travis and ni >= catalog.TRAVIS_QUERY_LIMIT:
break
catalog.journal_entries()
# Ibc spectra
oldname = ''
file_names = next(
os.walk(os.path.join(catalog.get_current_task_repo(), 'CfA_SNIbc')))[1]
for ni, name in enumerate(pbar(file_names, task_str)):
fullpath = os.path.join(catalog.get_current_task_repo(),
'CfA_SNIbc/') + name
if name.startswith('sn') and is_number(name[2:6]):
name = 'SN' + name[2:]
name = catalog.get_preferred_name(name)
if oldname and name != oldname:
catalog.journal_entries()
oldname = name
name = catalog.add_entry(name)
reference = 'CfA Supernova Archive'
refurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
source = catalog.entries[name].add_source(
name=reference,
url=refurl,
secondary=True,
acknowledgment=ACKN_CFA)
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
for fi, fname in enumerate(
sorted(
glob(fullpath + '/*'), key=lambda s: s.lower())):
filename = os.path.basename(fname)
fileparts = filename.split('-')
instrument = ''
year = fileparts[1][:4]
month = fileparts[1][4:6]
day = fileparts[1][6:].split('.')[0]
if len(fileparts) > 2:
instrument = fileparts[-1].split('.')[0]
time = str(
astrotime(year + '-' + month + '-' + str(floor(float(day)))
.zfill(2)).mjd + float(day) - floor(float(day)))
f = open(fname, 'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
data = [list(i) for i in zip(*data)]
wavelengths = data[0]
fluxes = data[1]
sources = uniq_cdl([
source, catalog.entries[name]
.add_source(bibcode='2014AJ....147...99M')
])
catalog.entries[name].add_spectrum(
u_wavelengths='Angstrom',
u_fluxes='erg/s/cm^2/Angstrom',
wavelengths=wavelengths,
filename=filename,
fluxes=fluxes,
u_time='MJD' if time else '',
time=time,
instrument=instrument,
source=sources,
dereddened=False,
deredshifted=False)
if catalog.args.travis and ni >= catalog.TRAVIS_QUERY_LIMIT:
break
catalog.journal_entries()
# Other spectra
oldname = ''
file_names = next(
os.walk(os.path.join(catalog.get_current_task_repo(), 'CfA_Extra')))[1]
for ni, name in enumerate(pbar_strings(file_names, task_str)):
fullpath = os.path.join(catalog.get_current_task_repo(),
'CfA_Extra/') + name
if name.startswith('sn') and is_number(name[2:6]):
name = 'SN' + name[2:]
name = catalog.get_preferred_name(name)
if oldname and name != oldname:
catalog.journal_entries()
oldname = name
name = catalog.add_entry(name)
reference = 'CfA Supernova Archive'
refurl = 'https://www.cfa.harvard.edu/supernova/SNarchive.html'
source = catalog.entries[name].add_source(
name=reference,
url=refurl,
secondary=True,
acknowledgment=ACKN_CFA)
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name, source)
for fi, fname in enumerate(
sorted(
glob(fullpath + '/*'), key=lambda s: s.lower())):
if not os.path.isfile(fname):
continue
filename = os.path.basename(fname)
if ((not filename.startswith('sn') or
not filename.endswith('flm') or any(
x in filename
for x in ['-interp', '-z', '-dered', '-obj', '-gal']))):
continue
fileparts = filename.split('.')[0].split('-')
instrument = ''
time = ''
if len(fileparts) > 1:
year = fileparts[1][:4]
month = fileparts[1][4:6]
day = fileparts[1][6:]
if is_number(year) and is_number(month) and is_number(day):
if len(fileparts) > 2:
instrument = fileparts[-1]
time = str(
astrotime(year + '-' + month + '-' + str(
floor(float(day))).zfill(2)).mjd + float(day) -
floor(float(day)))
f = open(fname, 'r')
data = csv.reader(f, delimiter=' ', skipinitialspace=True)
data = [list(i) for i in zip(*data)]
wavelengths = data[0]
fluxes = [str(Decimal(x) * Decimal(1.0e-15)) for x in data[1]]
catalog.entries[name].add_spectrum(
u_wavelengths='Angstrom',
u_fluxes='erg/s/cm^2/Angstrom',
wavelengths=wavelengths,
filename=filename,
fluxes=fluxes,
u_time='MJD' if time else '',
time=time,
instrument=instrument,
source=source,
dereddened=False,
deredshifted=False)
if catalog.args.travis and ni >= catalog.TRAVIS_QUERY_LIMIT:
break
catalog.journal_entries()
return
|
|
import contextlib
from collections import namedtuple, defaultdict
from datetime import datetime
from dask.callbacks import Callback
# The names of the Ray-specific callbacks. These are the kwarg names that
# RayDaskCallback will accept on construction, and are considered the
# source-of-truth for what Ray-specific callbacks exist.
CBS = (
"ray_presubmit",
"ray_postsubmit",
"ray_pretask",
"ray_posttask",
"ray_postsubmit_all",
"ray_finish",
)
# The Ray-specific callback method names for RayDaskCallback.
CB_FIELDS = tuple("_" + field for field in CBS)
# The Ray-specific callbacks that we do _not_ wish to drop from RayCallbacks
# if not given on a RayDaskCallback instance (will be filled with None
# instead).
CBS_DONT_DROP = {"ray_pretask", "ray_posttask"}
# The Ray-specific callbacks for a single RayDaskCallback.
RayCallback = namedtuple("RayCallback", " ".join(CBS))
# The Ray-specific callbacks for one or more RayDaskCallbacks.
RayCallbacks = namedtuple("RayCallbacks", " ".join([field + "_cbs" for field in CBS]))
class RayDaskCallback(Callback):
"""
Extends Dask's `Callback` class with Ray-specific hooks. When instantiating
or subclassing this class, both the normal Dask hooks (e.g. pretask,
posttask, etc.) and the Ray-specific hooks can be provided.
See `dask.callbacks.Callback` for usage.
Caveats: Any Dask-Ray scheduler must bring the Ray-specific callbacks into
context using the `local_ray_callbacks` context manager, since the built-in
`local_callbacks` context manager provided by Dask isn't aware of this
class.
"""
# Set of active Ray-specific callbacks.
ray_active = set()
def __init__(self, **kwargs):
"""
Ray-specific callbacks:
- def _ray_presubmit(task, key, deps):
Run before submitting a Ray task. If this callback returns a
non-`None` value, a Ray task will _not_ be created and this
value will be used as the would-be task's result value.
Args:
task (tuple): A Dask task, where the first tuple item is
the task function, and the remaining tuple items are
the task arguments (either the actual argument values,
or Dask keys into the deps dictionary whose
corresponding values are the argument values).
key (str): The Dask graph key for the given task.
deps (dict): The dependencies of this task.
Returns:
Either None, in which case a Ray task will be submitted, or
a non-None value, in which case a Ray task will not be
submitted and this return value will be used as the
would-be task result value.
- def _ray_postsubmit(task, key, deps, object_ref):
Run after submitting a Ray task.
Args:
task (tuple): A Dask task, where the first tuple item is
the task function, and the remaining tuple items are
the task arguments (either the actual argument values,
or Dask keys into the deps dictionary whose
corresponding values are the argument values).
key (str): The Dask graph key for the given task.
deps (dict): The dependencies of this task.
object_ref (ray.ObjectRef): The object reference for the
return value of the Ray task.
- def _ray_pretask(key, object_refs):
Run before executing a Dask task within a Ray task. This
executes after the task has been submitted, within a Ray
worker. The return value of this task will be passed to the
_ray_posttask callback, if provided.
Args:
key (str): The Dask graph key for the Dask task.
object_refs (List[ray.ObjectRef]): The object references
for the arguments of the Ray task.
Returns:
A value that will be passed to the corresponding
_ray_posttask callback, if said callback is defined.
- def _ray_posttask(key, result, pre_state):
Run after executing a Dask task within a Ray task. This
executes within a Ray worker. This callback receives the return
value of the _ray_pretask callback, if provided.
Args:
key (str): The Dask graph key for the Dask task.
result (object): The task result value.
pre_state (object): The return value of the corresponding
_ray_pretask callback, if said callback is defined.
- def _ray_postsubmit_all(object_refs, dsk):
Run after all Ray tasks have been submitted.
Args:
object_refs (List[ray.ObjectRef]): The object references
for the output (leaf) Ray tasks of the task graph.
dsk (dict): The Dask graph.
- def _ray_finish(result):
Run after all Ray tasks have finished executing and the final
result has been returned.
Args:
result (object): The final result (output) of the Dask
computation, before any repackaging is done by
Dask collection-specific post-compute callbacks.
"""
for cb in CBS:
cb_func = kwargs.pop(cb, None)
if cb_func is not None:
setattr(self, "_" + cb, cb_func)
super().__init__(**kwargs)
@property
def _ray_callback(self):
return RayCallback(*[getattr(self, field, None) for field in CB_FIELDS])
def __enter__(self):
self._ray_cm = add_ray_callbacks(self)
self._ray_cm.__enter__()
super().__enter__()
return self
def __exit__(self, *args):
super().__exit__(*args)
self._ray_cm.__exit__(*args)
def register(self):
type(self).ray_active.add(self._ray_callback)
super().register()
def unregister(self):
type(self).ray_active.remove(self._ray_callback)
super().unregister()
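# A minimal usage sketch (illustrative; assumes Ray is initialized and that a
# Dask-on-Ray scheduler is available, e.g. as `ray_dask_get`):
#
#     class TimingCallback(RayDaskCallback):
#         def _ray_pretask(self, key, object_refs):
#             # Runs inside the Ray worker before the Dask task executes.
#             return datetime.now()
#
#         def _ray_posttask(self, key, result, pre_state):
#             print(key, "took", (datetime.now() - pre_state).total_seconds(), "s")
#
#     with TimingCallback():
#         dask_collection.compute(scheduler=ray_dask_get)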
class add_ray_callbacks:
def __init__(self, *callbacks):
self.callbacks = [normalize_ray_callback(c) for c in callbacks]
RayDaskCallback.ray_active.update(self.callbacks)
def __enter__(self):
return self
def __exit__(self, *args):
for c in self.callbacks:
RayDaskCallback.ray_active.discard(c)
def normalize_ray_callback(cb):
if isinstance(cb, RayDaskCallback):
return cb._ray_callback
elif isinstance(cb, RayCallback):
return cb
else:
raise TypeError(
"Callbacks must be either 'RayDaskCallback' or 'RayCallback' " "namedtuple"
)
def unpack_ray_callbacks(cbs):
"""Take an iterable of callbacks, return a list of each callback."""
if cbs:
# Only drop callback methods that aren't in CBS_DONT_DROP.
return RayCallbacks(
*(
[cb for cb in cbs_ if cb or CBS[idx] in CBS_DONT_DROP] or None
for idx, cbs_ in enumerate(zip(*cbs))
)
)
else:
return RayCallbacks(*([()] * len(CBS)))
@contextlib.contextmanager
def local_ray_callbacks(callbacks=None):
"""
Allows Dask-Ray callbacks to work with nested schedulers.
Callbacks will only be used by the first started scheduler they encounter.
This means that only the outermost scheduler will use global callbacks.
"""
global_callbacks = callbacks is None
if global_callbacks:
callbacks, RayDaskCallback.ray_active = (RayDaskCallback.ray_active, set())
try:
yield callbacks or ()
finally:
if global_callbacks:
RayDaskCallback.ray_active = callbacks
class ProgressBarCallback(RayDaskCallback):
def __init__(self):
import ray
@ray.remote
class ProgressBarActor:
def __init__(self):
self._init()
def submit(self, key, deps, now):
for dep in deps.keys():
self.deps[key].add(dep)
self.submitted[key] = now
self.submission_queue.append((key, now))
def task_scheduled(self, key, now):
self.scheduled[key] = now
def finish(self, key, now):
self.finished[key] = now
def result(self):
return len(self.submitted), len(self.finished)
def report(self):
result = defaultdict(dict)
for key, finished in self.finished.items():
submitted = self.submitted[key]
scheduled = self.scheduled[key]
# deps = self.deps[key]
result[key]["execution_time"] = (
finished - scheduled
).total_seconds()
# Calculate the scheduling time.
# This is inaccurate.
# We should subtract scheduled - (last dep completed).
# But currently it is not easy because
# of how getitem is implemented in dask on ray sort.
result[key]["scheduling_time"] = (
scheduled - submitted
).total_seconds()
result["submission_order"] = self.submission_queue
return result
def ready(self):
pass
def reset(self):
self._init()
def _init(self):
self.submission_queue = []
self.submitted = defaultdict(None)
self.scheduled = defaultdict(None)
self.finished = defaultdict(None)
self.deps = defaultdict(set)
try:
self.pb = ray.get_actor("_dask_on_ray_pb")
ray.get(self.pb.reset.remote())
except ValueError:
self.pb = ProgressBarActor.options(name="_dask_on_ray_pb").remote()
ray.get(self.pb.ready.remote())
def _ray_postsubmit(self, task, key, deps, object_ref):
# Indicate the dask task is submitted.
self.pb.submit.remote(key, deps, datetime.now())
def _ray_pretask(self, key, object_refs):
self.pb.task_scheduled.remote(key, datetime.now())
def _ray_posttask(self, key, result, pre_state):
# Indicate the dask task is finished.
self.pb.finish.remote(key, datetime.now())
def _ray_finish(self, result):
print("All tasks are completed.")
|
|
# Load and dump a diagnostics database in CDD format.
import logging
from xml.etree import ElementTree
from ..data import Data
from ..did import Did
from ..internal_database import InternalDatabase
from ...errors import ParseError
from ...utils import cdd_offset_to_dbc_start_bit
LOGGER = logging.getLogger(__name__)
class DataType(object):
def __init__(self,
name,
id_,
bit_length,
encoding,
minimum,
maximum,
choices,
byte_order,
unit,
factor,
offset):
self.name = name
self.id_ = id_
self.bit_length = bit_length
self.encoding = encoding
self.minimum = minimum
self.maximum = maximum
self.choices = choices
self.byte_order = byte_order
self.unit = unit
self.factor = factor
self.offset = offset
def _load_choices(data_type):
choices = {}
for choice in data_type.findall('TEXTMAP'):
start = int(choice.attrib['s'].strip('()'))
end = int(choice.attrib['e'].strip('()'))
if start == end:
choices[start] = choice.find('TEXT/TUV[1]').text
if not choices:
choices = None
return choices
def _load_data_types(ecu_doc):
"""Load all data types found in given ECU doc element.
"""
data_types = {}
types = ecu_doc.findall('DATATYPES/IDENT')
types += ecu_doc.findall('DATATYPES/LINCOMP')
types += ecu_doc.findall('DATATYPES/TEXTTBL')
types += ecu_doc.findall('DATATYPES/STRUCTDT')
types += ecu_doc.findall('DATATYPES/EOSITERDT')
for data_type in types:
# Default values.
byte_order = 'big_endian'
unit = None
factor = 1
offset = 0
bit_length = None
encoding = None
minimum = None
maximum = None
# Name and id.
type_name = data_type.find('NAME/TUV[1]').text
type_id = data_type.attrib['id']
# Load from C-type element.
ctype = data_type.find('CVALUETYPE')
for key, value in ctype.attrib.items():
if key == 'bl':
bit_length = int(value)
elif key == 'enc':
encoding = value
elif key == 'minsz':
minimum = int(value)
elif key == 'maxsz':
maximum = int(value)
else:
LOGGER.debug("Ignoring unsupported attribute '%s'.", key)
if ctype.attrib['bo'] == '21':
byte_order = 'big_endian'
elif ctype.attrib['bo'] == '12':
byte_order = 'little_endian'
else:
raise ParseError("Unknown byte order code: %s" % ctype.attrib['bo'])
# Load from P-type element.
ptype_unit = data_type.find('PVALUETYPE/UNIT')
if ptype_unit is not None:
unit = ptype_unit.text
        # Choices.
choices = _load_choices(data_type)
# Slope and offset.
comp = data_type.find('COMP')
if comp is not None:
factor = float(comp.attrib['f'])
offset = float(comp.attrib['o'])
data_types[type_id] = DataType(type_name,
type_id,
bit_length,
encoding,
minimum,
maximum,
choices,
byte_order,
unit,
factor,
offset)
return data_types
def _load_data_element(data, offset, data_types):
"""Load given signal element and return a signal object.
"""
data_type = data_types[data.attrib['dtref']]
# Map CDD/c-style field offset to the DBC/can.Signal.start bit numbering
    # convention for compatibility with can.Signal objects and the shared codec
# infrastructure.
#
dbc_start_bitnum = cdd_offset_to_dbc_start_bit(offset, data_type.bit_length, data_type.byte_order)
return Data(name=data.find('QUAL').text,
                start=dbc_start_bitnum,
length=data_type.bit_length,
                byte_order=data_type.byte_order,
scale=data_type.factor,
offset=data_type.offset,
minimum=data_type.minimum,
maximum=data_type.maximum,
unit=data_type.unit,
choices=data_type.choices)
def _load_did_element(did, data_types):
"""Load given DID element and return a did object.
"""
offset = 0
datas = []
data_objs = did.findall('SIMPLECOMPCONT/DATAOBJ')
data_objs += did.findall('SIMPLECOMPCONT/UNION/STRUCT/DATAOBJ')
for data_obj in data_objs:
data = _load_data_element(data_obj,
offset,
data_types)
if data:
datas.append(data)
offset += data.length
identifier = int(did.find('STATICVALUE').attrib['v'])
name = did.find('QUAL').text
length = (offset + 7) // 8
return Did(identifier=identifier,
name=name,
length=length,
datas=datas)
def load_string(string):
"""Parse given CDD format string.
"""
root = ElementTree.fromstring(string)
ecu_doc = root.find('ECUDOC')
data_types = _load_data_types(ecu_doc)
var = ecu_doc.findall('ECU')[0].find('VAR')
dids = []
for diag_class in var.findall('DIAGCLASS'):
for diag_inst in diag_class.findall('DIAGINST'):
did = _load_did_element(diag_inst,
data_types)
dids.append(did)
return InternalDatabase(dids)
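# A minimal usage sketch (illustrative; 'example.cdd' is an assumed file name and
# the returned InternalDatabase is assumed to expose the parsed DIDs as `.dids`):
#
#     with open('example.cdd', 'r') as fin:
#         db = load_string(fin.read())
#     for did in db.dids:
#         print(did.identifier, did.name, did.length)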
|
|
# -*- coding: utf-8 -*-
"""
Django settings for test_project project.
Generated by 'django-admin startproject' using Django 1.11.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from logging.handlers import SysLogHandler
from modoboa.test_settings import * # noqa
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "w537@nm@5n)=+e%-7*z-jxf21a#0k%uv^rbu**+cj4=_u57e(8"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = "DEBUG" in os.environ
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = [
"127.0.0.1",
"localhost",
]
SITE_ID = 1
# Security settings
X_FRAME_OPTIONS = "SAMEORIGIN"
# Application definition
INSTALLED_APPS = (
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.sites",
"django.contrib.staticfiles",
"reversion",
"ckeditor",
"ckeditor_uploader",
"rest_framework",
"rest_framework.authtoken",
'django_otp',
'django_otp.plugins.otp_totp',
'django_otp.plugins.otp_static',
)
# A dedicated place to register Modoboa applications
# Do not delete it.
# Do not change the order.
MODOBOA_APPS = (
"modoboa",
"modoboa.core",
"modoboa.lib",
"modoboa.admin",
"modoboa.transport",
"modoboa.relaydomains",
"modoboa.limits",
"modoboa.parameters",
"modoboa.dnstools",
"modoboa.maillog",
# Modoboa extensions here.
"modoboa_amavis",
)
INSTALLED_APPS += MODOBOA_APPS
AUTH_USER_MODEL = "core.User"
MIDDLEWARE = (
"x_forwarded_for.middleware.XForwardedForMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
'django_otp.middleware.OTPMiddleware',
'modoboa.core.middleware.TwoFAMiddleware',
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"modoboa.core.middleware.LocalConfigMiddleware",
"modoboa.lib.middleware.AjaxLoginRedirect",
"modoboa.lib.middleware.CommonExceptionCatcher",
"modoboa.lib.middleware.RequestCatcherMiddleware",
)
AUTHENTICATION_BACKENDS = (
# 'modoboa.lib.authbackends.LDAPBackend',
# 'modoboa.lib.authbackends.SMTPBackend',
"django.contrib.auth.backends.ModelBackend",
)
# SMTP authentication
# AUTH_SMTP_SERVER_ADDRESS = 'localhost'
# AUTH_SMTP_SERVER_PORT = 25
# AUTH_SMTP_SECURED_MODE = None # 'ssl' or 'starttls' are accepted
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"modoboa.core.context_processors.top_notifications",
],
"debug": TEMPLATE_DEBUG,
},
},
]
ROOT_URLCONF = "test_project.urls"
WSGI_APPLICATION = "test_project.wsgi.application"
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = "/sitestatic/"
STATIC_ROOT = os.path.join(BASE_DIR, "sitestatic")
STATICFILES_DIRS = (
# os.path.join(BASE_DIR, '..', 'modoboa', 'bower_components'),
)
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
# Rest framework settings
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.TokenAuthentication",
"rest_framework.authentication.SessionAuthentication",
),
}
# Modoboa settings
# MODOBOA_CUSTOM_LOGO = os.path.join(MEDIA_URL, "custom_logo.png")
# DOVECOT_LOOKUP_PATH = ('/path/to/dovecot', )
MODOBOA_API_URL = "https://api.modoboa.org/1/"
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # NOQA:E501
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", # NOQA:E501
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", # NOQA:E501
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", # NOQA:E501
},
{
"NAME": "modoboa.core.password_validation.ComplexityValidator",
"OPTIONS": {
"upper": 1,
"lower": 1,
"digits": 1,
"specials": 0
}
},
]
# CKeditor
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_IMAGE_BACKEND = "pillow"
CKEDITOR_RESTRICT_BY_USER = True
CKEDITOR_BROWSE_SHOW_DIRS = True
CKEDITOR_ALLOW_NONIMAGE_FILES = False
CKEDITOR_CONFIGS = {
"default": {
"allowedContent": True,
"toolbar": "Modoboa",
"width": None,
"toolbar_Modoboa": [
["Bold", "Italic", "Underline"],
["JustifyLeft", "JustifyCenter", "JustifyRight", "JustifyBlock"],
["BidiLtr", "BidiRtl", "Language"],
["NumberedList", "BulletedList", "-", "Outdent", "Indent"],
["Undo", "Redo"],
["Link", "Unlink", "Anchor", "-", "Smiley"],
["TextColor", "BGColor", "-", "Source"],
["Font", "FontSize"],
["Image", ],
["SpellChecker"]
],
},
}
# Logging configuration
LOGGING = {
"version": 1,
"formatters": {
"syslog": {
"format": "%(name)s: %(levelname)s %(message)s"
},
},
"handlers": {
"syslog-auth": {
"class": "logging.handlers.SysLogHandler",
"facility": SysLogHandler.LOG_AUTH,
"formatter": "syslog"
},
"modoboa": {
"class": "modoboa.core.loggers.SQLHandler",
}
},
"loggers": {
"modoboa.auth": {
"handlers": ["syslog-auth", "modoboa"],
"level": "INFO",
"propagate": False
},
"modoboa.admin": {
"handlers": ["modoboa"],
"level": "INFO",
"propagate": False
}
}
}
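# With this configuration, a record logged through
# logging.getLogger("modoboa.auth") should go both to syslog (LOG_AUTH
# facility) and to Modoboa's SQL handler, while "modoboa.admin" records only
# go to the SQL handler.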
# Load settings from extensions
try:
from modoboa_amavis import settings as modoboa_amavis_settings
modoboa_amavis_settings.apply(globals())
except AttributeError:
from modoboa_amavis.settings import * # noqa
MIGRATION_MODULES = {
"modoboa_amavis": None
}
TEST_RUNNER = "modoboa_amavis.test_runners.UnManagedModelTestRunner"
# We force sqlite backend for tests because the generated database is
# not the same as the one provided by amavis...
DATABASES.update({ # noqa
"amavis": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "amavis.db",
"PORT": "",
"ATOMIC_REQUESTS": True,
},
})
# sqlite defaults to UTF-8
AMAVIS_DEFAULT_DATABASE_ENCODING = "UTF-8"
|
|
"""Nodes that make up parse trees
Parsing spits out a tree of these, which you can then tell to walk itself and
spit out a useful value. Or you can walk it yourself; the structural attributes
are public.
"""
# TODO: If this is slow, think about using cElementTree or something.
from inspect import isfunction
from sys import version_info, exc_info
from parsimonious.exceptions import VisitationError, UndefinedLabel
from parsimonious.utils import StrAndRepr
from six import (reraise, python_2_unicode_compatible, with_metaclass,
                 iteritems)
@python_2_unicode_compatible
class Node(StrAndRepr):
"""A parse tree node
Consider these immutable once constructed. As a side effect of a
memory-saving strategy in the cache, multiple references to a single
``Node`` might be returned in a single parse tree. So, if you start
messing with one, you'll see surprising parallel changes pop up elsewhere.
My philosophy is that parse trees (and their nodes) should be
representation-agnostic. That is, they shouldn't get all mixed up with what
the final rendered form of a wiki page (or the intermediate representation
of a programming language, or whatever) is going to be: you should be able
to parse once and render several representations from the tree, one after
another.
"""
# I tried making this subclass list, but it got ugly. I had to construct
# invalid ones and patch them up later, and there were other problems.
__slots__ = ['expr_name', # The name of the expression that generated me
'full_text', # The full text fed to the parser
'start', # The position in the text where that expr started matching
                 'end',     # The position after start where the expr first didn't
# match. [start:end] follow Python slice conventions.
'children'] # List of child parse tree nodes
def __init__(self, expr_name, full_text, start, end, children=None):
self.expr_name = expr_name
self.full_text = full_text
self.start = start
self.end = end
self.children = children or []
def __iter__(self):
"""Support looping over my children and doing tuple unpacks on me.
It can be very handy to unpack nodes in arg lists; see
:class:`PegVisitor` for an example.
"""
return iter(self.children)
@property
def text(self):
"""Return the text this node matched."""
return self.full_text[self.start:self.end]
# From here down is just stuff for testing and debugging.
def prettily(self, error=None):
"""Return a unicode, pretty-printed representation of me.
:arg error: The node to highlight because an error occurred there
"""
# TODO: If a Node appears multiple times in the tree, we'll point to
# them all. Whoops.
def indent(text):
return '\n'.join((' ' + line) for line in text.splitlines())
ret = [u'<%s%s matching "%s">%s' % (
self.__class__.__name__,
(' called "%s"' % self.expr_name) if self.expr_name else '',
self.text,
' <-- *** We were here. ***' if error is self else '')]
for n in self:
ret.append(indent(n.prettily(error=error)))
return '\n'.join(ret)
def __str__(self):
"""Return a compact, human-readable representation of me."""
return self.prettily()
def __eq__(self, other):
"""Support by-value deep comparison with other nodes for testing."""
return (other is not None and
self.expr_name == other.expr_name and
self.full_text == other.full_text and
self.start == other.start and
self.end == other.end and
self.children == other.children)
def __ne__(self, other):
return not self == other
def __repr__(self, top_level=True):
"""Return a bit of code (though not an expression) that will recreate
me."""
# repr() of unicode flattens everything out to ASCII, so we don't need
# to explicitly encode things afterward.
ret = ["s = %r" % self.full_text] if top_level else []
ret.append("%s(%r, s, %s, %s%s)" % (
self.__class__.__name__,
self.expr_name,
self.start,
self.end,
(', children=[%s]' %
', '.join([c.__repr__(top_level=False) for c in self.children]))
if self.children else ''))
return '\n'.join(ret)
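# Illustrative sketch (not part of the library): a Node spanning characters
# 0-5 of its source text reports that slice as ``text``.
#
#     n = Node('greeting', 'hello world', 0, 5)
#     n.text        # -> 'hello'  (full_text[start:end])
#     n.prettily()  # -> '<Node called "greeting" matching "hello">'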
class RegexNode(Node):
"""Node returned from a ``Regex`` expression
Grants access to the ``re.Match`` object, in case you want to access
capturing groups, etc.
"""
__slots__ = ['match']
class RuleDecoratorMeta(type):
def __new__(metaclass, name, bases, namespace):
def unvisit(name):
"""Remove any leading "visit_" from a method name."""
return name[6:] if name.startswith('visit_') else name
methods = [v for k, v in iteritems(namespace) if
hasattr(v, '_rule') and isfunction(v)]
if methods:
from parsimonious.grammar import Grammar # circular import dodge
methods.sort(key=(lambda x: x.func_code.co_firstlineno)
if version_info[0] < 3 else
(lambda x: x.__code__.co_firstlineno))
# Possible enhancement: once we get the Grammar extensibility story
# solidified, we can have @rules *add* to the default grammar
# rather than pave over it.
namespace['grammar'] = Grammar(
'\n'.join('{name} = {expr}'.format(name=unvisit(m.__name__),
expr=m._rule)
for m in methods))
return super(RuleDecoratorMeta,
metaclass).__new__(metaclass, name, bases, namespace)
class NodeVisitor(with_metaclass(RuleDecoratorMeta, object)):
"""A shell for writing things that turn parse trees into something useful
Performs a depth-first traversal of an AST. Subclass this, add methods for
each expr you care about, instantiate, and call
``visit(top_node_of_parse_tree)``. It'll return the useful stuff. This API
is very similar to that of ``ast.NodeVisitor``.
These could easily all be static methods, but that would add at least as
much weirdness at the call site as the ``()`` for instantiation. And this
    way, we support subclasses that require state: options, for example,
or a symbol table constructed from a programming language's AST.
We never transform the parse tree in place, because...
* There are likely multiple references to the same ``Node`` object in a
parse tree, and changes to one reference would surprise you elsewhere.
* It makes it impossible to report errors: you'd end up with the "error"
arrow pointing someplace in a half-transformed mishmash of nodes--and
that's assuming you're even transforming the tree into another tree.
Heaven forbid you're making it into a string or something else.
"""
#: The :term:`default grammar`: the one recommended for use with this
#: visitor. If you populate this, you will be able to call
#: :meth:`NodeVisitor.parse()` as a shortcut.
grammar = None
#: Classes of exceptions you actually intend to raise during visitation
    #: and which should propagate out of the visitor. These will not be
#: wrapped in a VisitationError when they arise.
unwrapped_exceptions = ()
# TODO: If we need to optimize this, we can go back to putting subclasses
# in charge of visiting children; they know when not to bother. Or we can
# mark nodes as not descent-worthy in the grammar.
def visit(self, node):
"""Walk a parse tree, transforming it into another representation.
Recursively descend a parse tree, dispatching to the method named after
the rule in the :class:`~parsimonious.grammar.Grammar` that produced
each node. If, for example, a rule was... ::
bold = '<b>'
...the ``visit_bold()`` method would be called. It is your
responsibility to subclass :class:`NodeVisitor` and implement those
methods.
"""
method = getattr(self, 'visit_' + node.expr_name, self.generic_visit)
# Call that method, and show where in the tree it failed if it blows
# up.
try:
return method(node, [self.visit(n) for n in node])
except (VisitationError, UndefinedLabel):
# Don't catch and re-wrap already-wrapped exceptions.
raise
except self.unwrapped_exceptions:
raise
except Exception:
# Catch any exception, and tack on a parse tree so it's easier to
# see where it went wrong.
exc_class, exc, tb = exc_info()
reraise(VisitationError, VisitationError(exc, exc_class, node), tb)
def generic_visit(self, node, visited_children):
"""Default visitor method
:arg node: The node we're visiting
:arg visited_children: The results of visiting the children of that
node, in a list
I'm not sure there's an implementation of this that makes sense across
all (or even most) use cases, so we leave it to subclasses to implement
for now.
"""
raise NotImplementedError("No visitor method was defined for %s." %
node.expr_name)
# Convenience methods:
def parse(self, text, pos=0):
"""Parse some text with this Visitor's default grammar.
``SomeVisitor().parse('some_string')`` is a shortcut for
``SomeVisitor().visit(some_grammar.parse('some_string'))``.
"""
return self._parse_or_match(text, pos, 'parse')
def match(self, text, pos=0):
"""Parse some text with this Visitor's default grammar, but don't
insist on parsing all the way to the end.
``SomeVisitor().match('some_string')`` is a shortcut for
``SomeVisitor().visit(some_grammar.match('some_string'))``.
"""
return self._parse_or_match(text, pos, 'match')
# Internal convenience methods to help you write your own visitors:
def lift_child(self, node, _a):
"""Lift the sole child of ``node`` up to replace the node."""
(first_child,) = _a
return first_child
# Private methods:
def _parse_or_match(self, text, pos, method_name):
"""Execute a parse or match on the default grammar, followed by a
visitation.
Raise RuntimeError if there is no default grammar specified.
"""
if not self.grammar:
raise RuntimeError(
"The {cls}.{method}() shortcut won't work because {cls} was "
"never associated with a specific " "grammar. Fill out its "
"`grammar` attribute, and try again.".format(
cls=self.__class__.__name__,
method=method_name))
return self.visit(getattr(self.grammar, method_name)(text, pos=pos))
def rule(rule_string):
"""Decorate a NodeVisitor ``visit_*`` method to tie a grammar rule to it.
The following will arrange for the ``visit_digit`` method to receive the
results of the ``~"[0-9]"`` parse rule::
@rule('~"[0-9]"')
def visit_digit(self, node, visited_children):
...
Notice that there is no "digit = " as part of the rule; that gets inferred
from the method name.
In cases where there is only one kind of visitor interested in a grammar,
using ``@rule`` saves you having to look back and forth between the visitor
and the grammar definition.
On an implementation level, all ``@rule`` rules get stitched together into
    a :class:`~parsimonious.Grammar` that becomes the NodeVisitor's
:term:`default grammar`.
Typically, the choice of a default rule for this grammar is simple: whatever
``@rule`` comes first in the class is the default. But the choice may become
surprising if you divide the ``@rule`` calls among subclasses. At the
moment, which method "comes first" is decided simply by comparing line
numbers, so whatever method is on the smallest-numbered line will be the
default. In a future release, this will change to pick the
first ``@rule`` call on the basemost class that has one. That way, a
subclass which does not override the default rule's ``visit_*`` method
won't unintentionally change which rule is the default.
"""
def decorator(method):
        # XXX: Maybe register the rule strings on a class var instead, so a
        # subclass can override a @rule'd visitor method without blowing away
        # the rule string that comes with it.
        method._rule = rule_string
return method
return decorator
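# Sketch of the @rule workflow described above (illustrative only; the class
# and the rule expression are made up):
#
#     class DigitVisitor(NodeVisitor):
#         @rule('~"[0-9]"')
#         def visit_digit(self, node, visited_children):
#             return int(node.text)
#
# Because ``visit_digit`` carries the first (and only) @rule in the class,
# ``digit`` becomes the default rule of the generated default grammar, so
# ``DigitVisitor().parse('7')`` returns 7 without naming a grammar anywhere.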
|
|
# $Id$
#
# Copyright (C) 2007,2008 Greg Landrum
#
# @@ All Rights Reserved @@
#
import os,sys
import io
import unittest
from rdkit.six.moves import cPickle
from rdkit import RDConfig
from rdkit import DataStructs as ds
def feq(v1,v2,tol=1e-4):
return abs(v1-v2)<tol
class TestCase(unittest.TestCase):
def setUp(self) :
pass
def test1Int(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
self.assertRaises(IndexError,lambda:v1[5])
v1[0]=1
v1[2]=2
v1[3]=3
self.assertTrue(v1==v1)
self.assertTrue(v1.GetLength()==5)
v2= ds.IntSparseIntVect(5)
self.assertTrue(v1!=v2)
v2|=v1
self.assertTrue(v2==v1)
v3=v2|v1
self.assertTrue(v3==v1)
onVs = v1.GetNonzeroElements()
self.assertTrue(onVs=={0:1,2:2,3:3})
def test2Long(self):
"""
"""
l=1<<42
v1 = ds.LongSparseIntVect(l)
self.assertRaises(IndexError,lambda:v1[l])
v1[0]=1
v1[2]=2
v1[1<<35]=3
self.assertTrue(v1==v1)
self.assertTrue(v1.GetLength()==l)
v2= ds.LongSparseIntVect(l)
self.assertTrue(v1!=v2)
v2|=v1
self.assertTrue(v2==v1)
v3=v2|v1
self.assertTrue(v3==v1)
onVs = v1.GetNonzeroElements()
self.assertTrue(onVs=={0:1,2:2,1<<35:3})
def test3Pickle1(self):
"""
"""
l=1<<42
v1 = ds.LongSparseIntVect(l)
self.assertRaises(IndexError,lambda:v1[l+1])
v1[0]=1
v1[2]=2
v1[1<<35]=3
self.assertTrue(v1==v1)
v2= cPickle.loads(cPickle.dumps(v1))
self.assertTrue(v2==v1)
v3= ds.LongSparseIntVect(v2.ToBinary())
self.assertTrue(v2==v3)
self.assertTrue(v1==v3)
#cPickle.dump(v1,file('lsiv.pkl','wb+'))
with open(
os.path.join(RDConfig.RDBaseDir,
'Code/DataStructs/Wrap/testData/lsiv.pkl'),
'r'
) as tf:
buf = tf.read().replace('\r\n', '\n').encode('utf-8')
with io.BytesIO(buf) as f:
v3 = cPickle.load(f)
self.assertTrue(v3==v1)
def test3Pickle2(self):
"""
"""
l=1<<21
v1 = ds.IntSparseIntVect(l)
self.assertRaises(IndexError,lambda:v1[l+1])
v1[0]=1
v1[2]=2
v1[1<<12]=3
self.assertTrue(v1==v1)
v2= cPickle.loads(cPickle.dumps(v1))
self.assertTrue(v2==v1)
v3= ds.IntSparseIntVect(v2.ToBinary())
self.assertTrue(v2==v3)
self.assertTrue(v1==v3)
#cPickle.dump(v1,file('isiv.pkl','wb+'))
with open(
os.path.join(RDConfig.RDBaseDir,
'Code/DataStructs/Wrap/testData/isiv.pkl'),
'r'
) as tf:
buf = tf.read().replace('\r\n', '\n').encode('utf-8')
with io.BytesIO(buf) as f:
v3 = cPickle.load(f)
self.assertTrue(v3==v1)
def test4Update(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
self.assertRaises(IndexError,lambda:v1[6])
v1[0]=1
v1[2]=2
v1[3]=3
self.assertTrue(v1==v1)
v2 = ds.IntSparseIntVect(5)
v2.UpdateFromSequence((0,2,3,3,2,3))
self.assertTrue(v1==v2)
def test5Dice(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
v1[4]=4;
v1[0]=2;
v1[3]=1;
self.assertTrue(feq(ds.DiceSimilarity(v1,v1),1.0))
v1 = ds.IntSparseIntVect(5)
v1[0]=2;
v1[2]=1;
v1[3]=4;
v1[4]=6;
v2 = ds.IntSparseIntVect(5)
v2[1]=2;
v2[2]=3;
v2[3]=4;
v2[4]=4;
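    # For count vectors, Dice similarity is 2*sum(min(v1_i, v2_i)) divided by
    # (sum(v1) + sum(v2)); here that is 2*(1 + 4 + 4) / (13 + 13) = 18/26.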
self.assertTrue(feq(ds.DiceSimilarity(v1,v2),18.0/26.))
self.assertTrue(feq(ds.DiceSimilarity(v2,v1),18.0/26.))
def test6BulkDice(self):
"""
"""
sz=10
nToSet=5
nVs=6
import random
vs = []
for i in range(nVs):
v = ds.IntSparseIntVect(sz)
for j in range(nToSet):
v[random.randint(0,sz-1)]=random.randint(1,10)
vs.append(v)
baseDs = [ds.DiceSimilarity(vs[0],vs[x]) for x in range(1,nVs)]
bulkDs = ds.BulkDiceSimilarity(vs[0],vs[1:])
for i in range(len(baseDs)):
self.assertTrue(feq(baseDs[i],bulkDs[i]))
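  # The next test relies on two identities: Tversky similarity with
  # alpha = beta = 0.5 reduces to Dice, and with alpha = beta = 1.0 it
  # reduces to Tanimoto.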
def test6BulkTversky(self):
"""
"""
sz=10
nToSet=5
nVs=6
import random
vs = []
for i in range(nVs):
v = ds.IntSparseIntVect(sz)
for j in range(nToSet):
v[random.randint(0,sz-1)]=random.randint(1,10)
vs.append(v)
baseDs = [ds.TverskySimilarity(vs[0],vs[x],.5,.5) for x in range(1,nVs)]
bulkDs = ds.BulkTverskySimilarity(vs[0],vs[1:],0.5,0.5)
diceDs = [ds.DiceSimilarity(vs[0],vs[x]) for x in range(1,nVs)]
for i in range(len(baseDs)):
self.assertTrue(feq(baseDs[i],bulkDs[i]))
self.assertTrue(feq(baseDs[i],diceDs[i]))
bulkDs = ds.BulkTverskySimilarity(vs[0],vs[1:],1.0,1.0)
taniDs = [ds.TanimotoSimilarity(vs[0],vs[x]) for x in range(1,nVs)]
for i in range(len(bulkDs)):
self.assertTrue(feq(bulkDs[i],taniDs[i]))
taniDs = ds.BulkTanimotoSimilarity(vs[0],vs[1:])
for i in range(len(bulkDs)):
self.assertTrue(feq(bulkDs[i],taniDs[i]))
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# coding: utf-8
import logging
from multiprocessing.pool import ThreadPool
import pprint
import struct
import threading
import time
import traceback
import unittest
import environment
import tablet
import utils
from protocols_flavor import protocols_flavor
from vtdb import dbexceptions
from vtdb import keyrange
from vtdb import keyrange_constants
from vtdb import vtdb_logger
from vtdb import vtgate_cursor
from vtdb import vtgate_client
shard_0_master = tablet.Tablet()
shard_0_replica1 = tablet.Tablet()
shard_0_replica2 = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_replica1 = tablet.Tablet()
shard_1_replica2 = tablet.Tablet()
KEYSPACE_NAME = 'test_keyspace'
SHARD_NAMES = ['-80', '80-']
SHARD_KID_MAP = {
'-80': [
527875958493693904, 626750931627689502,
345387386794260318, 332484755310826578,
1842642426274125671, 1326307661227634652,
1761124146422844620, 1661669973250483744,
3361397649937244239, 2444880764308344533],
'80-': [
9767889778372766922, 9742070682920810358,
10296850775085416642, 9537430901666854108,
10440455099304929791, 11454183276974683945,
11185910247776122031, 10460396697869122981,
13379616110062597001, 12826553979133932576],
}
CREATE_VT_INSERT_TEST = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
keyspace_id bigint(20) unsigned NOT NULL,
primary key (id)
) Engine=InnoDB'''
CREATE_VT_A = '''create table vt_a (
eid bigint,
id int,
keyspace_id bigint(20) unsigned NOT NULL,
primary key(eid, id)
) Engine=InnoDB'''
CREATE_VT_FIELD_TYPES = '''create table vt_field_types (
id bigint(20) auto_increment,
uint_val bigint(20) unsigned,
str_val varchar(64),
unicode_val varchar(64),
float_val float(5, 1),
keyspace_id bigint(20) unsigned NOT NULL,
primary key(id)
) Engine=InnoDB'''
create_tables = [CREATE_VT_INSERT_TEST, CREATE_VT_A, CREATE_VT_FIELD_TYPES]
pack_kid = struct.Struct('!Q').pack
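# pack_kid serializes a keyspace id as a big-endian unsigned 64-bit value,
# e.g. pack_kid(1) == '\x00\x00\x00\x00\x00\x00\x00\x01' (8 bytes).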
class DBRow(object):
def __init__(self, column_names, row_tuple):
self.__dict__ = dict(zip(column_names, row_tuple))
def __repr__(self):
return pprint.pformat(self.__dict__, 4)
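# DBRow gives attribute-style access to a result row, e.g.
# DBRow(['id', 'msg'], (1, 'hello')).msg == 'hello'.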
def setUpModule():
logging.debug('in setUpModule')
try:
environment.topo_server().setup()
# start mysql instance external to the test
setup_procs = [shard_0_master.init_mysql(),
shard_0_replica1.init_mysql(),
shard_0_replica2.init_mysql(),
shard_1_master.init_mysql(),
shard_1_replica1.init_mysql(),
shard_1_replica2.init_mysql()
]
utils.wait_procs(setup_procs)
setup_tablets()
except:
tearDownModule()
raise
def tearDownModule():
logging.debug('in tearDownModule')
if utils.options.skip_teardown:
return
logging.debug('Tearing down the servers and setup')
tablet.kill_tablets([shard_0_master,
shard_0_replica1, shard_0_replica2,
shard_1_master,
shard_1_replica1, shard_1_replica2])
teardown_procs = [shard_0_master.teardown_mysql(),
shard_0_replica1.teardown_mysql(),
shard_0_replica2.teardown_mysql(),
shard_1_master.teardown_mysql(),
shard_1_replica1.teardown_mysql(),
shard_1_replica2.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
shard_0_master.remove_tree()
shard_0_replica1.remove_tree()
shard_0_replica2.remove_tree()
shard_1_master.remove_tree()
shard_1_replica1.remove_tree()
shard_1_replica2.remove_tree()
def setup_tablets():
# Start up a master mysql and vttablet
logging.debug('Setting up tablets')
utils.run_vtctl(['CreateKeyspace', KEYSPACE_NAME])
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', KEYSPACE_NAME,
'keyspace_id', 'uint64'])
shard_0_master.init_tablet('master', keyspace=KEYSPACE_NAME, shard='-80')
shard_0_replica1.init_tablet('replica', keyspace=KEYSPACE_NAME, shard='-80')
shard_0_replica2.init_tablet('replica', keyspace=KEYSPACE_NAME, shard='-80')
shard_1_master.init_tablet('master', keyspace=KEYSPACE_NAME, shard='80-')
shard_1_replica1.init_tablet('replica', keyspace=KEYSPACE_NAME, shard='80-')
shard_1_replica2.init_tablet('replica', keyspace=KEYSPACE_NAME, shard='80-')
utils.run_vtctl(['RebuildKeyspaceGraph', KEYSPACE_NAME], auto_log=True)
for t in [shard_0_master, shard_0_replica1, shard_0_replica2,
shard_1_master, shard_1_replica1, shard_1_replica2]:
t.create_db('vt_test_keyspace')
for create_table in create_tables:
t.mquery(shard_0_master.dbname, create_table)
t.start_vttablet(wait_for_state=None)
for t in [shard_0_master, shard_0_replica1, shard_0_replica2,
shard_1_master, shard_1_replica1, shard_1_replica2]:
t.wait_for_vttablet_state('SERVING')
utils.run_vtctl(['InitShardMaster', KEYSPACE_NAME+'/-80',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', KEYSPACE_NAME+'/80-',
shard_1_master.tablet_alias], auto_log=True)
utils.run_vtctl(
['RebuildKeyspaceGraph', KEYSPACE_NAME], auto_log=True)
utils.check_srv_keyspace(
'test_nj', KEYSPACE_NAME,
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n')
utils.VtGate().start()
def get_connection(timeout=10.0):
protocol = protocols_flavor().vtgate_python_protocol()
try:
return vtgate_client.connect(protocol, utils.vtgate.addr(), timeout)
except Exception:
logging.exception('Connection to vtgate (timeout=%s) failed.', timeout)
raise
def get_keyrange(shard_name):
kr = None
if shard_name == keyrange_constants.SHARD_ZERO:
kr = keyrange.KeyRange(keyrange_constants.NON_PARTIAL_KEYRANGE)
else:
kr = keyrange.KeyRange(shard_name)
return kr
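# Illustration: get_keyrange('-80') covers the first half of the keyspace,
# while the special '0' shard name (keyrange_constants.SHARD_ZERO) presumably
# maps to the full, non-partial keyrange.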
def _delete_all(shard_index, table_name):
vtgate_conn = get_connection()
  # This write sets up the test with a fresh insert, and hence it is
  # performed directly on the connection.
vtgate_conn.begin()
vtgate_conn._execute('delete from %s' % table_name, {},
KEYSPACE_NAME, 'master',
keyranges=[get_keyrange(SHARD_NAMES[shard_index])])
vtgate_conn.commit()
def do_write(count, shard_index):
kid_list = SHARD_KID_MAP[SHARD_NAMES[shard_index]]
_delete_all(shard_index, 'vt_insert_test')
vtgate_conn = get_connection()
for x in xrange(count):
keyspace_id = kid_list[x % len(kid_list)]
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyspace_ids=[pack_kid(keyspace_id)],
writable=True)
cursor.begin()
cursor.execute(
'insert into vt_insert_test (msg, keyspace_id) '
'values (%(msg)s, %(keyspace_id)s)',
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
cursor.commit()
def restart_vtgate(extra_args=None):
if extra_args is None:
extra_args = {}
port = utils.vtgate.port
utils.vtgate.kill()
utils.VtGate(port=port).start(extra_args=extra_args)
class BaseTestCase(unittest.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
logging.info('Start: %s.', '.'.join(self.id().split('.')[-2:]))
class TestCoreVTGateFunctions(BaseTestCase):
def setUp(self):
super(TestCoreVTGateFunctions, self).setUp()
self.shard_index = 1
self.keyrange = get_keyrange(SHARD_NAMES[self.shard_index])
self.master_tablet = shard_1_master
self.replica_tablet = shard_1_replica1
def test_status(self):
self.assertIn('</html>', utils.vtgate.get_status())
def test_connect(self):
vtgate_conn = get_connection()
self.assertNotEqual(vtgate_conn, None)
def test_writes(self):
vtgate_conn = get_connection()
_delete_all(self.shard_index, 'vt_insert_test')
count = 10
kid_list = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]]
for x in xrange(count):
keyspace_id = kid_list[count%len(kid_list)]
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyspace_ids=[pack_kid(keyspace_id)],
writable=True)
cursor.begin()
cursor.execute(
'insert into vt_insert_test (msg, keyspace_id) '
'values (%(msg)s, %(keyspace_id)s)',
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
cursor.commit()
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
rowcount = cursor.execute('select * from vt_insert_test', {})
self.assertEqual(rowcount, count, 'master fetch works')
def test_query_routing(self):
"""Test VtGate routes queries to the right tablets."""
row_counts = [20, 30]
for shard_index in [0, 1]:
do_write(row_counts[shard_index], shard_index)
vtgate_conn = get_connection()
for shard_index in [0, 1]:
# Fetch all rows in each shard
cursor = vtgate_conn.cursor(
KEYSPACE_NAME, 'master',
keyranges=[get_keyrange(SHARD_NAMES[shard_index])])
rowcount = cursor.execute('select * from vt_insert_test', {})
# Verify row count
self.assertEqual(rowcount, row_counts[shard_index])
# Verify keyspace id
for result in cursor.results:
kid = result[2]
self.assertIn(kid, SHARD_KID_MAP[SHARD_NAMES[shard_index]])
# Do a cross shard range query and assert all rows are fetched
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyranges=[get_keyrange('75-95')])
rowcount = cursor.execute('select * from vt_insert_test', {})
self.assertEqual(rowcount, row_counts[0] + row_counts[1])
def test_rollback(self):
vtgate_conn = get_connection()
count = 10
_delete_all(self.shard_index, 'vt_insert_test')
kid_list = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]]
for x in xrange(count):
keyspace_id = kid_list[x%len(kid_list)]
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyspace_ids=[pack_kid(keyspace_id)],
writable=True)
cursor.begin()
cursor.execute(
'insert into vt_insert_test (msg, keyspace_id) '
'values (%(msg)s, %(keyspace_id)s)',
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
cursor.commit()
vtgate_conn.begin()
vtgate_conn._execute(
'delete from vt_insert_test', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
vtgate_conn.rollback()
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
rowcount = cursor.execute('select * from vt_insert_test', {})
logging.debug('ROLLBACK TEST rowcount %d count %d', rowcount, count)
self.assertEqual(
rowcount, count,
"Fetched rows(%d) != inserted rows(%d), rollback didn't work" %
(rowcount, count))
do_write(10, self.shard_index)
def test_execute_entity_ids(self):
vtgate_conn = get_connection()
count = 10
_delete_all(self.shard_index, 'vt_a')
eid_map = {}
kid_list = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]]
for x in xrange(count):
keyspace_id = kid_list[x%len(kid_list)]
eid_map[x] = pack_kid(keyspace_id)
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyspace_ids=[pack_kid(keyspace_id)],
writable=True)
cursor.begin()
cursor.execute(
'insert into vt_a (eid, id, keyspace_id) '
'values (%(eid)s, %(id)s, %(keyspace_id)s)',
{'eid': x, 'id': x, 'keyspace_id': keyspace_id})
cursor.commit()
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master', keyspace_ids=None)
rowcount = cursor.execute(
'select * from vt_a', {},
entity_keyspace_id_map=eid_map, entity_column_name='id')
self.assertEqual(rowcount, count, 'entity_ids works')
def test_batch_read(self):
vtgate_conn = get_connection()
count = 10
_delete_all(self.shard_index, 'vt_insert_test')
shard_name = SHARD_NAMES[self.shard_index]
kid_list = SHARD_KID_MAP[shard_name]
for x in xrange(count):
keyspace_id = kid_list[x%len(kid_list)]
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyspace_ids=[pack_kid(keyspace_id)],
writable=True)
cursor.begin()
cursor.execute(
'insert into vt_insert_test (msg, keyspace_id) '
'values (%(msg)s, %(keyspace_id)s)',
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id})
cursor.commit()
_delete_all(self.shard_index, 'vt_a')
for x in xrange(count):
keyspace_id = kid_list[x%len(kid_list)]
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyspace_ids=[pack_kid(keyspace_id)],
writable=True)
cursor.begin()
cursor.execute(
'insert into vt_a (eid, id, keyspace_id) '
'values (%(eid)s, %(id)s, %(keyspace_id)s)',
{'eid': x, 'id': x, 'keyspace_id': keyspace_id})
cursor.commit()
kid_list = [pack_kid(kid) for kid in kid_list]
cursor = vtgate_conn.cursor(keyspace=None, tablet_type='master')
params_list = [
dict(sql='select msg, keyspace_id from vt_insert_test',
bind_variables={},
keyspace=KEYSPACE_NAME, keyspace_ids=kid_list,
shards=None),
dict(sql='select eid, id, keyspace_id from vt_a',
bind_variables={},
keyspace=KEYSPACE_NAME,
keyspace_ids=None,
shards=[shard_name]),
dict(sql='select eid + 100, id, keyspace_id from vt_a',
bind_variables={},
keyspace=KEYSPACE_NAME, keyspace_ids=kid_list,
shards=None),
]
cursor.executemany(sql=None, params_list=params_list)
self.assertEqual(cursor.rowcount, count)
msg_0, msg_1 = (row[0] for row in sorted(cursor.fetchall())[:2])
self.assertEqual(msg_0, 'test 0')
self.assertEqual(msg_1, 'test 1')
self.assertTrue(cursor.nextset())
self.assertEqual(cursor.rowcount, count)
eid_0, eid_1 = (row[0] for row in sorted(cursor.fetchall())[:2])
self.assertEqual(eid_0, 0)
self.assertEqual(eid_1, 1)
self.assertTrue(cursor.nextset())
eid_0_plus_100, eid_1_plus_100 = (
row[0] for row in sorted(cursor.fetchall())[:2])
self.assertEqual(eid_0_plus_100, 100)
self.assertEqual(eid_1_plus_100, 101)
self.assertFalse(cursor.nextset())
def test_batch_write(self):
vtgate_conn = get_connection()
cursor = vtgate_conn.cursor(keyspace=None, tablet_type='master')
kid_list = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]]
all_ids = [pack_kid(kid) for kid in kid_list]
count = 10
cursor.executemany(
sql=None,
params_list=[
dict(sql='delete from vt_insert_test', bind_variables=None,
keyspace=KEYSPACE_NAME, keyspace_ids=all_ids,
shards=None)])
params_list = []
for x in xrange(count):
keyspace_id = kid_list[x%len(kid_list)]
params_list.append(
dict(sql=None,
bind_variables=
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id},
keyspace=KEYSPACE_NAME,
keyspace_ids=[pack_kid(keyspace_id)],
shards=None))
cursor.executemany(
sql='insert into vt_insert_test (msg, keyspace_id) '
'values (%(msg)s, %(keyspace_id)s)',
params_list=params_list)
cursor.executemany(
sql=None,
params_list=[
dict(sql='delete from vt_a', bind_variables=None,
keyspace=KEYSPACE_NAME, keyspace_ids=all_ids, shards=None)])
params_list = []
for x in xrange(count):
keyspace_id = kid_list[x%len(kid_list)]
sql = (
'insert into vt_a (eid, id, keyspace_id) '
'values (%(eid)s, %(id)s, %(keyspace_id)s)')
bind_variables = {'eid': x, 'id': x, 'keyspace_id': keyspace_id}
keyspace = KEYSPACE_NAME
keyspace_ids = [pack_kid(keyspace_id)]
params_list.append(dict(
sql=sql, bind_variables=bind_variables, keyspace=keyspace,
keyspace_ids=keyspace_ids, shards=None))
cursor.executemany(sql=None, params_list=params_list)
_, rowcount, _, _ = vtgate_conn._execute(
'select * from vt_insert_test', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
self.assertEqual(rowcount, count)
_, rowcount, _, _ = vtgate_conn._execute(
'select * from vt_a', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
self.assertEqual(rowcount, count)
def test_streaming_fetchsubset(self):
count = 30
do_write(count, self.shard_index)
# Fetch a subset of the total size.
vtgate_conn = get_connection()
def get_stream_cursor():
return vtgate_conn.cursor(
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange],
cursorclass=vtgate_cursor.StreamVTGateCursor)
def fetch_first_10_rows(stream_cursor):
stream_cursor.execute('select msg from vt_insert_test', {})
rows = stream_cursor.fetchmany(size=10)
self.assertEqual(rows, [('test %d' % x,) for x in xrange(10)])
def fetch_next_10_rows(stream_cursor):
rows = stream_cursor.fetchmany(size=10)
self.assertEqual(rows, [('test %d' % x,) for x in xrange(10, 20)])
stream_cursor_1 = get_stream_cursor()
stream_cursor_2 = get_stream_cursor()
fetch_first_10_rows(stream_cursor_1)
fetch_first_10_rows(stream_cursor_2)
fetch_next_10_rows(stream_cursor_1)
fetch_next_10_rows(stream_cursor_2)
stream_cursor_1.close()
stream_cursor_2.close()
def test_streaming_fetchall(self):
count = 30
do_write(count, self.shard_index)
# Fetch all.
vtgate_conn = get_connection()
stream_cursor = vtgate_conn.cursor(
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange],
cursorclass=vtgate_cursor.StreamVTGateCursor)
stream_cursor.execute('select * from vt_insert_test', {})
rows = stream_cursor.fetchall()
rowcount = len(list(rows))
self.assertEqual(rowcount, count)
stream_cursor.close()
def test_streaming_fetchone(self):
count = 30
do_write(count, self.shard_index)
# Fetch one.
vtgate_conn = get_connection()
stream_cursor = vtgate_conn.cursor(
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange],
cursorclass=vtgate_cursor.StreamVTGateCursor)
stream_cursor.execute('select * from vt_insert_test', {})
rows = stream_cursor.fetchone()
self.assertTrue(type(rows) == tuple, 'Received a valid row')
stream_cursor.close()
def test_streaming_multishards(self):
count = 30
do_write(count, 0)
do_write(count, 1)
vtgate_conn = get_connection()
stream_cursor = vtgate_conn.cursor(
KEYSPACE_NAME, 'master',
keyranges=[keyrange.KeyRange(
keyrange_constants.NON_PARTIAL_KEYRANGE)],
cursorclass=vtgate_cursor.StreamVTGateCursor)
stream_cursor.execute('select * from vt_insert_test', {})
rows = stream_cursor.fetchall()
rowcount = len(list(rows))
self.assertEqual(rowcount, count * 2)
stream_cursor.close()
def test_streaming_zero_results(self):
vtgate_conn = get_connection()
vtgate_conn.begin()
vtgate_conn._execute('delete from vt_insert_test', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
vtgate_conn.commit()
    # After deletion, the query should return zero rows.
stream_cursor = vtgate_conn.cursor(
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange],
cursorclass=vtgate_cursor.StreamVTGateCursor)
stream_cursor.execute('select * from vt_insert_test', {})
rows = stream_cursor.fetchall()
rowcount = len(list(rows))
self.assertEqual(rowcount, 0)
def test_interleaving(self):
tablet_type = 'master'
try:
vtgate_conn = get_connection()
vtgate_conn.begin()
vtgate_conn._execute(
'delete from vt_insert_test', {},
KEYSPACE_NAME, tablet_type,
keyranges=[self.keyrange])
kid_list = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]]
count = len(kid_list)
for x in xrange(count):
keyspace_id = kid_list[x]
vtgate_conn._execute(
'insert into vt_insert_test (msg, keyspace_id) '
'values (%(msg)s, %(keyspace_id)s)',
{'msg': 'test %s' % x, 'keyspace_id': keyspace_id},
KEYSPACE_NAME, tablet_type, keyspace_ids=[pack_kid(keyspace_id)])
vtgate_conn.commit()
vtgate_conn2 = get_connection()
query = (
'select keyspace_id from vt_insert_test where keyspace_id = %(kid)s')
thd = threading.Thread(target=self._query_lots, args=(
vtgate_conn2,
query,
{'kid': kid_list[0]},
KEYSPACE_NAME,
tablet_type,
[pack_kid(kid_list[0])]))
thd.start()
for i in xrange(count):
(result, _, _, _) = vtgate_conn._execute(
query,
{'kid': kid_list[i]},
KEYSPACE_NAME, tablet_type,
keyspace_ids=[pack_kid(kid_list[i])])
self.assertEqual(result, [(kid_list[i],)])
if i % 10 == 0:
generator, _ = vtgate_conn._stream_execute(
query, {'kid': kid_list[i]}, KEYSPACE_NAME,
tablet_type,
keyspace_ids=[pack_kid(kid_list[i])])
for result in generator:
self.assertEqual(result, (kid_list[i],))
thd.join()
except Exception, e:
self.fail('Failed with error %s %s' % (str(e), traceback.format_exc()))
def test_field_types(self):
vtgate_conn = get_connection()
_delete_all(self.shard_index, 'vt_field_types')
count = 10
base_uint = int('8' + '0' * 15, base=16)
kid_list = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]]
for x in xrange(1, count):
keyspace_id = kid_list[count % len(kid_list)]
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyspace_ids=[pack_kid(keyspace_id)],
writable=True)
cursor.begin()
cursor.execute(
'insert into vt_field_types '
'(uint_val, str_val, unicode_val, float_val, keyspace_id) '
'values (%(uint_val)s, %(str_val)s, %(unicode_val)s, '
'%(float_val)s, %(keyspace_id)s)',
{'uint_val': base_uint + x, 'str_val': 'str_%d' % x,
'unicode_val': unicode('str_%d' % x), 'float_val': x * 1.2,
'keyspace_id': keyspace_id})
cursor.commit()
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
rowcount = cursor.execute('select * from vt_field_types', {})
field_names = [f[0] for f in cursor.description]
self.assertEqual(rowcount, count -1, "rowcount doesn't match")
id_list = []
uint_val_list = []
str_val_list = []
unicode_val_list = []
float_val_list = []
for r in cursor.results:
row = DBRow(field_names, r)
id_list.append(row.id)
uint_val_list.append(row.uint_val)
str_val_list.append(row.str_val)
unicode_val_list.append(row.unicode_val)
float_val_list.append(row.float_val)
# iterable type checks - list, tuple, set are supported.
query = 'select * from vt_field_types where id in %(id_1)s'
rowcount = cursor.execute(query, {'id_1': id_list})
self.assertEqual(rowcount, len(id_list), "rowcount doesn't match")
rowcount = cursor.execute(query, {'id_1': tuple(id_list)})
self.assertEqual(rowcount, len(id_list), "rowcount doesn't match")
rowcount = cursor.execute(query, {'id_1': set(id_list)})
self.assertEqual(rowcount, len(id_list), "rowcount doesn't match")
for r in cursor.results:
row = DBRow(field_names, r)
self.assertIsInstance(row.id, (int, long))
# received field types same as input.
# uint
query = 'select * from vt_field_types where uint_val in %(uint_val_1)s'
rowcount = cursor.execute(query, {'uint_val_1': uint_val_list})
self.assertEqual(rowcount, len(uint_val_list), "rowcount doesn't match")
for _, r in enumerate(cursor.results):
row = DBRow(field_names, r)
self.assertIsInstance(row.uint_val, long)
self.assertGreaterEqual(
row.uint_val, base_uint, 'uint value not in correct range')
# str
query = 'select * from vt_field_types where str_val in %(str_val_1)s'
rowcount = cursor.execute(query, {'str_val_1': str_val_list})
self.assertEqual(rowcount, len(str_val_list), "rowcount doesn't match")
for i, r in enumerate(cursor.results):
row = DBRow(field_names, r)
self.assertIsInstance(row.str_val, str)
# unicode str
query = (
'select * from vt_field_types where unicode_val in %(unicode_val_1)s')
rowcount = cursor.execute(query, {'unicode_val_1': unicode_val_list})
self.assertEqual(
rowcount, len(unicode_val_list), "rowcount doesn't match")
for i, r in enumerate(cursor.results):
row = DBRow(field_names, r)
self.assertIsInstance(row.unicode_val, basestring)
# deliberately eliminating the float test since it is flaky due
# to mysql float precision handling.
def _query_lots(
self, conn, query, bind_vars, keyspace_name, tablet_type, keyspace_ids):
for _ in xrange(500):
result, _, _, _ = conn._execute(
query, bind_vars, keyspace_name, tablet_type,
keyspace_ids=keyspace_ids)
self.assertEqual(result, [tuple(bind_vars.values())])
class TestFailures(BaseTestCase):
def setUp(self):
super(TestFailures, self).setUp()
self.shard_index = 1
self.keyrange = get_keyrange(SHARD_NAMES[self.shard_index])
self.master_tablet = shard_1_master
self.master_tablet.kill_vttablet()
self.tablet_start(self.master_tablet, 'master')
self.replica_tablet = shard_1_replica1
self.replica_tablet.kill_vttablet()
self.tablet_start(self.replica_tablet, 'replica')
self.replica_tablet2 = shard_1_replica2
self.replica_tablet2.kill_vttablet()
self.tablet_start(self.replica_tablet2, 'replica')
port = utils.vtgate.port
utils.vtgate.kill()
utils.VtGate(port=port).start()
def tablet_start(self, tablet, tablet_type, lameduck_period='0.5s'):
_ = tablet_type
return tablet.start_vttablet(lameduck_period=lameduck_period)
def test_status_with_error(self):
"""Tests that the status page loads correctly after a VTGate error."""
vtgate_conn = get_connection()
cursor = vtgate_conn.cursor(
'INVALID_KEYSPACE', 'replica', keyspace_ids=['0'])
# We expect to see a DatabaseError due to an invalid keyspace
with self.assertRaises(dbexceptions.DatabaseError):
cursor.execute('select * from vt_insert_test', {})
# Page should have loaded successfully
self.assertIn('</html>', utils.vtgate.get_status())
def test_tablet_restart_read(self):
# Since we're going to kill the tablet, there will be a race between the
# client timeout here and the vtgate->vttablet connection timeout, so we
# increase it for this test.
vtgate_conn = get_connection(timeout=30)
self.replica_tablet.kill_vttablet()
self.replica_tablet2.kill_vttablet()
with self.assertRaises(dbexceptions.DatabaseError):
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
self.tablet_start(self.replica_tablet, 'replica')
self.tablet_start(self.replica_tablet2, 'replica')
try:
_ = vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
except Exception, e:
self.fail('Communication with shard %s replica failed with error %s' %
(SHARD_NAMES[self.shard_index], str(e)))
def test_vtgate_restart_read(self):
vtgate_conn = get_connection()
port = utils.vtgate.port
utils.vtgate.kill()
with self.assertRaises(dbexceptions.OperationalError):
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
utils.VtGate(port=port).start()
vtgate_conn = get_connection()
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
def test_tablet_restart_stream_execute(self):
# Since we're going to kill the tablet, there will be a race between the
# client timeout here and the vtgate->vttablet connection timeout, so we
# increase it for this test.
vtgate_conn = get_connection(timeout=30)
stream_cursor = vtgate_conn.cursor(
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange],
cursorclass=vtgate_cursor.StreamVTGateCursor)
self.replica_tablet.kill_vttablet()
self.replica_tablet2.kill_vttablet()
with self.assertRaises(dbexceptions.DatabaseError):
stream_cursor.execute('select * from vt_insert_test', {})
self.tablet_start(self.replica_tablet, 'replica')
self.tablet_start(self.replica_tablet2, 'replica')
try:
stream_cursor.execute('select * from vt_insert_test', {})
except Exception, e:
self.fail('Communication with shard0 replica failed with error %s' %
str(e))
def test_vtgate_restart_stream_execute(self):
vtgate_conn = get_connection()
stream_cursor = vtgate_conn.cursor(
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange],
cursorclass=vtgate_cursor.StreamVTGateCursor)
port = utils.vtgate.port
utils.vtgate.kill()
with self.assertRaises(dbexceptions.OperationalError):
stream_cursor.execute('select * from vt_insert_test', {})
utils.VtGate(port=port).start()
vtgate_conn = get_connection()
stream_cursor = vtgate_conn.cursor(
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange],
cursorclass=vtgate_cursor.StreamVTGateCursor)
try:
stream_cursor.execute('select * from vt_insert_test', {})
except Exception, e:
self.fail('Communication with shard0 replica failed with error %s' %
str(e))
# vtgate begin doesn't make any back-end connections to
# vttablet so the kill and restart shouldn't have any effect.
def test_tablet_restart_begin(self):
vtgate_conn = get_connection()
self.master_tablet.kill_vttablet()
vtgate_conn.begin()
_ = self.tablet_start(self.master_tablet, 'master')
vtgate_conn.begin()
# this succeeds only if retry_count > 0
vtgate_conn._execute(
'delete from vt_insert_test', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
vtgate_conn.commit()
def test_vtgate_restart_begin(self):
vtgate_conn = get_connection()
port = utils.vtgate.port
utils.vtgate.kill()
with self.assertRaises(dbexceptions.OperationalError):
vtgate_conn.begin()
utils.VtGate(port=port).start()
vtgate_conn = get_connection()
vtgate_conn.begin()
def test_tablet_fail_write(self):
# Since we're going to kill the tablet, there will be a race between the
# client timeout here and the vtgate->vttablet connection timeout, so we
# increase it for this test.
vtgate_conn = get_connection(timeout=30)
with self.assertRaises(dbexceptions.DatabaseError):
vtgate_conn.begin()
self.master_tablet.kill_vttablet()
vtgate_conn._execute(
'delete from vt_insert_test', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
vtgate_conn.commit()
_ = self.tablet_start(self.master_tablet, 'master')
vtgate_conn.begin()
vtgate_conn._execute(
'delete from vt_insert_test', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
vtgate_conn.commit()
def test_vttablet_errors_not_logged(self):
"""Verifies that errors from VtTablet aren't logged as such in VTGate.
    Instead of making assertions by reading the log stream, we read a debug
    var that is incremented by VTGate whenever it chooses to log exceptions
to Infof instead of Errorf.
"""
vtgate_conn = get_connection()
keyspace_id = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]][0]
cursor = vtgate_conn.cursor(KEYSPACE_NAME, 'master',
keyspace_ids=[pack_kid(keyspace_id)],
writable=True)
with self.assertRaises(dbexceptions.DatabaseError):
cursor.execute('this is not valid syntax, throw an error', {})
try:
non_vtgate_errors = (
utils.vtgate.get_vars()['VtgateInfoErrorCounts']['NonVtgateErrors'])
except KeyError:
self.fail(
"No errors in VTGate that weren't logged as exceptions: "
"'NonVtgateErrors' vars not found")
self.assertEqual(non_vtgate_errors, 1)
def test_error_on_dml(self):
vtgate_conn = get_connection()
vtgate_conn.begin()
keyspace_id = SHARD_KID_MAP[SHARD_NAMES[
(self.shard_index+1)%len(SHARD_NAMES)
]][0]
try:
vtgate_conn._execute(
'insert into vt_insert_test values(:msg, :keyspace_id)',
{'msg': 'test4', 'keyspace_id': keyspace_id}, KEYSPACE_NAME,
'master', keyranges=[self.keyrange])
vtgate_conn.commit()
self.fail('Failed to raise DatabaseError exception')
except dbexceptions.DatabaseError:
# FIXME(alainjobart) add a method to get the session to vtgate_client,
# instead of poking into it like this.
if protocols_flavor().vtgate_python_protocol() == 'gorpc':
logging.info(
'SHARD SESSIONS: %s', vtgate_conn.session['ShardSessions'])
transaction_id = (
vtgate_conn.session['ShardSessions'][0]['TransactionId'])
else:
transaction_id = vtgate_conn.session.shard_sessions[0].transaction_id
self.assertTrue(transaction_id != 0)
except Exception, e:
self.fail('Expected DatabaseError as exception, got %s' % str(e))
finally:
vtgate_conn.rollback()
def test_vtgate_fail_write(self):
vtgate_conn = get_connection()
port = utils.vtgate.port
with self.assertRaises(dbexceptions.OperationalError):
vtgate_conn.begin()
utils.vtgate.kill()
vtgate_conn._execute(
'delete from vt_insert_test', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
vtgate_conn.commit()
utils.VtGate(port=port).start()
vtgate_conn = get_connection()
vtgate_conn.begin()
vtgate_conn._execute(
'delete from vt_insert_test', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
vtgate_conn.commit()
# test timeout between py client and vtgate
def test_vtgate_timeout(self):
vtgate_conn = get_connection(timeout=3.0)
with self.assertRaises(dbexceptions.TimeoutError):
vtgate_conn._execute(
'select sleep(4) from dual', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
vtgate_conn = get_connection(timeout=3.0)
with self.assertRaises(dbexceptions.TimeoutError):
vtgate_conn._execute(
'select sleep(4) from dual', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
# Currently this is causing vttablet to become unreachable at
# the timeout boundary and kill any query being executed
# at the time. Prevent flakiness in other tests by sleeping
# until the query times out.
# TODO fix b/17733518
time.sleep(3)
# test timeout between vtgate and vttablet
# the timeout is set to 5 seconds
def test_tablet_timeout(self):
# this test only makes sense if there is a shorter/protective timeout
# set for vtgate-vttablet connection.
# TODO(liguo): evaluate if we want such a timeout
return
vtgate_conn = get_connection()
with self.assertRaises(dbexceptions.DatabaseError):
vtgate_conn.begin()
vtgate_conn._execute(
'select sleep(7) from dual', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
vtgate_conn = get_connection()
with self.assertRaises(dbexceptions.DatabaseError):
vtgate_conn.begin()
vtgate_conn._execute(
'select sleep(7) from dual', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
  # Test the case where no query is sent while the tablet shuts down (single tablet).
def test_restart_mysql_tablet_idle(self):
self.replica_tablet2.kill_vttablet()
vtgate_conn = get_connection()
utils.wait_procs([self.replica_tablet.shutdown_mysql(),])
try:
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
self.fail('DatabaseError should have been raised')
except Exception, e:
self.assertIsInstance(e, dbexceptions.DatabaseError)
self.assertNotIsInstance(e, dbexceptions.IntegrityError)
self.assertNotIsInstance(e, dbexceptions.OperationalError)
self.assertNotIsInstance(e, dbexceptions.TimeoutError)
utils.wait_procs([self.replica_tablet.start_mysql(),])
# force health check so tablet can become serving
utils.run_vtctl(
['RunHealthCheck', self.replica_tablet.tablet_alias, 'replica'],
auto_log=True)
self.replica_tablet.wait_for_vttablet_state('SERVING')
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
self.replica_tablet.kill_vttablet()
self.tablet_start(self.replica_tablet, 'replica')
self.replica_tablet.wait_for_vttablet_state('SERVING')
# TODO: expect to fail until we can detect vttablet shuts down gracefully
# while VTGate is idle.
# NOTE: with grpc, it will reconnect, and not trigger an error.
if protocols_flavor().tabletconn_protocol() == 'grpc':
return
try:
result = vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
self.fail(
'DatabaseError should have been raised, but got %s' % str(result))
except Exception, e:
self.assertIsInstance(e, dbexceptions.DatabaseError)
self.assertNotIsInstance(e, dbexceptions.IntegrityError)
self.assertNotIsInstance(e, dbexceptions.OperationalError)
self.assertNotIsInstance(e, dbexceptions.TimeoutError)
self.tablet_start(self.replica_tablet2, 'replica')
  # Test the case where queries are sent while vttablet shuts down,
  # and all queries fail because there is only one vttablet.
def test_restart_mysql_tablet_queries(self):
vtgate_conn = get_connection()
utils.wait_procs([self.replica_tablet.shutdown_mysql(),])
utils.wait_procs([self.replica_tablet2.shutdown_mysql(),])
try:
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
self.fail('DatabaseError should have been raised')
except Exception, e:
self.assertIsInstance(e, dbexceptions.DatabaseError)
self.assertNotIsInstance(e, dbexceptions.IntegrityError)
self.assertNotIsInstance(e, dbexceptions.OperationalError)
self.assertNotIsInstance(e, dbexceptions.TimeoutError)
utils.wait_procs([self.replica_tablet.start_mysql(),])
utils.wait_procs([self.replica_tablet2.start_mysql(),])
# force health check so tablet can become serving
utils.run_vtctl(
['RunHealthCheck', self.replica_tablet.tablet_alias, 'replica'],
auto_log=True)
utils.run_vtctl(
['RunHealthCheck', self.replica_tablet2.tablet_alias, 'replica'],
auto_log=True)
self.replica_tablet.wait_for_vttablet_state('SERVING')
self.replica_tablet2.wait_for_vttablet_state('SERVING')
self.replica_tablet2.kill_vttablet()
self.replica_tablet.kill_vttablet(wait=False)
time.sleep(0.1)
# send query while vttablet is in lameduck, should fail as no vttablet
try:
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
self.fail('DatabaseError should have been raised')
except Exception, e:
self.assertIsInstance(e, dbexceptions.DatabaseError)
self.assertNotIsInstance(e, dbexceptions.IntegrityError)
self.assertNotIsInstance(e, dbexceptions.OperationalError)
self.assertNotIsInstance(e, dbexceptions.TimeoutError)
# send another query, should also fail
try:
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
self.fail('DatabaseError should have been raised')
except Exception, e:
self.assertIsInstance(e, dbexceptions.DatabaseError)
self.assertNotIsInstance(e, dbexceptions.IntegrityError)
self.assertNotIsInstance(e, dbexceptions.OperationalError)
self.assertNotIsInstance(e, dbexceptions.TimeoutError)
# sleep over the lameduck period
time.sleep(0.5)
self.tablet_start(self.replica_tablet, 'replica')
self.tablet_start(self.replica_tablet2, 'replica')
self.replica_tablet.wait_for_vttablet_state('SERVING')
self.replica_tablet2.wait_for_vttablet_state('SERVING')
# as the cached vtgate-tablet conn was marked down, it should succeed
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
  # Test the case where queries are sent while one vttablet shuts down,
  # and all queries succeed because there is another vttablet.
def test_restart_mysql_tablet_queries_multi_tablets(self):
vtgate_conn = get_connection()
utils.wait_procs([self.replica_tablet.shutdown_mysql(),])
# should retry on tablet2 and succeed
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
utils.wait_procs([self.replica_tablet.start_mysql(),])
# force health check so tablet can become serving
utils.run_vtctl(
['RunHealthCheck', self.replica_tablet.tablet_alias, 'replica'],
auto_log=True)
self.replica_tablet.wait_for_vttablet_state('SERVING')
tablet2_vars = utils.get_vars(self.replica_tablet2.port)
t2_query_count_before = int(tablet2_vars['Queries']['TotalCount'])
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
tablet2_vars = utils.get_vars(self.replica_tablet2.port)
t2_query_count_after = int(tablet2_vars['Queries']['TotalCount'])
self.assertTrue((t2_query_count_after-t2_query_count_before) == 1)
# kill tablet2 and leave it in lameduck mode
self.replica_tablet2.kill_vttablet(wait=False)
time.sleep(0.1)
# send query while tablet2 is in lameduck, should retry on tablet1
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_before = int(tablet1_vars['Queries']['TotalCount'])
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_after = int(tablet1_vars['Queries']['TotalCount'])
self.assertTrue((t1_query_count_after-t1_query_count_before) == 1)
# sleep over the lameduck period
time.sleep(0.5)
# send another query, should also succeed on tablet1
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_before = int(tablet1_vars['Queries']['TotalCount'])
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_after = int(tablet1_vars['Queries']['TotalCount'])
self.assertTrue((t1_query_count_after-t1_query_count_before) == 1)
# start tablet2
self.tablet_start(self.replica_tablet2, 'replica')
self.replica_tablet2.wait_for_vttablet_state('SERVING')
# it should succeed on tablet1
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_before = int(tablet1_vars['Queries']['TotalCount'])
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_after = int(tablet1_vars['Queries']['TotalCount'])
self.assertTrue((t1_query_count_after-t1_query_count_before) == 1)
# Test the case where queries are sent while one vttablet is killed,
# and all queries succeed because another vttablet is available.
def test_kill_mysql_tablet_queries_multi_tablets(self):
vtgate_conn = get_connection()
utils.wait_procs([self.replica_tablet.shutdown_mysql(),])
# should retry on tablet2 and succeed
tablet2_vars = utils.get_vars(self.replica_tablet2.port)
t2_query_count_before = int(tablet2_vars['Queries']['TotalCount'])
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
tablet2_vars = utils.get_vars(self.replica_tablet2.port)
t2_query_count_after = int(tablet2_vars['Queries']['TotalCount'])
self.assertTrue((t2_query_count_after-t2_query_count_before) == 1)
# start tablet1 mysql
utils.wait_procs([self.replica_tablet.start_mysql(),])
# force health check so tablet can become serving
utils.run_vtctl(
['RunHealthCheck', self.replica_tablet.tablet_alias, 'replica'],
auto_log=True)
self.replica_tablet.wait_for_vttablet_state('SERVING')
# should succeed on tablet2
tablet2_vars = utils.get_vars(self.replica_tablet2.port)
t2_query_count_before = int(tablet2_vars['Queries']['TotalCount'])
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
tablet2_vars = utils.get_vars(self.replica_tablet2.port)
t2_query_count_after = int(tablet2_vars['Queries']['TotalCount'])
self.assertTrue((t2_query_count_after-t2_query_count_before) == 1)
# hard kill tablet2
self.replica_tablet2.hard_kill_vttablet()
# send query after tablet2 is killed, should not retry on the cached conn
try:
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
self.fail('DatabaseError should have been raised')
except Exception, e:
self.assertIsInstance(e, dbexceptions.DatabaseError)
self.assertNotIsInstance(e, dbexceptions.IntegrityError)
self.assertNotIsInstance(e, dbexceptions.OperationalError)
self.assertNotIsInstance(e, dbexceptions.TimeoutError)
# send another query, should succeed on tablet1
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_before = int(tablet1_vars['Queries']['TotalCount'])
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_after = int(tablet1_vars['Queries']['TotalCount'])
self.assertTrue((t1_query_count_after-t1_query_count_before) == 1)
# start tablet2
self.tablet_start(self.replica_tablet2, 'replica')
self.replica_tablet2.wait_for_vttablet_state('SERVING')
# it should succeed on tablet1
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_before = int(tablet1_vars['Queries']['TotalCount'])
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_after = int(tablet1_vars['Queries']['TotalCount'])
self.assertTrue((t1_query_count_after-t1_query_count_before) == 1)
def test_bind_vars_in_exception_message(self):
vtgate_conn = get_connection()
keyspace_id = None
count = 1
vtgate_conn.begin()
vtgate_conn._execute(
'delete from vt_a', {},
KEYSPACE_NAME, 'master',
keyranges=[get_keyrange(SHARD_NAMES[self.shard_index])])
vtgate_conn.commit()
eid_map = {}
# start transaction
vtgate_conn.begin()
kid_list = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]]
# kill vttablet
self.master_tablet.kill_vttablet()
try:
# perform write, this should fail
for x in xrange(count):
keyspace_id = kid_list[x%len(kid_list)]
eid_map[x] = str(keyspace_id)
vtgate_conn._execute(
'insert into vt_a (eid, id, keyspace_id) '
'values (%(eid)s, %(id)s, %(keyspace_id)s)',
{'eid': x, 'id': x, 'keyspace_id': keyspace_id},
KEYSPACE_NAME, 'master', keyspace_ids=[pack_kid(keyspace_id)])
vtgate_conn.commit()
except Exception, e:
# check that bind var value is not present in exception message.
if str(keyspace_id) in str(e):
self.fail('bind_vars present in the exception message')
finally:
vtgate_conn.rollback()
# Start master tablet again
self.tablet_start(self.master_tablet, 'master')
def test_fail_fast_when_no_serving_tablets(self):
"""Verify VtGate requests fail-fast when tablets are unavailable.
When there are no SERVING tablets available to serve a request,
VtGate should fail-fast (returning an appropriate error) without
waiting around till the request deadline expires.
"""
tablet_type = 'replica'
keyranges = [get_keyrange(SHARD_NAMES[self.shard_index])]
query = 'select * from vt_insert_test'
# Execute a query to warm VtGate's caches for connections and endpoints
get_rtt(KEYSPACE_NAME, query, tablet_type, keyranges)
# Shutdown mysql and ensure tablet is in NOT_SERVING state
utils.wait_procs([self.replica_tablet.shutdown_mysql(),])
utils.wait_procs([self.replica_tablet2.shutdown_mysql(),])
try:
get_rtt(KEYSPACE_NAME, query, tablet_type, keyranges)
self.replica_tablet.wait_for_vttablet_state('NOT_SERVING')
self.replica_tablet2.wait_for_vttablet_state('NOT_SERVING')
except Exception:
self.fail('unable to set tablet to NOT_SERVING state')
# Fire off a few requests in parallel
num_requests = 10
pool = ThreadPool(processes=num_requests)
async_results = []
for _ in range(num_requests):
async_result = pool.apply_async(
get_rtt, (KEYSPACE_NAME, query, tablet_type, keyranges))
async_results.append(async_result)
# Fetch all round trip times and verify max
rt_times = []
for async_result in async_results:
rt_times.append(async_result.get())
# The true upper limit is 2 seconds (1s * 2 retries as in
# utils.py). To account for network latencies and other variances,
# we keep an upper bound of 3 here.
self.assertTrue(
max(rt_times) < 3,
'at least one request did not fail-fast; round trip times: %s' %
rt_times)
# Restart tablet and put it back to SERVING state
utils.wait_procs([self.replica_tablet.start_mysql(),])
utils.wait_procs([self.replica_tablet2.start_mysql(),])
# force health check so tablet can become serving
utils.run_vtctl(
['RunHealthCheck', self.replica_tablet.tablet_alias, 'replica'],
auto_log=True)
utils.run_vtctl(
['RunHealthCheck', self.replica_tablet2.tablet_alias, 'replica'],
auto_log=True)
self.replica_tablet.wait_for_vttablet_state('SERVING')
self.replica_tablet2.wait_for_vttablet_state('SERVING')
def test_lameduck_ongoing_query_single(self):
vtgate_conn = get_connection()
utils.wait_procs([self.replica_tablet2.shutdown_mysql(),])
utils.run_vtctl(
['RunHealthCheck', self.replica_tablet2.tablet_alias, 'replica'],
auto_log=True)
self.replica_tablet.kill_vttablet()
self.tablet_start(self.replica_tablet, 'replica', '5s')
self.replica_tablet.wait_for_vttablet_state('SERVING')
# make sure query can go through tablet1
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_before = int(tablet1_vars['Queries']['TotalCount'])
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
tablet1_vars = utils.get_vars(self.replica_tablet.port)
t1_query_count_after = int(tablet1_vars['Queries']['TotalCount'])
self.assertTrue((t1_query_count_after-t1_query_count_before) == 1)
# start a long running query
num_requests = 10
pool = ThreadPool(processes=num_requests)
async_results = []
for _ in range(5):
async_result = pool.apply_async(
send_long_query, (KEYSPACE_NAME, 'replica', [self.keyrange], 2))
async_results.append(async_result)
# soft kill vttablet
# wait first so the long-running queries above have already been sent out
time.sleep(1)
self.replica_tablet.kill_vttablet(wait=False)
# send query while vttablet is in lameduck; should fail since no vttablet is serving
time.sleep(0.1)
try:
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
self.fail('DatabaseError should have been raised')
except Exception, e:
self.assertIsInstance(e, dbexceptions.DatabaseError)
self.assertNotIsInstance(e, dbexceptions.IntegrityError)
self.assertNotIsInstance(e, dbexceptions.OperationalError)
self.assertNotIsInstance(e, dbexceptions.TimeoutError)
# Fetch all ongoing query results
query_results = []
for async_result in async_results:
query_results.append(async_result.get())
# all should succeed
for query_result in query_results:
self.assertTrue(query_result)
# sleep over lameduck period
time.sleep(5)
# start tablet1
self.tablet_start(self.replica_tablet, 'replica')
self.replica_tablet.wait_for_vttablet_state('SERVING')
# send another query, should succeed on tablet1
vtgate_conn._execute(
'select 1 from vt_insert_test', {},
KEYSPACE_NAME, 'replica',
keyranges=[self.keyrange])
# start tablet2
utils.wait_procs([self.replica_tablet2.start_mysql(),])
utils.run_vtctl(
['RunHealthCheck', self.replica_tablet2.tablet_alias, 'replica'],
auto_log=True)
self.replica_tablet2.wait_for_vttablet_state('SERVING')
# Return the round-trip time for a VtGate query, ignoring any errors
def get_rtt(keyspace, query, tablet_type, keyranges):
vtgate_conn = get_connection()
cursor = vtgate_conn.cursor(keyspace, tablet_type, keyranges=keyranges)
start = time.time()
try:
cursor.execute(query, {})
except Exception:
pass
duration = time.time() - start
return duration
# Send out a long-running query; return whether it succeeds.
def send_long_query(keyspace, tablet_type, keyranges, delay):
try:
vtgate_conn = get_connection()
cursor = vtgate_conn.cursor(keyspace, tablet_type, keyranges=keyranges)
query = 'select sleep(%s) from dual' % str(delay)
try:
cursor.execute(query, {})
except Exception:
return False
return True
except Exception:
return False
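# The tests above repeatedly read a tablet's Queries TotalCount before and after
# an _execute call to assert that exactly one query landed on that tablet. A
# minimal helper sketch of that pattern follows (hypothetical name, not wired
# into the tests; it assumes utils.get_vars and the vtgate_conn._execute
# signature used throughout this file).
def assert_query_routed_to(test, tablet, vtgate_conn, keyrange):
  """Run the canary select and assert the given tablet served exactly one query."""
  count_before = int(utils.get_vars(tablet.port)['Queries']['TotalCount'])
  vtgate_conn._execute(
      'select 1 from vt_insert_test', {},
      KEYSPACE_NAME, 'replica',
      keyranges=[keyrange])
  count_after = int(utils.get_vars(tablet.port)['Queries']['TotalCount'])
  test.assertEqual(count_after - count_before, 1)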
class VTGateTestLogger(vtdb_logger.VtdbLogger):
def __init__(self):
self._integrity_error_count = 0
def integrity_error(self, e):
self._integrity_error_count += 1
def get_integrity_error_count(self):
return self._integrity_error_count
DML_KEYWORDS = ['insert', 'update', 'delete']
class TestExceptionLogging(BaseTestCase):
def setUp(self):
super(TestExceptionLogging, self).setUp()
self.shard_index = 1
self.keyrange = get_keyrange(SHARD_NAMES[self.shard_index])
self.master_tablet = shard_1_master
self.replica_tablet = shard_1_replica1
vtdb_logger.register_vtdb_logger(VTGateTestLogger())
self.logger = vtdb_logger.get_logger()
def test_integrity_error_logging(self):
vtgate_conn = get_connection()
vtgate_conn.begin()
vtgate_conn._execute(
'delete from vt_a', {},
KEYSPACE_NAME, 'master',
keyranges=[self.keyrange])
vtgate_conn.commit()
keyspace_id = SHARD_KID_MAP[SHARD_NAMES[self.shard_index]][0]
old_error_count = self.logger.get_integrity_error_count()
try:
vtgate_conn.begin()
vtgate_conn._execute(
'insert into vt_a (eid, id, keyspace_id) '
'values (%(eid)s, %(id)s, %(keyspace_id)s)',
{'eid': 1, 'id': 1, 'keyspace_id': keyspace_id}, KEYSPACE_NAME,
'master', keyspace_ids=[pack_kid(keyspace_id)])
vtgate_conn._execute(
'insert into vt_a (eid, id, keyspace_id) '
'values (%(eid)s, %(id)s, %(keyspace_id)s)',
{'eid': 1, 'id': 1, 'keyspace_id': keyspace_id}, KEYSPACE_NAME,
'master', keyspace_ids=[pack_kid(keyspace_id)])
vtgate_conn.commit()
except dbexceptions.IntegrityError as e:
parts = str(e).split(',')
exc_msg = parts[0]
for kw in DML_KEYWORDS:
if kw in exc_msg:
self.fail("IntegrityError shouldn't contain the query %s" % exc_msg)
except Exception as e:
self.fail('Expected IntegrityError to be raised, raised %s' % str(e))
finally:
vtgate_conn.rollback()
# The underlying execute is expected to catch and log the integrity error.
self.assertEqual(self.logger.get_integrity_error_count(), old_error_count+1)
if __name__ == '__main__':
utils.main()
|
|
#from seq2seq import ngramlm, neurallm
import cPickle, pickle
import numpy as np
import os
from collections import defaultdict
import dynet as dy
from dynet import *
import numpy
import random
from nltk.tokenize import RegexpTokenizer
class Attention_Batch:
def __init__(self, src_vocab_size, tgt_vocab_size, model, state_dim, embed_size, src_lookup, tgt_lookup, minibatch_size, builder=dy.LSTMBuilder):
self.model = model
#self.trainer = dy.SimpleSGDTrainer(self.model)
self.layers = 1
self.embed_size = 128
self.hidden_size = 128
self.state_size = 128
self.src_vocab_size = src_vocab_size
self.tgt_vocab_size = tgt_vocab_size
self.attention_size = 32
self.minibatch_size = minibatch_size
self.enc_fwd_lstm = dy.LSTMBuilder(self.layers, self.embed_size, self.state_size, model)
self.enc_bwd_lstm = dy.LSTMBuilder(self.layers, self.embed_size,self.state_size, model)
self.dec_lstm = dy.LSTMBuilder(self.layers, self.state_size*2 + self.embed_size, self.state_size, model)
self.input_lookup = src_lookup
self.output_lookup = tgt_lookup
self.attention_w1 = model.add_parameters( (self.attention_size, self.state_size*2))
self.attention_w2 = model.add_parameters( (self.attention_size , self.state_size * self.layers* 2))
self.attention_v = model.add_parameters( (1, self.attention_size))
self.decoder_w = model.add_parameters( (self.src_vocab_size , self.state_size ))
self.decoder_b = model.add_parameters( ( self.src_vocab_size ))
#self.output_lookup = lookup
self.duration_weight = model.add_parameters(( 1, self.state_size ))
self.duration_bias = model.add_parameters( ( 1 ))
def run_lstm(self, init_state, input_vecs):
s = init_state
out_vectors = []
for vector in input_vecs:
x_t = lookup(self.input_lookup, int(vector))
s = s.add_input(x_t)
out_vector = s.output()
out_vectors.append(out_vector)
return out_vectors
# Batched variant: simply loop over each sequence in the batch.
def run_lstm_batch(self, init_state, input_vecs_batch):
out_vectors_array = []
for input_vecs in input_vecs_batch:
s = init_state
out_vectors = []
for vector in input_vecs:
x_t = lookup(self.input_lookup, int(vector))
s = s.add_input(x_t)
out_vector = s.output()
out_vectors.append(out_vector)
out_vectors_array.append(out_vectors)
return out_vectors_array
def embed_sentence(self, sentence):
sentence = [EOS] + list(sentence) + [EOS]
sentence = [char2int[c] for c in sentence]
global input_lookup
return [input_lookup[char] for char in sentence]
def attend_batch(self, input_mat_array, state_array, w1dt_array):
context_array = []
for input_mat, state, w1dt in zip(input_mat_array, state_array, w1dt_array):
context_array.append(self.attend(input_mat, state, w1dt))
return context_array
def attend(self, input_mat, state, w1dt):
#global self.attention_w2
#global self.attention_v
w2 = dy.parameter(self.attention_w2)
v = dy.parameter(self.attention_v)
w2dt = w2*dy.concatenate(list(state.s()))
unnormalized = dy.transpose(v * dy.tanh(dy.colwise_add(w1dt, w2dt)))
att_weights = dy.softmax(unnormalized)
context = input_mat * att_weights
return context
def test_duration(self, state, idx):
dw = dy.parameter(self.duration_weight)
db = dy.parameter(self.duration_bias)
dur = dw * state.output() + db
return dy.squared_norm(dur - idx)
def decode_batch(self, vectors_array, output_array, end_token):
loss_array = []
for vector, output in zip(vectors_array, output_array):
l,t = self.decode(vector, output, end_token)
loss_array.append(l)
return dy.esum(loss_array) , t
def decode(self, vectors, output, end_token):
#output = [EOS] + list(output) + [EOS]
#output = [char2int[c] for c in output]
w = dy.parameter(self.decoder_w)
b = dy.parameter(self.decoder_b)
w1 = dy.parameter(self.attention_w1)
input_mat = dy.concatenate_cols(vectors)
w1dt = None
last_output_embeddings = self.output_lookup[2]
s = self.dec_lstm.initial_state().add_input(dy.concatenate([dy.vecInput(self.state_size *2), last_output_embeddings]))
loss = []
dur_loss = []
c = 1
for word in output:
c += 1
# w1dt can be computed and cached once for the entire decoding phase
w1dt = w1dt or w1 * input_mat
vector = dy.concatenate([self.attend(input_mat, s, w1dt), last_output_embeddings])
s = s.add_input(vector)
k = s
#print "Going"
dloss = self.test_duration(k, c)
#print "Back"
dur_loss.append(dloss)
out_vector = w * s.output() + b
probs = dy.softmax(out_vector)
last_output_embeddings = self.output_lookup[word]
loss.append(-dy.log(dy.pick(probs, word)))
loss = dy.esum(loss)
return loss, c
def generate(self, sentence):
#embedded = embed_sentence(in_seq)
encoded = self.encode_sentence(sentence)
w = dy.parameter(self.decoder_w)
b = dy.parameter(self.decoder_b)
w1 = dy.parameter(self.attention_w1)
input_mat = dy.concatenate_cols(encoded)
w1dt = None
last_output_embeddings = self.output_lookup[2]
s = self.dec_lstm.initial_state().add_input(dy.concatenate([dy.vecInput(self.state_size * 2), last_output_embeddings]))
out = ''
res = []
count_EOS = 0
for i in range(len(sentence)):
if count_EOS == 2: break
# w1dt can be computed and cached once for the entire decoding phase
w1dt = w1dt or w1 * input_mat
vector = dy.concatenate([self.attend(input_mat, s, w1dt), last_output_embeddings])
s = s.add_input(vector)
#k = s
#dloss = self.test_duration(k, i, b)
out_vector = w * s.output() + b
probs = dy.softmax(out_vector).vec_value()
next_word = probs.index(max(probs))
last_output_embeddings = self.output_lookup[next_word]
if next_word == 2:
count_EOS += 1
continue
res.append(next_word)
#out += int2char[next_word]
return res
def get_loss(self, sentence):
dy.renew_cg()
#embedded = self.embed_sentence(sentence)
encoded = self.encode_sentence(sentence)
end_token = '</s>'
return self.decode(encoded, sentence, end_token)
def get_loss_batch(self, src_sentence_array, tgt_sentence_array):
dy.renew_cg()
#embedded = self.embed_sentence(sentence)
encoded_array = self.encode_sentence_batch(src_sentence_array)
end_token = '</s>'
return self.decode_batch(encoded_array, tgt_sentence_array, end_token)
def encode_sentence(self, sentence):
sentence_rev = list(reversed(sentence))
fwd_vectors = self.run_lstm(self.enc_fwd_lstm.initial_state(), sentence)
bwd_vectors = self.run_lstm(self.enc_bwd_lstm.initial_state(), sentence_rev)
bwd_vectors = list(reversed(bwd_vectors))
vectors = [dy.concatenate(list(p)) for p in zip(fwd_vectors, bwd_vectors)]
return vectors
def encode_sentence_batch(self, sentence_array):
vectors_array = []
for sentence in sentence_array:
vectors_array.append(self.encode_sentence(sentence))
return vectors_array
def encode_sentence_batch_advanced(self, sentence_array):
sentence_rev_array = []
for sent in sentence_array:
sentence_rev_array.append(list(reversed(sent)))
fwd_vectors = self.run_lstm_batch(self.enc_fwd_lstm.initial_state(), sentence_array)
bwd_vectors = self.run_lstm_batch(self.enc_bwd_lstm.initial_state(), sentence_rev_array)
bwd_vectors_array = []
for v in bwd_vectors:
bwd_vectors_array.append(list(reversed(v)))
fwd_vectors_array = fwd_vectors
vectors_batch = []
for fwd_vector, bwd_vector in zip(fwd_vectors_array, bwd_vectors_array):
vector = [dy.concatenate(list(p)) for p in zip(fwd_vector, bwd_vector)]
vectors_batch.append(vector)
return vectors_batch
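# The attend() method above implements MLP-style (Bahdanau) attention:
#   e_i = v . tanh(W1 h_i + W2 s),   a = softmax(e),   context = sum_i a_i h_i
# Below is a small, self-contained numpy sketch of the same arithmetic
# (illustrative only; the dimensions echo attention_size/state_size above but
# the numbers are made up and nothing here touches the DyNet graph).
def _attention_numpy_sketch():
    import numpy as np
    enc_len, enc_dim, dec_dim, att_dim = 5, 4, 3, 2
    H = np.random.randn(enc_dim, enc_len)      # encoder states as columns (input_mat)
    s = np.random.randn(dec_dim)               # flattened decoder state
    W1 = np.random.randn(att_dim, enc_dim)
    W2 = np.random.randn(att_dim, dec_dim)
    v = np.random.randn(att_dim)
    scores = v.dot(np.tanh(W1.dot(H) + W2.dot(s)[:, None]))  # one score per position
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()                   # softmax over encoder positions
    return H.dot(weights)                      # context: weighted sum of encoder states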
class Attention:
def __init__(self, vocab_size, model, lookup):
self.model = model
#self.trainer = dy.SimpleSGDTrainer(self.model)
self.layers = 1
self.embed_size = 128
self.hidden_size = 128
self.state_size = 128
self.src_vocab_size = vocab_size
self.tgt_vocab_size = vocab_size
self.attention_size = 32
self.enc_fwd_lstm = dy.LSTMBuilder(self.layers, self.embed_size, self.state_size, model)
self.enc_bwd_lstm = dy.LSTMBuilder(self.layers, self.embed_size,self.state_size, model)
self.dec_lstm = dy.LSTMBuilder(self.layers, self.state_size*2 + self.embed_size, self.state_size, model)
self.input_lookup = lookup
self.attention_w1 = model.add_parameters( (self.attention_size, self.state_size*2))
self.attention_w2 = model.add_parameters( (self.attention_size , self.state_size * self.layers* 2))
self.attention_v = model.add_parameters( (1, self.attention_size))
self.decoder_w = model.add_parameters( (self.src_vocab_size , self.state_size ))
self.decoder_b = model.add_parameters( ( self.src_vocab_size ))
self.output_lookup = lookup
self.duration_weight = model.add_parameters(( 1, self.state_size ))
self.duration_bias = model.add_parameters( ( 1 ))
def run_lstm(self, init_state, input_vecs):
s = init_state
out_vectors = []
for vector in input_vecs:
x_t = lookup(self.input_lookup, int(vector))
s = s.add_input(x_t)
out_vector = s.output()
out_vectors.append(out_vector)
return out_vectors
def embed_sentence(self, sentence):
sentence = [EOS] + list(sentence) + [EOS]
sentence = [char2int[c] for c in sentence]
global input_lookup
return [input_lookup[char] for char in sentence]
def attend(self, input_mat, state, w1dt):
#global self.attention_w2
#global self.attention_v
w2 = dy.parameter(self.attention_w2)
v = dy.parameter(self.attention_v)
w2dt = w2*dy.concatenate(list(state.s()))
unnormalized = dy.transpose(v * dy.tanh(dy.colwise_add(w1dt, w2dt)))
att_weights = dy.softmax(unnormalized)
context = input_mat * att_weights
return context
def test_duration(self, state, idx):
dw = dy.parameter(self.duration_weight)
db = dy.parameter(self.duration_bias)
dur = dw * state.output() + db
return dy.squared_norm(dur - idx)
def decode(self, vectors, output, end_token):
#output = [EOS] + list(output) + [EOS]
#output = [char2int[c] for c in output]
w = dy.parameter(self.decoder_w)
b = dy.parameter(self.decoder_b)
w1 = dy.parameter(self.attention_w1)
input_mat = dy.concatenate_cols(vectors)
w1dt = None
last_output_embeddings = self.output_lookup[2]
s = self.dec_lstm.initial_state().add_input(dy.concatenate([dy.vecInput(self.state_size *2), last_output_embeddings]))
loss = []
dur_loss = []
c = 1
for word in output:
c += 1
# w1dt can be computed and cached once for the entire decoding phase
w1dt = w1dt or w1 * input_mat
vector = dy.concatenate([self.attend(input_mat, s, w1dt), last_output_embeddings])
s = s.add_input(vector)
k = s
#print "Going"
dloss = self.test_duration(k, c)
#print "Back"
dur_loss.append(dloss)
out_vector = w * s.output() + b
probs = dy.softmax(out_vector)
last_output_embeddings = self.output_lookup[word]
loss.append(-dy.log(dy.pick(probs, word)))
loss = dy.esum(loss)
return loss, dy.esum(dur_loss)
def generate(self, sentence):
#embedded = embed_sentence(in_seq)
encoded = self.encode_sentence(sentence)
w = dy.parameter(self.decoder_w)
b = dy.parameter(self.decoder_b)
w1 = dy.parameter(self.attention_w1)
input_mat = dy.concatenate_cols(encoded)
w1dt = None
last_output_embeddings = self.output_lookup[2]
s = self.dec_lstm.initial_state().add_input(dy.concatenate([dy.vecInput(self.state_size * 2), last_output_embeddings]))
out = ''
res = []
count_EOS = 0
for i in range(len(sentence)):
if count_EOS == 2: break
# w1dt can be computed and cached once for the entire decoding phase
w1dt = w1dt or w1 * input_mat
vector = dy.concatenate([self.attend(input_mat, s, w1dt), last_output_embeddings])
s = s.add_input(vector)
#k = s
#dloss = self.test_duration(k, i, b)
out_vector = w * s.output() + b
probs = dy.softmax(out_vector).vec_value()
next_word = probs.index(max(probs))
last_output_embeddings = self.output_lookup[next_word]
if next_word == 2:
count_EOS += 1
continue
res.append(next_word)
#out += int2char[next_word]
return res
def get_loss(self, sentence):
dy.renew_cg()
#embedded = self.embed_sentence(sentence)
encoded = self.encode_sentence(sentence)
end_token = '</s>'
return self.decode(encoded, sentence, end_token)
def encode_sentence(self, sentence):
sentence_rev = list(reversed(sentence))
fwd_vectors = self.run_lstm(self.enc_fwd_lstm.initial_state(), sentence)
bwd_vectors = self.run_lstm(self.enc_bwd_lstm.initial_state(), sentence_rev)
bwd_vectors = list(reversed(bwd_vectors))
vectors = [dy.concatenate(list(p)) for p in zip(fwd_vectors, bwd_vectors)]
return vectors
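# Illustrative usage sketch for the Attention class above (not called anywhere).
# The vocabulary size and the toy sentence of word ids are made-up values; the
# lookup table's embedding dimension must match the hard-coded embed_size = 128.
def _attention_usage_sketch():
    vocab_size = 100
    model = dy.Model()
    lookup = model.add_lookup_parameters((vocab_size, 128))
    att = Attention(vocab_size, model, lookup)
    trainer = dy.SimpleSGDTrainer(model)
    sentence = [2, 5, 7, 9, 2]        # word ids; id 2 doubles as EOS in this file
    loss, dur_loss = att.get_loss(sentence)
    total = loss + dur_loss
    val = total.value()               # forward pass
    total.backward()
    trainer.update()
    return val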
class EncoderDecoder:
def __init__(self, vocab_size):
self.model = Model()
self.trainer = SimpleSGDTrainer(self.model)
self.layers = 2
self.embed_size = 128
self.hidden_size = 128
self.src_vocab_size = vocab_size
self.tgt_vocab_size = vocab_size
self.enc_builder = LSTMBuilder(self.layers, self.embed_size, self.hidden_size, self.model)
self.dec_builder = LSTMBuilder(self.layers, self.embed_size, self.hidden_size, self.model)
self.src_lookup = self.model.add_lookup_parameters((self.src_vocab_size, self.embed_size))
self.tgt_lookup = self.model.add_lookup_parameters((self.tgt_vocab_size, self.embed_size))
self.W_y = self.model.add_parameters((self.tgt_vocab_size, self.hidden_size))
self.b_y = self.model.add_parameters((self.tgt_vocab_size))
def encode(self, instance, wids):
dy.renew_cg()
W_y = dy.parameter(self.W_y)
b_y = dy.parameter(self.b_y)
# print "chceking wids here",wids["about"]
src_sent = instance.split()
#print "printing src sentnce length", len(src_sent)
losses = []
total_words = 0
# Encoder
enc_state = self.enc_builder.initial_state()
#for current_word in src_sent:
for (cw, nw) in zip(src_sent, src_sent[1:]):
state = enc_state.add_input(self.src_lookup[wids[cw]])
encoded = (W_y * state.output()) + b_y
err = pickneglogsoftmax(encoded, int(wids[nw]))
losses.append(err)
return dy.esum(losses)
# NOTE: everything below this return is unreachable leftover decoder code.
dec_state = self.dec_builder.initial_state()
dec_state = self.dec_builder.initial_state(encoded)
errs = []
# Calculate losses for decoding
for (cw, nw) in zip(src_sent, src_sent[1:]):
dec_state = dec_state.add_input(self.tgt_lookup[wids[cw]])
decoded = dec_state.output()
ystar = (W_y * dec_state.output()) + b_y
#print "current word is >>>>>>>", cw
#print "next word shud be", nw
loss = dy.pickneglogsoftmax(ystar, wids[nw])
losses.append(loss)
'''
total_words += 1
dist_array = []
for i in range(0,len(ystar.value())):
dist = numpy.linalg.norm(self.tgt_lookup[wids[nw]].value() - ystar.value()[i])
dist_array.append(dist)
#print " predicted next_word ========>", src_sent[dist_array.index(min(dist_array))]
'''
return dy.esum(losses) #, total_words, src_sent[dist_array.index(min(dist_array))]
class EncoderDecoder_debug:
def __init__(self, vocab_size):
self.model = dy.Model()
self.trainer = dy.SimpleSGDTrainer(self.model)
self.layers = 2
self.embed_size = 1
self.hidden_size = 1
self.src_vocab_size = vocab_size
self.tgt_vocab_size = vocab_size
self.enc_builder = dy.LSTMBuilder(self.layers, self.embed_size, self.hidden_size, self.model)
self.dec_builder = dy.LSTMBuilder(self.layers, self.embed_size, self.hidden_size, self.model)
self.src_lookup = self.model.add_lookup_parameters((self.src_vocab_size, self.embed_size))
self.tgt_lookup = self.model.add_lookup_parameters((self.tgt_vocab_size, self.embed_size))
self.W_y = self.model.add_parameters((self.tgt_vocab_size, self.hidden_size))
self.b_y = self.model.add_parameters((self.tgt_vocab_size))
def encode(self, instance, wids):
dy.renew_cg()
W_y = dy.parameter(self.W_y)
b_y = dy.parameter(self.b_y)
# print "chceking wids here",wids["about"]
src_sent = instance.split()
#print "printing src sentnce length", len(src_sent)
losses = []
total_words = 0
# Encoder
enc_state = self.enc_builder.initial_state()
for current_word in src_sent:
state = enc_state.add_input(self.src_lookup[wids[current_word]])
encoded = (W_y * state.output()) + b_y
dec_state = self.dec_builder.initial_state()
dec_state = self.dec_builder.initial_state(encoded)
errs = []
# Calculate losses for decoding
for (cw, nw) in zip(src_sent, src_sent[1:]):
dec_state = dec_state.add_input(self.tgt_lookup[wids[cw]])
decoded = dec_state.output()
ystar = (W_y * dec_state.output()) + b_y
print "current word is >>>>>>>", cw
print "next word shud be", nw
#loss = dy.pickneglogsoftmax(ystar, wids[nw])
for wid in wids:
loss = dy.pickneglogsoftmax(ystar, wids[wid])
print "Loss for ", wid, " w.r.t ", nw, " is ", loss.value()
from collections import defaultdict
from itertools import count
import sys
class RNNLanguageModel_batch:
def __init__(self, model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, lookup, builder=SimpleRNNBuilder):
self.builder = builder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
self.lookup = lookup
self.R = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
self.bias = model.add_parameters((VOCAB_SIZE))
def save_to_disk(self, filename):
model.save(filename, [self.builder, self.lookup, self.R, self.bias])
def load_from_disk(self, filename):
(self.builder, self.lookup, self.R, self.bias) = model.load(filename)
def build_lm_graph(self, sent):
renew_cg()
init_state = self.builder.initial_state()
R = parameter(self.R)
bias = parameter(self.bias)
errs = [] # will hold expressions
es=[]
state = init_state
for (cw,nw) in zip(sent,sent[1:]):
# assume word is already a word-id
x_t = lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
err = pickneglogsoftmax(r_t, int(nw))
errs.append(err)
nerr = esum(errs)
return nerr
def get_loss_batch(self, sent_array):
renew_cg()
init_state = self.builder.initial_state()
R = parameter(self.R)
bias = parameter(self.bias)
wids = []
masks = []
# Get the wids and masks for each time step, padding shorter sentences with id 3.
# (Like the loop below, this assumes the first sentence in the batch is the longest.)
# For example, "I am good", "This is good", "Good Morning" becomes, per time step:
#   wids:  [['I', 'This', 'Good'], ['am', 'is', 'Morning'], ['good', 'good', '<pad>']]
#   masks: [[1, 1, 1], [1, 1, 1], [1, 1, 0]]
# See the sketch after this class for a runnable version of this construction.
tot_words = 0
wids = []
masks = []
for i in range(len(sent_array[0])):
wids.append([
(sent[i] if len(sent)>i else 3) for sent in sent_array])
mask = [(1 if len(sent)>i else 0) for sent in sent_array]
masks.append(mask)
tot_words += sum(mask)
# start the rnn by inputting "<s>"
init_ids = [2] * len(sent_array)
#print dy.lookup_batch(self.lookup,init_ids)
#print "Looked up"
s = init_state.add_input(dy.lookup_batch(self.lookup,init_ids))
# feed word vectors into the RNN and predict the next word
losses = []
for wid, mask in zip(wids, masks):
# calculate the softmax and loss
#print "WID ", wid
score = dy.affine_transform([bias, R, s.output()])
loss = dy.pickneglogsoftmax_batch(score, wid)
# mask the loss if at least one sentence is shorter
if mask[-1] != 1:
mask_expr = dy.inputVector(mask)
mask_expr = dy.reshape(mask_expr, (1,), len(sent_array))
loss = loss * mask_expr
losses.append(loss)
# update the state of the RNN
wemb = dy.lookup_batch(self.lookup, wid)
s = s.add_input(wemb)
return dy.sum_batches(dy.esum(losses)), tot_words
# NOTE: the code below this point is unreachable leftover from the unbatched
# version above (it references cw/nw/state, which are undefined here).
errs = [] # will hold expressions
es=[]
for (wid, mask) in zip(wids, masks):
# assume word is already a word-id
x_t = lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
err = pickneglogsoftmax(r_t, int(nw))
errs.append(err)
nerr = esum(errs)
return nerr
def predict_next_word(self, sentence):
renew_cg()
init_state = self.builder.initial_state()
R = parameter(self.R)
bias = parameter(self.bias)
state = init_state
for cw in sentence:
# assume word is already a word-id
x_t = lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
prob = softmax(r_t)
return prob
def sample(self, first=1, nchars=0, stop=-1):
res = [first]
renew_cg()
state = self.builder.initial_state()
R = parameter(self.R)
bias = parameter(self.bias)
cw = first
while True:
x_t = lookup(self.lookup, cw)
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
ydist = softmax(r_t)
dist = ydist.vec_value()
rnd = random.random()
for i,p in enumerate(dist):
rnd -= p
if rnd <= 0: break
res.append(i)
cw = i
if cw == stop: break
if nchars and len(res) > nchars: break
return res
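# Minimal sketch of the wids/masks construction used in get_loss_batch above,
# written in plain Python so the padding/masking scheme is easy to see. Like
# the code above, it assumes the first sentence in the batch is the longest
# and uses id 3 as the padding id (all values here are illustrative).
def _batch_mask_sketch():
    sent_array = [[4, 5, 6], [7, 8, 9], [10, 11]]   # sentences as word ids
    wids, masks = [], []
    for i in range(len(sent_array[0])):
        wids.append([(sent[i] if len(sent) > i else 3) for sent in sent_array])
        masks.append([(1 if len(sent) > i else 0) for sent in sent_array])
    # wids  == [[4, 7, 10], [5, 8, 11], [6, 9, 3]]
    # masks == [[1, 1, 1],  [1, 1, 1],  [1, 1, 0]]
    return wids, masks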
from collections import defaultdict
from itertools import count
import sys
class RNNLanguageModel:
def __init__(self, model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=SimpleRNNBuilder):
self.builder = builder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
self.lookup = model.add_lookup_parameters((VOCAB_SIZE, INPUT_DIM))
self.R = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
self.bias = model.add_parameters((VOCAB_SIZE))
def save_to_disk(self, filename):
model.save(filename, [self.builder, self.lookup, self.R, self.bias])
def load_from_disk(self, filename):
(self.builder, self.lookup, self.R, self.bias) = model.load(filename)
def build_lm_graph(self, sent):
renew_cg()
init_state = self.builder.initial_state()
R = parameter(self.R)
bias = parameter(self.bias)
errs = [] # will hold expressions
es=[]
state = init_state
for (cw,nw) in zip(sent,sent[1:]):
# assume word is already a word-id
x_t = lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
err = pickneglogsoftmax(r_t, int(nw))
errs.append(err)
nerr = esum(errs)
return nerr
def predict_next_word(self, sentence):
renew_cg()
init_state = self.builder.initial_state()
R = parameter(self.R)
bias = parameter(self.bias)
state = init_state
for cw in sentence:
# assume word is already a word-id
x_t = lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
prob = softmax(r_t)
return prob
def sample(self, first=1, nchars=0, stop=-1):
res = [first]
renew_cg()
state = self.builder.initial_state()
R = parameter(self.R)
bias = parameter(self.bias)
cw = first
while True:
x_t = lookup(self.lookup, cw)
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
ydist = softmax(r_t)
dist = ydist.vec_value()
rnd = random.random()
for i,p in enumerate(dist):
rnd -= p
if rnd <= 0: break
res.append(i)
cw = i
if cw == stop: break
if nchars and len(res) > nchars: break
return res
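# Illustrative training-loop sketch for RNNLanguageModel (hypothetical corpus,
# not called anywhere). Sentences are assumed to already be lists of word ids
# with <s> = 1 and </s> = 2.
def _rnnlm_training_sketch():
    vocab_size, layers, input_dim, hidden_dim = 100, 1, 16, 32
    model = dy.Model()
    trainer = dy.SimpleSGDTrainer(model)
    lm = RNNLanguageModel(model, layers, input_dim, hidden_dim, vocab_size,
                          builder=dy.LSTMBuilder)
    corpus = [[1, 4, 5, 6, 2], [1, 7, 8, 2]]
    for sent in corpus:
        loss = lm.build_lm_graph(sent)
        loss.value()        # forward
        loss.backward()
        trainer.update()
    return lm.sample(first=1, stop=2, nchars=20)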
class RNNEncoderDecoder:
def __init__(self, model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, lookup, builder=SimpleRNNBuilder):
self.builder = builder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
self.lookup = lookup
self.R = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
self.bias = model.add_parameters((VOCAB_SIZE))
def save_to_disk(self, filename):
model.save(filename, [self.builder, self.lookup, self.R, self.bias])
def load_from_disk(self, filename):
(self.builder, self.lookup, self.R, self.bias) = model.load(filename)
def build_lm_graph(self, sent):
renew_cg()
init_state = self.builder.initial_state()
R = parameter(self.R)
bias = parameter(self.bias)
errs = [] # will hold expressions
es=[]
state = init_state
for (cw,nw) in zip(sent,sent[1:]):
# assume word is already a word-id
x_t = lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
err = pickneglogsoftmax(r_t, int(nw))
errs.append(err)
nerr = esum(errs)
#return nerr
encoded = r_t
dec_state = self.builder.initial_state()
dec_state = self.builder.initial_state(encoded)
decoder_errs = []
# Calculate losses for decoding
for (cw, nw) in zip(sent, sent[1:]):
x_t = lookup(self.lookup, int(cw))
dec_state = dec_state.add_input(x_t)
y_t = dec_state.output()
ystar = (R * y_t) + bias
err = pickneglogsoftmax(ystar, int(nw))
decoder_errs.append(err)
derr = esum(decoder_errs)
return derr
def predict_next_word(self, sentence):
renew_cg()
init_state = self.builder.initial_state()
R = parameter(self.R)
bias = parameter(self.bias)
state = init_state
for cw in sentence:
# assume word is already a word-id
x_t = lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
prob = softmax(r_t)
return prob
def resynth(self, sent):
renew_cg()
init_state = self.builder.initial_state()
R = parameter(self.R)
bias = parameter(self.bias)
errs = [] # will hold expressions
es=[]
state = init_state
for (cw, nw) in zip(sent, sent[1:]):  # pair each word with the next so the loss below is well-defined
# assume word is already a word-id
x_t = lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
err = pickneglogsoftmax(r_t, int(nw))
errs.append(err)
nerr = esum(errs)
#return nerr
encoded = r_t
dec_state = self.builder.initial_state()
dec_state = self.builder.initial_state(encoded)
decoder_errs = []
# Calculate losses for decoding
for (cw, nw) in zip(sent, sent[1:]):
x_t = lookup(self.lookup, int(cw))
dec_state = dec_state.add_input(x_t)
y_t = dec_state.output()
ystar = (R * y_t) + bias
err = pickneglogsoftmax(ystar, int(nw))
decoder_errs.append(err)
derr = esum(decoder_errs)
return derr
def sample(self, first=1, nchars=0, stop=-1):
res = [first]
renew_cg()
state = self.builder.initial_state()
R = parameter(self.R)
bias = parameter(self.bias)
cw = first
while True:
x_t = lookup(self.lookup, cw)
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
ydist = softmax(r_t)
dist = ydist.vec_value()
rnd = random.random()
for i,p in enumerate(dist):
rnd -= p
if rnd <= 0: break
res.append(i)
cw = i
if cw == stop: break
if nchars and len(res) > nchars: break
return res
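# The sample() methods above pick the next word by walking the cumulative
# distribution with a uniform random draw. An equivalent formulation with
# numpy, shown only for clarity (not wired into the classes above):
def _sample_from_distribution(dist):
    """dist: list of probabilities, e.g. softmax(r_t).vec_value()."""
    probs = np.asarray(dist, dtype=float)
    probs = probs / probs.sum()     # renormalize to guard against rounding error
    return int(np.random.choice(len(probs), p=probs))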
class nnlm:
def __init__(self):
self.feats_and_values ={}
self.wids = defaultdict(lambda: len(self.wids))
self.unigrams = {}
self.model = dy.Model()
self.EMB_SIZE = 1
self.HID_SIZE = 1
self.N = 3
M = self.model.add_lookup_parameters((len(self.wids), self.EMB_SIZE))
W_mh = self.model.add_parameters((self.HID_SIZE, self.EMB_SIZE * (self.N-1)))
b_hh = self.model.add_parameters((self.HID_SIZE))
W_hs = self.model.add_parameters((len(self.wids), self.HID_SIZE))
b_s = self.model.add_parameters((len(self.wids)))
def read_corpus(self, file):
print file
tokenizer = RegexpTokenizer(r'\w+')
f = open(file)
self.data_array_train = []
for line in f:
line = '<s> ' + line.split('\n')[0] + ' </s>'
words = line.split()
for word in words:
if word == '<s>' or word == '</s>':
pass
else:
word = tokenizer.tokenize(word)[0]
if word in self.unigrams:
self.unigrams[word] = self.unigrams[word] + 1
else:
self.unigrams[word] = 1
f.close()
self.assign_ids()
self.create_data(file)
self.save_wids()
return self.trigramfeaturedict, self.wids
def save_wids(self):
f = open('wids.txt','w')
for w in self.wids:
f.write(w + ' ' + str(self.wids[w]) + '\n')
f.close()
'''
from nltk.tokenize import RegexpTokenizer
def read_corpus(self, file):
print file
f = open(file)
tokenizer = RegexpTokenizer(r'\w+')
self.data_array_train = []
for line in f:
line = line.split('\n')[0]
line = ['<s>'] + tokenizer.tokenize(line) + ['</s>']
words = [w.lower() for w in line]
for word in words:
if word in self.unigrams:
self.unigrams[word] = self.unigrams[word] + 1
else:
self.unigrams[word] = 1
f.close()
self.assign_ids()
self.create_data(file)
self.save_wids()
return self.trigramfeaturedict, self.wids
def save_wids(self):
f = open('wids.txt','w')
for w in self.wids:
f.write(w + ' ' + str(self.wids[w]) + '\n')
f.close()
'''
def assign_ids(self):
self.wids["<unk>"] = 0
self.wids["<s>"] = 1
self.wids["</s>"] = 2
for w in self.unigrams:
# if self.unigrams[w] > 3:
self.wids[w]
# else:
# self.wids[w] = 0
#print "prinitn wids frm nnlm--------------", self.wids
return
def create_data(self, file):
self.accumulate_trigramfeatures(file)
def build_nnlm_graph(self, dictionary):
dy.renew_cg()
M = self.model.add_lookup_parameters((len(self.wids), self.EMB_SIZE))
W_mh = self.model.add_parameters((self.HID_SIZE, self.EMB_SIZE * (self.N-1)))
b_hh = self.model.add_parameters((self.HID_SIZE))
W_hs = self.model.add_parameters((len(self.wids), self.HID_SIZE))
b_s = self.model.add_parameters((len(self.wids)))
w_xh = dy.parameter(W_mh)
b_h = dy.parameter(b_hh)
W_hy = dy.parameter(W_hs)
b_y = dy.parameter(b_s)
errs = []
for context, next_word in dictionary:
#print context, next_word
k = M[self.wids[context.split()[0]]]
kk = M[self.wids[context.split()[1]]]
#print k , kk
#print k.value()
x = k.value() + kk.value()
#print x
h_val = dy.tanh(w_xh * dy.inputVector(x) + b_h)
y_val = W_hy * h_val + b_y
err = dy.pickneglogsoftmax(y_val,self.wids[next_word])
errs.append(err)
gen_err = dy.esum(errs)
return gen_err
def accumulate_trigramfeatures(self, file):
self.trigramfeaturedict = {}
g = open(file)
for line in g:
line = '<s> ' + line.split('\n')[0] + ' </s>'
line = line.split()
contexts = zip(line[0:len(line)-2], line[1:len(line)-1], line[2:])
for prev_2, prev_1, current in contexts:
#print prev_2, prev_1, current
context = prev_2 + ' ' + prev_1
self.trigramfeaturedict[context] = current
g.close()
return
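# Note on build_nnlm_graph above: it re-adds its parameters on every call and
# feeds the two context embeddings through .value()/dy.inputVector, which keeps
# the embedding values out of the computation graph. A more conventional
# feed-forward trigram step keeps everything as DyNet expressions, sketched
# below with made-up dimensions (illustrative only, not a drop-in replacement):
def _nnlm_step_sketch():
    vocab_size, emb, hid = 50, 8, 16
    model = dy.Model()
    M = model.add_lookup_parameters((vocab_size, emb))
    W_mh = model.add_parameters((hid, emb * 2))
    b_hh = model.add_parameters((hid,))
    W_hs = model.add_parameters((vocab_size, hid))
    b_s = model.add_parameters((vocab_size,))
    dy.renew_cg()
    w_prev2, w_prev1, w_next = 3, 7, 11                 # trigram word ids
    x = dy.concatenate([M[w_prev2], M[w_prev1]])        # embeddings stay in the graph
    h = dy.tanh(dy.parameter(W_mh) * x + dy.parameter(b_hh))
    y = dy.parameter(W_hs) * h + dy.parameter(b_s)
    return dy.pickneglogsoftmax(y, w_next)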
class loglinearlm:
def __init__(self):
self.feats_and_values ={}
self.wids = defaultdict(lambda: len(self.wids))
def read_corpus(self, file):
print file
f = open(file)
self.data_array_train = []
for line in f:
line = '<s> ' + line.split('\n')[0] + ' </s>'
self.data_array_train.append(line)
words = line.split()
for word in words:
wid = self.wids[word]
f.close()
#self.get_feature_vectors(file)
def accumulate_trigramfeatures(self, file):
self.trigramfeaturedict = {}
g = open(file)
for line in g:
line = '<s> ' + line.split('\n')[0] + ' </s>'
line = line.split()
contexts = zip(line[0:len(line)-2], line[1:len(line)-1], line[2:])
for prev_2, prev_1, current in contexts:
#print prev_2, prev_1, current
context = prev_2 + ' ' + prev_1
self.trigramfeaturedict[context] = current
g.close()
def print_words(self):
for wid in self.wids:
print wid, self.wids[wid]
def get_vocab_size(self):
return len(self.wids)
def calculate_feature_F1(self, file):
# This is a trigram context feature
local_features = []
local_words = []
feature_vector_prime = np.zeros(self.get_vocab_size())
g = open(file)
for line in g:
line = '<s> ' + line.split('\n')[0] + ' </s>'
#print line
line = line.split()
contexts = zip(line[0:len(line)-2], line[1:len(line)-1], line[2:])
for prev_2, prev_1, current in contexts:
feature_vector = feature_vector_prime.copy()  # copy the zero template so contexts do not share one array
#print prev_2, prev_1, current, self.get_vocab_size()
#print prev_2, self.wids[prev_2], feature_vector
prev_2_id = self.wids[prev_2]
feature_vector[prev_2_id] = 1.0
prev_1_id = self.wids[prev_1]
feature_vector[prev_1_id] = 1.0
local_features.append(feature_vector)
local_words.append(current)
#print feature_vector
#print features[0]
g.close()
return local_features, local_words
def sparse_features_to_dense_features(self, features):
ret = np.zeros(len(features))
#print ret
for f in features:
#print f
ret[f] += 1
return ret
def get_feature_vectors(self, file):
features = []
#features.append(self.sparse_features_to_dense_features(self.calculate_feature_F1(file)))
feats,words = self.calculate_feature_F1(file)
features.append(feats)
#features.append(calculate_feature_F2())
#features.append(calculate_feature_F3())
#features.append(calculate_feature_F4())
#return zip(features, words)
return zip(feats, words)
class ngramlm:
def __init__(self, order):
self.order = order
self.unigrams = {}
self.bigrams = {}
self.alpha_unk = 0.02
self.alpha_1 = 0.245
self.alpha_2 = 0.735
self.wids = defaultdict(lambda: len(self.wids))
def get_vocab_size(self):
return len(self.wids)
def store_counts(self, file):
self.get_ngrams(file, self.order)
if self.print_flag == 1:
print "Unique Unigrams like :", list(self.unigrams)[0], " are ", len(self.unigrams)
print "Unique Bigrams like :", list(self.bigrams)[0], " are ", len(self.bigrams)
def get_features_v1(self):
# This is a basic version which returns the wid of every word as the feature and its likelihood as the target
self.training_data = []
self.num_features = 1
c = 1
feature_vector = np.zeros(int(self.get_vocab_size())) # One hot k
print feature_vector
for line in self.data_array_train:
line = line.split()
for word in line:
feature_vector = np.zeros(int(self.get_vocab_size())) # One hot k
c = c + 1
wid = self.wids[word]
feature_vector[wid] = 1
if c % 1000 == 1:
print word, wid, feature_vector[wid]
self.training_data.append(feature_vector)
return
def feature_function(self, ctxt):
features = []
features.append(self.calculate_wid(ctxt))
return features
#def get_likelihood(self, word):
def calculate_wid(self, ctxt):
return self.wids[ctxt]
def read_corpus(self, file):
# for each line in the file, split the words and turn them into IDs
print file
f = open(file)
self.data_array_train = []
for line in f:
line = line.split('\n')[0]
self.data_array_train.append(line)
words = line.split()
for word in words:
wid = self.wids[word]
def print_words(self):
for wid in self.wids:
print wid, self.wids[wid]
def print_dicts(self):
print "Printing unigrams"
for k in self.unigrams:
print k,self.unigrams[k]
print "Printing bigrams"
for k in self.bigrams:
print k, self.bigrams[k]
def save_dicts(self):
with open('unigrams.pkl', 'wb') as f:
pickle.dump(self.unigrams, f, pickle.HIGHEST_PROTOCOL)
with open('bigrams.pkl', 'wb') as f:
pickle.dump(self.bigrams, f, pickle.HIGHEST_PROTOCOL)
def load_dicts(self):
with open('unigrams.pkl', 'rb') as f:
self.unigrams = pickle.load(f)
with open('bigrams.pkl', 'rb') as f:
self.bigrams = pickle.load(f)
def get_counts(self, file):
self.print_flag = 1
self.store_counts(file)
# Calculates n-grams from a line
def ngrams(self,line, n):
lst = line.split()
output = {}
for i in range(len(lst) -n + 1):
g = ' '.join(lst[i:i+n])
output.setdefault(g, 0)
output[g] += 1
return output
def combine_dicts(self, base_dict, small_dict):
for key in small_dict:
if base_dict.has_key(key):
base_dict[key] = int(base_dict[key]) + int(small_dict[key])
else:
base_dict[key] = int(small_dict[key])
return base_dict
# Calculates n grams from a file
def get_ngrams(self, file, count):
f = open(file)
for line in f:
line = '<s> ' + line.split('\n')[0] + ' </s>'
bigrams = self.ngrams(line,2)
self.bigrams = self.combine_dicts(self.bigrams,bigrams)
unigrams = self.ngrams(line,1)
self.unigrams = self.combine_dicts(self.unigrams,unigrams)
self.remove_singletons(self.unigrams)
def remove_singletons(self, base_dict):
for key in base_dict:
#print key, base_dict[key]
if base_dict[key] < 2:
#print key
base_dict[key] = 0
return
def eqn8(self, strng, print_flag):
l = len(strng.split())
if l == 1:
estimate = (1 - self.alpha_1) * (float(self.unigrams[strng]) / sum(self.unigrams.values())) + self.alpha_unk * np.exp(1e-7)
return estimate
else:
c = strng
#strng = strng.split()[:-1]
#p = ' '.join(tk for tk in strng)
p = c.split()[-1]
if print_flag ==1:
print "Bigram is ", c
print "Unigram is ", p
#print (1 - self.alpha_2) * (float(self.bigrams[c]) / self.bigrams.values()))
#print
estimate = (1 - self.alpha_2) * (float(self.bigrams[c]) / sum(self.bigrams.values())) + (1 - self.alpha_1) * (float(self.unigrams[p]) / sum(self.unigrams.values())) + self.alpha_unk * np.exp(1e-7)
return estimate
def get_file_perplexity(self, file):
f = open(file)
print_flag = 0
self.num_sentences = 0
self.num_oovs = 0
self.num_words = 0
self.logprob = 0
arr = []
for line in f:
line = line.split('\n')[0].lower()
#line = '<s> ' + line.split('\n')[0] + ' </s>'
ppl_sentence = self.get_sentence_perplexity(line,0)
if print_flag ==1:
print line, ppl_sentence, '\n'
arr.append(ppl_sentence)
#print np.mean(arr)
log_arr = np.log(arr)
print log_arr
ml_corpus = -1.0 * np.sum(log_arr) * 1.0/len(arr)
print np.exp(ml_corpus)
print 'Sentences: ', self.num_sentences
print 'Words: ', self.num_words
print 'OOVs: ' , self.num_oovs
print 'Log probability: ', self.logprob
self.perplexity = np.exp( -1.0 * self.logprob / ( self.num_words + self.num_sentences - self.num_oovs) * 2.71) # SRILM constant
print "Perplexity over corpus is: ", self.perplexity
def get_sentence_perplexity(self, string, print_flag):
#print len(string)
num_tokens = 0
num_oovs = 0
if len(string.split()) < 2:
print "The Sentence you gave me is very short"
return -1.0 * np.log(self.eqn8(string,0) / 2)
else:
mle_sentence = np.log(1.0)
line = string.split('\n')[0] + ' </s>'
length_line = len(string.split())
line = line.split()
b = 0
while b < len(line) - 1:
if print_flag ==1:
print "The value of b is ", b
kv = line[b] + ' ' + line[b+1]
if print_flag ==1:
print "I am looking for ", kv
if line[b+1] == '</s>':
kv = line[b]
if print_flag ==1:
print "I am looking for ", kv
if kv in self.bigrams and self.bigrams[kv] > 0:
if print_flag ==1:
print "Found ",kv , " in bigrams"
mle = self.eqn8(kv,0)
length_gram = 2
else:
if print_flag ==1:
print "I did not find ", kv, " in bigrams"
kv = line[b]
if print_flag ==1:
print "Now, I am searching for ", kv
if kv in self.unigrams and self.unigrams[kv] > 0:
if print_flag ==1:
print "Found ",kv , " in unigrams"
mle = self.eqn8(kv,0)
length_gram = 1
else:
if print_flag ==1:
print "I did not find ", kv, " in unigrams or it was a singleton. I think its an UNK"
kv = line[b]
mle = self.alpha_unk * np.exp(1e-7)
length_gram = 1
num_oovs = num_oovs + 1
b = b + length_gram
num_tokens = num_tokens + 1
mle_sentence = mle_sentence + np.log(mle)
self.num_oovs += num_oovs
self.num_sentences += 1
self.num_words += length_line
self.logprob += mle_sentence
print_flag = 0
mle_sentence_old = mle_sentence
mle_sentence = mle_sentence * (- 1.0 / (length_line + 1 +1 - num_oovs ) )
ppl_sentence = np.exp(mle_sentence * 2.3)
if print_flag ==1:
print "MLE of sentence is ", mle_sentence_old, " and PPL of sentence is ", ppl_sentence, " number of words: ", length_line, " number of OOVs: " , num_oovs
g = open('t','w')
g.write(string + '\n')
g.close()
cmd = 'ngram -lm ../data/en-de/01_srilm_bigram.model -ppl t'
os.system(cmd)
print '\n\n'
print_flag = 0
return ppl_sentence
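# For comparison with eqn8/get_sentence_perplexity above: a conventional
# linearly interpolated bigram estimate looks like the sketch below. This is
# not the exact weighting eqn8 uses; the lambdas and unknown-word mass are
# illustrative values only.
def _interpolated_bigram_prob(bigrams, unigrams, prev_word, word,
                              lambda_bi=0.6, lambda_uni=0.35, p_unk=1e-7):
    total_uni = float(sum(unigrams.values())) or 1.0
    p_uni = unigrams.get(word, 0) / total_uni
    context_count = float(unigrams.get(prev_word, 0))
    bigram_count = bigrams.get(prev_word + ' ' + word, 0)
    p_bi = bigram_count / context_count if context_count else 0.0
    return lambda_bi * p_bi + lambda_uni * p_uni + (1.0 - lambda_bi - lambda_uni) * p_unk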
|
|
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from fnmatch import fnmatch
import json
import os
from streamalert.shared.config import firehose_alerts_bucket
from streamalert.shared.logger import get_logger
from streamalert.shared.utils import get_data_file_format
from streamalert_cli.athena.handler import create_table, create_log_tables
from streamalert_cli.helpers import check_credentials, continue_prompt, run_command
from streamalert_cli.manage_lambda.deploy import deploy
from streamalert_cli.terraform.generate import terraform_generate_handler
from streamalert_cli.terraform.helpers import terraform_check, terraform_runner
from streamalert_cli.utils import (
add_clusters_arg,
CLICommand,
set_parser_epilog,
UniqueSortedListAction,
)
LOGGER = get_logger(__name__)
class TerraformInitCommand(CLICommand):
description = 'Initialize StreamAlert infrastructure'
@classmethod
def setup_subparser(cls, subparser):
"""Manage.py init takes no arguments"""
@classmethod
def handler(cls, options, config):
"""Initialize infrastructure using Terraform
Args:
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
LOGGER.info('Initializing StreamAlert')
# generate init Terraform files
if not terraform_generate_handler(config=config, init=True):
return False
LOGGER.info('Initializing Terraform')
if not run_command(['terraform', 'init'], cwd=config.build_directory):
return False
# build init infrastructure
LOGGER.info('Building initial infrastructure')
init_targets = [
'aws_s3_bucket.lambda_source', 'aws_s3_bucket.logging_bucket',
'aws_s3_bucket.streamalert_secrets', 'aws_s3_bucket.terraform_remote_state',
'aws_s3_bucket.streamalerts',
'aws_kms_key.server_side_encryption', 'aws_kms_alias.server_side_encryption',
'aws_kms_key.streamalert_secrets', 'aws_kms_alias.streamalert_secrets',
'module.streamalert_athena',  # required for the alerts table
'aws_dynamodb_table.terraform_remote_state_lock'
]
# this bucket must exist before the log tables can be created, but
# shouldn't be created unless the firehose is enabled
if config['global']['infrastructure'].get('firehose', {}).get('enabled'):
init_targets.append('aws_s3_bucket.streamalert_data')
if not terraform_runner(config, targets=init_targets):
LOGGER.error('An error occurred while running StreamAlert init')
return False
# generate the main.tf with remote state enabled
LOGGER.info('Configuring Terraform Remote State')
if not terraform_generate_handler(config=config, check_tf=False, check_creds=False):
return False
if not run_command(['terraform', 'init'], cwd=config.build_directory):
return False
LOGGER.info('Deploying Lambda Functions')
functions = ['rule', 'alert', 'alert_merger', 'athena', 'classifier']
deploy(config, functions)
# we need to manually create the streamalerts table since terraform does not support this
# See: https://github.com/terraform-providers/terraform-provider-aws/issues/1486
if get_data_file_format(config) == 'json':
# Terraform v0.12 supports creating Athena tables, but we only use the
# terraform aws_glue_catalog_table resource to create tables when the data
# file_format is set to "parquet" in "athena_partitioner_config".
#
# For the "json" file_format, we continue to use an Athena DDL query to
# create tables. This capability will be phased out in a future release,
# because we want users to take advantage of parquet performance.
alerts_bucket = firehose_alerts_bucket(config)
create_table('alerts', alerts_bucket, config)
# Create the glue catalog tables for the enabled logs
if not create_log_tables(config=config):
return False
LOGGER.info('Building remaining infrastructure')
return terraform_runner(config, refresh=False)
class TerraformBuildCommand(CLICommand):
description = 'Run terraform against StreamAlert modules, optionally targeting specific modules'
@classmethod
def setup_subparser(cls, subparser):
"""Add build subparser: manage.py build [options]"""
set_parser_epilog(
subparser,
epilog=(
'''\
Example:
manage.py build --target alert_processor_lambda
'''
)
)
_add_default_tf_args(subparser, add_cluster_args=False)
@classmethod
def handler(cls, options, config):
"""Run Terraform with an optional set of targets and clusters
Args:
options (argparse.Namespace): Parsed arguments from manage.py
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
if not terraform_generate_handler(config=config):
return False
# Will create log tables only when file_format set to "json" and return erlier if
# log tables creation failed.
# This capabity will be faded out in the future release.
if get_data_file_format(config) == 'json' and not create_log_tables(config=config):
return False
target_modules, valid = _get_valid_tf_targets(config, options.target)
if not valid:
return False
return terraform_runner(config, targets=target_modules if target_modules else None)
class TerraformDestroyCommand(CLICommand):
description = 'Destroy StreamAlert infrastructure, optionally targeting specific modules'
@classmethod
def setup_subparser(cls, subparser):
"""Add destroy subparser: manage.py destroy [options]"""
set_parser_epilog(
subparser,
epilog=(
'''\
Example:
manage.py destroy --target aws_s3_bucket-streamalerts
'''
)
)
_add_default_tf_args(subparser)
@classmethod
def handler(cls, options, config):
"""Use Terraform to destroy any existing infrastructure
Args:
options (argparse.Namespace): Parsed arguments from manage.py
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
# Check for valid credentials
if not check_credentials():
return False
# Verify terraform is installed
if not terraform_check():
return False
# Ask for approval here since multiple Terraform commands may be necessary
if not continue_prompt(message='Are you sure you want to destroy?'):
return False
if options.target:
target_modules, valid = _get_valid_tf_targets(config, options.target)
if not valid:
return False
return terraform_runner(
config,
destroy=True,
auto_approve=True,
targets=target_modules if target_modules else None
)
# Migrate back to local state so Terraform can successfully
# destroy the S3 bucket used by the backend.
# Do not check for terraform or aws creds again since these were checked above
if not terraform_generate_handler(config=config, init=True, check_tf=False,
check_creds=False):
return False
if not run_command(['terraform', 'init'], cwd=config.build_directory):
return False
# Destroy all of the infrastructure
return terraform_runner(config, destroy=True, auto_approve=True)
class TerraformListTargetsCommand(CLICommand):
description = 'List available Terraform modules to be used for targeted builds'
@classmethod
def setup_subparser(cls, subparser):
"""Manage.py list-targets does not take any arguments"""
@classmethod
def handler(cls, options, config):
"""Print the available terraform targets
Args:
config (CLIConfig): Loaded StreamAlert config
Returns:
bool: False if errors occurred, True otherwise
"""
modules = get_tf_modules(config, True)
if not modules:
return False
max_resource_len = max(len(value) for values in modules.values() for value in values) + 8
row_format_str = '{prefix:<{pad}}{value}'
header = row_format_str.format(prefix='Target', pad=max_resource_len, value='Type')
print(header)
print('-' * (len(header) + 4))
for value_type in sorted(modules):
for item in sorted(modules[value_type]):
print(row_format_str.format(prefix=item, pad=max_resource_len, value=value_type))
return True
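# Rendered output illustration (hypothetical targets; the actual padding width
# depends on the longest name found in the generated Terraform files):
#
#   Target                              Type
#   --------------------------------------------
#   alert_processor_lambda              module
#   aws_s3_bucket.streamalerts          resource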
def _add_default_tf_args(tf_parser, add_cluster_args=True):
"""Add the default terraform parser options"""
tf_parser.add_argument(
'-t',
'--target',
metavar='TARGET',
help=(
'One or more Terraform module names to target. Use `list-targets` for a list '
'of available targets'
),
action=UniqueSortedListAction,
default=[],
nargs='+'
)
if add_cluster_args:
# Add the option to specify cluster(s)
add_clusters_arg(tf_parser)
def _get_valid_tf_targets(config, targets):
all_matches = set()
if not targets:
return all_matches, True # Empty targets is acceptable
modules = get_tf_modules(config)
if not modules:
return all_matches, False
for target in targets:
matches = {
'{}.{}'.format(value_type, value) if value_type == 'module' else value
for value_type, values in modules.items()
for value in values
if fnmatch(value, target)
}
if not matches:
LOGGER.error('Invalid terraform target supplied: %s', target)
continue
all_matches.update(matches)
if not all_matches:
LOGGER.error(
'No terraform targets found matching supplied target(s): %s',
', '.join(sorted(targets))
)
return all_matches, False
return all_matches, True
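# Wildcard illustration (a standalone sketch with hypothetical module names, not
# from a real config): the fnmatch call above gives shell-style matching, so a
# target of 'alert_*' selects the 'module.alert_processor_lambda' entry and
# leaves the rest untouched.
def _example_target_matching():
    from fnmatch import fnmatch as _fnmatch  # local import keeps the sketch self-contained
    module_names = {'alert_processor_lambda', 'rule_promotion'}
    return {
        'module.{}'.format(name)
        for name in module_names
        if _fnmatch(name, 'alert_*')
    }  # -> {'module.alert_processor_lambda'}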
def get_tf_modules(config, generate=False):
if generate:
if not terraform_generate_handler(config=config, check_tf=False, check_creds=False):
return False
modules = set()
resources = set()
for root, _, files in os.walk(config.build_directory):
for file_name in files:
path = os.path.join(root, file_name)
if path.endswith('.tf.json'):
with open(path, 'r') as tf_file:
tf_data = json.load(tf_file)
modules.update(tf_data.get('module', {}))
resources.update(
'{}.{}'.format(resource, value)
for resource, values in tf_data.get('resource', {}).items()
for value in values
)
return {'module': modules, 'resource': resources}
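# Shape illustration (hypothetical data): a single .tf.json file equivalent to the
# dict below would yield one module name and one 'type.name' resource string from
# get_tf_modules() above.
def _example_tf_modules_shape():
    tf_data = {
        'module': {'alert_processor_lambda': {}},
        'resource': {'aws_s3_bucket': {'streamalerts': {}}},
    }
    modules = set(tf_data.get('module', {}))
    resources = {
        '{}.{}'.format(resource, value)
        for resource, values in tf_data.get('resource', {}).items()
        for value in values
    }
    return {'module': modules, 'resource': resources}
    # -> {'module': {'alert_processor_lambda'}, 'resource': {'aws_s3_bucket.streamalerts'}}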
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from webob import exc
from magnum.api.controllers import base
from magnum.api.controllers import versions
from magnum.api import versioned_method
from magnum.tests import base as test_base
class TestVersion(test_base.TestCase):
def setUp(self):
super(TestVersion, self).setUp()
self.a = versions.Version(
{versions.Version.string: "container-infra 2.0"},
"container-infra 2.0", "container-infra 2.1")
self.b = versions.Version(
{versions.Version.string: "container-infra 2.0"},
"container-infra 2.0", "container-infra 2.1")
self.c = versions.Version(
{versions.Version.string: "container-infra 2.2"},
"container-infra 2.0", "container-infra 2.2")
def test_is_null_true(self):
self.a.major = 0
self.a.minor = 0
self.assertEqual(0 == 0, self.a.is_null())
def test_is_null_false(self):
self.assertEqual(2 == 0, self.a.is_null())
def test__eq__with_equal(self):
self.assertEqual(2 == 2, self.a == self.b)
def test__eq__with_unequal(self):
self.a.major = 1
self.assertEqual(1 == 2, self.a == self.b)
def test__ne__with_equal(self):
self.assertEqual(2 != 2, self.a != self.b)
def test__ne__with_unequal(self):
self.a.major = 1
self.assertEqual(1 != 2, self.a != self.b)
def test__lt__with_higher_major_version(self):
self.a.major = 2
self.b.major = 1
self.assertEqual(2 < 1, self.a < self.b)
def test__lt__with_lower_major_version(self):
self.a.major = 1
self.b.major = 2
self.assertEqual(1 < 2, self.a < self.b)
def test__lt__with_higher_minor_version(self):
self.a.minor = 2
self.b.minor = 1
self.assertEqual(self.a.major, self.b.major)
self.assertEqual(2 < 1, self.a < self.b)
def test__lt__with_lower_minor_version(self):
self.a.minor = 1
self.b.minor = 2
self.assertEqual(self.a.major, self.b.major)
self.assertEqual(1 < 2, self.a < self.b)
def test__gt__with_higher_major_version(self):
self.a.major = 2
self.b.major = 1
self.assertEqual(2 > 1, self.a > self.b)
def test__gt__with_lower_major_version(self):
self.a.major = 1
self.b.major = 2
self.assertEqual(1 > 2, self.a > self.b)
def test__gt__with_higher_minor_version(self):
self.a.minor = 2
self.b.minor = 1
self.assertEqual(self.a.major, self.b.major)
self.assertEqual(2 > 1, self.a > self.b)
def test__gt__with_lower_minor_version(self):
self.a.minor = 1
self.b.minor = 2
self.assertEqual(self.a.major, self.b.major)
self.assertEqual(1 > 2, self.a > self.b)
def test__le__with_equal(self):
self.assertEqual(2 == 2, self.a <= self.b)
def test__le__with_higher_version(self):
self.a.major = 3
self.assertEqual(3 <= 2, self.a <= self.b)
def test__le__with_lower_version(self):
self.a.major = 1
self.assertEqual(1 <= 2, self.a <= self.b)
def test__ge__with_equal(self):
self.assertEqual(2 >= 2, self.a >= self.b)
def test__ge__with_higher_version(self):
self.a.major = 3
self.assertEqual(3 >= 2, self.a >= self.b)
def test__ge__with_lower_version(self):
self.a.major = 1
self.assertEqual(1 >= 2, self.a >= self.b)
def test_matches_start_version(self):
self.assertEqual(0 >= 0, self.a.matches(self.b, self.c))
def test_matches_end_version(self):
self.a.minor = 2
self.assertEqual(2 <= 2, self.a.matches(self.b, self.c))
def test_matches_valid_version(self):
self.a.minor = 1
self.assertEqual(0 <= 1 <= 2, self.a.matches(self.b, self.c))
def test_matches_version_too_high(self):
self.a.minor = 3
self.assertEqual(0 <= 3 <= 2, self.a.matches(self.b, self.c))
def test_matches_version_too_low(self):
self.a.major = 1
self.assertEqual(2 <= 1 <= 2, self.a.matches(self.b, self.c))
def test_matches_null_version(self):
self.a.major = 0
self.a.minor = 0
self.assertRaises(ValueError, self.a.matches, self.b, self.c)
@mock.patch('magnum.api.controllers.versions.Version.parse_headers')
def test_init(self, mock_parse):
a = mock.Mock()
b = mock.Mock()
mock_parse.return_value = (a, b)
v = versions.Version('test', 'foo', 'bar')
mock_parse.assert_called_with('test', 'foo', 'bar')
self.assertEqual(a, v.major)
self.assertEqual(b, v.minor)
@mock.patch('magnum.api.controllers.versions.Version.parse_headers')
def test_repr(self, mock_parse):
mock_parse.return_value = (123, 456)
v = versions.Version('test', mock.ANY, mock.ANY)
result = "%s" % v
self.assertEqual('123.456', result)
@mock.patch('magnum.api.controllers.versions.Version.parse_headers')
def test_repr_with_strings(self, mock_parse):
mock_parse.return_value = ('abc', 'def')
v = versions.Version('test', mock.ANY, mock.ANY)
result = "%s" % v
self.assertEqual('abc.def', result)
def test_parse_headers_ok(self):
version = versions.Version.parse_headers(
{versions.Version.string: 'container-infra 123.456'},
mock.ANY, mock.ANY)
self.assertEqual((123, 456), version)
def test_parse_headers_latest(self):
for s in ['magnum latest', 'magnum LATEST']:
version = versions.Version.parse_headers(
{versions.Version.string: s}, mock.ANY, 'container-infra 1.9')
self.assertEqual((1, 9), version)
def test_parse_headers_bad_length(self):
self.assertRaises(
exc.HTTPNotAcceptable,
versions.Version.parse_headers,
{versions.Version.string: 'container-infra 1'},
mock.ANY,
mock.ANY)
self.assertRaises(
exc.HTTPNotAcceptable,
versions.Version.parse_headers,
{versions.Version.string: 'container-infra 1.2.3'},
mock.ANY,
mock.ANY)
def test_parse_no_header(self):
# this asserts that the minimum version string is applied
version = versions.Version.parse_headers({}, 'container-infra 1.1',
'container-infra 1.5')
self.assertEqual((1, 1), version)
def test_parse_incorrect_service_type(self):
self.assertRaises(
exc.HTTPNotAcceptable,
versions.Version.parse_headers,
{versions.Version.string: '1.1'},
'container-infra 1.1',
'container-infra 1.1')
self.assertRaises(
exc.HTTPNotAcceptable,
versions.Version.parse_headers,
{versions.Version.string: 'nova 1.1'},
'container-infra 1.1',
'container-infra 1.1')
class TestController(test_base.TestCase):
def test_check_for_versions_intersection_negative(self):
func_list = \
[versioned_method.VersionedMethod('foo',
versions.Version('', '', '',
'2.1'),
versions.Version('', '', '',
'2.4'),
None),
versioned_method.VersionedMethod('foo',
versions.Version('', '', '',
'2.11'),
versions.Version('', '', '',
'3.1'),
None),
versioned_method.VersionedMethod('foo',
versions.Version('', '', '',
'2.8'),
versions.Version('', '', '',
'2.9'),
None),
]
result = base.Controller.check_for_versions_intersection(
func_list=func_list)
self.assertFalse(result)
func_list = \
[versioned_method.VersionedMethod('foo',
versions.Version('', '', '',
'2.12'),
versions.Version('', '', '',
'2.14'),
None),
versioned_method.VersionedMethod('foo',
versions.Version('', '', '',
'3.0'),
versions.Version('', '', '',
'3.4'),
None)
]
result = base.Controller.check_for_versions_intersection(
func_list=func_list)
self.assertFalse(result)
def test_check_for_versions_intersection_positive(self):
func_list = \
[versioned_method.VersionedMethod('foo',
versions.Version('', '', '',
'2.1'),
versions.Version('', '', '',
'2.4'),
None),
versioned_method.VersionedMethod('foo',
versions.Version('', '', '',
'2.3'),
versions.Version('', '', '',
'3.1'),
None),
versioned_method.VersionedMethod('foo',
versions.Version('', '', '',
'2.9'),
versions.Version('', '', '',
'3.4'),
None)
]
result = base.Controller.check_for_versions_intersection(
func_list=func_list)
self.assertTrue(result)
def test_check_for_versions_intersection_shared_start_end(self):
func_list = \
[versioned_method.VersionedMethod('foo',
versions.Version('', '', '',
'1.1'),
versions.Version('', '', '',
'1.1'),
None),
versioned_method.VersionedMethod('foo',
versions.Version('', '', '',
'1.1'),
versions.Version('', '', '',
'1.2'),
None)
]
result = base.Controller.check_for_versions_intersection(
func_list=func_list)
self.assertTrue(result)
def test_api_version_decorator(self):
class MyController(base.Controller):
@base.Controller.api_version('1.0', '1.1')
def testapi1(self):
return 'API1_1.0_1.1'
@base.Controller.api_version('1.2', '1.3') # noqa
def testapi1(self): # noqa
return 'API1_1.2_1.3'
@base.Controller.api_version('2.1', '2.2')
def testapi2(self):
return 'API2_2.1_2.2'
@base.Controller.api_version('1.0', '2.0') # noqa
def testapi2(self): # noqa
return 'API2_1.0_2.0'
controller = MyController()
# verify list was added to controller
self.assertIsNotNone(controller.versioned_methods)
api1_list = controller.versioned_methods['testapi1']
api2_list = controller.versioned_methods['testapi2']
# verify versioned_methods reordered correctly
self.assertEqual('1.2', str(api1_list[0].start_version))
self.assertEqual('1.3', str(api1_list[0].end_version))
self.assertEqual('1.0', str(api1_list[1].start_version))
self.assertEqual('1.1', str(api1_list[1].end_version))
# verify stored methods can be called
result = api1_list[0].func(controller)
self.assertEqual('API1_1.2_1.3', result)
result = api1_list[1].func(controller)
self.assertEqual('API1_1.0_1.1', result)
# verify versioned_methods reordered correctly
self.assertEqual('2.1', str(api2_list[0].start_version))
self.assertEqual('2.2', str(api2_list[0].end_version))
self.assertEqual('1.0', str(api2_list[1].start_version))
self.assertEqual('2.0', str(api2_list[1].end_version))
# Verify stored methods can be called
result = api2_list[0].func(controller)
self.assertEqual('API2_2.1_2.2', result)
result = api2_list[1].func(controller)
self.assertEqual('API2_1.0_2.0', result)
@mock.patch('pecan.request')
def test_controller_get_attribute(self, mock_pecan_request):
class MyController(base.Controller):
@base.Controller.api_version('1.0', '1.1')
def testapi1(self):
return 'API1_1.0_1.1'
@base.Controller.api_version('1.2', '1.3') # noqa
def testapi1(self): # noqa
return 'API1_1.2_1.3'
controller = MyController()
mock_pecan_request.version = versions.Version("", "",
"", "1.2")
controller.request = mock_pecan_request
method = controller.__getattribute__('testapi1')
result = method()
self.assertEqual('API1_1.2_1.3', result)
@mock.patch('pecan.request')
def test_controller_get_attr_version_not_found(self,
mock_pecan_request):
class MyController(base.Controller):
@base.Controller.api_version('1.0', '1.1')
def testapi1(self):
return 'API1_1.0_1.1'
@base.Controller.api_version('1.3', '1.4') # noqa
def testapi1(self): # noqa
return 'API1_1.3_1.4'
controller = MyController()
mock_pecan_request.version = versions.Version("", "",
"", "1.2")
controller.request = mock_pecan_request
self.assertRaises(exc.HTTPNotAcceptable,
controller.__getattribute__, 'testapi1')
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_virtual_address
short_description: Manage LTM virtual addresses on a BIG-IP
description:
- Manage LTM virtual addresses on a BIG-IP system.
version_added: "1.0.0"
options:
name:
description:
- Name of the virtual address.
- If this parameter is not provided, the system uses the value of C(address).
type: str
address:
description:
- Specifies the virtual address. This value cannot be modified after it is set.
- If you never created a virtual address, but did create virtual servers,
a virtual address for each virtual server was created automatically. The name
of this virtual address is its IP address value.
type: str
netmask:
description:
- Specifies the netmask of the provided virtual address. This value cannot be
modified after it is set.
- When creating a new virtual address, if this parameter is not specified, the
default value is C(255.255.255.255) for IPv4 addresses and
C(ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff) for IPv6 addresses.
type: str
connection_limit:
description:
- Specifies the number of concurrent connections the system
allows on this virtual address.
type: int
arp:
description:
- Specifies whether the system accepts ARP requests.
- When C(no), specifies the system does not accept ARP requests.
- When C(yes), the system accepts ARP requests for the virtual address.
- Both ARP and ICMP Echo must be disabled in order for forwarding
virtual servers using that virtual address to forward ICMP packets.
- When creating a new virtual address, if this parameter is not specified,
the default value is C(yes).
type: bool
auto_delete:
description:
- Specifies whether the system automatically deletes the virtual
address with the deletion of the last associated virtual server.
When C(no), specifies the system leaves the virtual
address, even when all associated virtual servers have been deleted.
When creating the virtual address, the default value is C(yes).
type: bool
icmp_echo:
description:
- Specifies how the system sends responses to (ICMP) echo requests
on a per-virtual address basis for enabling route advertisement.
When C(enabled), the BIG-IP system intercepts ICMP echo request
packets and responds to them directly. When C(disabled), the BIG-IP
system passes ICMP echo requests through to the backend servers.
When C(selective), causes the BIG-IP system to internally enable or
disable responses based on virtual server state; C(when_any_available),
C(when_all_available), or C(always), regardless of the state of any
virtual servers.
type: str
choices:
- enabled
- disabled
- selective
state:
description:
- The virtual address state. If C(absent), the system makes an attempt
to delete the virtual address. This will only succeed if this
virtual address is not in use by a virtual server. C(present) creates
the virtual address and enables it. If C(enabled), enables the virtual
address if it exists. If C(disabled), creates the virtual address if
needed, and sets the state to C(disabled).
type: str
choices:
- present
- absent
- enabled
- disabled
default: present
availability_calculation:
description:
- Specifies which routes of the virtual address the system advertises.
When C(when_any_available), advertises the route when any virtual
server is available. When C(when_all_available), advertises the
route when all virtual servers are available. When C(always), always
advertises the route regardless of the virtual servers available.
type: str
choices:
- always
- when_all_available
- when_any_available
aliases: ['advertise_route']
route_advertisement:
description:
- Specifies whether the system uses route advertisement for this
virtual address.
- When disabled, the system does not advertise routes for this virtual address.
- The majority of these options are only supported on versions 13.0.0-HF1 or
later. On versions prior to this, all choices except C(disabled)
translate to C(enabled).
- When C(always), the BIG-IP system always advertises the route for the
virtual address, regardless of availability status. This requires an C(enabled)
virtual address.
- When C(enabled), the BIG-IP system advertises the route for the available
virtual address, based on the calculation method in the availability calculation.
- When C(disabled), the BIG-IP system does not advertise the route for the virtual
address, regardless of the availability status.
- When C(selective), you can also selectively enable ICMP echo responses, which
causes the BIG-IP system to internally enable or disable responses based on
virtual server state.
- When C(any), the BIG-IP system advertises the route for the virtual address
when any virtual server is available.
- When C(all), the BIG-IP system advertises the route for the virtual address
when all virtual servers are available.
type: str
choices:
- disabled
- enabled
- always
- selective
- any
- all
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
traffic_group:
description:
- The traffic group for the virtual address. When creating a new address,
if this value is not specified, the default is C(/Common/traffic-group-1).
type: str
route_domain:
description:
- The route domain of the C(address) you want to use.
- This value cannot be modified after it is set.
type: str
spanning:
description:
- Enables all BIG-IP systems in a device group to listen for and process traffic
on the same virtual address.
- Spanning for a virtual address occurs when you enable the C(spanning) option on a
device, and then sync the virtual address to the other members of the device group.
- Spanning also relies on the upstream router to distribute application flows to the
BIG-IP systems using ECMP routes. ECMP defines a route to the virtual address using
distinct Floating self-IP addresses configured on each BIG-IP system.
- You must also configure MAC masquerade addresses and disable C(arp) on the virtual
address when Spanning is enabled.
- When creating a new virtual address, if this parameter is not specified, the default
value is C(no).
type: bool
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add virtual address
bigip_virtual_address:
state: present
partition: Common
address: 10.10.10.10
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
- name: Enable route advertisement on the virtual address
bigip_virtual_address:
state: present
address: 10.10.10.10
route_advertisement: any
provider:
server: lb.mydomain.net
user: admin
password: secret
delegate_to: localhost
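# Illustrative sketch only (placeholder values): spanning requires ARP to be
# disabled on the virtual address, as described in the documentation above.
- name: Enable spanning on the virtual address with ARP disabled
  bigip_virtual_address:
    state: present
    address: 10.10.10.10
    arp: no
    spanning: yes
    provider:
      server: lb.mydomain.net
      user: admin
      password: secret
  delegate_to: localhost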
'''
RETURN = r'''
availability_calculation:
description: Specifies which routes of the virtual address the system advertises.
returned: changed
type: str
sample: always
auto_delete:
description: New setting for auto deleting virtual address.
returned: changed
type: bool
sample: yes
icmp_echo:
description: New ICMP echo setting applied to virtual address.
returned: changed
type: str
sample: disabled
connection_limit:
description: The new connection limit of the virtual address.
returned: changed
type: int
sample: 1000
netmask:
description: The netmask of the virtual address.
returned: created
type: str
sample: 255.255.255.255
arp:
description: The new way the virtual address handles ARP requests.
returned: changed
type: bool
sample: yes
address:
description: The address of the virtual address.
returned: created
type: str
sample: 10.10.10.10
state:
description: The new state of the virtual address.
returned: changed
type: str
sample: disabled
spanning:
description: Whether spanning is enabled or not.
returned: changed
type: str
sample: disabled
'''
from datetime import datetime
from distutils.version import LooseVersion
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ansible.module_utils.parsing.convert_bool import (
BOOLEANS_TRUE, BOOLEANS_FALSE
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, fq_name, flatten_boolean
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.ipaddress import (
is_valid_ip, compress_address
)
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'routeAdvertisement': 'route_advertisement_type',
'autoDelete': 'auto_delete',
'icmpEcho': 'icmp_echo',
'connectionLimit': 'connection_limit',
'serverScope': 'availability_calculation',
'mask': 'netmask',
'trafficGroup': 'traffic_group',
}
updatables = [
'route_advertisement_type',
'auto_delete',
'icmp_echo',
'connection_limit',
'arp',
'enabled',
'availability_calculation',
'traffic_group',
'spanning',
]
returnables = [
'route_advertisement_type',
'auto_delete',
'icmp_echo',
'connection_limit',
'netmask',
'arp',
'address',
'state',
'traffic_group',
'route_domain',
'spanning',
'availability_calculation',
]
api_attributes = [
'routeAdvertisement',
'autoDelete',
'icmpEcho',
'connectionLimit',
'advertiseRoute',
'arp',
'mask',
'enabled',
'serverScope',
'trafficGroup',
'spanning',
]
@property
def availability_calculation(self):
if self._values['availability_calculation'] is None:
return None
elif self._values['availability_calculation'] in ['any', 'when_any_available']:
return 'any'
elif self._values['availability_calculation'] in ['all', 'when_all_available']:
return 'all'
elif self._values['availability_calculation'] in ['none', 'always']:
return 'none'
@property
def connection_limit(self):
if self._values['connection_limit'] is None:
return None
return int(self._values['connection_limit'])
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return 'yes'
elif self._values['enabled'] in BOOLEANS_TRUE:
return 'yes'
elif self._values['state'] == 'disabled':
return 'no'
elif self._values['enabled'] in BOOLEANS_FALSE:
return 'no'
else:
return None
@property
def netmask(self):
if self._values['netmask'] is None:
return None
if is_valid_ip(self._values['netmask']):
return self._values['netmask']
else:
raise F5ModuleError(
"The provided 'netmask' is not a valid IP address"
)
@property
def auto_delete(self):
result = flatten_boolean(self._values['auto_delete'])
if result == 'yes':
return 'true'
if result == 'no':
return 'false'
@property
def state(self):
if self.enabled == 'yes' and self._values['state'] != 'present':
return 'enabled'
elif self.enabled == 'no':
return 'disabled'
else:
return self._values['state']
@property
def traffic_group(self):
if self._values['traffic_group'] is None:
return None
else:
result = fq_name(self.partition, self._values['traffic_group'])
if result.startswith('/Common/'):
return result
else:
raise F5ModuleError(
"Traffic groups can only exist in /Common"
)
@property
def route_advertisement_type(self):
if self.route_advertisement:
return self.route_advertisement
else:
return self._values['route_advertisement_type']
@property
def route_advertisement(self):
if self._values['route_advertisement'] is None:
return None
version = tmos_version(self.client)
if LooseVersion(version) <= LooseVersion('13.0.0'):
if self._values['route_advertisement'] == 'disabled':
return 'disabled'
else:
return 'enabled'
else:
return self._values['route_advertisement']
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def address(self):
if self._values['address'] is None:
return None
if is_valid_ip(self._values['address']):
return compress_address(self._values['address'])
else:
raise F5ModuleError(
"The provided 'address' is not a valid IP address"
)
@property
def full_address(self):
if self.route_domain is not None:
return '{0}%{1}'.format(self.address, self.route_domain)
return self.address
@property
def name(self):
if self._values['name'] is None:
result = str(self.address)
if self.route_domain:
result = "{0}%{1}".format(result, self.route_domain)
else:
result = self._values['name']
return result
@property
def route_domain(self):
if self._values['route_domain'] is None:
return None
try:
return int(self._values['route_domain'])
except ValueError:
uri = "https://{0}:{1}/mgmt/tm/net/route-domain/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self._values['partition'], self._values['route_domain'])
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
raise F5ModuleError(
"The specified 'route_domain' was not found."
)
if resp.status == 404 or 'code' in response and response['code'] == 404:
raise F5ModuleError(
"The specified 'route_domain' was not found."
)
return int(response['id'])
@property
def arp(self):
result = flatten_boolean(self._values['arp'])
if result == 'yes':
return 'enabled'
if result == 'no':
return 'disabled'
@property
def spanning(self):
result = flatten_boolean(self._values['spanning'])
if result == 'yes':
return 'enabled'
if result == 'no':
return 'disabled'
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
raise
return result
class UsableChanges(Changes):
@property
def address(self):
if self._values['address'] is None:
return None
if self._values['route_domain'] is None:
return self._values['address']
result = "{0}%{1}".format(self._values['address'], self.route_domain)
return result
class ReportableChanges(Changes):
@property
def arp(self):
if self._values['arp'] == 'disabled':
return 'no'
elif self._values['arp'] == 'enabled':
return 'yes'
@property
def spanning(self):
if self._values['spanning'] == 'disabled':
return 'no'
elif self._values['spanning'] == 'enabled':
return 'yes'
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def traffic_group(self):
if self.want.traffic_group != self.have.traffic_group:
return self.want.traffic_group
@property
def spanning(self):
if self.want.spanning is None:
return None
if self.want.spanning != self.have.spanning:
return self.want.spanning
@property
def arp(self):
if self.want.arp is None:
return None
if self.want.arp != self.have.arp:
return self.want.arp
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.have = ApiParameters()
self.want = ModuleParameters(client=self.client, params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
if self.module._diff and self.have:
result['diff'] = self.make_diff()
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def _grab_attr(self, item):
result = dict()
updatables = Parameters.updatables
for k in updatables:
if getattr(item, k) is not None:
result[k] = getattr(item, k)
return result
def make_diff(self):
result = dict(before=self._grab_attr(self.have), after=self._grab_attr(self.want))
return result
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the virtual address")
return True
def create(self):
self._set_changed_options()
if self.want.traffic_group is None:
self.want.update({'traffic_group': '/Common/traffic-group-1'})
if self.want.arp is None:
self.want.update({'arp': True})
if self.want.spanning is None:
self.want.update({'spanning': False})
if self.want.netmask is None:
if is_valid_ip(self.want.address, type='ipv4'):
self.want.update({'netmask': '255.255.255.255'})
else:
self.want.update({'netmask': 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'})
if self.want.arp == 'enabled' and self.want.spanning == 'enabled':
raise F5ModuleError(
"'arp' and 'spanning' cannot both be enabled on virtual address."
)
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the virtual address")
def update(self):
self.have = self.read_current_from_device()
if self.want.netmask is not None:
if self.have.netmask != self.want.netmask:
raise F5ModuleError(
"The netmask cannot be changed. Delete and recreate "
"the virtual address if you need to do this."
)
if self.want.address is not None:
if self.have.address != self.want.full_address:
raise F5ModuleError(
"The address cannot be changed. Delete and recreate "
"the virtual address if you need to do this."
)
if self.changes.arp == 'enabled' and self.changes.spanning == 'enabled':
raise F5ModuleError(
"'arp' and 'spanning' cannot both be enabled on virtual address."
)
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/virtual-address/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/virtual-address/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/virtual-address/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
params['address'] = self.changes.address
uri = "https://{0}:{1}/mgmt/tm/ltm/virtual-address/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403, 409]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/virtual-address/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
state=dict(
default='present',
choices=['present', 'absent', 'disabled', 'enabled']
),
name=dict(),
address=dict(),
netmask=dict(),
connection_limit=dict(
type='int'
),
auto_delete=dict(
type='bool'
),
icmp_echo=dict(
choices=['enabled', 'disabled', 'selective'],
),
availability_calculation=dict(
choices=['always', 'when_all_available', 'when_any_available'],
aliases=['advertise_route']
),
traffic_group=dict(),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
route_domain=dict(),
spanning=dict(type='bool'),
route_advertisement=dict(
choices=[
'disabled',
'enabled',
'always',
'selective',
'any',
'all',
]
),
arp=dict(type='bool'),
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.required_one_of = [
['name', 'address']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
required_one_of=spec.required_one_of
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
|
from __future__ import division
import math
from sorl.thumbnail.engines.base import EngineBase
from sorl.thumbnail.compat import BufferIO
try:
from PIL import Image, ImageFile, ImageDraw, ImageChops, ImageFilter
except ImportError:
import Image, ImageFile, ImageDraw, ImageChops, ImageFilter
def round_corner(radius, fill):
"""Draw a round corner"""
corner = Image.new('L', (radius, radius), 0) # (0, 0, 0, 0))
draw = ImageDraw.Draw(corner)
draw.pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=fill)
return corner
def round_rectangle(size, radius, fill):
"""Draw a rounded rectangle"""
width, height = size
rectangle = Image.new('L', size, 255) # fill
corner = round_corner(radius, 255) # fill
rectangle.paste(corner, (0, 0))
rectangle.paste(corner.rotate(90),
(0, height - radius)) # Rotate the corner and paste it
rectangle.paste(corner.rotate(180), (width - radius, height - radius))
rectangle.paste(corner.rotate(270), (width - radius, 0))
return rectangle
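# Usage sketch (hypothetical 100x100 solid image): the 'L' mask produced by
# round_rectangle() is applied as an alpha channel, which is what
# Engine._rounded() below does with the thumbnail image.
def _example_rounded_corners():
    im = Image.new('RGBA', (100, 100), (255, 0, 0, 255))
    mask = round_rectangle(im.size, 10, "unused-fill")  # fill argument is not used by the mask
    im.putalpha(mask)
    return im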
class GaussianBlur(ImageFilter.Filter):
name = "GaussianBlur"
def __init__(self, radius=2):
self.radius = radius
def filter(self, image):
return image.gaussian_blur(self.radius)
class Engine(EngineBase):
def get_image(self, source):
buffer = BufferIO(source.read())
return Image.open(buffer)
def get_image_size(self, image):
return image.size
def get_image_info(self, image):
return image.info or {}
def is_valid_image(self, raw_data):
buffer = BufferIO(raw_data)
try:
trial_image = Image.open(buffer)
trial_image.verify()
except Exception:
return False
return True
def _cropbox(self, image, x, y, x2, y2):
return image.crop((x, y, x2, y2))
def _orientation(self, image):
try:
exif = image._getexif()
except (AttributeError, IOError, KeyError, IndexError):
exif = None
if exif:
orientation = exif.get(0x0112)
if orientation == 2:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 3:
image = image.rotate(180)
elif orientation == 4:
image = image.transpose(Image.FLIP_TOP_BOTTOM)
elif orientation == 5:
image = image.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 6:
image = image.rotate(-90)
elif orientation == 7:
image = image.rotate(90).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 8:
image = image.rotate(90)
return image
def _colorspace(self, image, colorspace):
if colorspace == 'RGB':
if image.mode == 'RGBA':
return image # RGBA is just RGB + Alpha
if image.mode == 'LA' or (image.mode == 'P' and 'transparency' in image.info):
return image.convert('RGBA')
return image.convert('RGB')
if colorspace == 'GRAY':
return image.convert('L')
return image
def _remove_border(self, image, image_width, image_height):
image_entropy = self._get_image_entropy(image)
borders = {
'top': lambda iy, dy, y: (dy, dy + y),
'right': lambda ix, dx, x: (ix - dx - x, ix - dx),
'bottom': lambda iy, dy, y: (iy - dy - y, iy - dy),
'left': lambda ix, dx, x: (dx, dx + x),
}
offset = {'top': 0, 'right': 0, 'bottom': 0, 'left': 0, }
for border in ['top', 'bottom']:
# Don't remove too much, the image may just be plain
while offset[border] < image_height / 3.5:
slice_size = min(image_width / 20, 10)
y_range = borders[border](image_height, offset[border], slice_size)
section = image.crop((0, y_range[0], image_width, y_range[1]))
# If this section is below the threshold; remove it
if self._get_image_entropy(section) < 2.0:
offset[border] += slice_size
else:
break
for border in ['left', 'right']:
while offset[border] < image_width / 3.5:
slice_size = min(image_height / 20, 10)
x_range = borders[border](image_width, offset[border], slice_size)
section = image.crop((x_range[0], 0, x_range[1], image_height))
if self._get_image_entropy(section) < 2.0:
offset[border] += slice_size
else:
break
return image.crop(
(offset['left'], offset['top'], image_width - offset['right'], image_height - offset['bottom']))
# Credit to christopherhan https://github.com/christopherhan/pycrop
# This is just a slight rework of pycrop's implementation
def _entropy_crop(self, image, geometry_width, geometry_height, image_width, image_height):
geometry_ratio = geometry_width / geometry_height
# The image is proportionally wider than it should be
while image_width / image_height > geometry_ratio:
slice_width = max(image_width - geometry_width, 10)
right = image.crop((image_width - slice_width, 0, image_width, image_height))
left = image.crop((0, 0, slice_width, image_height))
if self._get_image_entropy(left) < self._get_image_entropy(right):
image = image.crop((slice_width, 0, image_width, image_height))
else:
image = image.crop((0, 0, image_width - slice_width, image_height))
image_width -= slice_width
# The image is proportionally taller than it should be
while image_width / image_height < geometry_ratio:
slice_height = min(image_height - geometry_height, 10)
bottom = image.crop((0, image_height - slice_height, image_width, image_height))
top = image.crop((0, 0, image_width, slice_height))
if self._get_image_entropy(bottom) < self._get_image_entropy(top):
image = image.crop((0, 0, image_width, image_height - slice_height))
else:
image = image.crop((0, slice_height, image_width, image_height))
image_height -= slice_height
return image
def _scale(self, image, width, height):
return image.resize((width, height), resample=Image.ANTIALIAS)
def _crop(self, image, width, height, x_offset, y_offset):
return image.crop((x_offset, y_offset,
width + x_offset, height + y_offset))
def _rounded(self, image, r):
i = round_rectangle(image.size, r, "notusedblack")
image.putalpha(i)
return image
def _blur(self, image, radius):
return image.filter(GaussianBlur(radius))
def _padding(self, image, geometry, options):
x_image, y_image = self.get_image_size(image)
left = int((geometry[0] - x_image) / 2)
top = int((geometry[1] - y_image) / 2)
color = options.get('padding_color')
im = Image.new(image.mode, geometry, color)
im.paste(image, (left, top))
return im
def _get_raw_data(self, image, format_, quality, image_info=None, progressive=False):
# Increase (but never decrease) PIL buffer size
ImageFile.MAXBLOCK = max(ImageFile.MAXBLOCK, image.size[0] * image.size[1])
bf = BufferIO()
params = {
'format': format_,
'quality': quality,
'optimize': 1,
}
# keeps icc_profile
if 'icc_profile' in image_info:
params['icc_profile'] = image_info['icc_profile']
raw_data = None
if format_ == 'JPEG' and progressive:
params['progressive'] = True
try:
# Do not save unnecessary exif data for smaller thumbnail size
params.pop('exif', {})
image.save(bf, **params)
except (IOError, OSError):
# Try without optimization, and still capture the result on this fallback path
params.pop('optimize')
image.save(bf, **params)
raw_data = bf.getvalue()
else:
raw_data = bf.getvalue()
finally:
bf.close()
return raw_data
def _get_image_entropy(self, image):
"""calculate the entropy of an image"""
hist = image.histogram()
hist_size = sum(hist)
hist = [float(h) / hist_size for h in hist]
return -sum([p * math.log(p, 2) for p in hist if p != 0])
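# Worked illustration of the entropy calculation above (a standalone sketch, not
# part of the engine): a histogram split evenly between two values gives
# probabilities [0.5, 0.5], and -sum(p * log2(p)) = 1.0 bit.
def _example_image_entropy():
    hist = [1, 1]  # hypothetical two-bin histogram
    hist_size = sum(hist)
    probs = [float(h) / hist_size for h in hist]
    return -sum(p * math.log(p, 2) for p in probs if p != 0)  # -> 1.0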
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exception
import six
from manila import context
from manila.db import api as db_api
from manila.db.sqlalchemy import api as sqlalchemy_api
from manila.db.sqlalchemy import models
from manila import exception
from manila import test
class ShareNetworkDBTest(test.TestCase):
def __init__(self, *args, **kwargs):
super(ShareNetworkDBTest, self).__init__(*args, **kwargs)
self.fake_context = context.RequestContext(user_id='fake user',
project_id='fake project',
is_admin=False)
def _check_fields(self, expected, actual):
for key in expected:
self.assertEqual(actual[key], expected[key])
def setUp(self):
super(ShareNetworkDBTest, self).setUp()
self.share_nw_dict = {'id': 'fake network id',
'neutron_net_id': 'fake net id',
'neutron_subnet_id': 'fake subnet id',
'project_id': self.fake_context.project_id,
'user_id': 'fake_user_id',
'network_type': 'vlan',
'segmentation_id': 1000,
'cidr': '10.0.0.0/24',
'ip_version': 4,
'name': 'whatever',
'description': 'fake description'}
def test_create_one_network(self):
result = db_api.share_network_create(self.fake_context,
self.share_nw_dict)
self._check_fields(expected=self.share_nw_dict, actual=result)
self.assertEqual(len(result['shares']), 0)
self.assertEqual(len(result['security_services']), 0)
def test_create_two_networks_in_different_tenants(self):
share_nw_dict2 = self.share_nw_dict.copy()
share_nw_dict2['id'] = None
share_nw_dict2['project_id'] = 'fake project 2'
result1 = db_api.share_network_create(self.fake_context,
self.share_nw_dict)
result2 = db_api.share_network_create(self.fake_context,
share_nw_dict2)
self._check_fields(expected=self.share_nw_dict, actual=result1)
self._check_fields(expected=share_nw_dict2, actual=result2)
def test_create_two_networks_in_one_tenant(self):
share_nw_dict2 = self.share_nw_dict.copy()
share_nw_dict2['id'] = share_nw_dict2['id'] + "suffix"
result1 = db_api.share_network_create(self.fake_context,
self.share_nw_dict)
result2 = db_api.share_network_create(self.fake_context,
share_nw_dict2)
self._check_fields(expected=self.share_nw_dict, actual=result1)
self._check_fields(expected=share_nw_dict2, actual=result2)
def test_create_with_duplicated_id(self):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
self.assertRaises(db_exception.DBDuplicateEntry,
db_api.share_network_create,
self.fake_context,
self.share_nw_dict)
def test_get(self):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self._check_fields(expected=self.share_nw_dict, actual=result)
self.assertEqual(len(result['shares']), 0)
self.assertEqual(len(result['security_services']), 0)
def test_get_with_one_share(self):
share_dict1 = {'id': 'fake share id1',
'share_network_id': self.share_nw_dict['id']}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.share_create(self.fake_context, share_dict1)
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(result['shares']), 1)
self._check_fields(expected=share_dict1,
actual=result['shares'][0])
def test_get_with_two_shares(self):
share_dict1 = {'id': 'fake share id1',
'share_network_id': self.share_nw_dict['id']}
share_dict2 = {'id': 'fake share id2',
'share_network_id': self.share_nw_dict['id']}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.share_create(self.fake_context, share_dict1)
db_api.share_create(self.fake_context, share_dict2)
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(result['shares']), 2)
def test_get_with_one_security_service(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
db_api.share_network_add_security_service(self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(result['security_services']), 1)
self._check_fields(expected=security_dict1,
actual=result['security_services'][0])
def test_get_with_two_security_services(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
security_dict2 = {'id': 'fake security service id2',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
db_api.security_service_create(self.fake_context, security_dict2)
db_api.share_network_add_security_service(self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
db_api.share_network_add_security_service(self.fake_context,
self.share_nw_dict['id'],
security_dict2['id'])
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(result['security_services']), 2)
def test_get_not_found(self):
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_get,
self.fake_context,
'fake id')
def test_delete(self):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.share_network_delete(self.fake_context,
self.share_nw_dict['id'])
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_get,
self.fake_context,
self.share_nw_dict['id'])
def test_delete_not_found(self):
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_delete,
self.fake_context,
'fake id')
def test_update(self):
new_name = 'fake_new_name'
db_api.share_network_create(self.fake_context, self.share_nw_dict)
result_update = db_api.share_network_update(self.fake_context,
self.share_nw_dict['id'],
{'name': new_name})
result_get = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(result_update['name'], new_name)
self._check_fields(expected=dict(six.iteritems(result_update)),
actual=dict(six.iteritems(result_get)))
def test_update_not_found(self):
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_update,
self.fake_context,
'fake id',
{})
def test_get_all_one_record(self):
db_api.share_network_create(self.fake_context, self.share_nw_dict)
result = db_api.share_network_get_all(self.fake_context)
self.assertEqual(len(result), 1)
self._check_fields(expected=self.share_nw_dict, actual=result[0])
def test_get_all_two_records(self):
share_nw_dict2 = dict(self.share_nw_dict)
share_nw_dict2['id'] = 'fake subnet id2'
share_nw_dict2['neutron_subnet_id'] = 'fake subnet id2'
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.share_network_create(self.fake_context, share_nw_dict2)
result = db_api.share_network_get_all(self.fake_context)
self.assertEqual(len(result), 2)
def test_get_all_by_project(self):
share_nw_dict2 = dict(self.share_nw_dict)
share_nw_dict2['id'] = 'fake share nw id2'
share_nw_dict2['project_id'] = 'fake project 2'
share_nw_dict2['neutron_subnet_id'] = 'fake subnet id2'
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.share_network_create(self.fake_context, share_nw_dict2)
result = db_api.share_network_get_all_by_project(
self.fake_context,
share_nw_dict2['project_id'])
self.assertEqual(len(result), 1)
self._check_fields(expected=share_nw_dict2, actual=result[0])
def test_add_security_service(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
db_api.share_network_add_security_service(self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
result = sqlalchemy_api.model_query(
self.fake_context,
models.ShareNetworkSecurityServiceAssociation).\
filter_by(security_service_id=security_dict1['id']).\
filter_by(share_network_id=self.share_nw_dict['id']).first()
self.assertIsNotNone(result)
def test_add_security_service_not_found_01(self):
security_service_id = 'unknown security service'
db_api.share_network_create(self.fake_context, self.share_nw_dict)
self.assertRaises(exception.SecurityServiceNotFound,
db_api.share_network_add_security_service,
self.fake_context,
self.share_nw_dict['id'],
security_service_id)
def test_add_security_service_not_found_02(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
share_nw_id = 'unknown share network'
db_api.security_service_create(self.fake_context, security_dict1)
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_add_security_service,
self.fake_context,
share_nw_id,
security_dict1['id'])
def test_add_security_service_association_error_already_associated(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
db_api.share_network_add_security_service(self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
self.assertRaises(
exception.ShareNetworkSecurityServiceAssociationError,
db_api.share_network_add_security_service,
self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
def test_remove_security_service(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
db_api.share_network_add_security_service(self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
db_api.share_network_remove_security_service(self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
result = sqlalchemy_api.model_query(
self.fake_context,
models.ShareNetworkSecurityServiceAssociation).\
filter_by(security_service_id=security_dict1['id']).\
filter_by(share_network_id=self.share_nw_dict['id']).first()
self.assertIsNone(result)
share_nw_ref = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(share_nw_ref['security_services']), 0)
def test_remove_security_service_not_found_01(self):
security_service_id = 'unknown security service'
db_api.share_network_create(self.fake_context, self.share_nw_dict)
self.assertRaises(exception.SecurityServiceNotFound,
db_api.share_network_remove_security_service,
self.fake_context,
self.share_nw_dict['id'],
security_service_id)
def test_remove_security_service_not_found_02(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
share_nw_id = 'unknown share network'
db_api.security_service_create(self.fake_context, security_dict1)
self.assertRaises(exception.ShareNetworkNotFound,
db_api.share_network_remove_security_service,
self.fake_context,
share_nw_id,
security_dict1['id'])
def test_remove_security_service_dissociation_error(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
self.assertRaises(
exception.ShareNetworkSecurityServiceDissociationError,
db_api.share_network_remove_security_service,
self.fake_context,
self.share_nw_dict['id'],
security_dict1['id'])
def test_security_services_relation(self):
security_dict1 = {'id': 'fake security service id1',
'project_id': self.fake_context.project_id,
'type': 'fake type'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.security_service_create(self.fake_context, security_dict1)
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(result['security_services']), 0)
def test_shares_relation(self):
share_dict = {'id': 'fake share id1'}
db_api.share_network_create(self.fake_context, self.share_nw_dict)
db_api.share_create(self.fake_context, share_dict)
result = db_api.share_network_get(self.fake_context,
self.share_nw_dict['id'])
self.assertEqual(len(result['shares']), 0)
|
|
# _*_ coding:utf-8 _*_
import tornado.web
import ujson
import pymongo
import datetime
import time
import ast
import logging
from Constant import ConstantVar, SUtils
logger = logging.getLogger()
class CpReportDetailHandler(tornado.web.RequestHandler):
"""
report handler for every cid every day
post method
"""
def validate_parameter(self, st, et, t, cids, n, s):
"""
check whether the request parameters are valid
:param st: optional start date string like 20180101
:param et: optional end date string like 20180101
:param t: time string from int(time.time()), len is 10
:param cids: optional string holding a list of cids, e.g. "[1, 2]"
:param n: nonce, not empty
:param s: sign string, not empty
:return: True if all parameters are valid, False otherwise
"""
if not n or not s:
return False
if not SUtils.is_time_str(t):
return False
if st:
if not SUtils.is_date_str(st):
return False
if et:
if not SUtils.is_date_str(et):
return False
if cids:
try:
tl = ast.literal_eval(cids)  # parse the list literal safely instead of using eval()
if not isinstance(tl, list):
return False
except Exception as e:
return False
return True
def post(self):
"""
HTTP POST handler
"""
# build the default response first so the except handlers below can always reference it
return_val = {
'data': {},
'status': ConstantVar.st_other,
'msg': '',
}
try:
SUtils.init()
logger.info("\n\npost: %s", self.request.body)
param_time = self.get_argument('t', '')
param_nonce = self.get_argument('n', '')
param_sign = self.get_argument('s', '')
para_start_date = self.get_argument('start_date', '')
para_end_date = self.get_argument('end_date', '')
cids_list = self.get_argument('cids', '')
crawler_channel = self.get_argument('crawler_channel', '')
telecom = self.get_argument('telecom', '')
province = self.get_argument('province', '')
be_ok = self.validate_parameter(para_start_date, para_end_date, param_time, cids_list, param_nonce, param_sign)
if not be_ok:
return_val['msg'] = ConstantVar.status_msg[ConstantVar.st_param_error]
return_val['status'] = ConstantVar.st_param_error
result_str = ujson.encode(return_val, ensure_ascii=False)
self.write(result_str)
return
try:
be, desc = SUtils.check_sign(param_time, param_nonce, param_sign)
if not be:
return_val['msg'] = ConstantVar.status_msg[ConstantVar.st_access_deny]
return_val['status'] = ConstantVar.st_access_deny
result_str = ujson.encode(return_val, ensure_ascii=False)
self.write(result_str)
return
except Exception as e:
logger.error(e)
return_val['status'] = ConstantVar.st_other
return_val['msg'] = ConstantVar.status_msg[ConstantVar.st_other]
result_str = ujson.encode(return_val, ensure_ascii=False)
self.write(result_str)
return
if cids_list:
                # parse the cids parameter as a literal list rather than eval-ing it
                cids_list = ast.literal_eval(cids_list)
# deal
result = self.deal(para_start_date, para_end_date, cids_list, telecom, province, crawler_channel)
# json obj 2 str
return_val['data'] = result
return_val['status'] = ConstantVar.st_success
result_str = ujson.encode(return_val, ensure_ascii=False)
self.write(result_str)
except Exception as e:
logger.error(e)
return_val['status'] = ConstantVar.st_other
return_val['msg'] = ConstantVar.status_msg[ConstantVar.st_other]
result_str = ujson.encode(return_val, ensure_ascii=False)
self.write(result_str)
def get_pwd_reset_data(self, data, province, telecom, cids_list, para_start_date, para_end_date):
"""
data like {cid:tel_set}
:return:{} like {cid:(pwd_reset_success, pwd_reset_total, pwd_rt_success_pct)}
"""
logger.info("")
result = {}
condition = {}
if telecom:
condition['telecom'] = telecom
if province:
condition['province'] = province
# in tid_info end_time is a string
if para_start_date:
para_date_timstamp_start = str(time.mktime(
datetime.datetime.strptime(para_start_date + "00:00:00", "%Y%m%d%H:%M:%S").timetuple()))
condition["end_time"] = {'$gte': para_date_timstamp_start}
if para_end_date:
para_date_timstamp_end = str(time.mktime(
datetime.datetime.strptime(para_end_date + "23:59:59", "%Y%m%d%H:%M:%S").timetuple()))
if 'end_time' in condition:
condition['end_time']['$lte'] = para_date_timstamp_end
else:
condition["end_time"] = {'$lte': para_date_timstamp_end}
if cids_list:
# in tel_info cid's type is int32
condition["cid"] = {'$in': [str(i) for i in cids_list]}
logger.info(condition)
conn = None
try:
conn = pymongo.MongoClient(ConstantVar.mongo_db_host, ConstantVar.mongo_db_port)
col = conn[ConstantVar.mongo_db_pwd_name][ConstantVar.mongo_db_pwd_col]
success_count = 0
all_count = 0
all = col.find(condition, {'end_time': 1, 'tel': 1, 'status': 1, 'cid':1})
for p in all:
try:
status = p['status']
tel = p['tel']
end_time = int(float(p['end_time']))
end_date = datetime.datetime.fromtimestamp(end_time)
end_date = end_date.strftime("%Y%m%d")
if not (tel in data.get(end_date, set())):
continue
if status == 0:
success_count += 1
all_count += 1
if all_count != 0:
result[end_date] = [success_count, all_count, float(success_count)/all_count]
except Exception as e:
logger.error("error: %s, data:%s", e, p)
return result
finally:
if conn:
conn.close()
def get_tel_num(self, mongo_handle, condition):
"""
:param param_date:
:param mongo_handle:
:return form sid_info summary tel_num (unique)
"""
logger.info(condition)
ori_sid_info = mongo_handle[ConstantVar.mongo_db_name][ConstantVar.mongo_db_ori_col]
if 'rpt_date' in condition:
v = condition['rpt_date']
if '$gte' in v:
para_date_timstamp_start = time.mktime(
datetime.datetime.strptime(v['$gte'] + "00:00:00", "%Y%m%d%H:%M:%S").timetuple())
v['$gte'] = para_date_timstamp_start
if '$lte' in v:
para_date_timstamp_end = time.mktime(
datetime.datetime.strptime(v['$lte'] + "23:59:59", "%Y%m%d%H:%M:%S").timetuple())
v['$lte'] = para_date_timstamp_end
condition['end_time'] = v
del condition['rpt_date']
if 'cid' in condition:
# in [sid_info] cid is a string , should be utf8 string
v = condition['cid']
condition['cid'] = [str(i).decode('utf8') for i in v]
logger.info(condition)
        # project end_time as well: it is needed below to bucket tels by date
        ori_telnums_data = ori_sid_info.find(condition, {'cid': 1, 'tel': 1, 'end_time': 1})
# get realtime data from sid_info
cid_tel_data = {}
for k in ori_telnums_data:
end_time = k.get('end_time', None)
if not end_time:
continue
rpt_date = datetime.datetime.fromtimestamp(end_time)
rpt_date = rpt_date.strftime("%Y%m%d")
if rpt_date not in cid_tel_data:
cid_tel_data[rpt_date] = set()
tel = k.get('tel', ConstantVar.other_default_value)
if isinstance(tel, dict) or isinstance(tel, list):
                # tel may be a dict or a list, which cannot be added to a set
logger.info("tel :%s is not a str", tel)
continue
cid_tel_data[rpt_date].add(tel)
return cid_tel_data
def deal(self, para_start_date, para_end_date, cids_list, telecom, province, crawler_channel):
"""
:param para_date:
:return summarize data contail key finally_require_keys
finally_require_keys = ['cid', 'total_nums', 'authen_nums', 'crawl_nums', 'report_nums',
'tel_nums', 'authen_pct', 'crawl_pct', 'report_pct', 'log_loss_pct',
'tel_num_diff']
"""
logger.info("")
mongo_handle = None
try:
# get data from mongodb
# get summarize data from rpt table
mongo_handle = pymongo.MongoClient(ConstantVar.mongo_db_host, ConstantVar.mongo_db_port)
rpt_collection = mongo_handle[ConstantVar.mongo_db_name][ConstantVar.mongo_db_rpt_col]
condition = {}
if telecom:
condition['telecom'] = telecom
if province:
condition['province'] = province
if crawler_channel:
condition['crawler_channel'] = crawler_channel
if para_start_date:
condition["rpt_date"] = {'$gte': para_start_date}
if para_end_date:
if 'rpt_date' in condition:
condition['rpt_date']['$lte'] = para_end_date
else:
condition["rpt_date"] = {'$lte': para_end_date}
if cids_list:
# in sid_info_data_rpt cid's type is int32
cids_list = [int(k) for k in cids_list]
condition["cid"] = {'$in': cids_list}
logger.info(condition)
one_day_rpt = rpt_collection.find(condition)
            # get realtime data from sid_info
all_tmp_data = {}
            # there may be multiple cids (one result per cid)
            # and multiple rows per date returned from mongo (sum them up)
for k in one_day_rpt:
rpt_date = k['rpt_date']
if rpt_date not in all_tmp_data:
all_tmp_data[rpt_date] = {
'total_nums': k.get('total_nums', 0),
'authen_nums': k.get('authen_nums', 0),
'crawl_nums': k.get('crawl_nums', 0),
'report_nums': k.get('report_nums', 0),
'final_nums': k.get('final_nums', 0),
'call_log_intact_nums': k.get('call_log_intact_nums', 0),
'bill_intact_nums': k.get('bill_intact_nums', 0),
'date': rpt_date,
'log_loss_pct': 0,
'bill_loss_pct': 0,
'tel_num': 0,
'pwd_rt_success': 0,
'pwd_rt_total': 0,
'pwd_rt_pct': 0,
}
else:
all_tmp_data[rpt_date]['total_nums'] += k.get('total_nums', 0)
all_tmp_data[rpt_date]['authen_nums'] += k.get('authen_nums', 0)
all_tmp_data[rpt_date]['crawl_nums'] += k.get('crawl_nums', 0)
all_tmp_data[rpt_date]['report_nums'] += k.get('report_nums', 0)
all_tmp_data[rpt_date]['call_log_intact_nums'] += k.get('call_log_intact_nums', 0)
all_tmp_data[rpt_date]['bill_intact_nums'] += k.get('bill_intact_nums', 0)
all_tmp_data[rpt_date]['final_nums'] += k.get('final_nums', 0)
# get all tel num
all_tel_num_data = self.get_tel_num(mongo_handle, condition)
pwd_result = self.get_pwd_reset_data(all_tel_num_data, province, telecom, cids_list, para_start_date, para_end_date)
# calc all pct
for rpt_date in all_tmp_data:
v = all_tmp_data[rpt_date]
if v['total_nums'] == 0:
v['authen_pct'] = 0
else:
v['authen_pct'] = float(v['authen_nums']) / v['total_nums']
if v['authen_nums'] == 0:
v['crawl_pct'] = 0
else:
v['crawl_pct'] = float(v['crawl_nums']) / v['authen_nums']
if v['crawl_nums'] == 0:
v['report_pct'] = 0
else:
v['report_pct'] = float(v['report_nums']) / v['crawl_nums']
if v['final_nums'] == 0:
v['log_loss_pct'] = 0
v['bill_loss_pct'] = 0
else:
v['log_loss_pct'] = 1 - float(v['call_log_intact_nums']) / v['final_nums']
v['bill_loss_pct'] = 1 - float(v['bill_intact_nums']) / v['final_nums']
v['tel_num'] = len(all_tel_num_data.get(rpt_date, set()))
pwd_data = pwd_result.get(rpt_date, [0, 0, 0])
v['pwd_rt_success'], v['pwd_rt_total'], v['pwd_rt_pct'] = pwd_data
except Exception as e:
logger.error(e)
finally:
if mongo_handle:
mongo_handle.close()
return all_tmp_data
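# --- Illustrative usage sketch (not part of the original handler) ---
# A minimal example of how this handler might be wired into a Tornado
# application. The route path and port below are assumptions made purely for
# illustration; the real deployment configuration is not shown in this file.
def make_example_app():
    # register the report handler under an assumed URL
    return tornado.web.Application([
        (r"/cp/report/detail", CpReportDetailHandler),
    ])

if __name__ == "__main__":
    import tornado.ioloop
    app = make_example_app()
    app.listen(8888)  # assumed port, for illustration only
    tornado.ioloop.IOLoop.current().start()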
|
|
#!/usr/bin/python
import csv
def comma_decimal(val):
return float(val.replace(",", "."))
def comma_decimal_formatter(val):
return str(val).replace(".", ",")
class CSVError(Exception):
pass
class CSVHeaderError(CSVError):
def __init__(self, expected, actual):
CSVError.__init__(self, "Invalid header")
self.expected = expected
self.actual = actual
class CSVFieldError(CSVError):
def __init__(self, field, file=None):
if file is None:
CSVError.__init__(self, "Unknown field '%s'" % field)
else:
CSVError.__init__(self, "Unknown field '%s' in file '%s'" % (field, file))
class CSVMod(object):
def __init__(self, controller):
self.controller = controller
def start(self):
reader = self.controller.reader
reader.begin()
writer = self.controller.writer
writer.begin()
writer.writeheader()
for data in reader:
row = reader.create_row(data)
update = self.controller.handle(row)
self.controller.post_progress(row)
if update is None:
update = row.is_changed
if update:
writer.write(row.fields)
self.controller.finish()
class CSVRow(object):
"""
:type joins: dict
"""
def __init__(self, fields, joins, aliases: dict, file_name=None):
self.fields = fields
self.origin = dict(fields)
self.joins = joins
self.aliases = aliases
self.file_name = file_name
def __getitem__(self, item):
return self.fields[self._get_field_name(item, True)]
def __setitem__(self, key, value):
getattr(self, "fields")[self._get_field_name(key, False)] = value
def __repr__(self):
return str(self.fields)
def _get_field_name(self, key, strict=True) -> str:
if key in self.fields:
return key
if key in self.aliases:
return self.aliases[key]
if strict:
raise CSVFieldError(key, file=self.file_name)
else:
return key
@property
def is_changed(self) -> bool:
return self.origin != self.fields
def join(self, name, field=None):
"""
:rtype: CSVRow
"""
try:
join = self.joins[name]
except KeyError:
raise CSVError("Unknown join '%s'" % name)
joint = join.auto_join(self)
if joint is None:
return None
if field is not None:
return joint[field]
return joint
def has_join(self, name) -> bool:
return name in self.joins.keys()
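# --- Illustrative sketch (assumption, not part of the original module) ---
# A tiny demonstration of CSVRow's alias resolution and change tracking; the
# field name "Preis" and the alias "price" are made up for illustration.
def _csv_row_example():
    row = CSVRow({"Preis": "1,50"}, joins={}, aliases={"price": "Preis"})
    row["price"] = "2,00"   # the alias resolves to the real field name "Preis"
    return row.is_changed   # True, because fields now differ from the original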
class CSVFile(object):
def __init__(self, **kwargs):
self._fields = list()
self.file_name = kwargs.pop("file")
self.file_handle = None
self.format = dict(delimiter=";", quotechar='"')
self.encoding = kwargs.pop("encoding", "utf-8")
self.aliases = kwargs.pop("aliases", dict())
self.fields = kwargs.pop("fields", list())
self.converter = kwargs.pop("converter", dict())
self.base_csv = None
self.name = kwargs.pop("name", None)
if "format" in kwargs:
self.format.update(kwargs.pop("format"))
if len(kwargs) > 0:
raise KeyError("Invalid option: %s" % ", ".join(kwargs.keys()))
@property
def fields(self):
return self._fields
@fields.setter
def fields(self, lst):
fields = list()
for field in lst:
if field in self.aliases:
fields.append(self.aliases[field])
else:
fields.append(field)
self._fields = fields
def begin(self):
pass
def end(self):
self.file_handle.close()
def _reduce_fields(self, row: dict) -> dict:
return {k: v for k, v in row.items() if k in self.fields}
class CSVReadFile(CSVFile):
def __init__(self, **kwargs):
self._joins = dict()
self.joins = kwargs.pop("joins", list())
super().__init__(**kwargs)
@property
def joins(self):
return self._joins
@joins.setter
def joins(self, joins):
if not hasattr(joins, "__iter__"):
joins = (joins, )
self._joins = dict()
for join in joins:
self._joins[join.name] = join
    def __iter__(self):
        # go through the reader property so the csv reader is created lazily
        return iter(self.reader)
def check_header(self, header):
if self.fields is None:
return True
header = list(header)
fields = list(self.fields)
for field in fields:
if field not in header:
raise CSVHeaderError(field, header)
return True
def create_row(self, data) -> CSVRow:
for field, converter in self.converter.items():
data[field] = converter(data[field])
return CSVRow(self._reduce_fields(data), self.joins, self.aliases, self.name)
@property
def reader(self) -> csv.DictReader:
if not self.base_csv:
self.file_handle = open(self.file_name, "r", encoding=self.encoding)
self.base_csv = csv.DictReader(self.file_handle, **self.format)
return self.base_csv
def begin(self):
if self.fields is None:
self.fields = self.reader.fieldnames
else:
self.check_header(self.reader.fieldnames)
for join in self.joins.values():
join.begin()
def end(self):
super().end()
for join in self.joins.values():
join.end()
class CSVWriteFile(CSVFile):
def __init__(self, **kwargs):
self.formatter = kwargs.pop("formatter", dict())
if "name" not in kwargs:
kwargs["name"] = "target"
super().__init__(**kwargs)
@property
def writer(self) -> csv.DictWriter:
if not self.base_csv:
self.file_handle = open(self.file_name, "w", encoding=self.encoding)
self.base_csv = csv.DictWriter(self.file_handle, self.fields, **self.format)
return self.base_csv
@CSVFile.fields.setter
def fields(self, val):
self._fields = val
def write(self, data):
data = dict(data)
for field, formatter in self.formatter.items():
data[field] = formatter(data[field])
self.writer.writerow(self._reduce_fields(data))
def writeheader(self):
self.writer.writeheader()
def _reduce_fields(self, row: dict):
result = dict()
for k, v in row.items():
if k in self.fields:
result[k] = v
if k in self.aliases:
result[self.aliases[k]] = v
return result
class JoinCSV(CSVReadFile):
def __init__(self, **kwargs):
self.local_field = kwargs.pop("local")
self.join_field = kwargs.pop("remote")
self.cache_enabled = kwargs.pop("cache", True)
self.cache = dict()
if "name" not in kwargs:
kwargs["name"] = kwargs["file"]
super().__init__(**kwargs)
def get_row(self, criteria) -> CSVRow:
if self.cache_enabled:
f = self.get_row_cached
else:
f = self.get_row_uncached
data = f(criteria)
return data
    def get_row_uncached(self, criteria) -> CSVRow:
for row in self.reader:
if self._is_match(row, criteria):
return self.create_row(row)
    def get_row_cached(self, criteria) -> CSVRow:
if criteria in self.cache:
return self.cache[criteria]
for row in self.reader:
r = self.create_row(row)
self.cache[row[self.join_field]] = r
if self._is_match(r, criteria):
return r
def auto_join(self, row: CSVRow) -> CSVRow:
criteria = row[self.local_field]
return self.get_row(criteria)
def _is_match(self, row, criteria) -> bool:
return row[self.join_field] == criteria
class Statistics(object):
class Counter(object):
def __init__(self, allow_negative=True):
self.slots = dict()
self.allow_negative = allow_negative
def plus(self, slot, n=1):
try:
self.slots[slot] += n
except KeyError:
self.slots[slot] = n
if not self.allow_negative and self[slot] < 0:
self.slots[slot] = 0
def minus(self, slot, n=1):
self.plus(slot, -n)
def __getitem__(self, item):
return self.slots.get(item, 0)
def __init__(self):
self.changes = {}
self.rows = 0
def process(self, data):
changed = False
for field in data.origin.keys():
if data.origin[field] != data[field]:
changed = True
self._incr("changes", field)
if changed:
self.rows += 1
def finish(self):
import operator
print("Finished, modified %d rows." % self.rows)
s = sorted(self.changes.items(), key=operator.itemgetter(1))
for field, changes in s:
print("%6d %s" % (changes, field))
def _incr(self, prop, index, n=1):
try:
self.__dict__[prop][index] += n
except KeyError:
self.__dict__[prop][index] = n
class Controller(object):
statistics = [Statistics()]
settings = dict()
output = dict()
def __init__(self, input_file=None, output_file=None):
if input_file is not None:
self.settings["file"] = input_file
if output_file is not None:
self.output["file"] = output_file
self._reader = None
self._writer = None
def handle(self, data):
pass
@property
def reader(self) -> CSVReadFile:
if not self._reader:
if "name" not in self.settings:
self.settings["name"] = "main"
self._reader = CSVReadFile(**self.settings)
return self._reader
@property
def writer(self) -> CSVWriteFile:
if not self._writer:
opts = dict(self.output)
if opts.get("fields") is None:
opts["fields"] = self.reader.fields
if "name" not in opts:
opts["name"] = "main"
self._writer = CSVWriteFile(**opts)
return self._writer
def post_progress(self, data):
for stat in self.statistics:
stat.process(data)
def finish(self):
self._reader.end()
for stat in self.statistics:
stat.finish()
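# --- Illustrative sketch (assumption, not part of the original module) ---
# A minimal example of a concrete controller for this framework: it reads an
# assumed "prices.csv", converts the "price" column from comma-decimal
# notation, raises every price by 10% and writes the result back using the
# comma-decimal formatter. All file and field names here are assumptions.
class ExamplePriceController(Controller):
    settings = dict(
        file="prices.csv",
        fields=["id", "price"],
        converter={"price": comma_decimal},
    )
    output = dict(
        file="prices_out.csv",
        formatter={"price": comma_decimal_formatter},
    )

    def handle(self, row):
        # modify the row in place; returning None lets CSVMod fall back to
        # row.is_changed when deciding whether to write the row
        row["price"] = row["price"] * 1.1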
if __name__ == "__main__":
from sys import argv, path
from os import getcwd
def import_controller(name):
components = name.split('.')
module = __import__(components[0])
for comp in components[1:]:
module = getattr(module, comp)
return module
if len(argv) != 4:
print("Usage: %s <controller> <input> <output>" % argv[0])
exit(1)
controller_name = argv[1]
read_file = argv[2]
write_file = argv[3]
path.append(getcwd())
chosen_controller = import_controller(controller_name)
mod = CSVMod(chosen_controller(read_file, write_file))
try:
mod.start()
except CSVHeaderError as e:
print("Unexpected header detected.")
print(e.expected)
print(e.actual)
|
|
#!/usr/bin/python
# Author: Brian Kellogg
# Version: 1.4
#
# Bro Intel file MUST have the below header and it MUST be TAB DELIMITED
# #fields indicator indicator_type meta.source meta.desc meta.url meta.do_notice meta.if_in
#
# This script was written for Bro and OSSEC intel file updates on SecurityOnion
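# For illustration, a (hypothetical) Bro intel entry in that format would look
# like the following, with "\t" standing in for literal tab characters:
# 1.2.3.4\tIntel::ADDR\tdrc\texample bad host\t-\tT\t-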
import sys
from subprocess import call
import ConfigParser
import re
import os
from shutil import copy
# string colorization
# from http://korbinin.blogspot.com/2012/10/color-text-output-from-python.html
def hilite(string, status, bold):
attr = []
if sys.stdout.isatty():
if status == 'g':
# green
attr.append('32')
elif status == 'r':
# red
attr.append('31')
elif status == 'y':
# yellow
attr.append('33')
elif status == 'b':
# blue
attr.append('34')
elif status == 'm':
# magenta
attr.append('35')
if bold:
attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
else:
return string
# check if file exists and is writeable
# if file does not exist then ask to create it otherwise exit
def exists_and_writable(intel_file):
if os.path.isfile(intel_file):
try:
with open(intel_file, 'a+') as f:
f.closed
except IOError:
print(hilite("\nFile, %s, is not writeable!\n", "r", True) % intel_file)
exit(4)
elif "bro" in intel_file:
print(hilite("\nBro intel file, %s, does not exist!\n", "r", True) % intel_file)
create = raw_input("Create intel file (y/n)? ")
if create == 'y':
try:
with open(intel_file, 'w+') as f:
f.write('#fields\tindicator\tindicator_type\tmeta.source\tmeta.desc\tmeta.url\tmeta.do_notice\tmeta.if_in\n')
except IOError:
print(hilite("\nCould not create file!\n", "r", True))
exit(4)
else:
exit(0)
elif "ossec" in intel_file:
print(hilite("\nOSSEC intel file, %s, does not exist!\n", "r", True) % intel_file)
create = raw_input("Create intel file (y/n)? ")
if create == 'y':
try:
with open(intel_file, 'w+') as f:
f.closed
except IOError:
print(hilite("\nCould not create file!\n", "r", True))
exit(4)
else:
exit(0)
# check if file is executable
def is_executable(program):
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
if is_exe(program):
return program
else:
print(hilite("\n%s is not executable or does not exist!\n", "r", True) % program)
exit(4)
# clean up duplicate lines in intel files
def remove_duplicate_lines(intel_file):
# backup intel files before any modifications by script
copy(intel_file, BACKUP_DIR)
lines_seen = set() # holds lines already seen
with open(intel_file, 'r') as f:
lines = f.readlines()
with open(intel_file, 'w') as f:
for line in lines:
if line not in lines_seen: # not a duplicate
f.write(line)
lines_seen.add(line)
# clean up OSSEC intel file by removing any complete /24s and adding the three octet equivalent
def ossec_collapse_full_nets(addr, source, intel_file):
count = 0
with open(intel_file, 'r') as f:
lines = f.readlines()
for line in lines:
if addr in line:
count += 1
if count > 255:
delete_ip(addr, 0, 255, intel_file)
add_ip(addr, 0, 255, source, None, None, None, None, OSSEC_IP_FILE)
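# For illustration (assumed values): if all 256 hosts of a /24 end up listed
# individually as "10.0.0.0:drc" ... "10.0.0.255:drc", the function above
# replaces them with the single OSSEC short form "10.0.0.:drc", which covers
# the whole 10.0.0.0/24 range.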
# add IP(s) to intel file
def add_ip(addr, start_ip, end_ip, source, desc, url, notice, if_in, intel_file):
with open(intel_file, 'a+') as f:
lines = f.readlines()
ossec_full_range = addr + ":"
found = False
# if adding a /24 to ossec then no need to add each individual IP
if "ossec" in intel_file:
for line in lines:
                # did we initially add a /24 to the OSSEC intel file?
if ossec_full_range in line:
print(hilite("%s already exists in %s!", "r", True) % (line, intel_file))
found = True
break
if not found and start_ip == 0 and end_ip == 255:
# remove any existing IPs in this range from the OSSEC intel file
delete_ip(addr, start_ip, end_ip, intel_file)
f.write('%s:%s\n' % (addr, source))
print(hilite("Added %s to %s", "y", True) % (addr, intel_file))
# since we are adding a /24 lets not trigger the next if clause
found = True
if not found:
for last_octet in range(start_ip, end_ip + 1):
found = False
for line in lines:
if addr in line:
if "ossec" in intel_file:
temp = line.split(":")
else:
temp = line.split('\t')
temp = temp[0].split(".")
temp = int(temp[3])
if last_octet == temp:
print(hilite("%s already exists in %s!", "r", True) % (line, intel_file))
found = True
break
if "ossec" not in intel_file and not found:
f.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' %
(addr + str(last_octet), "Intel::ADDR", source, desc, url, notice, if_in))
print(hilite("Added %s to %s", "y", True) % (addr + str(last_octet), intel_file))
elif not found:
f.write('%s:%s\n' % (addr + str(last_octet), source))
print(hilite("Added %s to %s", "y", True) % (addr + str(last_octet), intel_file))
# lets see if we can take this /24 and rewrite it in the OSSEC short form
if "ossec" in intel_file:
ossec_collapse_full_nets(addr, source, intel_file)
# remove IP(s) from intel file
def delete_ip(addr, start_ip, end_ip, intel_file):
with open(intel_file, 'r') as f:
lines = f.readlines()
with open(intel_file, 'w') as f:
ossec_full_range = addr + ":"
ossec_add_ip_ranges = False
for line in lines:
found = False
# did we initially add a /24 to the OSSEC intel file?
if ("ossec" in intel_file) and (ossec_full_range in line):
print(hilite("Removed %s from %s!", "y", True) % (addr, intel_file))
# pull out what is after the : so that we can reuse it if we need to add some of the range back in
source = line.split(":")
# remove newlines
source = source[1].rstrip('\n')
ossec_add_ip_ranges = True
found = True
elif addr in line:
if "ossec" in intel_file:
last_octet = line.split(":")
else:
last_octet = line.split('\t')
last_octet = last_octet[0].split(".")
last_octet = int(last_octet[3])
if start_ip <= last_octet <= end_ip:
print(hilite("Removed %s from %s!", "y", True) % (addr + str(last_octet), intel_file))
found = True
# write line back to file if not found
if not found:
f.write(line)
# if we removed a /24 from the OSSEC intel file and we need to add back some of that /24 range lets do that
if ossec_add_ip_ranges and start_ip != 0 and end_ip != 255:
if start_ip == 0:
start = end_ip + 1
end = 255
elif end_ip == 255:
start = 0
end = start_ip - 1
else:
start = 0
end = start_ip - 1
add_ip(addr, start, end, source, None, None, None, None, OSSEC_IP_FILE)
start = end_ip + 1
end = 255
add_ip(addr, start, end, source, None, None, None, None, OSSEC_IP_FILE)
# choose where Bro is to look for the intel
def get_if_in():
cont = True
while cont:
print("""
Choose where to look for intel:
Hit enter for default of (-)
********************************
1. Conn::IN_ORIG
2. Conn::IN_RESP
3. Files::IN_HASH
4. Files::IN_NAME
5. DNS::IN_REQUEST
6. DNS::IN_RESPONSE
7. HTTP::IN_HOST_HEADER
8. HTTP::IN_REFERRER_HEADER
9. HTTP::IN_USER_AGENT_HEADER
10. HTTP::IN_X_FORWARDED_FOR_HEADER
11. HTTP::IN_URL
12. SMTP::IN_MAIL_FROM
13. SMTP::IN_RCPT_TO
14. SMTP::IN_FROM
15. SMTP::IN_TO
16. SMTP::IN_RECEIVED_HEADER
17. SMTP::IN_REPLY_TO
18. SMTP::IN_X_ORIGINATING_IP_HEADER
19. SMTP::IN_MESSAGE
20. SSL::IN_SERVER_CERT
21. SSL::IN_CLIENT_CERT
22. SSL::IN_SERVER_NAME
23. SMTP::IN_HEADER
24. Leave Blank
""")
ans = raw_input("Choice (-)? ")
if ans == "1":
return "Conn::IN_ORIG"
elif ans == "2":
return "Conn::IN_RESP"
elif ans == "3":
return "Files::IN_HASH"
elif ans == "4":
return "Files::IN_NAME"
elif ans == "5":
return "DNS::IN_REQUEST"
elif ans == "6":
return "DNS::IN_RESPONSE"
elif ans == "7":
return "HTTP::IN_HOST_HEADER"
elif ans == "8":
return "HTTP::IN_REFERRER_HEADER"
elif ans == "9":
return "HTTP::IN_USER_AGENT_HEADER"
elif ans == "10":
return "HTTP::IN_X_FORWARDED_FOR_HEADER"
elif ans == "11":
return "HTTP::IN_URL"
elif ans == "12":
return "SMTP::IN_MAIL_FROM"
elif ans == "13":
return "SMTP::IN_RCPT_TO"
elif ans == "14":
return "SMTP::IN_FROM"
elif ans == "15":
return "SMTP::IN_TO"
elif ans == "16":
return "SMTP::IN_RECEIVED_HEADER"
elif ans == "17":
return "SMTP::IN_REPLY_TO"
elif ans == "18":
return "SMTP::IN_X_ORIGINATING_IP_HEADER"
elif ans == "19":
return "SMTP::IN_MESSAGE"
elif ans == "20":
return "SSL::IN_SERVER_CERT"
elif ans == "21":
return "SSL::IN_CLIENT_CERT"
elif ans == "22":
return "SSL::IN_SERVER_NAME"
elif ans == "23":
return "SMTP::IN_HEADER"
elif ans == "24":
return "-"
else:
return "-"
# get all the info needed to add the intel
def get_info():
desc = raw_input("Description? ")
if not desc:
desc = "-"
source = raw_input("Source (drc)? ")
if not source:
source = "drc"
url = raw_input("URL? ")
if not url:
url = "-"
notice = raw_input("Do notice (T)? ")
notice = notice.upper()
if notice != "T" or notice != "F":
notice = "T"
if_in = get_if_in()
return source, desc, url, notice, if_in
# get the information to add or remove intel then perform the specified operation
def misc_intel(op, header, type, intel_file):
print("\n%s" % header)
print("----------------------------")
intel = raw_input("Intel? ")
source, desc, url, notice, if_in = get_info()
if op == "add":
add_misc_intel(type, intel, intel_file, source, desc, url, notice, if_in)
else:
delete_misc_intel(intel, intel_file)
return intel
# add all other types of intel
def add_misc_intel(intel_type, intel, intel_file, source, desc, url, notice, if_in):
with open(intel_file, 'a+') as f:
lines = f.readlines()
# Lets see if this intel is already in the file
for line in lines:
if intel in line:
print(hilite("%s already exists in file!", "r", True) % intel)
# if we get a match then exit
return
# write line to file if not found
# how we write to the file is dependent on if it is an OSSEC intel file or a Bro intel file
if "ossec" in intel_file:
f.write('%s:drc\n' % intel)
else:
f.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (intel, intel_type, source, desc, url, notice, if_in))
print(hilite("Added %s to %s", "y", True) % (intel, intel_file))
# remove misc intel type
def delete_misc_intel(intel, intel_file):
with open(intel_file, 'r') as f:
lines = f.readlines()
# open intel file for writing
with open(intel_file, 'w') as f:
found = False
for line in lines:
# skip matching line we want to remove
if intel in line:
found = True
print(hilite("Removed %s from %s!", "y", True) % (intel, intel_file))
continue
# write line back to file if not a match
f.write(line)
if not found:
print(hilite("%s not found int %s", "y", True) % (intel, intel_file))
# Get user input and run correct function to add or remove intel IP
def do_ip(header, question, single_ip, ip_add):
print("\n%s" % header)
print("----------------------------")
addr = raw_input("First three octets including the trailing . ? ")
# need to convert beginning IP to int for comparison
start_ip = int(raw_input(question))
    # if single_ip is True then set end_ip = start_ip
if single_ip:
end_ip = start_ip
else:
# need to convert ending IP to int for comparison
end_ip = int(raw_input("Last IP in last octet? "))
# is the IP information valid, if not return to main menu
if start_ip < 0 or end_ip > 255 or not (re.match(IP_REGEX, addr)):
print(hilite("\n\nInvalid IP information.", "r", True))
return
if ip_add:
source, desc, url, notice, if_in = get_info()
print(hilite("\n------------RESULTS---------------", "y", True))
if ip_add:
add_ip(addr, start_ip, end_ip, source, desc, url, notice, if_in, BRO_INTEL_FILE)
add_ip(addr, start_ip, end_ip, source, desc, url, notice, if_in, OSSEC_IP_FILE)
else:
delete_ip(addr, start_ip, end_ip, BRO_INTEL_FILE)
delete_ip(addr, start_ip, end_ip, OSSEC_IP_FILE)
call(OSSEC_MAKELISTS)
def main_menu():
loop = True
while loop:
print("""
Intel Update:
##############################
1. Add single intel IP
2. Add range of intel IPs
3. Remove single intel IP
4. Remove range of intel IPs
5. Add URL
6. Remove URL
7. Add Software
8. Remove Software
9. Add Email
10. Remove Email
11. Add Domain
12. Remove Domain
13. Add Username
14. Remove Username
15. Add File Hash
16. Remove File Hash
17. Add File Name
18. Remove File Name
19. Add Cert Hash
20. Remove Cert Hash
q. Quit
##############################
""")
choice = raw_input("Choice? ")
if choice == "1":
do_ip("\nAdd single intel IP:", "Last octet? ", True, True)
elif choice == "2":
do_ip("\nAdd range of intel IPs:", "First IP in last octet? ", False, True)
elif choice == "3":
do_ip("\nRemove single intel IP:", "Last octet? ", True, False)
elif choice == "4":
do_ip("\nRemove range of intel IPs:", "First IP in last octet? ", False, False)
elif choice == "5":
misc_intel("add", "\nAdd URL:", "Intel::URL", BRO_INTEL_FILE)
elif choice == "6":
misc_intel("rem", "\nRemove URL:", None, BRO_INTEL_FILE)
elif choice == "7":
misc_intel("add", "\nAdd software:", "Intel::SOFTWARE", BRO_INTEL_FILE)
elif choice == "8":
misc_intel("rem", "\nRemove software:", None, BRO_INTEL_FILE)
elif choice == "9":
misc_intel("add", "\nAdd Email:", "Intel::EMAIL", BRO_INTEL_FILE)
elif choice == "10":
misc_intel("rem", "\nRemove Email:", None, BRO_INTEL_FILE)
elif choice == "11":
intel = misc_intel("add", "\nAdd domain:", "Intel::DOMAIN", BRO_INTEL_FILE)
add_misc_intel(None, intel, OSSEC_DNS_FILE, None, None, None, None, None)
call(OSSEC_MAKELISTS)
elif choice == "12":
intel = misc_intel("rem", "\nRemove domain:", None, BRO_INTEL_FILE)
delete_misc_intel(intel, OSSEC_DNS_FILE)
call(OSSEC_MAKELISTS)
elif choice == "13":
intel = misc_intel("add", "\nAdd username:", "Intel::USER_NAME", BRO_INTEL_FILE)
add_misc_intel(None, intel, OSSEC_USERS_FILE, None, None, None, None, None)
call(OSSEC_MAKELISTS)
elif choice == "14":
intel = misc_intel("rem", "\nRemove username:", None, BRO_INTEL_FILE)
delete_misc_intel(intel, OSSEC_USERS_FILE)
call(OSSEC_MAKELISTS)
elif choice == "15":
misc_intel("add", "\nAdd file hash:", "Intel::FILE_HASH", BRO_INTEL_FILE)
elif choice == "16":
misc_intel("rem", "\nRemove file hash:", None, BRO_INTEL_FILE)
elif choice == "17":
misc_intel("add", "\nAdd file name:", "Intel::FILE_NAME", BRO_INTEL_FILE)
elif choice == "18":
misc_intel("rem", "\nRemove file name:", None, BRO_INTEL_FILE)
elif choice == "19":
misc_intel("add", "\nAdd Cert hash:", "Intel::CERT_HASH", BRO_INTEL_FILE)
elif choice == "20":
misc_intel("rem", "\nRemove Cert hash:", None, BRO_INTEL_FILE)
elif choice == "q":
exit(0)
else:
print(hilite("\nInvalid input!", "r", True))
def main():
usage = """
usage: %prog
Update the modIntel.conf file to point to your Bro and OSSEC intel files.
The modIntel.conf file must reside in the same directory or
modify this script with the location you placed it in.
Header of the intel file must be:
#fields\tindicator\tindicator_type\tmeta.source\tmeta.desc\tmeta.url\tmeta.do_notice\tmeta.if_in
Remember Bro intel files MUST be tab delimited!!!
Any Bro intel file must be loaded into Bro to be used.
    Example, add the below to /opt/bro/share/bro/site/local.bro in order to load your intel1.txt custom file:
redef Intel::read_files += {
"/opt/bro/share/bro/policy/intel1.txt",
};
Bro adds new intel but does not remove without a restart:
sudo broctl install
sudo broctl restart
    or sudo nsm_sensor_ps-restart --only-bro
The script will also run the ossec-makelists command to compile any updated CDB files.
The script, before performing any other action on intel files, will backup all intel files
to the specified location in modIntel.conf.
The script will also parse all intel files for duplicates upon startup.
The script does its best to honor OSSEC's ability to specify IPs at the octet boundaries but only for /24s.
Logic is not included for /8s or /16s.
"""
if len(sys.argv) > 1:
print(hilite("\n%s", "r", True) % usage)
# globals to hold Bro and OSSEC intel file locations
global IP_REGEX, BRO_INTEL_FILE, OSSEC_IP_FILE, OSSEC_DNS_FILE, OSSEC_MAKELISTS, OSSEC_USERS_FILE, BACKUP_DIR
# regex to match first three octets of IP including trailing "."
IP_REGEX = re.compile("^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.$")
# read in configs
config = ConfigParser.ConfigParser()
config.readfp(open(r'modIntel.conf'))
BRO_INTEL_FILE = config.get('files', 'bro')
OSSEC_IP_FILE = config.get('files', 'ossecIP')
OSSEC_DNS_FILE = config.get('files', 'ossecDNS')
OSSEC_USERS_FILE = config.get('files', 'ossecUsers')
OSSEC_MAKELISTS = config.get('files', 'ossecMLISTS')
BACKUP_DIR = config.get('files', 'backupDir')
if not os.access(BACKUP_DIR, os.W_OK):
print(hilite("\n%s is not writeable or does not exist!\nPlease check your configuration.\n", "r", True) % BACKUP_DIR)
exit(4)
# tuple of files to check for exist, write access, and duplicate lines
check_files = BRO_INTEL_FILE, OSSEC_IP_FILE, OSSEC_DNS_FILE, OSSEC_USERS_FILE
for check in check_files:
exists_and_writable(check)
remove_duplicate_lines(check)
# is the OSSEC CDB compiler there and executable
is_executable(OSSEC_MAKELISTS)
# compile OSSEC CDBs in case they were modified by the above duplicate checks
call(OSSEC_MAKELISTS)
# goto main menu
main_menu()
if __name__ == '__main__':
main()
|
|
from topaz.objects.fileobject import FNM_NOESCAPE, FNM_DOTMATCH
from topaz.objects.regexpobject import RegexpCache
from topaz.utils.glob import Glob
from ..base import BaseTopazTest
class GlobHelper(object):
def __init__(self, space, tmpdir, monkeypatch):
self.space = space
self.tmpdir = tmpdir
monkeypatch.chdir(tmpdir)
def create_paths(self, mock_files):
for path in mock_files:
self.tmpdir.join(path).ensure()
def glob(self, pattern, flags=0):
glob = Glob(self.space.fromcache(RegexpCache))
glob.glob(pattern, flags)
return glob.matches()
def sglob(self, pattern, flags=0):
return sorted(self.glob(pattern, flags))
def pytest_funcarg__glob_helper(request):
space = request.getfuncargvalue("space")
tmpdir = request.getfuncargvalue("tmpdir")
monkeypatch = request.getfuncargvalue("monkeypatch")
return GlobHelper(space, tmpdir, monkeypatch)
class TestGlob(BaseTopazTest):
"""
These tests are almost entirely copied from rubyspec. They are included
separately here because globs are required for running specs.
"""
def test_absolute(self, glob_helper):
assert glob_helper.glob("/") == ["/"]
def test_non_dotfiles_with_star(self, glob_helper):
glob_helper.create_paths([
".dotfile", ".dotsubdir/.dotfile", ".dotsubdir/nondotfile",
"file_one.ext", "file_two.ext", "nondotfile"
])
assert glob_helper.sglob("*") == [
"file_one.ext", "file_two.ext", "nondotfile"
]
assert glob_helper.sglob("**") == [
"file_one.ext", "file_two.ext", "nondotfile"]
assert glob_helper.sglob("*file") == ["nondotfile"]
def test_dotfiles_with_star(self, glob_helper):
glob_helper.create_paths([
".dotfile", ".dotsubdir/.dotfile", ".dotsubdir/nondotfile",
"file_one.ext", "file_two.ext", "nondotfile"
])
assert glob_helper.sglob(".*") == [".", "..", ".dotfile", ".dotsubdir"]
assert glob_helper.sglob(".**") == [
".", "..", ".dotfile", ".dotsubdir"
]
assert glob_helper.sglob(".*file") == [".dotfile"]
def test_empty_pattern_no_matches(self, glob_helper):
assert glob_helper.glob("") == []
def test_regexp_specials(self, glob_helper):
glob_helper.create_paths([
"special/+", "special/^", "special/$", "special/(", "special/)",
"special/[", "special/]", "special/{", "special/}"
])
assert glob_helper.glob("special/+") == ["special/+"]
assert glob_helper.glob("special/^") == ["special/^"]
assert glob_helper.glob("special/$") == ["special/$"]
assert glob_helper.glob("special/(") == ["special/("]
assert glob_helper.glob("special/)") == ["special/)"]
assert glob_helper.glob("special/\\[") == ["special/["]
assert glob_helper.glob("special/]") == ["special/]"]
assert glob_helper.glob("special/\\{") == ["special/{"]
assert glob_helper.glob("special/\\}") == ["special/}"]
# TODO: Skip these on Windows
glob_helper.create_paths(["special/*", "special/?", "special/|"])
assert glob_helper.glob("special/\\*") == ["special/*"]
assert glob_helper.glob("special/\\?") == ["special/?"]
assert glob_helper.glob("special/|") == ["special/|"]
def test_matches_paths_with_globs(self, glob_helper):
glob_helper.create_paths(["special/test{1}/file[1]"])
assert glob_helper.glob("special/test\\{1\\}/*") == [
"special/test{1}/file[1]"
]
def test_dstar_recursion(self, glob_helper):
glob_helper.create_paths([
".dotfile", ".dotsubdir/.dotfile", ".dotsubdir/nondotfile",
"file_one.ext", "file_two.ext", "nondotfile",
"subdir_one/.dotfile", "subdir_one/nondotfile",
"subdir_two/nondotfile", "subdir_two/nondotfile.ext",
"deeply/.dotfile", "deeply/nested/.dotfile.ext",
"deeply/nested/directory/structure/.ext",
"deeply/nested/directory/structure/file_one",
"deeply/nested/directory/structure/file_one.ext",
"deeply/nested/directory/structure/foo", "deeply/nondotfile"
])
assert glob_helper.sglob("**/") == [
"deeply/", "deeply/nested/", "deeply/nested/directory/",
"deeply/nested/directory/structure/", "subdir_one/", "subdir_two/"
]
assert glob_helper.sglob("**/*fil*") == [
"deeply/nested/directory/structure/file_one",
"deeply/nested/directory/structure/file_one.ext",
"deeply/nondotfile", "file_one.ext", "file_two.ext", "nondotfile",
"subdir_one/nondotfile", "subdir_two/nondotfile",
"subdir_two/nondotfile.ext"
]
def test_question_mark(self, glob_helper):
glob_helper.create_paths(["subdir_one", "subdir_two"])
assert glob_helper.sglob("?ubdir_one") == ["subdir_one"]
assert glob_helper.sglob("subdir_???") == ["subdir_one", "subdir_two"]
def test_character_group(self, glob_helper):
glob_helper.create_paths(["subdir_one", "subdir_two"])
assert glob_helper.sglob("[stfu]ubdir_one") == ["subdir_one"]
assert glob_helper.sglob("[A-Za-z]ubdir_one") == ["subdir_one"]
assert glob_helper.sglob("subdir_[a-z][a-z][a-z]") == [
"subdir_one", "subdir_two"
]
def test_negated_character_group(self, glob_helper):
glob_helper.create_paths(["subdir_one", "subdir_two"])
assert glob_helper.sglob("[^stfu]ubdir_one") == []
assert glob_helper.sglob("[^wtf]ubdir_one") == ["subdir_one"]
assert glob_helper.sglob("[^a-zA-Z]ubdir_one") == []
assert glob_helper.sglob("[^0-9a-fA-F]ubdir_one") == ["subdir_one"]
def test_braces(self, glob_helper):
glob_helper.create_paths([
".dotfile", ".dotsubdir/.dotfile", ".dotsubdir/nondotfile",
"subdir_one/.dotfile", "subdir_one/nondotfile",
"subdir_two/nondotfile", "subdir_two/nondotfile.ext"
])
assert glob_helper.sglob("subdir_{one,two,three}") == [
"subdir_one", "subdir_two"
]
assert glob_helper.sglob("sub*_{one,two,three}") == [
"subdir_one", "subdir_two"
]
assert glob_helper.sglob("subdir_two/nondotfile{.ext,}") == [
"subdir_two/nondotfile", "subdir_two/nondotfile.ext"
]
assert glob_helper.sglob("{,.}*") == [
".", "..", ".dotfile", ".dotsubdir", "subdir_one", "subdir_two"
]
def test_braces_ordering(self, glob_helper):
glob_helper.create_paths([
"brace/a", "brace/a.js", "brace/a.erb", "brace/a.js.rjs",
"brace/a.html.erb"
])
assert glob_helper.glob("brace/a{.js,.html}{.erb,.rjs}") == [
"brace/a.js.rjs", "brace/a.html.erb"
]
assert glob_helper.glob("brace/a{.{js,html},}{.{erb,rjs},}") == [
"brace/a.js.rjs", "brace/a.js", "brace/a.html.erb", "brace/a.erb",
"brace/a"
]
def test_escaping(self, glob_helper):
glob_helper.create_paths(["foo^bar", "nondotfile"])
assert glob_helper.glob("foo?bar") == ["foo^bar"]
assert glob_helper.glob("foo\\?bar") == []
assert glob_helper.glob("nond\\otfile") == ["nondotfile"]
def test_preserves_separator(self, glob_helper):
glob_helper.create_paths([
"deeply/nested/directory/structure/file_one.ext"
])
assert glob_helper.glob("deeply/nested//directory/*/*.ext") == [
"deeply/nested//directory/structure/file_one.ext"
]
assert glob_helper.glob("deeply/*/directory/structure//**/*.ext") == [
"deeply/nested/directory/structure//file_one.ext"
]
def test_ignores_missing_dirs(self, glob_helper):
assert glob_helper.glob("deeply/notthere/blah*/whatever") == []
assert glob_helper.glob("deeply/notthere/blah/") == []
def test_multiple_recursives(self, glob_helper):
glob_helper.create_paths(["a/x/b/y/e", "a/x/b/y/b/z/e"])
assert glob_helper.sglob("a/**/b/**/e") == [
"a/x/b/y/b/z/e", "a/x/b/y/e"
]
def test_flag_dotmatch(self, glob_helper):
glob_helper.create_paths([
".dotfile", ".dotsubdir/.dotfile", ".dotsubdir/nondotfile",
"file_one.ext", "file_two.ext", "nondotfile",
"deeply/nested/.dotfile.ext"
])
assert glob_helper.sglob("*", FNM_DOTMATCH) == [
".", "..", ".dotfile", ".dotsubdir", "deeply", "file_one.ext",
"file_two.ext", "nondotfile"
]
assert glob_helper.sglob("**", FNM_DOTMATCH) == [
".", "..", ".dotfile", ".dotsubdir", "deeply", "file_one.ext",
"file_two.ext", "nondotfile"
]
assert glob_helper.sglob("*file", FNM_DOTMATCH) == [
".dotfile", "nondotfile"
]
assert glob_helper.sglob("**/", FNM_DOTMATCH) == [
".dotsubdir/", "deeply/", "deeply/nested/"
]
assert glob_helper.sglob("./**/", FNM_DOTMATCH) == [
"./", "./.dotsubdir/", "./deeply/", "./deeply/nested/"
]
def test_flag_noescape(self, glob_helper):
# TODO: Skip this on Windows
glob_helper.create_paths(["foo?bar"])
assert glob_helper.glob("foo?bar", FNM_NOESCAPE) == ["foo?bar"]
assert glob_helper.glob("foo\\?bar", FNM_NOESCAPE) == []
glob_helper.create_paths(["foo\\?bar"])
assert glob_helper.glob("foo\\?bar", FNM_NOESCAPE) == ["foo\\?bar"]
|
|
import os
import json
import numpy as np
from shutil import copyfile
from keras.optimizers import SGD
import keras.backend as K
from AlphaGo.ai import ProbabilisticPolicyPlayer
import AlphaGo.go as go
from AlphaGo.models.policy import CNNPolicy
from AlphaGo.util import flatten_idx
def _make_training_pair(st, mv, preprocessor):
# Convert move to one-hot
st_tensor = preprocessor.state_to_tensor(st)
mv_tensor = np.zeros((1, st.size * st.size))
mv_tensor[(0, flatten_idx(mv, st.size))] = 1
return (st_tensor, mv_tensor)
def run_n_games(optimizer, learner, opponent, num_games, mock_states=[]):
'''Run num_games games to completion, keeping track of each position and move of the learner.
(Note: learning cannot happen until all games have completed)
'''
board_size = learner.policy.model.input_shape[-1]
states = [go.GameState(size=board_size) for _ in range(num_games)]
learner_net = learner.policy.model
# Allowing injection of a mock state object for testing purposes
if mock_states:
states = mock_states
# Create one list of features (aka state tensors) and one of moves for each game being played.
state_tensors = [[] for _ in range(num_games)]
move_tensors = [[] for _ in range(num_games)]
# List of booleans indicating whether the 'learner' player won.
learner_won = [None] * num_games
# Start all odd games with moves by 'opponent'. Even games will have 'learner' black.
learner_color = [go.BLACK if i % 2 == 0 else go.WHITE for i in range(num_games)]
odd_states = states[1::2]
moves = opponent.get_moves(odd_states)
for st, mv in zip(odd_states, moves):
st.do_move(mv)
current = learner
other = opponent
idxs_to_unfinished_states = {i: states[i] for i in range(num_games)}
while len(idxs_to_unfinished_states) > 0:
# Get next moves by current player for all unfinished states.
moves = current.get_moves(idxs_to_unfinished_states.values())
just_finished = []
# Do each move to each state in order.
for (idx, state), mv in zip(idxs_to_unfinished_states.iteritems(), moves):
# Order is important here. We must get the training pair on the unmodified state before
# updating it with do_move.
is_learnable = current is learner and mv is not go.PASS_MOVE
if is_learnable:
(st_tensor, mv_tensor) = _make_training_pair(state, mv, learner.policy.preprocessor)
state_tensors[idx].append(st_tensor)
move_tensors[idx].append(mv_tensor)
state.do_move(mv)
if state.is_end_of_game:
learner_won[idx] = state.get_winner() == learner_color[idx]
just_finished.append(idx)
# Remove games that have finished from dict.
for idx in just_finished:
del idxs_to_unfinished_states[idx]
# Swap 'current' and 'other' for next turn.
current, other = other, current
# Train on each game's results, setting the learning rate negative to 'unlearn' positions from
# games where the learner lost.
for (st_tensor, mv_tensor, won) in zip(state_tensors, move_tensors, learner_won):
optimizer.lr = K.abs(optimizer.lr) * (+1 if won else -1)
learner_net.train_on_batch(np.concatenate(st_tensor, axis=0),
np.concatenate(mv_tensor, axis=0))
# Return the win ratio.
wins = sum(state.get_winner() == pc for (state, pc) in zip(states, learner_color))
return float(wins) / num_games
def log_loss(y_true, y_pred):
'''Keras 'loss' function for the REINFORCE algorithm, where y_true is the action that was
taken, and updates with the negative gradient will make that action more likely. We use the
negative gradient because keras expects training data to minimize a loss function.
'''
return -y_true * K.log(K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon()))
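# --- Illustrative sketch (assumption, not part of the original script) ---
# With a one-hot y_true the loss above reduces to -log(p) of the chosen
# action, so a positive learning rate makes that action more likely; the sign
# flip applied to optimizer.lr in run_n_games reverses the update for games
# the learner lost. A tiny NumPy version of the same arithmetic:
def _log_loss_example():
    y_true = np.array([[0.0, 1.0, 0.0]])   # one-hot: action 1 was taken
    y_pred = np.array([[0.2, 0.5, 0.3]])   # policy output for the position
    eps = 1e-7
    loss = -y_true * np.log(np.clip(y_pred, eps, 1.0 - eps))
    return loss.sum()                       # == -log(0.5), about 0.693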
def run_training(cmd_line_args=None):
import argparse
parser = argparse.ArgumentParser(description='Perform reinforcement learning to improve given policy network. Second phase of pipeline.') # noqa: E501
parser.add_argument("model_json", help="Path to policy model JSON.")
parser.add_argument("initial_weights", help="Path to HDF5 file with inital weights (i.e. result of supervised training).") # noqa: E501
parser.add_argument("out_directory", help="Path to folder where the model params and metadata will be saved after each epoch.") # noqa: E501
parser.add_argument("--learning-rate", help="Keras learning rate (Default: 0.001)", type=float, default=0.001) # noqa: E501
parser.add_argument("--policy-temp", help="Distribution temperature of players using policies (Default: 0.67)", type=float, default=0.67) # noqa: E501
parser.add_argument("--save-every", help="Save policy as a new opponent every n batches (Default: 500)", type=int, default=500) # noqa: E501
parser.add_argument("--game-batch", help="Number of games per mini-batch (Default: 20)", type=int, default=20) # noqa: E501
parser.add_argument("--move-limit", help="Maximum number of moves per game", type=int, default=500) # noqa: E501
parser.add_argument("--iterations", help="Number of training batches/iterations (Default: 10000)", type=int, default=10000) # noqa: E501
parser.add_argument("--resume", help="Load latest weights in out_directory and resume", default=False, action="store_true") # noqa: E501
parser.add_argument("--verbose", "-v", help="Turn on verbose mode", default=False, action="store_true") # noqa: E501
# Baseline function (TODO) default lambda state: 0 (receives either file
# paths to JSON and weights or None, in which case it uses default baseline 0)
if cmd_line_args is None:
args = parser.parse_args()
else:
args = parser.parse_args(cmd_line_args)
ZEROTH_FILE = "weights.00000.hdf5"
if args.resume:
if not os.path.exists(os.path.join(args.out_directory, "metadata.json")):
raise ValueError("Cannot resume without existing output directory")
if not os.path.exists(args.out_directory):
if args.verbose:
print("creating output directory {}".format(args.out_directory))
os.makedirs(args.out_directory)
if not args.resume:
# make a copy of weights file, "weights.00000.hdf5" in the output directory
copyfile(args.initial_weights, os.path.join(args.out_directory, ZEROTH_FILE))
if args.verbose:
print("copied {} to {}".format(args.initial_weights,
os.path.join(args.out_directory, ZEROTH_FILE)))
player_weights = ZEROTH_FILE
else:
# if resuming, we expect initial_weights to be just a
# "weights.#####.hdf5" file, not a full path
args.initial_weights = os.path.join(args.out_directory,
os.path.basename(args.initial_weights))
if not os.path.exists(args.initial_weights):
raise ValueError("Cannot resume; weights {} do not exist".format(args.initial_weights))
elif args.verbose:
print("Resuming with weights {}".format(args.initial_weights))
player_weights = os.path.basename(args.initial_weights)
# Set initial conditions
policy = CNNPolicy.load_model(args.model_json)
policy.model.load_weights(args.initial_weights)
player = ProbabilisticPolicyPlayer(policy, temperature=args.policy_temp,
move_limit=args.move_limit)
# different opponents come from simply changing the weights of 'opponent.policy.model'. That
# is, only 'opp_policy' needs to be changed, and 'opponent' will change.
opp_policy = CNNPolicy.load_model(args.model_json)
opponent = ProbabilisticPolicyPlayer(opp_policy, temperature=args.policy_temp,
move_limit=args.move_limit)
if args.verbose:
print("created player and opponent with temperature {}".format(args.policy_temp))
if not args.resume:
metadata = {
"model_file": args.model_json,
"init_weights": args.initial_weights,
"learning_rate": args.learning_rate,
"temperature": args.policy_temp,
"game_batch": args.game_batch,
"opponents": [ZEROTH_FILE], # which weights from which to sample an opponent each batch
"win_ratio": {} # map from player to tuple of (opponent, win ratio) Useful for
# validating in lieu of 'accuracy/loss'
}
else:
with open(os.path.join(args.out_directory, "metadata.json"), "r") as f:
metadata = json.load(f)
# Append args of current run to history of full command args.
metadata["cmd_line_args"] = metadata.get("cmd_line_args", [])
metadata["cmd_line_args"].append(vars(args))
def save_metadata():
with open(os.path.join(args.out_directory, "metadata.json"), "w") as f:
json.dump(metadata, f, sort_keys=True, indent=2)
optimizer = SGD(lr=args.learning_rate)
player.policy.model.compile(loss=log_loss, optimizer=optimizer)
for i_iter in range(1, args.iterations + 1):
        # Randomly choose an opponent from the pool (possibly self) and play
        # game_batch games against it.
opp_weights = np.random.choice(metadata["opponents"])
opp_path = os.path.join(args.out_directory, opp_weights)
# Load new weights into opponent's network, but keep the same opponent object.
opponent.policy.model.load_weights(opp_path)
if args.verbose:
print("Batch {}\tsampled opponent is {}".format(i_iter, opp_weights))
# Run games (and learn from results). Keep track of the win ratio vs each opponent over
# time.
win_ratio = run_n_games(optimizer, player, opponent, args.game_batch)
metadata["win_ratio"][player_weights] = (opp_weights, win_ratio)
# Save all intermediate models.
player_weights = "weights.%05d.hdf5" % i_iter
player.policy.model.save_weights(os.path.join(args.out_directory, player_weights))
        # Add player to the pool of opponents once in a while.
if i_iter % args.save_every == 0:
metadata["opponents"].append(player_weights)
save_metadata()
if __name__ == '__main__':
run_training()
|