# NOTE(review): the two lines here were a dataset-extraction artifact
# (a parquet/table column header), not part of the original source file.
# cython: auto_cpdef=True, infer_types=True, language_level=3, py2_import=True
#
# Parser
#
# This should be done automatically
import cython
cython.declare(Nodes=object, ExprNodes=object, EncodedString=object)
import os
import re
import sys
from Cython.Compiler.Scanning import PyrexScanner, FileSourceDescriptor
import Nodes
import ExprNodes
import StringEncoding
from StringEncoding import EncodedString, BytesLiteral, _unicode, _bytes
from ModuleNode import ModuleNode
from Errors import error, warning, InternalError
from Cython import Utils
import Future
import Options
class Ctx(object):
    """Parsing context: a bag of flags describing where in the source
    the parser currently is (declaration level, visibility, cdef/api
    modifiers, C++ template/namespace info)."""
    # class attributes provide the defaults; instances override via __dict__
    level = 'other'
    visibility = 'private'
    cdef_flag = 0
    typedef_flag = 0
    api = 0
    overridable = 0
    nogil = 0
    namespace = None
    templates = None
    allow_struct_enum_decorator = False
    def __init__(self, **kwds):
        self.__dict__.update(kwds)
    def __call__(self, **kwds):
        """Return a copy of this context with *kwds* overridden."""
        merged = dict(self.__dict__)
        merged.update(kwds)
        return Ctx(**merged)
def p_ident(s, message = "Expected an identifier"):
    """Parse a single identifier and return its name; report *message*
    via the scanner if the current token is not an identifier."""
    if s.sy != 'IDENT':
        s.error(message)
    else:
        ident_name = s.systring
        s.next()
        return ident_name
def p_ident_list(s):
    """Parse a comma-separated list of identifiers; returns the names."""
    result = []
    while s.sy == 'IDENT':
        result.append(s.systring)
        s.next()
        if s.sy == ',':
            s.next()
        else:
            break
    return result
#------------------------------------------
#
# Expressions
#
#------------------------------------------
def p_binop_operator(s):
    """Consume the current operator token; returns (operator, position)."""
    position = s.position()
    operator = s.sy
    s.next()
    return operator, position
def p_binop_expr(s, ops, p_sub_expr):
    # Parse a left-associative chain of binary operators drawn from *ops*,
    # using *p_sub_expr* to parse each operand.
    n1 = p_sub_expr(s)
    while s.sy in ops:
        op, pos = p_binop_operator(s)
        n2 = p_sub_expr(s)
        n1 = ExprNodes.binop_node(pos, op, n1, n2)
        if op == '/':
            # '/' semantics depend on 'from __future__ import division'
            if Future.division in s.context.future_directives:
                n1.truedivision = True
            else:
                n1.truedivision = None # unknown
    return n1
#lambdef: 'lambda' [varargslist] ':' test
def p_lambdef(s, allow_conditional=True):
    # s.sy == 'lambda'
    # Parse a lambda expression.  When *allow_conditional* is false the
    # body is parsed as test_nocond (grammar rule lambdef_nocond).
    pos = s.position()
    s.next()
    if s.sy == ':':
        # no argument list
        args = []
        star_arg = starstar_arg = None
    else:
        args, star_arg, starstar_arg = p_varargslist(
            s, terminator=':', annotated=False)
    s.expect(':')
    if allow_conditional:
        expr = p_test(s)
    else:
        expr = p_test_nocond(s)
    return ExprNodes.LambdaNode(
        pos, args = args,
        star_arg = star_arg, starstar_arg = starstar_arg,
        result_expr = expr)
#lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
def p_lambdef_nocond(s):
    # Lambda whose body may not be a conditional expression; used inside
    # comprehension 'if' clauses (grammar rule lambdef_nocond).
    return p_lambdef(s, allow_conditional=False)
#test: or_test ['if' or_test 'else' test] | lambdef
def p_test(s):
    """test: or_test ['if' or_test 'else' test] | lambdef"""
    if s.sy == 'lambda':
        return p_lambdef(s)
    pos = s.position()
    first = p_or_test(s)
    if s.sy != 'if':
        return first
    s.next()
    condition = p_or_test(s)
    s.expect('else')
    fallback = p_test(s)
    return ExprNodes.CondExprNode(pos, test=condition,
                                  true_val=first, false_val=fallback)
#test_nocond: or_test | lambdef_nocond
def p_test_nocond(s):
    """test_nocond: or_test | lambdef_nocond"""
    if s.sy == 'lambda':
        return p_lambdef_nocond(s)
    return p_or_test(s)
#or_test: and_test ('or' and_test)*
def p_or_test(s):
    # or_test: and_test ('or' and_test)*
    return p_rassoc_binop_expr(s, ('or',), p_and_test)
def p_rassoc_binop_expr(s, ops, p_subexpr):
    # Parse a right-associative chain of short-circuit operators
    # ('and'/'or'), so that 'a or b or c' nests to the right.
    n1 = p_subexpr(s)
    if s.sy in ops:
        pos = s.position()
        op = s.sy
        s.next()
        n2 = p_rassoc_binop_expr(s, ops, p_subexpr)
        n1 = ExprNodes.binop_node(pos, op, n1, n2)
    return n1
#and_test: not_test ('and' not_test)*
def p_and_test(s):
    #return p_binop_expr(s, ('and',), p_not_test)
    return p_rassoc_binop_expr(s, ('and',), p_not_test)
#not_test: 'not' not_test | comparison
def p_not_test(s):
    """not_test: 'not' not_test | comparison"""
    if s.sy != 'not':
        return p_comparison(s)
    pos = s.position()
    s.next()
    return ExprNodes.NotNode(pos, operand = p_not_test(s))
#comparison: expr (comp_op expr)*
#comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
def p_comparison(s):
    # comparison: expr (comp_op expr)*
    # A chained comparison (a < b < c) becomes a PrimaryCmpNode carrying
    # a cascade of CascadedCmpNodes for the later links.
    n1 = p_starred_expr(s)
    if s.sy in comparison_ops:
        pos = s.position()
        op = p_cmp_op(s)
        n2 = p_starred_expr(s)
        n1 = ExprNodes.PrimaryCmpNode(pos,
            operator = op, operand1 = n1, operand2 = n2)
        if s.sy in comparison_ops:
            n1.cascade = p_cascaded_cmp(s)
    return n1
def p_test_or_starred_expr(s):
    """Parse either a starred expression or a plain test."""
    return p_starred_expr(s) if s.sy == '*' else p_test(s)
def p_starred_expr(s):
    """Parse an optionally starred expression ('*' bit_expr | bit_expr)."""
    pos = s.position()
    is_starred = s.sy == '*'
    if is_starred:
        s.next()
    node = p_bit_expr(s)
    if is_starred:
        node = ExprNodes.StarredTargetNode(pos, node)
    return node
def p_cascaded_cmp(s):
    # Parse the continuation of a chained comparison: one comparison
    # operator plus its right operand, recursing for further links.
    pos = s.position()
    op = p_cmp_op(s)
    n2 = p_starred_expr(s)
    result = ExprNodes.CascadedCmpNode(pos,
        operator = op, operand2 = n2)
    if s.sy in comparison_ops:
        result.cascade = p_cascaded_cmp(s)
    return result
def p_cmp_op(s):
    """Parse one comparison operator.

    'not in' is normalised to 'not_in', 'is not' to 'is_not' and the
    Py2-only '<>' to '!='.
    """
    if s.sy == 'not':
        s.next()
        s.expect('in')
        return 'not_in'
    if s.sy == 'is':
        s.next()
        if s.sy == 'not':
            s.next()
            return 'is_not'
        return 'is'
    op = s.sy
    s.next()
    return '!=' if op == '<>' else op
# Tokens that can start a comparison operator (see p_cmp_op).
comparison_ops = (
    '<', '>', '==', '>=', '<=', '<>', '!=',
    'in', 'is', 'not'
)
# Binary operator precedence levels, from lowest ('|') to highest
# ('*', '/', '%', '//'); each level delegates to p_binop_expr.
#expr: xor_expr ('|' xor_expr)*
def p_bit_expr(s):
    return p_binop_expr(s, ('|',), p_xor_expr)
#xor_expr: and_expr ('^' and_expr)*
def p_xor_expr(s):
    return p_binop_expr(s, ('^',), p_and_expr)
#and_expr: shift_expr ('&' shift_expr)*
def p_and_expr(s):
    return p_binop_expr(s, ('&',), p_shift_expr)
#shift_expr: arith_expr (('<<'|'>>') arith_expr)*
def p_shift_expr(s):
    return p_binop_expr(s, ('<<', '>>'), p_arith_expr)
#arith_expr: term (('+'|'-') term)*
def p_arith_expr(s):
    return p_binop_expr(s, ('+', '-'), p_term)
#term: factor (('*'|'/'|'%') factor)*
def p_term(s):
    return p_binop_expr(s, ('*', '/', '%', '//'), p_factor)
#factor: ('+'|'-'|'~'|'&'|typecast|sizeof) factor | power
def p_factor(s):
    # little indirection for C-ification purposes
    return _p_factor(s)
def _p_factor(s):
    """Dispatch on the current token to parse one factor."""
    sy = s.sy
    if sy in ('+', '-', '~'):
        # unary arithmetic / bitwise-not operator
        op = s.sy
        pos = s.position()
        s.next()
        return ExprNodes.unop_node(pos, op, p_factor(s))
    if sy == '&':
        # C address-of operator
        pos = s.position()
        s.next()
        return ExprNodes.AmpersandNode(pos, operand = p_factor(s))
    if sy == "<":
        return p_typecast(s)
    if sy == 'IDENT' and s.systring == "sizeof":
        return p_sizeof(s)
    return p_power(s)
def p_typecast(s):
    # s.sy == "<"
    # Parse a Cython cast  <type>expr  or checked cast  <type?>expr;
    # a memoryview slice type instead yields a CythonArrayNode.
    pos = s.position()
    s.next()
    base_type = p_c_base_type(s)
    is_memslice = isinstance(base_type, Nodes.MemoryViewSliceTypeNode)
    is_template = isinstance(base_type, Nodes.TemplatedTypeNode)
    if not is_memslice and not is_template and base_type.name is None:
        s.error("Unknown type")
    declarator = p_c_declarator(s, empty = 1)
    if s.sy == '?':
        # '?' requests a run-time type check on the cast object
        s.next()
        typecheck = 1
    else:
        typecheck = 0
    s.expect(">")
    operand = p_factor(s)
    if is_memslice:
        return ExprNodes.CythonArrayNode(pos, base_type_node=base_type,
                                         operand=operand)
    return ExprNodes.TypecastNode(pos,
        base_type = base_type,
        declarator = declarator,
        operand = operand,
        typecheck = typecheck)
def p_sizeof(s):
    # s.sy == ident "sizeof"
    # Parse sizeof(expr) or sizeof(type).
    pos = s.position()
    s.next()
    s.expect('(')
    # Here we decide if we are looking at an expression or type
    # If it is actually a type, but parsable as an expression,
    # we treat it as an expression here.
    if looking_at_expr(s):
        operand = p_test(s)
        node = ExprNodes.SizeofVarNode(pos, operand = operand)
    else:
        base_type = p_c_base_type(s)
        declarator = p_c_declarator(s, empty = 1)
        node = ExprNodes.SizeofTypeNode(pos,
            base_type = base_type, declarator = declarator)
    s.expect(')')
    return node
def p_yield_expression(s):
    """Parse a yield expression; the yielded value is optional."""
    # s.sy == "yield"
    pos = s.position()
    s.next()
    arg = None
    if s.sy != ')' and s.sy not in statement_terminators:
        arg = p_testlist(s)
    return ExprNodes.YieldExprNode(pos, arg=arg)
def p_yield_statement(s):
    """Wrap a yield expression in an expression statement node."""
    # s.sy == "yield"
    expr = p_yield_expression(s)
    return Nodes.ExprStatNode(expr.pos, expr=expr)
#power: atom trailer* ('**' factor)*
def p_power(s):
    """power: atom trailer* ('**' factor)*"""
    if s.systring == 'new' and s.peek()[0] == 'IDENT':
        return p_new_expr(s)
    node = p_atom(s)
    while s.sy in ('(', '[', '.'):
        node = p_trailer(s, node)
    if s.sy != '**':
        return node
    pos = s.position()
    s.next()
    exponent = p_factor(s)
    return ExprNodes.binop_node(pos, '**', node, exponent)
def p_new_expr(s):
    """Parse a C++ 'new' expression (class constructor call)."""
    # s.systring == 'new'
    pos = s.position()
    s.next()
    cpp_class = p_c_base_type(s)
    return p_call(s, ExprNodes.NewExprNode(pos, cppclass = cpp_class))
#trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
def p_trailer(s, node1):
    """trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME"""
    pos = s.position()
    if s.sy == '(':
        return p_call(s, node1)
    if s.sy == '[':
        return p_index(s, node1)
    # attribute access: s.sy == '.'
    s.next()
    attr_name = EncodedString( p_ident(s) )
    return ExprNodes.AttributeNode(pos,
        obj = node1, attribute = attr_name)
# arglist: argument (',' argument)* [',']
# argument: [test '='] test # Really [keyword '='] test
def p_call_parse_args(s, allow_genexp = True):
    # s.sy == '('
    # Parse a call argument list; returns
    # (positional_args, keyword_args, star_arg, starstar_arg).
    pos = s.position()
    s.next()
    positional_args = []
    keyword_args = []
    star_arg = None
    starstar_arg = None
    while s.sy not in ('**', ')'):
        if s.sy == '*':
            if star_arg:
                s.error("only one star-arg parameter allowed",
                    pos = s.position())
            s.next()
            star_arg = p_test(s)
        else:
            arg = p_test(s)
            if s.sy == '=':
                # keyword argument: name '=' value
                s.next()
                if not arg.is_name:
                    s.error("Expected an identifier before '='",
                        pos = arg.pos)
                encoded_name = EncodedString(arg.name)
                keyword = ExprNodes.IdentifierStringNode(arg.pos, value = encoded_name)
                arg = p_test(s)
                keyword_args.append((keyword, arg))
            else:
                # positional args must precede keyword and star args
                if keyword_args:
                    s.error("Non-keyword arg following keyword arg",
                        pos = arg.pos)
                if star_arg:
                    s.error("Non-keyword arg following star-arg",
                        pos = arg.pos)
                positional_args.append(arg)
        if s.sy != ',':
            break
        s.next()
    if s.sy == 'for':
        # generator expression as sole (unstarred) argument
        if len(positional_args) == 1 and not star_arg:
            positional_args = [ p_genexp(s, positional_args[0]) ]
    elif s.sy == '**':
        s.next()
        starstar_arg = p_test(s)
        if s.sy == ',':
            s.next()
    s.expect(')')
    return positional_args, keyword_args, star_arg, starstar_arg
def p_call_build_packed_args(pos, positional_args, keyword_args,
                             star_arg, starstar_arg):
    # Pack the parsed call arguments into the (arg_tuple, keyword_dict)
    # pair expected by GeneralCallNode.
    arg_tuple = None
    keyword_dict = None
    if positional_args or not star_arg:
        arg_tuple = ExprNodes.TupleNode(pos,
            args = positional_args)
    if star_arg:
        # concatenate the unpacked star-arg onto the positional tuple
        star_arg_tuple = ExprNodes.AsTupleNode(pos, arg = star_arg)
        if arg_tuple:
            arg_tuple = ExprNodes.binop_node(pos,
                operator = '+', operand1 = arg_tuple,
                operand2 = star_arg_tuple)
        else:
            arg_tuple = star_arg_tuple
    if keyword_args or starstar_arg:
        keyword_args = [ExprNodes.DictItemNode(pos=key.pos, key=key, value=value)
                        for key, value in keyword_args]
        if starstar_arg:
            keyword_dict = ExprNodes.KeywordArgsNode(
                pos,
                starstar_arg = starstar_arg,
                keyword_args = keyword_args)
        else:
            keyword_dict = ExprNodes.DictNode(
                pos, key_value_pairs = keyword_args)
    return arg_tuple, keyword_dict
def p_call(s, function):
    """Parse a call trailer for *function*; s.sy == '('.

    Simple positional-only calls become SimpleCallNode, anything with
    keyword/star arguments becomes GeneralCallNode.
    """
    pos = s.position()
    positional_args, keyword_args, star_arg, starstar_arg = \
        p_call_parse_args(s)
    if keyword_args or star_arg or starstar_arg:
        arg_tuple, keyword_dict = p_call_build_packed_args(
            pos, positional_args, keyword_args, star_arg, starstar_arg)
        return ExprNodes.GeneralCallNode(pos,
            function = function,
            positional_args = arg_tuple,
            keyword_args = keyword_dict)
    return ExprNodes.SimpleCallNode(pos,
        function = function,
        args = positional_args)
#lambdef: 'lambda' [varargslist] ':' test
#subscriptlist: subscript (',' subscript)* [',']
def p_index(s, base):
    # s.sy == '['
    # Parse an index/slice trailer.  A single two-element subscript
    # becomes a SliceIndexNode; everything else becomes an IndexNode
    # (with a TupleNode index when there are several subscripts).
    pos = s.position()
    s.next()
    subscripts = p_subscript_list(s)
    if len(subscripts) == 1 and len(subscripts[0]) == 2:
        start, stop = subscripts[0]
        result = ExprNodes.SliceIndexNode(pos,
            base = base, start = start, stop = stop)
    else:
        indexes = make_slice_nodes(pos, subscripts)
        if len(indexes) == 1:
            index = indexes[0]
        else:
            index = ExprNodes.TupleNode(pos, args = indexes)
        result = ExprNodes.IndexNode(pos,
            base = base, index = index)
    s.expect(']')
    return result
def p_subscript_list(s):
    """Parse comma-separated subscripts, allowing a trailing comma."""
    subscripts = [p_subscript(s)]
    while s.sy == ',':
        s.next()
        if s.sy == ']':
            break
        subscripts.append(p_subscript(s))
    return subscripts
#subscript: '.' '.' '.' | test | [test] ':' [test] [':' [test]]
def p_subscript(s):
    """Parse one subscript.

    Returns a list of 1, 2 or 3 ExprNodes (entries may be None for
    omitted slice components), depending on how many slice elements
    (start[:stop[:step]]) were encountered.
    """
    # Removed the unused local 'pos = s.position()' from the original.
    start = p_slice_element(s, (':',))
    if s.sy != ':':
        return [start]
    s.next()
    stop = p_slice_element(s, (':', ',', ']'))
    if s.sy != ':':
        return [start, stop]
    s.next()
    step = p_slice_element(s, (':', ',', ']'))
    return [start, stop, step]
def p_slice_element(s, follow_set):
    """Parse an optional slice component.

    Returns None (component omitted) when the next token is in
    *follow_set*, otherwise a parsed test expression.
    """
    if s.sy in follow_set:
        return None
    return p_test(s)
def expect_ellipsis(s):
    """Consume the three '.' tokens making up an ellipsis."""
    for _ in range(3):
        s.expect('.')
def make_slice_nodes(pos, subscripts):
    """Convert p_subscript_list output into a list of ExprNodes,
    wrapping multi-component subscripts in SliceNodes."""
    return [sub[0] if len(sub) == 1 else make_slice_node(pos, *sub)
            for sub in subscripts]
def make_slice_node(pos, start, stop = None, step = None):
    """Build a SliceNode, substituting a NoneNode for each missing
    (falsy) component."""
    def none():
        return ExprNodes.NoneNode(pos)
    return ExprNodes.SliceNode(pos,
        start = start or none(),
        stop = stop or none(),
        step = step or none())
#atom: '(' [yield_expr|testlist_comp] ')' | '[' [listmaker] ']' | '{' [dict_or_set_maker] '}' | '`' testlist '`' | NAME | NUMBER | STRING+
def p_atom(s):
    # Parse an atomic expression: parenthesised form, list/dict/set
    # display, backquote expression, ellipsis, literal, or name.
    pos = s.position()
    sy = s.sy
    if sy == '(':
        s.next()
        if s.sy == ')':
            result = ExprNodes.TupleNode(pos, args = [])
        elif s.sy == 'yield':
            result = p_yield_expression(s)
        else:
            result = p_testlist_comp(s)
        s.expect(')')
        return result
    elif sy == '[':
        return p_list_maker(s)
    elif sy == '{':
        return p_dict_or_set_maker(s)
    elif sy == '`':
        return p_backquote_expr(s)
    elif sy == '.':
        expect_ellipsis(s)
        return ExprNodes.EllipsisNode(pos)
    elif sy == 'INT':
        return p_int_literal(s)
    elif sy == 'FLOAT':
        value = s.systring
        s.next()
        return ExprNodes.FloatNode(pos, value = value)
    elif sy == 'IMAG':
        # strip the trailing 'j' from the imaginary literal
        value = s.systring[:-1]
        s.next()
        return ExprNodes.ImagNode(pos, value = value)
    elif sy == 'BEGIN_STRING':
        kind, bytes_value, unicode_value = p_cat_string_literal(s)
        if kind == 'c':
            return ExprNodes.CharNode(pos, value = bytes_value)
        elif kind == 'u':
            return ExprNodes.UnicodeNode(pos, value = unicode_value, bytes_value = bytes_value)
        elif kind == 'b':
            return ExprNodes.BytesNode(pos, value = bytes_value)
        else:
            return ExprNodes.StringNode(pos, value = bytes_value, unicode_value = unicode_value)
    elif sy == 'IDENT':
        name = EncodedString( s.systring )
        s.next()
        if name == "None":
            return ExprNodes.NoneNode(pos)
        elif name == "True":
            return ExprNodes.BoolNode(pos, value=True)
        elif name == "False":
            return ExprNodes.BoolNode(pos, value=False)
        elif name == "NULL" and not s.in_python_file:
            # NULL is only a keyword in Cython (.pyx/.pxd) files
            return ExprNodes.NullNode(pos)
        else:
            return p_name(s, name)
    else:
        s.error("Expected an identifier or literal")
def p_int_literal(s):
    # Parse an integer literal, splitting off any C-style 'U'/'L'
    # suffixes to decide whether it is a C literal.
    pos = s.position()
    value = s.systring
    s.next()
    unsigned = ""
    longness = ""
    while value[-1] in u"UuLl":
        if value[-1] in u"Ll":
            longness += "L"
        else:
            unsigned += "U"
        value = value[:-1]
    # '3L' is ambiguous in Py2 but not in Py3. '3U' and '3LL' are
    # illegal in Py2 Python files. All suffixes are illegal in Py3
    # Python files.
    is_c_literal = None
    if unsigned:
        is_c_literal = True
    elif longness:
        if longness == 'LL' or s.context.language_level >= 3:
            is_c_literal = True
    if s.in_python_file:
        if is_c_literal:
            error(pos, "illegal integer literal syntax in Python source file")
        is_c_literal = False
    return ExprNodes.IntNode(pos,
        is_c_literal = is_c_literal,
        value = value,
        unsigned = unsigned,
        longness = longness)
def p_name(s, name):
    # Build a NameNode for *name*, or fold a DEF compile-time constant
    # into the corresponding literal node if the name is defined in the
    # compile-time environment.
    pos = s.position()
    if not s.compile_time_expr and name in s.compile_time_env:
        value = s.compile_time_env.lookup_here(name)
        rep = repr(value)
        if isinstance(value, bool):
            return ExprNodes.BoolNode(pos, value = value)
        elif isinstance(value, int):
            return ExprNodes.IntNode(pos, value = rep)
        elif isinstance(value, long):
            # 'long' is the Py2 builtin -- this module targets Python 2
            return ExprNodes.IntNode(pos, value = rep, longness = "L")
        elif isinstance(value, float):
            return ExprNodes.FloatNode(pos, value = rep)
        elif isinstance(value, _unicode):
            return ExprNodes.UnicodeNode(pos, value = value)
        elif isinstance(value, _bytes):
            return ExprNodes.BytesNode(pos, value = value)
        else:
            error(pos, "Invalid type for compile-time constant: %s"
                % value.__class__.__name__)
    return ExprNodes.NameNode(pos, name = name)
def p_cat_string_literal(s):
    # A sequence of one or more adjacent string literals
    # (implicit concatenation: "a" "b" -> "ab").
    # Returns (kind, bytes_value, unicode_value)
    # where kind in ('b', 'c', 'u', '')
    kind, bytes_value, unicode_value = p_string_literal(s)
    if kind == 'c' or s.sy != 'BEGIN_STRING':
        return kind, bytes_value, unicode_value
    bstrings, ustrings = [bytes_value], [unicode_value]
    bytes_value = unicode_value = None
    while s.sy == 'BEGIN_STRING':
        pos = s.position()
        next_kind, next_bytes_value, next_unicode_value = p_string_literal(s)
        if next_kind == 'c':
            error(pos, "Cannot concatenate char literal with another string or char literal")
        elif next_kind != kind:
            error(pos, "Cannot mix string literals of different types, expected %s'', got %s''" %
                  (kind, next_kind))
        else:
            bstrings.append(next_bytes_value)
            ustrings.append(next_unicode_value)
    # join and rewrap the partial literals
    if kind in ('b', 'c', '') or kind == 'u' and None not in bstrings:
        # Py3 enforced unicode literals are parsed as bytes/unicode combination
        bytes_value = BytesLiteral( StringEncoding.join_bytes(bstrings) )
        bytes_value.encoding = s.source_encoding
    if kind in ('u', ''):
        unicode_value = EncodedString( u''.join([ u for u in ustrings if u is not None ]) )
    return kind, bytes_value, unicode_value
def p_opt_string_literal(s, required_type='u'):
    """Parse an optional string literal of the given required type;
    returns its unicode or bytes value, or None when absent."""
    if s.sy != 'BEGIN_STRING':
        return None
    kind, bytes_value, unicode_value = p_string_literal(s, required_type)
    if required_type == 'u':
        return unicode_value
    elif required_type == 'b':
        return bytes_value
    else:
        s.error("internal parser configuration error")
def check_for_non_ascii_characters(string):
    """Return True if *string* contains any character above U+007F."""
    return any(ch >= u'\x80' for ch in string)
def p_string_literal(s, kind_override=None):
    # A single string or char literal. Returns (kind, bvalue, uvalue)
    # where kind in ('b', 'c', 'u', ''). The 'bvalue' is the source
    # code byte sequence of the string literal, 'uvalue' is the
    # decoded Unicode string. Either of the two may be None depending
    # on the 'kind' of string, only unprefixed strings have both
    # representations.
    # s.sy == 'BEGIN_STRING'
    pos = s.position()
    is_raw = 0
    is_python3_source = s.context.language_level >= 3
    has_non_ASCII_literal_characters = False
    # derive the kind from the literal prefix ('r', 'u', 'b', 'c' or none)
    kind = s.systring[:1].lower()
    if kind == 'r':
        kind = ''
        is_raw = 1
    elif kind in 'ub':
        is_raw = s.systring[1:2].lower() == 'r'
    elif kind != 'c':
        kind = ''
    if kind == '' and kind_override is None and Future.unicode_literals in s.context.future_directives:
        chars = StringEncoding.StrLiteralBuilder(s.source_encoding)
        kind = 'u'
    else:
        if kind_override is not None and kind_override in 'ub':
            kind = kind_override
        if kind == 'u':
            chars = StringEncoding.UnicodeLiteralBuilder()
        elif kind == '':
            chars = StringEncoding.StrLiteralBuilder(s.source_encoding)
        else:
            chars = StringEncoding.BytesLiteralBuilder(s.source_encoding)
    # accumulate CHARS/ESCAPE/NEWLINE tokens until END_STRING
    while 1:
        s.next()
        sy = s.sy
        systr = s.systring
        #print "p_string_literal: sy =", sy, repr(s.systring) ###
        if sy == 'CHARS':
            chars.append(systr)
            if is_python3_source and not has_non_ASCII_literal_characters and check_for_non_ascii_characters(systr):
                has_non_ASCII_literal_characters = True
        elif sy == 'ESCAPE':
            if is_raw:
                # raw literals keep escape sequences verbatim
                chars.append(systr)
                if is_python3_source and not has_non_ASCII_literal_characters \
                       and check_for_non_ascii_characters(systr):
                    has_non_ASCII_literal_characters = True
            else:
                c = systr[1]
                if c in u"01234567":
                    # octal escape
                    chars.append_charval( int(systr[1:], 8) )
                elif c in u"'\"\\":
                    chars.append(c)
                elif c in u"abfnrtv":
                    chars.append(
                        StringEncoding.char_from_escape_sequence(systr))
                elif c == u'\n':
                    # escaped newline: line continuation within the literal
                    pass
                elif c == u'x':
                    if len(systr) == 4:
                        chars.append_charval( int(systr[2:], 16) )
                    else:
                        s.error("Invalid hex escape '%s'" % systr)
                elif c in u'Uu':
                    if kind in ('u', ''):
                        if len(systr) in (6,10):
                            chrval = int(systr[2:], 16)
                            if chrval > 1114111: # sys.maxunicode:
                                s.error("Invalid unicode escape '%s'" % systr)
                        else:
                            s.error("Invalid unicode escape '%s'" % systr)
                    else:
                        # unicode escapes in byte strings are not unescaped
                        chrval = None
                    chars.append_uescape(chrval, systr)
                else:
                    # unknown escape: keep the backslash literally
                    chars.append(u'\\' + systr[1:])
                    if is_python3_source and not has_non_ASCII_literal_characters \
                           and check_for_non_ascii_characters(systr):
                        has_non_ASCII_literal_characters = True
        elif sy == 'NEWLINE':
            chars.append(u'\n')
        elif sy == 'END_STRING':
            break
        elif sy == 'EOF':
            s.error("Unclosed string literal", pos = pos)
        else:
            s.error(
                "Unexpected token %r:%r in string literal" %
                (sy, s.systring))
    if kind == 'c':
        # char literal: must decode to exactly one byte
        unicode_value = None
        bytes_value = chars.getchar()
        if len(bytes_value) != 1:
            error(pos, u"invalid character literal: %r" % bytes_value)
    else:
        bytes_value, unicode_value = chars.getstrings()
        if is_python3_source and has_non_ASCII_literal_characters:
            # Python 3 forbids literal non-ASCII characters in byte strings
            if kind != 'u':
                s.error("bytes can only contain ASCII literal characters.", pos = pos)
            bytes_value = None
    s.next()
    return (kind, bytes_value, unicode_value)
# list_display ::= "[" [listmaker] "]"
# listmaker ::= expression ( comp_for | ( "," expression )* [","] )
# comp_iter ::= comp_for | comp_if
# comp_for ::= "for" expression_list "in" testlist [comp_iter]
# comp_if ::= "if" test [comp_iter]
def p_list_maker(s):
    # s.sy == '['
    # Parse a list display: empty list, plain list, or list comprehension.
    pos = s.position()
    s.next()
    if s.sy == ']':
        s.expect(']')
        return ExprNodes.ListNode(pos, args = [])
    expr = p_test(s)
    if s.sy == 'for':
        # list comprehension: append each element to a hidden target list
        target = ExprNodes.ListNode(pos, args = [])
        append = ExprNodes.ComprehensionAppendNode(
            pos, expr=expr, target=ExprNodes.CloneNode(target))
        loop = p_comp_for(s, append)
        s.expect(']')
        return ExprNodes.ComprehensionNode(
            pos, loop=loop, append=append, target=target,
            # list comprehensions leak their loop variable in Py2
            has_local_scope = s.context.language_level >= 3)
    else:
        if s.sy == ',':
            s.next()
            exprs = p_simple_expr_list(s, expr)
        else:
            exprs = [expr]
        s.expect(']')
        return ExprNodes.ListNode(pos, args = exprs)
def p_comp_iter(s, body):
    """Parse any trailing comprehension clause; returns *body* unchanged
    when no 'for'/'if' clause follows (the innermost append operation)."""
    if s.sy == 'for':
        return p_comp_for(s, body)
    if s.sy == 'if':
        return p_comp_if(s, body)
    return body
def p_comp_for(s, body):
    """Parse one 'for' clause of a comprehension; s.sy == 'for'."""
    pos = s.position()
    s.next()
    kw = p_for_bounds(s, allow_testlist=False)
    kw['else_clause'] = None
    kw['body'] = p_comp_iter(s, body)
    return Nodes.ForStatNode(pos, **kw)
def p_comp_if(s, body):
    """Parse one 'if' clause of a comprehension; s.sy == 'if'."""
    pos = s.position()
    s.next()
    condition = p_test_nocond(s)
    clause = Nodes.IfClauseNode(pos, condition = condition,
                                body = p_comp_iter(s, body))
    return Nodes.IfStatNode(pos,
        if_clauses = [clause], else_clause = None )
#dictmaker: test ':' test (',' test ':' test)* [',']
def p_dict_or_set_maker(s):
    # s.sy == '{'
    # Parse a dict or set literal or comprehension; the first item and
    # the token after it decide which form we have.
    pos = s.position()
    s.next()
    if s.sy == '}':
        # '{}' is the empty dict, never the empty set
        s.next()
        return ExprNodes.DictNode(pos, key_value_pairs = [])
    item = p_test(s)
    if s.sy == ',' or s.sy == '}':
        # set literal
        values = [item]
        while s.sy == ',':
            s.next()
            if s.sy == '}':
                break
            values.append( p_test(s) )
        s.expect('}')
        return ExprNodes.SetNode(pos, args=values)
    elif s.sy == 'for':
        # set comprehension
        target = ExprNodes.SetNode(pos, args=[])
        append = ExprNodes.ComprehensionAppendNode(
            item.pos, expr=item, target=ExprNodes.CloneNode(target))
        loop = p_comp_for(s, append)
        s.expect('}')
        return ExprNodes.ComprehensionNode(
            pos, loop=loop, append=append, target=target)
    elif s.sy == ':':
        # dict literal or comprehension
        key = item
        s.next()
        value = p_test(s)
        if s.sy == 'for':
            # dict comprehension
            target = ExprNodes.DictNode(pos, key_value_pairs = [])
            append = ExprNodes.DictComprehensionAppendNode(
                item.pos, key_expr=key, value_expr=value,
                target=ExprNodes.CloneNode(target))
            loop = p_comp_for(s, append)
            s.expect('}')
            return ExprNodes.ComprehensionNode(
                pos, loop=loop, append=append, target=target)
        else:
            # dict literal
            items = [ExprNodes.DictItemNode(key.pos, key=key, value=value)]
            while s.sy == ',':
                s.next()
                if s.sy == '}':
                    break
                key = p_test(s)
                s.expect(':')
                value = p_test(s)
                items.append(
                    ExprNodes.DictItemNode(key.pos, key=key, value=value))
            s.expect('}')
            return ExprNodes.DictNode(pos, key_value_pairs=items)
    else:
        # raise an error
        s.expect('}')
        return ExprNodes.DictNode(pos, key_value_pairs = [])
# NOTE: no longer in Py3 :)
def p_backquote_expr(s):
    """Parse a Py2 backquote (repr) expression; s.sy == '`'."""
    pos = s.position()
    s.next()
    args = [p_test(s)]
    while s.sy == ',':
        s.next()
        args.append(p_test(s))
    s.expect('`')
    arg = args[0] if len(args) == 1 else ExprNodes.TupleNode(pos, args = args)
    return ExprNodes.BackquoteNode(pos, arg = arg)
def p_simple_expr_list(s, expr=None):
    """Parse a comma-separated list of simple expressions.

    If *expr* is given it becomes the first element.  Parsing stops at
    any token in expr_terminators; a trailing comma is allowed.
    """
    # The original used the fragile 'x and [expr] or []' conditional
    # idiom; a conditional expression is clearer and not truthiness-
    # dependent on the branch values.
    exprs = [expr] if expr is not None else []
    while s.sy not in expr_terminators:
        exprs.append( p_test(s) )
        if s.sy != ',':
            break
        s.next()
    return exprs
def p_test_or_starred_expr_list(s, expr=None):
    """Parse a comma-separated list of tests or starred expressions.

    If *expr* is given it becomes the first element.  Parsing stops at
    any token in expr_terminators; a trailing comma is allowed.
    """
    # Replaced the 'x and [expr] or []' conditional idiom with a plain
    # conditional expression (same behavior, clearer intent).
    exprs = [expr] if expr is not None else []
    while s.sy not in expr_terminators:
        exprs.append( p_test_or_starred_expr(s) )
        if s.sy != ',':
            break
        s.next()
    return exprs
#testlist: test (',' test)* [',']
def p_testlist(s):
    """testlist: test (',' test)* [',']"""
    pos = s.position()
    first = p_test(s)
    if s.sy != ',':
        return first
    s.next()
    return ExprNodes.TupleNode(pos, args = p_simple_expr_list(s, first))
# testlist_star_expr: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
def p_testlist_star_expr(s):
    """testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']"""
    pos = s.position()
    first = p_test_or_starred_expr(s)
    if s.sy != ',':
        return first
    s.next()
    return ExprNodes.TupleNode(pos,
        args = p_test_or_starred_expr_list(s, first))
# testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
def p_testlist_comp(s):
    """testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )"""
    pos = s.position()
    first = p_test_or_starred_expr(s)
    if s.sy == ',':
        s.next()
        items = p_test_or_starred_expr_list(s, first)
        return ExprNodes.TupleNode(pos, args = items)
    if s.sy == 'for':
        return p_genexp(s, first)
    return first
def p_genexp(s, expr):
    """Wrap *expr* in a generator expression; s.sy == 'for'."""
    # the generator body yields the expression on each iteration
    inner = Nodes.ExprStatNode(
        expr.pos, expr = ExprNodes.YieldExprNode(expr.pos, arg=expr))
    loop = p_comp_for(s, inner)
    return ExprNodes.GeneratorExpressionNode(expr.pos, loop=loop)
# Tokens that terminate an expression list.
expr_terminators = (')', ']', '}', ':', '=', 'NEWLINE')
#-------------------------------------------------------
#
# Statements
#
#-------------------------------------------------------
def p_global_statement(s):
    """Parse a 'global' declaration; s.sy == 'global'."""
    pos = s.position()
    s.next()
    return Nodes.GlobalNode(pos, names = p_ident_list(s))
def p_nonlocal_statement(s):
    """Parse a 'nonlocal' declaration; s.sy == 'nonlocal'."""
    pos = s.position()
    s.next()
    return Nodes.NonlocalNode(pos, names = p_ident_list(s))
def p_expression_or_assignment(s):
    # Parse an expression statement, an in-place (augmented) assignment,
    # or a possibly cascaded assignment  a = b = expr.
    expr_list = [p_testlist_star_expr(s)]
    while s.sy == '=':
        s.next()
        if s.sy == 'yield':
            expr = p_yield_expression(s)
        else:
            expr = p_testlist_star_expr(s)
        expr_list.append(expr)
    if len(expr_list) == 1:
        # no '=' seen: expression statement or in-place assignment
        if re.match(r"([+*/\%^\&|-]|<<|>>|\*\*|//)=", s.sy):
            lhs = expr_list[0]
            if not isinstance(lhs, (ExprNodes.AttributeNode, ExprNodes.IndexNode, ExprNodes.NameNode) ):
                error(lhs.pos, "Illegal operand for inplace operation.")
            operator = s.sy[:-1]
            s.next()
            if s.sy == 'yield':
                rhs = p_yield_expression(s)
            else:
                rhs = p_testlist(s)
            return Nodes.InPlaceAssignmentNode(lhs.pos, operator = operator, lhs = lhs, rhs = rhs)
        expr = expr_list[0]
        if isinstance(expr, (ExprNodes.UnicodeNode, ExprNodes.StringNode, ExprNodes.BytesNode)):
            # a bare string literal statement is a no-op (docstring)
            return Nodes.PassStatNode(expr.pos)
        else:
            return Nodes.ExprStatNode(expr.pos, expr = expr)
    rhs = expr_list[-1]
    if len(expr_list) == 2:
        return Nodes.SingleAssignmentNode(rhs.pos,
            lhs = expr_list[0], rhs = rhs)
    else:
        return Nodes.CascadedAssignmentNode(rhs.pos,
            lhs_list = expr_list[:-1], rhs = rhs)
def p_print_statement(s):
    # s.sy == 'print'
    # Py2 print statement, including the 'print >> stream, ...' form.
    # A trailing comma suppresses the final newline.
    pos = s.position()
    ends_with_comma = 0
    s.next()
    if s.sy == '>>':
        s.next()
        stream = p_test(s)
        if s.sy == ',':
            s.next()
            ends_with_comma = s.sy in ('NEWLINE', 'EOF')
    else:
        stream = None
    args = []
    if s.sy not in ('NEWLINE', 'EOF'):
        args.append(p_test(s))
        while s.sy == ',':
            s.next()
            if s.sy in ('NEWLINE', 'EOF'):
                ends_with_comma = 1
                break
            args.append(p_test(s))
    arg_tuple = ExprNodes.TupleNode(pos, args = args)
    return Nodes.PrintStatNode(pos,
        arg_tuple = arg_tuple, stream = stream,
        append_newline = not ends_with_comma)
def p_exec_statement(s):
    """Parse a Py2 'exec' statement: exec expr [in globals [, locals]]."""
    pos = s.position()
    s.next()
    args = [p_bit_expr(s)]
    if s.sy == 'in':
        s.next()
        args.append(p_test(s))
        if s.sy == ',':
            s.next()
            args.append(p_test(s))
    return Nodes.ExecStatNode(pos, args = args)
def p_del_statement(s):
    """Parse a 'del' statement; s.sy == 'del'."""
    pos = s.position()
    s.next()
    # FIXME: 'exprlist' in Python
    targets = p_simple_expr_list(s)
    return Nodes.DelStatNode(pos, args = targets)
def p_pass_statement(s, with_newline = 0):
    # Parse 'pass', optionally requiring a trailing NEWLINE.
    pos = s.position()
    s.expect('pass')
    if with_newline:
        s.expect_newline("Expected a newline")
    return Nodes.PassStatNode(pos)
def p_break_statement(s):
    # s.sy == 'break'
    pos = s.position()
    s.next()
    return Nodes.BreakStatNode(pos)
def p_continue_statement(s):
    # s.sy == 'continue'
    pos = s.position()
    s.next()
    return Nodes.ContinueStatNode(pos)
def p_return_statement(s):
    """Parse 'return' with an optional value; s.sy == 'return'."""
    pos = s.position()
    s.next()
    value = None
    if s.sy not in statement_terminators:
        value = p_testlist(s)
    return Nodes.ReturnStatNode(pos, value = value)
def p_raise_statement(s):
    # s.sy == 'raise'
    # Parse 'raise [type [, value [, tb]]] [from cause]'.
    # A bare 'raise' re-raises the active exception (ReraiseStatNode).
    pos = s.position()
    s.next()
    exc_type = None
    exc_value = None
    exc_tb = None
    cause = None
    if s.sy not in statement_terminators:
        exc_type = p_test(s)
        if s.sy == ',':
            # Py2 three-argument raise form
            s.next()
            exc_value = p_test(s)
            if s.sy == ',':
                s.next()
                exc_tb = p_test(s)
        elif s.sy == 'from':
            # Py3 exception chaining
            s.next()
            cause = p_test(s)
    if exc_type or exc_value or exc_tb:
        return Nodes.RaiseStatNode(pos,
            exc_type = exc_type,
            exc_value = exc_value,
            exc_tb = exc_tb,
            cause = cause)
    else:
        return Nodes.ReraiseStatNode(pos)
def p_import_statement(s):
    # s.sy in ('import', 'cimport')
    # Parse 'import a.b as c, ...' or 'cimport ...'.  A plain 'import'
    # is lowered to an assignment from an ImportNode.
    pos = s.position()
    kind = s.sy
    s.next()
    items = [p_dotted_name(s, as_allowed = 1)]
    while s.sy == ',':
        s.next()
        items.append(p_dotted_name(s, as_allowed = 1))
    stats = []
    for pos, target_name, dotted_name, as_name in items:
        dotted_name = EncodedString(dotted_name)
        if kind == 'cimport':
            stat = Nodes.CImportStatNode(pos,
                module_name = dotted_name,
                as_name = as_name)
        else:
            if as_name and "." in dotted_name:
                # 'import a.b as c': request the submodule via '*' name list
                name_list = ExprNodes.ListNode(pos, args = [
                        ExprNodes.IdentifierStringNode(pos, value = EncodedString("*"))])
            else:
                name_list = None
            stat = Nodes.SingleAssignmentNode(pos,
                lhs = ExprNodes.NameNode(pos,
                    name = as_name or target_name),
                rhs = ExprNodes.ImportNode(pos,
                    module_name = ExprNodes.IdentifierStringNode(
                        pos, value = dotted_name),
                    level = None,
                    name_list = name_list))
        stats.append(stat)
    return Nodes.StatListNode(pos, stats = stats)
def p_from_import_statement(s, first_statement = 0):
    # s.sy == 'from'
    # Parse 'from [.]* mod import|cimport names', including special
    # handling of 'from __future__ import ...' directives.
    pos = s.position()
    s.next()
    if s.sy == '.':
        # count relative import level
        level = 0
        while s.sy == '.':
            level += 1
            s.next()
        if s.sy == 'cimport':
            s.error("Relative cimport is not supported yet")
    else:
        level = None
    if level is not None and s.sy == 'import':
        # we are dealing with "from .. import foo, bar"
        dotted_name_pos, dotted_name = s.position(), ''
    elif level is not None and s.sy == 'cimport':
        # "from .. cimport"
        s.error("Relative cimport is not supported yet")
    else:
        (dotted_name_pos, _, dotted_name, _) = \
            p_dotted_name(s, as_allowed = 0)
    if s.sy in ('import', 'cimport'):
        kind = s.sy
        s.next()
    else:
        s.error("Expected 'import' or 'cimport'")
    is_cimport = kind == 'cimport'
    is_parenthesized = False
    if s.sy == '*':
        imported_names = [(s.position(), "*", None, None)]
        s.next()
    else:
        if s.sy == '(':
            is_parenthesized = True
            s.next()
        imported_names = [p_imported_name(s, is_cimport)]
    while s.sy == ',':
        s.next()
        if is_parenthesized and s.sy == ')':
            break
        imported_names.append(p_imported_name(s, is_cimport))
    if is_parenthesized:
        s.expect(')')
    dotted_name = EncodedString(dotted_name)
    if dotted_name == '__future__':
        # register future directives instead of generating import code
        if not first_statement:
            s.error("from __future__ imports must occur at the beginning of the file")
        elif level is not None:
            s.error("invalid syntax")
        else:
            for (name_pos, name, as_name, kind) in imported_names:
                if name == "braces":
                    s.error("not a chance", name_pos)
                    break
                try:
                    directive = getattr(Future, name)
                except AttributeError:
                    s.error("future feature %s is not defined" % name, name_pos)
                    break
                s.context.future_directives.add(directive)
        return Nodes.PassStatNode(pos)
    elif kind == 'cimport':
        return Nodes.FromCImportStatNode(pos,
            module_name = dotted_name,
            imported_names = imported_names)
    else:
        # build the runtime 'from mod import a, b as c' machinery
        imported_name_strings = []
        items = []
        for (name_pos, name, as_name, kind) in imported_names:
            encoded_name = EncodedString(name)
            imported_name_strings.append(
                ExprNodes.IdentifierStringNode(name_pos, value = encoded_name))
            items.append(
                (name,
                 ExprNodes.NameNode(name_pos,
                                    name = as_name or name)))
        import_list = ExprNodes.ListNode(
            imported_names[0][0], args = imported_name_strings)
        dotted_name = EncodedString(dotted_name)
        return Nodes.FromImportStatNode(pos,
            module = ExprNodes.ImportNode(dotted_name_pos,
                module_name = ExprNodes.IdentifierStringNode(pos, value = dotted_name),
                level = level,
                name_list = import_list),
            items = items)
# Keywords that may prefix a name in a "from x cimport ..." list.
imported_name_kinds = ('class', 'struct', 'union')
def p_imported_name(s, is_cimport):
    """Parse one entry of an import list: [kind] NAME ['as' NAME].

    The kind keyword (class/struct/union) is only recognised in cimports.
    Returns (pos, name, as_name, kind).
    """
    pos = s.position()
    if is_cimport and s.systring in imported_name_kinds:
        kind = s.systring
        s.next()
    else:
        kind = None
    return (pos, p_ident(s), p_as_name(s), kind)
def p_dotted_name(s, as_allowed):
    """Parse NAME ('.' NAME)* [as NAME].

    Returns (pos, first_name, dotted_name, as_name); as_name is only
    parsed when as_allowed is true, otherwise it is None.
    """
    pos = s.position()
    first = p_ident(s)
    parts = [first]
    while s.sy == '.':
        s.next()
        parts.append(p_ident(s))
    alias = p_as_name(s) if as_allowed else None
    return (pos, first, u'.'.join(parts), alias)
def p_as_name(s):
    """Parse an optional "as NAME" alias; returns the name or None."""
    if not (s.sy == 'IDENT' and s.systring == 'as'):
        return None
    s.next()
    return p_ident(s)
def p_assert_statement(s):
    # s.sy == 'assert'
    """Parse "assert test [',' test]" into an AssertStatNode."""
    pos = s.position()
    s.next()
    condition = p_test(s)
    message = None
    if s.sy == ',':
        s.next()
        message = p_test(s)
    return Nodes.AssertStatNode(pos, cond = condition, value = message)
# Tokens that legally terminate a simple statement.
statement_terminators = (';', 'NEWLINE', 'EOF')
def p_if_statement(s):
    # s.sy == 'if'
    """Parse an if/elif.../else statement."""
    pos = s.position()
    s.next()
    clauses = [p_if_clause(s)]
    while s.sy == 'elif':
        s.next()
        clauses.append(p_if_clause(s))
    return Nodes.IfStatNode(pos,
        if_clauses = clauses, else_clause = p_else_clause(s))
def p_if_clause(s):
    """Parse the condition and suite of a single if/elif clause."""
    pos = s.position()
    condition = p_test(s)
    return Nodes.IfClauseNode(pos,
        condition = condition, body = p_suite(s))
def p_else_clause(s):
    """Parse an optional 'else' suite; returns None when absent."""
    if s.sy != 'else':
        return None
    s.next()
    return p_suite(s)
def p_while_statement(s):
    # s.sy == 'while'
    """Parse a while loop with an optional else clause."""
    pos = s.position()
    s.next()
    condition = p_test(s)
    loop_body = p_suite(s)
    return Nodes.WhileStatNode(pos,
        condition = condition, body = loop_body,
        else_clause = p_else_clause(s))
def p_for_statement(s):
    # s.sy == 'for'
    """Parse a for statement (Python for-in or Cython for-from form)."""
    pos = s.position()
    s.next()
    kw_args = p_for_bounds(s, allow_testlist=True)
    kw_args['body'] = p_suite(s)
    kw_args['else_clause'] = p_else_clause(s)
    return Nodes.ForStatNode(pos, **kw_args)
def p_for_bounds(s, allow_testlist=True):
    # Parse the header of a for statement (between 'for' and ':').
    # Returns a dict of keyword arguments for ForStatNode: either
    # {target, iterator} for a Python for-in loop, or
    # {target, bound1, relation1, relation2, bound2, step} for the
    # Cython integer for-from loop ("for x from a <= x < b [by step]").
    target = p_for_target(s)
    if s.sy == 'in':
        s.next()
        iterator = p_for_iterator(s, allow_testlist)
        return dict( target = target, iterator = iterator )
    elif not s.in_python_file:
        if s.sy == 'from':
            s.next()
            bound1 = p_bit_expr(s)
        else:
            # Support shorter "for a <= x < b" syntax
            bound1, target = target, None
        rel1 = p_for_from_relation(s)
        name2_pos = s.position()
        name2 = p_ident(s)
        rel2_pos = s.position()
        rel2 = p_for_from_relation(s)
        bound2 = p_bit_expr(s)
        step = p_for_from_step(s)
        if target is None:
            # short form: the middle name is the loop variable
            target = ExprNodes.NameNode(name2_pos, name = name2)
        else:
            # long form: loop variable must match the name between relations
            if not target.is_name:
                error(target.pos,
                    "Target of for-from statement must be a variable name")
            elif name2 != target.name:
                error(name2_pos,
                    "Variable name in for-from range does not match target")
        if rel1[0] != rel2[0]:
            # both relations must point the same direction (< with <=, etc.)
            error(rel2_pos,
                "Relation directions in for-from do not match")
        return dict(target = target,
                    bound1 = bound1,
                    relation1 = rel1,
                    relation2 = rel2,
                    bound2 = bound2,
                    step = step,
                    )
    else:
        # Python file without 'in': report the expected token
        s.expect('in')
        return {}
def p_for_from_relation(s):
    """Parse one comparison operator of a for-from loop header.

    Returns the operator string ('<', '<=', '>' or '>='); reports a
    syntax error for anything else.
    """
    if s.sy in inequality_relations:
        op = s.sy
        s.next()
        return op
    else:
        # fixed: the message previously lacked the comma between '>' and '>='
        s.error("Expected one of '<', '<=', '>', '>='")
def p_for_from_step(s):
    """Parse an optional "by step" clause of a for-from loop."""
    if s.sy == 'IDENT' and s.systring == 'by':
        s.next()
        return p_bit_expr(s)
    return None
# Comparison operators allowed in a for-from loop header.
inequality_relations = ('<', '<=', '>', '>=')
def p_target(s, terminator):
    """Parse an assignment/loop target up to the given terminator token.

    A trailing or separating comma turns the target into a TupleNode.
    """
    pos = s.position()
    first = p_starred_expr(s)
    if s.sy != ',':
        return first
    s.next()
    exprs = [first]
    while s.sy != terminator:
        exprs.append(p_starred_expr(s))
        if s.sy != ',':
            break
        s.next()
    return ExprNodes.TupleNode(pos, args = exprs)
def p_for_target(s):
    # Parse the assignment target of a for-in loop (terminated by 'in').
    return p_target(s, 'in')
def p_for_iterator(s, allow_testlist=True):
    """Parse the iterable of a for loop, wrapped in an IteratorNode.

    allow_testlist permits an unparenthesised tuple; otherwise only a
    single or_test expression is accepted.
    """
    pos = s.position()
    parse_expr = p_testlist if allow_testlist else p_or_test
    return ExprNodes.IteratorNode(pos, sequence = parse_expr(s))
def p_try_statement(s):
    # s.sy == 'try'
    # Parse a try statement.  A try/except/finally combination is
    # decomposed into a TryFinallyStatNode wrapping a TryExceptStatNode.
    pos = s.position()
    s.next()
    body = p_suite(s)
    except_clauses = []
    else_clause = None
    if s.sy in ('except', 'else'):
        while s.sy == 'except':
            except_clauses.append(p_except_clause(s))
        if s.sy == 'else':
            s.next()
            else_clause = p_suite(s)
        body = Nodes.TryExceptStatNode(pos,
            body = body, except_clauses = except_clauses,
            else_clause = else_clause)
        if s.sy != 'finally':
            return body
        # try-except-finally is equivalent to nested try-except/try-finally
    if s.sy == 'finally':
        s.next()
        finally_clause = p_suite(s)
        return Nodes.TryFinallyStatNode(pos,
            body = body, finally_clause = finally_clause)
    else:
        s.error("Expected 'except' or 'finally'")
def p_except_clause(s):
    # s.sy == 'except'
    """Parse one 'except' clause of a try statement.

    Supports both the Py2 form "except E, v:" (v is any test expression)
    and the Py3 form "except E as v:" where v must be a plain name.
    The exception pattern is normalised into a list of single tests.
    """
    pos = s.position()
    s.next()
    exc_type = None
    exc_value = None
    if s.sy != ':':
        exc_type = p_test(s)
        # normalise into list of single exception tests
        if isinstance(exc_type, ExprNodes.TupleNode):
            exc_type = exc_type.args
        else:
            exc_type = [exc_type]
        # fixed: the ',' test previously also matched 'as', which made the
        # Py3 name-required branch below unreachable dead code
        if s.sy == ',':
            s.next()
            exc_value = p_test(s)
        elif s.sy == 'IDENT' and s.systring == 'as':
            # Py3 syntax requires a name here
            s.next()
            pos2 = s.position()
            name = p_ident(s)
            exc_value = ExprNodes.NameNode(pos2, name = name)
    body = p_suite(s)
    return Nodes.ExceptClauseNode(pos,
        pattern = exc_type, target = exc_value, body = body)
def p_include_statement(s, ctx):
    # Parse 'include "file"' and splice the included file's statement
    # tree in place.  Inside a false compile-time IF branch the include
    # is skipped entirely and a PassStatNode returned.
    pos = s.position()
    s.next() # 'include'
    unicode_include_file_name = p_string_literal(s, 'u')[2]
    s.expect_newline("Syntax error in include statement")
    if s.compile_time_eval:
        include_file_name = unicode_include_file_name
        include_file_path = s.context.find_include_file(include_file_name, pos)
        if include_file_path:
            s.included_files.append(include_file_name)
            f = Utils.open_source_file(include_file_path, mode="rU")
            source_desc = FileSourceDescriptor(include_file_path)
            # child scanner inherits our state (parent scanner passed as s)
            s2 = PyrexScanner(f, source_desc, s, source_encoding=f.encoding, parse_comments=s.parse_comments)
            try:
                tree = p_statement_list(s2, ctx)
            finally:
                f.close()
            return tree
        else:
            # NOTE(review): presumably find_include_file reported the
            # error already -- confirm before relying on the None return
            return None
    else:
        return Nodes.PassStatNode(pos)
def p_with_statement(s):
    """Parse a 'with' statement; 'with template[...]' is Cython-only
    and handled separately."""
    s.next() # 'with'
    if s.systring == 'template' and not s.in_python_file:
        return p_with_template(s)
    return p_with_items(s)
def p_with_items(s):
    # Parse the comma-separated context managers of a 'with' statement,
    # recursing on the right so "with a, b:" nests like "with a: with b:".
    # The Cython-only "with nogil"/"with gil" forms produce GILStatNode.
    pos = s.position()
    if not s.in_python_file and s.sy == 'IDENT' and s.systring in ('nogil', 'gil'):
        state = s.systring
        s.next()
        if s.sy == ',':
            s.next()
            body = p_with_items(s)
        else:
            body = p_suite(s)
        return Nodes.GILStatNode(pos, state = state, body = body)
    else:
        manager = p_test(s)
        target = None
        if s.sy == 'IDENT' and s.systring == 'as':
            s.next()
            target = p_starred_expr(s)
        if s.sy == ',':
            s.next()
            body = p_with_items(s)
        else:
            body = p_suite(s)
        return Nodes.WithStatNode(pos, manager = manager,
            target = target, body = body)
def p_with_template(s):
    # Parse Cython's 'with template[T1, T2]:' block, which wraps a single
    # C function or variable declaration using the template parameters.
    pos = s.position()
    templates = []
    s.next()
    s.expect('[')
    templates.append(s.systring)
    s.next()
    while s.systring == ',':
        s.next()
        templates.append(s.systring)
        s.next()
    s.expect(']')
    if s.sy == ':':
        s.next()
        s.expect_newline("Syntax error in template function declaration")
        s.expect_indent()
        body_ctx = Ctx()
        body_ctx.templates = templates
        func_or_var = p_c_func_or_var_declaration(s, pos, body_ctx)
        s.expect_dedent()
        return func_or_var
    else:
        # NOTE(review): error() reports without raising, so this branch
        # falls off the end returning None -- confirm callers tolerate it
        error(pos, "Syntax error in template function declaration")
def p_simple_statement(s, first_statement = 0):
    """Dispatch a single simple (one-line) statement on the current token.

    Anything not matching a statement keyword falls through to an
    expression or assignment.
    """
    handlers = {
        'global': p_global_statement,
        'nonlocal': p_nonlocal_statement,
        'print': p_print_statement,
        'exec': p_exec_statement,
        'del': p_del_statement,
        'break': p_break_statement,
        'continue': p_continue_statement,
        'return': p_return_statement,
        'raise': p_raise_statement,
        'import': p_import_statement,
        'cimport': p_import_statement,
        'yield': p_yield_statement,
        'assert': p_assert_statement,
        'pass': p_pass_statement,
    }
    handler = handlers.get(s.sy)
    if handler is not None:
        return handler(s)
    if s.sy == 'from':
        # only 'from' imports care whether they open the module
        return p_from_import_statement(s, first_statement = first_statement)
    return p_expression_or_assignment(s)
def p_simple_statement_list(s, ctx, first_statement = 0):
    # Parse a series of simple statements on one line
    # separated by semicolons.
    # PassStatNodes are dropped; a single surviving statement is
    # returned unwrapped, several become a StatListNode.
    stat = p_simple_statement(s, first_statement = first_statement)
    pos = stat.pos
    stats = []
    if not isinstance(stat, Nodes.PassStatNode):
        stats.append(stat)
    while s.sy == ';':
        #print "p_simple_statement_list: maybe more to follow" ###
        s.next()
        if s.sy in ('NEWLINE', 'EOF'):
            break
        stat = p_simple_statement(s, first_statement = first_statement)
        if isinstance(stat, Nodes.PassStatNode):
            continue
        stats.append(stat)
        # first_statement only flips once a real statement has been seen
        first_statement = False
    if not stats:
        stat = Nodes.PassStatNode(pos)
    elif len(stats) == 1:
        stat = stats[0]
    else:
        stat = Nodes.StatListNode(pos, stats = stats)
    s.expect_newline("Syntax error in simple statement list")
    return stat
def p_compile_time_expr(s):
    """Parse a testlist with the scanner's compile-time-expression
    flag temporarily enabled."""
    saved = s.compile_time_expr
    s.compile_time_expr = 1
    result = p_testlist(s)
    s.compile_time_expr = saved
    return result
def p_DEF_statement(s):
    """Parse 'DEF name = expr'.

    The expression is evaluated at compile time and the result declared
    in the scanner's compile-time environment; the statement itself
    contributes only a PassStatNode to the tree.
    """
    pos = s.position()
    env = s.compile_time_env
    s.next() # 'DEF'
    name = p_ident(s)
    s.expect('=')
    value = p_compile_time_expr(s).compile_time_value(env)
    env.declare(name, value)
    s.expect_newline()
    return Nodes.PassStatNode(pos)
def p_IF_statement(s, ctx):
    # Parse a compile-time IF/ELIF/ELSE block.  Conditions are evaluated
    # during parsing; only the suite of the first true branch is kept,
    # but every suite is still parsed (with compile_time_eval cleared for
    # the dead branches) to keep the scanner in sync.
    pos = s.position()
    saved_eval = s.compile_time_eval
    current_eval = saved_eval
    denv = s.compile_time_env
    result = None
    while 1:
        s.next() # 'IF' or 'ELIF'
        expr = p_compile_time_expr(s)
        s.compile_time_eval = current_eval and bool(expr.compile_time_value(denv))
        body = p_suite(s, ctx)
        if s.compile_time_eval:
            # first true branch wins; later branches parse as dead code
            result = body
            current_eval = 0
        if s.sy != 'ELIF':
            break
    if s.sy == 'ELSE':
        s.next()
        s.compile_time_eval = current_eval
        body = p_suite(s, ctx)
        if current_eval:
            result = body
    if not result:
        result = Nodes.PassStatNode(pos)
    s.compile_time_eval = saved_eval
    return result
def p_statement(s, ctx, first_statement = 0):
    # Top-level statement dispatcher.  Chooses between compile-time
    # statements (DEF/IF), ctypedef, decorated definitions, cdef/cpdef
    # declarations, Python compound statements, and simple statement
    # lists, validating each against the declaration context in 'ctx'.
    cdef_flag = ctx.cdef_flag
    decorators = None
    if s.sy == 'ctypedef':
        if ctx.level not in ('module', 'module_pxd'):
            s.error("ctypedef statement not allowed here")
        #if ctx.api:
        #    error(s.position(), "'api' not allowed with 'ctypedef'")
        return p_ctypedef_statement(s, ctx)
    elif s.sy == 'DEF':
        return p_DEF_statement(s)
    elif s.sy == 'IF':
        return p_IF_statement(s, ctx)
    elif s.sy == 'DECORATOR':
        if ctx.level not in ('module', 'class', 'c_class', 'function', 'property', 'module_pxd', 'c_class_pxd', 'other'):
            s.error('decorator not allowed here')
        s.level = ctx.level
        decorators = p_decorators(s)
        # tokens that may legally follow a decorator list
        bad_toks = 'def', 'cdef', 'cpdef', 'class'
        if not ctx.allow_struct_enum_decorator and s.sy not in bad_toks:
            s.error("Decorators can only be followed by functions or classes")
    elif s.sy == 'pass' and cdef_flag:
        # empty cdef block
        return p_pass_statement(s, with_newline = 1)
    overridable = 0
    if s.sy == 'cdef':
        cdef_flag = 1
        s.next()
    elif s.sy == 'cpdef':
        cdef_flag = 1
        overridable = 1
        s.next()
    if cdef_flag:
        if ctx.level not in ('module', 'module_pxd', 'function', 'c_class', 'c_class_pxd'):
            s.error('cdef statement not allowed here')
        s.level = ctx.level
        node = p_cdef_statement(s, ctx(overridable = overridable))
        if decorators is not None:
            # decorators are only valid on function/var/class-like nodes
            tup = Nodes.CFuncDefNode, Nodes.CVarDefNode, Nodes.CClassDefNode
            if ctx.allow_struct_enum_decorator:
                tup += Nodes.CStructOrUnionDefNode, Nodes.CEnumDefNode
            if not isinstance(node, tup):
                s.error("Decorators can only be followed by functions or classes")
            node.decorators = decorators
        return node
    else:
        if ctx.api:
            s.error("'api' not allowed with this statement")
        elif s.sy == 'def':
            # def statements aren't allowed in pxd files, except
            # as part of a cdef class
            if ('pxd' in ctx.level) and (ctx.level != 'c_class_pxd'):
                s.error('def statement not allowed here')
            s.level = ctx.level
            return p_def_statement(s, decorators)
        elif s.sy == 'class':
            if ctx.level not in ('module', 'function', 'class', 'other'):
                s.error("class definition not allowed here")
            return p_class_statement(s, decorators)
        elif s.sy == 'include':
            if ctx.level not in ('module', 'module_pxd'):
                s.error("include statement not allowed here")
            return p_include_statement(s, ctx)
        elif ctx.level == 'c_class' and s.sy == 'IDENT' and s.systring == 'property':
            return p_property_decl(s)
        elif s.sy == 'pass' and ctx.level != 'property':
            return p_pass_statement(s, with_newline = 1)
        else:
            if ctx.level in ('c_class_pxd', 'property'):
                s.error("Executable statement not allowed here")
            if s.sy == 'if':
                return p_if_statement(s)
            elif s.sy == 'while':
                return p_while_statement(s)
            elif s.sy == 'for':
                return p_for_statement(s)
            elif s.sy == 'try':
                return p_try_statement(s)
            elif s.sy == 'with':
                return p_with_statement(s)
            else:
                return p_simple_statement_list(
                    s, ctx, first_statement = first_statement)
def p_statement_list(s, ctx, first_statement = 0):
    """Parse newline-separated statements until DEDENT or EOF.

    PassStatNodes are dropped; a single surviving statement is returned
    unwrapped, several are collected in a StatListNode.
    """
    pos = s.position()
    nodes = []
    while s.sy not in ('DEDENT', 'EOF'):
        node = p_statement(s, ctx, first_statement = first_statement)
        if isinstance(node, Nodes.PassStatNode):
            continue
        nodes.append(node)
        # only a real (non-pass) statement ends the "first statement" state
        first_statement = False
    if not nodes:
        return Nodes.PassStatNode(pos)
    if len(nodes) == 1:
        return nodes[0]
    return Nodes.StatListNode(pos, stats = nodes)
def p_suite(s, ctx = Ctx(), with_doc = 0, with_pseudo_doc = 0):
    """Parse a ':' suite: an indented statement block (optionally
    beginning with a docstring) or simple statements on the same line.

    Returns (doc, body) when with_doc is set, otherwise just the body.
    """
    pos = s.position()
    s.expect(':')
    doc = None
    # fixed: removed unused local 'stmts = []'
    if s.sy == 'NEWLINE':
        s.next()
        s.expect_indent()
        if with_doc or with_pseudo_doc:
            doc = p_doc_string(s)
        body = p_statement_list(s, ctx)
        s.expect_dedent()
    else:
        if ctx.api:
            s.error("'api' not allowed with this statement")
        if ctx.level in ('module', 'class', 'function', 'other'):
            body = p_simple_statement_list(s, ctx)
        else:
            # declaration contexts allow only 'pass' on the same line
            body = p_pass_statement(s)
            s.expect_newline("Syntax error in declarations")
    if with_doc:
        return doc, body
    else:
        return body
def p_positional_and_keyword_args(s, end_sy_set, templates = None):
    """
    Parses positional and keyword arguments. end_sy_set
    should contain any s.sy that terminate the argument list.
    Argument expansion (* and **) are not allowed.
    Returns: (positional_args, keyword_args)
    """
    # fixed: removed the unused locals 'pos_idx' and 'was_keyword'
    positional_args = []
    keyword_args = []
    while s.sy not in end_sy_set:
        if s.sy == '*' or s.sy == '**':
            s.error('Argument expansion not allowed here.')
        parsed_type = False
        if s.sy == 'IDENT' and s.peek()[0] == '=':
            # keyword argument: IDENT '=' (expression | C type)
            ident = s.systring
            s.next() # s.sy is '='
            s.next()
            if looking_at_expr(s):
                arg = p_test(s)
            else:
                base_type = p_c_base_type(s, templates = templates)
                declarator = p_c_declarator(s, empty = 1)
                arg = Nodes.CComplexBaseTypeNode(base_type.pos,
                    base_type = base_type, declarator = declarator)
                parsed_type = True
            keyword_node = ExprNodes.IdentifierStringNode(
                arg.pos, value = EncodedString(ident))
            keyword_args.append((keyword_node, arg))
        else:
            # positional argument: expression or C type
            if looking_at_expr(s):
                arg = p_test(s)
            else:
                base_type = p_c_base_type(s, templates = templates)
                declarator = p_c_declarator(s, empty = 1)
                arg = Nodes.CComplexBaseTypeNode(base_type.pos,
                    base_type = base_type, declarator = declarator)
                parsed_type = True
            positional_args.append(arg)
            if len(keyword_args) > 0:
                s.error("Non-keyword arg following keyword arg",
                    pos = arg.pos)
        if s.sy != ',':
            if s.sy not in end_sy_set:
                if parsed_type:
                    s.error("Unmatched %s" % " or ".join(end_sy_set))
            break
        s.next()
    return positional_args, keyword_args
def p_c_base_type(s, self_flag = 0, nonempty = 0, templates = None):
    """Parse a C base type; a '(' introduces a parenthesised complex type.

    self_flag is true when parsing the self argument of a C method of
    an extension type.
    """
    if s.sy != '(':
        return p_c_simple_base_type(s, self_flag, nonempty = nonempty, templates = templates)
    return p_c_complex_base_type(s)
def p_calling_convention(s):
    """Consume an optional calling-convention keyword; return it or ''. """
    if not (s.sy == 'IDENT' and s.systring in calling_convention_words):
        return ""
    result = s.systring
    s.next()
    return result
# Platform calling-convention keywords accepted before a declarator.
calling_convention_words = ("__stdcall", "__cdecl", "__fastcall")
def p_c_complex_base_type(s):
    # s.sy == '('
    """Parse a parenthesised C type: '(' base-type declarator ')'."""
    pos = s.position()
    s.next()
    inner_type = p_c_base_type(s)
    inner_decl = p_c_declarator(s, empty = 1)
    s.expect(')')
    return Nodes.CComplexBaseTypeNode(pos,
        base_type = inner_type, declarator = inner_decl)
def p_c_simple_base_type(s, self_flag, nonempty, templates = None):
    #print "p_c_simple_base_type: self_flag =", self_flag, nonempty
    # Parse a non-parenthesised C base type: a built-in basic type with
    # sign/longness modifiers, or a (possibly dotted) type name, followed
    # by optional buffer/template/memoryview brackets and nested names.
    is_basic = 0
    signed = 1
    longness = 0
    complex = 0
    module_path = []
    pos = s.position()
    if not s.sy == 'IDENT':
        error(pos, "Expected an identifier, found '%s'" % s.sy)
    if looking_at_base_type(s):
        #print "p_c_simple_base_type: looking_at_base_type at", s.position()
        is_basic = 1
        if s.sy == 'IDENT' and s.systring in special_basic_c_types:
            # types with fixed signedness/longness (Py_ssize_t, size_t, ...)
            signed, longness = special_basic_c_types[s.systring]
            name = s.systring
            s.next()
        else:
            signed, longness = p_sign_and_longness(s)
            if s.sy == 'IDENT' and s.systring in basic_c_type_names:
                name = s.systring
                s.next()
            else:
                name = 'int' # long [int], short [int], long [int] complex, etc.
        if s.sy == 'IDENT' and s.systring == 'complex':
            complex = 1
            s.next()
    elif looking_at_dotted_name(s):
        #print "p_c_simple_base_type: looking_at_type_name at", s.position()
        name = s.systring
        s.next()
        while s.sy == '.':
            module_path.append(name)
            s.next()
            name = p_ident(s)
    else:
        name = s.systring
        s.next()
        if nonempty and s.sy != 'IDENT':
            # Make sure this is not a declaration of a variable or function.
            if s.sy == '(':
                s.next()
                if s.sy == '*' or s.sy == '**' or s.sy == '&':
                    # pointer/reference declarator: 'name' really was a type
                    s.put_back('(', '(')
                else:
                    # not a type after all: push everything back
                    s.put_back('(', '(')
                    s.put_back('IDENT', name)
                    name = None
            elif s.sy not in ('*', '**', '[', '&'):
                s.put_back('IDENT', name)
                name = None
    type_node = Nodes.CSimpleBaseTypeNode(pos,
        name = name, module_path = module_path,
        is_basic_c_type = is_basic, signed = signed,
        complex = complex, longness = longness,
        is_self_arg = self_flag, templates = templates)
    # handle trailing buffer/template/memoryview-slice declarations here
    if s.sy == '[':
        if is_memoryviewslice_access(s):
            type_node = p_memoryviewslice_access(s, type_node)
        else:
            type_node = p_buffer_or_template(s, type_node, templates)
    if s.sy == '.':
        s.next()
        name = p_ident(s)
        type_node = Nodes.CNestedBaseTypeNode(pos, base_type = type_node, name = name)
    return type_node
def p_buffer_or_template(s, base_type_node, templates):
    # s.sy == '['
    """Parse '[...]' after a type: buffer options or template parameters."""
    pos = s.position()
    s.next()
    # Note that buffer_positional_options_count=1, so the only positional
    # argument is dtype.  For templated types, all parameters are types.
    pos_args, kw_args = p_positional_and_keyword_args(s, (']',), templates)
    s.expect(']')
    kw_pairs = [
        ExprNodes.DictItemNode(pos=key.pos, key=key, value=value)
        for key, value in kw_args
    ]
    return Nodes.TemplatedTypeNode(pos,
        positional_args = pos_args,
        keyword_args = ExprNodes.DictNode(pos, key_value_pairs = kw_pairs),
        base_type_node = base_type_node)
def p_bracketed_base_type(s, base_type_node, nonempty, empty):
    # s.sy == '['
    """Decide how '[...]' after a base type is interpreted, depending on
    whether the surrounding declarator must be empty or nonempty."""
    if empty and not nonempty:
        # sizeof-like thing: only anonymous C arrays (int[SIZE]) allowed,
        # handled by the caller
        return base_type_node
    if nonempty and not empty:
        # a declaration: memoryview slice or buffer type
        if is_memoryviewslice_access(s):
            return p_memoryviewslice_access(s, base_type_node)
        return p_buffer_or_template(s, base_type_node, None)
        # return p_buffer_access(s, base_type_node)
    if not (empty or nonempty):
        # only anonymous C arrays and memoryview slice arrays here; buffer
        # declarations are disallowed due to ambiguity with anonymous arrays
        if is_memoryviewslice_access(s):
            return p_memoryviewslice_access(s, base_type_node)
        return base_type_node
    # both empty and nonempty set: mirrors the original implicit None return
def is_memoryviewslice_access(s):
    # s.sy == '['
    # a memoryview slice declaration is distinguishable from a buffer access
    # declaration by the first entry in the bracketed list. The buffer will
    # not have an unnested colon in the first entry; the memoryview slice will.
    # Looks ahead at most two tokens, then pushes them back in reverse
    # order so the scanner state is left unchanged.
    saved = [(s.sy, s.systring)]
    s.next()
    retval = False
    if s.systring == ':':
        retval = True
    elif s.sy == 'INT':
        saved.append((s.sy, s.systring))
        s.next()
        if s.sy == ':':
            retval = True
    for sv in saved[::-1]:
        s.put_back(*sv)
    return retval
def p_memoryviewslice_access(s, base_type_node):
    # s.sy == '['
    """Parse a memoryview slice declaration such as 'double[:, ::1]'."""
    pos = s.position()
    s.next()
    subscripts = p_subscript_list(s)
    # every axis specification must contain a ':' (i.e. be a slice)
    for subscript in subscripts:
        if len(subscript) < 2:
            s.error("An axis specification in memoryview declaration does not have a ':'.")
    s.expect(']')
    return Nodes.MemoryViewSliceTypeNode(pos,
        base_type_node = base_type_node,
        axes = make_slice_nodes(pos, subscripts))
def looking_at_name(s):
    """True if the current token is an identifier that is not a
    calling-convention keyword."""
    return s.sy == 'IDENT' and s.systring not in calling_convention_words
def looking_at_expr(s):
    # Distinguish an expression from a C type declaration by limited
    # lookahead; every consumed token is pushed back, leaving the
    # scanner unchanged.  Returns True if it looks like an expression.
    if s.systring in base_type_start_words:
        return False
    elif s.sy == 'IDENT':
        is_type = False
        name = s.systring
        dotted_path = []
        s.next()
        while s.sy == '.':
            s.next()
            dotted_path.append(s.systring)
            s.expect('IDENT')
        saved = s.sy, s.systring
        if s.sy == 'IDENT':
            # two identifiers in a row: a type followed by a declarator
            is_type = True
        elif s.sy == '*' or s.sy == '**':
            s.next()
            # pointer immediately before ')' or ']' marks a type
            is_type = s.sy in (')', ']')
            s.put_back(*saved)
        elif s.sy == '(':
            s.next()
            is_type = s.sy == '*'
            s.put_back(*saved)
        elif s.sy == '[':
            s.next()
            is_type = s.sy == ']'
            s.put_back(*saved)
        # push the dotted name back in reverse so it re-scans identically
        dotted_path.reverse()
        for p in dotted_path:
            s.put_back('IDENT', p)
            s.put_back('.', '.')
        s.put_back('IDENT', name)
        return not is_type
    else:
        return True
def looking_at_base_type(s):
    """True if the current token starts a built-in C base type."""
    return s.sy == 'IDENT' and s.systring in base_type_start_words
def looking_at_dotted_name(s):
    """Peek one token ahead: true when IDENT is followed by '.'.

    The consumed identifier is pushed back, leaving the scanner unchanged.
    """
    if s.sy != 'IDENT':
        return 0
    name = s.systring
    s.next()
    result = s.sy == '.'
    s.put_back('IDENT', name)
    return result
# Names of the built-in basic C types.
basic_c_type_names = ("void", "char", "int", "float", "double", "bint")
# C types whose signedness/longness is fixed and cannot take modifiers.
special_basic_c_types = {
    # name : (signed, longness)
    "Py_UNICODE" : (0, 0),
    "Py_UCS4" : (0, 0),
    "Py_ssize_t" : (2, 0),
    "ssize_t" : (2, 0),
    "size_t" : (0, 0),
}
# Modifier keywords that may precede a basic C type name.
sign_and_longness_words = ("short", "long", "signed", "unsigned")
# Any identifier in this set starts a built-in C base type.
base_type_start_words = \
    basic_c_type_names + sign_and_longness_words + tuple(special_basic_c_types)
def p_sign_and_longness(s):
    """Consume 'signed'/'unsigned'/'short'/'long' type modifiers.

    Returns (signed, longness): signed is 0 for unsigned, 1 by default,
    2 for an explicit 'signed'; longness is -1 for 'short' and counts
    +1 per 'long'.
    """
    signed, longness = 1, 0
    while s.sy == 'IDENT' and s.systring in sign_and_longness_words:
        word = s.systring
        if word == 'unsigned':
            signed = 0
        elif word == 'signed':
            signed = 2
        elif word == 'short':
            longness = -1
        elif word == 'long':
            longness += 1
        s.next()
    return signed, longness
def p_opt_cname(s):
    """Parse an optional C-name string literal.

    Returns an EncodedString carrying the source encoding, or None.
    """
    literal = p_opt_string_literal(s, 'u')
    if literal is None:
        return None
    cname = EncodedString(literal)
    cname.encoding = s.source_encoding
    return cname
def p_c_declarator(s, ctx = Ctx(), empty = 0, is_type = 0, cmethod_flag = 0,
                   assignable = 0, nonempty = 0,
                   calling_convention_allowed = 0):
    # If empty is true, the declarator must be empty. If nonempty is true,
    # the declarator must be nonempty. Otherwise we don't care.
    # If cmethod_flag is true, then if this declarator declares
    # a function, it's a C method of an extension type.
    pos = s.position()
    if s.sy == '(':
        s.next()
        if s.sy == ')' or looking_at_name(s):
            # '(' starts an argument list: unnamed function declarator
            base = Nodes.CNameDeclaratorNode(pos, name = EncodedString(u""), cname = None)
            result = p_c_func_declarator(s, pos, ctx, base, cmethod_flag)
        else:
            # parenthesised sub-declarator, e.g. (int (*f)(...))
            result = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
                                    cmethod_flag = cmethod_flag,
                                    nonempty = nonempty,
                                    calling_convention_allowed = 1)
            s.expect(')')
    else:
        result = p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag,
                                       assignable, nonempty)
    if not calling_convention_allowed and result.calling_convention and s.sy != '(':
        error(s.position(), "%s on something that is not a function"
            % result.calling_convention)
    # trailing array dimensions and function argument lists
    while s.sy in ('[', '('):
        pos = s.position()
        if s.sy == '[':
            result = p_c_array_declarator(s, result)
        else: # sy == '('
            s.next()
            result = p_c_func_declarator(s, pos, ctx, result, cmethod_flag)
        # only the outermost function can be a C method
        cmethod_flag = 0
    return result
def p_c_array_declarator(s, base):
    """Parse '[dim]' following a declarator; the dimension may be empty."""
    pos = s.position()
    s.next() # '['
    dim = p_testlist(s) if s.sy != ']' else None
    s.expect(']')
    return Nodes.CArrayDeclaratorNode(pos, base = base, dimension = dim)
def p_c_func_declarator(s, pos, ctx, base, cmethod_flag):
    # Opening paren has already been skipped
    # Parse the argument list plus the trailing clauses of a C function
    # declarator, in this order: optional '...', ')', optional 'nogil',
    # optional exception value clause, optional 'with gil'.
    args = p_c_arg_list(s, ctx, cmethod_flag = cmethod_flag,
                        nonempty_declarators = 0)
    ellipsis = p_optional_ellipsis(s)
    s.expect(')')
    nogil = p_nogil(s)
    exc_val, exc_check = p_exception_value_clause(s)
    with_gil = p_with_gil(s)
    return Nodes.CFuncDeclaratorNode(pos,
        base = base, args = args, has_varargs = ellipsis,
        exception_value = exc_val, exception_check = exc_check,
        nogil = nogil or ctx.nogil or with_gil, with_gil = with_gil)
# C++ operators that Cython currently allows to be overloaded
# via 'operator' in extern cppclass declarations.
supported_overloaded_operators = set([
    '+', '-', '*', '/', '%',
    '++', '--', '~', '|', '&', '^', '<<', '>>', ',',
    '==', '!=', '>=', '>', '<=', '<',
    '[]', '()',
])
def p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag,
                          assignable, nonempty):
    # Parse a declarator without a parenthesised head: pointer ('*'/'**'),
    # C++ reference ('&'), or a (possibly empty) name with optional
    # cname, default value, and C++ operator-overload suffix.
    pos = s.position()
    calling_convention = p_calling_convention(s)
    if s.sy == '*':
        s.next()
        base = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
                              cmethod_flag = cmethod_flag,
                              assignable = assignable, nonempty = nonempty)
        result = Nodes.CPtrDeclaratorNode(pos,
            base = base)
    elif s.sy == '**': # scanner returns this as a single token
        s.next()
        base = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
                              cmethod_flag = cmethod_flag,
                              assignable = assignable, nonempty = nonempty)
        # '**' expands to two nested pointer declarators
        result = Nodes.CPtrDeclaratorNode(pos,
            base = Nodes.CPtrDeclaratorNode(pos,
                base = base))
    elif s.sy == '&':
        s.next()
        base = p_c_declarator(s, ctx, empty = empty, is_type = is_type,
                              cmethod_flag = cmethod_flag,
                              assignable = assignable, nonempty = nonempty)
        result = Nodes.CReferenceDeclaratorNode(pos, base = base)
    else:
        rhs = None
        if s.sy == 'IDENT':
            name = EncodedString(s.systring)
            if empty:
                error(s.position(), "Declarator should be empty")
            s.next()
            cname = p_opt_cname(s)
            if name != 'operator' and s.sy == '=' and assignable:
                s.next()
                rhs = p_test(s)
        else:
            if nonempty:
                error(s.position(), "Empty declarator")
            name = ""
            cname = None
        if cname is None and ctx.namespace is not None and nonempty:
            cname = ctx.namespace + "::" + name
        if name == 'operator' and ctx.visibility == 'extern' and nonempty:
            # C++ operator overload declaration: consume the operator token(s)
            op = s.sy
            if [1 for c in op if c in '+-*/<=>!%&|([^~,']:
                s.next()
                # Handle diphthong operators.
                if op == '(':
                    s.expect(')')
                    op = '()'
                elif op == '[':
                    s.expect(']')
                    op = '[]'
                if op in ['-', '+', '|', '&'] and s.sy == op:
                    # doubled operators: '++', '--', '||', '&&'
                    op = op*2
                    s.next()
                if s.sy == '=':
                    # comparison/assignment forms: '<=', '>=', '==', '!='
                    op += s.sy
                    s.next()
                if op not in supported_overloaded_operators:
                    s.error("Overloading operator '%s' not yet supported." % op)
                name = name+op
        result = Nodes.CNameDeclaratorNode(pos,
            name = name, cname = cname, default = rhs)
    result.calling_convention = calling_convention
    return result
def p_nogil(s):
    """Consume an optional 'nogil' keyword; returns 1 if present, else 0."""
    if s.sy == 'IDENT' and s.systring == 'nogil':
        s.next()
        return 1
    return 0
def p_with_gil(s):
    """Consume an optional 'with gil' clause; returns 1 if present, else 0."""
    if s.sy != 'with':
        return 0
    s.next()
    s.expect_keyword('gil')
    return 1
def p_exception_value_clause(s):
    # Parse the optional 'except' clause of a C function declaration.
    # Returns (exc_val, exc_check) with the following encodings:
    #   'except *'        -> (None, 1)      always check for exceptions
    #   'except +'        -> (None, '+')    convert C++ exceptions
    #   'except +handler' -> (handler, '+') custom C++ exception handler
    #   'except ? val'    -> (val, 1)       val signals a possible exception
    #   'except val'      -> (val, 0)       val always signals an exception
    exc_val = None
    exc_check = 0
    if s.sy == 'except':
        s.next()
        if s.sy == '*':
            exc_check = 1
            s.next()
        elif s.sy == '+':
            exc_check = '+'
            s.next()
            if s.sy == 'IDENT':
                name = s.systring
                s.next()
                exc_val = p_name(s, name)
        else:
            if s.sy == '?':
                exc_check = 1
                s.next()
            exc_val = p_test(s)
    return exc_val, exc_check
# Tokens that terminate a C argument list ('.' starts an ellipsis).
c_arg_list_terminators = ('*', '**', '.', ')')
def p_c_arg_list(s, ctx = Ctx(), in_pyfunc = 0, cmethod_flag = 0,
                 nonempty_declarators = 0, kw_only = 0, annotated = 1):
    """Parse a possibly-empty, comma-separated C argument list.

    A trailing comma is accepted; only the first argument can be the
    self argument of a C method.
    """
    args = []
    is_self_arg = cmethod_flag
    while True:
        if s.sy in c_arg_list_terminators:
            break
        arg = p_c_arg_decl(s, ctx, in_pyfunc, is_self_arg,
                           nonempty = nonempty_declarators,
                           kw_only = kw_only, annotated = annotated)
        args.append(arg)
        if s.sy != ',':
            break
        s.next()
        is_self_arg = 0
    return args
def p_optional_ellipsis(s):
    """Consume an optional '...' (varargs); returns 1 if present, else 0."""
    if s.sy != '.':
        return 0
    expect_ellipsis(s)
    return 1
def p_c_arg_decl(s, ctx, in_pyfunc, cmethod_flag = 0, nonempty = 0,
                 kw_only = 0, annotated = 1):
    # Parse one argument declaration:
    #   [C type] declarator ['not None'|'or None'] [':' annotation]
    #   ['=' default]
    pos = s.position()
    not_none = or_none = 0
    default = None
    annotation = None
    if s.in_python_file:
        # empty type declaration
        base_type = Nodes.CSimpleBaseTypeNode(pos,
            name = None, module_path = [],
            is_basic_c_type = 0, signed = 0,
            complex = 0, longness = 0,
            is_self_arg = cmethod_flag, templates = None)
    else:
        base_type = p_c_base_type(s, cmethod_flag, nonempty = nonempty)
    declarator = p_c_declarator(s, ctx, nonempty = nonempty)
    if s.sy in ('not', 'or') and not s.in_python_file:
        # 'not None' / 'or None' argument qualifiers
        kind = s.sy
        s.next()
        if s.sy == 'IDENT' and s.systring == 'None':
            s.next()
        else:
            s.error("Expected 'None'")
        if not in_pyfunc:
            error(pos, "'%s None' only allowed in Python functions" % kind)
        or_none = kind == 'or'
        not_none = kind == 'not'
    if annotated and s.sy == ':':
        s.next()
        annotation = p_test(s)
    if s.sy == '=':
        s.next()
        if 'pxd' in s.level:
            # pxd files only declare the presence of a default, not its value
            if s.sy not in ['*', '?']:
                error(pos, "default values cannot be specified in pxd files, use ? or *")
            # NOTE(review): BoolNode is constructed with 1 as its positional
            # (pos) argument here, not a value -- looks suspicious, confirm
            default = ExprNodes.BoolNode(1)
            s.next()
        else:
            default = p_test(s)
    return Nodes.CArgDeclNode(pos,
        base_type = base_type,
        declarator = declarator,
        not_none = not_none,
        or_none = or_none,
        default = default,
        annotation = annotation,
        kw_only = kw_only)
def p_api(s):
    """Consume an optional 'api' keyword; returns 1 if present, else 0."""
    if s.sy == 'IDENT' and s.systring == 'api':
        s.next()
        return 1
    return 0
def p_cdef_statement(s, ctx):
    # Dispatch the construct following 'cdef'/'cpdef': an extern block,
    # a plain cdef block, an extension type, a C++ class, a C
    # struct/union/enum, or a C function/variable declaration.
    pos = s.position()
    ctx.visibility = p_visibility(s, ctx.visibility)
    ctx.api = ctx.api or p_api(s)
    if ctx.api:
        if ctx.visibility not in ('private', 'public'):
            error(pos, "Cannot combine 'api' with '%s'" % ctx.visibility)
    if (ctx.visibility == 'extern') and s.sy == 'from':
        return p_cdef_extern_block(s, pos, ctx)
    elif s.sy == 'import':
        # legacy spelling: 'cdef import from ...'
        s.next()
        return p_cdef_extern_block(s, pos, ctx)
    elif p_nogil(s):
        ctx.nogil = 1
        if ctx.overridable:
            error(pos, "cdef blocks cannot be declared cpdef")
        return p_cdef_block(s, ctx)
    elif s.sy == ':':
        if ctx.overridable:
            error(pos, "cdef blocks cannot be declared cpdef")
        return p_cdef_block(s, ctx)
    elif s.sy == 'class':
        if ctx.level not in ('module', 'module_pxd'):
            error(pos, "Extension type definition not allowed here")
        if ctx.overridable:
            error(pos, "Extension types cannot be declared cpdef")
        return p_c_class_definition(s, pos, ctx)
    elif s.sy == 'IDENT' and s.systring == 'cppclass':
        if ctx.visibility != 'extern':
            error(pos, "C++ classes need to be declared extern")
        return p_cpp_class_definition(s, pos, ctx)
    elif s.sy == 'IDENT' and s.systring in ("struct", "union", "enum", "packed"):
        if ctx.level not in ('module', 'module_pxd'):
            error(pos, "C struct/union/enum definition not allowed here")
        if ctx.overridable:
            error(pos, "C struct/union/enum cannot be declared cpdef")
        if s.systring == "enum":
            return p_c_enum_definition(s, pos, ctx)
        else:
            return p_c_struct_or_union_definition(s, pos, ctx)
    else:
        return p_c_func_or_var_declaration(s, pos, ctx)
def p_cdef_block(s, ctx):
    # Parse the suite of a bare 'cdef:' block; every statement inside is
    # parsed with the cdef flag set.
    return p_suite(s, ctx(cdef_flag = 1))
def p_cdef_extern_block(s, pos, ctx):
    # Parse 'cdef extern from "file" [namespace "ns"] [nogil]:' and its
    # suite of external declarations.
    if ctx.overridable:
        error(pos, "cdef extern blocks cannot be declared cpdef")
    include_file = None
    s.expect('from')
    if s.sy == '*':
        # 'from *': external declarations with no include file
        s.next()
    else:
        include_file = p_string_literal(s, 'u')[2]
    ctx = ctx(cdef_flag = 1, visibility = 'extern')
    if s.systring == "namespace":
        s.next()
        ctx.namespace = p_string_literal(s, 'u')[2]
    if p_nogil(s):
        ctx.nogil = 1
    body = p_suite(s, ctx)
    return Nodes.CDefExternNode(pos,
        include_file = include_file,
        body = body,
        namespace = ctx.namespace)
def p_c_enum_definition(s, pos, ctx):
    # s.sy == ident 'enum'
    # Parse a C enum definition: an optionally named enum with items on
    # the same line after ':' or in an indented block.
    s.next()
    if s.sy == 'IDENT':
        name = s.systring
        s.next()
        cname = p_opt_cname(s)
        if cname is None and ctx.namespace is not None:
            cname = ctx.namespace + "::" + name
    else:
        name = None
        cname = None
    items = None
    s.expect(':')
    items = []
    if s.sy != 'NEWLINE':
        # single-line form: items follow directly after the colon
        p_c_enum_line(s, ctx, items)
    else:
        s.next() # 'NEWLINE'
        s.expect_indent()
        while s.sy not in ('DEDENT', 'EOF'):
            p_c_enum_line(s, ctx, items)
        s.expect_dedent()
    return Nodes.CEnumDefNode(
        pos, name = name, cname = cname, items = items,
        typedef_flag = ctx.typedef_flag, visibility = ctx.visibility,
        api = ctx.api, in_pxd = ctx.level == 'module_pxd')
def p_c_enum_line(s, ctx, items):
    # Parse one line of comma-separated enum items; a bare 'pass' line
    # contributes nothing.  Items are appended to the caller's list.
    if s.sy != 'pass':
        p_c_enum_item(s, ctx, items)
        while s.sy == ',':
            s.next()
            if s.sy in ('NEWLINE', 'EOF'):
                # trailing comma at end of line
                break
            p_c_enum_item(s, ctx, items)
    else:
        s.next()
    s.expect_newline("Syntax error in enum item list")
def p_c_enum_item(s, ctx, items):
    """Parse a single enum item 'NAME [cname] [= value]' and append the
    resulting Nodes.CEnumDefItemNode to *items*."""
    pos = s.position()
    name = p_ident(s)
    cname = p_opt_cname(s)
    if cname is None and ctx.namespace is not None:
        cname = ctx.namespace + "::" + name
    value = None
    if s.sy == '=':
        s.next()
        value = p_test(s)
    items.append(Nodes.CEnumDefItemNode(pos,
        name = name, cname = cname, value = value))
def p_c_struct_or_union_definition(s, pos, ctx):
    """Parse a C struct or union definition, optionally prefixed with
    'packed' (which is only valid for structs).

    Returns a Nodes.CStructOrUnionDefNode; *attributes* is None for a
    forward declaration (no colon-introduced body).
    """
    packed = False
    if s.systring == 'packed':
        packed = True
        s.next()
        # 'packed' may only precede 'struct', not 'union'
        if s.sy != 'IDENT' or s.systring != 'struct':
            s.expected('struct')
    # s.sy == ident 'struct' or 'union'
    kind = s.systring
    s.next()
    name = p_ident(s)
    cname = p_opt_cname(s)
    if cname is None and ctx.namespace is not None:
        cname = ctx.namespace + "::" + name
    attributes = None
    if s.sy == ':':
        s.next()
        s.expect('NEWLINE')
        s.expect_indent()
        attributes = []
        # members are parsed in a fresh, default context
        body_ctx = Ctx()
        while s.sy != 'DEDENT':
            if s.sy != 'pass':
                attributes.append(
                    p_c_func_or_var_declaration(s, s.position(), body_ctx))
            else:
                s.next()
                s.expect_newline("Expected a newline")
        s.expect_dedent()
    else:
        # forward declaration only
        s.expect_newline("Syntax error in struct or union definition")
    return Nodes.CStructOrUnionDefNode(pos,
        name = name, cname = cname, kind = kind, attributes = attributes,
        typedef_flag = ctx.typedef_flag, visibility = ctx.visibility,
        api = ctx.api, in_pxd = ctx.level == 'module_pxd', packed = packed)
def p_visibility(s, prev_visibility):
    """Parse an optional visibility keyword ('extern', 'public' or
    'readonly').

    Returns the new visibility, or *prev_visibility* unchanged when no
    visibility keyword is present.  Reports an error on conflicting
    non-default visibilities.
    """
    pos = s.position()
    if s.sy != 'IDENT' or s.systring not in ('extern', 'public', 'readonly'):
        return prev_visibility
    visibility = s.systring
    if prev_visibility != 'private' and visibility != prev_visibility:
        s.error("Conflicting visibility options '%s' and '%s'"
            % (prev_visibility, visibility))
    s.next()
    return visibility
def p_c_modifiers(s):
    """Collect the C function modifiers at the current scanner position.

    Currently only 'inline' is recognised.  Returns the (possibly
    empty) list of modifier strings, consuming them from the scanner.
    """
    modifiers = []
    while s.sy == 'IDENT' and s.systring in ('inline',):
        modifiers.append(s.systring)
        s.next()
    return modifiers
def p_c_func_or_var_declaration(s, pos, ctx):
    """Parse either a C function definition or a C variable declaration.

    The distinction is made after the base type and first declarator
    have been parsed: a following ':' introduces a function body,
    otherwise a (comma-separated) variable declaration follows.
    Returns a CFuncDefNode or a CVarDefNode respectively.
    """
    cmethod_flag = ctx.level in ('c_class', 'c_class_pxd')
    modifiers = p_c_modifiers(s)
    base_type = p_c_base_type(s, nonempty = 1, templates = ctx.templates)
    declarator = p_c_declarator(s, ctx, cmethod_flag = cmethod_flag,
                                assignable = 1, nonempty = 1)
    declarator.overridable = ctx.overridable
    if s.sy == ':':
        # a colon means a function definition follows
        if ctx.level not in ('module', 'c_class', 'module_pxd', 'c_class_pxd') and not ctx.templates:
            s.error("C function definition not allowed here")
        doc, suite = p_suite(s, Ctx(level = 'function'), with_doc = 1)
        result = Nodes.CFuncDefNode(pos,
            visibility = ctx.visibility,
            base_type = base_type,
            declarator = declarator,
            body = suite,
            doc = doc,
            modifiers = modifiers,
            api = ctx.api,
            overridable = ctx.overridable)
    else:
        #if api:
        #    s.error("'api' not allowed with variable declaration")
        # variable declaration: collect any further declarators
        declarators = [declarator]
        while s.sy == ',':
            s.next()
            # allow a trailing comma before end-of-line
            if s.sy == 'NEWLINE':
                break
            declarator = p_c_declarator(s, ctx, cmethod_flag = cmethod_flag,
                                        assignable = 1, nonempty = 1)
            declarators.append(declarator)
        s.expect_newline("Syntax error in C variable declaration")
        result = Nodes.CVarDefNode(pos,
            visibility = ctx.visibility,
            base_type = base_type,
            declarators = declarators,
            in_pxd = ctx.level == 'module_pxd',
            api = ctx.api,
            overridable = ctx.overridable)
    return result
def p_ctypedef_statement(s, ctx):
    """Parse a 'ctypedef' statement.

    Dispatches to class / enum / struct-or-union parsing when the
    typedef introduces one of those, otherwise parses a plain
    'ctypedef basetype declarator' and returns a CTypeDefNode.
    """
    # s.sy == 'ctypedef'
    pos = s.position()
    s.next()
    visibility = p_visibility(s, ctx.visibility)
    api = p_api(s)
    ctx = ctx(typedef_flag = 1, visibility = visibility)
    if api:
        ctx.api = 1
    if s.sy == 'class':
        return p_c_class_definition(s, pos, ctx)
    elif s.sy == 'IDENT' and s.systring in ('packed', 'struct', 'union', 'enum'):
        if s.systring == 'enum':
            return p_c_enum_definition(s, pos, ctx)
        else:
            return p_c_struct_or_union_definition(s, pos, ctx)
    else:
        base_type = p_c_base_type(s, nonempty = 1)
        if base_type.name is None:
            s.error("Syntax error in ctypedef statement")
        declarator = p_c_declarator(s, ctx, is_type = 1, nonempty = 1)
        s.expect_newline("Syntax error in ctypedef statement")
        return Nodes.CTypeDefNode(
            pos, base_type = base_type,
            declarator = declarator,
            visibility = visibility, api = api,
            in_pxd = ctx.level == 'module_pxd')
def p_decorators(s):
    """Parse a (possibly empty) sequence of decorator lines.

    Each '@dotted.name[(args)]' line becomes a Nodes.DecoratorNode
    whose expression is a NameNode/AttributeNode chain, optionally
    wrapped in a call.  Returns the list of decorator nodes.
    """
    decorators = []
    while s.sy == 'DECORATOR':
        pos = s.position()
        s.next()
        decstring = p_dotted_name(s, as_allowed=0)[2]
        names = decstring.split('.')
        # build 'a.b.c' as nested attribute lookups on a NameNode
        decorator = ExprNodes.NameNode(pos, name=EncodedString(names[0]))
        for name in names[1:]:
            decorator = ExprNodes.AttributeNode(pos,
                attribute=EncodedString(name),
                obj=decorator)
        if s.sy == '(':
            decorator = p_call(s, decorator)
        decorators.append(Nodes.DecoratorNode(pos, decorator=decorator))
        s.expect_newline("Expected a newline after decorator")
    return decorators
def p_def_statement(s, decorators=None):
    """Parse a Python 'def' statement (including an optional '->'
    return-type annotation) and return a Nodes.DefNode."""
    # s.sy == 'def'
    pos = s.position()
    s.next()
    name = EncodedString( p_ident(s) )
    s.expect('(');
    args, star_arg, starstar_arg = p_varargslist(s, terminator=')')
    s.expect(')')
    # 'nogil' makes no sense on a Python-level function
    if p_nogil(s):
        error(pos, "Python function cannot be declared nogil")
    return_type_annotation = None
    if s.sy == '->':
        s.next()
        return_type_annotation = p_test(s)
    doc, body = p_suite(s, Ctx(level = 'function'), with_doc = 1)
    return Nodes.DefNode(pos, name = name, args = args,
        star_arg = star_arg, starstar_arg = starstar_arg,
        doc = doc, body = body, decorators = decorators,
        return_type_annotation = return_type_annotation)
def p_varargslist(s, terminator=')', annotated=1):
    """Parse a Python argument list up to *terminator*.

    Returns (args, star_arg, starstar_arg) where args includes any
    keyword-only arguments that follow the '*' marker.
    """
    args = p_c_arg_list(s, in_pyfunc = 1, nonempty_declarators = 1,
                        annotated = annotated)
    star_arg = None
    starstar_arg = None
    if s.sy == '*':
        s.next()
        if s.sy == 'IDENT':
            star_arg = p_py_arg_decl(s, annotated=annotated)
        if s.sy == ',':
            # arguments after '*' (or bare '*') are keyword-only
            s.next()
            args.extend(p_c_arg_list(s, in_pyfunc = 1,
                nonempty_declarators = 1, kw_only = 1, annotated = annotated))
        elif s.sy != terminator:
            s.error("Syntax error in Python function argument list")
    if s.sy == '**':
        s.next()
        starstar_arg = p_py_arg_decl(s, annotated=annotated)
    return (args, star_arg, starstar_arg)
def p_py_arg_decl(s, annotated = 1):
    """Parse a single Python argument name with an optional ':'
    annotation and return a Nodes.PyArgDeclNode."""
    pos = s.position()
    name = p_ident(s)
    annotation = None
    if annotated and s.sy == ':':
        s.next()
        annotation = p_test(s)
    return Nodes.PyArgDeclNode(pos, name = name, annotation = annotation)
def p_class_statement(s, decorators):
    """Parse a Python 'class' statement and return a PyClassDefNode.

    Base classes and keyword arguments (metaclass etc.) are parsed
    with the generic call-argument machinery.
    """
    # s.sy == 'class'
    pos = s.position()
    s.next()
    class_name = EncodedString( p_ident(s) )
    class_name.encoding = s.source_encoding
    arg_tuple = None
    keyword_dict = None
    starstar_arg = None
    if s.sy == '(':
        positional_args, keyword_args, star_arg, starstar_arg = \
            p_call_parse_args(s, allow_genexp = False)
        # note: starstar_arg is kept separately and not packed here
        arg_tuple, keyword_dict = p_call_build_packed_args(
            pos, positional_args, keyword_args, star_arg, None)
    if arg_tuple is None:
        # XXX: empty arg_tuple
        arg_tuple = ExprNodes.TupleNode(pos, args = [])
    doc, body = p_suite(s, Ctx(level = 'class'), with_doc = 1)
    return Nodes.PyClassDefNode(pos,
        name = class_name,
        bases = arg_tuple,
        keyword_args = keyword_dict,
        starstar_arg = starstar_arg,
        doc = doc, body = body, decorators = decorators)
def p_c_class_definition(s, pos, ctx):
    """Parse a 'cdef class' definition and return a CClassDefNode.

    Handles the optional qualified module path (extern classes only),
    'as' alias, single base class, '[object ..., type ...]' name
    options, and the class body or forward declaration.  Visibility
    consistency checks are performed at the end.
    """
    # s.sy == 'class'
    s.next()
    module_path = []
    class_name = p_ident(s)
    while s.sy == '.':
        s.next()
        module_path.append(class_name)
        class_name = p_ident(s)
    if module_path and ctx.visibility != 'extern':
        error(pos, "Qualified class name only allowed for 'extern' C class")
    if module_path and s.sy == 'IDENT' and s.systring == 'as':
        s.next()
        as_name = p_ident(s)
    else:
        as_name = class_name
    objstruct_name = None
    typeobj_name = None
    base_class_module = None
    base_class_name = None
    if s.sy == '(':
        s.next()
        base_class_path = [p_ident(s)]
        while s.sy == '.':
            s.next()
            base_class_path.append(p_ident(s))
        if s.sy == ',':
            s.error("C class may only have one base class")
        s.expect(')')
        base_class_module = ".".join(base_class_path[:-1])
        base_class_name = base_class_path[-1]
    if s.sy == '[':
        # '[object ..., type ...]' options for the generated C names
        if ctx.visibility not in ('public', 'extern') and not ctx.api:
            error(s.position(), "Name options only allowed for 'public', 'api', or 'extern' C class")
        objstruct_name, typeobj_name = p_c_class_options(s)
    if s.sy == ':':
        if ctx.level == 'module_pxd':
            body_level = 'c_class_pxd'
        else:
            body_level = 'c_class'
        doc, body = p_suite(s, Ctx(level = body_level), with_doc = 1)
    else:
        # forward declaration only
        s.expect_newline("Syntax error in C class definition")
        doc = None
        body = None
    # visibility-dependent consistency checks
    if ctx.visibility == 'extern':
        if not module_path:
            error(pos, "Module name required for 'extern' C class")
        if typeobj_name:
            error(pos, "Type object name specification not allowed for 'extern' C class")
    elif ctx.visibility == 'public':
        if not objstruct_name:
            error(pos, "Object struct name specification required for 'public' C class")
        if not typeobj_name:
            error(pos, "Type object name specification required for 'public' C class")
    elif ctx.visibility == 'private':
        if ctx.api:
            if not objstruct_name:
                error(pos, "Object struct name specification required for 'api' C class")
            if not typeobj_name:
                error(pos, "Type object name specification required for 'api' C class")
    else:
        error(pos, "Invalid class visibility '%s'" % ctx.visibility)
    return Nodes.CClassDefNode(pos,
        visibility = ctx.visibility,
        typedef_flag = ctx.typedef_flag,
        api = ctx.api,
        module_name = ".".join(module_path),
        class_name = class_name,
        as_name = as_name,
        base_class_module = base_class_module,
        base_class_name = base_class_name,
        objstruct_name = objstruct_name,
        typeobj_name = typeobj_name,
        in_pxd = ctx.level == 'module_pxd',
        doc = doc,
        body = body)
def p_c_class_options(s):
    """Parse the '[object NAME, type NAME]' option list of a cdef class.

    Returns (objstruct_name, typeobj_name); either may be None if not
    given.
    """
    objstruct_name = None
    typeobj_name = None
    s.expect('[')
    while 1:
        if s.sy != 'IDENT':
            break
        if s.systring == 'object':
            s.next()
            objstruct_name = p_ident(s)
        elif s.systring == 'type':
            s.next()
            typeobj_name = p_ident(s)
        if s.sy != ',':
            break
        s.next()
    s.expect(']', "Expected 'object' or 'type'")
    return objstruct_name, typeobj_name
def p_property_decl(s):
    """Parse a 'property NAME:' declaration inside a cdef class and
    return a Nodes.PropertyNode."""
    pos = s.position()
    s.next() # 'property'
    name = p_ident(s)
    doc, body = p_suite(s, Ctx(level = 'property'), with_doc = 1)
    return Nodes.PropertyNode(pos, name = name, doc = doc, body = body)
def p_doc_string(s):
    """Parse an optional docstring at the current position.

    Returns the docstring (unicode preferred; bytes with a warning for
    non-unicode literals) or None if there is no string here.
    """
    if s.sy == 'BEGIN_STRING':
        pos = s.position()
        kind, bytes_result, unicode_result = p_cat_string_literal(s)
        if s.sy != 'EOF':
            s.expect_newline("Syntax error in doc string")
        if kind in ('u', ''):
            return unicode_result
        warning(pos, "Python 3 requires docstrings to be unicode strings")
        return bytes_result
    else:
        return None
def p_code(s, level=None, ctx=Ctx):
    """Parse a complete statement list at the given context level and
    require the scanner to be at EOF afterwards."""
    body = p_statement_list(s, ctx(level = level), first_statement = 1)
    if s.sy != 'EOF':
        s.error("Syntax error in statement [%s,%s]" % (
            repr(s.sy), repr(s.systring)))
    return body
# Matches '# cython: name=value[, ...]' directive comments at the top of a file.
COMPILER_DIRECTIVE_COMMENT_RE = re.compile(r"^#\s*cython\s*:\s*((\w|[.])+\s*=.*)$")

def p_compiler_directive_comments(s):
    """Collect '# cython: ...' directive comments from the leading
    comment lines and return them as a {directive: value} dict.

    Unknown directives are ignored; malformed values are reported as
    non-fatal errors.
    """
    result = {}
    while s.sy == 'commentline':
        m = COMPILER_DIRECTIVE_COMMENT_RE.match(s.systring)
        if m:
            directives = m.group(1).strip()
            try:
                result.update( Options.parse_directive_list(
                    directives, ignore_unknown=True) )
            except ValueError, e:
                s.error(e.args[0], fatal=False)
        s.next()
    return result
def p_module(s, pxd, full_module_name, ctx=Ctx):
    """Parse a whole module (.pyx or .pxd) and return a ModuleNode.

    Directive comments are consumed first so that 'language_level' can
    influence the rest of the parse.
    """
    pos = s.position()
    directive_comments = p_compiler_directive_comments(s)
    # from here on, comments are no longer significant
    s.parse_comments = False
    if 'language_level' in directive_comments:
        s.context.set_language_level(directive_comments['language_level'])
    doc = p_doc_string(s)
    if pxd:
        level = 'module_pxd'
    else:
        level = 'module'
    body = p_statement_list(s, ctx(level=level), first_statement = 1)
    if s.sy != 'EOF':
        s.error("Syntax error in statement [%s,%s]" % (
            repr(s.sy), repr(s.systring)))
    return ModuleNode(pos, doc = doc, body = body,
        full_module_name = full_module_name,
        directive_comments = directive_comments)
def p_cpp_class_definition(s, pos, ctx):
    """Parse a C++ 'cppclass' declaration and return a CppClassNode.

    Supports an optional cname, '[T1, T2]' template parameters, base
    classes, and a body that may contain nested cppclass declarations.
    """
    # s.sy == 'cppclass'
    s.next()
    module_path = []
    class_name = p_ident(s)
    cname = p_opt_cname(s)
    if cname is None and ctx.namespace is not None:
        cname = ctx.namespace + "::" + class_name
    if s.sy == '.':
        error(pos, "Qualified class name not allowed C++ class")
    if s.sy == '[':
        # template parameter names
        s.next()
        templates = [p_ident(s)]
        while s.sy == ',':
            s.next()
            templates.append(p_ident(s))
        s.expect(']')
    else:
        templates = None
    if s.sy == '(':
        s.next()
        base_classes = [p_dotted_name(s, False)[2]]
        while s.sy == ',':
            s.next()
            base_classes.append(p_dotted_name(s, False)[2])
        s.expect(')')
    else:
        base_classes = []
    if s.sy == '[':
        error(s.position(), "Name options not allowed for C++ class")
    if s.sy == ':':
        s.next()
        s.expect('NEWLINE')
        s.expect_indent()
        attributes = []
        body_ctx = Ctx(visibility = ctx.visibility)
        # make the template names visible to member declarations
        body_ctx.templates = templates
        while s.sy != 'DEDENT':
            if s.systring == 'cppclass':
                # nested C++ class
                attributes.append(
                    p_cpp_class_definition(s, s.position(), body_ctx))
            elif s.sy != 'pass':
                attributes.append(
                    p_c_func_or_var_declaration(s, s.position(), body_ctx))
            else:
                s.next()
                s.expect_newline("Expected a newline")
        s.expect_dedent()
    else:
        # forward declaration only
        attributes = None
        s.expect_newline("Syntax error in C++ class definition")
    return Nodes.CppClassNode(pos,
        name = class_name,
        cname = cname,
        base_classes = base_classes,
        visibility = ctx.visibility,
        in_pxd = ctx.level == 'module_pxd',
        attributes = attributes,
        templates = templates)
#----------------------------------------------
#
# Debugging
#
#----------------------------------------------
def print_parse_tree(f, node, level, key = None):
    """Write an indented textual dump of a parse (sub)tree to file *f*.

    @param f     writable file-like object
    @param node  a tuple, Node, list, or leaf value
    @param level current indentation depth
    @param key   optional attribute name under which *node* was found

    Tuples are dumped as '(tag @ pos ...)', Nodes by their tag/class
    name and attributes, lists element by element; anything else is
    printed as a leaf via its str().
    """
    # NOTE: the previous 'from types import ListType, TupleType' import
    # was dead code -- the type checks below use 'tuple' and 'list'.
    from Nodes import Node
    ind = " " * level
    if node:
        f.write(ind)
        if key:
            f.write("%s: " % key)
        t = type(node)
        if t is tuple:
            # legacy tuple nodes: (tag, pos, children...)
            f.write("(%s @ %s\n" % (node[0], node[1]))
            for i in xrange(2, len(node)):
                print_parse_tree(f, node[i], level+1)
            f.write("%s)\n" % ind)
            return
        elif isinstance(node, Node):
            try:
                tag = node.tag
            except AttributeError:
                tag = node.__class__.__name__
            f.write("%s @ %s\n" % (tag, node.pos))
            for name, value in node.__dict__.items():
                # 'tag' and 'pos' were already printed above
                if name != 'tag' and name != 'pos':
                    print_parse_tree(f, value, level+1, name)
            return
        elif t is list:
            f.write("[\n")
            for i in xrange(len(node)):
                print_parse_tree(f, node[i], level+1)
            f.write("%s]\n" % ind)
            return
    # leaf (or falsy) value
    f.write("%s%s\n" % (ind, node))
| hpfem/cython | Cython/Compiler/Parsing.py | Python | apache-2.0 | 99,707 |
def isPrime(num):
    """Return True if num is a prime number, else False.

    Fixes two defects of the original version:
    * num <= 0 returned True because the trial-division range was
      empty; anything below 2 is now correctly rejected.
    * trial division ran over every integer up to num//2; it now tests
      only odd divisors up to sqrt(num), i.e. O(sqrt(n)) instead of O(n).
    """
    if num < 2:
        return False
    if num < 4:
        return True  # 2 and 3 are prime
    if num % 2 == 0:
        return False
    # only odd candidate divisors up to sqrt(num) are needed
    x = 3
    while x * x <= num:
        if num % x == 0:
            return False
        x += 2
    return True
# Find the 10001st prime.  2 and 3 are pre-counted, so only odd
# candidates from 5 upwards need to be tested.
count = 2   # number of primes found so far
number = 3  # most recently found prime
while count < 10001:
    number += 2
    if not isPrime(number):
        continue
    print(str(count)+" "+str(number))
    count += 1
print(number)
| scottnm/ProjectEuler | python/Problem7-firstNprimes.py | Python | apache-2.0 | 314 |
"""
read informs dataset
"""
# !/usr/bin/env python
# coding=utf-8
# Read data and read tree fuctions for INFORMS data
# user att ['DUID','PID','DUPERSID','DOBMM','DOBYY','SEX','RACEX','RACEAX','RACEBX','RACEWX','RACETHNX','HISPANX','HISPCAT','EDUCYEAR','Year','marry','income','poverty']
# condition att ['DUID','DUPERSID','ICD9CODX','year']
__DEBUG = False
USER_ATT = ['DUID', 'PID', 'DUPERSID', 'DOBMM', 'DOBYY', 'SEX', 'RACEX', 'RACEAX',
'RACEBX', 'RACEWX', 'RACETHNX', 'HISPANX', 'HISPCAT', 'EDUCYEAR',
'Year', 'marry', 'income', 'poverty']
CONDITION_ATT = ['DUID', 'DUPERSID', 'ICD9CODX', 'year']
# Only 5 relational attributes and 1 transaction attribute are selected (according to Poulis's paper)
QI_INDEX = [3, 4, 6, 13, 16]
__DEBUG = False
def read_data():
    """
    Read the INFORMS microdata from data/demographics.csv and
    data/conditions.csv.

    Returns a list of records, one per person (DUPERSID) that has at
    least one condition entry:
        [QI attribute values (see QI_INDEX)..., [ICD9 codes]]

    Changes from the original:
    * files are opened with context managers so they are closed even on
      error ('rU' mode was dropped -- it was removed in Python 3.11 and
      'r' already gives universal newlines on Python 3);
    * the bare 'except:' around the userdata insert is gone; the first
      demographics row per DUPERSID is kept explicitly (the old
      try/append code effectively did the same, since appended rows
      never reached the QI_INDEX positions).
    NOTE(review): the line split is a naive comma split and would break
    on quoted fields containing commas -- confirm the data files never
    contain such fields before switching to the csv module.
    """
    with open('data/demographics.csv', 'r') as userfile, \
            open('data/conditions.csv', 'r') as conditionfile:
        userdata = {}
        for i, line in enumerate(userfile):
            # skip the header line of the csv
            if i == 0:
                continue
            row = line.strip().split(',')
            row[2] = row[2][1:-1]  # strip the quotes around DUPERSID
            # keep the first demographics row seen for each person
            if row[2] not in userdata:
                userdata[row[2]] = row
        conditiondata = {}
        for i, line in enumerate(conditionfile):
            if i == 0:
                continue
            row = line.strip().split(',')
            row[1] = row[1][1:-1]  # strip quotes around DUPERSID
            row[2] = row[2][1:-1]  # strip quotes around ICD9CODX
            try:
                conditiondata[row[1]].append(row)
            except KeyError:
                conditiondata[row[1]] = [row]
    data = []
    # join: only people that appear in both tables are emitted
    for k, v in userdata.items():
        if k not in conditiondata:
            continue
        codes = [t[2] for t in conditiondata[k]]
        record = [v[index] for index in QI_INDEX]
        record.append(codes)
        data.append(record)
    return data
| qiyuangong/Mondrian | utils/read_informs_data.py | Python | mit | 2,303 |
#!/usr/bin/python
import sys, subprocess, time, socket
sys.path.append("/home/pi/Adafruit-Raspberry-Pi-Python-Code/Adafruit_CharLCDPlate")
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
from PiLCDDisplay import PiLCDDisplay
HOLD_TIME = 3.0 #Time (seconds) to hold select button for shut down
REFRESH_TIME = 3.0 #Time (seconds) between data updates
HALT_ON_EXIT = True  # when True, shutdown() halts the Pi instead of just exiting
display = PiLCDDisplay()
lcd = display.lcd  # shortcut to the underlying Adafruit LCD plate
prevCol = -1  # NOTE(review): appears unused in this script -- confirm before removing
prev = -1  # last raw button state, used to detect state changes
lastTime = time.time()  # timestamp of the last display refresh
def shutdown():
    """Clear the LCD and halt the system, or just exit the script when
    HALT_ON_EXIT is False."""
    lcd.clear()
    if HALT_ON_EXIT:
        lcd.message('Wait 30 seconds\nto unplug...')
        subprocess.call("sync")  # flush filesystem buffers before halting
        subprocess.call(["shutdown", "-h", "now"])
    else:
        exit(0)
# Check for network connection at startup: keep retrying for up to two
# minutes, then give up and exit.
t = time.time()
while True:
    lcd.clear()
    lcd.message('Checking network\nconnection ...')
    if (time.time() - t) > 120:
        # No connection reached after 2 minutes
        lcd.clear()
        lcd.message('Network is\nunavailable')
        time.sleep(30)
        exit(0)
    try:
        # a UDP "connect" does not send packets; it just asks the OS to
        # pick the local interface/address that would route to 8.8.8.8
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('8.8.8.8', 0))
        lcd.backlight(lcd.ON)
        lcd.clear()
        lcd.message('IP address:\n' + s.getsockname()[0])
        time.sleep(5)
        display.initInfo() # Start info gathering/display
        break # Success
    # NOTE(review): bare except also swallows KeyboardInterrupt here --
    # consider narrowing to OSError.
    except:
        time.sleep(1) # Pause a moment, keep trying
# Main loop: listen for button presses and refresh the display when the
# button state is unchanged and the refresh interval has elapsed.
while True:
    b = lcd.buttons()
    # Use '!=' rather than the original 'is not': identity comparison of
    # ints is unreliable -- it only worked by accident for CPython's
    # small-int cache.
    if b != prev:
        if lcd.buttonPressed(lcd.SELECT):
            tt = time.time() # Start time of button press
            while lcd.buttonPressed(lcd.SELECT): # Wait for button release
                if (time.time() - tt) >= HOLD_TIME: # Extended hold?
                    shutdown() # We're outta here
            # short press: cycle the backlight instead
            display.backlightStep()
        elif lcd.buttonPressed(lcd.LEFT):
            display.scrollRight()
        elif lcd.buttonPressed(lcd.RIGHT):
            display.scrollLeft()
        elif lcd.buttonPressed(lcd.UP):
            display.modeUp()
        elif lcd.buttonPressed(lcd.DOWN):
            display.modeDown()
        prev = b
        lastTime = time.time()
    else:
        now = time.time()
        since = now - lastTime
        # refresh when the interval elapsed, or if the clock jumped backwards
        if since > REFRESH_TIME or since < 0.0:
            display.update()
            lastTime = now
| denmojo/PiLCD | PiLCD.py | Python | gpl-3.0 | 2,328 |
#!/usr/bin/env python
"""
@package ion.agents.data.handlers.slocum_data_handler
@file ion/agents/data/handlers/slocum_data_handler
@author Christopher Mueller
@brief
"""
from pyon.public import log
from pyon.util.containers import get_safe
from ion.services.dm.utility.granule.record_dictionary import RecordDictionaryTool
from ion.agents.data.handlers.base_data_handler import BaseDataHandler
from ion.agents.data.handlers.handler_utils import list_file_info, get_sbuffer, calculate_iteration_count, get_time_from_filename
import numpy as np
# Dataset-description parameters this handler understands, as
# (name, type, description) triples.
DH_CONFIG_DETAILS = {
    'ds_desc_params': [
        ('base_url', str, 'base path/url for this dataset'),
        ('header_count', int, '# of header lines'),
        ('pattern', str, 'The filter pattern for this dataset. If file-based, use shell-style notation; if remote (http, ftp), use regex'),
    ],
}
class SlocumDataHandler(BaseDataHandler):
    """DataHandler that discovers Slocum glider ASCII data files and
    publishes their contents as granules."""

    @classmethod
    def _init_acquisition_cycle(cls, config):
        # TODO: Can't build a parser here because we won't have a file name!! Just a directory :)
        # May not be much to do in this method...
        # maybe just ensure access to the dataset_dir and move the 'buried' params up to the config dict?
        ext_dset_res = get_safe(config, 'external_dataset_res', None)
        if not ext_dset_res:
            raise SystemError('external_dataset_res not present in configuration, cannot continue')
        # expose the dataset parameters under the flat 'ds_params' key
        config['ds_params'] = ext_dset_res.dataset_description.parameters

    #        base_url = ext_dset_res.dataset_description.parameters['base_url']
    #        hdr_cnt = get_safe(ext_dset_res.dataset_description.parameters, 'header_count', 17)
    #        pattern = get_safe(ext_dset_res.dataset_description.parameters, 'pattern')
    #        config['header_count'] = hdr_cnt
    #        config['base_url'] = base_url
    #        config['pattern'] = pattern

    @classmethod
    def _constraints_for_new_request(cls, config):
        """Compute the constraints dict (time range + file list) for the
        files that appeared since the previous check."""
        old_list = get_safe(config, 'new_data_check') or []
        # CBM: Fix this when the DotList crap is sorted out
        old_list = list(old_list)  # NOTE that the internal tuples are also DotList objects
        ret = {}
        base_url = get_safe(config, 'ds_params.base_url')
        list_pattern = get_safe(config, 'ds_params.list_pattern')
        date_pattern = get_safe(config, 'ds_params.date_pattern')
        date_extraction_pattern = get_safe(config, 'ds_params.date_extraction_pattern')
        curr_list = list_file_info(base_url, list_pattern)
        new_list = [x for x in curr_list if x not in old_list]
        # NOTE(review): raises IndexError when new_list is empty --
        # confirm callers guard against this.
        ret['start_time'] = get_time_from_filename(new_list[0][0], date_extraction_pattern, date_pattern)
        ret['end_time'] = get_time_from_filename(new_list[len(new_list) - 1][0], date_extraction_pattern, date_pattern)
        ret['new_files'] = new_list
        ret['bounding_box'] = {}
        ret['vars'] = []
        return ret

    @classmethod
    def _constraints_for_historical_request(cls, config):
        """Select the files whose filename-encoded timestamps fall in
        [constraints.start_time, constraints.end_time]."""
        base_url = get_safe(config, 'ds_params.base_url')
        list_pattern = get_safe(config, 'ds_params.list_pattern')
        date_pattern = get_safe(config, 'ds_params.date_pattern')
        date_extraction_pattern = get_safe(config, 'ds_params.date_extraction_pattern')
        start_time = get_safe(config, 'constraints.start_time')
        end_time = get_safe(config, 'constraints.end_time')
        new_list = []
        curr_list = list_file_info(base_url, list_pattern)
        for x in curr_list:
            curr_time = get_time_from_filename(x[0], date_extraction_pattern, date_pattern)
            if start_time <= curr_time <= end_time:
                new_list.append(x)
        # config['constraints']['new_files'] = new_list
        return {'new_files': new_list}

    @classmethod
    def _get_data(cls, config):
        """
        Iterable function that acquires data from a source iteratively based on constraints provided by config
        Passed into BaseDataHandler._publish_data and iterated to publish samples.
        @param config dict containing configuration parameters, may include constraints, formatters, etc
        @retval an iterable that returns well-formed Granule objects on each iteration
        """
        new_flst = get_safe(config, 'constraints.new_files', [])
        hdr_cnt = get_safe(config, 'header_count', SlocumParser.DEFAULT_HEADER_SIZE)
        for f in new_flst:
            try:
                parser = SlocumParser(f[0], hdr_cnt)
                #CBM: Not in use yet...
                #    ext_dset_res = get_safe(config, 'external_dataset_res', None)
                #    t_vname = ext_dset_res.dataset_description.parameters['temporal_dimension']
                #    x_vname = ext_dset_res.dataset_description.parameters['zonal_dimension']
                #    y_vname = ext_dset_res.dataset_description.parameters['meridional_dimension']
                #    z_vname = ext_dset_res.dataset_description.parameters['vertical_dimension']
                #    var_lst = ext_dset_res.dataset_description.parameters['variables']
                max_rec = get_safe(config, 'max_records', 1)
                dprod_id = get_safe(config, 'data_producer_id', 'unknown data producer')
                stream_def = get_safe(config, 'stream_def')
                # number of granules needed to cover all rows, max_rec rows each
                # (Python 2 only: dict.keys() is indexable here)
                cnt = calculate_iteration_count(len(parser.data_map[parser.data_map.keys()[0]]), max_rec)
                for x in xrange(cnt):
                    #rdt = RecordDictionaryTool(taxonomy=ttool)
                    rdt = RecordDictionaryTool(stream_definition_id=stream_def)
                    for name in parser.sensor_map:
                        # slice out this granule's window of rows
                        d = parser.data_map[name][x * max_rec:(x + 1) * max_rec]
                        rdt[name] = d
                    #g = build_granule(data_producer_id=dprod_id, taxonomy=ttool, record_dictionary=rdt)
                    g = rdt.to_granule()
                    yield g
            except SlocumParseException:
                # TODO: Decide what to do here, raise an exception or carry on
                log.error('Error parsing data file: \'{0}\''.format(f))
class SlocumParser(object):
    """Parser for Slocum glider ASCII data files.

    After construction:
    * header_map maps header keys to their string values,
    * sensor_map maps sensor name -> (units, dtype name),
    * data_map maps sensor name -> numpy array of that column's data.

    Fixes over the original:
    * header_map/sensor_map/data_map were *class-level* dicts, so every
      parser instance mutated the same shared dictionaries; they are
      now per-instance attributes.
    * the dtype dispatch used 'is' to compare strings, which only
      worked because CPython interns single-character strings; it now
      uses '=='.
    """
    # John K's documentation says there are 16 header lines, but I believe there are actually 17
    # The 17th indicating the 'dtype' of the data for that column
    DEFAULT_HEADER_SIZE = 17

    def __init__(self, url=None, header_size=17):
        """
        Constructor for the parser. Initializes headers and data.
        @param url the url/filepath of the file
        @param header_size number of header lines; this information is in the header itself, so it is consumed
        """
        if not url:
            raise SlocumParseException('Must provide a filename')
        # per-instance maps (previously shared class attributes, see class docstring)
        self.header_map = {}
        self.sensor_map = {}
        self.data_map = {}
        self.header_size = int(header_size)
        sb = None
        try:
            # Get a byte-string generator for use in the data-retrieval loop (to avoid opening the file every time)
            sb = get_sbuffer(url)
            sb.seek(0)
            # the last 3 header lines are names/units/dtypes, handled below
            for x in xrange(self.header_size - 3):
                line = sb.readline()
                key, value = line.split(':', 1)
                self.header_map[key.strip()] = value.strip()
            # Collect the sensor names & units
            sensor_names = sb.readline().split()
            units = sb.readline().split()
            # Keep track of the intended data type for each sensor
            # (the file encodes it as the value's byte width)
            dtypes = []
            for d in sb.readline().split():
                if d == '1':
                    dtypes.append('byte')
                elif d == '2':
                    dtypes.append('short')
                elif d == '4':
                    dtypes.append('float')
                elif d == '8':
                    dtypes.append('double')
            assert len(sensor_names) == len(units) == len(dtypes)
            for i in xrange(len(sensor_names)):
                # rewind and re-read the file once per column
                sb.seek(0)
                self.sensor_map[sensor_names[i]] = (units[i], dtypes[i])
                dat = np.genfromtxt(fname=sb, skip_header=self.header_size, usecols=i, dtype=dtypes[i], missing_values='NaN')  # ,usemask=True)
                self.data_map[sensor_names[i]] = dat
        finally:
            if sb is not None:
                sb.close()
class SlocumParseException(Exception):
    """Raised when a Slocum data file cannot be parsed."""
    pass
| ooici/coi-services | ion/agents/data/handlers/slocum_data_handler.py | Python | bsd-2-clause | 8,434 |
"""
---------------------------------------------------------------------
.. sectionauthor:: Juan Arias de Reyna <arias@us.es>
This module implements zeta-related functions using the Riemann-Siegel
expansion: zeta_offline(s,k=0)
* coef(J, eps): Needed in the computation of Rzeta(s,k)
* Rzeta_simul(s, der=0) computes Rzeta^(k)(s) and Rzeta^(k)(1-s) simultaneously
for 0 <= k <= der. Used by zeta_offline and z_offline
* Rzeta_set(s, derivatives) computes Rzeta^(k)(s) for given derivatives, used by
z_half(t,k) and zeta_half
* z_offline(w,k): Z(w) and its derivatives of order k <= 4
* z_half(t,k): Z(t) (Riemann Siegel function) and its derivatives of order k <= 4
* zeta_offline(s): zeta(s) and its derivatives of order k<= 4
* zeta_half(1/2+it,k): zeta(s) and its derivatives of order k<= 4
* rs_zeta(s,k=0) Computes zeta^(k)(s) Unifies zeta_half and zeta_offline
* rs_z(w,k=0) Computes Z^(k)(w) Unifies z_offline and z_half
----------------------------------------------------------------------
This program uses Riemann-Siegel expansion even to compute
zeta(s) on points s = sigma + i t with sigma arbitrary not
necessarily equal to 1/2.
It is founded on a new deduction of the formula, with rigorous
and sharp bounds for the terms and rest of this expansion.
More information on the papers:
J. Arias de Reyna, High Precision Computation of Riemann's
Zeta Function by the Riemann-Siegel Formula I, II
We refer to them as I, II.
In them we shall find detailed explanation of all the
procedure.
The program uses Riemann-Siegel expansion.
This is useful when t is big, ( say t > 10000 ).
The precision is limited, roughly it can compute zeta(sigma+it)
with an error less than exp(-c t) for some constant c depending
on sigma. The program gives an error when the Riemann-Siegel
formula can not compute to the wanted precision.
"""
import math
class RSCache(object):
    """Mixin that equips a context with the Riemann-Siegel coefficient
    cache used by coef()/_coef().

    Cache layout: [J, eps, c, pipower] -- the parameters the cached
    coefficients were computed for, the coefficient dict, and the
    cached powers of pi.
    """
    def __init__(ctx):
        # mpmath convention: the context object itself plays the role of 'self'
        ctx._rs_cache = [0, 10, {}, {}]
from .functions import defun
#-------------------------------------------------------------------------------#
# #
# coef(ctx, J, eps, _cache=[0, 10, {} ] ) #
# #
#-------------------------------------------------------------------------------#
# This function computes the coefficients c[n] defined on (I, equation (47))
# but see also (II, section 3.14).
#
# Since these coefficients are very difficult to compute we save the values
# in a cache. So if we compute several values of the functions Rzeta(s) for
# near values of s, we do not recompute these coefficients.
#
# c[n] are the Taylor coefficients of the function:
#
# F(z):= (exp(pi*j*(z*z/2+3/8))-j* sqrt(2) cos(pi*z/2))/(2*cos(pi *z))
#
#
def _coef(ctx, J, eps):
    r"""
    Computes the coefficients `c_n` for `0\le n\le 2J` with error less than eps

    **Definition**

    The coefficients c_n are defined by

    .. math ::

        \begin{equation}
        F(z)=\frac{e^{\pi i
        \bigl(\frac{z^2}{2}+\frac38\bigr)}-i\sqrt{2}\cos\frac{\pi}{2}z}{2\cos\pi
        z}=\sum_{n=0}^\infty c_{2n} z^{2n}
        \end{equation}

    they are computed applying the relation

    .. math ::

        \begin{multline}
        c_{2n}=-\frac{i}{\sqrt{2}}\Bigl(\frac{\pi}{2}\Bigr)^{2n}
        \sum_{k=0}^n\frac{(-1)^k}{(2k)!}
        2^{2n-2k}\frac{(-1)^{n-k}E_{2n-2k}}{(2n-2k)!}+\\
        +e^{3\pi i/8}\sum_{j=0}^n(-1)^j\frac{
        E_{2j}}{(2j)!}\frac{i^{n-j}\pi^{n+j}}{(n-j)!2^{n-j+1}}.
        \end{multline}

    Returns [newJ, neweps6, c, pipower], the layout stored in the
    context's _rs_cache.
    """
    newJ = J+2                # compute a few more coefficients than needed
    neweps6 = eps/2.          # compute with slightly more precision
                              # than needed
    # PREPARATION FOR THE COMPUTATION OF V(N) AND W(N)
    # See II Section 3.16
    #
    # Computing the exponent wpvw of the error II equation (81)
    wpvw = max(ctx.mag(10*(newJ+3)), 4*newJ+5-ctx.mag(neweps6))
    # Preparation of Euler numbers (we need until the 2*RS_NEWJ)
    E = ctx._eulernum(2*newJ)
    # Now we have in the cache all the needed Euler numbers.
    #
    # Computing the powers of pi
    #
    # We need to compute the powers pi**n for 1<= n <= 2*J
    # with relative error less than 2**(-wpvw)
    # it is easy to show that this is obtained
    # taking wppi as the least d with
    # 2**d>40*J and 2**d> 4.24 *newJ + 2**wpvw
    # In II Section 3.9 we need also that
    # wppi > wptcoef[0], and that the powers
    # here computed 0<= k <= 2*newJ are more
    # than those needed there that are 2*L-2.
    # so we need J >= L this will be checked
    # before computing tcoef[]
    wppi = max(ctx.mag(40*newJ), ctx.mag(newJ)+3 +wpvw)
    ctx.prec = wppi
    pipower = {}
    pipower[0] = ctx.one
    pipower[1] = ctx.pi
    for n in range(2,2*newJ+1):
        pipower[n] = pipower[n-1]*ctx.pi
    # COMPUTING THE COEFFICIENTS v(n) AND w(n)
    # see II equation (61) and equations (81) and (82)
    ctx.prec = wpvw+2
    v={}
    w={}
    for n in range(0,newJ+1):
        # v[n] = (-1)^n E_{2n}/(2n)! * pi^{2n}
        va = (-1)**n * ctx._eulernum(2*n)
        va = ctx.mpf(va)/ctx.fac(2*n)
        v[n]=va*pipower[2*n]
    for n in range(0,2*newJ+1):
        # w[n] = (pi/2)^n / n!
        wa = ctx.one/ctx.fac(n)
        wa=wa/(2**n)
        w[n]=wa*pipower[n]
    # COMPUTATION OF THE CONVOLUTIONS RS_P1 AND RS_P2
    # See II Section 3.16
    ctx.prec = 15
    wpp1a = 9 - ctx.mag(neweps6)
    P1 = {}
    for n in range(0,newJ+1):
        # working precision grows with n; set low first to compute it cheaply
        ctx.prec = 15
        wpp1 = max(ctx.mag(10*(n+4)),4*n+wpp1a)
        ctx.prec = wpp1
        sump = 0
        for k in range(0,n+1):
            sump += ((-1)**k) * v[k]*w[2*n-2*k]
        P1[n]=((-1)**(n+1))*ctx.j*sump
    P2={}
    for n in range(0,newJ+1):
        ctx.prec = 15
        wpp2 = max(ctx.mag(10*(n+4)),4*n+wpp1a)
        ctx.prec = wpp2
        sump = 0
        for k in range(0,n+1):
            sump += (ctx.j**(n-k)) * v[k]*w[n-k]
        P2[n]=sump
    # COMPUTING THE COEFFICIENTS c[2n]
    # See II Section 3.14
    ctx.prec = 15
    wpc0 = 5 - ctx.mag(neweps6)
    wpc = max(6,4*newJ+wpc0)
    ctx.prec = wpc
    mu = ctx.sqrt(ctx.mpf('2'))/2
    nu = ctx.expjpi(3./8)/2
    c={}
    for n in range(0,newJ):
        ctx.prec = 15
        wpc = max(6,4*n+wpc0)
        ctx.prec = wpc
        c[2*n] = mu*P1[n]+nu*P2[n]
    # odd coefficients of F vanish
    for n in range(1,2*newJ,2):
        c[n] = 0
    return [newJ, neweps6, c, pipower]
def coef(ctx, J, eps):
    """Return (c, pipower) for the given J and eps, using the cached
    values when they were computed with at least these requirements.

    The computation itself runs on the multiprecision context ctx._mp
    with its precision restored afterwards; results are converted to
    ctx's number type when ctx is not the mp context itself.
    """
    _cache = ctx._rs_cache
    if J <= _cache[0] and eps >= _cache[1]:
        # cached values are at least as accurate as requested
        return _cache[2], _cache[3]
    orig = ctx._mp.prec
    try:
        data = _coef(ctx._mp, J, eps)
    finally:
        ctx._mp.prec = orig
    if ctx is not ctx._mp:
        data[2] = dict((k,ctx.convert(v)) for (k,v) in data[2].items())
        data[3] = dict((k,ctx.convert(v)) for (k,v) in data[3].items())
    ctx._rs_cache[:] = data
    return ctx._rs_cache[2], ctx._rs_cache[3]
#-------------------------------------------------------------------------------#
# #
# Rzeta_simul(s,k=0) #
# #
#-------------------------------------------------------------------------------#
# This function return a list with the values:
# Rzeta(sigma+it), conj(Rzeta(1-sigma+it)),Rzeta'(sigma+it), conj(Rzeta'(1-sigma+it)),
# .... , Rzeta^{(k)}(sigma+it), conj(Rzeta^{(k)}(1-sigma+it))
#
# Useful to compute the function zeta(s) and Z(w) or its derivatives.
#
def aux_M_Fp(ctx, xA, xeps4, a, xB1, xL):
    """Determine M, the number of derivatives Fp[m] to compute.

    See II Section 3.11 equations (47) and (48).  Starting from the
    maximal value m = 3*L-3, m is decreased while the error bound
    still holds (and m > 1).
    """
    target = ctx.ln(126.0657606 * xA / xeps4)  # 126.0657606 = 316/sqrt(2*pi)
    slope = (2 * ctx.ln(ctx.pi) + ctx.ln(xB1) + ctx.ln(a)) / 3 - ctx.ln(2 * ctx.pi) / 2

    def gamma_term(m):
        # the loggamma part of the bound, recomputed for each candidate m
        return (ctx.loggamma(m + 1) - ctx.loggamma(m / 3.0 + 2)) / 2 - ctx.loggamma((m + 1) / 2.)

    m = 3 * xL - 3
    while target < m * slope + gamma_term(m) and m > 1:
        m = m - 1
    return m
def aux_J_needed(ctx, xA, xeps4, a, xB1, xM):
    """Return the bound h3 used to determine J, the number of terms needed
    in the Taylor series of F.  See II Section 3.11 equation (49).
    """
    bound1 = xeps4/(632*xA)
    # 126.31337419529260248 = pi^2 * e^2 * sqrt(3)
    base = xB1*a * 126.31337419529260248
    bound2 = bound1 * ctx.power((base/xM**2), (xM-1)/3) / xM
    return min(bound1, bound2)
def Rzeta_simul(ctx, s, der=0):
    """Simultaneously compute the Riemann-Siegel auxiliary function at s
    and at the reflected point.

    Returns a pair of dicts ``(xrz, yrz)`` with, for chi = 0, ..., der,

        xrz[chi] = Rzeta^(chi)(sigma + i t)
        yrz[chi] = conj(Rzeta^(chi)((1-sigma) + i t))

    where s = sigma + i t.  Both evaluations share most of the
    Riemann-Siegel machinery (x* quantities refer to s, y* quantities to
    the reflected point), which is why they are computed together.  The
    "I Section ..." / "II Section ..." comments cite the reference papers
    this implementation follows.  Raises NotImplementedError when the
    method cannot reach the requested precision.  ctx.prec is restored
    before returning.
    """
    # First we take the value of ctx.prec
    wpinitial = ctx.prec
    # INITIALIZATION
    # Take the real and imaginary part of s
    t = ctx._im(s)
    xsigma = ctx._re(s)
    ysigma = 1 - xsigma
    # Now compute several parameter that appear on the program
    ctx.prec = 15
    a = ctx.sqrt(t/(2*ctx.pi))
    xasigma = a ** xsigma
    yasigma = a ** ysigma
    # We need a simple bound A1 < asigma  (see II Section 3.1 and 3.3)
    xA1=ctx.power(2, ctx.mag(xasigma)-1)
    yA1=ctx.power(2, ctx.mag(yasigma)-1)
    # We compute various epsilon's  (see II end of Section 3.1)
    eps = ctx.power(2, -wpinitial)
    eps1 = eps/6.
    xeps2 = eps * xA1/3.
    yeps2 = eps * yA1/3.
    # COMPUTING SOME COEFFICIENTS THAT DEPENDS
    #               ON  sigma
    # constant b and c  (see I  Theorem 2 formula (26) )
    # coefficients A and B1  (see I Section 6.1 equation (50))
    #
    # here we do not need high precision
    ctx.prec = 15
    if xsigma > 0:
        xb = 2.
        xc = math.pow(9,xsigma)/4.44288
        # 4.44288 =(math.sqrt(2)*math.pi)
        xA = math.pow(9,xsigma)
        xB1 = 1
    else:
        xb = 2.25158  #  math.sqrt( (3-2* math.log(2))*math.pi )
        xc = math.pow(2,-xsigma)/4.44288
        xA = math.pow(2,-xsigma)
        xB1 = 1.10789   #  = 2*sqrt(1-log(2))
    if(ysigma > 0):
        yb = 2.
        yc = math.pow(9,ysigma)/4.44288
        # 4.44288 =(math.sqrt(2)*math.pi)
        yA = math.pow(9,ysigma)
        yB1 = 1
    else:
        yb = 2.25158  #  math.sqrt( (3-2* math.log(2))*math.pi )
        yc = math.pow(2,-ysigma)/4.44288
        yA = math.pow(2,-ysigma)
        yB1 = 1.10789   #  = 2*sqrt(1-log(2))
    # COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL
    #                         CORRECTION
    # See II Section 3.2
    ctx.prec = 15
    xL = 1
    while 3*xc*ctx.gamma(xL*0.5) * ctx.power(xb*a,-xL) >= xeps2:
        xL = xL+1
    xL = max(2,xL)
    yL = 1
    while 3*yc*ctx.gamma(yL*0.5) * ctx.power(yb*a,-yL) >= yeps2:
        yL = yL+1
    yL = max(2,yL)
    #  The number L has to satisfy some conditions.
    #  If not RS can not compute Rzeta(s) with the prescribed precision
    #  (see II, Section 3.2 condition (20)  ) and
    #  (II, Section 3.3 condition (22) ). Also we have added
    #  an additional technical  condition in Section 3.17 Proposition 17
    if ((3*xL >= 2*a*a/25.) or (3*xL+2+xsigma<0) or (abs(xsigma) > a/2.) or \
        (3*yL >= 2*a*a/25.) or (3*yL+2+ysigma<0) or (abs(ysigma) > a/2.)):
        ctx.prec = wpinitial
        raise NotImplementedError("Riemann-Siegel can not compute with such precision")
    #  We take the maximum of the two values
    L = max(xL, yL)
    #  INITIALIZATION (CONTINUATION)
    #
    # eps3 is the constant defined on (II, Section 3.5 equation (27) )
    # each term of the RS correction must be computed with error <= eps3
    xeps3 =  xeps2/(4*xL)
    yeps3 =  yeps2/(4*yL)
    # eps4 is defined on (II Section 3.6  equation (30) )
    # each component of the formula (II Section 3.6 equation (29) )
    # must be computed with error <= eps4
    xeps4 = xeps3/(3*xL)
    yeps4 = yeps3/(3*yL)
    # COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
    xM = aux_M_Fp(ctx, xA, xeps4, a, xB1, xL)
    yM = aux_M_Fp(ctx, yA, yeps4, a, yB1, yL)
    M = max(xM, yM)
    # COMPUTING NUMBER OF TERMS J NEEDED
    h3 = aux_J_needed(ctx, xA, xeps4, a, xB1, xM)
    h4 = aux_J_needed(ctx, yA, yeps4, a, yB1, yM)
    h3 = min(h3,h4)
    J = 12
    jvalue = (2*ctx.pi)**J / ctx.gamma(J+1)
    while jvalue > h3:
        J = J+1
        jvalue = (2*ctx.pi)*jvalue/J
    # COMPUTING eps5[m] for 1 <= m <= 21
    # See II Section 10 equation (43)
    # We choose the minimum of the two possibilities
    eps5={}
    xforeps5 = math.pi*math.pi*xB1*a
    yforeps5 = math.pi*math.pi*yB1*a
    for m in range(0,22):
        xaux1 = math.pow(xforeps5, m/3)/(316.*xA)
        yaux1 = math.pow(yforeps5, m/3)/(316.*yA)
        aux1 = min(xaux1, yaux1)
        aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5)
        aux2 = math.sqrt(aux2)
        eps5[m] = (aux1*aux2*min(xeps4,yeps4))
    # COMPUTING wpfp
    # See II Section 3.13 equation (59)
    twenty = min(3*L-3, 21)+1
    aux = 6812*J
    wpfp = ctx.mag(44*J)
    for m in range(0,twenty):
        wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m]))
    # COMPUTING N AND p
    # See II Section
    ctx.prec = wpfp + ctx.mag(t)+20
    a = ctx.sqrt(t/(2*ctx.pi))
    N = ctx.floor(a)
    p = 1-2*(a-N)
    # now we get a rounded version of p
    # to the precision wpfp
    # this possibly is not necessary
    num=ctx.floor(p*(ctx.mpf('2')**wpfp))
    difference = p * (ctx.mpf('2')**wpfp)-num
    if (difference < 0.5):
        num = num
    else:
        num = num+1
    p = ctx.convert(num * (ctx.mpf('2')**(-wpfp)))
    # COMPUTING THE COEFFICIENTS c[n] = cc[n]
    # We shall use the notation cc[n], since there is
    # a constant that is called c
    # See II Section 3.14
    # We compute the coefficients and also save then in a
    # cache.  The bulk of the computation is passed to
    # the function  coef()
    #
    #  eps6 is defined in II Section 3.13  equation (58)
    eps6 = ctx.power(ctx.convert(2*ctx.pi), J)/(ctx.gamma(J+1)*3*J)
    #  Now we compute the coefficients
    cc = {}
    cont = {}
    cont, pipowers = coef(ctx, J, eps6)
    cc=cont.copy()  # we need a copy since we have
                    # to change its values.
    Fp={}  # this is the adequate locus of this
    for n in range(M, 3*L-2):
        Fp[n] = 0
    # NOTE(review): this reset discards the zero entries just installed for
    # n in range(M, 3*L-2); Fp[3*k-2*ell] below would raise KeyError if
    # M < 3*L-3 (compare Rzeta_set, which re-zeroes *after* the reset) --
    # confirm that M == 3*L-3 always holds here.
    Fp={}
    ctx.prec = wpfp
    for m in range(0,M+1):
        sumP = 0
        for k in range(2*J-m-1,-1,-1):
            sumP = (sumP * p)+ cc[k]
        Fp[m] = sumP
        # preparation of the new coefficients
        for k in range(0,2*J-m-1):
            cc[k] = (k+1)* cc[k+1]
    # COMPUTING THE NUMBERS  xd[u,n,k], yd[u,n,k]
    #  See II Section 3.17
    #
    #  First we compute the working precisions xwpd[k]
    #   See II equation (92)
    xwpd={}
    d1 = max(6,ctx.mag(40*L*L))
    xd2 = 13+ctx.mag((1+abs(xsigma))*xA)-ctx.mag(xeps4)-1
    xconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*xB1*xB1)) /2
    for n in range(0,L):
        xd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*xconst)+xd2
        xwpd[n]=max(xd3,d1)
    # procedure of II Section 3.17
    ctx.prec = xwpd[1]+10
    xpsigma = 1-(2*xsigma)
    xd = {}
    xd[0,0,-2]=0; xd[0,0,-1]=0; xd[0,0,0]=1; xd[0,0,1]=0
    xd[0,-1,-2]=0; xd[0,-1,-1]=0; xd[0,-1,0]=1; xd[0,-1,1]=0
    for n in range(1,L):
        ctx.prec = xwpd[n]+10
        for k in range(0,3*n//2+1):
            m = 3*n-2*k
            if(m!=0):
                m1 = ctx.one/m
                c1= m1/4
                c2=(xpsigma*m1)/2
                c3=-(m+1)
                xd[0,n,k]=c3*xd[0,n-1,k-2]+c1*xd[0,n-1,k]+c2*xd[0,n-1,k-1]
            else:
                xd[0,n,k]=0
                for r in range(0,k):
                    add=xd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r))
                    xd[0,n,k] -= ((-1)**(k-r))*add
        xd[0,n,-2]=0; xd[0,n,-1]=0; xd[0,n,3*n//2+1]=0
    for mu in range(-2,der+1):
        for n in range(-2,L):
            for k in range(-3,max(1,3*n//2+2)):
                if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)):
                    xd[mu,n,k] = 0
    for mu in range(1,der+1):
        for n in range(0,L):
            ctx.prec = xwpd[n]+10
            for k in range(0,3*n//2+1):
                aux=(2*mu-2)*xd[mu-2,n-2,k-3]+2*(xsigma+n-2)*xd[mu-1,n-2,k-3]
                xd[mu,n,k] = aux - xd[mu-1,n-1,k-1]
    #  Now we compute the working precisions ywpd[k]
    #   See II equation (92)
    ywpd={}
    d1 = max(6,ctx.mag(40*L*L))
    yd2 = 13+ctx.mag((1+abs(ysigma))*yA)-ctx.mag(yeps4)-1
    yconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*yB1*yB1)) /2
    for n in range(0,L):
        yd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*yconst)+yd2
        ywpd[n]=max(yd3,d1)
    # procedure of II Section 3.17
    ctx.prec = ywpd[1]+10
    ypsigma = 1-(2*ysigma)
    yd = {}
    yd[0,0,-2]=0; yd[0,0,-1]=0; yd[0,0,0]=1; yd[0,0,1]=0
    yd[0,-1,-2]=0; yd[0,-1,-1]=0; yd[0,-1,0]=1; yd[0,-1,1]=0
    for n in range(1,L):
        ctx.prec = ywpd[n]+10
        for k in range(0,3*n//2+1):
            m = 3*n-2*k
            if(m!=0):
                m1 = ctx.one/m
                c1= m1/4
                c2=(ypsigma*m1)/2
                c3=-(m+1)
                yd[0,n,k]=c3*yd[0,n-1,k-2]+c1*yd[0,n-1,k]+c2*yd[0,n-1,k-1]
            else:
                yd[0,n,k]=0
                for r in range(0,k):
                    add=yd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r))
                    yd[0,n,k] -= ((-1)**(k-r))*add
        yd[0,n,-2]=0; yd[0,n,-1]=0; yd[0,n,3*n//2+1]=0
    for mu in range(-2,der+1):
        for n in range(-2,L):
            for k in range(-3,max(1,3*n//2+2)):
                if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)):
                    yd[mu,n,k] = 0
    for mu in range(1,der+1):
        for n in range(0,L):
            ctx.prec = ywpd[n]+10
            for k in range(0,3*n//2+1):
                aux=(2*mu-2)*yd[mu-2,n-2,k-3]+2*(ysigma+n-2)*yd[mu-1,n-2,k-3]
                yd[mu,n,k] = aux - yd[mu-1,n-1,k-1]
    # COMPUTING THE COEFFICIENTS xtcoef[k,l]
    #  See II Section 3.9
    #
    # computing the needed wp
    xwptcoef={}
    xwpterm={}
    ctx.prec = 15
    c1 = ctx.mag(40*(L+2))
    xc2 = ctx.mag(68*(L+2)*xA)
    xc4 = ctx.mag(xB1*a*math.sqrt(ctx.pi))-1
    for k in range(0,L):
        xc3 = xc2 - k*xc4+ctx.mag(ctx.fac(k+0.5))/2.
        xwptcoef[k] = (max(c1,xc3-ctx.mag(xeps4)+1)+1 +20)*1.5
        xwpterm[k] = (max(c1,ctx.mag(L+2)+xc3-ctx.mag(xeps3)+1)+1 +20)
    ywptcoef={}
    ywpterm={}
    ctx.prec = 15
    c1 = ctx.mag(40*(L+2))
    yc2 = ctx.mag(68*(L+2)*yA)
    yc4 = ctx.mag(yB1*a*math.sqrt(ctx.pi))-1
    for k in range(0,L):
        yc3 = yc2 - k*yc4+ctx.mag(ctx.fac(k+0.5))/2.
        ywptcoef[k] = ((max(c1,yc3-ctx.mag(yeps4)+1))+10)*1.5
        ywpterm[k] = (max(c1,ctx.mag(L+2)+yc3-ctx.mag(yeps3)+1)+1)+10
    # check of power of pi
    # computing the fortcoef[mu,k,ell]
    xfortcoef={}
    for mu in range(0,der+1):
        for k in range(0,L):
            for ell in range(-2,3*k//2+1):
                xfortcoef[mu,k,ell]=0
    for mu in range(0,der+1):
        for k in range(0,L):
            ctx.prec = xwptcoef[k]
            for ell in range(0,3*k//2+1):
                xfortcoef[mu,k,ell]=xd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
                xfortcoef[mu,k,ell]=xfortcoef[mu,k,ell]/((2*ctx.j)**ell)
    def trunc_a(t):
        # truncated a = sqrt(t/(2*pi)) at slightly raised working precision
        wp = ctx.prec
        ctx.prec = wp + 2
        aa = ctx.sqrt(t/(2*ctx.pi))
        ctx.prec = wp
        return aa
    # computing the tcoef[k,ell]
    xtcoef={}
    for mu in range(0,der+1):
        for k in range(0,L):
            for ell in range(-2,3*k//2+1):
                xtcoef[mu,k,ell]=0
    ctx.prec = max(xwptcoef[0],ywptcoef[0])+3
    aa= trunc_a(t)
    la = -ctx.ln(aa)
    for chi in range(0,der+1):
        for k in range(0,L):
            ctx.prec = xwptcoef[k]
            for ell in range(0,3*k//2+1):
                xtcoef[chi,k,ell] =0
                for mu in range(0, chi+1):
                    tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*xfortcoef[chi-mu,k,ell]
                    xtcoef[chi,k,ell] += tcoefter
    # COMPUTING THE COEFFICIENTS ytcoef[k,l]
    #  See II Section 3.9
    #
    # computing the needed wp
    # check of power of pi
    # computing the fortcoef[mu,k,ell]
    yfortcoef={}
    for mu in range(0,der+1):
        for k in range(0,L):
            for ell in range(-2,3*k//2+1):
                yfortcoef[mu,k,ell]=0
    for mu in range(0,der+1):
        for k in range(0,L):
            ctx.prec = ywptcoef[k]
            for ell in range(0,3*k//2+1):
                yfortcoef[mu,k,ell]=yd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
                yfortcoef[mu,k,ell]=yfortcoef[mu,k,ell]/((2*ctx.j)**ell)
    # computing the tcoef[k,ell]
    ytcoef={}
    for chi in range(0,der+1):
        for k in range(0,L):
            for ell in range(-2,3*k//2+1):
                ytcoef[chi,k,ell]=0
    for chi in range(0,der+1):
        for k in range(0,L):
            ctx.prec = ywptcoef[k]
            for ell in range(0,3*k//2+1):
                ytcoef[chi,k,ell] =0
                for mu in range(0, chi+1):
                    tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*yfortcoef[chi-mu,k,ell]
                    ytcoef[chi,k,ell] += tcoefter
    # COMPUTING tv[k,ell]
    #  See II Section 3.8
    #
    # a has a good value
    ctx.prec = max(xwptcoef[0], ywptcoef[0])+2
    av = {}
    av[0] = 1
    av[1] = av[0]/a
    ctx.prec = max(xwptcoef[0],ywptcoef[0])
    for k in range(2,L):
        av[k] = av[k-1] * av[1]
    # Computing the quotients
    xtv = {}
    for chi in range(0,der+1):
        for k in range(0,L):
            ctx.prec = xwptcoef[k]
            for ell in range(0,3*k//2+1):
                xtv[chi,k,ell] = xtcoef[chi,k,ell]* av[k]
    # Computing the quotients
    ytv = {}
    for chi in range(0,der+1):
        for k in range(0,L):
            ctx.prec = ywptcoef[k]
            for ell in range(0,3*k//2+1):
                ytv[chi,k,ell] = ytcoef[chi,k,ell]* av[k]
    # COMPUTING THE TERMS xterm[k]
    # See II Section 3.6
    xterm = {}
    for chi in range(0,der+1):
        for n in range(0,L):
            ctx.prec = xwpterm[n]
            te = 0
            for k in range(0, 3*n//2+1):
                te += xtv[chi,n,k]
            xterm[chi,n] = te
    # COMPUTING THE TERMS yterm[k]
    # See II Section 3.6
    yterm = {}
    for chi in range(0,der+1):
        for n in range(0,L):
            ctx.prec = ywpterm[n]
            te = 0
            for k in range(0, 3*n//2+1):
                te += ytv[chi,n,k]
            yterm[chi,n] = te
    # COMPUTING  rssum
    # See II Section 3.5
    xrssum={}
    ctx.prec=15
    xrsbound = math.sqrt(ctx.pi) * xc /(xb*a)
    ctx.prec=15
    xwprssum = ctx.mag(4.4*((L+3)**2)*xrsbound / xeps2)
    xwprssum = max(xwprssum, ctx.mag(10*(L+1)))
    ctx.prec = xwprssum
    for chi in range(0,der+1):
        xrssum[chi] = 0
        for k in range(1,L+1):
            xrssum[chi] += xterm[chi,L-k]
    yrssum={}
    ctx.prec=15
    yrsbound = math.sqrt(ctx.pi) * yc /(yb*a)
    ctx.prec=15
    ywprssum = ctx.mag(4.4*((L+3)**2)*yrsbound / yeps2)
    ywprssum = max(ywprssum, ctx.mag(10*(L+1)))
    ctx.prec = ywprssum
    for chi in range(0,der+1):
        yrssum[chi] = 0
        for k in range(1,L+1):
            yrssum[chi] += yterm[chi,L-k]
    # COMPUTING S3
    # See II Section 3.19
    ctx.prec = 15
    A2 = 2**(max(ctx.mag(abs(xrssum[0])), ctx.mag(abs(yrssum[0]))))
    eps8 = eps/(3*A2)
    T = t *ctx.ln(t/(2*ctx.pi))
    xwps3 = 5 +  ctx.mag((1+(2/eps8)*ctx.power(a,-xsigma))*T)
    ywps3 = 5 +  ctx.mag((1+(2/eps8)*ctx.power(a,-ysigma))*T)
    ctx.prec = max(xwps3, ywps3)
    tpi = t/(2*ctx.pi)
    arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8
    U = ctx.expj(-arg)
    a = trunc_a(t)
    xasigma = ctx.power(a, -xsigma)
    yasigma = ctx.power(a, -ysigma)
    xS3 = ((-1)**(N-1)) * xasigma * U
    yS3 = ((-1)**(N-1)) * yasigma * U
    # COMPUTING S1 the zetasum
    # See II Section 3.18
    ctx.prec = 15
    xwpsum =  4+ ctx.mag((N+ctx.power(N,1-xsigma))*ctx.ln(N) /eps1)
    ywpsum =  4+ ctx.mag((N+ctx.power(N,1-ysigma))*ctx.ln(N) /eps1)
    wpsum = max(xwpsum, ywpsum)
    ctx.prec = wpsum +10
    '''
    # This can be improved
    xS1={}
    yS1={}
    for chi in range(0,der+1):
        xS1[chi] = 0
        yS1[chi] = 0
    for n in range(1,int(N)+1):
        ln = ctx.ln(n)
        xexpn = ctx.exp(-ln*(xsigma+ctx.j*t))
        yexpn = ctx.conj(1/(n*xexpn))
        for chi in range(0,der+1):
            pown = ctx.power(-ln, chi)
            xterm = pown*xexpn
            yterm = pown*yexpn
            xS1[chi] += xterm
            yS1[chi] += yterm
    '''
    # _zetasum replaces the commented-out reference loop above
    xS1, yS1 = ctx._zetasum(s, 1, int(N)-1, range(0,der+1), True)
    # END OF COMPUTATION of xrz, yrz
    #  See II Section 3.1
    ctx.prec = 15
    xabsS1 = abs(xS1[der])
    xabsS2 = abs(xrssum[der] * xS3)
    xwpend = max(6, wpinitial+ctx.mag(6*(3*xabsS1+7*xabsS2) ) )
    ctx.prec = xwpend
    xrz={}
    for chi in range(0,der+1):
        xrz[chi] = xS1[chi]+xrssum[chi]*xS3
    ctx.prec = 15
    yabsS1 = abs(yS1[der])
    yabsS2 = abs(yrssum[der] * yS3)
    ywpend = max(6, wpinitial+ctx.mag(6*(3*yabsS1+7*yabsS2) ) )
    ctx.prec = ywpend
    yrz={}
    for chi in range(0,der+1):
        yrz[chi] = yS1[chi]+yrssum[chi]*yS3
        yrz[chi] = ctx.conj(yrz[chi])
    ctx.prec = wpinitial
    return xrz, yrz
def Rzeta_set(ctx, s, derivatives=[0]):
    r"""
    Computes several derivatives of the auxiliary function of Riemann `R(s)`.
    **Definition**
    The function is defined by
    .. math ::
        \begin{equation}
        {\mathop{\mathcal R }\nolimits}(s)=
        \int_{0\swarrow1}\frac{x^{-s} e^{\pi i x^2}}{e^{\pi i x}-
        e^{-\pi i x}}\,dx
        \end{equation}
    To this function we apply the  Riemann-Siegel expansion.

    Returns a dict ``rz`` with ``rz[chi] = R^(chi)(s)`` for each chi in
    *derivatives*.  The "I Section ..." / "II Section ..." comments cite
    the reference papers this implementation follows.  Raises
    NotImplementedError when the requested precision is out of reach.
    ``ctx.prec`` is restored before returning.
    """
    der = max(derivatives)
    # First we take the value of ctx.prec
    # During the computation we will change ctx.prec, and finally we will
    # restore the initial value
    wpinitial = ctx.prec
    # Take the real and imaginary part of s
    t = ctx._im(s)
    sigma = ctx._re(s)
    # Now compute several parameter that appear on the program
    ctx.prec = 15
    a = ctx.sqrt(t/(2*ctx.pi))     #  Careful
    asigma = ctx.power(a, sigma)  #  Careful
    # We need a simple bound A1 < asigma  (see II Section 3.1 and 3.3)
    A1 = ctx.power(2, ctx.mag(asigma)-1)
    # We compute various epsilon's  (see II end of Section 3.1)
    eps = ctx.power(2, -wpinitial)
    eps1 = eps/6.
    eps2 = eps * A1/3.
    # COMPUTING SOME COEFFICIENTS THAT DEPENDS
    #         ON  sigma
    # constant b and c  (see I  Theorem 2 formula (26) )
    # coefficients A and B1  (see I Section 6.1 equation (50))
    # here we do not need high precision
    ctx.prec = 15
    if sigma > 0:
        b = 2.
        c = math.pow(9,sigma)/4.44288
        # 4.44288 =(math.sqrt(2)*math.pi)
        A = math.pow(9,sigma)
        B1 = 1
    else:
        b = 2.25158  #  math.sqrt( (3-2* math.log(2))*math.pi )
        c = math.pow(2,-sigma)/4.44288
        A = math.pow(2,-sigma)
        B1 = 1.10789   #  = 2*sqrt(1-log(2))
    # COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL
    #         CORRECTION
    # See II Section 3.2
    ctx.prec = 15
    L = 1
    while 3*c*ctx.gamma(L*0.5) * ctx.power(b*a,-L) >= eps2:
        L = L+1
    L = max(2,L)
    # The number L has to satisfy some conditions.
    # If not RS can not compute Rzeta(s) with the prescribed precision
    # (see II, Section 3.2 condition (20)  ) and
    # (II, Section 3.3 condition (22) ). Also we have added
    # an additional technical  condition in Section 3.17 Proposition 17
    if ((3*L >= 2*a*a/25.) or (3*L+2+sigma<0) or (abs(sigma)> a/2.)):
        #print 'Error Riemann-Siegel can not compute with such precision'
        ctx.prec = wpinitial
        raise NotImplementedError("Riemann-Siegel can not compute with such precision")
    # INITIALIZATION (CONTINUATION)
    #
    # eps3 is the constant defined on (II, Section 3.5 equation (27) )
    # each term of the RS correction must be computed with error <= eps3
    eps3 =  eps2/(4*L)
    # eps4 is defined on (II Section 3.6  equation (30) )
    # each component of the formula (II Section 3.6 equation (29) )
    # must be computed with error <= eps4
    eps4 = eps3/(3*L)
    # COMPUTING M.  NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
    M = aux_M_Fp(ctx, A, eps4, a, B1, L)
    Fp = {}
    for n in range(M, 3*L-2):
        Fp[n] = 0
    #  But I have not seen an instance of  M != 3*L-3
    #
    #  DETERMINATION OF  J  THE NUMBER OF TERMS NEEDED
    #          IN THE TAYLOR SERIES OF F.
    #  See II Section 3.11 equation (49))
    h1 = eps4/(632*A)
    h2 = ctx.pi*ctx.pi*B1*a *ctx.sqrt(3)*math.e*math.e
    h2 = h1 * ctx.power((h2/M**2),(M-1)/3) / M
    h3 = min(h1,h2)
    J=12
    jvalue = (2*ctx.pi)**J / ctx.gamma(J+1)
    while jvalue > h3:
        J = J+1
        jvalue = (2*ctx.pi)*jvalue/J
    # COMPUTING eps5[m] for 1 <= m <= 21
    #  See II Section 10 equation (43)
    eps5={}
    foreps5 = math.pi*math.pi*B1*a
    for m in range(0,22):
        aux1 = math.pow(foreps5, m/3)/(316.*A)
        aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5)
        aux2 = math.sqrt(aux2)
        eps5[m] = aux1*aux2*eps4
    # COMPUTING wpfp
    #  See II Section 3.13 equation (59)
    twenty = min(3*L-3, 21)+1
    aux = 6812*J
    wpfp = ctx.mag(44*J)
    for m in range(0, twenty):
        wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m]))
    # COMPUTING N AND p
    #  See II Section
    ctx.prec = wpfp + ctx.mag(t) + 20
    a = ctx.sqrt(t / (2*ctx.pi))
    N = ctx.floor(a)
    p = 1 - 2*(a-N)
    # now we get a rounded version of p to the precision wpfp
    # this possibly is not necessary
    num = ctx.floor(p*(ctx.mpf(2)**wpfp))
    difference = p * (ctx.mpf(2)**wpfp)-num
    if difference < 0.5:
        num = num
    else:
        num = num+1
    p = ctx.convert(num * (ctx.mpf(2)**(-wpfp)))
    # COMPUTING THE COEFFICIENTS c[n] = cc[n]
    # We shall use the notation cc[n], since there is
    # a constant that is called c
    # See II Section 3.14
    # We compute the coefficients and also save then in a
    # cache.  The bulk of the computation is passed to
    # the function  coef()
    #
    #  eps6 is defined in II Section 3.13  equation (58)
    eps6 = ctx.power(2*ctx.pi, J)/(ctx.gamma(J+1)*3*J)
    #  Now we compute the coefficients
    cc={}
    cont={}
    cont, pipowers = coef(ctx, J, eps6)
    cc = cont.copy()  # we need a copy since we have to change its values
    Fp={}
    for n in range(M, 3*L-2):
        Fp[n] = 0
    ctx.prec = wpfp
    for m in range(0,M+1):
        sumP = 0
        for k in range(2*J-m-1,-1,-1):
            sumP = (sumP * p) + cc[k]
        Fp[m] = sumP
        # preparation of the new coefficients
        for k in range(0, 2*J-m-1):
            cc[k] = (k+1) * cc[k+1]
    # COMPUTING THE NUMBERS d[n,k]
    #  See II Section 3.17
    #  First we compute the working precisions wpd[k]
    #  See II equation (92)
    wpd = {}
    d1 = max(6, ctx.mag(40*L*L))
    d2 = 13+ctx.mag((1+abs(sigma))*A)-ctx.mag(eps4)-1
    const = ctx.ln(8/(ctx.pi*ctx.pi*a*a*B1*B1)) /2
    for n in range(0,L):
        d3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*const)+d2
        wpd[n] = max(d3,d1)
    # procedure of II Section 3.17
    ctx.prec = wpd[1]+10
    psigma = 1-(2*sigma)
    d = {}
    d[0,0,-2]=0; d[0,0,-1]=0; d[0,0,0]=1; d[0,0,1]=0
    d[0,-1,-2]=0; d[0,-1,-1]=0; d[0,-1,0]=1; d[0,-1,1]=0
    for n in range(1,L):
        ctx.prec = wpd[n]+10
        for k in range(0,3*n//2+1):
            m = 3*n-2*k
            if (m!=0):
                m1 = ctx.one/m
                c1 = m1/4
                c2 = (psigma*m1)/2
                c3 = -(m+1)
                d[0,n,k] = c3*d[0,n-1,k-2]+c1*d[0,n-1,k]+c2*d[0,n-1,k-1]
            else:
                d[0,n,k]=0
                for r in range(0,k):
                    add = d[0,n,r]*(ctx.one*ctx.fac(2*k-2*r)/ctx.fac(k-r))
                    d[0,n,k] -= ((-1)**(k-r))*add
        d[0,n,-2]=0; d[0,n,-1]=0; d[0,n,3*n//2+1]=0
    for mu in range(-2,der+1):
        for n in range(-2,L):
            for k in range(-3,max(1,3*n//2+2)):
                if ((mu<0)or (n<0) or(k<0)or (k>3*n//2)):
                    d[mu,n,k] = 0
    for mu in range(1,der+1):
        for n in range(0,L):
            ctx.prec = wpd[n]+10
            for k in range(0,3*n//2+1):
                aux=(2*mu-2)*d[mu-2,n-2,k-3]+2*(sigma+n-2)*d[mu-1,n-2,k-3]
                d[mu,n,k] = aux - d[mu-1,n-1,k-1]
    # COMPUTING THE COEFFICIENTS t[k,l]
    #  See II Section 3.9
    #
    # computing the needed wp
    wptcoef = {}
    wpterm = {}
    ctx.prec = 15
    c1 = ctx.mag(40*(L+2))
    c2 = ctx.mag(68*(L+2)*A)
    c4 = ctx.mag(B1*a*math.sqrt(ctx.pi))-1
    for k in range(0,L):
        c3 = c2 - k*c4+ctx.mag(ctx.fac(k+0.5))/2.
        wptcoef[k] = max(c1,c3-ctx.mag(eps4)+1)+1 +10
        wpterm[k] = max(c1,ctx.mag(L+2)+c3-ctx.mag(eps3)+1)+1 +10
    # check of power of pi
    # computing the fortcoef[mu,k,ell]
    fortcoef={}
    for mu in derivatives:
        for k in range(0,L):
            for ell in range(-2,3*k//2+1):
                fortcoef[mu,k,ell]=0
    for mu in derivatives:
        for k in range(0,L):
            ctx.prec = wptcoef[k]
            for ell in range(0,3*k//2+1):
                fortcoef[mu,k,ell]=d[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
                fortcoef[mu,k,ell]=fortcoef[mu,k,ell]/((2*ctx.j)**ell)
    def trunc_a(t):
        # truncated a = sqrt(t/(2*pi)) at slightly raised working precision
        wp = ctx.prec
        ctx.prec = wp + 2
        aa = ctx.sqrt(t/(2*ctx.pi))
        ctx.prec = wp
        return aa
    # computing the tcoef[chi,k,ell]
    tcoef={}
    for chi in derivatives:
        for k in range(0,L):
            for ell in range(-2,3*k//2+1):
                tcoef[chi,k,ell]=0
    ctx.prec = wptcoef[0]+3
    aa = trunc_a(t)
    la = -ctx.ln(aa)
    for chi in derivatives:
        for k in range(0,L):
            ctx.prec = wptcoef[k]
            for ell in range(0,3*k//2+1):
                tcoef[chi,k,ell] = 0
                for mu in range(0, chi+1):
                    tcoefter = ctx.binomial(chi,mu) * la**mu * \
                        fortcoef[chi-mu,k,ell]
                    tcoef[chi,k,ell] += tcoefter
    # COMPUTING tv[k,ell]
    #  See II Section 3.8
    # Computing the powers av[k] = a**(-k)
    ctx.prec = wptcoef[0] + 2
    # a has a good value of a.
    # See II Section 3.6
    av = {}
    av[0] = 1
    av[1] = av[0]/a
    ctx.prec = wptcoef[0]
    for k in range(2,L):
        av[k] = av[k-1] * av[1]
    # Computing the quotients
    tv = {}
    for chi in derivatives:
        for k in range(0,L):
            ctx.prec = wptcoef[k]
            for ell in range(0,3*k//2+1):
                tv[chi,k,ell] = tcoef[chi,k,ell]* av[k]
    # COMPUTING THE TERMS term[k]
    # See II Section 3.6
    term = {}
    for chi in derivatives:
        for n in range(0,L):
            ctx.prec = wpterm[n]
            te = 0
            for k in range(0, 3*n//2+1):
                te += tv[chi,n,k]
            term[chi,n] = te
    # COMPUTING  rssum
    # See II Section 3.5
    rssum={}
    ctx.prec=15
    rsbound = math.sqrt(ctx.pi) * c /(b*a)
    ctx.prec=15
    wprssum = ctx.mag(4.4*((L+3)**2)*rsbound / eps2)
    wprssum = max(wprssum, ctx.mag(10*(L+1)))
    ctx.prec = wprssum
    for chi in derivatives:
        rssum[chi] = 0
        for k in range(1,L+1):
            rssum[chi] += term[chi,L-k]
    # COMPUTING S3
    # See II Section 3.19
    ctx.prec = 15
    A2 = 2**(ctx.mag(rssum[0]))
    eps8 = eps/(3* A2)
    T = t * ctx.ln(t/(2*ctx.pi))
    wps3 = 5 +  ctx.mag((1+(2/eps8)*ctx.power(a,-sigma))*T)
    ctx.prec = wps3
    tpi = t/(2*ctx.pi)
    arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8
    U = ctx.expj(-arg)
    a = trunc_a(t)
    asigma = ctx.power(a, -sigma)
    S3 = ((-1)**(N-1)) * asigma * U
    # COMPUTING S1 the zetasum
    # See II Section 3.18
    ctx.prec = 15
    wpsum = 4 + ctx.mag((N+ctx.power(N,1-sigma))*ctx.ln(N)/eps1)
    ctx.prec = wpsum + 10
    '''
    # This can be improved
    S1 = {}
    for chi in derivatives:
        S1[chi] = 0
    for n in range(1,int(N)+1):
        ln = ctx.ln(n)
        expn = ctx.exp(-ln*(sigma+ctx.j*t))
        for chi in derivatives:
            term = ctx.power(-ln, chi)*expn
            S1[chi] += term
    '''
    # _zetasum replaces the commented-out reference loop above
    S1 = ctx._zetasum(s, 1, int(N)-1, derivatives)[0]
    # END OF COMPUTATION
    #  See II Section 3.1
    ctx.prec = 15
    absS1 = abs(S1[der])
    absS2 = abs(rssum[der] * S3)
    wpend = max(6, wpinitial + ctx.mag(6*(3*absS1+7*absS2)))
    ctx.prec = wpend
    rz = {}
    for chi in derivatives:
        rz[chi] = S1[chi]+rssum[chi]*S3
    ctx.prec = wpinitial
    return rz
def z_half(ctx,t,der=0):
    r"""
    z_half(t,der=0) Computes Z^(der)(t) on the critical line.

    Builds s = 1/2 + i*t, obtains R(s) and its derivatives from
    Rzeta_set, and assembles the requested derivative (der <= 4) of
    Z(t) = Re(2*exp(i*theta(t))*R(s)) via the chain rule.  The real
    result is returned and ctx.prec is restored.
    """
    s=ctx.mpf('0.5')+ctx.j*t
    wpinitial = ctx.prec
    ctx.prec = 15
    tt = t/(2*ctx.pi)
    # working precisions for theta and for the R-values (heuristic bounds)
    wptheta = wpinitial +1 + ctx.mag(3*(tt**1.5)*ctx.ln(tt))
    wpz = wpinitial + 1 + ctx.mag(12*tt*ctx.ln(tt))
    ctx.prec = wptheta
    theta = ctx.siegeltheta(t)
    ctx.prec = wpz
    rz = Rzeta_set(ctx,s, range(der+1))
    # ps1..ps4: polygamma-based terms psi(k-1, s/2) entering the chain rule
    # below; only the ones actually needed for `der` are computed.
    if der > 0: ps1 = ctx._re(ctx.psi(0,s/2)/2 - ctx.ln(ctx.pi)/2)
    if der > 1: ps2 = ctx._re(ctx.j*ctx.psi(1,s/2)/4)
    if der > 2: ps3 = ctx._re(-ctx.psi(2,s/2)/8)
    if der > 3: ps4 = ctx._re(-ctx.j*ctx.psi(3,s/2)/16)
    exptheta = ctx.expj(theta)
    # Assemble Z^(der)(t); one branch per supported derivative order.
    if der == 0:
        z = 2*exptheta*rz[0]
    if der == 1:
        zf = 2j*exptheta
        z = zf*(ps1*rz[0]+rz[1])
    if der == 2:
        zf = 2 * exptheta
        z = -zf*(2*rz[1]*ps1+rz[0]*ps1**2+rz[2]-ctx.j*rz[0]*ps2)
    if der == 3:
        zf = -2j*exptheta
        z = 3*rz[1]*ps1**2+rz[0]*ps1**3+3*ps1*rz[2]
        z = zf*(z-3j*rz[1]*ps2-3j*rz[0]*ps1*ps2+rz[3]-rz[0]*ps3)
    if der == 4:
        zf = 2*exptheta
        z = 4*rz[1]*ps1**3+rz[0]*ps1**4+6*ps1**2*rz[2]
        z = z-12j*rz[1]*ps1*ps2-6j*rz[0]*ps1**2*ps2-6j*rz[2]*ps2-3*rz[0]*ps2*ps2
        z = z + 4*ps1*rz[3]-4*rz[1]*ps3-4*rz[0]*ps1*ps3+rz[4]+ctx.j*rz[0]*ps4
        z = zf*z
    ctx.prec = wpinitial
    return ctx._re(z)
def zeta_half(ctx, s, k=0):
    """
    zeta_half(s,k=0) Computes zeta^(k)(s) when Re s = 0.5

    On the critical line the reflected R-value is just the conjugate of
    R(s), so a single Rzeta_set call suffices.  Supported for k <= 4;
    ctx.prec is restored before returning.
    """
    wpinitial = ctx.prec
    sigma = ctx._re(s)
    t = ctx._im(s)
    #--- compute wptheta, wpR, wpbasic ---
    ctx.prec = 53
    #  X see II Section 3.21 (109) and (110)
    if sigma > 0:
        X = ctx.sqrt(abs(s))
    else:
        X = (2*ctx.pi)**(sigma-1) * abs(1-s)**(0.5-sigma)
    # M1  see II Section 3.21 (111) and (112)
    if sigma > 0:
        M1 = 2*ctx.sqrt(t/(2*ctx.pi))
    else:
        M1 = 4 * t * X
    # T  see II Section 3.21 (113)
    abst = abs(0.5-s)
    T = 2* abst*math.log(abst)
    # computing wpbasic, wptheta, wpR  see II Section 3.21
    wpbasic = max(6,3+ctx.mag(t))
    wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M1*X+1.3*M1*X*T)+wpinitial+1
    wpbasic = max(wpbasic, wpbasic2)
    wptheta = max(4, 3+ctx.mag(2.7*M1*X)+wpinitial+1)
    wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1
    ctx.prec = wptheta
    theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5')))
    # ps1..ps4: polygamma-based terms psi(k-1, s/2) entering the chain
    # rule below; only the ones actually needed for `k` are computed.
    if k > 0: ps1 = (ctx._re(ctx.psi(0,s/2)))/2 - ctx.ln(ctx.pi)/2
    if k > 1: ps2 = -(ctx._im(ctx.psi(1,s/2)))/4
    if k > 2: ps3 = -(ctx._re(ctx.psi(2,s/2)))/8
    if k > 3: ps4 = (ctx._im(ctx.psi(3,s/2)))/16
    ctx.prec = wpR
    xrz = Rzeta_set(ctx,s,range(k+1))
    # On the critical line R at the reflected point is conj(R(s)).
    yrz={}
    for chi in range(0,k+1):
        yrz[chi] = ctx.conj(xrz[chi])
    ctx.prec = wpbasic
    exptheta = ctx.expj(-2*theta)
    # Combine both halves of the approximate functional equation;
    # one branch per supported derivative order.
    if k==0:
        zv = xrz[0]+exptheta*yrz[0]
    if k==1:
        zv1 = -yrz[1] - 2*yrz[0]*ps1
        zv = xrz[1] + exptheta*zv1
    if k==2:
        zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2)+yrz[2]+2j*yrz[0]*ps2
        zv = xrz[2]+exptheta*zv1
    if k==3:
        zv1 = -12*yrz[1]*ps1**2-8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2
        zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3
        zv = xrz[3]+exptheta*zv1
    if k == 4:
        zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2
        zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2
        zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3
        zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4
        zv = xrz[4]+exptheta*zv1
    ctx.prec = wpinitial
    return zv
def zeta_offline(ctx, s, k=0):
    """
    Computes zeta^(k)(s) off the critical line (Re s != 0.5).

    Uses Rzeta_simul to obtain R and its derivatives at s and at the
    reflected point simultaneously, then combines them through the
    approximate functional equation.  Supported for k <= 4; ctx.prec is
    restored before returning.
    """
    wpinitial = ctx.prec
    sigma = ctx._re(s)
    t = ctx._im(s)
    #--- compute wptheta, wpR, wpbasic ---
    ctx.prec = 53
    #  X see II Section 3.21 (109) and (110)
    if sigma > 0:
        X = ctx.power(abs(s), 0.5)
    else:
        X = ctx.power(2*ctx.pi, sigma-1)*ctx.power(abs(1-s),0.5-sigma)
    # M1  see II Section 3.21 (111) and (112)
    if (sigma > 0):
        M1 = 2*ctx.sqrt(t/(2*ctx.pi))
    else:
        M1 = 4 * t * X
    # M2  see II Section 3.21 (111) and (112)
    if (1-sigma > 0):
        M2 = 2*ctx.sqrt(t/(2*ctx.pi))
    else:
        M2 = 4*t*ctx.power(2*ctx.pi, -sigma)*ctx.power(abs(s),sigma-0.5)
    # T  see II Section 3.21 (113)
    abst = abs(0.5-s)
    T = 2* abst*math.log(abst)
    # computing wpbasic, wptheta, wpR  see II Section 3.21
    wpbasic = max(6,3+ctx.mag(t))
    wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M2*X+1.3*M2*X*T)+wpinitial+1
    wpbasic = max(wpbasic, wpbasic2)
    wptheta = max(4, 3+ctx.mag(2.7*M2*X)+wpinitial+1)
    wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1
    ctx.prec = wptheta
    theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5')))
    s1 = s
    s2 = ctx.conj(1-s1)
    ctx.prec = wpR
    xrz, yrz = Rzeta_simul(ctx, s, k)
    # ps1..ps4: polygamma-based terms built from psi(k-1, .) at s1/2 and
    # (1-s1)/2, entering the chain rule below; only the ones actually
    # needed for `k` are computed.
    if k > 0: ps1 = (ctx.psi(0,s1/2)+ctx.psi(0,(1-s1)/2))/4 - ctx.ln(ctx.pi)/2
    if k > 1: ps2 = ctx.j*(ctx.psi(1,s1/2)-ctx.psi(1,(1-s1)/2))/8
    if k > 2: ps3 = -(ctx.psi(2,s1/2)+ctx.psi(2,(1-s1)/2))/16
    if k > 3: ps4 = -ctx.j*(ctx.psi(3,s1/2)-ctx.psi(3,(1-s1)/2))/32
    ctx.prec = wpbasic
    exptheta = ctx.expj(-2*theta)
    # Combine both halves of the approximate functional equation;
    # one branch per supported derivative order.
    if k == 0:
        zv = xrz[0]+exptheta*yrz[0]
    if k == 1:
        zv1 = -yrz[1]-2*yrz[0]*ps1
        zv = xrz[1]+exptheta*zv1
    if k == 2:
        zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2) +yrz[2]+2j*yrz[0]*ps2
        zv = xrz[2]+exptheta*zv1
    if k == 3:
        zv1 = -12*yrz[1]*ps1**2 -8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2
        zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3
        zv = xrz[3]+exptheta*zv1
    if k == 4:
        zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2
        zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2
        zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3
        zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4
        zv = xrz[4]+exptheta*zv1
    ctx.prec = wpinitial
    return zv
def z_offline(ctx, w, k=0):
    r"""
    Computes Z(w) and its derivatives off the line (Im w != 0).

    Builds s = 1/2 + i*w, obtains R and its derivatives at s and at the
    reflected point via Rzeta_simul, and combines them with the theta
    factor.  Supported for k <= 4; ctx.prec is restored before returning.
    """
    s = ctx.mpf('0.5')+ctx.j*w
    s1 = s
    s2 = ctx.conj(1-s1)
    wpinitial = ctx.prec
    ctx.prec = 35
    #  X see II Section 3.21 (109) and (110)
    # M1  see II Section 3.21 (111) and (112)
    if (ctx._re(s1) >= 0):
        M1 = 2*ctx.sqrt(ctx._im(s1)/(2 * ctx.pi))
        X = ctx.sqrt(abs(s1))
    else:
        X = (2*ctx.pi)**(ctx._re(s1)-1) * abs(1-s1)**(0.5-ctx._re(s1))
        M1 = 4 * ctx._im(s1)*X
    # M2  see II Section 3.21 (111) and (112)
    if (ctx._re(s2) >= 0):
        M2 = 2*ctx.sqrt(ctx._im(s2)/(2 * ctx.pi))
    else:
        M2 = 4 * ctx._im(s2)*(2*ctx.pi)**(ctx._re(s2)-1)*abs(1-s2)**(0.5-ctx._re(s2))
    # T  see II Section 3.21 Prop. 27
    T = 2*abs(ctx.siegeltheta(w))
    # defining some precisions
    # see II Section 3.22 (115), (116), (117)
    aux1 = ctx.sqrt(X)
    aux2 = aux1*(M1+M2)
    aux3 = 3 +wpinitial
    wpbasic = max(6, 3+ctx.mag(T), ctx.mag(aux2*(26+2*T))+aux3)
    wptheta = max(4,ctx.mag(2.04*aux2)+aux3)
    wpR = ctx.mag(4*aux1)+aux3
    # now the computations
    ctx.prec = wptheta
    theta = ctx.siegeltheta(w)
    ctx.prec = wpR
    xrz, yrz = Rzeta_simul(ctx,s,k)
    pta = 0.25 + 0.5j*w
    ptb = 0.25 - 0.5j*w
    # ps1..ps4: polygamma-based terms built from psi(k-1, .) at pta and
    # ptb, entering the chain rule below; only the ones actually needed
    # for `k` are computed.
    if k > 0: ps1 = 0.25*(ctx.psi(0,pta)+ctx.psi(0,ptb)) - ctx.ln(ctx.pi)/2
    if k > 1: ps2 = (1j/8)*(ctx.psi(1,pta)-ctx.psi(1,ptb))
    if k > 2: ps3 = (-1./16)*(ctx.psi(2,pta)+ctx.psi(2,ptb))
    if k > 3: ps4 = (-1j/32)*(ctx.psi(3,pta)-ctx.psi(3,ptb))
    ctx.prec = wpbasic
    exptheta = ctx.expj(theta)
    # Combine both halves; one branch per supported derivative order.
    if k == 0:
        zv = exptheta*xrz[0]+yrz[0]/exptheta
    j = ctx.j  # imaginary unit, used by the derivative branches below
    if k == 1:
        zv = j*exptheta*(xrz[1]+xrz[0]*ps1)-j*(yrz[1]+yrz[0]*ps1)/exptheta
    if k == 2:
        zv = exptheta*(-2*xrz[1]*ps1-xrz[0]*ps1**2-xrz[2]+j*xrz[0]*ps2)
        zv =zv + (-2*yrz[1]*ps1-yrz[0]*ps1**2-yrz[2]-j*yrz[0]*ps2)/exptheta
    if k == 3:
        zv1 = -3*xrz[1]*ps1**2-xrz[0]*ps1**3-3*xrz[2]*ps1+j*3*xrz[1]*ps2
        zv1 = (zv1+ 3j*xrz[0]*ps1*ps2-xrz[3]+xrz[0]*ps3)*j*exptheta
        zv2 = 3*yrz[1]*ps1**2+yrz[0]*ps1**3+3*yrz[2]*ps1+j*3*yrz[1]*ps2
        zv2 = j*(zv2 + 3j*yrz[0]*ps1*ps2+ yrz[3]-yrz[0]*ps3)/exptheta
        zv = zv1+zv2
    if k == 4:
        zv1 = 4*xrz[1]*ps1**3+xrz[0]*ps1**4 + 6*xrz[2]*ps1**2
        zv1 = zv1-12j*xrz[1]*ps1*ps2-6j*xrz[0]*ps1**2*ps2-6j*xrz[2]*ps2
        zv1 = zv1-3*xrz[0]*ps2*ps2+4*xrz[3]*ps1-4*xrz[1]*ps3-4*xrz[0]*ps1*ps3
        zv1 = zv1+xrz[4]+j*xrz[0]*ps4
        zv2 = 4*yrz[1]*ps1**3+yrz[0]*ps1**4 + 6*yrz[2]*ps1**2
        zv2 = zv2+12j*yrz[1]*ps1*ps2+6j*yrz[0]*ps1**2*ps2+6j*yrz[2]*ps2
        zv2 = zv2-3*yrz[0]*ps2*ps2+4*yrz[3]*ps1-4*yrz[1]*ps3-4*yrz[0]*ps1*ps3
        zv2 = zv2+yrz[4]-j*yrz[0]*ps4
        zv = exptheta*zv1+zv2/exptheta
    ctx.prec = wpinitial
    return zv
@defun
def rs_zeta(ctx, s, derivative=0, **kwargs):
    """Compute zeta^(derivative)(s) (derivative <= 4) by the
    Riemann-Siegel expansion, dispatching to the critical-line or
    off-line routine as appropriate.  Extra keyword arguments are
    accepted and ignored."""
    if derivative > 4:
        raise NotImplementedError
    s = ctx.convert(s)
    re = ctx._re(s)
    im = ctx._im(s)
    # For im < 0 use the reflection zeta^(k)(conj(s)) = conj(zeta^(k)(s)).
    if im < 0:
        return ctx.conj(ctx.rs_zeta(ctx.conj(s), derivative))
    if re == 0.5:
        return zeta_half(ctx, s, derivative)
    return zeta_offline(ctx, s, derivative)
@defun
def rs_z(ctx, w, derivative=0):
    """Compute the Riemann-Siegel Z function Z(w), or its derivative
    Z^(derivative)(w) for derivative <= 4, dispatching to the
    critical-line or off-line routine."""
    w = ctx.convert(w)
    re = ctx._re(w); im = ctx._im(w)
    if re < 0:
        # Z is an even function, so Z^(k)(w) = (-1)**k * Z^(k)(-w).
        # Recurse through the context attribute: @defun rebinds the
        # module-level name `rs_z`, so calling it directly here would
        # fail (this branch previously crashed) and would also drop the
        # parity sign for odd derivatives.
        return (-1)**derivative * ctx.rs_z(-w, derivative)
    critical_line = (im == 0)
    if critical_line:
        return z_half(ctx, w, derivative)
    else:
        return z_offline(ctx, w, derivative)
| wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/sympy/mpmath/functions/rszeta.py | Python | mit | 46,233 |
# Sample script whose module-level globals cover several value types;
# presumably used as a fixture for flag detection from globals -- confirm
# against the consuming tests.
f_int = 6
f_float = 7.0
f_bool = False
f_str = "hi"
f_list = [1, True, "bye"]
other_d = {}  # dict-valued global
_private_i = 123  # leading underscore: presumably excluded from flags -- confirm
# Echo each value so the run output reflects the assignments above.
print("f_int: {}".format(f_int))
print("f_float: {}".format(f_float))
print("f_bool: {}".format(f_bool))
print("f_str: {}".format(f_str))
print("f_list: {}".format(f_list))
| guildai/guild | guild/tests/samples/projects/flags/main_globals.py | Python | apache-2.0 | 284 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLCreateDatasetOperator,
AutoMLDeleteDatasetOperator,
AutoMLDeleteModelOperator,
AutoMLImportDataOperator,
AutoMLTrainModelOperator,
)
# Configuration pulled from the environment; override these variables before
# running the example against a real project.
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
# The default bucket name is deliberately invalid so the example fails fast
# if the env var is not set.
GCP_AUTOML_SENTIMENT_BUCKET = os.environ.get("GCP_AUTOML_SENTIMENT_BUCKET", "gs://INVALID BUCKET NAME")
# Example values
DATASET_ID = ""
# Example model; dataset_id is patched at DAG-definition time below.
MODEL = {
    "display_name": "auto_model_1",
    "dataset_id": DATASET_ID,
    "text_sentiment_model_metadata": {},
}
# Example dataset
DATASET = {
    "display_name": "test_text_sentiment_dataset",
    "text_sentiment_dataset_metadata": {"sentiment_max": 10},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_SENTIMENT_BUCKET]}}
# Exposed as a user-defined macro so templates can extract object ids.
extract_object_id = CloudAutoMLHook.extract_object_id
# Example DAG for AutoML Natural Language Text Sentiment
with models.DAG(
    "example_automl_text_sentiment",
    schedule_interval=None,  # Override to match your needs
    start_date=datetime(2021, 1, 1),
    catchup=False,
    user_defined_macros={"extract_object_id": extract_object_id},
    tags=['example'],
) as example_dag:
    # Create an empty AutoML text-sentiment dataset.
    create_dataset_task = AutoMLCreateDatasetOperator(
        task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
    )
    # XComArg: resolved at runtime to the id of the dataset created above;
    # referencing it also creates the task dependency implicitly.
    dataset_id = create_dataset_task.output['dataset_id']
    import_dataset_task = AutoMLImportDataOperator(
        task_id="import_dataset_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        input_config=IMPORT_INPUT_CONFIG,
    )
    # Patch the model spec with the runtime dataset id before training.
    MODEL["dataset_id"] = dataset_id
    create_model = AutoMLTrainModelOperator(task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION)
    model_id = create_model.output['model_id']
    # Cleanup tasks: delete the trained model, then the dataset.
    delete_model_task = AutoMLDeleteModelOperator(
        task_id="delete_model_task",
        model_id=model_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    delete_datasets_task = AutoMLDeleteDatasetOperator(
        task_id="delete_datasets_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # Explicit ordering for edges not implied by XComArg references.
    import_dataset_task >> create_model
    delete_model_task >> delete_datasets_task
    # Task dependencies created via `XComArgs`:
    # create_dataset_task >> import_dataset_task
    # create_dataset_task >> create_model
    # create_model >> delete_model_task
    # create_dataset_task >> delete_datasets_task
| Acehaidrey/incubator-airflow | airflow/providers/google/cloud/example_dags/example_automl_nl_text_sentiment.py | Python | apache-2.0 | 3,589 |
#!/usr/bin/env python
import rospy
from camera_manager import Camera
import tf
from ieee2016_msgs.srv import RequestMap
import numpy as np
class PointIntersector():
    '''
    Given a point in the camera frame and Shia's current position, estimate where
    that point is along the wall. (Blocks are assumed to be flush against the wall,
    or at some fixed offset from it.)
    '''
    def __init__(self):
        # Map used for estimating distances: a flat array with 5 numbers per
        # wall segment (x1, y1, x2, y2, extra) served by /robot/request_map.
        map_request = rospy.ServiceProxy('/robot/request_map', RequestMap)
        self.map = np.array(map_request().map)

    def intersect_point(self, camera, point, time=None, offset=0):
        """Project an image point onto the wall map; return its 3D map-frame point.

        camera: Camera object providing ray construction and TF lookups.
        point:  (u, v) pixel coordinates in the camera image.
        time:   optional TF timestamp for the camera pose lookup.
        offset: wall offset used to approximate half blocks (see simulate_scan).
        """
        # Make a ray through the pixel and flatten it onto the ground plane.
        ray = camera.make_3d_vector(point)
        raw_ray = np.copy(ray)
        ray[1] = 0
        unit_ray = ray / np.linalg.norm(ray)
        # Angle between the camera's forward axis [0, 0, 1] and the ray,
        # signed by the ray's horizontal component.
        forward_ray = np.array([0, 0, 1])
        alpha = np.arccos(np.dot(unit_ray, forward_ray))
        signed_alpha = -1 * np.sign(unit_ray[0]) * alpha
        # Camera pose in the map frame: (x, y, yaw).
        cam_tf = camera.get_tf(target_frame="map", time=time)
        theta = cam_tf[2] + signed_alpha
        point = cam_tf[:2]
        self.offset = offset
        dist = self.simulate_scan(point, theta)
        return camera.make_3d_point(raw_ray, dist, output_frame="map", time=time)

    def simulate_scan(self, point, theta):
        '''
        Works like the particle-filter raytracer: cast a ray from `point` at
        heading `theta` against every wall segment in the map and return the
        distance to the closest hit. Raises ValueError if nothing is hit.
        '''
        point = np.array(point)
        ray_direction = np.array([np.cos(theta), np.sin(theta)])
        intersections = []
        # Approximate half blocks by shifting every wall's y coordinates by the
        # configured offset. Use floor division (//) so the reshape/range sizes
        # stay integers under Python 3 as well as Python 2.
        offset_map = (self.map.reshape(len(self.map) // 5, 5) +
                      np.array([0, self.offset, 0, self.offset, 0])).flatten()
        for w in range(len(offset_map) // 5):
            intersection_dist = self.find_intersection(
                point, ray_direction,
                np.array([offset_map[5 * w], offset_map[5 * w + 1]]),
                np.array([offset_map[5 * w + 2], offset_map[5 * w + 3]]))
            if intersection_dist is not None:
                intersections.append(intersection_dist)
        # All intersection points found, now return the closest.
        return min(intersections)

    def find_intersection(self, ray_origin, ray_direction, point1, point2):
        # Ray-line-segment intersection test in 2D: http://bit.ly/1CoxdrG
        # Returns the distance along the ray, or None if there is no hit.
        v1 = ray_origin - point1
        v2 = point2 - point1
        v3 = np.array([-ray_direction[1], ray_direction[0]])
        v2_dot_v3 = np.dot(v2, v3)
        if v2_dot_v3 == 0:
            # Ray is parallel to the segment.
            return None
        # Explicit 2D scalar cross product (np.cross on 2D inputs is deprecated
        # in recent NumPy versions).
        t1 = (v2[0] * v1[1] - v2[1] * v1[0]) / v2_dot_v3
        t2 = np.dot(v1, v3) / v2_dot_v3
        # t1: distance along the ray; t2 in [0, 1]: hit lies within the segment.
        if t1 >= 0.0 and t2 >= 0.0 and t2 <= 1.0:
            return t1
        return None
if __name__ == "__main__":
    # Manual smoke test: start a node, activate camera 1, and intersect one
    # image point against the wall map.
    rospy.init_node("point_intersector")
    c = Camera(1)
    c.activate()
    i = PointIntersector()
    i.intersect_point(c, (1920 / 2, 108))
    c.deactivate()
    # print np.array([0.00464233,-0.30620446,1.0]) * c.proj_mat
| ufieeehw/IEEE2016 | ros/ieee2016_vision/scripts/point_intersector.py | Python | mit | 3,584 |
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
import requests
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from BeautifulSoup import BeautifulSoup
from resources.lib.modules.common import random_agent, quality_tag
class source:
    """Scraper for blackcinema.org: collects embedded iframe stream URLs for
    movies and TV episodes and resolves them into playable sources."""

    def __init__(self):
        self.base_link = 'http://blackcinema.org'
        self.movie_link = '/movies/%s/'
        self.ep_link = '/episodes/%s/'

    def movie(self, imdb, title, year):
        """Collect iframe URLs for a movie page; returns the list (or None on error)."""
        self.zen_url = []
        try:
            headers = {'User-Agent': random_agent()}
            title = cleantitle.getsearch(title)
            title = title.replace(' ', '-')
            query = self.movie_link % title
            query = urlparse.urljoin(self.base_link, query)
            r = BeautifulSoup(requests.get(query, headers=headers).content)
            r = r.findAll('iframe')
            for u in r:
                u = u['src'].encode('utf-8')
                if u.startswith("//"): u = "http:" + u
                print("BLACKCINEMA PASSED", u)
                self.zen_url.append(u)
            # BUG FIX: previously `return self.zen_url.append(u)`, which both
            # appended the last URL a second time and always returned None.
            # Return the collected list, matching episode().
            return self.zen_url
        except:
            # Network/markup failures are swallowed by design in this addon.
            return

    # Episode page pattern: http://blackcinema.org/episodes/<title>-<S>x<E>/
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        """Encode the show identity; episode() decodes it later."""
        try:
            url = {'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Collect iframe URLs for a season/episode of the show encoded in `url`."""
        self.zen_url = []
        try:
            headers = {'User-Agent': random_agent()}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            data['season'], data['episode'] = season, episode
            title = cleantitle.getsearch(title)
            title = title.replace(' ', '-')
            query = title + "-" + season + "x" + episode
            query = self.ep_link % query
            query = urlparse.urljoin(self.base_link, query)
            r = BeautifulSoup(requests.get(query, headers=headers).content)
            r = r.findAll('iframe')
            for u in r:
                u = u['src'].encode('utf-8')
                if u.startswith("//"): u = "http:" + u
                print("BLACKCINEMA PASSED", u)
                self.zen_url.append(u)
            return self.zen_url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Fetch each collected iframe page and extract direct gvideo sources."""
        sources = []
        try:
            headers = {'User-Agent': random_agent()}
            for url in self.zen_url:
                if url is None: return
                print("BLACKCINEMA SOURCE", url)
                r = requests.get(url, headers=headers).text
                print("BLACKCINEMA SOURCE", r)
                match = re.compile(r'file:\s*"(.+?)",label:"(.+?)",').findall(r)
                for href, quality in match:
                    quality = quality_tag(quality)
                    sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Bcinema', 'url': href, 'direct': True, 'debridonly': False})
            return sources
        except:
            return sources

    def resolve(self, url):
        """URLs collected here are already direct; return unchanged."""
        return url
| repotvsupertuga/repo | plugin.video.zen/resources/lib/sources/bcinema_mv_tv.py | Python | gpl-2.0 | 3,582 |
""" log machine-parseable test session result information in a plain
text file.
"""
import os
import py
from _pytest.store import StoreKey
resultlog_key = StoreKey["ResultLog"]()
def pytest_addoption(parser):
    """Register the deprecated --resultlog/--result-log command line option."""
    parser.getgroup("terminal reporting", "resultlog plugin options").addoption(
        "--resultlog",
        "--result-log",
        action="store",
        metavar="path",
        default=None,
        help="DEPRECATED path for machine-readable result log.",
    )
def pytest_configure(config):
    """Open the result log file and register the ResultLog plugin, if requested."""
    resultlog = config.option.resultlog
    # prevent opening resultlog on slave nodes (xdist)
    if resultlog and not hasattr(config, "slaveinput"):
        # Create the parent directory for the log file if needed.
        dirname = os.path.dirname(os.path.abspath(resultlog))
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        logfile = open(resultlog, "w", 1)  # line buffered
        # Stash the plugin instance in the config store so unconfigure can
        # find and tear it down later.
        config._store[resultlog_key] = ResultLog(config, logfile)
        config.pluginmanager.register(config._store[resultlog_key])
        # Imported lazily to avoid the deprecation machinery unless the
        # option is actually used.
        from _pytest.deprecated import RESULT_LOG
        from _pytest.warnings import _issue_warning_captured
        _issue_warning_captured(RESULT_LOG, config.hook, stacklevel=2)
def pytest_unconfigure(config):
    """Close the log file and deregister the plugin, if it was installed."""
    plugin = config._store.get(resultlog_key, None)
    if not plugin:
        return
    plugin.logfile.close()
    del config._store[resultlog_key]
    config.pluginmanager.unregister(plugin)
class ResultLog:
    """Writes machine-parseable test outcomes to a (preferably line-buffered)
    log file: one "<letter> <testpath>" header line per outcome, followed by
    the long representation indented with a single space."""

    def __init__(self, config, logfile):
        self.config = config
        self.logfile = logfile  # preferably line buffered

    def write_log_entry(self, testpath, lettercode, longrepr):
        # Header line, then each longrepr line indented by one space.
        self.logfile.write("{} {}\n".format(lettercode, testpath))
        for line in longrepr.splitlines():
            self.logfile.write(" {}\n".format(line))

    def log_outcome(self, report, lettercode, longrepr):
        testpath = getattr(report, "nodeid", None)
        if testpath is None:
            testpath = report.fspath
        self.write_log_entry(testpath, lettercode, longrepr)

    def pytest_runtest_logreport(self, report):
        # Only the "call" phase is logged for passes; setup/teardown reports
        # are still logged when they fail or skip.
        if report.when != "call" and report.passed:
            return
        outcome = self.config.hook.pytest_report_teststatus(
            report=report, config=self.config
        )
        letter = outcome[1]
        if letter == "x":
            longrepr = str(report.longrepr)
        elif letter == "X":
            longrepr = ""
        elif report.passed:
            longrepr = ""
        elif report.skipped:
            longrepr = str(report.longrepr[2])
        else:
            longrepr = str(report.longrepr)
        self.log_outcome(report, letter, longrepr)

    def pytest_collectreport(self, report):
        if report.passed:
            return
        if report.failed:
            code = "F"
            longrepr = str(report.longrepr)
        else:
            assert report.skipped
            code = "S"
            longrepr = "%s:%d: %s" % report.longrepr
        self.log_outcome(report, code, longrepr)

    def pytest_internalerror(self, excrepr):
        # Fall back to the current working directory when the crash has no path.
        reprcrash = getattr(excrepr, "reprcrash", None)
        path = getattr(reprcrash, "path", None)
        if path is None:
            path = "cwd:%s" % py.path.local()
        self.write_log_entry(path, "!", str(excrepr))
| alfredodeza/pytest | src/_pytest/resultlog.py | Python | mit | 3,302 |
import unittest
from mock import patch
from geopy.point import Point
from geopy.exc import GeocoderNotFound
from geopy.geocoders import get_geocoder_for_service, GoogleV3
from geopy.geocoders.base import Geocoder, DEFAULT_TIMEOUT
import geopy.geocoders.base
class GetGeocoderTestCase(unittest.TestCase):
    # Tests for the get_geocoder_for_service() lookup helper.
    def test_ok(self):
        """
        get_geocoder_for_service resolves known service names (case aliases)
        """
        self.assertEqual(get_geocoder_for_service("google"), GoogleV3)
        self.assertEqual(get_geocoder_for_service("googlev3"), GoogleV3)
    def test_fail(self):
        """
        get_geocoder_for_service raises GeocoderNotFound for an unknown service
        """
        with self.assertRaises(GeocoderNotFound):
            get_geocoder_for_service("")
class GeocoderTestCase(unittest.TestCase):
    # Tests for the Geocoder base class: constructor options, user agent
    # headers, and point-to-string coercion.
    @classmethod
    def setUpClass(cls):
        cls.geocoder = Geocoder()
        cls.coordinates = (40.74113, -73.989656)
        cls.coordinates_str = "40.74113,-73.989656"
        cls.coordinates_address = "175 5th Avenue, NYC, USA"
    def test_init(self):
        """
        Geocoder() stores each constructor option as an attribute
        """
        format_string = '%s Los Angeles, CA USA'
        scheme = 'http'
        timeout = DEFAULT_TIMEOUT + 1
        proxies = {'https': '192.0.2.0'}
        geocoder = Geocoder(
            format_string=format_string,
            scheme=scheme,
            timeout=timeout,
            proxies=proxies
        )
        # NOTE: locals()[attr] deliberately reads the local variables named
        # above, so the loop names must match the locals exactly.
        for attr in ('format_string', 'scheme', 'timeout', 'proxies'):
            self.assertEqual(locals()[attr], getattr(geocoder, attr))
    def test_user_agent_default(self):
        # The default User-Agent header comes from DEFAULT_USER_AGENT.
        with patch('geopy.geocoders.base.DEFAULT_USER_AGENT', 'mocked_user_agent/0.0.0'):
            self.assertEqual(geopy.geocoders.base.DEFAULT_USER_AGENT, 'mocked_user_agent/0.0.0')
            geocoder = Geocoder()
            self.assertEqual(geocoder.headers['User-Agent'], 'mocked_user_agent/0.0.0')
    def test_user_agent_custom(self):
        # An explicit user_agent overrides the default header.
        geocoder = Geocoder(
            user_agent='my_user_agent/1.0'
        )
        self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
    def test_point_coercion_point(self):
        """
        Geocoder._coerce_point_to_string Point
        """
        self.assertEqual(
            self.geocoder._coerce_point_to_string(Point(*self.coordinates)),
            self.coordinates_str
        )
    def test_point_coercion_floats(self):
        """
        Geocoder._coerce_point_to_string tuple of coordinates
        """
        self.assertEqual(
            self.geocoder._coerce_point_to_string(self.coordinates),
            self.coordinates_str
        )
    def test_point_coercion_string(self):
        """
        Geocoder._coerce_point_to_string string of coordinates
        """
        self.assertEqual(
            self.geocoder._coerce_point_to_string(self.coordinates_str),
            self.coordinates_str
        )
    def test_point_coercion_address(self):
        """
        Geocoder._coerce_point_to_string address string
        """
        self.assertEqual(
            self.geocoder._coerce_point_to_string(self.coordinates_address),
            self.coordinates_address
        )
| mthh/geopy | test/geocoders/base.py | Python | mit | 3,152 |
from pip.req import parse_requirements as pip_parse_requirements
from pip.req import InstallRequirement
def is_pypi_requirement(requirement):
    """Return True if `requirement` is a plain PyPI requirement.

    Requirements parsed from editable/VCS/URL lines carry a `link` and are
    excluded. Always returns a real bool instead of leaking the raw
    `req`/`link` attribute values to callers (truthiness is unchanged).
    """
    return bool(requirement.req) and not requirement.link
def parse_requirements(path_to_requirements):
    """ Parse requirements
    :param path_to_requirements: path/to/requirements.txt
    :return: ['package name', ..]
    """
    # Keep only plain PyPI requirements (skip editable/VCS/URL entries).
    return [
        requirement.req.project_name
        for requirement in pip_parse_requirements(path_to_requirements,
                                                  session=False)
        if is_pypi_requirement(requirement)
    ]
def get_valid_pypi_requirement(line):
    """Parse `line` into an InstallRequirement, or return None when the line
    is unparseable or not a plain PyPI requirement."""
    try:
        requirement = InstallRequirement.from_line(line)
    except ValueError:
        return None
    if not is_pypi_requirement(requirement):
        return None
    return requirement
| 5monkeys/reqlice | reqlice/requirement.py | Python | mit | 926 |
""" This module provides the backend Flask server that serves an experiment. """
from datetime import datetime, timedelta
from functools import update_wrapper
import gevent
from json import dumps
from json import loads
from operator import attrgetter
import os
import re
import sys
import user_agents
from flask import (
abort,
Flask,
make_response,
render_template,
request,
Response,
send_from_directory,
)
from jinja2 import TemplateNotFound
from rq import get_current_job
from rq import Queue
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy.sql.expression import true
from dallinger import db
from dallinger import experiment
from dallinger import models
from dallinger import information
from dallinger.heroku.worker import conn as redis
from dallinger.config import get_config
from dallinger import recruiters
from dallinger.heroku.messages import get_messenger
from dallinger.heroku.messages import MessengerError
from dallinger.heroku.messages import HITSummary
from .replay import ReplayBackend
from .worker_events import WorkerEvent
from .utils import nocache
# Initialize the Dallinger database.
session = db.session
# Connect to the Redis queue for notifications.
q = Queue(connection=redis)
# Pub/sub channel name used to coordinate the waiting-room quorum.
WAITING_ROOM_CHANNEL = 'quorum'
# The Flask application served to participants.
app = Flask('Experiment_Server')
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator factory adding CORS headers to a Flask view.

    origin: allowed origin(s) (string or iterable of strings).
    methods/headers: allowed methods/headers; None means derive from Flask.
    max_age: preflight cache lifetime in seconds (or a timedelta).
    attach_to_all: add headers on every method, not just OPTIONS.
    automatic_options: answer OPTIONS with Flask's default response.
    """
    # Normalize the configuration into header-ready strings.
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, str):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, str):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()
    def get_methods():
        # Fall back to Flask's computed Allow header when no explicit list.
        if methods is not None:
            return methods
        options_resp = app.make_default_options_response()
        return options_resp.headers['allow']
    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp
            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp
        # Disable Flask's automatic OPTIONS so the wrapper handles it.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
@app.before_first_request
def _config():
    """Return the experiment configuration, loading it on first access."""
    cfg = get_config()
    if not cfg.ready:
        cfg.load()
    return cfg
def Experiment(args):
    """Instantiate the experiment class defined by the loaded experiment package."""
    return experiment.load()(args)
"""Load the experiment's extra routes, if any."""
try:
from dallinger_experiment.experiment import extra_routes
except ImportError:
pass
else:
app.register_blueprint(extra_routes)
"""Basic routes."""
@app.route('/')
def index():
    """Index route: render the current configuration as a simple HTML page."""
    config = _config()
    pieces = ['<html><head></head><body><h1>Dallinger Experiment in progress</h1><dl>']
    for key, value in sorted(config.as_dict().items()):
        pieces.append(
            '<dt style="font-weight:bold;margin-top:15px;">{}</dt><dd>{}</dd>'.format(key, value))
    pieces.append('</dl></body></html>')
    return ''.join(pieces)
@app.route('/robots.txt')
def static_robots_txt():
    """Serve robots.txt from the static directory."""
    return send_from_directory('static', 'robots.txt')
@app.route('/favicon.ico')
def static_favicon():
    """Serve favicon.ico from the static directory."""
    return send_from_directory('static', 'favicon.ico', mimetype='image/x-icon')
"""Define some canned response types."""
def success_response(**data):
    """Return a 200 JSON response with status "success" plus any extra fields."""
    payload = {"status": "success"}
    payload.update(data)
    return Response(
        dumps(payload, default=date_handler),
        status=200,
        mimetype='application/json',
    )
def error_response(error_type="Internal server error",
                   error_text='',
                   status=400,
                   participant=None,
                   simple=False,
                   request_data=''):
    """Return a generic JSON server error response.

    With simple=True only a text message is included; otherwise the full
    rendered error page HTML is embedded under "html".
    """
    # If we are inside an exception handler, log the failing request too.
    last_exception = sys.exc_info()
    if last_exception[0]:
        db.logger.error(
            "Failure for request: {!r}".format(dict(request.args)),
            exc_info=last_exception)
    data = {"status": "error"}
    if simple:
        data["message"] = error_text
    else:
        data["html"] = error_page(error_text=error_text,
                                  error_type=error_type,
                                  participant=participant,
                                  request_data=request_data).get_data().decode('utf-8')
    return Response(dumps(data), status=status, mimetype='application/json')
def error_page(participant=None, error_text=None, compensate=True,
               error_type="default", request_data=''):
    """Render the HTML error page (as a 500 response).

    Worker/HIT identifiers are taken from the participant when given,
    otherwise from the posted form data.
    """
    config = _config()
    if error_text is None:
        error_text = """There has been an error and so you are unable to
        continue, sorry!"""
    if participant is not None:
        # BUG FIX: these assignments previously ended with stray trailing
        # commas, which turned hit_id and assignment_id into one-element
        # tuples in the rendered template.
        hit_id = participant.hit_id
        assignment_id = participant.assignment_id
        worker_id = participant.worker_id
        participant_id = participant.id
    else:
        hit_id = request.form.get('hit_id', '')
        assignment_id = request.form.get('assignment_id', '')
        worker_id = request.form.get('worker_id', '')
        participant_id = request.form.get('participant_id', None)
        if participant_id:
            # Form values arrive as strings; normalize to int or None.
            try:
                participant_id = int(participant_id)
            except (ValueError, TypeError):
                participant_id = None
    return make_response(
        render_template(
            'error.html',
            error_text=error_text,
            compensate=compensate,
            contact_address=config.get('contact_email_on_error', ''),
            error_type=error_type,
            hit_id=hit_id,
            assignment_id=assignment_id,
            worker_id=worker_id,
            request_data=request_data,
            participant_id=participant_id
        ),
        500,
    )
class ExperimentError(Exception):
    """
    Exception for known experiment-level failures, such as a subject not
    being found in the database. `value` is a symbolic error name which is
    mapped to a numeric code stored on `errornum`.
    """
    def __init__(self, value):
        # Symbolic error name -> numeric error code.
        error_codes = {
            'status_incorrectly_set': 1000,
            'hit_assign_worker_id_not_set_in_mturk': 1001,
            'hit_assign_worker_id_not_set_in_consent': 1002,
            'hit_assign_worker_id_not_set_in_exp': 1003,
            'hit_assign_appears_in_database_more_than_once': 1004,
            'already_started_exp': 1008,
            'already_started_exp_mturk': 1009,
            'already_did_exp_hit': 1010,
            'tried_to_quit': 1011,
            'intermediate_save': 1012,
            'improper_inputs': 1013,
            'browser_type_not_allowed': 1014,
            'api_server_not_reachable': 1015,
            'ad_not_found': 1016,
            'error_setting_worker_complete': 1017,
            'hit_not_registered_with_ad_server': 1018,
            'template_unsafe': 1019,
            'insert_mode_failed': 1020,
            'page_not_found': 404,
            'in_debug': 2005,
            'unknown_error': 9999,
        }
        self.value = value
        self.errornum = error_codes[self.value]
        self.template = "error.html"
    def __str__(self):
        return repr(self.value)
@app.errorhandler(ExperimentError)
def handle_exp_error(exception):
    """Handle ExperimentError by logging it and rendering the error page."""
    app.logger.error(
        "%s (%s) %s", exception.value, exception.errornum, str(dict(request.args)))
    return error_page(error_type=exception.value)
"""Define functions for handling requests."""
@app.teardown_request
def shutdown_session(_=None):
    """Rollback and close the scoped DB session at the end of each request."""
    session.remove()
    db.logger.debug('Closing Dallinger DB session at flask request end')
@app.context_processor
def inject_experiment():
    """Inject the experiment and environment variables into every template context."""
    exp = Experiment(session)
    return dict(
        experiment=exp,
        env=os.environ,
    )
@app.route('/error-page', methods=['POST', 'GET'])
def render_error():
    """Render the error page, resolving the participant from form data if given."""
    request_data = request.form.get("request_data")
    participant_id = request.form.get("participant_id")
    participant = None
    if participant_id:
        participant = models.Participant.query.get(participant_id)
    return error_page(
        participant=participant,
        request_data=request_data,
    )
@app.route('/handle-error', methods=['POST'])
def handle_error():
    """Record a client-reported experiment error.

    Resolves the participant from whatever identifiers were posted, marks
    them complete when possible, stores a Notification, notifies the
    experimenter, and renders the error-complete page.
    """
    request_data = request.form.get("request_data")
    error_feedback = request.form.get("error_feedback")
    error_type = request.form.get("error_type")
    error_text = request.form.get("error_text")
    worker_id = request.form.get("worker_id")
    assignment_id = request.form.get("assignment_id")
    participant_id = request.form.get("participant_id")
    hit_id = request.form.get("hit_id")
    participant = None
    completed = False
    details = {'request_data': {}}
    if request_data:
        # The client may post the failing request as JSON; fall back to the
        # raw string-keyed form on parse errors.
        try:
            request_data = loads(request_data)
        except ValueError:
            request_data = {}
        details['request_data'] = request_data
        try:
            data = loads(request_data.get("data", "null")) or request_data
        except ValueError:
            data = request_data
        # Backfill any identifiers missing from the form from the payload.
        if not participant_id and 'participant_id' in data:
            participant_id = data['participant_id']
        if not worker_id and 'worker_id' in data:
            worker_id = data['worker_id']
        if not assignment_id and 'assignment_id' in data:
            assignment_id = data['assignment_id']
        if not hit_id and 'hit_id' in data:
            hit_id = data['hit_id']
    if participant_id:
        try:
            participant_id = int(participant_id)
        except (ValueError, TypeError):
            participant_id = None
    details['feedback'] = error_feedback
    details['error_type'] = error_type
    details['error_text'] = error_text
    # Try to resolve the participant by worker id first...
    if participant_id is None and worker_id:
        participants = session.query(models.Participant).filter_by(
            worker_id=worker_id
        ).all()
        if participants:
            participant = participants[0]
            if not assignment_id:
                assignment_id = participant.assignment_id
    # ...then by assignment id.
    if participant_id is None and assignment_id:
        # BUG FIX: this query previously filtered worker_id by the
        # assignment id (copy-paste from the branch above), so lookups by
        # assignment id could never match.
        participants = session.query(models.Participant).filter_by(
            assignment_id=assignment_id
        ).all()
        if participants:
            participant = participants[0]
            participant_id = participant.id
            if not worker_id:
                worker_id = participant.worker_id
    if participant_id is not None:
        _worker_complete(participant_id)
        completed = True
    details['request_data'].update({'worker_id': worker_id,
                                    'hit_id': hit_id,
                                    'participant_id': participant_id})
    notif = models.Notification(
        assignment_id=assignment_id or 'unknown',
        event_type='ExperimentError', details=details
    )
    session.add(notif)
    session.commit()
    config = _config()
    # Renamed from `summary` to avoid shadowing the /summary route function.
    hit_summary = HITSummary(
        assignment_id=assignment_id or 'unknown',
        duration=0,
        time_active=0,
        app_id=config.get('id', 'unknown'),
    )
    db.logger.debug("Reporting HIT error...")
    # Error notifications are always sent in plain (non-whimsical) style.
    with config.override({'whimsical': False}, strict=True):
        messenger = get_messenger(hit_summary, config)
        try:
            messenger.send_hit_error_msg()
        except MessengerError as ex:
            db.logger.exception(ex)
    return render_template(
        'error-complete.html',
        completed=completed,
        contact_address=config.get('contact_email_on_error', ''),
        hit_id=hit_id
    )
"""Define routes for managing an experiment and the participants."""
@app.route('/launch', methods=['POST'])
def launch():
    """Launch the experiment.

    Loads the experiment, opens recruitment, spawns background (and optional
    replay) tasks, and subscribes to the experiment's chat channel. Each step
    returns a simple 500 JSON error on failure.
    """
    try:
        exp = Experiment(db.init_db(drop_all=False))
    except Exception as ex:
        return error_response(
            error_text="Failed to load experiment in /launch: {}".format(str(ex)),
            status=500, simple=True
        )
    try:
        exp.log("Launching experiment...", "-----")
    except IOError as ex:
        return error_response(
            error_text="IOError writing to experiment log: {}".format(str(ex)),
            status=500, simple=True
        )
    try:
        recruitment_details = exp.recruiter.open_recruitment(n=exp.initial_recruitment_size)
        session.commit()
    except Exception as e:
        return error_response(
            error_text="Failed to open recruitment, check experiment server log "
            "for details: {}".format(str(e)),
            status=500, simple=True
        )
    # Spawn each experiment-defined background task as a greenlet.
    for task in exp.background_tasks:
        try:
            gevent.spawn(task)
        except Exception:
            return error_response(
                error_text="Failed to spawn task on launch: {}, ".format(task) +
                "check experiment server log for details",
                status=500, simple=True
            )
    # Optionally replay previously recorded experiment events.
    if _config().get('replay', False):
        try:
            task = ReplayBackend(exp)
            gevent.spawn(task)
        except Exception:
            return error_response(
                error_text="Failed to launch replay task for experiment."
                "check experiment server log for details",
                status=500, simple=True
            )
    # If the experiment defines a channel, subscribe the experiment to the
    # redis communication channel:
    if exp.channel is not None:
        try:
            from dallinger.experiment_server.sockets import chat_backend
            chat_backend.subscribe(exp, exp.channel)
        except Exception:
            return error_response(
                error_text="Failed to subscribe to chat for channel on launch " +
                "{}".format(exp.channel) +
                ", check experiment server log for details",
                status=500, simple=True
            )
    message = "\n".join((
        "Initial recruitment list:\n{}".format("\n".join(recruitment_details['items'])),
        "Additional details:\n{}".format(recruitment_details['message'])
    ))
    return success_response(recruitment_msg=message)
@app.route('/ad', methods=['GET'])
@nocache
def advertisement():
    """
    This is the url we give for the ad for our 'external question'. The ad has
    to display two different things: This page will be called from within
    mechanical turk, with url arguments hitId, assignmentId, and workerId.
    If the worker has not yet accepted the hit:
        These arguments will have null values, we should just show an ad for
        the experiment.
    If the worker has accepted the hit:
        These arguments will have appropriate values and we should enter the
        person in the database and provide a link to the experiment popup.
    """
    if not ('hitId' in request.args and 'assignmentId' in request.args):
        raise ExperimentError('hit_assign_worker_id_not_set_in_mturk')
    # Browser rule validation, if configured:
    user_agent_string = request.user_agent.string
    user_agent_obj = user_agents.parse(user_agent_string)
    browser_ok = True
    config = _config()
    # Rules are either known device-class keywords or raw substrings to match
    # against the User-Agent string.
    for rule in config.get('browser_exclude_rule', '').split(','):
        myrule = rule.strip()
        if myrule in ["mobile", "tablet", "touchcapable", "pc", "bot"]:
            if (myrule == "mobile" and user_agent_obj.is_mobile) or\
                    (myrule == "tablet" and user_agent_obj.is_tablet) or\
                    (myrule == "touchcapable" and user_agent_obj.is_touch_capable) or\
                    (myrule == "pc" and user_agent_obj.is_pc) or\
                    (myrule == "bot" and user_agent_obj.is_bot):
                browser_ok = False
        elif myrule and myrule in user_agent_string:
            browser_ok = False
    if not browser_ok:
        raise ExperimentError('browser_type_not_allowed')
    hit_id = request.args['hitId']
    assignment_id = request.args['assignmentId']
    app_id = config.get('id', 'unknown')
    mode = config.get('mode')
    debug_mode = mode == 'debug'
    worker_id = request.args.get('workerId')
    status = None
    if worker_id is not None:
        # First check if this workerId has completed the task before
        # under a different assignment (v1):
        already_participated = bool(
            models.Participant.query
            .filter(models.Participant.assignment_id != assignment_id)
            .filter(models.Participant.worker_id == worker_id)
            .count()
        )
        if already_participated and not debug_mode:
            raise ExperimentError('already_did_exp_hit')
        # Next, check for participants already associated with this very
        # assignment, and retain their status, if found:
        try:
            part = models.Participant.query.\
                filter(models.Participant.hit_id == hit_id).\
                filter(models.Participant.assignment_id == assignment_id).\
                filter(models.Participant.worker_id == worker_id).\
                one()
        except exc.SQLAlchemyError:
            pass
        else:
            status = part.status
    recruiter_name = request.args.get('recruiter')
    if recruiter_name:
        recruiter = recruiters.by_name(recruiter_name)
    else:
        recruiter = recruiters.from_config(config)
        recruiter_name = recruiter.nickname
    # NOTE: `part` is only bound when `status` was set, so the short-circuit
    # on status == 'working' keeps the attribute access below safe.
    ready_for_external_submission = status == 'working' and part.end_time is not None
    assignment_complete = status in ('submitted', 'approved')
    if assignment_complete or ready_for_external_submission:
        # They've either done, or they're from a recruiter that requires
        # submission of an external form to complete their participation.
        return render_template(
            'thanks.html',
            hitid=hit_id,
            assignmentid=assignment_id,
            workerid=worker_id,
            external_submit_url=recruiter.external_submission_url,
            mode=config.get('mode'),
            app_id=app_id
        )
    if status == 'working':
        # Once participants have finished the instructions, we do not allow
        # them to start the task again.
        raise ExperimentError('already_started_exp_mturk')
    # Participant has not yet agreed to the consent. They might not
    # even have accepted the HIT.
    return render_template(
        'ad.html',
        recruiter=recruiter_name,
        hitid=hit_id,
        assignmentid=assignment_id,
        workerid=worker_id,
        mode=config.get('mode'),
        app_id=app_id
    )
@app.route('/summary', methods=['GET'])
def summary():
    """Summarize the participants' status codes.

    Also reports how many networks remain unfilled and how many nodes are
    still required, and infers overall completion when the experiment does
    not report it explicitly.
    """
    exp = Experiment(session)
    state = {
        "status": "success",
        "summary": exp.log_summary(),
        "completed": exp.is_complete(),
    }
    unfilled_nets = models.Network.query.filter(
        models.Network.full != true()
    ).with_entities(models.Network.id, models.Network.max_size).all()
    working = models.Participant.query.filter_by(
        status='working'
    ).with_entities(func.count(models.Participant.id)).scalar()
    state['unfilled_networks'] = len(unfilled_nets)
    nodes_remaining = 0
    required_nodes = 0
    if state['unfilled_networks'] == 0:
        # All networks full and no one working: treat as complete unless the
        # experiment already gave an explicit answer.
        if working == 0 and state['completed'] is None:
            state['completed'] = True
    else:
        # Count non-failed nodes per unfilled network to compute remaining work.
        for net in unfilled_nets:
            node_count = models.Node.query.filter_by(
                network_id=net.id, failed=False,
            ).with_entities(func.count(models.Node.id)).scalar()
            net_size = net.max_size
            required_nodes += net_size
            nodes_remaining += net_size - node_count
    state['nodes_remaining'] = nodes_remaining
    state['required_nodes'] = required_nodes
    # Default to not-complete when the experiment gave no explicit answer.
    if state['completed'] is None:
        state['completed'] = False
    return Response(
        dumps(state),
        status=200,
        mimetype='application/json'
    )
@app.route('/experiment_property/<prop>', methods=['GET'])
@app.route('/experiment/<prop>', methods=['GET'])
def experiment_property(prop):
    """Return a single named public property of the experiment.

    Responds with 404 when the property is not public.
    """
    properties = Experiment(session).public_properties
    if prop not in properties:
        abort(404)
    return success_response(**{prop: properties[prop]})
@app.route("/<page>", methods=["GET"])
def get_page(page):
    """Render an arbitrary top-level template by name.

    Unknown templates produce a 404 rather than a server error.
    """
    template_name = "{}.html".format(page)
    try:
        return render_template(template_name)
    except TemplateNotFound:
        abort(404)
@app.route("/<directory>/<page>", methods=["GET"])
def get_page_from_directory(directory, page):
    """Render a template from a subdirectory of the templates folder.

    Mirrors get_page(): a missing template now yields a 404 response
    instead of an unhandled TemplateNotFound (HTTP 500).
    """
    try:
        return render_template(directory + '/' + page + '.html')
    except TemplateNotFound:
        abort(404)
@app.route("/consent")
def consent():
    """Render the consent form (kept for backwards-compatibility with 2.x)."""
    config = _config()
    args = request.args
    return render_template(
        "consent.html",
        hit_id=args['hit_id'],
        assignment_id=args['assignment_id'],
        worker_id=args['worker_id'],
        mode=config.get('mode'),
    )
"""Routes for reading and writing to the database."""
def request_parameter(parameter, parameter_type=None, default=None,
                      optional=False):
    """Get a parameter from a request.

    :param parameter: name of the parameter to look up in request.values
    :param parameter_type: expected type: None (raw string), "int",
        "known_class" (looked up in exp.known_classes), or "bool"
    :param default: value returned when the parameter was not passed
    :param optional: when True, a missing parameter yields None instead of
        an error Response

    If the parameter is missing and no default is specified, or it is
    present but of the wrong type, an error Response object is returned;
    callers detect this by checking the returned value's type.
    """
    exp = Experiment(session)
    # get the parameter
    try:
        value = request.values[parameter]
    except KeyError:
        # if it isnt found use the default, or return an error Response
        if default is not None:
            return default
        elif optional:
            return None
        else:
            msg = "{} {} request, {} not specified".format(
                request.url, request.method, parameter)
            return error_response(error_type=msg)
    # check the parameter type
    if parameter_type is None:
        # if no parameter_type is required, return the parameter as is
        return value
    elif parameter_type == "int":
        # if int is required, convert to an int
        try:
            value = int(value)
            return value
        except ValueError:
            msg = "{} {} request, non-numeric {}: {}".format(
                request.url, request.method, parameter, value)
            return error_response(error_type=msg)
    elif parameter_type == "known_class":
        # if its a known class check against the known classes
        try:
            value = exp.known_classes[value]
            return value
        except KeyError:
            msg = "{} {} request, unknown_class: {} for parameter {}".format(
                request.url, request.method, value, parameter)
            return error_response(error_type=msg)
    elif parameter_type == "bool":
        # if its a boolean, convert to a boolean
        if value in ["True", "False"]:
            return value == "True"
        else:
            msg = "{} {} request, non-boolean {}: {}".format(
                request.url, request.method, parameter, value)
            return error_response(error_type=msg)
    else:
        # an unrecognised parameter_type value is a programming error
        msg = "/{} {} request, unknown parameter type: {} for parameter {}"\
            .format(request.url, request.method, parameter_type, parameter)
        return error_response(error_type=msg)
def assign_properties(thing):
    """Copy property1..property5 from the request onto *thing*.

    When creating something via a POST request (e.g. a node), the caller may
    pass property1 through property5; any that are present are written to the
    matching columns of *thing* and the session is committed.
    """
    for index in range(1, 6):
        name = "property{}".format(index)
        value = request_parameter(parameter=name, optional=True)
        if value:
            setattr(thing, name, value)
    session.commit()
@app.route("/participant/<worker_id>/<hit_id>/<assignment_id>/<mode>",
           methods=["POST"])
@db.serialized
def create_participant(worker_id, hit_id, assignment_id, mode):
    """Create a participant.

    This route is hit early on. Any nodes the participant creates will be
    defined in reference to the participant object. You must specify the
    worker_id, hit_id, assignment_id, and mode in the url.
    """
    fingerprint_hash = request.args.get('fingerprint_hash')
    # Detect a returning browser by fingerprint; MultipleResultsFound means
    # the fingerprint has definitely been seen before.
    try:
        fingerprint_found = models.Participant.query.\
            filter_by(fingerprint_hash=fingerprint_hash).one_or_none()
    except MultipleResultsFound:
        fingerprint_found = True
    if fingerprint_hash and fingerprint_found:
        db.logger.warning("Same browser fingerprint detected.")
        if mode == 'live':
            # Fixed typo in the client-facing message ("dectected").
            return error_response(
                error_type="/participant POST: Same participant detected.",
                status=403)
    already_participated = models.Participant.query.\
        filter_by(worker_id=worker_id).one_or_none()
    if already_participated:
        db.logger.warning("Worker has already participated.")
        return error_response(
            error_type="/participant POST: worker has already participated.",
            status=403)
    # MTurk can reuse an assignment_id while an older participant is still
    # working; queue a reassignment event to replace that participant.
    duplicate = models.Participant.query.\
        filter_by(
            assignment_id=assignment_id,
            status="working")\
        .one_or_none()
    if duplicate:
        msg = """
        AWS has reused assignment_id while existing participant is
        working. Replacing older participant {}.
        """
        app.logger.warning(msg.format(duplicate.id))
        q.enqueue(worker_function, "AssignmentReassigned", None, duplicate.id)
    # Count working or beyond participants.
    nonfailed_count = models.Participant.query.filter(
        (models.Participant.status == "working") |
        (models.Participant.status == "submitted") |
        (models.Participant.status == "approved")
    ).count() + 1
    recruiter_name = request.args.get('recruiter')
    if recruiter_name and recruiter_name != 'undefined':
        recruiter = recruiters.by_name(recruiter_name)
    else:
        recruiter = recruiters.from_config(_config())
    # Create the new participant.
    participant = models.Participant(
        recruiter_id=recruiter.nickname,
        worker_id=worker_id,
        assignment_id=assignment_id,
        hit_id=hit_id,
        mode=mode,
        fingerprint_hash=fingerprint_hash,
    )
    session.add(participant)
    session.flush()  # Make sure we know the id for the new row
    result = {
        'participant': participant.__json__()
    }
    exp = Experiment(session)
    # Ping back to the recruiter that one of their participants has joined:
    recruiter.notify_recruited(participant)
    overrecruited = exp.is_overrecruited(nonfailed_count)
    if not overrecruited:
        # We either had no quorum or we have not overrecruited, inform the
        # recruiter that this participant will be seeing the experiment
        recruiter.notify_using(participant)
    # Queue notification to others in waiting room
    if exp.quorum:
        quorum = {
            'q': exp.quorum,
            'n': nonfailed_count,
            'overrecruited': overrecruited,
        }
        db.queue_message(WAITING_ROOM_CHANNEL, dumps(quorum))
        result['quorum'] = quorum
    # return the data
    return success_response(**result)
@app.route("/participant/<participant_id>", methods=["GET"])
def get_participant(participant_id):
    """Fetch a single participant by primary key and return it as JSON."""
    query = models.Participant.query.filter_by(id=participant_id)
    try:
        participant = query.one()
    except NoResultFound:
        return error_response(
            error_type="/participant GET: no participant found",
            status=403)
    return success_response(participant=participant.__json__())
@app.route("/network/<network_id>", methods=["GET"])
def get_network(network_id):
    """Fetch a single network by primary key and return it as JSON."""
    query = models.Network.query.filter_by(id=network_id)
    try:
        network = query.one()
    except NoResultFound:
        return error_response(
            error_type="/network GET: no network found",
            status=403)
    return success_response(network=network.__json__())
@app.route("/question/<participant_id>", methods=["POST"])
def create_question(participant_id):
    """Send a POST request to the question table.

    Questions store information at the participant level, not the node
    level.
    You should pass the question (string) number (int) and response
    (string) as arguments.
    """
    # Get the participant.
    try:
        ppt = models.Participant.query.filter_by(id=participant_id).one()
    except NoResultFound:
        return error_response(error_type="/question POST no participant found",
                              status=403)
    question = request_parameter(parameter="question")
    response = request_parameter(parameter="response")
    number = request_parameter(parameter="number", parameter_type="int")
    # Any of these may be an error Response from request_parameter.
    for x in [question, response, number]:
        if isinstance(x, Response):
            return x
    # Consult the recruiter regarding whether to accept a questionnaire
    # from the participant:
    rejection = ppt.recruiter.rejects_questionnaire_from(ppt)
    if rejection:
        return error_response(
            error_type="/question POST, status = {}, reason: {}".format(
                ppt.status, rejection),
            participant=ppt
        )
    try:
        # execute the request
        models.Question(participant=ppt, question=question,
                        response=response, number=number)
        session.commit()
    except Exception:
        return error_response(error_type="/question POST server error",
                              status=403)
    # return the data
    return success_response()
@app.route("/node/<int:node_id>/neighbors", methods=["GET"])
def node_neighbors(node_id):
    """Return JSON descriptions of a node's neighbours.

    Required url argument: node_id.
    Optional request arguments: node_type (a known class), connection
    (direction, default "to") and failed (bool; unsupported, see below).

    After getting the neighbours it also calls exp.node_get_request().
    """
    exp = Experiment(session)
    # get the parameters
    node_type = request_parameter(parameter="node_type",
                                  parameter_type="known_class",
                                  default=models.Node)
    connection = request_parameter(parameter="connection", default="to")
    failed = request_parameter(parameter="failed",
                               parameter_type="bool",
                               optional=True)
    for x in [node_type, connection]:
        if type(x) == Response:
            return x
    # make sure the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/node/neighbors, node does not exist",
            error_text="/node/{0}/neighbors, node {0} does not exist"
            .format(node_id))
    # get its neighbors
    if failed is not None:
        # "failed" is not a supported parameter: node.neighbors() is expected
        # to raise, and we pass the exception message back in the response.
        try:
            node.neighbors(type=node_type, direction=connection, failed=failed)
        except Exception as e:
            return error_response(error_type='node.neighbors',
                                  error_text=str(e))
        # Defensive fix: previously this path fell through and the view
        # returned None (an invalid Flask response) if neighbors()
        # unexpectedly accepted "failed". Report an explicit error instead.
        return error_response(error_type='node.neighbors',
                              error_text='"failed" is not supported')
    nodes = node.neighbors(type=node_type, direction=connection)
    try:
        # ping the experiment
        exp.node_get_request(
            node=node,
            nodes=nodes)
        session.commit()
    except Exception:
        return error_response(error_type="exp.node_get_request")
    return success_response(nodes=[n.__json__() for n in nodes])
@app.route("/node/<participant_id>", methods=["POST"])
@db.serialized
def create_node(participant_id):
    """Send a POST request to the node table.

    This makes a new node for the participant, it calls:
        1. exp.get_network_for_participant
        2. exp.create_node
        3. exp.add_node_to_network
        4. exp.node_post_request
    """
    exp = Experiment(session)
    # Get the participant.
    try:
        participant = models.Participant.query.filter_by(id=participant_id).one()
    except NoResultFound:
        return error_response(error_type="/node POST no participant found",
                              status=403)
    # Make sure the participant status is working
    if participant.status != "working":
        error_type = "/node POST, status = {}".format(participant.status)
        return error_response(error_type=error_type,
                              participant=participant)
    # execute the request
    network = exp.get_network_for_participant(participant=participant)
    if network is None:
        # No network is available for this participant; signal with a 403.
        return Response(dumps({"status": "error"}), status=403)
    node = exp.create_node(participant=participant, network=network)
    assign_properties(node)
    exp.add_node_to_network(node=node, network=network)
    # ping the experiment
    exp.node_post_request(participant=participant, node=node)
    # return the data
    return success_response(node=node.__json__())
@app.route("/node/<int:node_id>/vectors", methods=["GET"])
def node_vectors(node_id):
    """Return the vectors attached to a node as JSON.

    The node id comes from the url; ``direction`` (incoming/outgoing/all)
    and ``failed`` (True/False/all) may be passed as request parameters.
    """
    exp = Experiment(session)
    # Parse request parameters; a Response here means a validation error.
    direction = request_parameter(parameter="direction", default="all")
    failed = request_parameter(parameter="failed",
                               parameter_type="bool", default=False)
    for parsed in (direction, failed):
        if type(parsed) == Response:
            return parsed
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/vectors, node does not exist")
    try:
        vectors = node.vectors(direction=direction, failed=failed)
        # Give the experiment a chance to react before committing.
        exp.vector_get_request(node=node, vectors=vectors)
        session.commit()
    except Exception:
        return error_response(error_type="/node/vectors GET server error",
                              status=403,
                              participant=node.participant)
    return success_response(vectors=[vector.__json__() for vector in vectors])
@app.route("/node/<int:node_id>/connect/<int:other_node_id>",
           methods=["POST"])
def connect(node_id, other_node_id):
    """Connect to another node.

    The ids of both nodes must be specified in the url.
    You can also pass direction (to/from/both) as an argument.
    """
    exp = Experiment(session)
    # get the parameters
    direction = request_parameter(parameter="direction", default="to")
    # BUG FIX: the original tested ``type(direction == Response)``, which is
    # the ``bool`` class and therefore always truthy — every request returned
    # the bare direction value immediately. Test the value's type instead.
    if isinstance(direction, Response):
        return direction
    # check the nodes exist
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/connect, node does not exist")
    other_node = models.Node.query.get(other_node_id)
    if other_node is None:
        return error_response(
            error_type="/node/connect, other node does not exist",
            participant=node.participant)
    # execute the request
    try:
        vectors = node.connect(whom=other_node, direction=direction)
        for v in vectors:
            assign_properties(v)
        # ping the experiment
        exp.vector_post_request(
            node=node,
            vectors=vectors)
        session.commit()
    except Exception:
        return error_response(error_type="/vector POST server error",
                              status=403,
                              participant=node.participant)
    return success_response(vectors=[v.__json__() for v in vectors])
@app.route("/info/<int:node_id>/<int:info_id>", methods=["GET"])
def get_info(node_id, info_id):
    """Get a specific info.

    Both the node and info id must be specified in the url. A node may only
    read an info it originated or one it has actually received.
    """
    exp = Experiment(session)
    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info, node does not exist")
    # execute the experiment method:
    info = models.Info.query.get(info_id)
    if info is None:
        return error_response(error_type="/info GET, info does not exist",
                              participant=node.participant)
    elif (info.origin_id != node.id and
          info.id not in
            [t.info_id for t in node.transmissions(direction="incoming",
                                                   status="received")]):
        # The info exists but this node is not allowed to see it.
        return error_response(error_type="/info GET, forbidden info",
                              status=403,
                              participant=node.participant)
    try:
        # ping the experiment
        exp.info_get_request(node=node, infos=info)
        session.commit()
    except Exception:
        return error_response(error_type="/info GET server error",
                              status=403,
                              participant=node.participant)
    # return the data
    return success_response(info=info.__json__())
@app.route("/node/<int:node_id>/infos", methods=["GET"])
def node_infos(node_id):
    """Return every info belonging to a node as JSON.

    The node id comes from the url; an optional ``info_type`` parameter
    restricts results to a known Info subclass.
    """
    exp = Experiment(session)
    info_type = request_parameter(parameter="info_type",
                                  parameter_type="known_class",
                                  default=models.Info)
    if type(info_type) == Response:
        # Parameter validation failed; hand the error response back.
        return info_type
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/infos, node does not exist")
    try:
        infos = node.infos(type=info_type)
        # Give the experiment a chance to react before committing.
        exp.info_get_request(node=node, infos=infos)
        session.commit()
    except Exception:
        return error_response(error_type="/node/infos GET server error",
                              status=403,
                              participant=node.participant)
    return success_response(infos=[info.__json__() for info in infos])
@app.route("/node/<int:node_id>/received_infos", methods=["GET"])
def node_received_infos(node_id):
    """Return the infos a node has been sent and has received, as JSON.

    The node id comes from the url; an optional ``info_type`` parameter
    restricts results to a known Info subclass.
    """
    exp = Experiment(session)
    info_type = request_parameter(parameter="info_type",
                                  parameter_type="known_class",
                                  default=models.Info)
    if type(info_type) == Response:
        # Parameter validation failed; hand the error response back.
        return info_type
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/node/infos, node {} does not exist".format(node_id)
        )
    infos = node.received_infos(type=info_type)
    try:
        # Give the experiment a chance to react before committing.
        exp.info_get_request(node=node, infos=infos)
        session.commit()
    except Exception:
        return error_response(error_type="info_get_request error",
                              status=403,
                              participant=node.participant)
    return success_response(infos=[info.__json__() for info in infos])
@app.route("/tracking_event/<int:node_id>", methods=["POST"])
@crossdomain(origin='*')
def tracking_event_post(node_id):
    """Queue a TrackingEvent worker job for the node given in the url.

    An optional ``details`` request parameter is parsed as JSON and
    forwarded to the worker.
    """
    details = request_parameter(parameter="details", optional=True)
    if details:
        details = loads(details)
    # The node must exist before we enqueue work against it.
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info POST, node does not exist")
    db.logger.debug('rq: Queueing %s with for node: %s for worker_function',
                    'TrackingEvent', node_id)
    q.enqueue(worker_function, 'TrackingEvent', None, None,
              node_id=node_id, details=details)
    return success_response(details=details)
@app.route("/info/<int:node_id>", methods=["POST"])
@crossdomain(origin='*')
def info_post(node_id):
    """Create an info.

    The node id must be specified in the url.
    You must pass contents as an argument.
    info_type is an additional optional argument.
    If info_type is a custom subclass of Info it must be
    added to the known_classes of the experiment class.
    """
    # get the parameters and validate them
    contents = request_parameter(parameter="contents")
    details = request_parameter(parameter="details", optional=True)
    info_type = request_parameter(parameter="info_type",
                                  parameter_type="known_class",
                                  default=models.Info)
    # Any of these may be an error Response produced by request_parameter.
    for x in [contents, details, info_type]:
        if type(x) == Response:
            return x
    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info POST, node does not exist")
    if details:
        # details arrives as a JSON string; store it parsed.
        details = loads(details)
    exp = Experiment(session)
    try:
        # execute the request
        info = info_type(origin=node, contents=contents, details=details)
        assign_properties(info)
        # ping the experiment
        exp.info_post_request(
            node=node,
            info=info)
        session.commit()
    except Exception:
        return error_response(error_type="/info POST server error",
                              status=403,
                              participant=node.participant)
    # return the data
    return success_response(info=info.__json__())
@app.route("/node/<int:node_id>/transmissions", methods=["GET"])
def node_transmissions(node_id):
    """Get all the transmissions of a node.

    The node id must be specified in the url.
    You can also pass direction (to/from/all) or status (all/pending/received)
    as arguments.
    """
    exp = Experiment(session)
    # get the parameters
    direction = request_parameter(parameter="direction", default="incoming")
    status = request_parameter(parameter="status", default="all")
    for x in [direction, status]:
        if type(x) == Response:
            return x
    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/node/transmissions, node does not exist")
    # execute the request
    transmissions = node.transmissions(direction=direction, status=status)
    try:
        if direction in ["incoming", "all"] and status in ["pending", "all"]:
            # Fetching pending incoming transmissions marks them as received.
            node.receive()
            session.commit()
        # ping the experiment
        exp.transmission_get_request(node=node, transmissions=transmissions)
        session.commit()
    except Exception:
        return error_response(
            error_type="/node/transmissions GET server error",
            status=403,
            participant=node.participant)
    # return the data
    return success_response(transmissions=[t.__json__() for t in transmissions])
@app.route("/node/<int:node_id>/transmit", methods=["POST"])
def node_transmit(node_id):
    """Transmit to another node.

    The sender's node id must be specified in the url.
    As with node.transmit() the key parameters are what and to_whom.
    However, the values these accept are more limited than for the back end
    due to the necessity of serialization.

    If what and to_whom are not specified they will default to None.
    Alternatively you can pass an int (e.g. '5') or a class name (e.g.
    'Info' or 'Agent'). Passing an int will get that info/node, passing
    a class name will pass the class. Note that if the class you are specifying
    is a custom class it will need to be added to the dictionary of
    known_classes in your experiment code.

    You may also pass the values property1, property2, property3, property4
    and property5. If passed this will fill in the relevant values of the
    transmissions created with the values you specified.

    For example, to transmit all infos of type Meme to the node with id 10:
        dallinger.post(
            "/node/" + my_node_id + "/transmit",
            {what: "Meme",
             to_whom: 10}
        );
    """
    exp = Experiment(session)
    what = request_parameter(parameter="what", optional=True)
    to_whom = request_parameter(parameter="to_whom", optional=True)
    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/transmit, node does not exist")
    # create what
    if what is not None:
        try:
            # An integer argument names a specific Info by id.
            what = int(what)
            what = models.Info.query.get(what)
            if what is None:
                return error_response(
                    error_type="/node/transmit POST, info does not exist",
                    participant=node.participant)
        except Exception:
            # Otherwise it must be the name of a known class.
            try:
                what = exp.known_classes[what]
            except KeyError:
                msg = '/node/transmit POST, {} not in experiment.known_classes'
                return error_response(
                    error_type=msg.format(what),
                    participant=node.participant)
    # create to_whom
    if to_whom is not None:
        try:
            # An integer argument names a specific recipient Node by id.
            to_whom = int(to_whom)
            to_whom = models.Node.query.get(to_whom)
            if to_whom is None:
                return error_response(
                    error_type="/node/transmit POST, recipient Node does not exist",
                    participant=node.participant)
        except Exception:
            # Otherwise it must be the name of a known class.
            try:
                to_whom = exp.known_classes[to_whom]
            except KeyError:
                msg = '/node/transmit POST, {} not in experiment.known_classes'
                return error_response(
                    error_type=msg.format(to_whom),
                    participant=node.participant)
    # execute the request
    try:
        transmissions = node.transmit(what=what, to_whom=to_whom)
        for t in transmissions:
            assign_properties(t)
        session.commit()
        # ping the experiment
        exp.transmission_post_request(
            node=node,
            transmissions=transmissions)
        session.commit()
    except Exception:
        return error_response(error_type="/node/transmit POST, server error",
                              participant=node.participant)
    # return the data
    return success_response(transmissions=[t.__json__() for t in transmissions])
@app.route("/node/<int:node_id>/transformations", methods=["GET"])
def transformation_get(node_id):
    """Return all the transformations of a node as JSON.

    The node id comes from the url; an optional ``transformation_type``
    parameter restricts results to a known Transformation subclass.
    """
    exp = Experiment(session)
    transformation_type = request_parameter(parameter="transformation_type",
                                            parameter_type="known_class",
                                            default=models.Transformation)
    if type(transformation_type) == Response:
        # Parameter validation failed; hand the error response back.
        return transformation_type
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/node/transformations, "
                       "node {} does not exist".format(node_id)
        )
    transformations = node.transformations(type=transformation_type)
    try:
        # Give the experiment a chance to react before committing.
        exp.transformation_get_request(node=node,
                                       transformations=transformations)
        session.commit()
    except Exception:
        return error_response(error_type="/node/transformations GET failed",
                              participant=node.participant)
    return success_response(
        transformations=[item.__json__() for item in transformations])
@app.route(
    "/transformation/<int:node_id>/<int:info_in_id>/<int:info_out_id>",
    methods=["POST"])
def transformation_post(node_id, info_in_id, info_out_id):
    """Transform an info.

    The ids of the node, info in and info out must all be in the url.
    You can also pass transformation_type.
    """
    exp = Experiment(session)
    # Get the parameters.
    transformation_type = request_parameter(parameter="transformation_type",
                                            parameter_type="known_class",
                                            default=models.Transformation)
    if type(transformation_type) == Response:
        return transformation_type
    # Check that the node etc. exists.
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/transformation POST, "
            "node {} does not exist".format(node_id)
        )
    info_in = models.Info.query.get(info_in_id)
    if info_in is None:
        return error_response(
            error_type="/transformation POST, info_in {} does not exist".format(
                info_in_id
            ),
            participant=node.participant)
    info_out = models.Info.query.get(info_out_id)
    if info_out is None:
        return error_response(
            error_type="/transformation POST, info_out {} does not exist".format(
                info_out_id
            ),
            participant=node.participant)
    try:
        # execute the request
        transformation = transformation_type(info_in=info_in,
                                             info_out=info_out)
        assign_properties(transformation)
        session.commit()
        # ping the experiment
        exp.transformation_post_request(node=node,
                                        transformation=transformation)
        session.commit()
    except Exception:
        return error_response(error_type="/transformation POST failed",
                              participant=node.participant)
    # return the data
    return success_response(transformation=transformation.__json__())
@app.route("/notifications", methods=["POST", "GET"])
@crossdomain(origin='*')
def api_notifications():
    """Receive MTurk REST notifications and queue them for processing."""
    values = request.values
    event_type = values['Event.1.EventType']
    assignment_id = values.get('Event.1.AssignmentId')
    participant_id = values.get('participant_id')
    # Hand the notification off to the background worker queue.
    db.logger.debug('rq: Queueing %s with id: %s for worker_function',
                    event_type, assignment_id)
    q.enqueue(worker_function, event_type, assignment_id, participant_id)
    db.logger.debug('rq: Submitted Queue Length: %d (%s)', len(q),
                    ', '.join(q.job_ids))
    return success_response()
def _handle_worker_event(
        assignment_id, participant_id=None, event_type='AssignmentSubmitted'):
    """Run worker_function synchronously for a single worker event."""
    return worker_function(event_type, assignment_id, participant_id)
def check_for_duplicate_assignments(participant):
    """Fail any other "working" participants sharing this assignment_id.

    For each duplicate found, an AssignmentAbandoned event is queued so
    the older participant gets failed by the background worker.
    """
    same_assignment = models.Participant.query.filter_by(
        assignment_id=participant.assignment_id).all()
    for other in same_assignment:
        if other.id == participant.id or other.status != "working":
            continue
        q.enqueue(worker_function, "AssignmentAbandoned", None, other.id)
@app.route('/worker_complete', methods=['GET'])
@db.scoped_session_decorator
def worker_complete():
    """Mark a participant's assignment as complete."""
    participant_id = request.args.get('participant_id')
    if participant_id:
        try:
            _worker_complete(participant_id)
        except KeyError:
            return error_response(
                error_type='ParticipantId not found: {}'.format(participant_id))
        return success_response(status="success")
    return error_response(
        error_type="bad request",
        error_text='participantId parameter is required'
    )
def _worker_complete(participant_id):
    """Mark the given participant as finished.

    Raises KeyError when no participant with that id exists. Records the
    end time, notifies the recruiter of completion, and — when the
    recruiter defines a submitted event — dispatches that event through
    the normal worker pipeline.
    """
    participants = models.Participant.query.filter_by(id=participant_id).all()
    if not participants:
        raise KeyError()
    participant = participants[0]
    participant.end_time = datetime.now()
    session.add(participant)
    session.commit()
    # let recruiter know when completed, for qualification assignment
    participant.recruiter.notify_completed(participant)
    event_type = participant.recruiter.submitted_event()
    if event_type is None:
        # This recruiter has no post-submission processing to do.
        return
    _handle_worker_event(
        assignment_id=participant.assignment_id,
        participant_id=participant.id,
        event_type=event_type,
    )
@app.route('/worker_failed', methods=['GET'])
@db.scoped_session_decorator
def worker_failed():
    """Mark a participant's assignment as failed. Used by bots only for now."""
    participant_id = request.args.get('participant_id')
    if participant_id:
        try:
            _worker_failed(participant_id)
        except KeyError:
            return error_response(
                error_type='ParticipantId not found: {}'.format(participant_id))
        return success_response(field="status",
                                data="success",
                                request_type="worker failed")
    return error_response(
        error_type="bad request",
        error_text='participantId parameter is required'
    )
def _worker_failed(participant_id):
    """Record failure for a participant; raise KeyError if id is unknown.

    Bot participants additionally trigger a BotAssignmentRejected event.
    """
    matches = models.Participant.query.filter_by(id=participant_id).all()
    if not matches:
        raise KeyError()
    failed = matches[0]
    failed.end_time = datetime.now()
    session.add(failed)
    session.commit()
    if failed.recruiter_id == 'bots':
        _handle_worker_event(
            assignment_id=failed.assignment_id,
            participant_id=failed.id,
            event_type='BotAssignmentRejected',
        )
@db.scoped_session_decorator
def worker_function(event_type, assignment_id, participant_id, node_id=None, details=None):
    """Process the notification.

    Dispatches one queued event: TrackingEvent notifications are recorded
    directly as TrackingEvent infos; all other event types are resolved to
    a WorkerEvent runner and executed against the matching participant.
    Either assignment_id or participant_id must be provided.
    """
    _config()
    try:
        # When run by rq, log which job we are servicing; called
        # synchronously there is no current job, hence AttributeError.
        db.logger.debug("rq: worker_function working on job id: %s",
                        get_current_job().id)
        db.logger.debug('rq: Received Queue Length: %d (%s)', len(q),
                        ', '.join(q.job_ids))
    except AttributeError:
        db.logger.debug('Debug worker_function called synchronously')
    exp = Experiment(session)
    key = "-----"
    exp.log("Received an {} notification for assignment {}, participant {}"
            .format(event_type, assignment_id, participant_id), key)
    if event_type == 'TrackingEvent':
        node = None
        if node_id:
            node = models.Node.query.get(node_id)
        if not node:
            # No explicit node: fall back to the most recent node of the
            # participant identified by participant_id or assignment_id.
            participant = None
            if participant_id:
                # Lookup assignment_id to create notifications
                participant = models.Participant.query\
                    .get(participant_id)
            elif assignment_id:
                participants = models.Participant.query\
                    .filter_by(assignment_id=assignment_id)\
                    .all()
                # if there are one or more participants select the most recent
                if participants:
                    participant = max(participants,
                                      key=attrgetter('creation_time'))
                    participant_id = participant.id
            if not participant:
                exp.log("Warning: No participant associated with this "
                        "TrackingEvent notification.", key)
                return
            nodes = participant.nodes()
            if not nodes:
                exp.log("Warning: No node associated with this "
                        "TrackingEvent notification.", key)
                return
            node = max(nodes, key=attrgetter('creation_time'))
        if not details:
            details = {}
        info = information.TrackingEvent(origin=node, details=details)
        session.add(info)
        session.commit()
        return
    runner_cls = WorkerEvent.for_name(event_type)
    if not runner_cls:
        exp.log("Event type {} is not supported... ignoring.".format(event_type))
        return
    if assignment_id is not None:
        # save the notification to the notification table
        notif = models.Notification(
            assignment_id=assignment_id,
            event_type=event_type)
        session.add(notif)
        session.commit()
        # try to identify the participant
        participants = models.Participant.query\
            .filter_by(assignment_id=assignment_id)\
            .all()
        # if there are one or more participants select the most recent
        if participants:
            participant = max(participants,
                              key=attrgetter('creation_time'))
        # if there are none print an error
        else:
            exp.log("Warning: No participants associated with this "
                    "assignment_id. Notification will not be processed.", key)
            return None
    elif participant_id is not None:
        participant = models.Participant.query\
            .filter_by(id=participant_id).all()[0]
    else:
        raise ValueError(
            "Error: worker_function needs either an assignment_id or a "
            "participant_id, they cannot both be None")
    participant_id = participant.id
    runner = runner_cls(
        participant, assignment_id, exp, session, _config(), datetime.now()
    )
    runner()
    session.commit()
def date_handler(obj):
    """JSON ``default`` hook: render date-like objects via isoformat().

    Objects without an ``isoformat`` method pass through unchanged.
    """
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    return obj
# Insert "mode" into pages so it's carried from page to page done server-side
# to avoid breaking backwards compatibility with old templates.
def insert_mode(page_html, mode):
    """Append "&mode=<mode>" after the last workerId placeholder in the page.

    Raises ExperimentError("insert_mode_failed") when the placeholder
    "workerId={{ workerid }}" does not occur in *page_html*.
    """
    all_matches = list(re.finditer('workerId={{ workerid }}', page_html))
    if not all_matches:
        raise ExperimentError("insert_mode_failed")
    # Insert immediately after the final occurrence, as the original did.
    cut = all_matches[-1].end()
    return page_html[:cut] + "&mode=" + mode + page_html[cut:]
| jcpeterson/Dallinger | dallinger/experiment_server/experiment_server.py | Python | mit | 60,002 |
import json
import logging
from functools import wraps
logger = logging.getLogger(__name__)
class PandaError(Exception):
    """Raised when a Panda API response contains an error payload."""
    pass
def error_check(func):
    """Decorator: raise PandaError when the wrapped call returns an error.

    The wrapped function must return a parsed API payload; if it contains
    an "error" key, the associated "message" is logged and raised as a
    PandaError. Any other exception is logged and re-raised unchanged.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
            if "error" in result:
                logger.error(result["message"])
                raise PandaError(result["message"])
        except Exception as exc:
            logger.error(exc)
            raise
        return result
    return wrapper
class Retriever(object):
    """Base class for objects that fetch models through a Panda client.

    ``path`` defaults to the model type's own ``path`` attribute when no
    explicit override is given.
    """

    def __init__(self, panda, model_type, path=None):
        self.panda = panda
        self.model_type = model_type
        self.path = path if path else model_type.path
class GroupRetriever(Retriever):
    """Retriever for collection endpoints (list, create, find)."""

    @error_check
    def _all(self, **kwargs):
        """Fetch the raw list of attribute dicts for this collection."""
        # NOTE: kwargs are sent as a positional params dict here, while
        # find() forwards them as keyword arguments — preserved as-is.
        raw = self.panda.get("{0}.json".format(self.path), kwargs)
        return json.loads(raw)

    @error_check
    def new(self, *args, **kwargs):
        """Build a local (unsaved) model instance."""
        return self.model_type(self.panda, *args, **kwargs)

    @error_check
    def create(self, *args, **kwargs):
        """Build a model instance and persist it via the API."""
        instance = self.new(*args, **kwargs)
        return instance.create(**kwargs)

    @error_check
    def find(self, val, **kwargs):
        """Fetch a single resource of this collection by id/key."""
        raw = self.panda.get("{0}/{1}.json".format(self.path, val), **kwargs)
        return self.model_type(self.panda, **json.loads(raw))

    def all(self, **kwargs):
        """Return every resource in the collection as model instances."""
        return [self.model_type(self.panda, **attrs)
                for attrs in self._all(**kwargs)]

    def where(self, pred, **kwargs):
        """Return the resources whose raw attribute dict satisfies pred."""
        return [self.model_type(self.panda, **attrs)
                for attrs in self._all(**kwargs) if pred(attrs)]
class SingleRetriever(Retriever):
    """Retriever for endpoints that return exactly one resource."""

    @error_check
    def get(self, **kwargs):
        """GET the resource and wrap the decoded payload in the model."""
        raw = self.panda.get("{0}.json".format(self.path), **kwargs)
        return self.model_type(self.panda, json.loads(raw))

    @error_check
    def post(self, **kwargs):
        """POST to the resource and wrap the decoded payload in the model."""
        raw = self.panda.post("{0}.json".format(self.path), **kwargs)
        return self.model_type(self.panda, json.loads(raw))
class PandaDict(dict):
    """dict that keeps a reference to the Panda client it came from."""

    def __init__(self, panda, *arg, **kwarg):
        # Keep the client handle so models can issue follow-up requests.
        self.panda = panda
        dict.__init__(self, *arg, **kwarg)

    def to_json(self, *args, **kwargs):
        """Serialize the dict contents (not the client) to a JSON string."""
        return json.dumps(self, *args, **kwargs)
class PandaModel(PandaDict):
    """Base class for API resources identified by an "id" key."""

    def dup(self):
        """Return a copy of this model's data without its "id".

        The copy can be used to create a fresh resource from an existing
        one (the server will assign a new id).
        """
        copy = self.copy()
        if "id" in copy:
            # Bug fix: the original did a bare ``copy["id"]`` lookup,
            # which was a no-op; the id must actually be removed.
            del copy["id"]
        return copy

    def reload(self):
        """Re-fetch this resource from the API and replace local state."""
        json_data = self.panda.get("{0}/{1}.json".format(self.path, self["id"]))
        self.clear()
        parsed = json.loads(json_data)
        self.update(parsed)

    @error_check
    def create(self, **kwargs):
        """POST a new resource and return it as a model instance."""
        json_data = self.panda.post("{0}.json".format(self.path), kwargs)
        return self.__class__(self.panda, json.loads(json_data))

    @error_check
    def delete(self, **kwargs):
        """DELETE this resource and return the server response as a model."""
        json_data = self.panda.delete("{0}/{1}.json".format(self.path, self["id"]), kwargs)
        return self.__class__(self.panda, json.loads(json_data))
class UpdatablePandaModel(PandaModel):
    """PandaModel that tracks local edits so save() only PUTs changed keys."""

    def __init__(self, *args, **kwargs):
        # Bug fix: ``changed_values`` used to be a mutable CLASS attribute,
        # so every instance shared (and leaked) the same change set.
        # Track changes per instance instead.
        self.changed_values = {}
        super(UpdatablePandaModel, self).__init__(*args, **kwargs)

    @error_check
    def save(self):
        """PUT the locally changed fields; reset the change set on success."""
        put_path = "{0}/{1}.json".format(self.path, self["id"])
        ret = type(self)(self.panda, json.loads(self.panda.put(put_path, self.changed_values)))
        if "error" not in ret:
            self.changed_values = {}
        return ret

    def __setitem__(self, key, val):
        # Record the edit before applying it.
        self.changed_values[key] = val
        super(UpdatablePandaModel, self).__setitem__(key, val)

    # http://stackoverflow.com/a/2588648/1542900
    def update(self, *args, **kwargs):
        """dict.update that routes every assignment through __setitem__."""
        if args:
            if len(args) > 1:
                raise TypeError("update expected at most 1 arguments, got %d" % len(args))
            other = dict(args[0])
            for key in other:
                self[key] = other[key]
        for key in kwargs:
            self[key] = kwargs[key]

    # http://stackoverflow.com/a/2588648/1542900
    def setdefault(self, key, value=None):
        """dict.setdefault that routes the write through __setitem__."""
        if key not in self:
            self[key] = value
        return self[key]
class Video(PandaModel):
    """A video resource; exposes its encodings and metadata sub-resources."""
    # REST endpoint for video resources.
    path = "/videos"

    def encodings(self):
        """Return every Encoding belonging to this video."""
        return GroupRetriever(self.panda, Encoding, "/videos/{0}/encodings".format(self["id"])).all()

    def metadata(self):
        """Return the Metadata attached to this video."""
        return SingleRetriever(self.panda, Metadata, "/videos/{0}/metadata".format(self["id"])).get()
class Cloud(UpdatablePandaModel):
    """Editable resource at the /clouds endpoint."""
    path = "/clouds"
class Encoding(PandaModel):
    """A single encoding job/result at the /encodings endpoint."""
    path = "/encodings"

    def video(self):
        """Return the Video this encoding belongs to."""
        return SingleRetriever(self.panda, Video, "/videos/{0}".format(self["video_id"])).get()

    def profile(self):
        """Return the Profile used by this encoding.

        Prefers the profile name and falls back to the profile id.
        """
        key = self["profile_name"] or self["profile_id"]
        # Bug fix: this endpoint returns a profile, so wrap the response
        # in the Profile model (it was previously wrapped in Video).
        return SingleRetriever(self.panda, Profile, "/profiles/{0}".format(key)).get()

    def cancel(self):
        """Ask the server to cancel this encoding."""
        return SingleRetriever(self.panda, PandaDict, "/encodings/{0}/cancel.json".format(self["id"])).post()

    def retry(self):
        """Ask the server to retry this encoding."""
        return SingleRetriever(self.panda, PandaDict, "/encodings/{0}/retry.json".format(self["id"])).post()
class Profile(UpdatablePandaModel):
    """Editable resource at the /profiles endpoint."""
    path = "/profiles"
class Notifications(UpdatablePandaModel):
    """Account-wide notification settings (a singleton resource)."""
    path = "/notifications"

    @error_check
    def save(self):
        """PUT the full notification settings and return the new state."""
        tmp = dict(self)
        # Bug fix: dict(self) is a SHALLOW copy, so rewriting the nested
        # "events" dict in place also corrupted this instance's own state.
        # Copy the nested dict before converting its values.
        tmp["events"] = dict(tmp["events"])
        for event in tmp["events"]:
            # The API expects the boolean flags as "true"/"false" strings.
            tmp["events"][event] = str(tmp["events"][event]).lower()
        return Notifications(self.panda, json.loads(self.panda.put("/notifications.json", tmp)))

    def delete(self):
        """Notifications cannot be deleted."""
        raise AttributeError("Notification instance has no attribute 'delete'")

    def reload(self):
        """Re-fetch the settings; this endpoint has no per-id URL."""
        json_data = self.panda.get("/notifications.json")
        self.clear()
        self.update(json.loads(json_data))
class Metadata(PandaDict):
    """Free-form metadata payload attached to a video."""
    pass
| pandastream/panda_client_python | panda/models.py | Python | mit | 5,728 |
import urllib.request

url = 'http://www.ifce.edu.br'

# Fetch the page content with the standard library.
pagina = urllib.request.urlopen(url)
texto1 = pagina.read().decode('utf-8')

# Another way of doing the same thing, using the requests library.
import requests

page = requests.get(url)
texto2 = page.content.decode('utf-8')

# Verify that both downloads produced the same lines (printed below).
print(texto1.split('\n') == texto2.split('\n')) | santiagosilas/propython | raspagem/random/exemplo01.py | Python | mit | 377 |
from django.conf.urls.defaults import patterns, url
from django.views.generic import DetailView, ListView
from polls.models import Poll
# URLconf for the polls app (legacy Django ``patterns()`` style; plain
# tuples and url() entries are both accepted by patterns()).
urlpatterns = patterns('',
    # Index: the five most recent polls.
    (r'^$',
        ListView.as_view(
            queryset=Poll.objects.order_by('-pub_date')[:5],
            context_object_name='latest_poll_list',
            template_name='polls/index.html')),
    # Poll detail page, looked up by primary key.
    (r'^(?P<pk>\d+)/$',
        DetailView.as_view(
            model=Poll,
            template_name='polls/detail.html')),
    # Results page; named so templates and reverse() can reference it.
    url(r'^(?P<pk>\d+)/results/$',
        DetailView.as_view(
            model=Poll,
            template_name='polls/results.html'),
        name='poll_results'),
    # Vote submission handled by a classic function-based view.
    (r'^(?P<poll_id>\d+)/vote/$', 'polls.views.vote'),
)
| jokey2k/ShockGsite | polls/urls.py | Python | bsd-3-clause | 711 |
## Import numpy
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
def wvf_interpolate(times, voltages, knot_frequency):
    """Fit a cubic B-spline to the voltage samples and return knot data.

    Every ``knot_frequency``-th sample is used as a knot point (the final
    sample is always included so the spline spans the full time range),
    a cubic spline is fit through those points, and the first three
    derivatives of the spline are evaluated at the knots.

    Returns:
        (time_pts, voltage_pts, derivatives) where ``derivatives`` is a
        list of three arrays holding the 1st, 2nd and 3rd derivative
        values at each knot — the format consumed by spline_write().

    Note: the original carried dead branches for interpolation modes 0
    and 2 and a docstring describing point-trimming/marker behavior that
    was never implemented; only the cubic (mode 3) path was reachable.
    """
    degree = 3  # cubic spline; the only mode used by the hfGUI workflow

    # Down-sample to the knot points, always keeping the final sample.
    time_pts = times[::knot_frequency]
    voltage_pts = voltages[::knot_frequency]
    if time_pts[-1] != times[-1]:
        time_pts = np.append(time_pts, times[-1])
        voltage_pts = np.append(voltage_pts, voltages[-1])

    # Fit the spline and evaluate its derivatives at the knot points.
    spline = interpolate.splrep(time_pts, voltage_pts, k=degree)
    derivatives = [interpolate.splev(time_pts, spline, der=i + 1)
                   for i in range(degree)]

    return time_pts, voltage_pts, derivatives
def spline_write(times, voltages, derivatives, channel, wvf_name, branch, timeScale, file_handle):
    """Write one channel's cubic-spline waveform definition to a .dch file.

    ``derivatives`` holds the 1st/2nd/3rd spline derivatives at each time
    point, as produced by wvf_interpolate().  The spline coefficients are
    converted to the discrete summed-polynomial form used by the FPGA.
    """
    # Electrode numbering is offset by one from the channel index.
    chan = channel + 1

    # Interval lengths between consecutive time points.
    steps = [times[i + 1] - times[i] for i in range(len(times) - 1)]

    file_handle.write('wvfcdef({0}_{1}, {2}, {3})\n'.format(wvf_name, chan, chan, branch))

    d1, d2, d3 = derivatives[0], derivatives[1], derivatives[2]
    for i, h in enumerate(steps):
        # Coefficients obtained by matching t^n terms of the spline
        # against the FPGA's discrete summed polynomial.
        c1 = h * (d1[i] - d2[i] / 2 + d3[i] / 6)
        c2 = h * (h + 1) * (d2[i] - d3[i]) / 2
        c3 = h * (h + 1) * (h + 2) * d3[i] / 6
        file_handle.write('[{0:.4f}] {1:.4f},{2:.4f},{3:.4f},{4:.4f};\n'.format(
            (times[i] + h) * timeScale, voltages[i], c1, c2, c3))

    file_handle.write('wvfend\n\n')
    # Nothing meaningful to return; data is written to file_handle.
    return None
| camacazio/pdq-project | PDQ_configuration/PDQ_control_files/spline_dch_creation_coefficients.py | Python | gpl-3.0 | 3,064 |
"""
==============================================================================
Program: SpellingCorrector.py
Author: Kyle Reese Almryde
Date: Thu 03/28/2013 @ 12:03:42 PM
Description: This program tries to correct the spelling of a word using a
supplied dictionary and a criteria.
==============================================================================
"""
import difflib
import glob
import os
import random
import sys
import wave
from pprint import pprint
def getWAVduration(fname):
    """ Determine the duration of a .WAV file
        Params:
            fname -- String: The WAV filename
        Returns:
            A Float representing the duration in milliseconds
    """
    f = wave.open(fname, 'r')
    try:
        frames = f.getnframes()
        rate = f.getframerate()
    finally:
        # Always release the file handle (the original leaked it).
        f.close()
    duration = frames / float(rate) * 1000
    return duration
#=============================== START OF MAIN ===============================
def main():
    """Bucket the recorded stimuli by speaker/marking, then write a report
    matching misspelled words against the correct-word lexicon.

    Requires the module-level ``import glob`` and ``import random``.
    """
    SOUNDS = '/usr/local/Utilities/Russian/sounds'
    os.chdir(SOUNDS)

    # Second-repetition recordings for both speaker genders.
    wordList = glob.glob('*ale/*/*2.wav')
    random.shuffle(wordList)

    stimList = {"Ff1": [], "Ff2": [], "Fm1": [], "Fm2": [], "Mm1": [], "Mm2": [], "Mf1": [], "Mf2": []}

    # NOTE(review): stimList is built but never used below — presumably
    # leftover from a stimulus-selection step; confirm before removing.
    for word in wordList:
        if word.startswith("Male/"):
            if word.endswith('telya2.wav') or word.endswith('telyem2.wav'):
                stimList["Mm2"].append(word)
            elif word.endswith('ya2.wav') or word.endswith('yem2.wav'):
                stimList["Mm1"].append(word)
            elif word.endswith('kaoj2.wav') or word.endswith('kau2.wav'):
                stimList["Mf2"].append(word)
            elif word.endswith('aoj2.wav') or word.endswith('au2.wav'):
                # Bug fix: the dict defines "Mf1" — the old "M12" key
                # raised KeyError as soon as this branch was hit.
                stimList["Mf1"].append(word)
        else:
            if word.endswith('telya2.wav') or word.endswith('telyem2.wav'):
                stimList["Fm2"].append(word)
            elif word.endswith('ya2.wav') or word.endswith('yem2.wav'):
                stimList["Fm1"].append(word)
            elif word.endswith('kaoj2.wav') or word.endswith('kau2.wav'):
                stimList["Ff2"].append(word)
            elif word.endswith('aoj2.wav') or word.endswith('au2.wav'):
                # Bug fix: same as above — "F12" was never a valid key.
                stimList["Ff1"].append(word)

    inc = os.path.join(SOUNDS, 'IncorrectList.txt')
    corr = os.path.join(SOUNDS, 'CorrectList.txt')
    # Lexicon of known-correct words (basename, lower-cased).
    lexicon = {os.path.split(y)[1].strip().lower() for y in open(corr).readlines()}

    outFile = os.path.join(SOUNDS, 'FixedList.txt')
    fout = open(outFile, 'w')
    template = "{0}\t{1}\t{2}\t{3}\t{4}\n"
    header = ["Speaker", "Gender", "Marking", "Word", "Fix"]
    fout.write(template.format(*header))

    for line in open(inc).readlines():
        mark, l = line.split(':')
        p, l = os.path.split(l)
        speaker, g = os.path.split(p)
        gender = g[2:]
        word = l.strip().lower()
        # Closest lexicon word, or None when nothing is similar enough.
        fix = difflib.get_close_matches(word, lexicon, 1)
        fix = ''.join(fix) if len(fix) > 0 else None
        fout.write(template.format(speaker, gender, mark, word, fix))

    fout.close()
if __name__ == '__main__':
main() | KrbAlmryde/Utilities | Russian/SpellingCorrector.py | Python | mit | 3,049 |
from __future__ import absolute_import
from celery import shared_task
from roomsensor.models import Roomsensor
import time
@shared_task
def read(sensorname):
    """Celery task: trigger a single reading on the named room sensor."""
    sensor = Roomsensor.objects.get(name=sensorname)
    sensor.read()
@shared_task
def read_and_save_to_mongodb(sensorname):
    """Celery task: read the named sensor and persist the measurement.

    The reading (luminosity, temperature, humidity) is written to the
    sensor's MongoDB store together with the current Unix timestamp.
    """
    sensor = Roomsensor.objects.get(name=sensorname)
    sensor.read()
    # Values are stored as-is; a float type check existed here once but
    # was deliberately disabled.
    dataset = {
        "luminosity": sensor.luminosity,
        "temperature": sensor.temperature,
        "humidity": sensor.humidity,
        "time": time.time(),
    }
    sensor.db.write(dataset)
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rdmo.core.exports import XMLResponse
from rdmo.core.permissions import HasModelPermission
from rdmo.core.views import ChoicesViewSet
from rdmo.core.viewsets import CopyModelMixin
from .models import Condition
from .renderers import ConditionRenderer
from .serializers.export import ConditionExportSerializer
from .serializers.v1 import ConditionIndexSerializer, ConditionSerializer
class ConditionViewSet(CopyModelMixin, ModelViewSet):
    """CRUD API for Condition objects, plus index and XML export actions."""
    permission_classes = (HasModelPermission, )

    # Pre-fetch the relations used by the serializer to avoid N+1 queries.
    queryset = Condition.objects.select_related('source', 'target_option') \
        .prefetch_related('optionsets', 'questionsets', 'questions', 'tasks')
    serializer_class = ConditionSerializer

    filter_backends = (DjangoFilterBackend,)
    filterset_fields = (
        'uri',
        'key',
        'source',
        'relation',
        'target_text',
        'target_option'
    )

    @action(detail=False)
    def index(self, request):
        """Return a lightweight list of all conditions."""
        queryset = Condition.objects.select_related('source', 'target_option')
        serializer = ConditionIndexSerializer(queryset, many=True)
        return Response(serializer.data)

    @action(detail=False, permission_classes=[HasModelPermission])
    def export(self, request):
        """Export every (filtered) condition as an XML document."""
        serializer = ConditionExportSerializer(self.get_queryset(), many=True)
        xml = ConditionRenderer().render(serializer.data)
        return XMLResponse(xml, name='conditions')

    @action(detail=True, url_path='export', permission_classes=[HasModelPermission])
    def detail_export(self, request, pk=None):
        """Export a single condition as XML, named after its key."""
        serializer = ConditionExportSerializer(self.get_object())
        xml = ConditionRenderer().render([serializer.data])
        return XMLResponse(xml, name=self.get_object().key)
class RelationViewSet(ChoicesViewSet):
    """Read-only list of the available condition relation choices."""
    permission_classes = (IsAuthenticated, )
    queryset = Condition.RELATION_CHOICES
| rdmorganiser/rdmo | rdmo/conditions/viewsets.py | Python | apache-2.0 | 2,141 |
import json
import time
from time import gmtime, strftime
import datetime
import sys
from dateutil import parser
import calendar
from TrendAnalyser import TrendAnalyser

# Smoke test: time how long constructing the analyser takes when the API
# client is skipped but the database connection is established.
start_time = time.time()
TA = TrendAnalyser(load_api=False, load_db=True)
end_time = time.time()

print "Time Taken:", end_time - start_time
| chewett/TrendAnalyser | tests/test_init_no_api_db.py | Python | mit | 312 |
"""
WSGI config for vitelco project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or hard-code
# the settings module here, e.g.:
# os.environ["DJANGO_SETTINGS_MODULE"] = "settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here, e.g.:
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| kyrelos/vitelco-mobile-money-wallet | wsgi.py | Python | gpl-3.0 | 1,428 |
#! /usr/bin/env python
"""
Usage:
cm-image -h | --help
cm-image version
cm-image [--kind=KIND] info
cm-image [--kind=KIND] [--gui] build OS
cm-image [--kind=KIND] register OS
Arguments:
OS the OS you can find with cm-image list
GUI yes or no
Options:
--gui switch on the gui. [default: False]
--kind=KIND the Kind of the image to be created. [default: vbox]
"""
from __future__ import print_function
from docopt import docopt
import hostlist
from cloudmesh_base.util import path_expand, banner
import os
import sh
import cloudmesh
# definitions = ["~/veewee", "$CLOUDMESH/images/veewee"]
definitions = ["$CLOUDMESH/images/veewee"]
def not_implemented():
    """Print a standard not-yet-implemented error message."""
    message = "ERROR: not yet implemented"
    print(message)
def cm_image_command(arguments):
    """Execute the cm-image subcommand selected by the docopt arguments.

    Subcommands: version, info, build OS, register OS.
    """
    path = path_expand(definitions[0])

    if arguments["version"]:
        print(cloudmesh.__version__)

    elif arguments["info"]:

        banner("info")

        banner("System", c='-')
        print("Kind: ", arguments['--kind'])
        print("Path: ", path)
        print("Version:", cloudmesh.__version__)

        banner("List of templates", c='-')

        for definition in definitions:
            try:
                path = path_expand(definition)
                if os.path.exists(path):
                    os.system("cd '%s' ; veewee vbox list" % path)
                else:
                    print("WARNING: path", path, "does not exist")
            # "except KeyError, key" is Python-2-only syntax; "as" works
            # on Python 2.6+ and Python 3.
            except KeyError as key:
                print('WARNING: no environment variable called', key, 'found')
        print()
        print("To build one, please use one of the")
        print()
        print("    cm-image build OS")
        print()
        print("Next you need to register the image")
        print()
        print("    cm-image register OS")
        print()
        print("where OS is one of the labels listed above.")
        print()

    elif arguments["build"]:

        banner("build")
        system_name = arguments["OS"]
        # veewee runs headless unless the user asked for a GUI.
        if arguments['--gui']:
            gui = ""
        else:
            gui = '--nogui'
        if arguments['--kind'] == "vbox":
            os.system("cd '%s' ; veewee vbox build '%s' --force %s" %
                      (path, system_name, gui))
            # due to some bug the following does not work
            # os.system("veewee vbox build %s --workdir='%s' --force" % (path,
            # system_name)
        else:
            print("ERROR: wrong options")

    elif arguments["register"]:

        banner("register")
        system_name = arguments["OS"]
        print(system_name, path)
        banner("export image", c="-")
        # Bug fix: the original used "is 'vbox'" — identity comparison on
        # a runtime string from docopt, which is not reliably interned,
        # so the export step was effectively never executed.
        if arguments['--kind'] == 'vbox':
            os.system("cd '%s' ; veewee vbox export '%s'" %
                      (path, system_name))
        banner("add image", c="-")
        os.system("cd '%s' ; vagrant box add '%s' '%s.box'" %
                  (path, system_name, system_name))
def main():
    """Entry point: parse the module docstring with docopt and dispatch."""
    arguments = docopt(__doc__)
    cm_image_command(arguments)
if __name__ == '__main__':
main()
| rajpushkar83/cloudmesh | cloudmesh/image/cm_image.py | Python | apache-2.0 | 3,204 |
import csv
from StringIO import StringIO
from math import ceil
from collections import Mapping, Sequence
def __expand_container(cont, i, j, empty_sym=''):
    """ Expand, if possible, the list of list cont of size (h, k) to a list
        of lists of size (i, j). If the expansion is successful, newly
        created elements are filled with data empty_sym.
    """
    # Widen every existing row to j columns.
    for row in cont:
        missing = j - len(row)
        if missing > 0:
            row.extend(empty_sym for _ in range(missing))
    # Append full-width rows until there are i of them.
    while len(cont) < i:
        cont.append([empty_sym for _ in range(j)])
def __recursive_insert_data(di, data_cont, col_index):
    """ Recursively insert data into data_cont (list of list)
        while visiting the data container di (either a dictionary-like
        container or a list-like container) using DFS.
        The position of data_cont in which the data is insert is
        col_index; if data_cont is not big enough to accommodate the
        data, it will be automatically expanded.
    """
    # Bug fix: a leftover debug "print" statement (Python-2-only syntax)
    # was removed here; it spammed stdout on every recursive call.
    # NOTE(review): str is also a Sequence, so string leaves would recurse
    # indefinitely — this assumes numeric leaf values; confirm.
    if not(isinstance(di, Mapping)) and not(isinstance(di, Sequence)):
        # reached the data, back up a position to insert it in!
        return col_index
    new_col_index = col_index
    # assign progressive index names starting from 0 if di
    # is a list-like object
    di_iter = (di.iteritems() if isinstance(di, Mapping) else enumerate(di))
    for k, v in di_iter:
        # recursively insert data for the sublist of di
        new_col_index = __recursive_insert_data(v, data_cont, new_col_index)
        if new_col_index == col_index:
            # previous iteration has reached the data, better dump!
            __expand_container(data_cont, len(di), col_index + 1)
            for i, elem in enumerate(di):
                data_cont[i][col_index] = elem
            return (col_index + 1)
        else:
            # di contains multiple subheaders, so no dumping
            return new_col_index
def __recursive_build_header(name_di, heads_cont, left, depth):
    """ Recursively detect headers in di. Headers are collected in
        the container heads_cont.
        The container is automatically expanded if needed.
    """
    # Explicit unpack replaces the Python-2-only tuple-parameter syntax;
    # both call sites already pass a single (name, di) tuple.
    (name, di) = name_di
    # Bug fix: the leaf test must mirror __recursive_insert_data. The
    # original used "or", which is true for every plain dict or list
    # (a dict is not a Sequence), so the traversal bailed out at once
    # and no headers were ever collected.
    if not(isinstance(di, Mapping)) and not(isinstance(di, Sequence)):
        return left
    right = left
    di_iter = (di.iteritems() if isinstance(di, Mapping) else enumerate(di))
    for k, v in di_iter:
        right = __recursive_build_header((k, v), heads_cont, right, depth + 1)
    if left == right:
        # Leaf-level column: record the header and advance one column.
        __expand_container(heads_cont, depth + 1, right + 1,)
        heads_cont[depth][right] = name
        right += 1
    elif name is not None:
        # Parent header: centre it above the columns it spans.
        pos = left + (int(ceil(float(right - left) / 2)) - 1)
        heads_cont[depth][pos] = name
    return right
def dict2csv(di, csv_kwargs=None):
    """ Input: a dictionary [of dictionaries]* containing data
               (optional) arguments to control layout of csv file
        Output: a string ready to be written as csv file
    """
    # Collect the data columns via a DFS over the nested container.
    data_rows = []
    __recursive_insert_data(di, data_rows, 0)

    # Build the header rows, then drop the artificial root level.
    header_rows = []
    __recursive_build_header((None, di), header_rows, 0, 0)
    header_rows = header_rows[1:]

    # Render headers followed by data into an in-memory CSV buffer.
    out = StringIO()
    writer = csv.writer(out, **(csv_kwargs or {}))
    writer.writerows(header_rows)
    writer.writerows(data_rows)
    return out.getvalue()
| lucasoldaini/dict2csv | dict2csv.py | Python | mit | 3,606 |
def main(m, n):
    """Build an m-by-n matrix of zeros and print its rendering."""
    from matrix import mat, show
    zero_matrix = mat(m, n, 0)
    # show() already supplies the trailing newline, so suppress print's.
    print(show(zero_matrix), end="")
if __name__ == "__main__" :
import sys
main( *map( int , sys.argv[1:] ) )
| aureooms/mupi | zeros.py | Python | agpl-3.0 | 189 |
"""
This command exports a course from CMS to a git repository.
It takes as arguments the course id to export (i.e MITx/999/2020 ) and
the repository to commit too. It takes username as an option for identifying
the commit, as well as a directory path to place the git repository.
By default it will use settings.GIT_REPO_EXPORT_DIR/repo_name as the cloned
directory. It is branch aware, but will reset all local changes to the
repository before attempting to export the XML, add, and commit changes if
any have taken place.
This functionality is also available as an export view in studio if the giturl
attribute is set and the FEATURE['ENABLE_EXPORT_GIT'] is set.
"""
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
import contentstore.git_export_utils as git_export_utils
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Take a course from studio and export it to a git repository.
    """

    option_list = BaseCommand.option_list + (
        make_option('--username', '-u', dest='user',
                    help=('Specify a username from LMS/Studio to be used '
                          'as the commit author.')),
        make_option('--repo_dir', '-r', dest='repo',
                    help='Specify existing git repo directory.'),
    )

    help = _('Take the specified course and attempt to '
             'export it to a git repository\n. Course directory '
             'must already be a git repository. Usage: '
             ' git_export <course_loc> <git_url>')

    def handle(self, *args, **options):
        """
        Checks arguments and runs export function if they are good
        """
        if len(args) != 2:
            raise CommandError('This script requires exactly two arguments: '
                               'course_loc and git_url')

        # Rethrow GitExportError as CommandError for SystemExit
        try:
            git_export_utils.export_to_git(
                args[0],
                args[1],
                options.get('user', ''),
                # Bug fix: the option's dest is 'repo', so the original
                # options.get('rdir') always returned None and the
                # --repo_dir flag was silently ignored.
                options.get('repo', None)
            )
        except git_export_utils.GitExportError as ex:
            raise CommandError(str(ex))
| XiaodunServerGroup/xiaodun-platform | cms/djangoapps/contentstore/management/commands/git_export.py | Python | agpl-3.0 | 2,288 |
# Uses python3
import sys
def agafa_valoroptim(pesadmes, llistavalors, llistapesos):
    """Return the optimal value of the fractional knapsack.

    pesadmes     -- knapsack capacity (maximum admissible weight)
    llistavalors -- item values
    llistapesos  -- item weights (parallel to llistavalors)

    Greedy strategy: take items in decreasing value-per-weight order,
    taking a fraction of the last item when it does not fit entirely.
    (The original stub computed nothing and returned None because its
    "return valortotal" line was commented out.)
    """
    valortotal = 0.
    capacitat = pesadmes
    # Best value density first.
    items = sorted(zip(llistavalors, llistapesos),
                   key=lambda vp: float(vp[0]) / vp[1],
                   reverse=True)
    for valor, pes in items:
        if capacitat <= 0:
            break
        agafat = min(pes, capacitat)
        valortotal += valor * (float(agafat) / pes)
        capacitat -= agafat
    return valortotal
# def main():
# dadesinicials = list(map(int, sys.stdin.read().split()))
# nombre, pesadmes = dadesinicials[0:2]
# values = dadesinicials[2:(2 * n + 2):2]
# weights = dadesinicials[3:(2 * n + 2):2]
# valoroptim = agafa_valoroptim(pesadmes, pesos, valors)
# print("{:.10f}".format(valoroptim))
def main():
    """Read the item count and capacity, then the (value, weight) pairs,
    from standard input."""
    nombre, pesadmes = map(int, input().split())
    # Debug echo of the problem size and capacity.
    print(nombre, pesadmes)
    llistavalors = [None] * nombre
    llistapesos = [None] * nombre
    for item in range(0,nombre):
        llistavalors[item], llistapesos[item] = map(int, input().split())
    # NOTE(review): the optimal value is never computed or printed here —
    # presumably agafa_valoroptim() was meant to be called; confirm.

main()
| papapep/python | UC_SanDiego/1_AlgorithmicToolbox/Week3/fractional_knapsack.py | Python | gpl-3.0 | 727 |
from bitmovin.resources.models import AbstractModel
from bitmovin.resources import AbstractNameDescriptionResource
from bitmovin.errors import InvalidTypeError
from bitmovin.utils import Serializable
from .encoding_output import EncodingOutput
class Sprite(AbstractNameDescriptionResource, AbstractModel, Serializable):
    """Sprite resource: a sprite image plus VTT file written to one or
    more encoding outputs."""

    def __init__(self, height, width, sprite_name, vtt_name, outputs, distance=None, id_=None, custom_data=None,
                 name=None, description=None):
        super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
        self._outputs = None
        self.height = height
        self.width = width
        self.distance = distance
        self.spriteName = sprite_name
        self.vttName = vtt_name
        # Validate eagerly so a bad "outputs" fails at construction time.
        if outputs is not None and not isinstance(outputs, list):
            raise InvalidTypeError('outputs must be a list')
        self.outputs = outputs

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build a Sprite from an API response dict (alternate constructor)."""
        id_ = json_object['id']
        custom_data = json_object.get('customData')
        width = json_object.get('width')
        height = json_object.get('height')
        distance = json_object.get('distance')
        sprite_name = json_object.get('spriteName')
        vtt_name = json_object.get('vttName')
        outputs = json_object.get('outputs')
        name = json_object.get('name')
        description = json_object.get('description')

        sprite = Sprite(id_=id_, custom_data=custom_data, outputs=outputs, name=name, description=description,
                        height=height, width=width, sprite_name=sprite_name, vtt_name=vtt_name, distance=distance)
        return sprite

    @property
    def outputs(self):
        """List of EncodingOutput objects this sprite is written to."""
        return self._outputs

    @outputs.setter
    def outputs(self, new_outputs):
        # Accepts either EncodingOutput instances or raw dicts (which are
        # parsed into EncodingOutput objects).
        if new_outputs is None:
            return
        if not isinstance(new_outputs, list):
            raise InvalidTypeError('new_outputs has to be a list of EncodingOutput objects')
        if all(isinstance(output, EncodingOutput) for output in new_outputs):
            self._outputs = new_outputs
        else:
            outputs = []
            for json_object in new_outputs:
                output = EncodingOutput.parse_from_json_object(json_object)
                outputs.append(output)
            self._outputs = outputs

    def serialize(self):
        """Serialize base fields plus the outputs list."""
        serialized = super().serialize()
        serialized['outputs'] = self.outputs
        return serialized
| bitmovin/bitmovin-python | bitmovin/resources/models/encodings/sprite.py | Python | unlicense | 2,500 |
class Stack:
    """Downward-growing stack backed by a Mem store.

    ``base`` is the starting position; push() moves ``pos`` down by
    ``wordsize`` units.  Bounds checking is enabled only after
    setSize() has been called.
    """
    def __init__(self, pos, mem, wordsize):
        """ Initialize stack
        """
        self.base = pos
        self.pos = pos
        self.mem = mem
        # None means "unbounded": no overflow/underflow checking.
        self.size = None
        self.wordsize = wordsize

    def getPos(self):
        """ Get current position of stack
        """
        return self.pos

    def relocate(self, pos):
        """ Relocate stack

        >>> from primitives import Mem
        >>> mymem = Mem(100)
        >>> mymem.setRaw(21, 0xFF)
        >>> s = Stack(20, mymem, 4)
        >>> s.push(1)
        >>> s.push(2)
        >>> s.push(3)
        >>> s.push(4)
        >>> s.pop()
        4
        >>> s.relocate(12)
        >>> s.pop()
        2
        >>> s.pop()
        1
        """
        # Note: only the pointers move; previously pushed data stays in mem.
        self.base = pos
        self.pos = pos

    def setSize(self, size):
        """ Set stack size, enables boundary checking
        """
        self.size = size

    def upBoundCheck(self):
        """ Check for upper bound of the stack

        >>> from primitives import Mem
        >>> mymem = Mem(100)
        >>> mymem.setRaw(21, 0xFF)
        >>> s = Stack(20, mymem, 4)
        >>> # Test that we can go beound boundaries
        >>> print '%x' % s.pop()
        ff00
        >>> # Return
        >>> s.push(0)
        >>> s.setSize(16)
        >>> s.pop() #doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        IndexError: Stack underflow!
        >>> s.push(42)
        >>> s.pop()
        42
        """
        if self.size is None:
            return True
        # Popping at (or above) base would read past the stack's start.
        if self.pos >= self.base:
            return False
        return True

    def lowBoundCheck(self):
        """ Check for lower bound of the stack

        >>> from primitives import Mem
        >>> mymem = Mem(100)
        >>> s = Stack(20, mymem, 4)
        >>> s.setSize(16)
        >>> s.push(10)
        >>> s.push(9)
        >>> s.push(8)
        >>> s.push(7)
        >>> s.push(6) #doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        IndexError: Stack overflow!
        """
        if self.size is None:
            return True
        # The stack may grow down to (base - size), exclusive.
        bottom = self.base - self.size
        if self.pos <= bottom:
            return False
        return True

    def push(self, data):
        """ Push data to stack

        >>> from primitives import Mem
        >>> mymem = Mem(100)
        >>> s = Stack(20, mymem, 4)
        >>> s.push(10)
        >>> s.push(9)
        >>> mymem.getData(20-4)
        10
        >>> mymem.getData(20-4*2)
        9
        """
        if not self.lowBoundCheck():
            raise IndexError('Stack overflow!')
        # Move down first, then store at the new position.
        self.pos -= self.wordsize
        self.mem.setData(self.pos, data, self.wordsize)

    def pop(self):
        """ Pop data back from stack

        >>> from primitives import Mem
        >>> mymem = Mem(100)
        >>> s = Stack(20, mymem, 4)
        >>> s.push(10)
        >>> s.push(9)
        >>> s.push(4)
        >>> s.pop()
        4
        >>> s.pop()
        9
        >>> s.pop()
        10
        """
        if not self.upBoundCheck():
            raise IndexError('Stack underflow!')
        # Read at the current position, then move back up.
        data = self.mem.getData(self.pos, self.wordsize)
        self.pos += self.wordsize
        return data
| jroivas/cpus | primitives/stack.py | Python | bsd-3-clause | 3,249 |
from django.db import transaction
from django.core.management.base import NoArgsCommand
from devilry.apps.core.models import Candidate
class Command(NoArgsCommand):
    help = "Sync the cached fields in Candidate with the actual data from User."

    def handle_noargs(self, **options):
        """Re-save every Candidate so its cached user fields are refreshed."""
        verbosity = int(options.get('verbosity', '1'))
        updates = 0
        # One manual transaction around the whole sweep, committed at the end.
        with transaction.commit_manually():
            for candidate in Candidate.objects.all():
                # save() is expected to refresh the cached fields from the
                # related User (see the help text above).
                candidate.save()
                if verbosity > 1:
                    print 'Updated {0}'.format(candidate)
                updates += 1
            transaction.commit()
        if verbosity > 0:
            print 'Successfully updated {0} candidates.'.format(updates)
| vegarang/devilry-django | devilry/apps/superadmin/management/commands/devilry_sync_candidates.py | Python | bsd-3-clause | 750 |
import unittest
import sys
if sys.version_info[0] < 3:
mock_o = '__builtin__.open'
import mock
else:
mock_o = 'builtins.open'
import unittest.mock as mock
from obdlib.obd.pids import Pids
class TestPids(unittest.TestCase):
    """Unit tests for obdlib.obd.pids.Pids."""

    def setUp(self):
        self.pids = Pids()

    def test_set_mode(self):
        # set_mode() must record the mode and return self (fluent API).
        response = self.pids.set_mode(1)
        self.assertEqual(self.pids.mode, 1)
        self.assertIsInstance(response, Pids)

    @mock.patch(mock_o)
    def test_getitem(self, mock_open):
        # Fake the pids definition file: an iterable of tuple literals.
        file = mock.MagicMock(return_value=None)
        file.__enter__.return_value = file
        file.__iter__.return_value = (x for x in ('("a",)', '("b",)'))
        mock_open.return_value = file

        mode = 1
        self.pids.set_mode(mode)
        response = self.pids[0]
        # Indexing must open the per-mode commands file exactly once.
        mock_open.assert_called_once_with('obdlib/obd/commands/pids.{}'.format(mode))
        self.assertIsInstance(response, tuple)
        self.assertEqual(response, ('a',))
# Build and run the suite immediately when this module is executed.
suite = unittest.TestLoader().loadTestsFromTestCase(TestPids)
unittest.TextTestRunner(verbosity=2).run(suite)
| QualiApps/obdlib | tests/test_pids.py | Python | mit | 1,090 |
from SHISO import * | logpai/logparser | logparser/SHISO/__init__.py | Python | mit | 19 |
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Automatically restart the server when a source file is modified.
Most applications should not access this module directly. Instead,
pass the keyword argument ``autoreload=True`` to the
`tornado.web.Application` constructor (or ``debug=True``, which
enables this setting and several others). This will enable autoreload
mode as well as checking for changes to templates and static
resources. Note that restarting is a destructive operation and any
requests in progress will be aborted when the process restarts. (If
you want to disable autoreload while using other debug-mode features,
pass both ``debug=True`` and ``autoreload=False``).
This module can also be used as a command-line wrapper around scripts
such as unit test runners. See the `main` method for details.
The command-line wrapper and Application debug modes can be used together.
This combination is encouraged as the wrapper catches syntax errors and
other import-time failures, while debug mode catches changes once
the server has started.
This module depends on `.IOLoop`, so it will not work in WSGI applications
and Google App Engine. It also will not work correctly when `.HTTPServer`'s
multi-process mode is used.
Reloading loses any Python interpreter command-line arguments (e.g. ``-u``)
because it re-executes Python using ``sys.executable`` and ``sys.argv``.
Additionally, modifying these variables will cause reloading to behave
incorrectly.
"""
# Further patched by Zulip check whether the code we're about to
# reload actually imports before reloading into it. This fixes a
# major development workflow problem, where if one did a `git rebase`,
# Tornado would crash itself by auto-reloading into a version of the
# code that didn't work.
import functools
import importlib
import os
import subprocess
import sys
import traceback
import types
import weakref
from tornado import ioloop, process
from tornado.log import gen_log
try:
import signal
except ImportError:
signal = None
# os.execv is broken on Windows and can't properly parse command line
# arguments and executable name if they contain whitespaces. subprocess
# fixes that behavior.
_has_execv = sys.platform != 'win32'
# Files registered via watch().  NOTE(review): this patched
# _reload_on_update() only scans sys.modules and never consults this set —
# confirm whether watch() support was intentionally dropped.
_watched_files = set()
# Callables registered via add_reload_hook(), run just before re-exec.
_reload_hooks = []
# Set once _reload() has run, so a failed re-exec is not retried forever.
_reload_attempted = False
# IOLoops that already have an autoreload PeriodicCallback attached
# (weak keys, so finished loops can be garbage collected).
_io_loops = weakref.WeakKeyDictionary()
# Zulip patch: set when a changed module has re-imported cleanly, meaning
# it is safe to restart into the new code on the next check.
needs_to_reload = False
def start(io_loop=None, check_time=500):
    """Begins watching source files for changes.

    ``check_time`` is the polling interval in milliseconds.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    io_loop = io_loop or ioloop.IOLoop.current()
    if io_loop in _io_loops:
        # Already watching on this loop; starting twice would double-poll.
        return
    _io_loops[io_loop] = True
    if len(_io_loops) > 1:
        gen_log.warning("tornado.autoreload started more than once in the same process")
    # modify_times is captured by the callback and accumulates the last
    # known mtime of every watched file across polls.
    modify_times = {}
    callback = functools.partial(_reload_on_update, modify_times)
    scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop)
    scheduler.start()
def wait():
    """Block until a watched source file changes, then restart the process.

    Typically placed at the end of a script such as a unit-test runner, so
    the script re-runs whenever a source file is saved (see also the
    command-line interface in `main`).
    """
    loop = ioloop.IOLoop()
    start(loop)
    loop.start()
def watch(filename):
    """Register an extra file for change monitoring.

    Every imported module is watched automatically; use this for
    additional files such as templates or configuration.
    """
    _watched_files.add(filename)
def add_reload_hook(fn):
    """Register a callable to run immediately before the process reloads.

    For open files and sockets, prefer setting the ``FD_CLOEXEC`` flag
    (via `fcntl` or ``tornado.platform.auto.set_close_exec``) over closing
    them from a reload hook.
    """
    _reload_hooks.append(fn)
def _reload_on_update(modify_times):
    """Poll every loaded module for changes; re-exec once changes import cleanly.

    Zulip patch: instead of restarting on the first detected change, each
    changed module is test-imported first (see _check_file); the restart is
    deferred until a poll pass where nothing fails to import.
    """
    global needs_to_reload
    if _reload_attempted:
        # We already tried to reload and it didn't work, so don't try again.
        return
    if process.task_id() is not None:
        # We're in a child process created by fork_processes.  If child
        # processes restarted themselves, they'd all restart and then
        # all call fork_processes again.
        return
    for module in list(sys.modules.values()):
        # Some modules play games with sys.modules (e.g. email/__init__.py
        # in the standard library), and occasionally this can cause strange
        # failures in getattr.  Just ignore anything that's not an ordinary
        # module.
        if not isinstance(module, types.ModuleType):
            continue
        path = getattr(module, "__file__", None)
        if not path:
            continue
        # Watch the .py source, not the cached bytecode.
        if path.endswith(".pyc") or path.endswith(".pyo"):
            path = path[:-1]
        result = _check_file(modify_times, module, path)
        if result is False:
            # If any files errored, we abort this attempt at reloading.
            return
        if result is True:
            # If any files had actual changes that import properly,
            # we'll plan to reload the next time we run with no files
            # erroring.
            needs_to_reload = True
    if needs_to_reload:
        _reload()
def _check_file(modify_times, module, path):
    """Check one file for modification and test-import it if changed.

    Returns None when the file is unchanged (or unreadable, or seen for the
    first time), True when it changed and re-imported cleanly, and False
    when it changed but failed to import (callers abort the reload attempt).
    """
    try:
        modified = os.stat(path).st_mtime
    except Exception:
        # Unreadable/vanished file: treat as unchanged rather than erroring.
        return
    if path not in modify_times:
        # First sighting: record the baseline mtime, report no change.
        modify_times[path] = modified
        return
    if modify_times[path] != modified:
        gen_log.info("%s modified; restarting server", path)
        modify_times[path] = modified
    else:
        return
    if path == __file__ or path == os.path.join(os.path.dirname(__file__),
                                                "event_queue.py"):
        # Assume that the autoreload library itself imports correctly,
        # because reloading this file will destroy its state,
        # including _reload_hooks
        return True
    try:
        # Test-import the changed module; a broken import means the new
        # code would crash the server, so signal the caller to hold off.
        importlib.reload(module)
    except Exception:
        gen_log.error(f"Error importing {path}, not reloading")
        traceback.print_exc()
        return False
    return True
def _reload():
    """Run the reload hooks and replace this process with a fresh one.

    Uses os.execv where available; falls back to spawning a subprocess on
    Windows or on platforms where execv fails in multithreaded processes.
    Does not return.
    """
    global _reload_attempted
    _reload_attempted = True
    for fn in _reload_hooks:
        fn()
    # Make sure any output from reload hooks makes it to stdout.
    sys.stdout.flush()
    if hasattr(signal, "setitimer"):
        # Clear the alarm signal set by
        # ioloop.set_blocking_log_threshold so it doesn't fire
        # after the exec.
        signal.setitimer(signal.ITIMER_REAL, 0, 0)
    # sys.path fixes: see comments at top of file.  If sys.path[0] is an empty
    # string, we were (probably) invoked with -m and the effective path
    # is about to change on re-exec.  Add the current directory to $PYTHONPATH
    # to ensure that the new process sees the same path we did.
    path_prefix = '.' + os.pathsep
    if (sys.path[0] == '' and
            not os.environ.get("PYTHONPATH", "").startswith(path_prefix)):
        os.environ["PYTHONPATH"] = (path_prefix +
                                    os.environ.get("PYTHONPATH", ""))
    if not _has_execv:
        # Windows: execv mangles whitespace in argv, so spawn + exit instead.
        subprocess.Popen([sys.executable] + sys.argv)
        sys.exit(0)
    else:
        try:
            os.execv(sys.executable, [sys.executable] + sys.argv)
        except OSError:
            # Mac OS X versions prior to 10.6 do not support execv in
            # a process that contains multiple threads.  Instead of
            # re-executing in the current process, start a new one
            # and cause the current process to exit.  This isn't
            # ideal since the new process is detached from the parent
            # terminal and thus cannot easily be killed with ctrl-C,
            # but it's better than not being able to autoreload at
            # all.
            # Unfortunately the errno returned in this case does not
            # appear to be consistent, so we can't easily check for
            # this error specifically.
            os.spawnv(os.P_NOWAIT, sys.executable,
                      [sys.executable] + sys.argv)
            # At this point the IOLoop has been closed and finally
            # blocks will experience errors if we allow the stack to
            # unwind, so just exit uncleanly.
            os._exit(0)
| shubhamdhama/zulip | zerver/tornado/autoreload.py | Python | apache-2.0 | 8,826 |
"""Test runner for all Ansible tests."""
from __future__ import annotations
import os
import sys
import typing as t
# This import should occur as early as possible.
# It must occur before subprocess has been imported anywhere in the current process.
from .init import (
CURRENT_RLIMIT_NOFILE,
)
from .util import (
ApplicationError,
display,
)
from .delegation import (
delegate,
)
from .executor import (
ApplicationWarning,
Delegate,
ListTargets,
)
from .timeout import (
configure_timeout,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
from .cli import (
parse_args,
)
from .provisioning import (
PrimeContainers,
)
def main(cli_args=None):  # type: (t.Optional[t.List[str]]) -> None
    """Main program function.

    Parses the command line, configures the global display, runs the
    selected command, and maps application-level exceptions to process
    exit codes (0 warning, 1 error, 2 interrupt, 3 broken pipe).
    """
    try:
        os.chdir(data_context().content.root)
        args = parse_args(cli_args)
        config = args.config(args)  # type: CommonConfig
        display.verbosity = config.verbosity
        display.truncate = config.truncate
        display.redact = config.redact
        display.color = config.color
        display.info_stderr = config.info_stderr
        configure_timeout(config)
        display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
        delegate_args = None
        target_names = None
        try:
            data_context().check_layout()
            args.func(config)
        except PrimeContainers:
            # Containers were primed; nothing further to run.
            pass
        except ListTargets as ex:
            # save target_names for use once we exit the exception handler
            target_names = ex.target_names
        except Delegate as ex:
            # save delegation args for use once we exit the exception handler
            delegate_args = (ex.host_state, ex.exclude, ex.require)
        # Delegation/listing is performed outside the handler so the
        # exception context has been fully unwound first.
        if delegate_args:
            delegate(config, *delegate_args)
        if target_names:
            for target_name in target_names:
                print(target_name)  # info goes to stderr, this should be on stdout
        display.review_warnings()
        config.success = True
    except ApplicationWarning as ex:
        display.warning(u'%s' % ex)
        sys.exit(0)
    except ApplicationError as ex:
        display.error(u'%s' % ex)
        sys.exit(1)
    except KeyboardInterrupt:
        sys.exit(2)
    except BrokenPipeError:
        sys.exit(3)
| mattclay/ansible | test/lib/ansible_test/_internal/__init__.py | Python | gpl-3.0 | 2,387 |
#!/usr/bin/env python
import sys
import subprocess
version = {}

# Banner prepended to the generated file; {0} is this script's name.
header = """/* This file is automatically generated by {0}!
 * Do not edit manually, any manual change will be overwritten.
 */
"""

if len(sys.argv) < 3:
    print("Usage:")
    print("  {0} <infile> <outfile>".format(sys.argv[0]))
    sys.exit(1)

# Get the build repos information.
# Bug fix: under Python 3 check_output() returns bytes; decode before
# slicing, otherwise str slices/joins below would fail or embed b'...'.
identify = subprocess.check_output(["hg", "identify", "-nitb"]).decode()
identify = identify.split()
version['revision'] = identify[0]
version['irevision0'] = "0x" + identify[0][0:8]
version['irevision1'] = "0x" + identify[0][8:12]
version['local_revision'] = identify[1]
version['branch'] = identify[2]
# A tag is only present when the working copy sits exactly on one.
version['tag'] = identify[3] if len(identify) > 3 else ""
# Mercurial marks uncommitted local changes with a trailing '+'.
version['modified'] = 'true' if '+' in version['local_revision'] else 'false'

# Apply information to the file template; 'with' guarantees the handles
# are closed even if the template formatting raises.
with open(sys.argv[1], 'r') as infile, open(sys.argv[2], 'w') as outfile:
    outfile.write(header.format(sys.argv[0], sys.argv[1]))
    outfile.write(infile.read().format(**version))
"""
Capture log messages during test execution, appending them to the
error reports of failed tests.
This plugin implements :func:`startTestRun`, :func:`startTest`,
:func:`stopTest`, :func:`setTestOutcome`, and :func:`outcomeDetail` to
set up a logging configuration that captures log messages during test
execution, and appends them to error reports for tests that fail or
raise exceptions.
"""
import logging
from logging.handlers import BufferingHandler
import threading
from nose2.events import Plugin
from nose2.util import ln, parse_log_level
# Module-level logger for the plugin itself.
log = logging.getLogger(__name__)
# Tells unittest to hide this module's frames from failure tracebacks.
__unittest = True
class LogCapture(Plugin):
    """Capture log messages during test execution"""
    configSection = 'log-capture'
    commandLineSwitch = (None, 'log-capture', 'Enable log capture')
    # Defaults; each may be overridden via the [log-capture] config section.
    logformat = '%(name)s: %(levelname)s: %(message)s'
    logdatefmt = None
    clear = False
    filters = ['-nose']
    def __init__(self):
        # NOTE: self.config is provided by the Plugin base before __init__
        # runs, so reading configuration here is safe.
        self.logformat = self.config.as_str('format', self.logformat)
        self.logdatefmt = self.config.as_str('date-format', self.logdatefmt)
        self.filters = self.config.as_list('filter', self.filters)
        self.clear = self.config.as_bool('clear-handlers', self.clear)
        self.loglevel = parse_log_level(
            self.config.as_str('log-level', 'NOTSET'))
        # Buffer up to 1000 records between tests.
        self.handler = MyMemoryHandler(1000, self.logformat, self.logdatefmt,
                                       self.filters)
    def registerInSubprocess(self, event):
        """Re-register this plugin class in test subprocesses."""
        event.pluginClasses.append(self.__class__)
    def startSubprocess(self, event):
        """Install the capture handler in a freshly started subprocess."""
        self._setupLoghandler()
    def startTestRun(self, event):
        """Set up logging handler"""
        self._setupLoghandler()
    def startTest(self, event):
        """Set up handler for new test"""
        self._setupLoghandler()
    def setTestOutcome(self, event):
        """Store captured log messages in ``event.metadata``"""
        self._addCapturedLogs(event)
    def stopTest(self, event):
        """Clear captured messages, ready for next test"""
        self.handler.truncate()
    def outcomeDetail(self, event):
        """Append captured log messages to ``event.extraDetail``"""
        logs = event.outcomeEvent.metadata.get('logs', None)
        if logs:
            event.extraDetail.append(ln('>> begin captured logging <<'))
            event.extraDetail.extend(logs)
            event.extraDetail.append(ln('>> end captured logging <<'))
    def _setupLoghandler(self):
        """Attach our memory handler to the root logger (idempotently)."""
        # setup our handler with root logger
        root_logger = logging.getLogger()
        if self.clear:
            # Optionally strip all pre-existing handlers everywhere.
            if hasattr(root_logger, "handlers"):
                for handler in root_logger.handlers:
                    root_logger.removeHandler(handler)
            for logger in logging.Logger.manager.loggerDict.values():
                if hasattr(logger, "handlers"):
                    for handler in logger.handlers:
                        logger.removeHandler(handler)
        # make sure there isn't one already
        # you can't simply use "if self.handler not in root_logger.handlers"
        # since at least in unit tests this doesn't work --
        # LogCapture() is instantiated for each test case while root_logger
        # is module global
        # so we always add new MyMemoryHandler instance
        for handler in root_logger.handlers[:]:
            if isinstance(handler, MyMemoryHandler):
                root_logger.handlers.remove(handler)
        root_logger.addHandler(self.handler)
        root_logger.setLevel(self.loglevel)
    def _addCapturedLogs(self, event):
        """Format buffered records and append them to event.metadata['logs']."""
        format = self.handler.format
        records = [format(r) for r in self.handler.buffer]
        if 'logs' in event.metadata:
            event.metadata['logs'].extend(records)
        else:
            event.metadata['logs'] = records
class FilterSet(object):
    """Decide whether a logger name passes a list of filter specs.

    Specs are dotted logger-name prefixes; a leading ``-`` makes a spec
    exclusive.  A name passes when it matches some inclusive spec (or none
    exist) and matches no exclusive spec.
    """

    def __init__(self, filter_components):
        self.inclusive, self.exclusive = self._partition(filter_components)

    @staticmethod
    def _partition(components):
        """Split specs into (inclusive, exclusive) lists, stripping '-'."""
        include, exclude = [], []
        for spec in components:
            if spec.startswith('-'):
                exclude.append(spec[1:])
            else:
                include.append(spec)
        return include, exclude

    def allow(self, record):
        """returns whether this record should be printed"""
        if not self:
            # nothing to filter
            return True
        return self._allow(record) and not self._deny(record)

    @staticmethod
    def _any_match(matchers, record):
        """True when *record* equals, or is a dotted child of, any matcher."""
        return any(record == key or record.startswith(key + '.')
                   for key in matchers)

    def _allow(self, record):
        # No inclusive specs means everything is allowed in.
        return not self.inclusive or self._any_match(self.inclusive, record)

    def _deny(self, record):
        # No exclusive specs means nothing is denied.
        return bool(self.exclusive) and self._any_match(self.exclusive, record)
class MyMemoryHandler(BufferingHandler):
    """Buffering log handler that never auto-flushes and filters by logger name.

    Records accumulate in ``self.buffer`` until truncate() is called; the
    plugin reads the buffer directly when attaching logs to test outcomes.
    """
    def __init__(self, capacity, logformat, logdatefmt, filters):
        BufferingHandler.__init__(self, capacity)
        fmt = logging.Formatter(logformat, logdatefmt)
        self.setFormatter(fmt)
        self.filterset = FilterSet(filters)
    def flush(self):
        # Deliberately a no-op: records must stay buffered until the plugin
        # collects them; BufferingHandler would otherwise discard on overflow.
        pass # do nothing
    def truncate(self):
        # Drop everything captured so far (called between tests).
        self.buffer = []
    def filter(self, record):
        # Filter on the logger name via the configured FilterSet.
        return self.filterset.allow(record.name)
    def __getstate__(self):
        # Threading locks can't be pickled; drop the lock when the handler
        # is serialized for a test subprocess.
        state = self.__dict__.copy()
        del state['lock']
        return state
    def __setstate__(self, state):
        # Recreate the lock dropped in __getstate__.
        self.__dict__.update(state)
        self.lock = threading.RLock()
| leth/nose2 | nose2/plugins/logcapture.py | Python | bsd-2-clause | 5,817 |
import sys
import numpy as np
from copy import copy, deepcopy
import multiprocessing as mp
from numpy.random import shuffle, random, normal
from math import log, sqrt, exp, pi
import itertools as it
from scipy.stats import gaussian_kde, pearsonr
from scipy.stats import ttest_1samp
from itertools import product
# Silence PyCrypto's PowmInsecureWarning when PyCrypto is installed; the
# blanket except keeps this a no-op when the package (or the warning
# class) is absent.
try:
    from Crypto.pct_warnings import PowmInsecureWarning
    import warnings
    warnings.simplefilter("ignore", PowmInsecureWarning)
except:
    pass
# In this work, I am computing transfer entropies
# by, first, discretizing expression values into a given
# number of bins. Using those bins, the probability of a given
# interval is computed, and the joint probability over time
# can also be computed (given two time series).
# Want P(X_t+1, X_k2, Y_k1) * log (P(X_t+1,Y_k1,X_k2)*P(X_t+1)) / (P(X_t+1, X_k2)*P(X_k2,Y_K1))
# just get the joint, then get the others by marginalization
# parameters:
# yk: the markov order for Y = let it be 1
# xk: the markov order for x = let it be 1
# yl: the time delay for y
# xl: the time delay for x
# b : the number of bins
# autoTE is
# FOR TE (Y -> X)
def autoshuff(xy):
    """Pearson correlation of x against a random permutation of y.

    Takes a single ``(x, y)`` tuple so it can be used with ``pool.map``.
    The input series y is copied before shuffling and is never mutated.

    Fix: the original used Python 2 tuple-parameter syntax
    ``def autoshuff((x, y))``, removed by PEP 3113; unpacking inside the
    body is call-compatible and works on both Python 2 and 3.
    """
    x, y = xy
    permutedY = deepcopy(y)
    shuffle(permutedY)
    return pearsonr(x, permutedY)[0]
def autoCorr(x,y,reps1, cpus):
    """Observed Pearson correlation of x,y plus a permutation null.

    Returns a list: [observed r] followed by reps1 correlations of x
    against shuffled copies of y, computed on a pool of `cpus` workers.
    """
    pool = mp.Pool(cpus)
    observed = pearsonr(x,y)[0]
    # Each worker receives the same (x, y) pair and shuffles its own copy.
    permutedList = it.repeat( (x,y), reps1)
    permutedCor = pool.map(autoshuff, permutedList)
    pool.close()
    return([observed] + permutedCor)
def geneindex(gene, genes):
    """Index of the first entry of *genes* containing *gene* as a substring.

    Returns -1 when no entry matches.  Note this is substring containment,
    not equality, matching the original lookup semantics.
    """
    for idx, entry in enumerate(genes):
        if gene in entry:
            return idx
    return -1
def prepGeneDataGG(dats, genes, g1, g2):
    """Extract and normalize the expression series for genes g1 -> g2.

    Each series is mean-centred and scaled by max(1, peak deviation).
    Returns ``([], [])`` when either gene is not found.

    Fix: wrap map() in list() so the conversion also works under
    Python 3, where map() is lazy and np.array(map_obj) does not build
    a numeric array.
    """
    i = geneindex(g1, genes)  # from
    j = geneindex(g2, genes)  # to
    if (i > -1 and j > -1):
        x = np.array(list(map(float, dats[i])))  # from
        y = np.array(list(map(float, dats[j])))  # to
        x = (x-x.mean())/max(1,(x-x.mean()).max())
        y = (y-y.mean())/max(1,(y-y.mean()).max())
        return((x,y))
    else:
        return( ([],[]) )
def corEdges(exprfile, genefile, fileout, reps, cpus, g1, g2):
    """Compute the g1->g2 permutation correlation and write one TSV line.

    Output columns: g1, g2, observed r, then `reps` permuted correlations.
    """
    genes = open(genefile,'r').read().strip().split("\n")
    dat = open(exprfile,'r').read().strip().split("\n")
    dats = map(lambda x: x.split("\t"), dat)
    fout = open(fileout,'w')
    (fromx,toy) = prepGeneDataGG(dats, genes, g1, g2)
    res0 = autoCorr(fromx,toy,reps, cpus)
    fout.write(g1 +"\t"+ g2 +"\t"+ "\t".join(map(str,res0)) +"\n")
    fout.close()
def maxLagCorEdges(exprfile, genefile, fileout, reps, cpus, ylmax, g1, g2):
    """Scan lags 0..ylmax for the strongest g1->g2 correlation; write one line.

    Output columns: g1, g2, best lag, best correlation, then the last
    permutation result vector.

    Bug fix: the original assigned an unused ``maxTE`` variable and never
    updated ``maxCorr``, so the best correlation was always reported as
    0.0; ``res0`` could also be unbound if every iteration raised.
    """
    genes = open(genefile,'r').read().strip().split("\n")
    dat = open(exprfile,'r').read().strip().split("\n")
    dats = [line.split("\t") for line in dat]
    fout = open(fileout,'w')
    (fromx,toy) = prepGeneDataGG(dats, genes, g1, g2)
    maxCorr = 0.0
    maxLag = 0.0
    res0 = []
    for yl in range(0,(ylmax+1)):
        # NOTE(review): yl is not applied to shift either series, so every
        # iteration computes the same statistic -- confirm intended lagging.
        try:
            res0 = autoCorr(fromx,toy,reps, cpus)
            if (res0[0] > maxCorr):
                maxCorr = res0[0]
                maxLag = yl
        except Exception:
            e = sys.exc_info()
            sys.stderr.write(str(e)+"\n")
    fout.write(g1 +"\t"+ g2 +"\t"+ str(maxLag) +"\t"+ str(maxCorr) +"\t"+ "\t".join(map(str,res0)) +"\n")
    fout.close()
def main(argv):
    """CLI entry point.

    argv: [prog, expr-file, gene-file, out-file, #permutations, #cpus,
    source-gene, target-gene].  Runs maxLagCorEdges with a maximum lag of 6.
    """
    exprfile, genefile, fileout = argv[1], argv[2], argv[3]
    reps, cpus = int(argv[4]), int(argv[5])
    g1, g2 = argv[6], argv[7]
    maxLagCorEdges(exprfile, genefile, fileout, reps, cpus, 6, g1, g2)

if __name__ == "__main__":
    main(sys.argv)
#pref="/Users/davidlgibbs/Dropbox/Research/Projects/Influence_Maximization_Problem/EserData/"
#pref = "/users/dgibbs/EserData/"
#genes = pref +"yeast_array_genesymbols.csv"
#gexpr = pref +"Eser_Averaged_Expression.txt"
#tout = "/Users/davidlgibbs/Desktop/x.txt"
#corEdges(gexpr, genes, tout, 20, 2, "YOX1", "MBP1")
| Gibbsdavidl/miergolf | src/corEdges.py | Python | bsd-3-clause | 3,914 |
"""
Tests whether the serializers work properly
"""
import os
import numpy as np
from noxer.serializers import FolderDatasetReader
class TestSerializers:
    """Exercise FolderDatasetReader against the bundled test_data folder."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_folder_dataset(self):
        """Read the folder dataset and spot-check labels, shapes and means."""
        folder = os.path.join('test_data', 'folder_dataset')
        dataset = FolderDatasetReader(folder)
        X, Y = dataset.read()
        # check that all expected labels are present
        categories = ['class img', 'class_img_2', 'class_json', 'class_wav']
        for c in categories:
            if not c in Y:
                raise ValueError("The category %s was not read!" % c)
        # check that data is read correctly
        # NOTE(review): exact float equality on np.mean is brittle across
        # platforms/decoders -- consider np.isclose if this ever flakes.
        for x, y in zip(X, Y):
            if y == "class img":
                assert x.shape == (32, 32, 3)
                assert np.mean(x) == 228.400390625
            if y == "class_img_2":
                assert x.shape == (26, 32, 3)
                assert np.mean(x) == 196.75480769230768
            if y == "class_wav":
                assert x.shape == (1, 41294)
assert np.mean(x)*1e+6 == 67.19572411384434 | iaroslav-ai/noxer | noxer/tests/test_serializers.py | Python | mit | 1,124 |
from setuptools import setup, find_packages
# Packaging metadata for the spotify_connect_scrobbler distribution;
# the entry points expose the scrobbler and its auth helper as console
# commands.
setup(
    name='spotify_connect_scrobbler',
    version='0.1',
    license='MIT',
    packages=find_packages(),
    install_requires=['click', 'python-dateutil', 'requests'],
    entry_points={
        'console_scripts': [
            'scrobbler=spotify_connect_scrobbler.scrobbler:main',
            'scrobbler-auth=spotify_connect_scrobbler.auth:main']
    }
)
| jeschkies/spotify-connect-scrobbler | setup.py | Python | mit | 407 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import svgwrite
# Sample dictionary
# Params can be any ot this: 'Box', 'Slider', 'Enum', 'Enum2', 'Bool', 'Text', 'Button'
# Params general structure is: ('name', 'value', 'type of param')
# Enum param: (['t1', 't2', 't3', 't4'], ['t2', 't4'], 'Enum')
# For Button, 'value' is the height of the button
# For more than one slider value in a row: (['A', 'B', 'C'], ['35', '45', '63'], 'Slider')
# You can combine in same row two types: 'Box', 'Enum2', 'Bool', 'Text' and 'Button':
# (['name1', 'value1', 'type1'], ['name2', 'value2', 'type2'], 'Combine'),
# If value of an input is empty, only socket and name are displayed
# Example node specification consumed by draw_node(); demonstrates every
# supported parameter type, including 'Combine' rows that pair two widgets.
circle_def = {'name': 'Example',
              'outputs': [('Vertices', '', 'VerticesSocket'),
                          ('Edges', '', 'StringsSocket'),
                          ('Matrix', '', 'MatrixSocket'),],
              'params': [('Text box', '', 'Box'),
                         ('Slider', '35', 'Slider'),
                         (['t1', 't2', 't3', 't4'], ['t2', 't4'], 'Enum'),
                         ('Enum', 'test', 'Enum2'),
                         ('Boolean', True, 'Bool'),
                         ('Text example:', '', 'Text'),
                         ('Button Test', '60', 'Button'),
                         (['A', 'B', 'C'], ['35', '45', '63'], 'Slider'),
                         (['Test', '', 'Box'], ['Testing', True, 'Bool'], 'Combine'),
                         (['Testing', '', 'Box'], ['Testing', '', 'Enum2'], 'Combine'),],
              'inputs': [('Number', '1.00', 'StringsSocket'),
                         ('Vertices', '', 'VerticesSocket'),
                         ('Matrix', '', 'MatrixSocket'),]
              }
# Defaults: colour palette mimicking the Blender node editor theme.
col_background = '#a7a7a7'
col_header = '#707070'
col_stroke = '#000'
col_darktext = 'black'
col_whitetext = 'white'
col_slider = '#1a1a1a'
col_arrows = '#777777'
col_boolean = '#414141'
col_enum = '#414141'
col_active = '#5781c3'
col_box = '#bfbfbf'
col_button = '#838383'
# Socket colours by socket type.
VerticesSocket = '#e59933'
StringsSocket = '#99ff99'
MatrixSocket = '#33cccc'
# Overall node width in SVG user units.
width = 365
def slider(dwg, parameter, pos, width):
    """Draw a slider row: pill-shaped track, label(s), value(s) and arrows.

    parameter = (name(s), value(s), 'Slider'); when the value is a list,
    several equal-width sub-sliders share the row, separated by thin lines.
    pos is the vertical centre of the row.
    """
    dwg.add(dwg.rect(insert=(20, pos-17), size=(width-40, 34), rx=18, ry=18, fill=col_slider, stroke=col_stroke, stroke_width=1))
    if type(parameter[1]) is list:
        # Multiple values: split the track into len(values) segments.
        x = (width-40)/len(parameter[1])
        for i, v in enumerate(parameter[0]):
            dwg.add(dwg.text(v+':', insert=(50+x*i, pos+7), fill=col_whitetext, font_size=20))
        for i, v in enumerate(parameter[1]):
            dwg.add(dwg.text(v, insert=(x*i+x-10, pos+7), fill=col_whitetext, font_size=20, text_anchor='end'))
            # Left/right arrow glyphs at the segment edges.
            dwg.add(dwg.path("M "+str(30+x*i)+" "+str(pos)+" l 0 0 l 10 8 l 0 -16 M "+str(x*i+x+10)+" "+str(pos)+" l 0 0 l -10 8 l 0 -16", fill=col_arrows))
        for i in range(len(parameter[1])-1):
            # Divider lines between segments.
            dwg.add(dwg.line(start=(20+x*(i+1), pos+17), end=(20+x*(i+1), pos-17), stroke=col_background, stroke_width=0.5))
    else:
        # Single value: arrows at both ends, label left, value right.
        dwg.add(dwg.path("M 30 "+str(pos)+" l 0 0 l 10 8 l 0 -16 M "+str(width-30)+" "+str(pos)+" l 0 0 l -10 8 l 0 -16", fill=col_arrows))
        dwg.add(dwg.text(parameter[0]+':', insert=(50, pos+7), fill=col_whitetext, font_size=20))
        dwg.add(dwg.text(parameter[1], insert=(width-50, pos+7), fill=col_whitetext, font_size=20, text_anchor='end'))
def boolean(dwg, parameter, pos, width):
    """Draw a checkbox row: small square, label, and a check mark when set.

    parameter = (label, bool_value, 'Bool'); pos is the row's vertical centre.
    """
    label, checked = parameter[0], parameter[1]
    dwg.add(dwg.rect(insert=(20, pos-14), size=(28, 28), rx=4, ry=4,
                     fill=col_boolean, stroke=col_stroke, stroke_width=0.8))
    dwg.add(dwg.text(label, insert=(60, pos+7), fill=col_darktext, font_size=20))
    if checked:
        dwg.add(dwg.text(u'\u2713', insert=(20, pos+12), fill=col_whitetext, font_size=40))
def enum(dwg, parameter, pos, width):
    """Draw an expanded enum row: equal-width segments, active ones highlighted.

    parameter = (all_items, active_items, 'Enum').  Each item gets a segment;
    segments whose item appears in active_items are filled with col_active
    (rounded paths for the first/last segment to match the pill outline).
    """
    dwg.add(dwg.rect(insert=(20, pos-17), size=(width-40, 34), rx=8, ry=8, fill=col_enum, stroke=col_stroke, stroke_width=0.8))
    x = (width-40)/len(parameter[0])
    for i, v in enumerate(parameter[0]):
        for j in parameter[1]:
            if v == j:
                if i == 0:
                    # First segment: rounded on the left edge only.
                    dwg.add(dwg.path("M 28 "+str(pos-16.6)+" c -4 0 -7.6 3 -7.6 8.2 l 0 18 c 0 4 3.8 7 8 7 l "+str(x-8)+" 0 l 0 -33.2 z ", fill=col_active, stroke='none', stroke_width=0.8))
                elif i == (len(parameter[0])-1):
                    # Last segment: rounded on the right edge only.
                    dwg.add(dwg.path("M "+str(x*i+20)+" "+str(pos-16.6)+" l "+str(x-8)+" 0 c 5 0 7.6 3.6 7.6 8 l 0 18 c 0 4 -3.5 7.2 -8 7.2 l "+str(-x+8)+" 0 z ", fill=col_active, stroke='none', stroke_width=0.8))
                else:
                    # Interior segments: plain rectangles.
                    dwg.add(dwg.rect(insert=(20+x*i, pos-16.5), size=(x, 33), fill=col_active, stroke='none', stroke_width=0.8))
    for i, v in enumerate(parameter[0]):
        dwg.add(dwg.text(v, insert=(20+x/2+x*i, pos+7), fill=col_whitetext, font_size=20, text_anchor='middle'))
    for i in range(len(parameter[0])-1):
        # Separator lines between segments.
        dwg.add(dwg.line(start=(20+x*(i+1), pos+17), end=(20+x*(i+1), pos-17), stroke=col_stroke, stroke_width=0.8))
def enum2(dwg, parameter, pos, width):
    """Draw a dropdown-style enum: label on the left, value box with up/down arrows.

    parameter = (label, current_value, 'Enum2'); the widget occupies the
    right half of the row.
    """
    dwg.add(dwg.text(parameter[0]+':', insert=(width/2-20, pos+7), fill=col_darktext, font_size=20, text_anchor='end'))
    dwg.add(dwg.rect(insert=(width/2, pos-17), size=(width/2-20, 34), rx=8, ry=8, fill=col_enum, stroke=col_stroke, stroke_width=0.8))
    dwg.add(dwg.text(parameter[1], insert=(width/2+20, pos+7), fill=col_whitetext, font_size=20))
    # Up/down triangles at the right edge of the value box.
    dwg.add(dwg.path("M "+str(width-35)+" "+str(pos-12)+" l 0 0 l 5 9 l -10 0 z M "+str(width-35)+" "+str(pos+12)+" l 0 0 l 5 -9 l -10 0 z", fill=col_arrows))
def box(dwg, parameter, pos, width):
    """Draw a text-box row: rounded rectangle with the label rendered inside."""
    label = parameter[0]
    dwg.add(dwg.rect(insert=(20, pos-17), size=(width-40, 34), rx=8, ry=8,
                     fill=col_box, stroke=col_stroke, stroke_width=1))
    dwg.add(dwg.text(label, insert=(40, pos+7), fill=col_darktext, font_size=20))
def text(dwg, parameter, pos, width):
    """Draw a plain text label with no widget chrome around it."""
    label = parameter[0]
    dwg.add(dwg.text(label, insert=(30, pos+7), fill=col_darktext, font_size=20))
def button(dwg, parameter, pos, width):
    """Draw a push button: a rounded rectangle of the requested height with a
    centred label.

    parameter = (label, height_as_string, 'Button'); pos is the row baseline.
    """
    # Hoisted: the original converted parameter[1] to int three times.
    h = int(parameter[1])
    dwg.add(dwg.rect(insert=(20, pos+17-h), size=(width-40, h), rx=8, ry=8,
                     fill=col_button, stroke=col_stroke, stroke_width=1))
    dwg.add(dwg.text(parameter[0], insert=(width/2, pos+24-h/2),
                     fill=col_darktext, font_size=20, text_anchor='middle'))
def combine(dwg, parameter, pos, width):
    """Draw two widgets side by side on one row.

    parameter = (left_spec, right_spec, 'Combine') where each spec is
    (name, value, type) with type in {'Box', 'Enum2', 'Bool', 'Text',
    'Button'}.  Backgrounds are drawn first, then each half's content.
    NOTE(review): the single-element-list wrappers (col_1, col_2, a) look
    like a workaround for conditional-expression precedence; they carry no
    other meaning.
    """
    col_1 = [col_box if parameter[0][2] == 'Box' else col_enum if parameter[0][2] == 'Enum2' else col_button]
    col_2 = [col_box if parameter[1][2] == 'Box' else col_enum if parameter[1][2] == 'Enum2' else col_button]
    col_text = [[col_whitetext if parameter[0][2] == 'Enum2' else col_darktext], [col_whitetext if parameter[1][2] == 'Enum2' else col_darktext]]
    # Widget types that get their own background fill.
    mix = ['Box', 'Enum2', 'Button']
    if parameter[0][2] in mix and parameter[1][2] in mix:
        # Both halves have backgrounds: draw two half-pills that meet
        # flush in the middle (rounded only on the outer corners).
        dwg.add(dwg.path("M 20 "+str(pos-9)+" c 0 -4 4 -8 8 -8 l "+str(width/2-28)+" 0 l 0 34 l -"+str(width/2-28)+" 0 c -4 0 -8 -4 -8 -8 z", fill=col_1[0], stroke=col_stroke, stroke_width=0.8))
        dwg.add(dwg.path("M "+str(width-20)+" "+str(pos-9)+" c 0 -4 -4 -8 -8 -8 l -"+str(width/2-28)+" 0 l 0 34 l "+str(width/2-28)+" 0 c 4 0 8 -4 8 -8 z", fill=col_2[0], stroke=col_stroke, stroke_width=0.8))
    else:
        # Only one half has a background: a standalone rounded rectangle.
        if parameter[0][2] in mix:
            dwg.add(dwg.rect(insert=(20, pos-17), size=(width/2-30, 34), rx=8, ry=8, fill=col_1[0], stroke=col_stroke, stroke_width=1))
        elif parameter[1][2] in mix:
            dwg.add(dwg.rect(insert=(width/2, pos-17), size=(width/2-20, 34), rx=8, ry=8, fill=col_2[0], stroke=col_stroke, stroke_width=1))
    # Now draw each half's foreground; i selects the left (0) or right (1)
    # half and offsets x positions by (width/2-20)*i.
    elem = [parameter[0][2], parameter[1][2]]
    for i, v in enumerate(elem):
        if v == 'Box' or v == 'Enum2' or v == 'Text':
            dwg.add(dwg.text(parameter[i][0], insert=(30+(width/2-20)*i, pos+7), fill=col_text[i][0], font_size=20))
            if v == 'Enum2':
                # Up/down triangles at the right edge of the half.
                dwg.add(dwg.path("M "+str(width/2-15+(width/2-20)*i)+" "+str(pos-12)+" l 0 0 l 5 9 l -10 0 z M "+str(width/2-15+(width/2-20)*i)+" "+str(pos+12)+" l 0 0 l 5 -9 l -10 0 z", fill=col_arrows))
        elif v == 'Button':
            dwg.add(dwg.text(parameter[i][0], insert=(width/4+10+(width/2-20)*i, pos+7), fill=col_text[i][0], font_size=20, text_anchor='middle'))
        elif v == 'Bool':
            dwg.add(dwg.rect(insert=(20+i*(width/2-20), pos-14), size=(28, 28), rx=4, ry=4, fill=col_boolean, stroke=col_stroke, stroke_width=0.8))
            dwg.add(dwg.text(parameter[i][0], insert=(60+i*(width/2-20), pos+7), fill=col_darktext, font_size=20))
            if parameter[i][1]:
                dwg.add(dwg.text(u'\u2713', insert=(20+i*(width/2-20), pos+12), fill=col_whitetext, font_size=40))
# Dispatch table: parameter type string -> row-drawing function.
# draw_node() looks widgets up here instead of using an if/elif chain.
methods = {'Bool': boolean,
           'Enum': enum,
           'Enum2': enum2,
           'Box': box,
           'Slider': slider,
           'Text': text,
           'Button': button,
           'Combine': combine}
def draw_node(node):
    """Render a node specification dict to '<name>.svg'.

    node must provide 'name', 'outputs', 'params' and 'inputs'; outputs and
    inputs are (label, value, socket_type) tuples, params are dispatched to
    the drawing functions via the ``methods`` table.

    Bug fix: the body used to sit inside ``for i in node:``, which rebuilt
    and re-saved the identical drawing once per dictionary key; a single
    pass produces the same file.
    """
    name = node['name']
    outputs = node['outputs']
    params = node['params']
    inputs = node['inputs']
    # 40 px per row plus header/footer margin; buttons may be taller.
    height = (len(outputs) + len(params) + len(inputs))*40 + 70
    for i in params:
        if i[2] == 'Button':
            height += int(i[1])-40
    dwg = svgwrite.Drawing(name+'.svg', profile='tiny')
    # Node body, header bar, title and the orange header triangle.
    dwg.add(dwg.rect(insert=(0, 0), size=(width, height), rx=18.5, ry=18.5, fill=col_background, stroke=col_stroke, stroke_width=2))
    dwg.add(dwg.path("M 18.7,0.99 c -10.1,0 -17.7,7.7 -17.7,17.7 l 0,21.8 "+str(width-2)+",0 0,-21.9 c 0,-10.0 -7.6,-17.6 -17.7,-17.6 z", fill=col_header))
    dwg.add(dwg.text(name, insert=(40, 30), fill='black', font_size=22))
    dwg.add(dwg.path("M 12 10 l 0 0 l 18 0 l -9 22 z", fill='#c46127'))
    y = 40
    # Output sockets on the right edge, labels anchored to their left.
    for v in outputs:
        y += 40
        col = (VerticesSocket if v[2] == 'VerticesSocket'
               else MatrixSocket if v[2] == 'MatrixSocket'
               else StringsSocket)
        dwg.add(dwg.circle(center=(width, y), r=10, fill=col, stroke=col_stroke, stroke_width=1))
        dwg.add(dwg.text(v[0], insert=(width-25, y+5), fill='black', font_size=20, text_anchor='end'))
    # Parameter rows, dispatched by type; buttons consume extra height.
    for v in params:
        y += int(v[1])+6 if v[2] == 'Button' else 40
        if v[2] in methods:
            methods[v[2]](dwg, v, y, width)
    # Input sockets on the left edge; inputs with a value render a slider.
    for v in inputs:
        y += 40
        col = (VerticesSocket if v[2] == 'VerticesSocket'
               else MatrixSocket if v[2] == 'MatrixSocket'
               else StringsSocket)
        dwg.add(dwg.circle(center=(0, y), r=10, fill=col, stroke=col_stroke, stroke_width=1))
        if len(v[1]) == 0:
            dwg.add(dwg.text(v[0], insert=(30, y+7), fill=col_darktext, font_size=20))
        else:
            slider(dwg, v, y, width)
    dwg.save()
#draw_node(circle_def)
| taxpon/sverchok | utils/sv_draw_svg_node.py | Python | gpl-3.0 | 11,478 |
#/****************************************************************************
# Copyright 2008, Colorado School of Mines and others.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#****************************************************************************/
# This Jython script is set up to demonstrate the application of SteerablePyramid
# on 2D and 3D images.
# Author: John Mathewson, Colorado School of Mines
# Version: 2008.10.07
import sys
from math import *
from java.lang import *
from java.util import *
from java.nio import *
from javax.swing import *
from java.io import *
from edu.mines.jtk.awt import *
from edu.mines.jtk.dsp import *
from edu.mines.jtk.io import *
from edu.mines.jtk.mosaic import *
from edu.mines.jtk.util import *
from edu.mines.jtk.util.ArrayMath import *
# Legacy Jython/Python-2.2 compatibility: boolean names defined as ints.
# NOTE(review): this is a syntax error on Python 3 -- this script targets
# the Jython interpreter bundled with the Mines JTK.
True = 1
False = 0
#############################################################################
# parameters
# no. of samples in x1 and x2 directions for synthetic 2D timeslice image
nr1 = 500
nr2 = 500
# no. of samples in x1, x2, and x3 directions for synthetic 3D timeslice image
n3d1 = 101
n3d2 = 101
n3d3 = 101
# plot appearance settings
fontSize = 24
width = 1000
height = 1000
widthColorBar = 200
# output directory for png plots and input directory for binary data
pngDir = "./png/"
dataDir = "./data/"
# shared pyramid builder (ka=0.5, kb=1.0 band limits)
spyr = SteerablePyramid(0.5,1.0)
#############################################################################
# functions
# Uncomment one of the following lines in main to run.
def main(args):
    """Entry point: uncomment exactly one demo call below to run it."""
    #makePyramidandPlotBasisImages2D("win34nodecim_ts170.dat")
    #makePyramidandPlotBasisImages3D("win34_decim101.dat")
    #makePyramidSteerSumandCascadePlot2D("win34nodecim_ts170.dat")
    #SubtractPlanesthenHighlightChannels3D("win34_decim101a.dat")
    return
def makePyramidandPlotBasisImages2D(filename):
    """Build a 2D steerable pyramid and plot input, basis images,
    reconstruction, and the reconstruction residual."""
    x = readImage2D(filename)
    plot(x,0.0125,)
    pyr = spyr.makePyramid(x)
    nlev = len(pyr)
    ndir = len(pyr[0])
    # Plot every directional basis image of every level except the last
    # (lowpass) level.
    for lev in range(nlev-1):
        for dir in range(ndir):
            plot(pyr[lev][dir],0.00125,)
    y = spyr.sumPyramid(1,pyr)
    plot(y,0.0125,)
    # Residual: input minus reconstruction.
    sub(x,y,y)
    plot(y,0.0125,)
def makePyramidandPlotBasisImages3D(filename):
    """3D analogue of the 2D basis-image demo, using slice plots."""
    x = readImage3D(filename)
    sliceplot(x,0.0125,)
    pyr = spyr.makePyramid(x)
    nlev = len(pyr)
    ndir = len(pyr[0])
    # Plot every directional basis volume of every non-lowpass level.
    for lev in range(nlev-1):
        for dir in range(ndir):
            sliceplot(pyr[lev][dir],0.00125,)
    y = spyr.sumPyramid(1,pyr)
    sliceplot(y,0.0125,)
    # Residual: input minus reconstruction.
    sub(x,y,y)
    sliceplot(y,0.0125,)
def makePyramidSteerSumandCascadePlot2D(filename):
    """Cascade five passes of pyramid build / attribute steering / sum,
    plotting the progressively smoothed 2D image after each pass."""
    x = readImage2D(filename)
    plot(x,0.0125,)
    y = copy(x)
    for i in range(0,5,1):
        pyr = spyr.makePyramid(y)
        attr = spyr.estimateAttributes(2.0,pyr)
        spyr.steerScale(0,50.0,0.5,attr,pyr)
        y = spyr.sumPyramid(1,pyr)
        plot(y,0.0125,)
def SubtractPlanesthenHighlightChannels3D(filename):
  """Smooth locally-planar features of a 3D image, subtract them, then
  enhance locally-linear features with thresholding; a slice plot is
  shown after each stage.

  filename -- name of an n3d1*n3d2*n3d3 float image file under dataDir
  """
  x = readImage3D(filename)
  sliceplot(x,0.0100,)
  y = copy(x)
  # Smooth locally-planar and plot (three cascaded passes).
  for i in range(0,3,1):
    pyr = spyr.makePyramid(y)
    attr = spyr.estimateAttributes(0,2.0,pyr)
    spyr.steerScale(0,0,50.0,0.5,attr,pyr)
    y = spyr.sumPyramid(1,pyr)
    sliceplot(y,0.0100,)
  # Subtract smoothed planes (in place into y) and plot.
  sub(x,y,y)
  sliceplot(y,0.0100,)
  # Smooth locally-linear, threshold and plot.
  # NOTE(review): first steerScale argument switches mode 0 -> 1 and the
  # second becomes 99 — presumably linear-feature steering with a
  # threshold percentile; confirm against SteerablePyramid docs.
  pyr = spyr.makePyramid(y)
  attr = spyr.estimateAttributes(1,2.0,pyr)
  spyr.steerScale(1,99,50.0,0.3,attr,pyr)
  y = spyr.sumPyramid(0,pyr)
  sliceplot(y,0.0100,)
def readImage2D(infile):
  """Read an nr1-by-nr2 big-endian float image from dataDir+infile and
  print its min/max as a sanity check."""
  fileName = dataDir+infile
  ais = ArrayInputStream(fileName,ByteOrder.BIG_ENDIAN)
  x = zerofloat(nr1,nr2)
  ais.readFloats(x)
  ais.close()
  print "x min =",min(x)," max =",max(x)
  return x
def readImage3D(infile):
  """Read an n3d1*n3d2*n3d3 big-endian float image from dataDir+infile
  and print its min/max as a sanity check."""
  fileName = dataDir+infile
  ais = ArrayInputStream(fileName,ByteOrder.BIG_ENDIAN)
  x = zerofloat(n3d1,n3d2,n3d3)
  ais.readFloats(x)
  ais.close()
  print "x min =",min(x)," max =",max(x)
  return x
#############################################################################
# plots functions
def plot(f,clip=0.0,png=None):
  """Display 2D array f in an x1-down panel.

  clip -- when nonzero, clip symmetrically to [-clip,clip]; otherwise
          show the full amplitude range (0th..100th percentile)
  png  -- optional base name under pngDir for saving the frame
  """
  n1 = len(f[0])
  n2 = len(f)
  pan = panel()
  s1 = Sampling(n1,1.0,0.0)
  s2 = Sampling(n2,1.0,0.0)
  pv = pan.addPixels(s1,s2,f)
  if clip != 0.0:
    pv.setClips(-clip,clip)
  else:
    pv.setPercentiles(0.0,100.0)
  pv.setInterpolation(PixelsView.Interpolation.LINEAR)
  frame(pan,png)
def sliceplot(x,clip=0.0,png=None):
  """Show three orthogonal middle slices of 3D array x in a 2x2 panel.

  Indexing assumes x[i3][i2][i1]. Two of the slices reverse the x1 axis
  for display — presumably so depth/time increases downward; TODO confirm
  the intended orientation conventions.
  """
  np3 = len(x)
  np2 = len(x[0])
  np1 = len(x[0][0])
  # Slice at the middle x3 index, x1 reversed.
  islice = zerofloat(np2, np1)
  for i2 in range(np2):
    for i1 in range(np1):
      islice[i1][i2] = x[(int)(np3/2.0)][i2][np1-1-i1]
  # Slice at the middle x2 index, x1 reversed.
  xslice = zerofloat(np3, np1)
  for i2 in range(np3):
    for i1 in range(np1):
      xslice[i1][i2] = x[i2][(int)(np2/2.0)][np1-1-i1]
  # Slice at the middle x1 index.
  tslice = zerofloat(np2, np3)
  for i2 in range(np3):
    for i1 in range(np2):
      tslice[i2][i1] = x[i2][i1][(int)(np1/2.0)]
  pp = PlotPanel(2, 2)
  p1 = pp.addPixels(1, 0, islice)
  p2 = pp.addPixels(0, 0, tslice)
  p3 = pp.addPixels(1, 1, xslice)
  #p1.setPercentiles(1,99)
  #p2.setPercentiles(1,99)
  #p3.setPercentiles(1,99)
  p1.setClips(-clip,clip)
  p2.setClips(-clip,clip)
  p3.setClips(-clip,clip)
  frame(pp,png)
def panel():
  """Return a new plot panel oriented with x1 down and x2 to the right."""
  return PlotPanel(PlotPanel.Orientation.X1DOWN_X2RIGHT)
def frame(panel,png=None):
  """Wrap a panel in a sized, visible PlotFrame and return it.

  png -- optional base name; when given (and pngDir is set) the frame is
         also saved as pngDir/<png>.png
  """
  # Local is named 'pf' so it does not shadow this function's own name.
  pf = PlotFrame(panel)
  pf.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE)
  pf.setFontSize(fontSize)
  pf.setSize(width,height)
  pf.setVisible(True)
  if png and pngDir:
    pf.paintToPng(100,6,pngDir+"/"+png+".png")
  return pf
#############################################################################
# Do everything on Swing thread.
class RunMain(Runnable):
  """Runnable adapter so main() executes on the Swing event thread."""
  def run(self):
    main(sys.argv)
SwingUtilities.invokeLater(RunMain())
| askogvold/jtk | src/demo/jython/edu/mines/jtk/dsp/SteerablePyramidDemo.py | Python | apache-2.0 | 6,124 |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Lithomop3d by Charles A. Williams
# Copyright (c) 2003-2005 Rensselaer Polytechnic Institute
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The function of this code is to call the elastic and time-dependent solution
# drivers. To do this, a number of previously-defined parameters need to be
# bundled into lists. This portion of the code requires access to all of the
# information previously defined in Lithomop3d_scan.py and Lithomop3d_setup.py.
#
from pyre.components.Component import Component
class Lithomop3d_run(Component):

  """Pyre component that drives the Lithomop3d solution.

  Bundles the pointers/values produced by Lithomop3d_setup (plus the
  analysis type from Lithomop3d_scan's inventory) and feeds them to the
  f77-backed drivers: optional gravitational prestress computation, the
  elastic solution, and the time-dependent (viscous) solution.
  """

  # Attribute names passed positionally (after the stiffness matrix A) to
  # every f77 solver driver, in the exact order the drivers expect them.
  # Keeping the order in ONE list removes the maintenance hazard of the
  # same ~85-argument list being hand-written three times.
  _solverArgNames = (
    'pointerToBextern', 'pointerToBtraction', 'pointerToBgravity',
    'pointerToBconcForce', 'pointerToBintern', 'pointerToBresid',
    'pointerToBwink', 'pointerToBwinkx', 'pointerToDispVec',
    'pointerToDprev', 'pointerToListArrayNforce', 'pointerToListArrayGrav',
    'pointerToX', 'pointerToD', 'pointerToDeld', 'pointerToDcur',
    'pointerToId', 'pointerToIwink', 'pointerToWink',
    'pointerToListArrayNsysdat', 'pointerToListArrayIddmat',
    'pointerToIbond', 'pointerToBond', 'pointerToDx', 'pointerToDeldx',
    'pointerToDxcur', 'pointerToDiforc', 'pointerToIdx',
    'pointerToIwinkx', 'pointerToWinkx', 'pointerToIdslp',
    'pointerToIpslp', 'pointerToIdhist', 'pointerToFault',
    'pointerToNfault', 'pointerToDfault', 'pointerToTfault',
    'pointerToS', 'pointerToStemp', 'pointerToState', 'pointerToDstate',
    'pointerToState0', 'pointerToDmat', 'pointerToIens', 'pointerToLm',
    'pointerToLmx', 'pointerToLmf', 'pointerToIvfamily',
    'pointerToListArrayNpar', 'pointerToIelno', 'pointerToIside',
    'pointerToIhistry', 'pointerToPres', 'pointerToPdir',
    'pointerToListArrayPropertyList', 'pointerToMaterialModelInfo',
    'pointerToGauss', 'pointerToSh', 'pointerToShj',
    'pointerToListArrayElementTypeInfo', 'pointerToHistry',
    'pointerToListArrayRtimdat', 'pointerToListArrayNtimdat',
    'pointerToListArrayNvisdat', 'pointerToMaxstp', 'pointerToDelt',
    'pointerToAlfa', 'pointerToMaxit', 'pointerToNtdinit',
    'pointerToLgdef', 'pointerToUtol', 'pointerToFtol', 'pointerToEtol',
    'pointerToItmax', 'pointerToListArrayRgiter', 'pointerToSkew')

  # Tail of the common argument list; the viscous driver inserts
  # pointerToIprint between _solverArgNames and these.
  _solverTailNames = (
    'pointerToListArrayNcodat', 'pointerToListArrayNunits',
    'pointerToListArrayNprint', 'pointerToIstatout', 'pointerToNstatout',
    'asciiOutputFile', 'plotOutputFile', 'ucdOutputRoot')

  # Values imported from Lithomop3d_setup that are not part of the common
  # solver argument list (sizes, PETSc logging stages/events, matrix A).
  _extraSetupNames = (
    'memorySize', 'intSize', 'doubleSize', 'numberTimeStepGroups',
    'prestressAutoComputeInt', 'pointerToIprint',
    'autoprestrStage', 'elasticStage', 'viscousStage', 'iterateEvent',
    'A')

  def initialize(self, scanner, setup):
    """Import run-time information from Lithomop3d_scan and Lithomop3d_setup.

    scanner -- Lithomop3d_scan component; only inventory.analysisType is
               needed, since everything else was imported into (and
               possibly modified by) Lithomop3d_setup.
    setup   -- Lithomop3d_setup component holding all arrays/pointers.
    """
    lm3dscan = scanner
    lm3dsetup = setup

    print("")
    print("Hello from lm3drun.initialize (begin)!")
    print("Importing information from other modules:")

    self.analysisType = lm3dscan.inventory.analysisType

    # Copy every pointer/value needed by the solver drivers verbatim.
    for name in (self._solverArgNames + self._solverTailNames
                 + self._extraSetupNames):
      setattr(self, name, getattr(lm3dsetup, name))

    print("")
    print("Hello from lm3drun.initialize (end)!")
    return

  def _solverArgs(self, stage, includeIprint=False):
    """Assemble the positional argument list for one f77 solver driver.

    stage         -- PETSc logging stage appended (with iterateEvent) at
                     the end of the list
    includeIprint -- True only for the viscous driver, which additionally
                     takes pointerToIprint right after pointerToSkew
    """
    args = [self.A]
    args.extend(getattr(self, name) for name in self._solverArgNames)
    if includeIprint:
      args.append(self.pointerToIprint)
    args.extend(getattr(self, name) for name in self._solverTailNames)
    args.append(stage)
    args.append(self.iterateEvent)
    return args

  def run(self):
    """Run prestress (optional), elastic, and time-dependent solutions."""
    import lithomop3d

    print("")
    print("Hello from lm3drun.run (begin)!")
    print("Beginning problem solution:")

    # Output approximate memory usage of the f77 arrays.
    self.memorySizeMB = self.memorySize/(1024.0*1024.0)
    print("")
    print("Approximate memory allocation for f77 arrays (MB): %g" % self.memorySizeMB)

    if self.analysisType == "elasticSolution" or self.analysisType == "fullSolution":
      # Compute gravitational prestresses, if requested.
      if self.prestressAutoComputeInt == 1:
        lithomop3d.autoprestr(*self._solverArgs(self.autoprestrStage))
      # Perform elastic solution, if requested (i.e. for either analysis
      # type above).
      lithomop3d.elastc(*self._solverArgs(self.elasticStage))

    # Perform time-dependent solution, if requested.
    if self.analysisType == "fullSolution" and self.numberTimeStepGroups > 1:
      lithomop3d.viscos(*self._solverArgs(self.viscousStage, includeIprint=True))

    lithomop3d.destroyPETScMat(self.A)
    lithomop3d.PetscFinalize()

    print("")
    print("Hello from lm3drun.run (end)!")
    return

  def __init__(self):
    """Register this component with pyre as ('lm3drun', 'solver')."""
    Component.__init__(self, "lm3drun", "solver")
    print("")
    print("Hello from lm3drun.__init__!")
    return
# version
# $Id: Lithomop3d_run.py,v 1.17 2005/05/03 18:47:35 willic3 Exp $
# End of file
| geodynamics/lithomop | lithomop3d/lithomop3d/Lithomop3d_run.py | Python | mit | 20,127 |
#!/usr/bin/env python3
import unittest
from datetime import date
from pycaching.errors import ValueError
from pycaching import Trackable
from pycaching import Geocaching
from pycaching import Point
class TestProperties(unittest.TestCase):
    """Unit tests for the simple accessor properties of Trackable."""

    def setUp(self):
        # One Geocaching session and one fully populated Trackable shared
        # by every test method.
        self.gc = Geocaching()
        details = {
            "name": "Testing",
            "type": "Travel Bug",
            "location": Point(),
            "owner": "human",
            "description": "long text",
            "goal": "short text",
        }
        self.t = Trackable("TB123AB", self.gc, **details)

    def test___str__(self):
        self.assertEqual(str(self.t), "TB123AB")

    def test___eq__(self):
        other = Trackable("TB123AB", self.gc)
        self.assertEqual(self.t, other)

    def test_geocaching(self):
        # A trackable must be bound to a Geocaching instance.
        with self.assertRaises(ValueError):
            Trackable("TB123AB", None)

    def test_tid(self):
        self.assertEqual(self.t.tid, "TB123AB")
        with self.subTest("filter invalid"):
            with self.assertRaises(ValueError):
                self.t.tid = "xxx"

    def test_name(self):
        self.assertEqual(self.t.name, "Testing")

    def test_type(self):
        self.assertEqual(self.t.type, "Travel Bug")

    def test_owner(self):
        self.assertEqual(self.t.owner, "human")

    def test_description(self):
        self.assertEqual(self.t.description, "long text")

    def test_goal(self):
        self.assertEqual(self.t.goal, "short text")
| kumy/pycaching | test/test_trackable.py | Python | lgpl-3.0 | 1,338 |
from tests.support import platform_name
from webdriver.transport import Response
from tests.support.asserts import assert_error, assert_success
from tests.support.inline import inline
def navigate_to(session, url):
    """Issue the WebDriver 'Navigate To' command for *url* and return the
    raw transport response."""
    endpoint = "session/{session_id}/url".format(**vars(session))
    body = {"url": url}
    return session.transport.send("POST", endpoint, body)
def test_null_parameter_value(session, http):
    """A request body of null (instead of a JSON object) must be rejected
    with an 'invalid argument' error."""
    path = "/session/{session_id}/url".format(**vars(session))
    with http.post(path, None) as response:
        assert_error(Response.from_http(response), "invalid argument")
def test_null_response_value(session):
    """A successful navigation responds with a null (None) value."""
    response = navigate_to(session, inline("<div/>"))
    assert assert_success(response) is None
def test_no_browsing_context(session, closed_window):
    """Navigating while the current window is closed must yield a
    'no such window' error."""
    response = navigate_to(session, "foo")
    assert_error(response, "no such window")
def test_file_protocol(session, server_config):
    """Navigating to a privileged file:// document succeeds and the session
    stays attached to the same browsing context."""
    # tests that the browsing context remains the same
    # when navigated privileged documents
    path = server_config["doc_root"]
    if platform_name == "windows":
        # Convert the path into the format eg. /c:/foo/bar
        path = "/{}".format(path.replace("\\", "/"))
    url = u"file://{}".format(path)
    response = navigate_to(session, url)
    assert_success(response)
    # Some implementations normalize directory URLs with a trailing slash;
    # accept either form before comparing.
    if session.url.endswith('/'):
        url += '/'
    assert session.url == url
| nnethercote/servo | tests/wpt/web-platform-tests/webdriver/tests/navigate_to/navigate.py | Python | mpl-2.0 | 1,392 |
__author__ = 'Erik'
import random
import pygame
import os
from pygame import *
#Class for handling the game music, plays a random song from a list
class GameMusic:
    """Handles the game music: plays random songs from a playlist."""

    # Was originally longer, but due to shortage of space only two songs remain.
    songList = [os.path.join('sounds', "msboy.mp3"),
                os.path.join('sounds', "glimma.mp3")]

    def __init__(self):
        pass

    def playRandomSong(self):
        """Stop any current music, start a random song (repeated 5 times),
        and queue four more random picks.

        Uses random.choice on the shared module RNG instead of creating a
        freshly seeded random.Random() for every pick.
        """
        pygame.mixer.music.stop()
        pygame.mixer.music.load(random.choice(self.songList))
        pygame.mixer.music.play(5)
        for _ in range(4):
            pygame.mixer.music.queue(random.choice(self.songList))
        pygame.mixer.music.set_volume(0.7)

    def stopMusic(self):
        """Stop playback immediately."""
        pygame.mixer.music.stop()
| Ramqvist/SpaceMania | view/MusicHandler.py | Python | apache-2.0 | 1,117 |
# -*- coding: utf-8 -*-
from openerp.osv import osv, fields
class users(osv.osv):
    """Extend res.users with a list of accounts restricted for that user."""
    _name = 'res.users'
    _inherit = 'res.users'
    _columns = {
        # Many2many through relation table 'account_security_account_users'
        # (user_id -> account_id): accounts listed here, and information
        # related to them, are only visible per this restriction setting.
        'account_x_ids': fields.many2many('account.account', 'account_security_account_users','user_id',
        'account_id', 'Restricted Accounts', help="This accounts and the information related to it will be only visible for users where you specify that they can see them setting this same field."),
    }
| elwan/Odoo | account_security/res_users.py | Python | gpl-2.0 | 498 |
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""upgradewallet RPC functional test
Test upgradewallet RPC. Download node binaries:
test/get_previous_releases.py -b v0.19.1 v0.18.1 v0.17.2 v0.16.3 v0.15.2
Only v0.15.2 and v0.16.3 are required by this test. The others are used in feature_backwards_compatibility.py
"""
import os
import shutil
import struct
from io import BytesIO
from test_framework.bdb import dump_bdb_kv
from test_framework.messages import deser_compact_size, deser_string
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_is_hex_string,
sha256sum_file,
)
# CKeyMetadata record version that carries the derivation path fields.
UPGRADED_KEYMETA_VERSION = 12

def deser_keymeta(f):
    """Deserialize one wallet CKeyMetadata record from stream *f*.

    Returns (version, create_time, keypath_string, seed_id, fingerprint,
    path_len, path, has_key_origin); the last three are only populated for
    version-12 (upgraded) records and default to 0/[]/False otherwise.
    """
    ver, create_time = struct.unpack('<Iq', f.read(12))
    kp_str = deser_string(f)
    seed_id = f.read(20)
    fpr = f.read(4)
    path_len = 0
    path = []
    has_key_orig = False
    if ver == UPGRADED_KEYMETA_VERSION:
        path_len = deser_compact_size(f)
        path = [struct.unpack('<I', f.read(4))[0] for _ in range(path_len)]
        has_key_orig = bool(f.read(1))
    return ver, create_time, kp_str, seed_id, fpr, path_len, path, has_key_orig
class UpgradeWalletTest(BitcoinTestFramework):
    """Exercise the upgradewallet RPC against wallets created by old releases."""

    def set_test_params(self):
        # Node 0 runs the current version; nodes 1 and 2 run v0.16.3 (HD)
        # and v0.15.2 (non-HD) binaries respectively.
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [
            ["-addresstype=bech32", "-keypool=2"], # current wallet version
            ["-usehd=1", "-keypool=2"], # v0.16.3 wallet
            ["-usehd=0", "-keypool=2"] # v0.15.2 wallet
        ]
        self.wallet_names = [self.default_wallet_name, None, None]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
        self.skip_if_no_bdb()
        self.skip_if_no_previous_releases()

    def setup_network(self):
        # No P2P connections; blocks are propagated manually (see
        # dumb_sync_blocks) because the old binaries cannot sync normally.
        self.setup_nodes()

    def setup_nodes(self):
        self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
            None,
            160300,
            150200,
        ])
        self.start_nodes()
        self.import_deterministic_coinbase_privkeys()

    def dumb_sync_blocks(self):
        """
        Little helper to sync older wallets.
        Notice that v0.15.2's regtest is hardforked, so there is
        no sync for it.
        v0.15.2 is only being used to test for version upgrade
        and master hash key presence.
        v0.16.3 is being used to test for version upgrade and balances.
        Further info: https://github.com/bitcoin/bitcoin/pull/18774#discussion_r416967844
        """
        node_from = self.nodes[0]
        v16_3_node = self.nodes[1]
        to_height = node_from.getblockcount()
        height = self.nodes[1].getblockcount()
        for i in range(height, to_height+1):
            b = node_from.getblock(blockhash=node_from.getblockhash(i), verbose=0)
            v16_3_node.submitblock(b)
        assert_equal(v16_3_node.getblockcount(), to_height)

    def test_upgradewallet(self, wallet, previous_version, requested_version=None, expected_version=None):
        """Run upgradewallet and check the reported versions and message."""
        unchanged = expected_version == previous_version
        new_version = previous_version if unchanged else expected_version if expected_version else requested_version
        assert_equal(wallet.getwalletinfo()["walletversion"], previous_version)
        assert_equal(wallet.upgradewallet(requested_version),
            {
                "wallet_name": "",
                "previous_version": previous_version,
                "current_version": new_version,
                "result": "Already at latest version. Wallet version unchanged." if unchanged else "Wallet upgraded successfully from version {} to version {}.".format(previous_version, new_version),
            }
        )
        assert_equal(wallet.getwalletinfo()["walletversion"], new_version)

    def test_upgradewallet_error(self, wallet, previous_version, requested_version, msg):
        """Run upgradewallet expecting error *msg* and an unchanged version."""
        assert_equal(wallet.getwalletinfo()["walletversion"], previous_version)
        assert_equal(wallet.upgradewallet(requested_version),
            {
                "wallet_name": "",
                "previous_version": previous_version,
                "current_version": previous_version,
                "error": msg,
            }
        )
        assert_equal(wallet.getwalletinfo()["walletversion"], previous_version)

    def run_test(self):
        self.nodes[0].generatetoaddress(101, self.nodes[0].getnewaddress())
        self.dumb_sync_blocks()
        # Sanity check the test framework:
        res = self.nodes[0].getblockchaininfo()
        assert_equal(res['blocks'], 101)
        node_master = self.nodes[0]
        v16_3_node = self.nodes[1]
        v15_2_node = self.nodes[2]
        # Send coins to old wallets for later conversion checks.
        v16_3_wallet = v16_3_node.get_wallet_rpc('wallet.dat')
        v16_3_address = v16_3_wallet.getnewaddress()
        node_master.generatetoaddress(101, v16_3_address)
        self.dumb_sync_blocks()
        v16_3_balance = v16_3_wallet.getbalance()
        self.log.info("Test upgradewallet RPC...")
        # Prepare for copying of the older wallet
        node_master_wallet_dir = os.path.join(node_master.datadir, "regtest/wallets", self.default_wallet_name)
        node_master_wallet = os.path.join(node_master_wallet_dir, self.default_wallet_name, self.wallet_data_filename)
        v16_3_wallet = os.path.join(v16_3_node.datadir, "regtest/wallets/wallet.dat")
        v15_2_wallet = os.path.join(v15_2_node.datadir, "regtest/wallet.dat")
        split_hd_wallet = os.path.join(v15_2_node.datadir, "regtest/splithd")
        self.stop_nodes()
        # Make split hd wallet
        self.start_node(2, ['-usehd=1', '-keypool=2', '-wallet=splithd'])
        self.stop_node(2)

        def copy_v16():
            """Replace node 0's default wallet with the v0.16.3 wallet file."""
            node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet()
            # Copy the 0.16.3 wallet to the last Bitcoin Core version and open it:
            shutil.rmtree(node_master_wallet_dir)
            os.mkdir(node_master_wallet_dir)
            shutil.copy(
                v16_3_wallet,
                node_master_wallet_dir
            )
            node_master.loadwallet(self.default_wallet_name)

        def copy_non_hd():
            """Replace node 0's default wallet with the v0.15.2 non-HD wallet."""
            node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet()
            # Copy the 0.15.2 non hd wallet to the last Bitcoin Core version and open it:
            shutil.rmtree(node_master_wallet_dir)
            os.mkdir(node_master_wallet_dir)
            shutil.copy(
                v15_2_wallet,
                node_master_wallet_dir
            )
            node_master.loadwallet(self.default_wallet_name)

        def copy_split_hd():
            """Replace node 0's default wallet with the v0.15.2 split-HD wallet."""
            node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet()
            # Copy the 0.15.2 split hd wallet to the last Bitcoin Core version and open it:
            shutil.rmtree(node_master_wallet_dir)
            os.mkdir(node_master_wallet_dir)
            shutil.copy(
                split_hd_wallet,
                os.path.join(node_master_wallet_dir, 'wallet.dat')
            )
            node_master.loadwallet(self.default_wallet_name)

        self.restart_node(0)
        copy_v16()
        wallet = node_master.get_wallet_rpc(self.default_wallet_name)
        self.log.info("Test upgradewallet without a version argument")
        self.test_upgradewallet(wallet, previous_version=159900, expected_version=169900)
        # wallet should still contain the same balance
        assert_equal(wallet.getbalance(), v16_3_balance)
        copy_non_hd()
        wallet = node_master.get_wallet_rpc(self.default_wallet_name)
        # should have no master key hash before conversion
        assert_equal('hdseedid' in wallet.getwalletinfo(), False)
        self.log.info("Test upgradewallet with explicit version number")
        self.test_upgradewallet(wallet, previous_version=60000, requested_version=169900)
        # after conversion master key hash should be present
        assert_is_hex_string(wallet.getwalletinfo()['hdseedid'])
        self.log.info("Intermediary versions don't effect anything")
        copy_non_hd()
        # Wallet starts with 60000
        assert_equal(60000, wallet.getwalletinfo()['walletversion'])
        wallet.unloadwallet()
        before_checksum = sha256sum_file(node_master_wallet)
        node_master.loadwallet('')
        # Test an "upgrade" from 60000 to 129999 has no effect, as the next version is 130000
        self.test_upgradewallet(wallet, previous_version=60000, requested_version=129999, expected_version=60000)
        wallet.unloadwallet()
        assert_equal(before_checksum, sha256sum_file(node_master_wallet))
        node_master.loadwallet('')
        self.log.info('Wallets cannot be downgraded')
        copy_non_hd()
        self.test_upgradewallet_error(wallet, previous_version=60000, requested_version=40000,
            msg="Cannot downgrade wallet from version 60000 to version 40000. Wallet version unchanged.")
        wallet.unloadwallet()
        assert_equal(before_checksum, sha256sum_file(node_master_wallet))
        node_master.loadwallet('')
        self.log.info('Can upgrade to HD')
        # Inspect the old wallet and make sure there is no hdchain
        orig_kvs = dump_bdb_kv(node_master_wallet)
        assert b'\x07hdchain' not in orig_kvs
        # Upgrade to HD, no split
        self.test_upgradewallet(wallet, previous_version=60000, requested_version=130000)
        # Check that there is now a hd chain and it is version 1, no internal chain counter
        new_kvs = dump_bdb_kv(node_master_wallet)
        assert b'\x07hdchain' in new_kvs
        hd_chain = new_kvs[b'\x07hdchain']
        assert_equal(28, len(hd_chain))
        hd_chain_version, external_counter, seed_id = struct.unpack('<iI20s', hd_chain)
        assert_equal(1, hd_chain_version)
        seed_id = bytearray(seed_id)
        seed_id.reverse()
        old_kvs = new_kvs
        # First 2 keys should still be non-HD
        for i in range(0, 2):
            info = wallet.getaddressinfo(wallet.getnewaddress())
            assert 'hdkeypath' not in info
            assert 'hdseedid' not in info
        # Next key should be HD
        info = wallet.getaddressinfo(wallet.getnewaddress())
        assert_equal(seed_id.hex(), info['hdseedid'])
        assert_equal('m/0\'/0\'/0\'', info['hdkeypath'])
        prev_seed_id = info['hdseedid']
        # Change key should be the same keypool
        info = wallet.getaddressinfo(wallet.getrawchangeaddress())
        assert_equal(prev_seed_id, info['hdseedid'])
        assert_equal('m/0\'/0\'/1\'', info['hdkeypath'])
        self.log.info('Cannot upgrade to HD Split, needs Pre Split Keypool')
        for version in [139900, 159900, 169899]:
            self.test_upgradewallet_error(wallet, previous_version=130000, requested_version=version,
                msg="Cannot upgrade a non HD split wallet from version {} to version {} without upgrading to "
                    "support pre-split keypool. Please use version 169900 or no version specified.".format(130000, version))
        self.log.info('Upgrade HD to HD chain split')
        self.test_upgradewallet(wallet, previous_version=130000, requested_version=169900)
        # Check that the hdchain updated correctly
        new_kvs = dump_bdb_kv(node_master_wallet)
        hd_chain = new_kvs[b'\x07hdchain']
        assert_equal(32, len(hd_chain))
        hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
        assert_equal(2, hd_chain_version)
        assert_equal(0, internal_counter)
        seed_id = bytearray(seed_id)
        seed_id.reverse()
        assert_equal(seed_id.hex(), prev_seed_id)
        # Next change address is the same keypool
        info = wallet.getaddressinfo(wallet.getrawchangeaddress())
        assert_equal(prev_seed_id, info['hdseedid'])
        assert_equal('m/0\'/0\'/2\'', info['hdkeypath'])
        # Next change address is the new keypool
        info = wallet.getaddressinfo(wallet.getrawchangeaddress())
        assert_equal(prev_seed_id, info['hdseedid'])
        assert_equal('m/0\'/1\'/0\'', info['hdkeypath'])
        # External addresses use the same keypool
        info = wallet.getaddressinfo(wallet.getnewaddress())
        assert_equal(prev_seed_id, info['hdseedid'])
        assert_equal('m/0\'/0\'/3\'', info['hdkeypath'])
        self.log.info('Upgrade non-HD to HD chain split')
        copy_non_hd()
        self.test_upgradewallet(wallet, previous_version=60000, requested_version=169900)
        # Check that the hdchain updated correctly
        new_kvs = dump_bdb_kv(node_master_wallet)
        hd_chain = new_kvs[b'\x07hdchain']
        assert_equal(32, len(hd_chain))
        hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
        assert_equal(2, hd_chain_version)
        assert_equal(2, internal_counter)
        # Drain the keypool by fetching one external key and one change key. Should still be the same keypool
        info = wallet.getaddressinfo(wallet.getnewaddress())
        assert 'hdseedid' not in info
        assert 'hdkeypath' not in info
        info = wallet.getaddressinfo(wallet.getrawchangeaddress())
        assert 'hdseedid' not in info
        assert 'hdkeypath' not in info
        # The next addresses are HD and should be on different HD chains
        info = wallet.getaddressinfo(wallet.getnewaddress())
        ext_id = info['hdseedid']
        assert_equal('m/0\'/0\'/0\'', info['hdkeypath'])
        info = wallet.getaddressinfo(wallet.getrawchangeaddress())
        assert_equal(ext_id, info['hdseedid'])
        assert_equal('m/0\'/1\'/0\'', info['hdkeypath'])
        self.log.info('KeyMetadata should upgrade when loading into master')
        copy_v16()
        old_kvs = dump_bdb_kv(v16_3_wallet)
        new_kvs = dump_bdb_kv(node_master_wallet)
        for k, old_v in old_kvs.items():
            if k.startswith(b'\x07keymeta'):
                new_ver, new_create_time, new_kp_str, new_seed_id, new_fpr, new_path_len, new_path, new_has_key_orig = deser_keymeta(BytesIO(new_kvs[k]))
                old_ver, old_create_time, old_kp_str, old_seed_id, old_fpr, old_path_len, old_path, old_has_key_orig = deser_keymeta(BytesIO(old_v))
                assert_equal(10, old_ver)
                if old_kp_str == b"": # imported things that don't have keymeta (i.e. imported coinbase privkeys) won't be upgraded
                    assert_equal(new_kvs[k], old_v)
                    continue
                assert_equal(12, new_ver)
                assert_equal(new_create_time, old_create_time)
                assert_equal(new_kp_str, old_kp_str)
                assert_equal(new_seed_id, old_seed_id)
                assert_equal(0, old_path_len)
                assert_equal(new_path_len, len(new_path))
                assert_equal([], old_path)
                assert_equal(False, old_has_key_orig)
                assert_equal(True, new_has_key_orig)
                # Check that the path is right: rebuild it from the keypath
                # string, applying the hardened-derivation bit for each
                # apostrophe-suffixed component.
                built_path = []
                for s in new_kp_str.decode().split('/')[1:]:
                    h = 0
                    if s[-1] == '\'':
                        s = s[:-1]
                        h = 0x80000000
                    p = int(s) | h
                    built_path.append(p)
                assert_equal(new_path, built_path)
        self.log.info('Upgrading to NO_DEFAULT_KEY should not remove the defaultkey')
        copy_split_hd()
        # Check the wallet has a default key initially
        old_kvs = dump_bdb_kv(node_master_wallet)
        defaultkey = old_kvs[b'\x0adefaultkey']
        self.log.info("Upgrade the wallet. Should still have the same default key.")
        self.test_upgradewallet(wallet, previous_version=139900, requested_version=159900)
        new_kvs = dump_bdb_kv(node_master_wallet)
        up_defaultkey = new_kvs[b'\x0adefaultkey']
        assert_equal(defaultkey, up_defaultkey)
        # 0.16.3 doesn't have a default key
        v16_3_kvs = dump_bdb_kv(v16_3_wallet)
        assert b'\x0adefaultkey' not in v16_3_kvs
# Script entry point.
if __name__ == '__main__':
    UpgradeWalletTest().main()
| Sjors/bitcoin | test/functional/wallet_upgradewallet.py | Python | mit | 16,487 |
def dummy_config():
    """Return a minimal hard-coded configuration dict for tests."""
    main_section = {'server': 'https://test.forge.io/api/'}
    return {'uuid': 'TEST-UUID', 'main': main_section}
#!/usr/bin/python
# CAP1188 8-channel capacitive touch sensor demo for Raspberry Pi (Python 2).
# Pulses the chip's reset line over GPIO, configures it via I2C (smbus),
# prints the chip ID registers, then polls touch/noise status forever.
from smbus import SMBus
import RPi.GPIO as GPIO
import time
#for data input
import sys
from select import select
#i2c CAP1188 address
address = 0x29
# CAP1188 register map (see the Microchip CAP1188 datasheet)
CAP1188_SENINPUTSTATUS = 0x3
CAP1188_SENLEDSTATUS = 0x4
CAP1188_SENSNOISE = 0xA
CAP1188_NOISETHR = 0x38
CAP1188_MTBLK = 0x2A
CAP1188_PRODID = 0xFD
CAP1188_MANUID = 0xFE
CAP1188_STANDBYCFG = 0x41
CAP1188_REV = 0xFF
CAP1188_MAIN = 0x00
CAP1188_MAIN_INT = 0x01
CAP1188_LEDPOL = 0x73
CAP1188_INTENABLE = 0x27
CAP1188_REPRATE = 0x28
CAP1188_LEDLINK = 0x72
CAP1188_SENSITIVITY = 0x1f
CAP1188_CALIBRATE = 0x26
#reset pin BCM#27 on RPi
CAP1188_RESETPIN = 27
#init i2c
b = SMBus(1)
#reset cap1188: pulse the reset line, then let the chip settle
GPIO.setmode(GPIO.BCM)
GPIO.setup(CAP1188_RESETPIN, GPIO.OUT)
GPIO.output(CAP1188_RESETPIN, False)
time.sleep(0.1)
GPIO.output(CAP1188_RESETPIN, True)
time.sleep(0.1)
GPIO.output(CAP1188_RESETPIN, False)
time.sleep(1)
#init cap1188
b.write_byte_data(address, CAP1188_MTBLK, 0)#allow multiple touches
#b.write_byte_data(address, CAP1188_STANDBYCFG, 0x30)#speed up a bit
b.write_byte_data(address, CAP1188_STANDBYCFG, 0xB9) #mode proximity sensor
#b.write_byte_data(address, CAP1188_INTENABLE, 0x05)
b.write_byte_data(address, CAP1188_LEDLINK, 0xff) #Have LEDs follow touches
#b.write_byte_data(address, CAP1188_SENSITIVITY, 0x6f) #reduce sensitivity
#b.write_byte_data(address, CAP1188_SENSITIVITY, 0x2f) #standard sensitivity
b.write_byte_data(address, CAP1188_SENSITIVITY, 0x1f) #max sensitivity
b.write_byte_data(address, CAP1188_CALIBRATE, 0xff) #force recalibration
b.write_byte_data(address, CAP1188_NOISETHR,0x1) #noise threshold level 62.5
time.sleep(1)
#read and print the chip identification registers
b.write_byte(address, CAP1188_PRODID)
print "prodid=",hex(b.read_byte(address))
b.write_byte(address, CAP1188_MANUID)
print "manuid=",hex(b.read_byte(address))
b.write_byte(address, CAP1188_REV)
print "rev=",hex(b.read_byte(address))
#read loop
try:
	while True:
		#input detection: one bit per touched channel
		t1 = b.read_byte_data(address, CAP1188_SENINPUTSTATUS)
		if(t1):
			print("detexed:"+str(t1)+","+'{0:08b}'.format(t1))
#		if (t1 & 4):
#			print "toto"
		#error checking: per-channel noise flags
		t2 = b.read_byte_data(address, CAP1188_SENSNOISE)
		if(t2):
			print "Error: ",t2,'{0:08b}'.format(t2)
		#data entry (disabled debug path for live-tuning sensitivity from stdin)
#		rlist, _, _ = select([sys.stdin], [], [], 0.1)
#		if rlist:
		if 0:
			s = sys.stdin.readline()
			try:
				val=int(s)
				b.write_byte_data(address, CAP1188_SENSITIVITY, val)
				time.sleep(0.05)
			except ValueError:
				print "Entry error: NaN"
			print "Entered:",s,"decoded=",hex(val)
		#reinit sensor: clear the main control/interrupt register
		time.sleep(0.05)
		b.write_byte_data(address, CAP1188_MAIN, 0x00)
finally:
	GPIO.cleanup()
| r0bin-fr/pirok2 | capac.py | Python | gpl-3.0 | 2,732 |
# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.
from node import bin, nullid, nullrev
import util
import struct, zlib
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1
def parse_manifest(mfdict, fdict, lines):
    """Fill mfdict (filename -> binary node) and fdict (filename -> flags)
    from the NUL-delimited text of a manifest revision."""
    for entry in lines.splitlines():
        filename, noderow = entry.split('\0')
        # A row longer than 40 hex digits carries trailing flag characters.
        if len(noderow) <= 40:
            mfdict[filename] = bin(noderow)
        else:
            fdict[filename] = noderow[40:]
            mfdict[filename] = bin(noderow[:40])
def parse_index(data, inline):
    """Parse a revlog index from raw `data`.

    Returns (index, nodemap, cache): index is a list of unpacked entry
    tuples (8 fields; e[7] is the 20-byte node, e[1] is used as the
    compressed length), terminated by a sentinel for the null revision;
    nodemap maps node -> revision number; cache is (0, data) for inline
    revlogs (revision data interleaved with the index), else None.
    """
    def gettype(q):
        # Low 16 bits of the first field hold the entry's type/flags.
        return int(q & 0xFFFF)
    def offset_type(offset, type):
        # Recombine a byte offset and type into the packed first field.
        return long(long(offset) << 16 | type)
    indexformatng = ">Qiiiiii20s12x"
    s = struct.calcsize(indexformatng)
    index = []
    cache = None
    nodemap = {nullid: nullrev}
    n = off = 0
    # if we're not using lazymap, always read the whole index
    l = len(data) - s
    append = index.append
    if inline:
        cache = (0, data)
        while off <= l:
            e = _unpack(indexformatng, data[off:off + s])
            nodemap[e[7]] = n
            append(e)
            n += 1
            if e[1] < 0:
                break
            # inline: skip the compressed data that follows each entry
            off += e[1] + s
    else:
        while off <= l:
            e = _unpack(indexformatng, data[off:off + s])
            nodemap[e[7]] = n
            append(e)
            n += 1
            off += s
    # The first entry's offset field stores the revlog version/flags on
    # disk; force its offset part back to 0.
    e = list(index[0])
    type = gettype(e[0])
    e[0] = offset_type(0, type)
    index[0] = tuple(e)
    # add the magic null revision at -1
    index.append((0, 0, 0, -1, -1, -1, -1, nullid))
    return index, nodemap, cache
def parse_dirstate(dmap, copymap, st):
    """Decode dirstate contents `st`, filling dmap (filename ->
    (state, mode, size, mtime)) and copymap (dest -> source).

    Returns the two 20-byte parent nodes stored at the front of the file.
    """
    parents = [st[:20], st[20: 40]]
    entry_size = struct.calcsize(">cllll")
    offset = 40
    end = len(st)
    while offset < end:
        fixed_end = offset + entry_size
        fields = _unpack(">cllll", st[offset:fixed_end])
        # fields[4] is the length of the variable-size filename that
        # follows the fixed-size portion of the entry.
        offset = fixed_end + fields[4]
        name = st[fixed_end:offset]
        if '\0' in name:
            # Copy entries embed "dest\0source" in the name field.
            name, source = name.split('\0')
            copymap[name] = source
        dmap[name] = fields[:4]
    return parents
| dkrisman/Traipse | mercurial/parsers.py | Python | gpl-2.0 | 2,344 |
from toee import *
from utilities import *
from ed import *
from batch import *
###################################################################
### (18:55 20/04/06) A script written by Glen Wheeler (Ugignadl) for manipulating ToEE files.
### Requested by Cerulean the Blue from Co8.
##
### (13:05 22/04/06) Added a few functions which should enable the implementation of a bag of
### holding. Some of them use the already written functions here (just import this scripts as a library).
##
##
## (08/1/2013) Added Co8 configuration options (Sitra Achara)
##
###################################################################
def boh_newbag(bagcontents=None, bagnum=0, bgfilename='modules\\ToEE\\Bag_of_Holding.mes'):
	""" Create a new Bag of Holding entry and return its line number.

	bagcontents: list/tuple of item proto ints the bag starts with
	(defaults to [11300]).  bagnum: explicit line number to use, or 0 to
	pick the lowest free one; an explicit bagnum that already exists
	raises ValueError.  bgfilename is the shared file holding all bags.
	"""
	# Fix: the old default 'bagcontents=[11300]' was a shared mutable
	# list; any caller mutating it would poison every later bag.
	if bagcontents is None:
		bagcontents = [11300]
	# So we read in all the bags.
	allBagDict = readMes(bgfilename)
	if bagnum:
		if bagnum in allBagDict:
			# Fix: 'raise <string>' is not a valid exception.
			raise ValueError('BadbagnumError: bag %d already exists' % bagnum)
	else:
		# Pick the lowest unused line number.
		bagnum = 1
		while bagnum in allBagDict:
			bagnum += 1
	allBagDict[bagnum] = bagcontents
	# Then write it all back again.
	# writeMes(bgfilename, allBagDict)
	return bagnum
def readMes(mesfile):
	""" Read a .mes file into a dict {line_number: [field, ...]}.

	Lines that are empty or do not start with '{' are treated as
	comments.  Each data line looks like {num}{field}{field}...; the
	fields are returned as the raw strings found between the braces.
	"""
	# Fix: use open() instead of the file() builtin (removed in Python 3;
	# open() is the idiomatic spelling in Python 2 as well).
	mesFile = open(mesfile, 'r')
	mesDict = {}
	for line in mesFile.readlines():
		# Remove whitespace.
		line = line.strip()
		# Ignore empty lines.
		if not line:
			continue
		# Ignore comment lines.
		if line[0] != '{':
			continue
		# Decode the line: '{a}{b}' -> ['{a', '{b'] -> strip the braces.
		line = line.split('}')[:-1]
		for i in range(len(line)):
			line[i] = line[i].strip()
			line[i] = line[i][1:]
		contents = line[1:]
		# Insert the line into the mesDict, keyed by its number.
		mesDict[int(line[0])] = contents
	mesFile.close()
	return mesDict
def writeMes(mesfile, mesDict):
	""" Write mesDict back out as a .mes file, lines sorted by number.

	Comments are not preserved.  Overwrites mesfile if it already
	exists.  Returns 1 on success.
	"""
	# Fix: open() instead of the Python-3-removed file() builtin.
	mesFile = open(mesfile, 'w')
	linenumbers = list(mesDict.keys())
	linenumbers.sort()
	for linenumber in linenumbers:
		fields = mesDict[linenumber]
		# Fix: the old per-element loop compared each value against the
		# LAST value to decide whether to append ', ', so any list whose
		# last element also appeared earlier was written without a
		# separator at that spot.  join() renders every list correctly.
		mesFile.write('{' + str(linenumber) + '}{' + ', '.join(map(str, fields)) + '}\n')
	mesFile.close()
	return 1
def boh_getContents(bagnum, bgfilename='modules\\ToEE\\Bag_of_Holding.mes'):
	""" Return the contents of bag `bagnum` as an ordered list of ints,
	ready to be instantiated as game objects.

	bagnum must be an existing integer line number in bgfilename; a
	missing or non-integer key raises, exactly as before.
	"""
	bags = readMes(bgfilename)
	return __boh_decode(bags[bagnum])
def __boh_decode(bohcontents):
	""" Decode a raw .mes field list (e.g. ['11300, 5']) into ints.

	bohcontents[0] is a comma-delimited string of integers.  Entries
	that fail to parse are left as strings with a warning instead of
	crashing (kept from the original, which used this for test data).
	"""
	# [0] is there to comply with the output of the readMes function.
	l = bohcontents[0].split(', ')
	for i in range(len(l)):
		try:
			l[i] = int(l[i])
		# Fix: narrowed the bare 'except' -- split() always yields
		# strings, so ValueError is the only parse failure possible.
		except ValueError:
			# print-as-function: valid on both Python 2 and 3.
			print("WARNING: NON-INTEGER FOUND IN BAG OF HOLDING!")
			print('Non-integer found is %s' % l[i])
	return l
def _boh_removeitem(bagnum, itemnum, bgfilename='modules\\ToEE\\Bag_of_Holding.mes'):
	""" Remove one instance of itemnum from bag bagnum and save the file.

	Raises (deliberately) if the item is not in the bag.
	"""
	allBagDict = readMes(bgfilename)
	# BUG fix: 'contents' was never assigned, so this function always
	# died with a NameError.  Decode the stored field list first.
	contents = __boh_decode(allBagDict[bagnum])
	contents.remove(itemnum)
	allBagDict[bagnum] = contents
	writeMes(bgfilename, allBagDict)
def _boh_insertitem(bagnum, itemnum, bgfilename='modules\\ToEE\\Bag_of_Holding.mes'):
	""" Append the integer itemnum to the bag bagnum and save the file. """
	allBagDict = readMes(bgfilename)
	# BUG fix: 'contents' was never assigned, so this function always
	# died with a NameError.  Decode the stored field list first.
	contents = __boh_decode(allBagDict[bagnum])
	contents.append(itemnum)
	allBagDict[bagnum] = contents
	writeMes(bgfilename, allBagDict)
###################################################################
#Added by Darmagon #
###################################################################
SPELL_FLAGS_BASE = obj_f_secretdoor_dc
has_obj_list = []
objs_to_destroy_list = []
active_spells = []
holder = OBJ_HANDLE_NULL
holder2 = OBJ_HANDLE_NULL
OSF_IS_IRON_BODY = 1
OSF_IS_TENSERS_TRANSFORMATION = 2
OSF_IS_ANALYZE_DWEOMER = 4 ## also used by ShiningTed for Antidote
OSF_IS_HOLY_SWORD = 8 ## also used by ShiningTed for Foresight
OSF_IS_PROTECTION_FROM_SPELLS = 16
OSF_IS_MORDENKAINENS_SWORD = 32
OSF_IS_FLAMING_SPHERE = 64
OSF_IS_SUMMONED = 128
OSF_IS_HEZROU_STENCH = 256
OSF_IS_TONGUES = 512
OSF_IS_DISGUISE_SELF = 1024
OSF_IS_DEATH_WARD = 2048
ITEM_HOLDER = 1027
def set_spell_flag( obj, flag):
	# OR the given OSF_* bit(s) into obj's spell-flag bitfield (stored in
	# the repurposed secretdoor_dc int field) and return the new value.
	val = obj.obj_get_int(SPELL_FLAGS_BASE)
	obj.obj_set_int(SPELL_FLAGS_BASE, val | flag)
	return obj.obj_get_int(SPELL_FLAGS_BASE)
def get_spell_flags(obj):
	# Raw spell-flag bitfield for obj.
	return obj.obj_get_int(SPELL_FLAGS_BASE)
def is_spell_flag_set(obj, flag):
	# Nonzero when any bit of `flag` is set on obj (int, not strict bool).
	return obj.obj_get_int(SPELL_FLAGS_BASE) & flag
def unset_spell_flag(obj, flag):
	"""Clear the given OSF_* bit(s) on obj; return the new bitfield.

	Fix: uses 'val & ~flag' instead of 'val - flag'.  Subtraction only
	works when every bit of flag is set; for a multi-bit flag that was
	partially set it corrupted unrelated bits of the field.
	"""
	val = obj.obj_get_int(SPELL_FLAGS_BASE)
	if val & flag:
		obj.obj_set_int(SPELL_FLAGS_BASE, val & ~flag)
	return obj.obj_get_int(SPELL_FLAGS_BASE)
def are_spell_flags_null(obj):
	# 1 when no spell flags are set on obj, else 0 (int-style boolean,
	# matching the rest of this module).
	flags = obj.obj_get_int(SPELL_FLAGS_BASE)
	if flags:
		return 0
	return 1
def check_for_protection_from_spells(t_list, check):
	# Scan each target (obj_holder wrappers) for an item of proto 6400
	# carrying OSF_IS_PROTECTION_FROM_SPELLS.  Every carrier has the real
	# item stashed in holder2 and receives a temporary proxy 6400 that
	# grants +8 to all three save types.  Returns 1 if any carrier was
	# found.  State is kept in module globals so
	# replace_protection_from_spells() can undo the swap later.
	global has_obj_list
	global objs_to_destroy_list
	global holder
	global holder2
	holder = game.obj_create(14629, game.party[0].location)
	holder2 = game.obj_create(14629, game.party[0].location)
	ret = 0
	for obj in t_list:
		prot_obj = obj.obj.item_find_by_proto(6400)
		# item_find_by_proto returns the first copy, so park non-flagged
		# copies in `holder` until a flagged one (or none) is found.
		while prot_obj != OBJ_HANDLE_NULL and is_spell_flag_set(prot_obj, OSF_IS_PROTECTION_FROM_SPELLS)==0:
			prot_obj.item_flag_unset(OIF_NO_DROP)
			holder.item_get(prot_obj)
			prot_obj = obj.obj.item_find_by_proto(6400)
		# Return the parked copies and restore their no-drop flag.
		get_back_obj = holder.item_find_by_proto(6400)
		while get_back_obj != OBJ_HANDLE_NULL:
			obj.obj.item_get(get_back_obj)
			get_back_obj.item_flag_set(OIF_NO_DROP)
			get_back_obj = holder.item_find_by_proto(6400)
		if prot_obj != OBJ_HANDLE_NULL:
			ret = 1
			has_obj_list.append(obj.obj)
			prot_obj.item_flag_unset(OIF_NO_DROP)
			holder2.item_get(prot_obj)
			# Hand the carrier a proxy item with the +8 save bonuses
			# (save types 0/1/2 = fortitude/reflex/will).
			new_obj = game.obj_create(6400,game.party[0].location)
			set_spell_flag(new_obj, OSF_IS_PROTECTION_FROM_SPELLS)
			objs_to_destroy_list.append(new_obj)
			new_obj.item_condition_add_with_args('Saving Throw Resistance Bonus', 0, 8)
			new_obj.item_condition_add_with_args('Saving Throw Resistance Bonus', 1, 8)
			new_obj.item_condition_add_with_args('Saving Throw Resistance Bonus', 2, 8)
			obj.obj.item_get(new_obj)
	holder.destroy()
	# holder2 must survive when items were stashed; replace_... frees it.
	if ret == 0:
		holder2.destroy()
	return ret
def replace_protection_from_spells():
	"""Undo check_for_protection_from_spells(): destroy the temporary
	proxy items, hand each stashed real item back to its owner (restoring
	no-drop), and reset the module-level bookkeeping lists."""
	global has_obj_list
	global objs_to_destroy_list
	global holder2
	for obj in objs_to_destroy_list:
		obj.destroy()
	# BUG fix: the original assigned to a misspelled local
	# ('objs_to_destroy'), leaving stale destroyed handles in the global
	# list; a second round-trip would then destroy dead objects.
	objs_to_destroy_list = []
	count = 0
	while count < len(has_obj_list):
		obj_to_get = holder2.item_find_by_proto(6400)
		has_obj_list[count].item_get(obj_to_get)
		obj_to_get.item_flag_set(OIF_NO_DROP)
		count = count + 1
	# (Removed the dead 'holder_objs = []' local from the original.)
	has_obj_list = []
	holder2.destroy()
class obj_holder:
	# Thin wrapper giving object handles a uniform '.obj' attribute so
	# they can be passed around in lists (see build_obj_list()).
	obj = OBJ_HANDLE_NULL
	def __init__(self, obj):
		self.obj = obj
class spell_with_objs_packet:
	"""Bookkeeping record tying a spell to its spell object and the two
	sentinel objects that track it (stored in the active_spells list)."""
	def __init__(self, spell_obj, sentinel1, sentinel2, spell):
		self.spell = spell
		self.spell_obj = spell_obj
		self.sentinel1 = sentinel1
		self.sentinel2 = sentinel2
def is_obj_in_active_spell_list(obj):
	# Return the active-spell packet whose spell object or either
	# sentinel equals obj, or None when nothing matches.
	for packet in active_spells:
		if obj == packet.sentinel1 or obj == packet.sentinel2 or obj == packet.spell_obj:
			return packet
	return None
def get_active_spells():
	# Accessor for the module-level list of live spell packets.
	return active_spells
def append_to_active_spells(spell, spell_obj, sent1, sent2):
	# Register a newly cast spell together with its sentinel objects.
	# NOTE(review): the 'append.txt' writes below look like leftover
	# debug logging -- the file is truncated on every call; consider
	# removing them.
	global active_spells
	new_spell = spell_with_objs_packet(spell_obj,sent1,sent2,spell)
	ofile = open("append.txt", "w")
	ofile.write(str(new_spell.sentinel1) + "\n")
	active_spells.append(new_spell)
	ofile.write(str(game.time.time_in_game())+ "\n")
	ofile.write(str(get_active_spells()) + "\n")
	ofile.close()
def remove_from_active_spells(spell_packet):
	"""Destroy spell_packet's sentinel objects and drop it from the
	active_spells list.  Only the first matching packet is removed."""
	global active_spells
	for idx in range(len(active_spells)):
		if active_spells[idx] == spell_packet:
			active_spells[idx].sentinel1.destroy()
			active_spells[idx].sentinel2.destroy()
			del active_spells[idx]
			break
def build_obj_list(list):
	# Wrap every handle in the given sequence in an obj_holder.
	# (Parameter name 'list' shadows the builtin but is kept for
	# interface compatibility.)
	return [obj_holder(handle) for handle in list]
def find_spell_obj_with_flag( target,item, flag ):
	# Return the first item of proto `item` on target that carries the
	# given spell flag, or OBJ_HANDLE_NULL.  Because item_find_by_proto
	# always returns the first copy, copies are parked one by one in a
	# scratch container until a flagged one turns up, then everything is
	# handed back (with no-drop restored).
	ret = OBJ_HANDLE_NULL
	item_holder = game.obj_create(ITEM_HOLDER, target.location)
	prot_item = target.item_find_by_proto(item)
	while prot_item != OBJ_HANDLE_NULL and ret == OBJ_HANDLE_NULL:
		if is_spell_flag_set(prot_item, flag)!= 0:
			ret = prot_item
		prot_item.item_flag_unset(OIF_NO_DROP)
		item_holder.item_get(prot_item)
		prot_item = target.item_find_by_proto(item)
	# Give everything (including the match, if any) back to the target.
	prot_item = item_holder.item_find_by_proto(item)
	while prot_item!= OBJ_HANDLE_NULL:
		target.item_get(prot_item)
		prot_item.item_flag_set(OIF_NO_DROP)
		prot_item = item_holder.item_find_by_proto(item)
	item_holder.destroy()
	return ret
def destroy_spell_obj_with_flag(target, proto_id, flag):
	# Destroy the first item of proto_id on target that has the given
	# spell flag set.  Returns 1 when one was found and destroyed, 0
	# otherwise.  Same park-and-restore shuffle as
	# find_spell_obj_with_flag() above.
	ret = 0
	item_holder = game.obj_create(ITEM_HOLDER, target.location)
	prot_item = target.item_find_by_proto(proto_id)
	while prot_item != OBJ_HANDLE_NULL:
		if is_spell_flag_set(prot_item, flag)!= 0:
			ret = 1
			print "found it"
			break
		prot_item.item_flag_unset(OIF_NO_DROP)
		item_holder.item_get(prot_item)
		prot_item = target.item_find_by_proto(proto_id)
	if ret == 1:
		prot_item.destroy()
		print "destroyed it"
	# Hand the parked copies back to the target.
	prot_item = item_holder.item_find_by_proto(proto_id)
	while prot_item!= OBJ_HANDLE_NULL:
		target.item_get(prot_item)
		if proto_id == 6400:
			# protection scarabs are flagged no-drop
			prot_item.item_flag_set(OIF_NO_DROP)
		prot_item = item_holder.item_find_by_proto(proto_id)
	item_holder.destroy()
	return ret
def set_blast_delay(num):
	# Legacy stub: the old file-based delayed-blast-fireball handshake
	# (below) has been disabled; kept only so stray callers don't crash.
	#if num >= 0 and num <=5:
	#	ifile = open("delayed_blast_fireball.txt", "w")
	#	ifile.write(str(num))
	#	ifile.close()
	print "someone actually uses set_blast_delay?\n"
	return # was this even used?
def is_in_party(obj):
	# 1 if obj is a current party member, else 0.
	for x in game.party:
		if obj == x:
			return 1
	return 0
def unequip( slot, npc, whole_party = 0):
	# Force the item in the given equipment slot off the npc (or off
	# every party member when whole_party is truthy) and into inventory.
	# No-drop / no-transfer flags are temporarily cleared so the transfer
	# succeeds, then restored afterwards.
	unequip_set = []
	if whole_party:
		unequip_set = game.party
	else:
		unequip_set = [npc]
	for npc2 in unequip_set:
		# i / j remember whether NO_DROP / NO_TRANSFER must be restored.
		i = 0
		j = 0
		item = npc2.item_worn_at(slot)
		if item != OBJ_HANDLE_NULL:
			if item.item_flags_get() & OIF_NO_DROP:
				item.item_flag_unset(OIF_NO_DROP)
				i = 1
			if item.item_flags_get() & OIF_NO_TRANSFER:
				item.item_flag_unset(OIF_NO_TRANSFER)
				j = 1
			# Bounce the item through a scratch container so it leaves
			# the equipment slot, then take it back into inventory.
			holder = game.obj_create(1004, npc2.location)
			holder.item_get(item)
			tempp = npc2.item_get(item)
			pc_index = 0
			while tempp == 0 and pc_index < len(game.party): #this part is insurance against filled up inventory for any PCs
				if game.party[pc_index].type == obj_t_pc:
					tempp = game.party[pc_index].item_get(item)
				pc_index += 1
			if i:
				item.item_flag_set(OIF_NO_DROP)
			if j:
				item.item_flag_set(OIF_NO_TRANSFER)
			holder.destroy()
#	game.particles( "sp-summon monster I", game.party[0] )
#	item = npc.item_worn_at(slot)
#	holder = game.obj_create(1004, npc.location)
#	holder.item_get(item)
#	npc.item_get(item)
#	holder.destroy()
#################################################################
#End added by Darmagon #
#################################################################
#################################################################
#Updated by Shiningted 19/9/9 #
#################################################################
def weap_too_big(weap_user):
	# Strip weapons larger than Medium out of an NPC's hands (slots 3 and
	# 4).  Giants are exempt.
	if weap_user.is_category_type( mc_type_giant ):
		return
	weap_1 = weap_user.item_worn_at(3)
	weap_2 = weap_user.item_worn_at(4)
	# NOTE(review): weap_1 is read without a NULL check, unlike weap_2
	# below -- presumably safe for the callers in use, but verify.
	size_1 = weap_1.obj_get_int(obj_f_size)
	if size_1 > STAT_SIZE_MEDIUM and weap_2 != OBJ_HANDLE_NULL:
		unequip( 3, weap_user)
	if weap_2 != OBJ_HANDLE_NULL: # fix - added OBJ_HANDLE_NULL check
		size_2 = weap_2.obj_get_int(obj_f_size)
		if size_2 > STAT_SIZE_MEDIUM:
			unequip( 4, weap_user)
	return
#################################################################
#End added by Shiningted #
#################################################################
#################################################################
# Added by Cerulean the Blue #
#################################################################
def read_field( object, field ):
	# Raw integer read of an arbitrary object field.
	return object.obj_get_int( field )
def write_field( object, field, value ):
	# Raw integer write of an object field; returns the stored value.
	object.obj_set_int( field, value )
	return object.obj_get_int( field )
def clear_field( object, field ):
	# Zero an object field; returns the (now 0) stored value.
	object.obj_set_int( field, 0 )
	return object.obj_get_int( field )
def GetCritterHandle( spell, critter_name ):
	# Returns a handle that can be used to manipulate the summoned creature object.
	# Finds the first friendly critter of the given name near the spell
	# target that has not been claimed yet, tags it OSF_IS_SUMMONED so it
	# is not returned twice, and returns it (OBJ_HANDLE_NULL otherwise).
	for critter in game.obj_list_vicinity( spell.target_loc, OLC_CRITTERS ):
		if (critter.name == critter_name and not (is_spell_flag_set(critter, OSF_IS_SUMMONED)) and critter.is_friendly(spell.caster)):
			set_spell_flag( critter, OSF_IS_SUMMONED)
			return critter
	return OBJ_HANDLE_NULL
def End_Spell(spell):
	# This is used to forcibly end Co8-made spells that are supposed to
	# be of instantaneous duration.
	# Previous version used Cerulean the Blue's workaround method
	# (commented out below): summoning and destroying a dummy monster.
##	spell.summon_monsters( 1, 14456 )
##	critter = GetCritterHandle( spell, 14456)
##	spell.caster.follower_remove(critter)
##	critter.destroy()
	# proper method:
	spell.spell_end( spell.id,1 ) # this forces the spell_end to work even with a non-empty target list
	return
def Timed_Destroy(obj, time):
	# Schedule obj's destruction `time` milliseconds from now.
	game.timevent_add( destroy, ( obj ), time) # 1000 = 1 second
	return
def Timed_Runoff(obj, runoff_time = 1000, runoff_location = -1):
	# Make obj run off toward runoff_location -- a location value, an
	# [x, y] pair, or -1 for "a few tiles away" -- then hide it (OF_OFF)
	# after runoff_time milliseconds.
	if runoff_location == -1:
		obj.runoff(obj.location-3)
	elif type(runoff_location) == type( obj.location ):
		obj.runoff(runoff_location)
	elif type(runoff_location) == type( [ 1 , 2 ] ):
		obj.runoff( location_from_axis(runoff_location[0], runoff_location[1]) )
	else:
		obj.runoff(obj.location-3)
	game.timevent_add( Timed_Runoff_Set_OF_OFF, ( obj ), runoff_time) # 1000 = 1 second, default
	return
def Timed_Runoff_Set_OF_OFF( obj ):
	# Timed callback: hide the object once it has run off.
	obj.object_flag_set(OF_OFF)
	return
def destroy(obj): # Destroys object. Necessary for time event destruction to work.
	obj.destroy()
	return 1
def StopCombat(obj, flag):
	"""Remove every party member from the AI hit-lists of all critters
	around obj, which may be an object handle or a raw location value.
	flag == 1 additionally clears the critters' kill-on-sight flag."""
	if type(obj) == type(OBJ_HANDLE_NULL.location):
		obj_is_location = 1
		loc1 = obj
	else:
		obj_is_location = 0
		loc1 = obj.location
	# game.particles( 'Orb-Summon-Air-Elemental', game.party[0] )
	for pc in game.party:
		for critter in game.obj_list_vicinity( loc1, OLC_CRITTERS ):
			critter.ai_shitlist_remove( pc )
			if flag == 1:
				critter.npc_flag_unset(ONF_KOS)
		# BUG fix: the original called obj.ai_shitlist_remove(pc)
		# unconditionally, which crashed whenever obj was passed as a
		# plain location (locations have no such method).
		if not obj_is_location:
			obj.ai_shitlist_remove(pc)
	return
def group_challenge_rating():
	# Party CR: average level scaled by party size relative to the
	# standard 4-member group (group_average_level comes from ed/batch).
	return (group_average_level(game.leader) * (len(game.party)/4.0))
#################################################################
# Added by Hazelnut #
#################################################################
# Replacement for the D20STD_F_POISON flag for saving throws. The STD define contains
# the enum index value, 4, which is incorrect as it's checked against the bitmask 8
# in temple.dll.
D20CO8_F_POISON = 8
# Util functions for getting & setting words, bytes and nibbles in object integers.
# object = reference to the object containing the integer variable.
# var = the variable to be used. e.g. obj_f_weapon_pad_i_2
# idx = the index of the word (0-1), byte (0-3) or nibble (0-7) to use.
# val = the value to be set.
def getObjVarDWord(object, var):
	# Full 32-bit read of obj integer field `var`.
	return object.obj_get_int(var)
def setObjVarDWord(object, var, val):
	# Full 32-bit write of obj integer field `var`.
	object.obj_set_int(var, val)
def getObjVarWord(object, var, idx):
	# Read 16-bit word idx (0-1) of obj field `var`.
	return getObjVar(object, var, idx, 0xffff, 16)
def setObjVarWord(object, var, idx, val):
	# Write 16-bit word idx (0-1) of obj field `var`.
	setObjVar(object, var, idx, val, 0xffff, 16)
def getObjVarByte(object, var, idx):
	# Read byte idx (0-3) of obj field `var`.
	return getObjVar(object, var, idx, 0xff, 8)
def setObjVarByte(object, var, idx, val):
	# Write byte idx (0-3) of obj field `var`.
	setObjVar(object, var, idx, val, 0xff, 8)
def getObjVarNibble(object, var, idx):
	# Read nibble idx (0-7) of obj field `var`.
	return getObjVar(object, var, idx, 0xf, 4)
def setObjVarNibble(object, var, idx, val):
	# Write nibble idx (0-7) of obj field `var`.
	setObjVar(object, var, idx, val, 0xf, 4)
def getObjVar(object, var, idx, mask, bits):
	# Extract field number idx (each field `bits` wide, valued 0..mask)
	# from the integer stored in obj field `var`.
	shift = idx * bits
	return (object.obj_get_int(var) >> shift) & mask
def setObjVar(object, var, idx, val, mask, bits):
	# Overwrite field number idx (each field `bits` wide, masked by
	# `mask`) of obj field `var`, preserving every other bit.
	shift = idx * bits
	keep = object.obj_get_int(var) & ~(mask << shift)
	object.obj_set_int(var, keep | (val << shift))
##################
# added by dolio #
##################
# Temporarily renders a target invincible, and deals
# some damage (which gets reduced to 0). This allows
# the dealer to gain experience for killing the target.
def plink( critter, spell ):
	# Deal 1 point of spell damage to a temporarily-invulnerable target:
	# the damage is absorbed, but spell.caster is credited as an
	# attacker, which makes later kill XP attribution work.
	invuln = critter.object_flags_get() & OF_INVULNERABLE
	dice = dice_new( '1d1' )
	critter.object_flag_set( OF_INVULNERABLE )
	critter.spell_damage( spell.caster,
	                      D20DT_UNSPECIFIED,
	                      dice,
	                      D20DAP_UNSPECIFIED,
	                      D20A_CAST_SPELL,
	                      spell.id )
	# Only clear invulnerability if the critter did not already have it.
	if not invuln:
		critter.object_flag_unset( OF_INVULNERABLE )
def slay_critter( critter, spell ):
	# Kill outright while crediting the caster with XP (see plink).
	plink( critter, spell )
	critter.critter_kill()
def slay_critter_by_effect( critter, spell ):
	# As slay_critter, but via the "killed by effect" death path.
	plink( critter, spell )
	critter.critter_kill_by_effect()
# This kills a critter, and gets rid of the body, while
# dropping the equipment.
def disintegrate_critter( ensure_exp, critter, spell ):
	# Kill the critter and dispose of the body while dropping its gear:
	# a 0-duration 'sp-Animate Dead' condition removes the corpse.
	# ensure_exp: when truthy, plink first so the caster gets kill XP.
	spell.duration = 0
	if ensure_exp:
		plink( critter, spell )
	critter.critter_kill()
	critter.condition_add_with_args( 'sp-Animate Dead',
	                                 spell.id, spell.duration, 3 )
# end dolio additions
#################################################################
# Added by Sitra Achara #
#################################################################
def config_override_bits( var_no , bit_range , new_value ):
	"""Write new_value into selected bit(s) of game.global_vars[var_no].

	bit_range is either a single bit index (int) or a list/tuple of
	consecutive bit indices; for a sequence, new_value is shifted to the
	lowest listed bit.  Tuples are now accepted as well as lists
	(previously a tuple was silently ignored); anything else is still a
	no-op, as before.
	"""
	# Idiom fix: isinstance() instead of type(x) == type(...) checks.
	if isinstance(bit_range, (list, tuple)):
		bit_mask = 0
		for bit_index in bit_range:
			bit_mask += 2**bit_index
		# Replace just the masked bits with the shifted new value.
		game.global_vars[var_no] ^= ( bit_mask & ( game.global_vars[var_no] ^ ( new_value << bit_range[0] ) ) )
	elif isinstance(bit_range, int):
		if new_value != 0:
			game.global_vars[var_no] |= (2**bit_range)
		else:
			game.global_vars[var_no] -= ( game.global_vars[var_no] & (2**bit_range) )
def get_Co8_options_from_ini():
	# Parse Co8_config.ini and fold every recognized option into the
	# packed global-var bitfields 449/450/451 used by the mod scripts.
	# Any I/O or parse error silently aborts the whole pass (deliberate
	# failsafe: a broken config file must never break the game).
	try:
		i_file = open('Co8_config.ini','r') # opens the file in ToEE main folder now
		s = 'initial value'
		failsafe_count = 0
		# Hard cap of 1000 lines guards against a file that never EOFs.
		while s != '' and failsafe_count < 1000:
			failsafe_count += 1
			s = i_file.readline()
			s2 = s.split('{')
			if len(s2) >= 3:
				# check if it's an actual entry line with the format:
				# { Param name } { value } {description (this bracket is optional!) }
				# will return an array ['','[Param name text]} ','[value]}, [Description text]}'] entry
				param_name = s2[1].replace("}","").strip()
				par_lower = param_name.lower()
				param_value = s2[2].replace("}","").strip()
				if param_value.isdigit():
					param_value = int(param_value)
				# NOTE(review): a non-numeric value stays a string here;
				# config_override_bits would then raise, aborting the
				# rest of the file via the bare except below.
				if par_lower == 'Party_Run_Speed'.lower():
					config_override_bits( 449, range(0, 2+1), param_value )
				elif par_lower == 'Disable_New_Plots'.lower():
					config_override_bits( 450, 0, param_value )
				elif par_lower == 'Disable_Target_Of_Revenge'.lower():
					config_override_bits( 450, 10, param_value )
				elif par_lower == 'Disable_Moathouse_Ambush'.lower():
					config_override_bits( 450, 11, param_value )
				elif par_lower == 'Disable_Arena_Of_Heroes'.lower():
					config_override_bits( 450, 12, param_value )
				elif par_lower == 'Disable_Reactive_Temple'.lower():
					config_override_bits( 450, 13, param_value )
				elif par_lower == 'Disable_Screng_Recruit_Special'.lower():
					config_override_bits( 450, 14, param_value )
				elif par_lower == 'Entangle_Outdoors_Only'.lower():
					config_override_bits( 451, 0, param_value )
				elif par_lower == 'Elemental_Spells_At_Elemental_Nodes'.lower():
					config_override_bits( 451, 1, param_value )
				elif par_lower == 'Charm_Spell_DC_Modifier'.lower():
					config_override_bits( 451, 2, param_value )
				elif par_lower == 'AI_Ignore_Summons'.lower():
					config_override_bits( 451, 3, param_value )
				elif par_lower == 'AI_Ignore_Spiritual_Weapons'.lower():
					config_override_bits( 451, 4, param_value )
				elif par_lower == 'Random_Encounter_XP_Reduction'.lower():
					config_override_bits( 451, range(5, 7+1), param_value ) # bits 5-7
				elif par_lower == 'Stinking_Cloud_Duration_Nerf'.lower():
					config_override_bits( 451, 8, param_value )
		# Bits 0-2 of var 449 hold the party run speed; apply it now.
		if game.global_vars[449] & (2**0 + 2**1 + 2**2) != 0:
			speedup( game.global_vars[449] & (2**0 + 2**1 + 2**2) , game.global_vars[449] & (2**0 + 2**1 + 2**2) )
		i_file.close()
	except:
		return
return | GrognardsFromHell/TemplePlus | tpdatasrc/co8infra/scr/Co8.py | Python | mit | 22,189 |
# Copyright 2013 Google Inc. All Rights Reserved.
"""Generate usage text for displaying to the user.
"""
import argparse
import re
import StringIO
import sys
import textwrap
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.core.util import console_io
LINE_WIDTH = 80
HELP_INDENT = 25
MARKDOWN_BOLD = '*'
MARKDOWN_ITALIC = '_'
MARKDOWN_CODE = '`'
class HelpInfo(object):
  """A class to hold some the information we need to generate help text."""

  def __init__(self, help_text, is_hidden, release_track):
    """Create a HelpInfo object.

    Args:
      help_text: str, The text of the help message.
      is_hidden: bool, True if this command or group has been marked as hidden.
      release_track: calliope.base.ReleaseTrack, The maturity level of this
        command.
    """
    self.release_track = release_track
    self.is_hidden = is_hidden
    # Normalize a missing/empty help message to the empty string.
    self.help_text = help_text if help_text else ''
class CommandChoiceSuggester(object):
  """Utility to suggest mistyped commands.

  Computes a bounded edit distance (substitutions, plus deletions applied
  to the longer word only) between the attempted argument and each valid
  choice, memoizing intermediate word pairs in self.cache.
  """
  # Max recursive probes allowed per choice before giving up.
  TEST_QUOTA = 5000
  # Suggestions farther than this edit distance are discarded.
  MAX_DISTANCE = 5

  def __init__(self):
    self.cache = {}
    self.inf = float('inf')
    self._quota = self.TEST_QUOTA

  def Deletions(self, s):
    # All strings obtained by deleting exactly one character from s.
    return [s[:i] + s[i + 1:] for i in range(len(s))]

  def GetDistance(self, longer, shorter):
    """Get the edit distance between two words.

    They must be in the correct order, since deletions and mutations only happen
    from 'longer'.

    Args:
      longer: str, The longer of the two words.
      shorter: str, The shorter of the two words.

    Returns:
      int, The number of substitutions or deletions on longer required to get
      to shorter.
    """
    if longer == shorter:
      return 0
    try:
      return self.cache[(longer, shorter)]
    except KeyError:
      pass
    # Seed the cache with inf so an in-flight pair is not recomputed by a
    # recursive call; overwritten with the real distance below.
    self.cache[(longer, shorter)] = self.inf
    best_distance = self.inf
    if len(longer) > len(shorter):
      if self._quota < 0:
        return self.inf
      self._quota -= 1
      # Try every single-character deletion from the longer word.
      for m in self.Deletions(longer):
        best_distance = min(best_distance, self.GetDistance(m, shorter) + 1)
    if len(longer) == len(shorter):
      # just count how many letters differ
      best_distance = 0
      for i in range(len(longer)):
        if longer[i] != shorter[i]:
          best_distance += 1
    self.cache[(longer, shorter)] = best_distance
    return best_distance

  def SuggestCommandChoice(self, arg, choices):
    """Find the item that is closest to what was attempted.

    Args:
      arg: str, The argument provided.
      choices: [str], The list of valid arguments.

    Returns:
      str, The closest match, or None if nothing is within MAX_DISTANCE.
    """
    min_distance = self.inf
    for choice in choices:
      # Fresh quota for each candidate choice.
      self._quota = self.TEST_QUOTA
      first, second = arg, choice
      if len(first) < len(second):
        first, second = second, first
      if len(first) - len(second) > self.MAX_DISTANCE:
        # Don't bother if they're too different.
        continue
      d = self.GetDistance(first, second)
      if d < min_distance:
        min_distance = d
        bestchoice = choice
    if min_distance > self.MAX_DISTANCE:
      return None
    return bestchoice
def WrapMessageInNargs(msg, nargs):
  """Create the display help string for a positional arg.

  Args:
    msg: [str] The possibly repeated text.
    nargs: The repetition operator.

  Returns:
    str, The string representation for printing.
  """
  if nargs == '+':
    # Required and repeatable: MSG [MSG ...]
    return '%s [%s ...]' % (msg, msg)
  if nargs in ('*', argparse.REMAINDER):
    # Optional and repeatable: [MSG ...]
    return '[%s ...]' % msg
  if nargs == '?':
    # Optional, at most once: [MSG]
    return '[%s]' % msg
  return msg
def GetFlagMetavar(metavar, flag):
  """Return the metavar display text for flag.

  ArgList-typed flags render in the MV,[MV,...] style, with any required
  minimum number of repetitions spelled out explicitly.
  """
  if not isinstance(flag.type, arg_parsers.ArgList):
    return metavar
  msg = '[{metavar},...]'.format(metavar=metavar)
  if flag.type.min_length:
    msg = ','.join([metavar] * flag.type.min_length + [msg])
  return msg
def PositionalDisplayString(arg, markdown=False):
  """Create the display help string for a positional arg.

  Args:
    arg: argparse.Argument, The argument object to be displayed.
    markdown: bool, If true add markdowns.

  Returns:
    str, The string representation for printing.
  """
  display = arg.metavar or arg.dest.upper()
  if markdown:
    # Italicize each identifier-looking word in the metavar.
    display = re.sub(r'(\b[a-zA-Z][-a-zA-Z_0-9]*)',
                     MARKDOWN_ITALIC + r'\1' + MARKDOWN_ITALIC, display)
  return ' ' + WrapMessageInNargs(display, arg.nargs)
def FlagDisplayString(arg, brief=False, markdown=False):
  """Create the display help string for a flag arg.

  Args:
    arg: argparse.Argument, The argument object to be displayed.
    brief: bool, If true, only display one version of a flag that has
        multiple versions, and do not display the default value.
    markdown: bool, If true add markdowns.

  Returns:
    str, The string representation for printing.
  """
  metavar = arg.metavar or arg.dest.upper()
  if brief:
    # Brief mode: first spelling alphabetically, no default shown.
    long_string = sorted(arg.option_strings)[0]
    if arg.nargs == 0:
      # Boolean flag: no metavar.
      return long_string
    return '{flag} {metavar}'.format(
        flag=long_string,
        metavar=GetFlagMetavar(metavar, arg))
  else:
    if arg.nargs == 0:
      # Boolean flag: every spelling, no metavar.
      if markdown:
        return ', '.join([MARKDOWN_BOLD + x + MARKDOWN_BOLD
                          for x in arg.option_strings])
      else:
        return ', '.join(arg.option_strings)
    else:
      if markdown:
        # Italicize identifier-looking words inside the metavar.
        metavar = re.sub('(\\b[a-zA-Z][-a-zA-Z_0-9]*)',
                         MARKDOWN_ITALIC + '\\1' + MARKDOWN_ITALIC, metavar)
      display_string = ', '.join(
          ['{bb}{flag}{be} {metavar}'.format(
              bb=MARKDOWN_BOLD if markdown else '',
              flag=option_string,
              be=MARKDOWN_BOLD if markdown else '',
              metavar=GetFlagMetavar(metavar, arg))
           for option_string in arg.option_strings])
      # Show a truthy default for optional flags only.
      if not arg.required and arg.default:
        display_string += '; default="{val}"'.format(val=arg.default)
      return display_string
def WrapWithPrefix(prefix, message, indent, length, spacing,
                   writer=sys.stdout):
  """Helper function that does two-column writing.

  If the first column is too long, the second column begins on the next line.

  Args:
    prefix: str, Text for the first column.
    message: str, Text for the second column.
    indent: int, Width of the first column.
    length: int, Width of both columns, added together.
    spacing: str, Space to put on the front of prefix.
    writer: file-like, Receiver of the written output.
  """
  # Re-wrap the message into rows of (length - indent) columns; rows
  # after the first get the column indent prepended via the separator.
  body = ('\n%%%ds' % indent % ' ').join(
      textwrap.wrap(message, length - indent))
  if len(prefix) > indent - len(spacing) - 2:
    # Prefix too wide for the first column: emit it on its own line,
    # then start the message fully indented on the next.
    writer.write('%s%s\n' % (spacing, prefix))
    writer.write('%%%ds' % indent % ' ')
    writer.write(body + '\n')
  else:
    # Prefix fits (with at least 2 spare columns): pad out the rest of
    # the first column and continue with the message on the same line.
    writer.write('%s%s' % (spacing, prefix))
    writer.write('%%%ds %%s\n'
                 % (indent - len(prefix) - len(spacing) - 1)
                 % (' ', body))
# TODO(user): Remove this and all references. See b/18933702.
def ShouldPrintAncestorFlag(arg):
  """Determine if an ancestor flag should be printed in a subcommand.

  This is a temporary hack to prevent these flags from showing up in
  sub-command helps. Proper support for marking flags as global and
  rationalizing where they will be printed will be in an upcoming CL.

  Args:
    arg: The argparse argument that is to be printed.

  Returns:
    True if is should be printed, False otherwise.
  """
  hidden_global_flags = ('--user-output-enabled', '--verbosity')
  return arg.option_strings[0] not in hidden_global_flags
def GenerateUsage(command, argument_interceptor):
  """Generate a usage string for a calliope command or group.

  Args:
    command: calliope._CommandCommon, The command or group object that we're
      generating usage for.
    argument_interceptor: calliope._ArgumentInterceptor, the object that tracks
      all of the flags for this command or group.

  Returns:
    str, The usage string.
  """
  command.LoadAllSubElements()
  buf = StringIO.StringIO()
  command_path = ' '.join(command.GetPath())
  usage_parts = []
  # Becomes True once any non-required flag is seen; those are summarized as
  # a single '[optional flags]' placeholder instead of being listed.
  optional_messages = False
  flag_messages = []
  # Do positional args first, since flag args taking lists can mess them
  # up otherwise.
  # Explicitly not sorting here - order matters.
  # Make a copy, and we'll pop items off. Once we get to a REMAINDER, that goes
  # after the flags so we'll stop and finish later.
  positional_args = argument_interceptor.positional_args[:]
  while positional_args:
    arg = positional_args[0]
    if arg.nargs == argparse.REMAINDER:
      break
    positional_args.pop(0)
    usage_parts.append(PositionalDisplayString(arg))
  for arg in argument_interceptor.flag_args:
    # Suppressed flags never appear in usage.
    if arg.help == argparse.SUPPRESS:
      continue
    if not arg.required:
      optional_messages = True
      continue
    # and add it to the usage
    msg = FlagDisplayString(arg, True)
    flag_messages.append(msg)
  # Required flags are listed alphabetically after the leading positionals.
  usage_parts.extend(sorted(flag_messages))
  if optional_messages:
    # If there are any optional flags, add a simple message to the usage.
    usage_parts.append('[optional flags]')
  # positional_args will only be non-empty if we had some REMAINDER left.
  for arg in positional_args:
    usage_parts.append(PositionalDisplayString(arg))
  group_helps = command.GetSubGroupHelps()
  command_helps = command.GetSubCommandHelps()
  # Hidden children are shown only when the parent itself is hidden.
  groups = sorted([name for (name, help_info) in group_helps.iteritems()
                   if command.IsHidden() or not help_info.is_hidden])
  commands = sorted([name for (name, help_info) in command_helps.iteritems()
                     if command.IsHidden() or not help_info.is_hidden])
  all_subtypes = []
  if groups:
    all_subtypes.append('group')
  if commands:
    all_subtypes.append('command')
  if groups or commands:
    usage_parts.append('<%s>' % ' | '.join(all_subtypes))
  usage_msg = ' '.join(usage_parts)
  non_option = '{command} '.format(command=command_path)
  buf.write(non_option + usage_msg + '\n')
  # Append the lists of available sub-groups/sub-commands in two columns.
  if groups:
    WrapWithPrefix('group may be', ' | '.join(
        groups), HELP_INDENT, LINE_WIDTH, spacing='  ', writer=buf)
  if commands:
    WrapWithPrefix('command may be', ' | '.join(
        commands), HELP_INDENT, LINE_WIDTH, spacing='  ', writer=buf)
  return buf.getvalue()
def ExpandHelpText(command, text):
  """Expand command {...} references in text.

  Args:
    command: calliope._CommandCommon, The command object that we're helping.
    text: str, The text chunk to expand.

  Returns:
    str, The expanded help text.
  """
  # Expand the long help first so that {description} is itself expanded, but
  # guard against infinite recursion when *text* is the long help.
  if text == command.long_help:
    expanded_long_help = ''
  else:
    expanded_long_help = ExpandHelpText(command, command.long_help)
  path = command.GetPath()
  return console_io.LazyFormat(
      text or '',
      command=' '.join(path),
      man_name='_'.join(path),
      top_command=path[0],
      parent_command=' '.join(path[:-1]),
      index=command.short_help,
      description=expanded_long_help)
def ShortHelpText(command, argument_interceptor):
  """Get a command's short help text.

  Args:
    command: calliope._CommandCommon, The command object that we're helping.
    argument_interceptor: calliope._ArgumentInterceptor, the object that tracks
      all of the flags for this command or group.

  Returns:
    str, The short help text.
  """
  command.LoadAllSubElements()
  buf = StringIO.StringIO()
  required_messages = []
  optional_messages = []
  # Sorting for consistency and readability.
  for arg in (argument_interceptor.flag_args +
              [arg for arg in argument_interceptor.ancestor_flag_args
               if ShouldPrintAncestorFlag(arg)]):
    if arg.help == argparse.SUPPRESS:
      continue
    message = (FlagDisplayString(arg, False), arg.help or '')
    if not arg.required:
      optional_messages.append(message)
      continue
    required_messages.append(message)
    # NOTE(review): a leftover 'msg = FlagDisplayString(arg, True)' used to be
    # computed here and was never read; removed as dead code.
  positional_messages = []
  # Explicitly not sorting here - order matters.
  for arg in argument_interceptor.positional_args:
    positional_messages.append(
        (PositionalDisplayString(arg), arg.help or ''))
  group_helps = command.GetSubGroupHelps()
  command_helps = command.GetSubCommandHelps()
  # Hidden children are shown only when the parent itself is hidden.
  group_messages = [(name, help_info.help_text) for (name, help_info)
                    in group_helps.iteritems()
                    if command.IsHidden() or not help_info.is_hidden]
  command_messages = [(name, help_info.help_text) for (name, help_info)
                      in command_helps.iteritems()
                      if command.IsHidden() or not help_info.is_hidden]
  # First, the usage line.
  buf.write('Usage: ' + GenerateUsage(command, argument_interceptor) + '\n')
  # Second, print out the long help.
  buf.write('\n'.join(textwrap.wrap(ExpandHelpText(command, command.long_help),
                                    LINE_WIDTH)))
  buf.write('\n\n')
  # Third, print out the short help for everything that can come on
  # the command line, grouped into required flags, optional flags,
  # sub groups, sub commands, and positional arguments.
  # (A second pass that rebuilt the flag message lists into variables nothing
  # ever read was removed as dead code.)
  def TextIfExists(title, messages):
    """Render a titled two-column section, or None if *messages* is empty."""
    if not messages:
      return None
    textbuf = StringIO.StringIO()
    textbuf.write('%s\n' % title)
    for (arg, helptxt) in messages:
      WrapWithPrefix(arg, helptxt, HELP_INDENT, LINE_WIDTH,
                     spacing='  ', writer=textbuf)
    return textbuf.getvalue()
  all_messages = [
      TextIfExists('required flags:', sorted(required_messages)),
      TextIfExists('optional flags:', sorted(optional_messages)),
      TextIfExists('positional arguments:', positional_messages),
      TextIfExists('command groups:', sorted(group_messages)),
      TextIfExists('commands:', sorted(command_messages)),
  ]
  buf.write('\n'.join([msg for msg in all_messages if msg]))
  return buf.getvalue()
def ExtractHelpStrings(docstring):
  """Extracts short help and long help from a docstring.

  If the docstring contains a blank line (i.e., a line consisting of zero or
  more spaces), everything before the first blank line is taken as the short
  help string and everything after it is taken as the long help string. The
  short help is flowing text with no line breaks, while the long help may
  consist of multiple lines, each line beginning with an amount of whitespace
  determined by dedenting the docstring.

  If the docstring does not contain a blank line, the sequence of words in the
  docstring is used as both the short help and the long help.

  Corner cases: If the first line of the docstring is empty, everything
  following it forms the long help, and the sequence of words of in the long
  help (without line breaks) is used as the short help. If the short help
  consists of zero or more spaces, None is used instead. If the long help
  consists of zero or more spaces, the short help (which might or might not be
  None) is used instead.

  Args:
    docstring: The docstring from which short and long help are to be taken

  Returns:
    a tuple consisting of a short help string and a long help string
  """
  if not docstring:
    return (None, None)
  raw_lines = docstring.splitlines()
  flat_lines = [line.strip() for line in raw_lines]
  if '' in flat_lines:
    blank_index = flat_lines.index('')
    short_help = ' '.join(flat_lines[:blank_index])
    long_help = textwrap.dedent(
        '\n'.join(raw_lines[blank_index + 1:])).strip()
    if not short_help:
      # Docstring started with a blank line: reuse the long help's words
      # (without line breaks) as the short help.
      short_help = ' '.join(flat_lines[blank_index + 1:]).strip()
  else:
    # No blank line at all: the whole docstring doubles as both helps.
    short_help = ' '.join(flat_lines).strip()
    long_help = None
  return (short_help or None, long_help or short_help or None)
| ychen820/microblog | y/google-cloud-sdk/lib/googlecloudsdk/calliope/usage_text.py | Python | bsd-3-clause | 16,991 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-21 15:44
from __future__ import unicode_literals
from django.db import migrations
import djgeojson.fields
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.7: adds a GeoJSON point column to Record.
    dependencies = [
        ('survey', '0044_auto_20160618_1412'),
    ]
    operations = [
        migrations.AddField(
            model_name='record',
            name='coords',
            # Optional and hidden from forms (editable=False); filled in later,
            # presumably by geocoding - confirm against the Record model.
            field=djgeojson.fields.PointField(blank=True, editable=False, null=True),
        ),
    ]
| simonspa/django-datacollect | datacollect/survey/migrations/0045_record_coords.py | Python | gpl-3.0 | 498 |
# -*- coding: utf-8 -*-
"""
Projekt IIS - ordinacia praktickeho lekara
Usage:
main.py db init
main.py db drop
main.py generate random users <number_of_users>
main.py generate random drugs <number_of_drugs>
main.py generate random visits <max_visits>
main.py generate random predepsal <number>
main.py generate random ukony <number>
main.py generate random objednavky <number>
main.py generate odborne vysetrenie <number>
main.py generate faktury <number>
main.py generate users
main.py generate pojistovna
main.py sort pacients
main.py generate all
"""
from docopt_dispatch import dispatch
from database.database import (init_db, drop_db, create_random_users, create_random_drugs, create_random_predepsal,
create_random_visits, create_registrovani_cizi_pacienti, create_random_ukony, generate_pojistovna,
create_random_objednavky, generate_random_faktury, create_all_user_types, generate_odborne_vysetrenie)
@dispatch.on("db", "init")
def create_database(**kwargs):
    """Handle ``main.py db init``: create all database tables."""
    init_db()
@dispatch.on("db", "drop")
def drop_database(**kwargs):
    """Handle ``main.py db drop``: drop all database tables."""
    drop_db()
@dispatch.on("generate", "random", "users")
def create_users(number_of_users, **kwargs):
    """Insert <number_of_users> random users (docopt passes the count as str)."""
    init_db()
    create_random_users(int(number_of_users))
@dispatch.on("generate", "random", "visits")
def create_visits(max_visits, **kwargs):
    """Insert up to <max_visits> random patient visits."""
    init_db()
    create_random_visits(int(max_visits))
@dispatch.on("generate", "random", "drugs")
def create_drugs(number_of_drugs, **kwargs):
    """Insert <number_of_drugs> random drugs."""
    init_db()
    create_random_drugs(int(number_of_drugs))
@dispatch.on("generate", "random", "predepsal")
def create_predepsal(number, **kwargs):
    """Insert <number> random prescriptions."""
    init_db()
    create_random_predepsal(int(number))
@dispatch.on("generate", "random", "ukony")
def create_ukony(number, **kwargs):
    """Insert <number> random medical procedures."""
    init_db()
    create_random_ukony(int(number))
@dispatch.on("generate", "random", "objednavky")
def create_objednavky(number, **kwargs):
    """Insert <number> random appointments."""
    init_db()
    create_random_objednavky(int(number))
@dispatch.on("generate", "users")
def create_user_type(**kwargs):
    """Create one account of every supported user role."""
    init_db()
    create_all_user_types()
@dispatch.on("sort", "pacients")
def sort_pacients(**kwargs):
    """Split patients into registered vs. external categories."""
    create_registrovani_cizi_pacienti()
@dispatch.on("generate", "pojistovna")
def pojistovna(**kwargs):
    """Populate the insurance-company table."""
    generate_pojistovna()
@dispatch.on("generate", "odborne", "vysetrenie")
def odborne_vysetrenie(number, **kwargs):
    """Generate <number> random specialist examinations.

    Args:
        number: row count; docopt delivers command-line captures as strings.
    """
    # Convert explicitly, matching every other generator handler in this
    # module - the previous code passed the raw docopt string through.
    generate_odborne_vysetrenie(int(number))
@dispatch.on("generate", "faktury")
def generate_faktury(number, **kwargs):
    """Generate <number> random invoices.

    Args:
        number: row count; docopt delivers command-line captures as strings.
    """
    # Convert explicitly, matching every other generator handler in this
    # module - the previous code passed the raw docopt string through.
    generate_random_faktury(int(number))
@dispatch.on("generate", "all")
def generate_all(**kwargs):
    """Rebuild the schema and populate every table with sample data."""
    # Order matters: drop/create first, then reference data (insurers),
    # then entities that the later generators reference.
    drop_db()
    init_db()
    pojistovna()
    create_users(500)
    create_drugs(1000)
    create_random_visits(3000)
    create_random_predepsal(1000)
    create_random_ukony(1000)
    create_user_type()
    sort_pacients()
    generate_odborne_vysetrenie(1000)
    generate_faktury(1000)
    create_random_objednavky(1000)
if __name__ == '__main__':
    # docopt parses the module docstring above; dispatch routes the parsed
    # command to the matching @dispatch.on handler.
    dispatch(__doc__)
| Joozty/FIT-VUT | 5. Semester/IIS - Information Systems/app/main.py | Python | gpl-3.0 | 2,988 |
__author__ = 'michael'
import itertools
import unittest.mock
import pytest
from games import gset
@pytest.fixture(scope="module")
def fset():
    # Module-wide (source list, FiniteSet) pair with duplicates, None and a
    # nested FiniteSet, so tests cover mixed/hashable element handling.
    testlist = [1, 2, 'Hallo', 2, gset.FiniteSet([2, 3, "Hallo"]), 'Hallo', None]
    finiteset = gset.FiniteSet(testlist)
    return (testlist, finiteset)
@pytest.fixture(scope="module")
def fset2():
    # Smaller companion fixture; shares the element 2 with fset so
    # intersection tests have a known overlap.
    testlist = [frozenset([1, 3]), 2]
    finiteset = gset.FiniteSet(testlist)
    return (testlist, finiteset)
# noinspection PyMethodMayBeStatic
class TestFiniteSets:
    """Exercises gset.FiniteSet and its lazy product/union/intersection sets."""

    def sethelper(self, seta):
        """Shared invariants: samples stay inside the set, repr/str are strings."""
        for i in range(10):
            assert seta.sample() in seta
        assert 'testnichtimset' not in seta
        # These two checks were bare no-op expressions ('seta.__repr__() is
        # str'), which never asserted anything; assert the intended property.
        assert isinstance(seta.__repr__(), str)
        assert isinstance(seta.__str__(), str)

    def test_basic(self, fset):
        with pytest.raises(TypeError):
            fset[1] * 2
        with pytest.raises(TypeError):
            fset[1] & 2
        with pytest.raises(TypeError):
            fset[1] | 2
        assert set(fset[1]) == set(fset[0])
        assert gset.FiniteSet(fset[0]) == fset[1]
        self.sethelper(fset[1])
        assert fset[1].estimate() == len(set(fset[0]))
        assert fset[1].is_finite()
        assert fset[1] == fset[1].to_finite()

    def test_cartproduct(self, fset, fset2):
        sc = set([(x, y) for x in fset[0] for y in fset[0]])
        c = fset[1] * fset[1]
        self.sethelper(c)
        assert set(c) == sc
        assert c != gset.FiniteSet(sc)  # for flattening purposes this makes a difference!
        assert c.to_finite() == gset.FiniteSet(sc)
        for t in sc:
            assert t in c
        for x in fset[0]:
            assert x not in c
            assert (x, 'tst') not in c
            assert ('tst', x) not in c
        assert fset[1] * fset2[1] != fset2[1] * fset[1]
        assert fset2[1] * fset[1] == fset2[1] * fset[1]
        assert not c.is_finite()

    def test_cart_flatten(self, fset, fset2):
        fm = fset[1].mul(fset2[1], fset[1], fset2[1])
        m = fset[1] * fset2[1] * fset[1] * fset2[1]
        self.sethelper(m)
        self.sethelper(fm)
        assert fm == m.rflattened()
        assert fm != m.flattened()
        assert m.flattened() == (fset[1] * fset2[1]).mul(fset[1], fset2[1])

    def test_union(self, fset, fset2):
        u = fset[1] | fset2[1]
        self.sethelper(u)
        assert u == fset2[1] | fset[1]
        assert u | fset[1] == u
        assert u | u == u
        assert fset[1] | fset2[1] != fset[1]
        assert set(u) == set(fset[0]) | set(fset2[0])
        assert u.to_finite() == gset.FiniteSet(set(fset[0]) | set(fset2[0]))
        u = (fset[1] * fset[1]) | fset2[1]
        self.sethelper(u)
        assert u.__eq__(fset2[1]) == NotImplemented
        assert u | fset2[1] == u
        assert u | u == u
        assert u | fset[1] != u
        assert u.to_finite() == gset.FiniteSet(set((fset[1] * fset[1])) | fset2[1])
        u = (fset[1] * fset[1]) | (fset2[1] * fset[1])
        self.sethelper(u)
        assert u | u == u
        assert u.to_finite() == gset.FiniteSet(set((fset[1] * fset[1])) | set((fset2[1] * fset[1])))

    def test_intersect(self, fset, fset2):
        i = fset[1] & fset2[1]
        self.sethelper(i)
        assert i.to_finite() == gset.FiniteSet([2])
        assert fset[1] & fset[1] == fset[1]
        assert i & fset[1] == i
        assert (fset[1] * fset2[1]) & fset2[1] == fset2[1] & (fset[1] * fset2[1])
        i2 = (fset[1] * fset[1]) & (fset2[1] * fset2[1])
        self.sethelper(i2)
        assert i2.to_finite() == gset.FiniteSet([(2, 2)])
        assert i2 & (fset[1] * fset[1]) == i2
        assert i2 != fset[1]

    def test_hashable(self, fset, fset2):
        # Building a set of composite sets merely has to not raise.
        c1 = (fset[1] * fset2[1])
        {c1, c1 | fset[1], c1 & fset[1]}

    def test_default(self):
        gs = gset.GeneralSet()
        assert not gs.is_finite()
        assert gs.estimate() == -2
        gs.sample = unittest.mock.Mock()
        iter(gs).__next__()
        assert gs.sample.called
        assert gs.to_finite() is None

    def test_map(self):
        m = gset.MappedSet(gset.FiniteSet(range(10)), lambda x: 2 * x, lambda x: x / 2 if isinstance(x, int) else -23)
        assert m.estimate() == 10
        ds = gset.FiniteSet(range(0, 20, 2))
        assert m.to_finite() == ds
        self.sethelper(m)
        for d in ds:
            assert d in m

    def test_perm(self):
        lst = list(range(6))
        p = gset.PermutationSet(lst)
        assert p.estimate() == 720
        self.sethelper(p)
        pl = list(p)
        sr = frozenset(range(6))
        for i in range(10):
            assert frozenset(pl[i]) == sr
            assert frozenset(p.sample()) == sr
        assert len(pl) == 720
        assert set(pl) == set(itertools.permutations(lst))

    def test_pow(self):
        pass
| sonnerm/games | games/test_gset.py | Python | agpl-3.0 | 4,793 |
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from . import models
class LocationAdmin(admin.ModelAdmin):
    """Shared admin configuration for Location and all of its subclasses."""
    list_display = ('id', 'name', 'content_type', 'parent', 'active', )
    list_display_links = ('id', 'name', )
    search_fields = ('name', 'name_ascii', 'body', )
    list_filter = ('active', 'content_type', )
    list_editable = ('active', )
    # Render creator as a raw ID input instead of a (potentially huge) select.
    raw_id_fields = ('creator', )
# Every geographic model shares the same admin configuration; registration
# order matches the original explicit calls.
for _geo_model in (models.Location, models.Country, models.Region,
                   models.City, models.Street):
    admin.site.register(_geo_model, LocationAdmin)
| emacsway/django-geo | geo/admin.py | Python | bsd-3-clause | 686 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project: Create a Proxy Class
#
# In this assignment, create a proxy class (one is started for you
# below). You should be able to initialize the proxy object with any
# object. Any attributes called on the proxy object should be forwarded
# to the target object. As each attribute call is sent, the proxy should
# record the name of the attribute sent.
#
# The proxy class is started for you. You will need to add a method
# missing handler and any other supporting methods. The specification
# of the Proxy class is given in the AboutProxyObjectProject koan.
# Note: This is a bit trickier than its Ruby Koans counterpart, but you
# can do it!
from runner.koan import *
class Proxy(object):
    """Forwards attribute access to a wrapped object, recording each access.

    Attribute reads are recorded under their plain name; attribute writes are
    recorded with a trailing '=' (e.g. 'channel='). Names beginning with an
    underscore belong to the proxy itself and are neither forwarded nor
    recorded.
    """

    def __init__(self, target_object):
        self._messages = list()
        # Initialize '_obj' attribute last. Trust me on this!
        # (Until '_obj' exists, __getattr__'s own 'self._obj' lookup would
        # recurse into __getattr__ again.)
        self._obj = target_object

    def __getattr__(self, name):
        # Look the attribute up first so a missing name raises AttributeError
        # without being recorded.
        attr = getattr(self._obj, name)
        self._messages.append(name)
        return attr

    def __setattr__(self, name, value):
        if '_' == name[0]:
            # Proxy-internal attribute: store it on the proxy itself.
            return object.__setattr__(self, name, value)
        setattr(self._obj, name, value)
        self._messages.append(name + '=')

    def messages(self):
        """Return the ordered list of recorded attribute accesses."""
        return self._messages

    def was_called(self, attr):
        """Return True if *attr* was accessed at least once."""
        return self.number_of_times_called(attr) > 0

    def number_of_times_called(self, attr):
        """Count recorded accesses of *attr*.

        Uses list.count instead of the former len(filter(...)), which only
        works on Python 2 (filter returns an iterator on Python 3).
        """
        return self._messages.count(attr)
# The proxy object should pass the following Koan:
#
class AboutProxyObjectProject(Koan):
    """Koans verifying the Proxy class's forwarding and recording behavior."""
    def test_proxy_method_returns_wrapped_object(self):
        # NOTE: The Television class is defined below
        tv = Proxy(Television())
        self.assertTrue(isinstance(tv, Proxy))
    def test_tv_methods_still_perform_their_function(self):
        tv = Proxy(Television())
        tv.channel = 10
        tv.power()
        self.assertEqual(10, tv.channel)
        self.assertTrue(tv.is_on())
    def test_proxy_records_messages_sent_to_tv(self):
        tv = Proxy(Television())
        tv.power()
        tv.channel = 10
        self.assertEqual(['power', 'channel='], tv.messages())
    def test_proxy_handles_invalid_messages(self):
        tv = Proxy(Television())
        ex = None
        try:
            tv.no_such_method()
        except AttributeError as ex:
            # NOTE(review): Python 2 only - Python 3 deletes the 'as ex'
            # binding when the except block exits, so type(ex) below would
            # raise NameError there.
            pass
        self.assertEqual(AttributeError, type(ex))
    def test_proxy_reports_methods_have_been_called(self):
        tv = Proxy(Television())
        tv.power()
        tv.power()
        self.assertTrue(tv.was_called('power'))
        self.assertFalse(tv.was_called('channel'))
    def test_proxy_counts_method_calls(self):
        tv = Proxy(Television())
        tv.power()
        tv.channel = 48
        tv.power()
        self.assertEqual(2, tv.number_of_times_called('power'))
        self.assertEqual(1, tv.number_of_times_called('channel='))
        self.assertEqual(0, tv.number_of_times_called('is_on'))
    def test_proxy_can_record_more_than_just_tv_objects(self):
        # Proxy is generic: any target object works, not just Television.
        proxy = Proxy("Py Ohio 2010")
        result = proxy.upper()
        self.assertEqual("PY OHIO 2010", result)
        result = proxy.split()
        self.assertEqual(["Py", "Ohio", "2010"], result)
        self.assertEqual(['upper', 'split'], proxy.messages())
# ====================================================================
# The following code is to support the testing of the Proxy class. No
# changes should be necessary to anything below this comment.
# Example class using in the proxy testing above.
class Television(object):
    """Minimal television model used as the proxy target in the koan above."""

    def __init__(self):
        self._channel = None
        self._power = None

    @property
    def channel(self):
        """Currently selected channel (None until first assigned)."""
        return self._channel

    @channel.setter
    def channel(self, value):
        self._channel = value

    def power(self):
        """Toggle the power state between 'on' and 'off'."""
        self._power = 'off' if self._power == 'on' else 'on'

    def is_on(self):
        """Return True while the power state is 'on'."""
        return self._power == 'on'
# Tests for the Television class. All of theses tests should pass.
# Tests for the Television class. All of theses tests should pass.
class TelevisionTest(Koan):
    """Sanity checks for Television itself (independent of Proxy)."""
    def test_it_turns_on(self):
        tv = Television()
        tv.power()
        self.assertTrue(tv.is_on())
    def test_it_also_turns_off(self):
        tv = Television()
        tv.power()
        tv.power()
        self.assertFalse(tv.is_on())
    def test_edge_case_on_off(self):
        # Odd number of toggles leaves the set on; one more turns it off.
        tv = Television()
        tv.power()
        tv.power()
        tv.power()
        self.assertTrue(tv.is_on())
        tv.power()
        self.assertFalse(tv.is_on())
    def test_can_set_the_channel(self):
        tv = Television()
        tv.channel = 11
        self.assertEqual(11, tv.channel)
| ChristianAA/python_koans_solutions | python2/koans/about_proxy_object_project.py | Python | mit | 4,995 |
import os
def enumFeeds():
	"""Yield the second whitespace-separated field of every line found in
	each '*-feed.conf' file under /etc/opkg.

	Unreadable files are skipped; a short line (fewer than two fields)
	silently ends processing of that file, matching opkg feed syntax where
	each line is expected to carry at least a type and a name.
	"""
	conf_dir = '/etc/opkg'
	for entry in os.listdir(conf_dir):
		if not entry.endswith('-feed.conf'):
			continue
		try:
			for line in open(os.path.join(conf_dir, entry)):
				yield line.split()[1]
		except (IndexError, IOError):
			pass
def enumPlugins(filter_start=''):
	"""Yield (package, version, description) for opkg packages whose name
	starts with *filter_start*, read from each feed's status list under
	/var/lib/opkg.

	Packages ending in -dev/-staticdev/-dbg/-doc are skipped. The parser is
	a small line-oriented state machine: 'package' is non-None only while
	inside a stanza we want to keep.
	"""
	for feed in enumFeeds():
		package = None
		try:
			for line in open('/var/lib/opkg/%s' % feed, 'r'):
				if line.startswith('Package:'):
					package = line[8:].strip()
					version = ''
					description = ''
					# Keep only matching, non-development packages; the
					# inverted condition below resets 'package' otherwise.
					if package.startswith(filter_start) and not package.endswith('-dev') and not package.endswith('-staticdev') and not package.endswith('-dbg') and not package.endswith('-doc'):
						continue
					package = None
				if package is None:
					continue
				if line.startswith('Version:'):
					version = line[8:].strip()
				elif line.startswith('Description:'):
					description = line[14:-1]
				elif description and line.startswith(' '):
					# Continuation lines extend a multi-line description.
					description += line[:-1]
				elif len(line) <= 1:
					# Blank line ends the stanza: clean up and emit it.
					d = description.split(' ',3)
					if len(d) > 3:
						# Get rid of annoying "version" and package repeating strings
						if d[1] == 'version':
							description = d[3]
						if description.startswith('gitAUTOINC'):
							description = description.split(' ',1)[1]
					yield package, version, description.strip()
					package = None
		except IOError:
			pass
if __name__ == '__main__':
	# Manual smoke test (Python 2): list every enigma* package found.
	for p in enumPlugins('enigma'):
		print p
| pli3/enigma2-git | lib/python/Components/opkg.py | Python | gpl-2.0 | 1,423 |
#/usr/bin/env python
# -#- coding: utf-8 -#-
#
# refdata/product/core/classes.py - reference data product core classes module
#
# This file is part of OndALear collection of open source components
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Copyright (C) 2008 Amnon Janiv <amnon.janiv@ondalear.com>
#
# Initial version: 2008-02-01
# Author: Amnon Janiv <amnon.janiv@ondalear.com>
"""
.. module:: refdata.product.ir.classes
:synopsis: Reference data product ir classes modules
.. moduleauthor:: Amnon Janiv <amnon.janiv@ondalear.com>
"""
__revision__ = '$Id: $'
__version__ = '0.0.1'
from emf.core.emfutil import check_list
from refdata.core.classes import RefDataDomain
from refdata.core.classes import RefDataGrouping
from refdata.product.core.classes import BusinessProduct
#
# Data representation classes.
#
class BusRefDataProductIRDomain(RefDataDomain):
    """Reference-data domain for interest-rate (IR) products."""
    def __init__(self):
        super(BusRefDataProductIRDomain,self).__init__()
# End Class BusRefDataProductIRDomain Definition
class BusRefDataProductIRObjects(RefDataDomain):
    """Holds the IR product collections: cash, floating and fixed products."""

    def __init__(self):
        super(BusRefDataProductIRObjects, self).__init__()
        # Collections start unset; callers assign lists via the setters or
        # append via the @check_list-guarded adders below.
        self.cash = None
        self.float_ = None
        self.fixed = None

    def get_cash(self):
        return self.cash

    def set_cash(self, cash):
        self.cash = cash

    @check_list
    def add_cash(self, value):
        self.cash.append(value)

    def get_float(self):
        return self.float_

    def set_float(self, float_):
        self.float_ = float_

    @check_list
    def add_float(self, value):
        self.float_.append(value)

    def get_fixed(self):
        return self.fixed

    def set_fixed(self, fixed):
        self.fixed = fixed

    @check_list
    def add_fixed(self, value):
        self.fixed.append(value)

# End Class BusRefDataProductIRObjects Definition
class CashProduct(BusinessProduct):
    """Cash (money-market) product; behavior inherited from BusinessProduct."""
    def __init__(self):
        super(CashProduct,self).__init__()
# End Class CashProduct Definition
class CashProductGrouping(RefDataGrouping):
    """Grouping container for cash products."""
    def __init__(self):
        super(CashProductGrouping,self).__init__()
# End Class CashProductGrouping Definition
class FixedProductGrouping(RefDataGrouping):
    """Grouping container for fixed-rate products."""
    def __init__(self):
        super(FixedProductGrouping,self).__init__()
# End Class FixedProductGrouping Definition
class FloatProductGrouping(RefDataGrouping):
    """Grouping container for floating-rate products."""
    def __init__(self):
        super(FloatProductGrouping,self).__init__()
# End Class FloatProductGrouping Definition
class SimpleRateProduct(BusinessProduct):
    """A product characterized by a single reference rate (initially None)."""

    def __init__(self):
        super(SimpleRateProduct, self).__init__()
        self.refRate = None

    def get_refRate(self):
        return self.refRate

    def set_refRate(self, refRate):
        self.refRate = refRate

# End Class SimpleRateProduct Definition
class FloatProduct(SimpleRateProduct):
    """Floating-rate product; the rate lives on SimpleRateProduct."""
    def __init__(self):
        super(FloatProduct,self).__init__()
# End Class FloatProduct Definition
class FixedProduct(SimpleRateProduct):
    """Fixed-rate product; the rate lives on SimpleRateProduct."""
    def __init__(self):
        super(FixedProduct,self).__init__()
# end class FixedProduct
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
parse_iso8601,
update_url_query,
int_or_none,
determine_protocol,
unescapeHTML,
)
class SendtoNewsIE(InfoExtractor):
	"""Extracts video playlists from sendtonews.com player2 embeds."""
	_VALID_URL = r'https?://embed\.sendtonews\.com/player2/embedplayer\.php\?.*\bSC=(?P<id>[0-9A-Za-z-]+)'
	_TEST = {
		# From http://cleveland.cbslocal.com/2016/05/16/indians-score-season-high-15-runs-in-blowout-win-over-reds-rapid-reaction/
		'url': 'http://embed.sendtonews.com/player2/embedplayer.php?SC=GxfCe0Zo7D-175909-5588&type=single&autoplay=on&sound=YES',
		'info_dict': {
			'id': 'GxfCe0Zo7D-175909-5588'
		},
		'playlist_count': 8,
		# test the first video only to prevent lengthy tests
		'playlist': [{
			'info_dict': {
				'id': '240385',
				'ext': 'mp4',
				'title': 'Indians introduce Encarnacion',
				'description': 'Indians president of baseball operations Chris Antonetti and Edwin Encarnacion discuss the slugger\'s three-year contract with Cleveland',
				'duration': 137.898,
				'thumbnail': r're:https?://.*\.jpg$',
				'upload_date': '20170105',
				'timestamp': 1483649762,
			},
		}],
		'params': {
			# m3u8 download
			'skip_download': True,
		},
	}
	_URL_TEMPLATE = '//embed.sendtonews.com/player2/embedplayer.php?SC=%s'
	@classmethod
	def _extract_url(cls, webpage):
		"""Return a protocol-relative embed URL for the first responsive
		sendtonews embed found in *webpage*, or None when there is none."""
		mobj = re.search(r'''(?x)<script[^>]+src=([\'"])
			(?:https?:)?//embed\.sendtonews\.com/player/responsiveembed\.php\?
				.*\bSC=(?P<SC>[0-9a-zA-Z-]+).*
			\1>''', webpage)
		if mobj:
			sc = mobj.group('SC')
			return cls._URL_TEMPLATE % sc
	def _real_extract(self, url):
		"""Download the playlist JSON behind the embed and build entries."""
		playlist_id = self._match_id(url)
		# The player's data endpoint mirrors the embed URL.
		data_url = update_url_query(
			url.replace('embedplayer.php', 'data_read.php'),
			{'cmd': 'loadInitial'})
		playlist_data = self._download_json(data_url, playlist_id)
		entries = []
		for video in playlist_data['playlistData'][0]:
			info_dict = self._parse_jwplayer_data(
				video['jwconfiguration'],
				require_title=False, m3u8_id='hls', rtmp_params={'no_resume': True})
			# Backfill the bitrate from the '/<N>k/' URL segment when the
			# JWPlayer config did not provide one.
			for f in info_dict['formats']:
				if f.get('tbr'):
					continue
				tbr = int_or_none(self._search_regex(
					r'/(\d+)k/', f['url'], 'bitrate', default=None))
				if not tbr:
					continue
				f.update({
					'format_id': '%s-%d' % (determine_protocol(f), tbr),
					'tbr': tbr,
				})
			self._sort_formats(info_dict['formats'], ('tbr', 'height', 'width', 'format_id'))
			thumbnails = []
			if video.get('thumbnailUrl'):
				thumbnails.append({
					'id': 'normal',
					'url': video['thumbnailUrl'],
				})
			if video.get('smThumbnailUrl'):
				thumbnails.append({
					'id': 'small',
					'url': video['smThumbnailUrl'],
				})
			info_dict.update({
				'title': video['S_headLine'].strip(),
				'description': unescapeHTML(video.get('S_fullStory')),
				'thumbnails': thumbnails,
				'duration': float_or_none(video.get('SM_length')),
				'timestamp': parse_iso8601(video.get('S_sysDate'), delimiter=' '),
			})
			entries.append(info_dict)
		return self.playlist_result(entries, playlist_id)
| valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/sendtonews.py | Python | gpl-3.0 | 3,101 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.packages.ycsb"""
import os
import unittest
from perfkitbenchmarker.packages import ycsb
class SimpleResultParserTestCase(unittest.TestCase):
  """Validates ycsb.ParseResults against the 'ycsb-test-run.dat' fixture."""
  maxDiff = None
  def setUp(self):
    # Parse the fixture once per test in histogram mode, which keeps
    # per-bucket (latency, count) pairs alongside the summary statistics.
    path = os.path.join(os.path.dirname(__file__), '..', 'data',
                        'ycsb-test-run.dat')
    with open(path) as fp:
      self.contents = fp.read()
    self.results = ycsb.ParseResults(self.contents, 'histogram')
  def testCommandLineSet(self):
    self.assertEqual('Command line: -db com.yahoo.ycsb.BasicDB '
                     '-P workloads/workloada -t', self.results['command_line'])
  def testClientSet(self):
    self.assertEqual('YCSB Client 0.1', self.results['client'])
  def testUpdateStatisticsParsed(self):
    self.assertDictEqual(
        {
            'group': 'update',
            'statistics': {
                'Operations': 531,
                'Return=0': 531,
                'AverageLatency(ms)': .0659774011299435,
                'MinLatency(ms)': 0.042,
                'MaxLatency(ms)': .345,
                '95thPercentileLatency(ms)': 0,
                '99thPercentileLatency(ms)': 0
            },
            'histogram': [(0, 530), (19, 1)],
        },
        dict(self.results['groups']['update']))
  def testReadStatisticsParsed(self):
    self.assertDictEqual(
        {
            'group': 'read',
            'statistics': {
                'Operations': 469,
                'Return=0': 469,
                'AverageLatency(ms)': 0.03847761194029851,
                'MinLatency(ms)': 0.034,
                'MaxLatency(ms)': 0.102,
                '95thPercentileLatency(ms)': 0,
                '99thPercentileLatency(ms)': 0
            },
            'histogram': [(0, 469)],
        },
        dict(self.results['groups']['read']))
  def testOverallStatisticsParsed(self):
    self.assertDictEqual(
        {
            'statistics': {
                'RunTime(ms)': 80.0,
                'Throughput(ops/sec)': 12500.0
            },
            'group': 'overall',
            'histogram': []
        },
        self.results['groups']['overall'])
class DetailedResultParserTestCase(unittest.TestCase):
  """Checks percentile derivation from histograms in 'ycsb-test-run-2.dat'."""
  def setUp(self):
    path = os.path.join(os.path.dirname(__file__), '..', 'data',
                        'ycsb-test-run-2.dat')
    with open(path) as fp:
      self.contents = fp.read()
    self.results = ycsb.ParseResults(self.contents)
  def testPercentilesFromHistogram_read(self):
    hist = self.results['groups']['read']['histogram']
    percentiles = ycsb._PercentilesFromHistogram(hist)
    self.assertEqual(1, percentiles['p50'])
    self.assertEqual(7, percentiles['p99'])
  def testPercentilesFromHistogram_update(self):
    hist = self.results['groups']['update']['histogram']
    percentiles = ycsb._PercentilesFromHistogram(hist)
    self.assertEqual(1, percentiles['p50'])
    self.assertEqual(7, percentiles['p99'])
  def testPercentilesFromHistogram_cleanup(self):
    hist = self.results['groups']['cleanup']['histogram']
    percentiles = ycsb._PercentilesFromHistogram(hist)
    self.assertEqual(0, percentiles['p50'])
    self.assertEqual(385, percentiles['p99'])
class WeightedQuantileTestCase(unittest.TestCase):
  """Covers ycsb._WeightedQuantile with uniform and skewed weights."""
  def testEvenlyWeightedSamples(self):
    # With equal weights the weighted quantile degenerates to the plain one.
    x = range(1, 101)  # 1-100
    weights = [1 for _ in x]
    self.assertEqual(50, ycsb._WeightedQuantile(x, weights, 0.50))
    self.assertEqual(75, ycsb._WeightedQuantile(x, weights, 0.75))
    self.assertEqual(90, ycsb._WeightedQuantile(x, weights, 0.90))
    self.assertEqual(95, ycsb._WeightedQuantile(x, weights, 0.95))
    self.assertEqual(99, ycsb._WeightedQuantile(x, weights, 0.99))
    self.assertEqual(100, ycsb._WeightedQuantile(x, weights, 1))
  def testLowWeight(self):
    # 99% of the mass sits on the first value; only >=99.5% reaches 4.
    x = [1, 4]
    weights = [99, 1]
    for i in xrange(100):
      self.assertEqual(1, ycsb._WeightedQuantile(x, weights, i / 100.0))
    self.assertEqual(4, ycsb._WeightedQuantile(x, weights, 0.995))
  def testMidWeight(self):
    x = [0, 1.2, 4]
    weights = [1, 98, 1]
    for i in xrange(2, 99):
      self.assertAlmostEqual(1.2, ycsb._WeightedQuantile(x, weights, i / 100.0))
    self.assertEqual(4, ycsb._WeightedQuantile(x, weights, 0.995))
class _ParseWorkloadTestCase(unittest.TestCase):
  """Parsing of YCSB workload parameter files."""

  def testParsesEmptyString(self):
    self.assertDictEqual({}, ycsb._ParseWorkload(''))

  def testIgnoresComment(self):
    # Lines beginning with '#' must not contribute any parameters.
    self.assertDictEqual({}, ycsb._ParseWorkload('#\n'))
    self.assertDictEqual(
        {}, ycsb._ParseWorkload('#recordcount = 10\n'
                                '# columnfamily=cf'))
    self.assertDictEqual({'recordcount': '10'},
                         ycsb._ParseWorkload('#Sample!\nrecordcount = 10'))

  def testParsesSampleWorkload(self):
    workload_path = os.path.join(os.path.dirname(__file__), '..', 'data',
                                 'ycsb_workloada')
    with open(workload_path) as workload_file:
      contents = workload_file.read()
    expected = {
        'recordcount': '1000',
        'operationcount': '1000',
        'workload': 'com.yahoo.ycsb.workloads.CoreWorkload',
        'readallfields': 'true',
        'readproportion': '0.5',
        'updateproportion': '0.5',
        'scanproportion': '0',
        'insertproportion': '0',
        'requestdistribution': 'zipfian'
    }
    self.assertDictEqual(expected, ycsb._ParseWorkload(contents))
| gablg1/PerfKitBenchmarker | tests/packages/ycsb_test.py | Python | apache-2.0 | 6,012 |
import time
import threading
import pytest
from eth_client_utils import BaseClient
class AsyncError(Exception):
    """Raised when a request is started while another is still in flight."""
    pass
class ExampleClient(BaseClient):
    """Client whose transport deliberately breaks under concurrent use.

    ``_make_request`` guards itself with an unsynchronized check-then-set
    flag, so overlapping calls from multiple threads raise ``AsyncError``.
    The tests below rely on that to contrast BaseClient's synchronous and
    asynchronous modes.
    """
    # Shared flag; intentionally not protected by a lock.
    _request_in_progress = False

    def _make_request(self, *args, **kwargs):
        """
        Implementation that isn't friendly to async requests.
        """
        if self._request_in_progress is True:
            raise AsyncError("Request already in progress")
        self._request_in_progress = True
        # Long enough that concurrent callers reliably overlap.
        time.sleep(1)
        self._request_in_progress = False
        return {'result': hex(1)}

    def do_something(self):
        # Delegates to BaseClient.make_request, which is expected to route
        # through _make_request (directly or via a worker, depending on mode).
        return self.make_request()
def test_fails_when_synchronous():
    """With async mode off, overlapping threads must trip the client's guard."""
    # 'async' became a reserved word in Python 3.7, so the keyword argument
    # must be passed via dict unpacking to keep this module importable there.
    client = ExampleClient(**{'async': False})
    threads = []
    errors = []

    def spam_block_number():
        # Each worker issues several requests; any overlap raises AsyncError.
        for _ in range(10):
            try:
                client.do_something()
            except AsyncError as e:
                errors.append(e)
                raise

    for _ in range(3):
        thread = threading.Thread(target=spam_block_number)
        thread.daemon = True
        threads.append(thread)
    # Plain loops instead of side-effect list comprehensions.
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()

    assert len(errors) > 0
def test_succeeds_when_asynchrounous():
    """With async mode on, BaseClient serializes requests so none overlap."""
    # 'async' became a reserved word in Python 3.7, so the keyword argument
    # must be passed via dict unpacking to keep this module importable there.
    client = ExampleClient(**{'async': True})
    threads = []
    errors = []

    def spam_block_number():
        for _ in range(10):
            try:
                client.do_something()
            except AsyncError as e:
                errors.append(e)
                raise

    for _ in range(3):
        thread = threading.Thread(target=spam_block_number)
        thread.daemon = True
        threads.append(thread)
    # Plain loops instead of side-effect list comprehensions.
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()

    assert not errors
| pipermerriam/ethereum-client-utils | tests/base-client/test_base_client.py | Python | mit | 1,779 |
from django.shortcuts import render, get_object_or_404, redirect
from institutional.models import HomePage
from mezzanine.conf import settings
def homepage(request, template="index.html"):
    """
    Redirect to the site's configured home page.

    Looks up the HomePage whose slug is settings.HOME_PAGE_SITE and
    redirects to its absolute URL; a missing page raises Http404 inside
    get_object_or_404.
    """
    page = get_object_or_404(HomePage, slug=settings.HOME_PAGE_SITE)
    if page:
        return redirect(page.get_absolute_url())
    else:
        # NOTE(review): get_object_or_404 either returns an object or raises
        # Http404, so this fallback render looks unreachable -- confirm
        # whether HomePage.objects.filter(...).first() was intended instead.
        return render(request, template)
| roberzguerra/scout_mez | institutional/views.py | Python | gpl-2.0 | 425 |
#!/usr/bin/python
"""
Defines the XPainter class, a simple wrapper over the <QtGui.QPainter> class
that supports python's notion of enter/exit contexts to begin and end painting
on a device. This is more reliable than using Qt's scoping as the object
may linger in memory longer than in the C++ version.
:usage |from xqt import QtGui
|from projexui.xpainter import XPainter
|class MyWidget(QtGui.QWidget):
| def paintEvent(self, event):
| with XPainter(self) as painter:
| # do some paint operations
"""
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software'
__license__ = 'LGPL'
# maintanence information
__maintainer__ = 'Projex Software'
__email__ = 'team@projexsoftware.com'
from xqt import QtGui
class XPainter(QtGui.QPainter):
    """QPainter that begins/ends painting via the ``with`` statement."""

    def __init__(self, device):
        super(XPainter, self).__init__()

        # store a reference to the paint device
        self._device = device

    def __enter__(self):
        # Activate painting on the stored device; the painter itself is the
        # context variable.
        self.begin(self._device)
        return self

    def __exit__(self, *args):
        # Always release the paint device, even if the body raised.
        self.end()
| bitesofcode/projexui | projexui/xpainter.py | Python | lgpl-3.0 | 1,330 |
import sys
import json
from numbers import Number
import time
import requests
import six
from . import version
__BASE_URL = "https://api.outbound.io/v2"
__HEADERS = None  # populated by init(); None means the SDK is unconfigured

# Error codes handed to on_error callbacks throughout this module.
ERROR_INIT = 1
ERROR_USER_ID = 2
ERROR_EVENT_NAME = 3
ERROR_CONNECTION = 4
ERROR_UNKNOWN = 5
ERROR_TOKEN = 6
ERROR_CAMPAIGN_IDS = 7
# NOTE(review): ERROR_PREVIOUS_ID shares value 2 with ERROR_USER_ID, so
# callers cannot tell the two failures apart -- confirm whether a distinct
# code (e.g. 8) was intended.
ERROR_PREVIOUS_ID = 2

# Push-notification platform identifiers accepted by the token helpers.
APNS = "apns"
GCM = "gcm"
def __is_init():
    """Return True once init() has installed the request headers."""
    # Identity test is the idiomatic None check; the previous "!= None"
    # form goes through __ne__ and is flagged by linters (PEP 8 / E711).
    return __HEADERS is not None
def init(key):
    """Configure the SDK with an Outbound API key.

    Must be called before any other function in this module.
    """
    global __HEADERS
    client_header = 'Python/{0}'.format(version.VERSION)
    __HEADERS = {
        'content-type': 'application/json',
        'X-Outbound-Client': client_header,
        'X-Outbound-Key': key,
    }
def unsubscribe(user_id, from_all=False, campaign_ids=None, on_error=None, on_success=None):
    """Remove a user from all campaigns (from_all=True) or the listed ones.

    :param str | number user_id: stable identifier for the user.
    :param bool from_all: unsubscribe from every campaign; takes precedence
        over campaign_ids when both are supplied.
    :param list of str campaign_ids: campaign IDs to unsubscribe from.
    :param func on_error: optional callback(code, message) invoked on
        failure; code is one of the outbound.ERROR_* constants.
    :param func on_success: optional no-argument callback invoked on success.
    """
    __subscription(
        user_id,
        True,
        all_campaigns=from_all,
        campaign_ids=campaign_ids,
        on_error=on_error,
        on_success=on_success,
    )
def subscribe(user_id, to_all=False, campaign_ids=None, on_error=None, on_success=None):
    """Resubscribe a user to all campaigns (to_all=True) or the listed ones.

    :param str | number user_id: stable identifier for the user.
    :param bool to_all: resubscribe to every campaign; takes precedence over
        campaign_ids when both are supplied.
    :param list of str campaign_ids: campaign IDs to resubscribe to.
    :param func on_error: optional callback(code, message) invoked on
        failure; code is one of the outbound.ERROR_* constants.
    :param func on_success: optional no-argument callback invoked on success.
    """
    __subscription(
        user_id,
        False,
        all_campaigns=to_all,
        campaign_ids=campaign_ids,
        on_error=on_error,
        on_success=on_success,
    )
def disable_all_tokens(platform, user_id, on_error=None, on_success=None):
    """Disable every device token registered for a user on one platform.

    :param str platform: outbound.APNS or outbound.GCM.
    :param str | number user_id: stable identifier for the user.
    :param func on_error: optional callback(code, message) invoked on
        failure; code is one of the outbound.ERROR_* constants.
    :param func on_success: optional no-argument callback invoked on success.
    """
    __device_token(platform, False, user_id, all=True,
                   on_error=on_error, on_success=on_success)
def disable_token(platform, user_id, token, on_error=None, on_success=None):
    """Disable a single device token for a user.

    :param str platform: outbound.APNS or outbound.GCM.
    :param str | number user_id: stable identifier for the user.
    :param str token: the device token to disable.
    :param func on_error: optional callback(code, message) invoked on
        failure; code is one of the outbound.ERROR_* constants.
    :param func on_success: optional no-argument callback invoked on success.
    """
    __device_token(platform, False, user_id, token=token,
                   on_error=on_error, on_success=on_success)
def register_token(platform, user_id, token, on_error=None, on_success=None):
    """Register a device token for a user.

    :param str platform: outbound.APNS or outbound.GCM.
    :param str | number user_id: stable identifier for the user.
    :param str token: the device token to register.
    :param func on_error: optional callback(code, message) invoked on
        failure; code is one of the outbound.ERROR_* constants.
    :param func on_success: optional no-argument callback invoked on success.
    """
    __device_token(platform, True, user_id, token=token,
                   on_error=on_error, on_success=on_success)
def alias(user_id, previous_id, on_error=None, on_success=None):
    """ Alias one user id to another.

    :param str | number user_id: the id you use to identify a user. this will be the user's
    primary user id.

    :param str | number previous_id: the id you previously used to identify a user (or the old user id
    you want to associate with the new user id).

    :param func on_error: An optional function to call in the event of an error.
    on_error callback should take 2 parameters: `code` and `error`. `code` will be
    one of outbound.ERROR_XXXXXX. `error` will be the corresponding message.

    :param func on_success: An optional function to call if/when the API call succeeds.
    on_success callback takes no parameters.
    """
    # Bug fix: fall back to the module-level no-op callbacks exactly like
    # identify()/track() do.  Previously, omitting a callback raised
    # "'NoneType' object is not callable" on validation failure, on
    # connection error, and even on success.
    on_error = on_error or __on_error
    on_success = on_success or __on_success

    if not __is_init():
        on_error(ERROR_INIT, __error_message(ERROR_INIT))
        return

    if not isinstance(user_id, six.string_types + (Number,)):
        on_error(ERROR_USER_ID, __error_message(ERROR_USER_ID))
        return

    if not isinstance(previous_id, six.string_types + (Number,)):
        on_error(ERROR_PREVIOUS_ID, __error_message(ERROR_PREVIOUS_ID))
        return

    data = dict(
        user_id=user_id,
        previous_id=previous_id,
    )

    try:
        resp = requests.post(
            "%s/identify" % __BASE_URL,
            data=json.dumps(data),
            headers=__HEADERS,
        )
        # 2xx/3xx counts as success; anything else surfaces the response body.
        if resp.status_code >= 200 and resp.status_code < 400:
            on_success()
        else:
            on_error(ERROR_UNKNOWN, resp.text)
    except requests.exceptions.ConnectionError:
        on_error(ERROR_CONNECTION, __error_message(ERROR_CONNECTION))
def identify(user_id, previous_id=None, group_id=None, group_attributes=None,
             first_name=None, last_name=None, email=None,
             phone_number=None, apns_tokens=None, gcm_tokens=None,
             attributes=None, on_error=None, on_success=None):
    """ Identifying a user creates a record of your user in Outbound. Identify
    calls should be made prior to sending any track events for a user.

    :param str | number user_id: the id you use to identify a user. this should
    be static for the lifetime of a user.

    :param str | number previous_id: OPTIONAL the id you previously used to identify the user.

    :param str | number group_id: OPTIONAL the id that identifies a group of users the current user
    belongs to.

    :param dict group_attributes: OPTIONAL An optional dictionary of attributes that are shared
    among the group this user belongs to.

    :param str first_name: OPTIONAL the user's first name.

    :param str last_name: OPTIONAL the user's last name.

    :param str email: OPTIONAL the user's email address.

    :param str phone_number: OPTIONAL the user's phone number.

    :param str | list apns_tokens: OPTIONAL the device tokens for the user's iOS
    devices. If a single string is given it is put into a list.

    :param str | list gcm_tokens: OPTIONAL the device tokens for the user's Android
    devices. If a single string is given it is put into a list.

    :param dict attributes: An optional dictionary with any additional freeform
    attributes describing the user.

    :param func on_error: An optional function to call in the event of an error.
    on_error callback should take 2 parameters: `code` and `error`. `code` will be
    one of outbound.ERROR_XXXXXX. `error` will be the corresponding message.

    :param func on_success: An optional function to call if/when the API call succeeds.
    on_success callback takes no parameters.
    """
    # Default to the module-level no-op callbacks so the calls below are safe.
    on_error = on_error or __on_error
    on_success = on_success or __on_success

    if not __is_init():
        on_error(ERROR_INIT, __error_message(ERROR_INIT))
        return

    if not isinstance(user_id, six.string_types + (Number,)):
        on_error(ERROR_USER_ID, __error_message(ERROR_USER_ID))
        return

    # Assemble the optional fields; __user validates and drops bad values.
    data = __user(
        first_name,
        last_name,
        email,
        phone_number,
        apns_tokens,
        gcm_tokens,
        attributes,
        previous_id,
        group_id,
        group_attributes,)
    data['user_id'] = user_id

    try:
        resp = requests.post(
            "%s/identify" % __BASE_URL,
            data=json.dumps(data),
            headers=__HEADERS,
        )
        # 2xx/3xx counts as success; anything else surfaces the response body.
        if resp.status_code >= 200 and resp.status_code < 400:
            on_success()
        else:
            on_error(ERROR_UNKNOWN, resp.text)
    except requests.exceptions.ConnectionError:
        on_error(ERROR_CONNECTION, __error_message(ERROR_CONNECTION))
def track(user_id, event, first_name=None, last_name=None, email=None,
          phone_number=None, apns_tokens=None, gcm_tokens=None,
          user_attributes=None, properties=None, on_error=None, on_success=None, timestamp=None):
    """ For any event you want to track, when a user triggers that event you
    would call this function.

    You can do an identify and track call simultaneously by including all the
    identifiable user information in the track call.

    :param str | number user_id: the id you user who triggered the event.

    :param str first_name: OPTIONAL the user's first name.

    :param str last_name: OPTIONAL the user's last name.

    :param str email: OPTIONAL the user's email address.

    :param str phone_number: OPTIONAL the user's phone number.

    :param str | list apns_tokens: OPTIONAL the device tokens for the user's iOS
    devices. If a single string is given it is put into a list.

    :param str | list gcm_tokens: OPTIONAL the device tokens for the user's Android
    devices. If a single string is given it is put into a list.

    :param dict user_attributes: An optional dictionary with any additional
    freeform attributes describing the user.

    :param dict properties: An optional dictionary with any properties that
    describe the event being track. Example: if the event were "added item to
    cart", you might include a properties named "item" that is the name
    of the item added to the cart.

    :param func on_error: An optional function to call in the event of an error.
    on_error callback should take 1 parameter which will be the error message.

    :param func on_success: An optional function to call if/when the API call succeeds.
    on_success callback takes no parameters.
    """
    # Default to the module-level no-op callbacks so the calls below are safe.
    on_error = on_error or __on_error
    on_success = on_success or __on_success

    if not __is_init():
        on_error(ERROR_INIT, __error_message(ERROR_INIT))
        return

    if not isinstance(user_id, six.string_types + (Number,)):
        on_error(ERROR_USER_ID, __error_message(ERROR_USER_ID))
        return

    if not isinstance(event, six.string_types):
        on_error(ERROR_EVENT_NAME, __error_message(ERROR_EVENT_NAME))
        return

    data = dict(user_id=user_id, event=event)

    # Optional identify-style payload piggybacks on the track call.
    user = __user(
        first_name,
        last_name,
        email,
        phone_number,
        apns_tokens,
        gcm_tokens,
        user_attributes,
        None, None, None)
    if user:
        data['user'] = user

    if properties:
        if isinstance(properties, dict):
            if len(properties) > 0:
                data['properties'] = properties
        else:
            # Invalid optional fields are reported to stderr and dropped,
            # never fatal.
            sys.stderr.write('Invalid event properties given. Expected dictionary. ' +
                             'Got %s' % type(properties).__name__)

    # Default the event time to "now" (seconds since the epoch).
    if timestamp:
        data['timestamp'] = timestamp
    else:
        data['timestamp'] = int(time.time())

    try:
        resp = requests.post(
            "%s/track" % __BASE_URL,
            data=json.dumps(data),
            headers=__HEADERS,
        )
        # 2xx/3xx counts as success; anything else surfaces the response body.
        if resp.status_code >= 200 and resp.status_code < 400:
            on_success()
        else:
            on_error(ERROR_UNKNOWN, resp.text)
    except requests.exceptions.ConnectionError:
        on_error(ERROR_CONNECTION, __error_message(ERROR_CONNECTION))
def __subscription(user_id, unsubscribe, all_campaigns=False, campaign_ids=None, on_error=None, on_success=None):
    """Shared implementation behind subscribe() and unsubscribe().

    :param str | number user_id: stable identifier for the user.
    :param bool unsubscribe: True to unsubscribe, False to (re)subscribe.
    :param bool all_campaigns: apply to every campaign; takes precedence
        over campaign_ids.
    :param list of str campaign_ids: campaigns to operate on when
        all_campaigns is False.
    :param func on_error: optional callback(code, message) on failure.
    :param func on_success: optional no-argument callback on success.
    """
    on_error = on_error or __on_error
    on_success = on_success or __on_success

    if not __is_init():
        on_error(ERROR_INIT, __error_message(ERROR_INIT))
        return

    if not isinstance(user_id, six.string_types + (Number,)):
        on_error(ERROR_USER_ID, __error_message(ERROR_USER_ID))
        return

    if not all_campaigns and (not isinstance(campaign_ids, (list, tuple)) or len(campaign_ids) == 0):
        # NOTE: reports ERROR_TOKEN as the code but the campaign-ids message;
        # preserved as-is for backward compatibility with existing callers.
        on_error(ERROR_TOKEN, __error_message(ERROR_CAMPAIGN_IDS))
        return

    url = '/'.join([__BASE_URL, ('unsubscribe' if unsubscribe else 'subscribe'), ('all' if all_campaigns else 'campaigns')])

    data = dict(
        user_id=user_id,
    )
    if not all_campaigns:
        data['campaign_ids'] = campaign_ids

    try:
        # Bug fix: removed a stray Python 2 "print __HEADERS" debug
        # statement here -- it was a SyntaxError on Python 3 and leaked the
        # API key to stdout.
        resp = requests.post(
            url,
            data=json.dumps(data),
            headers=__HEADERS,
        )
        if resp.status_code >= 200 and resp.status_code < 400:
            on_success()
        else:
            on_error(ERROR_UNKNOWN, resp.text)
    except requests.exceptions.ConnectionError:
        on_error(ERROR_CONNECTION, __error_message(ERROR_CONNECTION))
def __device_token(platform, register, user_id, token='', all=False, on_error=None, on_success=None):
    """Shared implementation for registering/disabling push tokens.

    :param str platform: outbound.APNS or outbound.GCM; interpolated into
        the request path.
    :param bool register: True to register, False to disable.
    :param str | number user_id: stable identifier for the user.
    :param str token: the token to operate on (ignored when `all` is True).
    :param bool all: disable every token for the user instead of one.
        (Parameter shadows the builtin `all`; kept for the keyword callers.)
    :param func on_error: optional callback(code, message) on failure.
    :param func on_success: optional no-argument callback on success.
    """
    on_error = on_error or __on_error
    on_success = on_success or __on_success

    if not __is_init():
        on_error(ERROR_INIT, __error_message(ERROR_INIT))
        return

    if not isinstance(user_id, six.string_types + (Number,)):
        on_error(ERROR_USER_ID, __error_message(ERROR_USER_ID))
        return

    if not all and not isinstance(token, six.string_types):
        on_error(ERROR_TOKEN, __error_message(ERROR_TOKEN))
        return

    try:
        data = dict(
            user_id=user_id,
        )
        if all:
            data["all"] = True
        else:
            data["token"] = token
        resp = requests.post(
            "%s/%s/%s" % (__BASE_URL, platform, 'register' if register else 'disable'),
            data=json.dumps(data),
            headers=__HEADERS,
        )
        # 2xx/3xx counts as success; anything else surfaces the response body.
        if resp.status_code >= 200 and resp.status_code < 400:
            on_success()
        else:
            on_error(ERROR_UNKNOWN, resp.text)
    except requests.exceptions.ConnectionError:
        on_error(ERROR_CONNECTION, __error_message(ERROR_CONNECTION))
def __user(first_name, last_name, email, phone_number, apns_tokens,
           gcm_tokens, attributes, previous_id, group_id, group_attributes):
    """Build the optional "user" payload dict, omitting empty fields.

    Invalid token/attribute types are reported to stderr and skipped rather
    than raising, so a bad optional field never blocks the API call.
    Returns a (possibly empty) dict ready for JSON serialization.
    """
    data = dict()
    if previous_id:
        data['previous_id'] = previous_id
    if group_id:
        data['group_id'] = group_id
    if group_attributes:
        if isinstance(group_attributes, dict):
            if len(group_attributes) > 0:
                data['group_attributes'] = group_attributes
        else:
            sys.stderr.write('Invalid group attributes given. Expected dictionary. ' +
                             'Got %s' % type(group_attributes).__name__)
    if first_name:
        data['first_name'] = first_name
    if last_name:
        data['last_name'] = last_name
    if email:
        data['email'] = email
    if phone_number:
        data['phone_number'] = phone_number
    if apns_tokens:
        # A single token string is promoted to a one-element list.
        if isinstance(apns_tokens, six.string_types):
            apns_tokens = [apns_tokens]
        if isinstance(apns_tokens, (list, tuple)):
            data['apns'] = apns_tokens
        else:
            sys.stderr.write('Invalid APNS tokens given. Expected string or ' +
                             'list of strings. Got %s' % type(apns_tokens).__name__)
    if gcm_tokens:
        # A single token string is promoted to a one-element list.
        if isinstance(gcm_tokens, six.string_types):
            gcm_tokens = [gcm_tokens]
        if isinstance(gcm_tokens, (list, tuple)):
            data['gcm'] = gcm_tokens
        else:
            sys.stderr.write('Invalid GCM tokens given. Expected string or ' +
                             'list of strings. Got %s' % type(gcm_tokens).__name__)
    if attributes:
        if isinstance(attributes, dict):
            if len(attributes) > 0:
                data['attributes'] = attributes
        else:
            sys.stderr.write('Invalid user attributes given. Expected dictionary. ' +
                             'Got %s' % type(attributes).__name__)
    return data
def __error_message(code):
    """Map an ERROR_* code to its human-readable description."""
    messages = {
        ERROR_INIT: "init() must be called before identifying any users.",
        ERROR_USER_ID: "User ID must be a string or a number.",
        ERROR_EVENT_NAME: "Event name must be a string.",
        ERROR_CONNECTION: "Unable to connect to Outbound.",
        ERROR_UNKNOWN: "Unknown error occurred.",
        ERROR_TOKEN: "Token must be a string.",
        ERROR_CAMPAIGN_IDS: "One or more campaigns must be specified.",
        ERROR_PREVIOUS_ID: "Previous must be a string or a number.",
    }
    return messages.get(code, "Unknown error")
def __on_error(code, err):
    """Default error callback: deliberately swallows the failure."""
    pass
def __on_success():
    """Default success callback: no-op."""
    pass
| outboundio/lib-python | outbound/__init__.py | Python | mit | 18,506 |
import json
import helpers
import requests
from models import Setting
def shodan(indicator):
    """Query the Shodan host API for *indicator* (an IP address).

    Returns a dict keyed by open port (full service banner dicts) plus
    selected host metadata fields, or None when anything fails (missing
    settings row, network error, bad JSON) -- preserving the original
    best-effort contract.
    """
    try:
        settings = Setting.query.filter_by(_id=1).first()
        apikey = settings.shodankey
        url = "https://api.shodan.io/shodan/host/"
        ip = indicator
        host_info = {}
        r = requests.get(url + ip + "?key=" + apikey)
        payload = json.loads(r.text)
        metadata_keys = ("city", "region_code", "os", "isp", "country_name",
                         "hostnames", "longitude", "latitude", "vulns",
                         "info", "product", "ports")
        for key in payload:
            if str(key) == "data":
                # One entry per service, keyed by port.  (The original loop
                # appeared intended to strip the bulky "html" field but never
                # did; the full banner is stored, matching prior behavior.)
                for service in payload[key]:
                    host_info[service['port']] = service
            elif str(key) in metadata_keys:
                host_info[key] = str(payload[key])
        # Drop empty entries.  Bug fixes: the original compared with
        # `is "None"` (identity against a literal -- effectively never true
        # for str() results), and mutated the dict while iterating its live
        # keys, which raises RuntimeError on Python 3.
        for key in list(host_info.keys()):
            if host_info[key] is None or host_info[key] == "None":
                del host_info[key]
        return host_info
    except Exception:
        # Best effort: any failure yields None (implicit), as before.
        pass
| defpoint/threat_note | threat_note/libs/shodan.py | Python | apache-2.0 | 1,241 |
import binascii
import os
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
class Token(models.Model):
    """DRF-style API authentication token bound to a single user."""

    # 40 hex chars == 20 random bytes from generate_key().
    key = models.CharField(_("Key"), max_length=40, primary_key=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='auth_tokens', verbose_name=_("User"))
    created = models.DateTimeField(_("Created"), auto_now_add=True)

    class Meta:
        verbose_name = ungettext_lazy("Token", "Tokens", 1)
        verbose_name_plural = ungettext_lazy("Token", "Tokens", 2)

    def save(self, *args, **kwargs):
        # Lazily assign a key on first save so callers never have to.
        if not self.key:
            self.key = self.generate_key()
        return super(Token, self).save(*args, **kwargs)

    def generate_key(self):
        """Return 40 hex characters of cryptographically random data."""
        return binascii.hexlify(os.urandom(20)).decode()

    def __str__(self):
        return self.key
| invliD/lana-dashboard | lana_dashboard/lana_api/models.py | Python | agpl-3.0 | 835 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslotest import base as test_base
from oslo_service import fixture
from oslo_service import loopingcall
class FixtureTestCase(test_base.BaseTestCase):
    """Exercises SleepFixture: sleeps inside loopingcall are mocked out."""

    def setUp(self):
        super(FixtureTestCase, self).setUp()
        # Installing the fixture replaces the real wait with a mock we can
        # inspect after the retries run.
        self.sleepfx = self.useFixture(fixture.SleepFixture())

    def test_sleep_fixture(self):
        @loopingcall.RetryDecorator(max_retry_count=3, inc_sleep_time=2,
                                    exceptions=(ValueError,))
        def retried_method():
            raise ValueError("!")

        self.assertRaises(ValueError, retried_method)
        # Three retries -> three (mocked) waits, with linearly growing delays.
        self.assertEqual(3, self.sleepfx.mock_wait.call_count)
        # TODO(efried): This is cheating, and shouldn't be done by real callers
        # yet - see todo in SleepFixture.
        self.sleepfx.mock_wait.assert_has_calls(
            [mock.call(x) for x in (2, 4, 6)])
| openstack/oslo.service | oslo_service/tests/test_fixture.py | Python | apache-2.0 | 1,458 |
#!/usr/bin/env python3
from bottle import get, post, request, run
import subprocess
@get('/')
def status():
    # Renders the process-search form; submission is handled by do_status.
    return '''
        <form action="/" method="post">
            Process: <input name="process" type="text">
            <input value="Search" type="submit">
        </form>
    '''
@post('/')
def do_status():
    # SECURITY WARNING: deliberately vulnerable (CVE-2014-6271 demo).
    # `process` is user-controlled and interpolated into a shell command with
    # shell=True, giving arbitrary command injection.  Never deploy this
    # pattern; use subprocess with a list argument and shell=False instead.
    process = request.forms.get('process')
    out = subprocess.check_output('ps -ef | grep ' + process, shell=True)
    return '''
        <form action="/" method="post">
            Process: <input name="process" type="text">
            <input value="Search" type="submit">
        </form>
    ''' + str(out).replace('\\n', '<br>')
# Listens on all interfaces; combined with the injectable handler above this
# exposes remote command execution -- example/demo use only.
run(host='0.0.0.0', port=8080)
| aig787/Personal | Examples/CVE-2014-6271/status.py | Python | isc | 613 |
# -*- coding: utf-8 -*-
import glob
import os.path
import instlatte
from instlatte.lib import Sentient
from lascaux import config
from lascaux.system.util import parse_config
from lascaux.system.logger import logger
from lascaux.plugin import Plugin
logger = logger(__name__)
class PluginSubsystem(instlatte.Subsystem, Sentient):
    """Discovers, configures and loads controller plugins per app package.

    NOTE(review): relies on `self.manager` and `self.get_dotpath`, which are
    not defined in this class -- presumably provided by instlatte.Subsystem /
    Sentient; confirm before refactoring.
    """

    # Class-level default; setup() rebinds a fresh list per instance.
    plugins = list()

    def setup(self):
        self.plugins = list()

    def exec_find_plugins(self, app=None):
        self.find_plugins(app)

    def exec_load_plugins(self, app=None):
        self.find_plugins(app)
        self.load_controller_modules(app)
    # Loading is also triggered by the pre-app-init hook.
    exec_pre_app_init = exec_load_plugins

    def exec_get_static_dir_mappings(self, mappings):
        """Append each plugin's static-dir mappings to *mappings* in place."""
        # Merge the server's global static mappings with each plugin's own,
        # exposing both "<plugin>/<rel>" and "<package>/<plugin>/<rel>".
        static_dir_mappings = self.manager.get_subsystem('server').config['static_dir_mappings']
        for plugin in self.get_plugins():
            def append_mappings(relative, absolute):
                mappings.append([os.path.join(plugin.name, relative),
                                 os.path.join(plugin.config['package_dir'], absolute)])
                mappings.append([os.path.join(plugin.app_package, plugin.name, relative),
                                 os.path.join(plugin.config['package_dir'], absolute)])
            for relative in static_dir_mappings:
                append_mappings(relative, static_dir_mappings[relative])
            if 'static_dir_mappings' in plugin.config:
                for relative in plugin.config['static_dir_mappings']:
                    append_mappings(relative, plugin.config['static_dir_mappings'][relative])

    def get_plugins(self):
        return self.plugins

    def find_plugins(self, app=None):
        """Scan each app package's plugins/ dir and record Plugin instances."""
        for app_package in config['app_packages']:
            package_dir = config[app_package]['package_dir']
            for plugin in glob.glob(os.path.join(package_dir, 'plugins', '*')):
                if not os.path.isdir(plugin):
                    continue
                plugin_name = os.path.basename(plugin)
                # Only plugins explicitly listed in the package config load.
                if not plugin_name in config[app_package]['plugins']:
                    continue
                plugin_config = config[app_package]['plugins'][plugin_name]
                plugin_config['package_dir'] = plugin
                plugin_config['routes'] = parse_config(os.path.join(plugin, plugin_config['routing']))
                if app:
                    self.plugins.append(Plugin(app=app.get_root(),
                                               name=plugin_name,
                                               app_package=app_package,
                                               config=plugin_config))
                else:
                    self.plugins.append(Plugin(name=plugin_name,
                                               app_package=app_package,
                                               config=plugin_config))
                logger.info("Found plugin `%s` in '%s'" % (plugin_name, plugin))
        return self.plugins

    def load_controller_modules(self, app=None):
        """Import each route's controller module and bind the class to its plugin."""
        for plugin in self.plugins:
            for route in plugin.config['routes'].values():
                dotpath = self.get_dotpath(os.path.join(plugin.config['package_dir'],
                                                        route['controller'].split(':')[0]))
                module = __import__(dotpath)
                # __import__ returns the top-level package; walk to the leaf.
                for sym in dotpath.split('.')[1:]:
                    module = getattr(module, sym)
                class_ = getattr(module, route['controller'].split(':')[1])
                class_.plugin = plugin
                plugin.controllers[route['controller']] = class_
                logger.info("Loaded plugin module for `%s` from '%s'" % (route['controller'], os.path.abspath(module.__file__)))
| hyphyphyph/lascaux | lascaux/subsystems/plugin/plugin.py | Python | mit | 3,724 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import metric_ops
NAN = float('nan')

# Shorthand for the metrics module under test.
metrics = tf.contrib.metrics
def _enqueue_vector(sess, queue, values, shape=None):
  """Enqueue `values` as a constant; shape defaults to (1, len(values))."""
  if not shape:
    shape = (1, len(values))
  # The queue's first dtype determines how the constant is built.
  dtype = queue.dtypes[0]
  sess.run(queue.enqueue(tf.constant(values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
  """Convert dense 2D binary indicator tensor to sparse tensor.

  Only 1 values in `labels` are included in result.

  Args:
    labels: Dense 2D binary indicator tensor.

  Returns:
    `SparseTensorValue` whose values are indices along the last dimension of
    `labels`.
  """
  indices = []
  values = []
  for batch, row in enumerate(labels):
    # xi counts the 1-entries seen so far in this row; it becomes the
    # position of the value along the sparse tensor's last dimension.
    xi = 0
    for column, x in enumerate(row):
      if x == 1:
        indices.append([batch, xi])
        values.append(column)
        xi += 1
      else:
        assert x == 0
  shape = [len(labels), len(labels[0])]
  return tf.SparseTensorValue(
      np.array(indices, np.int64),
      np.array(values, np.int64),
      np.array(shape, np.int64))
def _binary_2d_label_to_sparse(labels):
  """Wrap `_binary_2d_label_to_sparse_value`'s result in a `SparseTensor`.

  Args:
    labels: Dense 2D binary indicator tensor.

  Returns:
    `SparseTensor` whose values are indices along the last dimension of
    `labels`.
  """
  sparse_value = _binary_2d_label_to_sparse_value(labels)
  return tf.SparseTensor.from_value(sparse_value)
def _binary_3d_label_to_sparse_value(labels):
  """Convert dense 3D binary indicator tensor to sparse tensor.

  Only 1 values in `labels` are included in result.

  Args:
    labels: Dense 2D binary indicator tensor.

  Returns:
    `SparseTensorValue` whose values are indices along the last dimension of
    `labels`.
  """
  indices = []
  values = []
  for d0, labels_d0 in enumerate(labels):
    for d1, labels_d1 in enumerate(labels_d0):
      # d2 counts the 1-entries seen so far along the innermost axis.
      d2 = 0
      for class_id, label in enumerate(labels_d1):
        if label != 1:
          assert label == 0
          continue
        values.append(class_id)
        indices.append([d0, d1, d2])
        d2 += 1
  shape = [len(labels), len(labels[0]), len(labels[0][0])]
  return tf.SparseTensorValue(
      np.array(indices, np.int64),
      np.array(values, np.int64),
      np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
  """Wrap `_binary_3d_label_to_sparse_value`'s result in a `SparseTensor`.

  Args:
    labels: Dense 2D binary indicator tensor.

  Returns:
    `SparseTensor` whose values are indices along the last dimension of
    `labels`.
  """
  sparse_value = _binary_3d_label_to_sparse_value(labels)
  return tf.SparseTensor.from_value(sparse_value)
def _assert_nan(test_case, actual):
  """Assert on `test_case` that `actual` is NaN, with a readable message."""
  is_nan = math.isnan(actual)
  test_case.assertTrue(is_nan, 'Expected NAN, got %s.' % actual)
class StreamingMeanTest(tf.test.TestCase):
  def setUp(self):
    # Start every test from a clean default graph so metric variables and
    # collections from earlier tests cannot leak in.
    tf.reset_default_graph()
  def testMetricsCollection(self):
    """The metric value tensor is added to the requested collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_mean(
        tf.ones([4, 3]),
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])
  def testUpdatesCollection(self):
    """The update op is added to the requested collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean(
        tf.ones([4, 3]),
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
  def testBasic(self):
    """Streaming mean over four 1x2 batches: sum 13.2 over 8 values = 1.65."""
    with self.test_session() as sess:
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      mean, update_op = metrics.streaming_mean(values)

      sess.run(tf.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      self.assertAlmostEqual(1.65, sess.run(mean), 5)
  def testUpdateOpsReturnsCurrentValue(self):
    """Each run of update_op returns the running mean so far."""
    with self.test_session() as sess:
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      mean, update_op = metrics.streaming_mean(values)

      sess.run(tf.local_variables_initializer())

      # Partial means after 2, 4, 6 and 8 accumulated values.
      self.assertAlmostEqual(0.5, sess.run(update_op), 5)
      self.assertAlmostEqual(1.475, sess.run(update_op), 5)
      self.assertAlmostEqual(12.4/6.0, sess.run(update_op), 5)
      self.assertAlmostEqual(1.65, sess.run(update_op), 5)

      self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
tf.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = (
(0, 1),
(-4.2, 9.1),
(6.5, 0),
(-3.2, 4.0)
)
values = tf.placeholder(dtype=tf.float32)
# Create the queue that populates the weighted labels.
weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
tf.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
tf.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = (
(0, 1),
(-4.2, 9.1),
(6.5, 0),
(-3.2, 4.0)
)
values = tf.placeholder(dtype=tf.float32)
# Create the queue that populates the weighted labels.
weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
tf.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(tf.test.TestCase):
  """Tests for `metrics.streaming_mean_tensor` (element-wise running mean).

  Fix: several assertions passed a stray positional `5` to
  `assertAllClose`. Its third parameter is `rtol` (relative tolerance),
  not "decimal places" as in `assertAlmostEqual`, so `rtol=5` (500%) made
  those checks nearly vacuous. The stray argument is dropped so the
  default tight tolerance applies.
  """

  def setUp(self):
    # Each test builds its own graph from scratch.
    tf.reset_default_graph()

  def testMetricsCollection(self):
    """The value tensor is registered in `metrics_collections`."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_mean_tensor(
        tf.ones([4, 3]),
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is registered in `updates_collections`."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean_tensor(
        tf.ones([4, 3]),
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testBasic(self):
    """Per-column means over 4 batches: [-0.9/4, 3.525]."""
    with self.test_session() as sess:
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values)

      sess.run(tf.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      self.assertAllClose([[-0.9/4., 3.525]], sess.run(mean))

  def testMultiDimensional(self):
    """The mean is taken element-wise over tensors of any rank."""
    with self.test_session() as sess:
      values_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(2, 2, 2))
      _enqueue_vector(sess,
                      values_queue,
                      [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
                      shape=(2, 2, 2))
      _enqueue_vector(sess,
                      values_queue,
                      [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
                      shape=(2, 2, 2))
      values = values_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values)

      sess.run(tf.local_variables_initializer())
      for _ in range(2):
        sess.run(update_op)
      self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]],
                          sess.run(mean))

  def testUpdateOpsReturnsCurrentValue(self):
    """Each run of the update op returns the element-wise running mean."""
    with self.test_session() as sess:
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values)

      sess.run(tf.local_variables_initializer())

      # Previously passed a stray `5` (interpreted as rtol=5); removed.
      self.assertAllClose([[0, 1]], sess.run(update_op))
      self.assertAllClose([[-2.1, 5.05]], sess.run(update_op))
      self.assertAllClose([[2.3/3., 10.1/3.]], sess.run(update_op))
      self.assertAllClose([[-0.9/4., 3.525]], sess.run(update_op))

      self.assertAllClose([[-0.9/4., 3.525]], sess.run(mean))

  def testWeighted1d(self):
    """Per-batch weights: only batches 0 and 2 contribute."""
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      # Create the queue that populates the weights.
      weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [[1]])
      _enqueue_vector(sess, weights_queue, [[0]])
      _enqueue_vector(sess, weights_queue, [[1]])
      _enqueue_vector(sess, weights_queue, [[0]])
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values, weights)

      sess.run(tf.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      # Mean of batches 0 and 2: [(0+6.5)/2, (1+0)/2].
      # Stray `5` (rtol=5) removed from the assertion.
      self.assertAllClose([[3.25, 0.5]], sess.run(mean))

  def testWeighted2d_1(self):
    """Element-wise weights: per-column weighted means."""
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      # Create the queue that populates the weights.
      weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, weights_queue, [1, 1])
      _enqueue_vector(sess, weights_queue, [1, 0])
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values, weights)

      sess.run(tf.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      # Stray `5` (rtol=5) removed from the assertion.
      self.assertAllClose([[-2.1, 0.5]], sess.run(mean))

  def testWeighted2d_2(self):
    """A column with all-zero weights yields 0 for that element."""
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      # Create the queue that populates the weights.
      weights_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 2))
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      weights = weights_queue.dequeue()

      mean, update_op = metrics.streaming_mean_tensor(values, weights)

      sess.run(tf.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      # Stray `5` (rtol=5) removed from the assertion.
      self.assertAllClose([[0, 0.5]], sess.run(mean))
class StreamingAccuracyTest(tf.test.TestCase):
  """Tests for `metrics.streaming_accuracy`.

  Fix: two loops used Python-2-only `xrange`, inconsistent with every
  other loop in this file (which uses `range`) and a NameError under
  Python 3; both are replaced with `range`.
  """

  def setUp(self):
    # Each test builds its own graph from scratch.
    tf.reset_default_graph()

  def testMetricsCollection(self):
    """The value tensor is registered in `metrics_collections`."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_accuracy(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is registered in `updates_collections`."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_accuracy(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
    """Incompatible static shapes are rejected at graph-build time."""
    predictions = tf.ones((10, 3))
    labels = tf.ones((10, 4))
    with self.assertRaises(ValueError):
      metrics.streaming_accuracy(predictions, labels)

  def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
    """Weights whose shape mismatches predictions are rejected."""
    predictions = tf.ones((10, 3))
    labels = tf.ones((10, 3))
    weights = tf.ones((9, 3))
    with self.assertRaises(ValueError):
      metrics.streaming_accuracy(predictions, labels, weights)

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change it."""
    predictions = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
    labels = tf.random_uniform((10, 3), maxval=3, dtype=tf.int64, seed=1)
    accuracy, update_op = metrics.streaming_accuracy(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_accuracy = accuracy.eval()
      for _ in range(10):
        self.assertEqual(initial_accuracy, accuracy.eval())

  def testMultipleUpdates(self):
    """Accuracy over 4 (prediction, label) pairs, 2 of which match."""
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [2])
      _enqueue_vector(sess, preds_queue, [1])
      predictions = preds_queue.dequeue()

      # Create the queue that populates the labels.
      labels_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [2])
      labels = labels_queue.dequeue()

      accuracy, update_op = metrics.streaming_accuracy(
          predictions, labels)

      sess.run(tf.local_variables_initializer())
      # Was `xrange` (Python 2 only); `range` matches the rest of the file.
      for _ in range(3):
        sess.run(update_op)
      self.assertEqual(0.5, sess.run(update_op))
      self.assertEqual(0.5, accuracy.eval())

  def testEffectivelyEquivalentSizes(self):
    """(40, 1) predictions broadcast cleanly against (40,) labels."""
    predictions = tf.ones((40, 1))
    labels = tf.ones((40,))
    with self.test_session() as sess:
      accuracy, update_op = metrics.streaming_accuracy(
          predictions, labels)

      sess.run(tf.local_variables_initializer())
      self.assertEqual(1.0, update_op.eval())
      self.assertEqual(1.0, accuracy.eval())

  def testEffectivelyEquivalentSizesWithStaicShapedWeight(self):
    # NOTE(review): "Staic" is a typo for "Static"; name kept so the
    # externally visible test identifier is unchanged.
    predictions = tf.convert_to_tensor([1, 1, 1])  # shape 3,
    labels = tf.expand_dims(tf.convert_to_tensor([1, 0, 0]), 1)  # shape 3, 1
    weights = tf.expand_dims(tf.convert_to_tensor([100, 1, 1]), 1)  # shape 3, 1

    with self.test_session() as sess:
      accuracy, update_op = metrics.streaming_accuracy(
          predictions, labels, weights)

      sess.run(tf.local_variables_initializer())
      # if streaming_accuracy does not flatten the weight, accuracy would be
      # 0.33333334 due to an intended broadcast of weight. Due to flattening,
      # it will be higher than .95
      self.assertGreater(update_op.eval(), .95)
      self.assertGreater(accuracy.eval(), .95)

  def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
    """Same flattening behavior when weights arrive via a placeholder."""
    predictions = tf.convert_to_tensor([1, 1, 1])  # shape 3,
    labels = tf.expand_dims(tf.convert_to_tensor([1, 0, 0]), 1)  # shape 3, 1

    weights = [[100], [1], [1]]  # shape 3, 1
    weights_placeholder = tf.placeholder(dtype=tf.int32, name='weights')
    feed_dict = {weights_placeholder: weights}

    with self.test_session() as sess:
      accuracy, update_op = metrics.streaming_accuracy(
          predictions, labels, weights_placeholder)

      sess.run(tf.local_variables_initializer())
      # if streaming_accuracy does not flatten the weight, accuracy would be
      # 0.33333334 due to an intended broadcast of weight. Due to flattening,
      # it will be higher than .95
      self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
      self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)

  def testMultipleUpdatesWithWeightedValues(self):
    """Zero-weighted pairs are ignored; the weighted pairs all match."""
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [2])
      _enqueue_vector(sess, preds_queue, [1])
      predictions = preds_queue.dequeue()

      # Create the queue that populates the labels.
      labels_queue = tf.FIFOQueue(4, dtypes=tf.float32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [2])
      labels = labels_queue.dequeue()

      # Create the queue that populates the weights.
      weights_queue = tf.FIFOQueue(4, dtypes=tf.int64, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [1])
      _enqueue_vector(sess, weights_queue, [1])
      _enqueue_vector(sess, weights_queue, [0])
      _enqueue_vector(sess, weights_queue, [0])
      weights = weights_queue.dequeue()

      accuracy, update_op = metrics.streaming_accuracy(
          predictions, labels, weights)

      sess.run(tf.local_variables_initializer())
      # Was `xrange` (Python 2 only); `range` matches the rest of the file.
      for _ in range(3):
        sess.run(update_op)
      self.assertEqual(1.0, sess.run(update_op))
      self.assertEqual(1.0, accuracy.eval())
class StreamingPrecisionTest(tf.test.TestCase):
  """Tests for `metrics.streaming_precision` (TP / (TP + FP))."""

  def setUp(self):
    # Fix numpy's seed so random fixtures are reproducible per test.
    np.random.seed(1)
    tf.reset_default_graph()

  def testMetricsCollection(self):
    """The value tensor is registered in `metrics_collections`."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_precision(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is registered in `updates_collections`."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_precision(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change it."""
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    precision, update_op = metrics.streaming_precision(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_precision = precision.eval()
      for _ in range(10):
        self.assertEqual(initial_precision, precision.eval())

  def testAllCorrect(self):
    """Identical predictions and labels give precision 1."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = tf.constant(inputs)
    labels = tf.constant(inputs)
    precision, update_op = metrics.streaming_precision(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(1, sess.run(update_op))
      self.assertAlmostEqual(1, precision.eval())

  def testSomeCorrect(self):
    """1 TP out of 2 positive predictions: precision 0.5."""
    predictions = tf.constant([1, 0, 1, 0], shape=(1, 4))
    labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
    precision, update_op = metrics.streaming_precision(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.5, update_op.eval())
      self.assertAlmostEqual(0.5, precision.eval())

  def testWeighted1d(self):
    """Per-row weights scale both true positives and predicted positives."""
    predictions = tf.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
    labels = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    precision, update_op = metrics.streaming_precision(
        predictions, labels, weights=tf.constant([[2], [5]]))

    with self.test_session():
      tf.local_variables_initializer().run()
      weighted_tp = 2.0 + 5.0
      weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(expected_precision, update_op.eval())
      self.assertAlmostEqual(expected_precision, precision.eval())

  def testWeighted1d_placeholders(self):
    """Same as testWeighted1d, but inputs arrive via placeholders."""
    predictions = tf.placeholder(dtype=tf.float32)
    labels = tf.placeholder(dtype=tf.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.streaming_precision(
        predictions, labels, weights=tf.constant([[2], [5]]))

    with self.test_session():
      tf.local_variables_initializer().run()
      weighted_tp = 2.0 + 5.0
      weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(
          expected_precision, update_op.eval(feed_dict=feed_dict))
      self.assertAlmostEqual(
          expected_precision, precision.eval(feed_dict=feed_dict))

  def testWeighted2d(self):
    """Element-wise weights apply per (row, column) entry."""
    predictions = tf.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
    labels = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    precision, update_op = metrics.streaming_precision(
        predictions, labels, weights=tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))

    with self.test_session():
      tf.local_variables_initializer().run()
      weighted_tp = 3.0 + 4.0
      weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(expected_precision, update_op.eval())
      self.assertAlmostEqual(expected_precision, precision.eval())

  def testWeighted2d_placeholders(self):
    """Same as testWeighted2d, but inputs arrive via placeholders."""
    predictions = tf.placeholder(dtype=tf.float32)
    labels = tf.placeholder(dtype=tf.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.streaming_precision(
        predictions, labels, weights=tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))

    with self.test_session():
      tf.local_variables_initializer().run()
      weighted_tp = 3.0 + 4.0
      weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(
          expected_precision, update_op.eval(feed_dict=feed_dict))
      self.assertAlmostEqual(
          expected_precision, precision.eval(feed_dict=feed_dict))

  def testAllIncorrect(self):
    """Labels are the complement of predictions: precision 0."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = tf.constant(inputs)
    labels = tf.constant(1 - inputs)
    precision, update_op = metrics.streaming_precision(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      self.assertAlmostEqual(0, precision.eval())

  def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
    """No positive predictions at all: precision is defined as 0."""
    predictions = tf.constant([0, 0, 0, 0])
    labels = tf.constant([0, 0, 0, 0])
    precision, update_op = metrics.streaming_precision(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(0.0, precision.eval())
class StreamingRecallTest(tf.test.TestCase):
  """Tests for `metrics.streaming_recall` (TP / (TP + FN))."""

  def setUp(self):
    # Fix numpy's seed so random fixtures are reproducible per test.
    np.random.seed(1)
    tf.reset_default_graph()

  def testMetricsCollection(self):
    """The value tensor is registered in `metrics_collections`."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_recall(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is registered in `updates_collections`."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_recall(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change it."""
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    recall, update_op = metrics.streaming_recall(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_recall = recall.eval()
      for _ in range(10):
        self.assertEqual(initial_recall, recall.eval())

  def testAllCorrect(self):
    """Identical predictions and labels give recall 1."""
    np_inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = tf.constant(np_inputs)
    labels = tf.constant(np_inputs)
    recall, update_op = metrics.streaming_recall(predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(1, recall.eval())

  def testSomeCorrect(self):
    """1 TP out of 2 actual positives: recall 0.5."""
    predictions = tf.constant([1, 0, 1, 0], shape=(1, 4))
    labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
    recall, update_op = metrics.streaming_recall(predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.5, update_op.eval())
      self.assertAlmostEqual(0.5, recall.eval())

  def testWeighted1d(self):
    """Per-row weights scale true positives and actual positives."""
    predictions = tf.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    labels = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    weights = tf.constant([[2], [5]])
    recall, update_op = metrics.streaming_recall(
        predictions, labels, weights=weights)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      weighted_tp = 2.0 + 5.0
      weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      # NOTE(review): variable is misnamed — this is an expected *recall*.
      expected_precision = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_precision, update_op.eval())
      self.assertAlmostEqual(expected_precision, recall.eval())

  def testWeighted2d(self):
    """Element-wise weights apply per (row, column) entry."""
    predictions = tf.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    labels = tf.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    weights = tf.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
    recall, update_op = metrics.streaming_recall(
        predictions, labels, weights=weights)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      weighted_tp = 3.0 + 1.0
      weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      # NOTE(review): variable is misnamed — this is an expected *recall*.
      expected_precision = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_precision, update_op.eval())
      self.assertAlmostEqual(expected_precision, recall.eval())

  def testAllIncorrect(self):
    """Labels are the complement of predictions: recall 0."""
    np_inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = tf.constant(np_inputs)
    labels = tf.constant(1 - np_inputs)
    recall, update_op = metrics.streaming_recall(predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(0, recall.eval())

  def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
    """No actual positives at all: recall is defined as 0."""
    predictions = tf.zeros((1, 4))
    labels = tf.zeros((1, 4))
    recall, update_op = metrics.streaming_recall(predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(0, recall.eval())
class StreamingAUCTest(tf.test.TestCase):
  """Tests for `metrics.streaming_auc` (ROC and PR curves)."""

  def setUp(self):
    # Fix numpy's seed so random fixtures are reproducible per test.
    np.random.seed(1)
    tf.reset_default_graph()

  def testMetricsCollection(self):
    """The value tensor is registered in `metrics_collections`."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_auc(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is registered in `updates_collections`."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_auc(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change it."""
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    auc, update_op = metrics.streaming_auc(
        predictions, labels)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_auc = auc.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_auc, auc.eval(), 5)

  def testAllCorrect(self):
    self.allCorrectAsExpected('ROC')

  def allCorrectAsExpected(self, curve):
    """Shared check: perfect predictions give AUC 1 for `curve`."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = tf.constant(inputs, dtype=tf.float32)
      labels = tf.constant(inputs)
      auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)

      sess.run(tf.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))

      self.assertEqual(1, auc.eval())

  def testSomeCorrect(self):
    """Half right, half wrong: ROC AUC 0.5."""
    with self.test_session() as sess:
      predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
      labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
      auc, update_op = metrics.streaming_auc(predictions, labels)

      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.5, sess.run(update_op))

      self.assertAlmostEqual(0.5, auc.eval())

  def testWeighted1d(self):
    """A uniform scalar weight does not change the AUC."""
    with self.test_session() as sess:
      predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
      labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
      weights = tf.constant([2], shape=(1, 1))
      auc, update_op = metrics.streaming_auc(predictions, labels,
                                             weights=weights)

      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.5, sess.run(update_op), 5)

      self.assertAlmostEqual(0.5, auc.eval(), 5)

  def testWeighted2d(self):
    """Element-wise weights shift the AUC (here to 0.7)."""
    with self.test_session() as sess:
      predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
      labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
      weights = tf.constant([1, 2, 3, 4], shape=(1, 4))
      auc, update_op = metrics.streaming_auc(predictions, labels,
                                             weights=weights)

      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.7, sess.run(update_op), 5)

      self.assertAlmostEqual(0.7, auc.eval(), 5)

  def testAUCPRSpecialCase(self):
    """Known PR-AUC value for a small hand-computed example."""
    with self.test_session() as sess:
      predictions = tf.constant([0.1, 0.4, 0.35, 0.8],
                                shape=(1, 4), dtype=tf.float32)
      labels = tf.constant([0, 0, 1, 1], shape=(1, 4))
      auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')

      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)

      self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)

  def testAnotherAUCPRSpecialCase(self):
    """Known PR-AUC value with interleaved positives and negatives."""
    with self.test_session() as sess:
      predictions = tf.constant([0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
                                shape=(1, 7), dtype=tf.float32)
      labels = tf.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
      auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')

      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)

      self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)

  def testThirdAUCPRSpecialCase(self):
    """Known PR-AUC value where positives rank mostly above negatives."""
    with self.test_session() as sess:
      predictions = tf.constant([0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
                                shape=(1, 7), dtype=tf.float32)
      labels = tf.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
      auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')

      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)

      self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)

  def testAllIncorrect(self):
    """Labels are the complement of predictions: AUC 0."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = tf.constant(inputs, dtype=tf.float32)
      labels = tf.constant(1 - inputs, dtype=tf.float32)
      auc, update_op = metrics.streaming_auc(predictions, labels)

      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0, sess.run(update_op))

      self.assertAlmostEqual(0, auc.eval())

  def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
    """With no actual positives the metric reports AUC 1."""
    with self.test_session() as sess:
      predictions = tf.zeros([4], dtype=tf.float32)
      labels = tf.zeros([4])
      auc, update_op = metrics.streaming_auc(predictions, labels)

      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(1, sess.run(update_op), 6)

      self.assertAlmostEqual(1, auc.eval(), 6)

  def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
    """All-ones predictions and labels give PR-AUC 1."""
    with self.test_session() as sess:
      predictions = tf.ones([4], dtype=tf.float32)
      labels = tf.ones([4])
      auc, update_op = metrics.streaming_auc(predictions,
                                             labels,
                                             curve='PR')

      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(1, sess.run(update_op), 6)

      self.assertAlmostEqual(1, auc.eval(), 6)

  def np_auc(self, predictions, labels, weights):
    """Computes the AUC explicitly using Numpy.

    Args:
      predictions: an ndarray with shape [N].
      labels: an ndarray with shape [N].
      weights: an ndarray with shape [N], or None for uniform weights.

    Returns:
      the area under the ROC curve.
    """
    if weights is None:
      weights = np.ones(np.size(predictions))
    is_positive = labels > 0
    num_positives = np.sum(weights[is_positive])
    num_negatives = np.sum(weights[~is_positive])

    # Sort descending:
    inds = np.argsort(-predictions)

    sorted_labels = labels[inds]
    sorted_weights = weights[inds]
    is_positive = sorted_labels > 0

    # TPR at each threshold, then integrate over the negatives (FPR steps).
    tp = np.cumsum(sorted_weights * is_positive) / num_positives
    return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives

  def testWithMultipleUpdates(self):
    """Streaming AUC approximates the exact numpy AUC on batched data."""
    num_samples = 1000
    batch_size = 10
    num_batches = int(num_samples / batch_size)

    # Create the labels and data.
    labels = np.random.randint(0, 2, size=num_samples)
    noise = np.random.normal(0.0, scale=0.2, size=num_samples)
    predictions = 0.4 + 0.2 * labels + noise
    predictions[predictions > 1] = 1
    predictions[predictions < 0] = 0

    def _enqueue_as_batches(x, enqueue_ops):
      # Split x into num_batches batches and stage one enqueue op per batch.
      x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
      x_queue = tf.FIFOQueue(num_batches, dtypes=tf.float32,
                             shapes=(batch_size,))
      for i in range(num_batches):
        enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
      return x_queue.dequeue()

    for weights in (None,
                    np.ones(num_samples),
                    np.random.exponential(scale=1.0, size=num_samples)):
      expected_auc = self.np_auc(predictions, labels, weights)

      with self.test_session() as sess:
        enqueue_ops = [[] for i in range(num_batches)]
        tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
        tf_labels = _enqueue_as_batches(labels, enqueue_ops)
        tf_weights = (_enqueue_as_batches(weights, enqueue_ops)
                      if weights is not None else None)

        for i in range(num_batches):
          sess.run(enqueue_ops[i])

        auc, update_op = metrics.streaming_auc(
            tf_predictions, tf_labels, curve='ROC', num_thresholds=500,
            weights=tf_weights)

        sess.run(tf.local_variables_initializer())
        for i in range(num_batches):
          sess.run(update_op)

        # Since this is only approximate, we can't expect a 6 digits match.
        # Although with higher number of samples/thresholds we should see the
        # accuracy improving
        self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class StreamingSpecificityAtSensitivityTest(tf.test.TestCase):
  def setUp(self):
    # Fix numpy's seed and start from an empty default graph per test.
    np.random.seed(1)
    tf.reset_default_graph()
  def testMetricsCollection(self):
    """The value tensor is registered in `metrics_collections`."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_specificity_at_sensitivity(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        sensitivity=0.7,
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])
  def testUpdatesCollection(self):
    """The update op is registered in `updates_collections`."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        sensitivity=0.7,
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change it."""
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
    specificity, update_op = metrics.streaming_specificity_at_sensitivity(
        predictions, labels, sensitivity=0.7)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_specificity = specificity.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = tf.constant(inputs, dtype=tf.float32)
labels = tf.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(tf.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0,
0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = tf.constant(predictions_values, dtype=tf.float32)
labels = tf.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.test_session() as sess:
sess.run(tf.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0,
0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = tf.constant(predictions_values, dtype=tf.float32)
labels = tf.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
with self.test_session() as sess:
sess.run(tf.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0,
0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = tf.constant(predictions_values, dtype=tf.float32)
labels = tf.constant(labels_values)
weights = tf.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(tf.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0,
0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = tf.constant(predictions_values, dtype=tf.float32)
labels = tf.constant(labels_values)
weights = tf.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(tf.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class StreamingSensitivityAtSpecificityTest(tf.test.TestCase):
  """Tests for metrics.streaming_sensitivity_at_specificity."""

  def setUp(self):
    # Fixed seed and a fresh graph so each test is deterministic and isolated.
    np.random.seed(1)
    tf.reset_default_graph()

  def testMetricsCollection(self):
    # The value tensor should be added to the requested metrics collection.
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_sensitivity_at_specificity(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        specificity=0.7,
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    # The update op should be added to the requested updates collection.
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        specificity=0.7,
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    # Evaluating the value tensor repeatedly must not change accumulated state.
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=2, dtype=tf.int64, seed=1)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.7)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_sensitivity = sensitivity.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)

  def testAllCorrect(self):
    # Perfect predictions: sensitivity should be exactly 1.
    # NOTE(review): the local name `specificity` actually holds this metric's
    # value tensor (the sensitivity), mirroring the tests above.
    inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = tf.constant(inputs, dtype=tf.float32)
    labels = tf.constant(inputs)
    specificity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.7)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))
      self.assertEqual(1, specificity.eval())

  def testSomeCorrectHighSpecificity(self):
    # At specificity 0.8 the expected sensitivity is 0.8.
    predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4,
                          0.1, 0.45, 0.5, 0.8, 0.9]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    predictions = tf.constant(predictions_values, dtype=tf.float32)
    labels = tf.constant(labels_values)
    specificity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.8)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.8, sess.run(update_op))
      self.assertAlmostEqual(0.8, specificity.eval())

  def testSomeCorrectLowSpecificity(self):
    # At specificity 0.4 the expected sensitivity is 0.6.
    predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4,
                          0.01, 0.02, 0.25, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    predictions = tf.constant(predictions_values, dtype=tf.float32)
    labels = tf.constant(labels_values)
    specificity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.4)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.6, sess.run(update_op))
      self.assertAlmostEqual(0.6, specificity.eval())

  def testWeighted(self):
    # Per-example weights change the expected sensitivity to 0.675.
    predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4,
                          0.01, 0.02, 0.25, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    predictions = tf.constant(predictions_values, dtype=tf.float32)
    labels = tf.constant(labels_values)
    weights = tf.constant(weights_values)
    specificity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, weights=weights, specificity=0.4)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(0.675, sess.run(update_op))
      self.assertAlmostEqual(0.675, specificity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(tf.test.TestCase):
  """Tests streaming precision/recall evaluated at a list of thresholds."""

  def setUp(self):
    # Fixed seed and a fresh graph so each test is deterministic and isolated.
    np.random.seed(1)
    tf.reset_default_graph()

  def testMetricsCollection(self):
    # Both value tensors should land in the requested metrics collection.
    my_collection_name = '__metrics__'
    prec, _ = metrics.streaming_precision_at_thresholds(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        metrics_collections=[my_collection_name])
    rec, _ = metrics.streaming_recall_at_thresholds(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [prec, rec])

  def testUpdatesCollection(self):
    # Both update ops should land in the requested updates collection.
    my_collection_name = '__updates__'
    _, precision_op = metrics.streaming_precision_at_thresholds(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        updates_collections=[my_collection_name])
    _, recall_op = metrics.streaming_recall_at_thresholds(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name),
                         [precision_op, recall_op])

  def testValueTensorIsIdempotent(self):
    # Evaluating the value tensors repeatedly must not change state.
    predictions = tf.random_uniform((10, 3), maxval=1, dtype=tf.float32, seed=1)
    labels = tf.random_uniform((10, 3), maxval=1, dtype=tf.int64, seed=1)
    thresholds = [0, 0.5, 1.0]
    prec, prec_op = metrics.streaming_precision_at_thresholds(
        predictions, labels, thresholds)
    rec, rec_op = metrics.streaming_recall_at_thresholds(
        predictions, labels, thresholds)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())

      # Run several updates, then verify idempotency.
      sess.run([prec_op, rec_op])
      initial_prec = prec.eval()
      initial_rec = rec.eval()
      for _ in range(10):
        sess.run([prec_op, rec_op])
        self.assertAllClose(initial_prec, prec.eval())
        self.assertAllClose(initial_rec, rec.eval())

  # TODO(nsilberman): fix tests (passing but incorrect).
  def testAllCorrect(self):
    # Perfect predictions: precision and recall are both 1 at threshold 0.5.
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = tf.constant(inputs, dtype=tf.float32)
      labels = tf.constant(inputs)
      thresholds = [0.5]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)

      sess.run(tf.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertEqual(1, prec.eval())
      self.assertEqual(1, rec.eval())

  def testSomeCorrect(self):
    # 1 true positive out of 2 positive predictions / 2 positive labels.
    with self.test_session() as sess:
      predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
      labels = tf.constant([0, 1, 1, 0], shape=(1, 4))
      thresholds = [0.5]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)

      sess.run(tf.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(0.5, prec.eval())
      self.assertAlmostEqual(0.5, rec.eval())

  def testAllIncorrect(self):
    # Labels are the complement of predictions: no true positives at all.
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = tf.constant(inputs, dtype=tf.float32)
      labels = tf.constant(1 - inputs, dtype=tf.float32)
      thresholds = [0.5]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)

      sess.run(tf.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(0, prec.eval())
      self.assertAlmostEqual(0, rec.eval())

  def testWeights1d(self):
    # A (2, 1) weight column masks out the first example entirely.
    with self.test_session() as sess:
      predictions = tf.constant([[1, 0], [1, 0]], shape=(2, 2),
                                dtype=tf.float32)
      labels = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
      weights = tf.constant([[0], [1]], shape=(2, 1), dtype=tf.float32)
      thresholds = [0.5, 1.1]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds, weights=weights)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds, weights=weights)

      # Split the per-threshold vectors into scalars for the two thresholds.
      [prec_low, prec_high] = tf.split(0, 2, prec)
      prec_low = tf.reshape(prec_low, shape=())
      prec_high = tf.reshape(prec_high, shape=())
      [rec_low, rec_high] = tf.split(0, 2, rec)
      rec_low = tf.reshape(rec_low, shape=())
      rec_high = tf.reshape(rec_high, shape=())

      sess.run(tf.local_variables_initializer())
      sess.run([prec_op, rec_op])

      # Threshold 1.1 exceeds every prediction, so nothing is predicted
      # positive there.
      self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
      self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, rec_high.eval(), places=5)

  def testWeights2d(self):
    # Full (2, 2) weights: same masking as testWeights1d, element-wise.
    with self.test_session() as sess:
      predictions = tf.constant([[1, 0], [1, 0]], shape=(2, 2),
                                dtype=tf.float32)
      labels = tf.constant([[0, 1], [1, 0]], shape=(2, 2))
      weights = tf.constant([[0, 0], [1, 1]], shape=(2, 2), dtype=tf.float32)
      thresholds = [0.5, 1.1]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds, weights=weights)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds, weights=weights)

      # Split the per-threshold vectors into scalars for the two thresholds.
      [prec_low, prec_high] = tf.split(0, 2, prec)
      prec_low = tf.reshape(prec_low, shape=())
      prec_high = tf.reshape(prec_high, shape=())
      [rec_low, rec_high] = tf.split(0, 2, rec)
      rec_low = tf.reshape(rec_low, shape=())
      rec_high = tf.reshape(rec_high, shape=())

      sess.run(tf.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
      self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, rec_high.eval(), places=5)

  def testExtremeThresholds(self):
    # Thresholds below/above all predictions: everything / nothing is
    # predicted positive.
    with self.test_session() as sess:
      predictions = tf.constant([1, 0, 1, 0], shape=(1, 4), dtype=tf.float32)
      labels = tf.constant([0, 1, 1, 1], shape=(1, 4))
      thresholds = [-1.0, 2.0]  # lower/higher than any values
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)

      [prec_low, prec_high] = tf.split(0, 2, prec)
      [rec_low, rec_high] = tf.split(0, 2, rec)

      sess.run(tf.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(0.75, prec_low.eval())
      self.assertAlmostEqual(0.0, prec_high.eval())
      self.assertAlmostEqual(1.0, rec_low.eval())
      self.assertAlmostEqual(0.0, rec_high.eval())

  def testZeroLabelsPredictions(self):
    # No positives anywhere: both metrics fall back to 0.
    with self.test_session() as sess:
      predictions = tf.zeros([4], dtype=tf.float32)
      labels = tf.zeros([4])
      thresholds = [0.5]
      prec, prec_op = metrics.streaming_precision_at_thresholds(
          predictions, labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          predictions, labels, thresholds)

      sess.run(tf.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(0, prec.eval(), 6)
      self.assertAlmostEqual(0, rec.eval(), 6)

  def testWithMultipleUpdates(self):
    # Streams batches through FIFO queues and checks the accumulated metrics
    # against a NumPy-computed confusion matrix over the same data.
    num_samples = 1000
    batch_size = 10
    num_batches = int(num_samples / batch_size)

    # Create the labels and data.
    labels = np.random.randint(0, 2, size=(num_samples, 1))
    noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
    predictions = 0.4 + 0.2 * labels + noise
    predictions[predictions > 1] = 1
    predictions[predictions < 0] = 0
    thresholds = [0.3]

    # Compute the reference confusion-matrix counts in plain Python.
    tp = 0
    fp = 0
    fn = 0
    tn = 0
    for i in range(num_samples):
      if predictions[i] > thresholds[0]:
        if labels[i] == 1:
          tp += 1
        else:
          fp += 1
      else:
        if labels[i] == 1:
          fn += 1
        else:
          tn += 1
    # Epsilon matches the smoothing used by the streaming implementation.
    epsilon = 1e-7
    expected_prec = tp / (epsilon + tp + fp)
    expected_rec = tp / (epsilon + tp + fn)

    labels = labels.astype(np.float32)
    predictions = predictions.astype(np.float32)

    with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
      predictions_batches = predictions.reshape((batch_size, num_batches))
      labels_batches = labels.reshape((batch_size, num_batches))

      # Enqueue the data:
      predictions_queue = tf.FIFOQueue(num_batches, dtypes=tf.float32,
                                       shapes=(batch_size,))
      labels_queue = tf.FIFOQueue(num_batches, dtypes=tf.float32,
                                  shapes=(batch_size,))

      for i in range(int(num_batches)):
        tf_prediction = tf.constant(predictions_batches[:, i])
        tf_label = tf.constant(labels_batches[:, i])
        sess.run([predictions_queue.enqueue(tf_prediction),
                  labels_queue.enqueue(tf_label)])

      tf_predictions = predictions_queue.dequeue()
      tf_labels = labels_queue.dequeue()

      prec, prec_op = metrics.streaming_precision_at_thresholds(
          tf_predictions, tf_labels, thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(
          tf_predictions, tf_labels, thresholds)

      sess.run(tf.local_variables_initializer())
      for _ in range(int(num_samples / batch_size)):
        sess.run([prec_op, rec_op])

      # Since this is only approximate, we can't expect a 6 digits match.
      # Although with higher number of samples/thresholds we should see the
      # accuracy improving
      self.assertAlmostEqual(expected_prec, prec.eval(), 2)
      self.assertAlmostEqual(expected_rec, rec.eval(), 2)
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(tf.test.TestCase):
  """Validates streaming_recall_at_k against streaming_sparse_recall_at_k.

  Both ops are run on the same fixture and must agree for every k.
  """

  def setUp(self):
    np.random.seed(1)
    tf.reset_default_graph()

    # Fixture: 4 examples over 3 classes; all true labels are class 0.
    self._batch_size = 4
    self._num_classes = 3
    self._np_predictions = np.matrix(('0.1 0.2 0.7;'
                                      '0.6 0.2 0.2;'
                                      '0.0 0.9 0.1;'
                                      '0.2 0.0 0.8'))
    self._np_labels = [0, 0, 0, 0]

  def testMetricsCollection(self):
    # The value tensor should be added to the requested metrics collection.
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_recall_at_k(
        predictions=tf.ones((self._batch_size, self._num_classes)),
        labels=tf.ones((self._batch_size,), dtype=tf.int32),
        k=1,
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    # The update op should be added to the requested updates collection.
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_recall_at_k(
        predictions=tf.ones((self._batch_size, self._num_classes)),
        labels=tf.ones((self._batch_size,), dtype=tf.int32),
        k=1,
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testSingleUpdateKIs1(self):
    # Only example 1 ranks class 0 first: recall@1 = 1/4.
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(
        self._np_labels, shape=(self._batch_size,), dtype=tf.int64)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=1)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, tf.reshape(labels, (self._batch_size, 1)), k=1)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(0.25, sess.run(update_op))
      self.assertEqual(0.25, recall.eval())
      self.assertEqual(0.25, sess.run(sp_update_op))
      self.assertEqual(0.25, sp_recall.eval())

  def testSingleUpdateKIs2(self):
    # Class 0 is in the top 2 for half the examples: recall@2 = 2/4.
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(
        self._np_labels, shape=(self._batch_size,), dtype=tf.int64)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=2)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, tf.reshape(labels, (self._batch_size, 1)), k=2)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(0.5, sess.run(update_op))
      self.assertEqual(0.5, recall.eval())
      self.assertEqual(0.5, sess.run(sp_update_op))
      self.assertEqual(0.5, sp_recall.eval())

  def testSingleUpdateKIs3(self):
    # k equals the number of classes, so recall is trivially 1.
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(
        self._np_labels, shape=(self._batch_size,), dtype=tf.int64)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=3)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, tf.reshape(labels, (self._batch_size, 1)), k=3)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(1.0, sess.run(update_op))
      self.assertEqual(1.0, recall.eval())
      self.assertEqual(1.0, sess.run(sp_update_op))
      self.assertEqual(1.0, sp_recall.eval())

  def testSingleUpdateSomeMissingKIs2(self):
    # Weights mask out examples 0 and 2; both remaining examples hit at k=2.
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(
        self._np_labels, shape=(self._batch_size,), dtype=tf.int64)
    weights = tf.constant([0, 1, 0, 1], shape=(self._batch_size,),
                          dtype=tf.float32)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=2, weights=weights)
    sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
        predictions, tf.reshape(labels, (self._batch_size, 1)), k=2,
        weights=weights)

    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(1.0, sess.run(update_op))
      self.assertEqual(1.0, recall.eval())
      self.assertEqual(1.0, sess.run(sp_update_op))
      self.assertEqual(1.0, sp_recall.eval())
class StreamingSparsePrecisionTest(tf.test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
                                          predictions,
                                          labels,
                                          k,
                                          expected,
                                          class_id=None,
                                          weights=None):
  """Runs streaming_sparse_precision_at_k and checks value and update ops.

  Args:
    predictions: Per-example class scores; converted to a float32 constant.
    labels: Dense array or SparseTensorValue of ground-truth class ids.
    k: Number of top predictions to consider.
    expected: Expected metric value, or NaN when the metric is undefined.
    class_id: Optional class restriction forwarded to the metric.
    weights: Optional weights; converted to a float32 constant when given.
  """
  with tf.Graph().as_default() as g, self.test_session(g):
    if weights is not None:
      weights = tf.constant(weights, tf.float32)
    metric, update = metrics.streaming_sparse_precision_at_k(
        predictions=tf.constant(predictions, tf.float32), labels=labels,
        k=k, class_id=class_id, weights=weights)

    # Fails without initialized vars.
    self.assertRaises(tf.OpError, metric.eval)
    self.assertRaises(tf.OpError, update.eval)
    tf.initialize_variables(tf.local_variables()).run()

    # Run per-step op and assert expected values.
    if math.isnan(expected):
      _assert_nan(self, update.eval())
      _assert_nan(self, metric.eval())
    else:
      self.assertEqual(expected, update.eval())
      self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
                                              top_k_predictions,
                                              labels,
                                              expected,
                                              class_id=None,
                                              weights=None):
  """Runs streaming_sparse_precision_at_top_k and checks value and update ops.

  Args:
    top_k_predictions: Per-example indices of the top-k predicted classes;
        converted to an int32 constant.
    labels: Dense array or SparseTensorValue of ground-truth class ids.
    expected: Expected metric value, or NaN when the metric is undefined.
    class_id: Optional class restriction forwarded to the metric.
    weights: Optional weights; converted to a float32 constant when given.
  """
  with tf.Graph().as_default() as g, self.test_session(g):
    if weights is not None:
      weights = tf.constant(weights, tf.float32)
    metric, update = metrics.streaming_sparse_precision_at_top_k(
        top_k_predictions=tf.constant(top_k_predictions, tf.int32),
        labels=labels, class_id=class_id, weights=weights)

    # Fails without initialized vars.
    self.assertRaises(tf.OpError, metric.eval)
    self.assertRaises(tf.OpError, update.eval)
    tf.initialize_variables(tf.local_variables()).run()

    # Run per-step op and assert expected values. Use the shared _assert_nan
    # helper for consistency with _test_streaming_sparse_precision_at_k and
    # _test_streaming_sparse_average_precision_at_k.
    if math.isnan(expected):
      _assert_nan(self, update.eval())
      _assert_nan(self, metric.eval())
    else:
      self.assertEqual(expected, update.eval())
      self.assertEqual(expected, metric.eval())
def _test_sparse_average_precision_at_k(self,
                                        predictions,
                                        labels,
                                        k,
                                        expected):
  """Evaluates sparse_average_precision_at_k and compares it to `expected`."""
  with tf.Graph().as_default() as g, self.test_session(g):
    scores = tf.constant(predictions, tf.float32)
    metric = metric_ops.sparse_average_precision_at_k(
        scores, labels, k)
    self.assertAllEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(
    self, predictions, labels, k, expected, weights=None):
  """Runs streaming_sparse_average_precision_at_k and checks both ops.

  Args:
    predictions: Per-example class scores; converted to a float32 constant.
    labels: Dense array or SparseTensorValue of ground-truth class ids.
    k: Number of top predictions to consider.
    expected: Expected metric value, or NaN when the metric is undefined.
    weights: Optional weights; converted to a float32 constant when given.
  """
  with tf.Graph().as_default() as g, self.test_session(g):
    if weights is not None:
      weights = tf.constant(weights, tf.float32)
    predictions = tf.constant(predictions, tf.float32)
    metric, update = metrics.streaming_sparse_average_precision_at_k(
        predictions, labels, k, weights=weights)

    # Fails without initialized vars.
    self.assertRaises(tf.OpError, metric.eval)
    self.assertRaises(tf.OpError, update.eval)
    local_variables = tf.local_variables()
    tf.initialize_variables(local_variables).run()

    # Run per-step op and assert expected values.
    if math.isnan(expected):
      _assert_nan(self, update.eval())
      _assert_nan(self, metric.eval())
    else:
      self.assertAlmostEqual(expected, update.eval())
      self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
  """streaming_sparse_precision_at_top_k must reject rank-1 predictions."""
  with self.test_session():
    # top_k_predictions has rank < 2.
    top_k_predictions = [9, 4, 6, 2, 0]
    sp_labels = tf.SparseTensorValue(
        indices=np.array([[0,], [1,], [2,]], np.int64),
        values=np.array([2, 7, 8], np.int64),
        shape=np.array([10,], np.int64))

    # Construction itself should raise; the trailing statements only run if
    # the expected ValueError is NOT raised, making the assertion fail.
    with self.assertRaises(ValueError):
      precision, _ = metrics.streaming_sparse_precision_at_top_k(
          top_k_predictions=tf.constant(top_k_predictions, tf.int64),
          labels=sp_labels)
      tf.initialize_variables(tf.local_variables()).run()
      precision.eval()
def test_average_precision(self):
  """Checks precision@k and average precision on two worked examples."""
  # Example 1.
  # Matches example here:
  # fastml.com/what-you-wanted-to-know-about-mean-average-precision
  labels_ex1 = (0, 1, 2, 3, 4)
  labels = np.array([labels_ex1], dtype=np.int64)
  predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
  predictions = (predictions_ex1,)
  predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
  # Hand-computed precision@k for k = 1..4.
  precision_ex1 = (
      0.0 / 1,
      1.0 / 2,
      1.0 / 3,
      2.0 / 4
  )
  avg_precision_ex1 = (
      0.0 / 1,
      precision_ex1[1] / 2,
      precision_ex1[1] / 3,
      (precision_ex1[1] + precision_ex1[3]) / 4
  )
  for i in xrange(4):
    k = i + 1
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k, expected=precision_ex1[i])
    self._test_streaming_sparse_precision_at_top_k(
        (predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
    self._test_sparse_average_precision_at_k(
        predictions, labels, k, expected=[avg_precision_ex1[i]])
    self._test_streaming_sparse_average_precision_at_k(
        predictions, labels, k, expected=avg_precision_ex1[i])

  # Example 2.
  labels_ex2 = (0, 2, 4, 5, 6)
  labels = np.array([labels_ex2], dtype=np.int64)
  predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
  predictions = (predictions_ex2,)
  predictions_top_k_ex2 = (1, 3, 0, 6, 5)
  precision_ex2 = (
      0.0 / 1,
      0.0 / 2,
      1.0 / 3,
      2.0 / 4
  )
  avg_precision_ex2 = (
      0.0 / 1,
      0.0 / 2,
      precision_ex2[2] / 3,
      (precision_ex2[2] + precision_ex2[3]) / 4
  )
  for i in xrange(4):
    k = i + 1
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k, expected=precision_ex2[i])
    self._test_streaming_sparse_precision_at_top_k(
        (predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
    self._test_sparse_average_precision_at_k(
        predictions, labels, k, expected=[avg_precision_ex2[i]])
    self._test_streaming_sparse_average_precision_at_k(
        predictions, labels, k, expected=avg_precision_ex2[i])

  # Both examples, we expect both precision and average precision to be the
  # average of the 2 examples.
  labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
  predictions = (predictions_ex1, predictions_ex2)
  # Per-example pairs for the non-streaming op; means for the streaming ops.
  average_precision = [
      (ex1, ex2) for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)]
  streaming_precision = [
      (ex1 + ex2) / 2
      for ex1, ex2 in zip(precision_ex1, precision_ex2)]
  streaming_average_precision = [
      (ex1 + ex2) / 2
      for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)]
  for i in xrange(4):
    k = i + 1
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k, expected=streaming_precision[i])
    predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
    self._test_streaming_sparse_precision_at_top_k(
        predictions_top_k, labels, expected=streaming_precision[i])
    self._test_sparse_average_precision_at_k(
        predictions, labels, k, expected=average_precision[i])
    self._test_streaming_sparse_average_precision_at_k(
        predictions, labels, k, expected=streaming_average_precision[i])

  # Weighted examples, we expect streaming average precision to be the
  # weighted average of the 2 examples.
  weights = (0.3, 0.6)
  streaming_average_precision = [
      (weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
      for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)]
  for i in xrange(4):
    k = i + 1
    self._test_streaming_sparse_average_precision_at_k(
        predictions, labels, k, expected=streaming_average_precision[i],
        weights=weights)
def test_average_precision_some_labels_out_of_range(self):
  """Tests that labels outside the [0, n_classes) range are ignored.

  Same fixture as Example 1 of test_average_precision, but with the extra
  out-of-range labels -1 and 7 added; the expected values are unchanged.
  """
  labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
  labels = np.array([labels_ex1], dtype=np.int64)
  predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
  predictions = (predictions_ex1,)
  predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
  precision_ex1 = (
      0.0 / 1,
      1.0 / 2,
      1.0 / 3,
      2.0 / 4
  )
  avg_precision_ex1 = (
      0.0 / 1,
      precision_ex1[1] / 2,
      precision_ex1[1] / 3,
      (precision_ex1[1] + precision_ex1[3]) / 4
  )
  for i in xrange(4):
    k = i + 1
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k, expected=precision_ex1[i])
    self._test_streaming_sparse_precision_at_top_k(
        (predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
    self._test_sparse_average_precision_at_k(
        predictions, labels, k, expected=[avg_precision_ex1[i]])
    self._test_streaming_sparse_average_precision_at_k(
        predictions, labels, k, expected=avg_precision_ex1[i])
def test_one_label_at_k1_nan(self):
  """Precision@1 is NaN for classes never predicted or out of range."""
  scores = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
  top_k = [[3], [3]]
  label_variants = (
      _binary_2d_label_to_sparse_value(
          [[0, 0, 0, 1], [0, 0, 1, 0]]),
      np.array([[3], [2]], dtype=np.int64),
  )

  # Classes 0,1,2 have 0 predictions; classes -1 and 4 are out of range.
  for label_set in label_variants:
    for cls in (-1, 0, 1, 2, 4):
      self._test_streaming_sparse_precision_at_k(
          scores, label_set, k=1, expected=NAN, class_id=cls)
      self._test_streaming_sparse_precision_at_top_k(
          top_k, label_set, expected=NAN, class_id=cls)
def test_one_label_at_k1(self):
  """Precision@1 with one relevant label per example (sparse and dense)."""
  scores = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
  top_k = [[3], [3]]
  label_variants = (
      _binary_2d_label_to_sparse_value(
          [[0, 0, 0, 1], [0, 0, 1, 0]]),
      np.array([[3], [2]], dtype=np.int64),
  )

  for label_set in label_variants:
    # Class 3: 1 label, 2 predictions, 1 correct.
    self._test_streaming_sparse_precision_at_k(
        scores, label_set, k=1, expected=1.0 / 2, class_id=3)
    self._test_streaming_sparse_precision_at_top_k(
        top_k, label_set, expected=1.0 / 2, class_id=3)

    # All classes: 2 labels, 2 predictions, 1 correct.
    self._test_streaming_sparse_precision_at_k(
        scores, label_set, k=1, expected=1.0 / 2)
    self._test_streaming_sparse_precision_at_top_k(
        top_k, label_set, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
  """Precision@5 is NaN for classes never predicted or out of range."""
  scores = [
      [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
      [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
  ]
  top_k = [
      [9, 4, 6, 2, 0],
      [5, 7, 2, 9, 6],
  ]
  label_variants = (
      _binary_2d_label_to_sparse_value([
          [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
          [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
      ]),
      np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64),
  )

  # Classes 1,3,8 have 0 predictions; classes -1 and 10 are out of range.
  for label_set in label_variants:
    for cls in (-1, 1, 3, 8, 10):
      self._test_streaming_sparse_precision_at_k(
          scores, label_set, k=5, expected=NAN, class_id=cls)
      self._test_streaming_sparse_precision_at_top_k(
          top_k, label_set, expected=NAN, class_id=cls)
def test_three_labels_at_k5_no_labels(self):
  """Precision@5 is 0 for predicted classes that have no labels."""
  scores = [
      [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
      [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
  ]
  top_k = [
      [9, 4, 6, 2, 0],
      [5, 7, 2, 9, 6],
  ]
  label_variants = (
      _binary_2d_label_to_sparse_value([
          [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
          [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
      ]),
      np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64),
  )

  # Classes 0,4,6,9: 0 labels, >=1 prediction.
  for label_set in label_variants:
    for cls in (0, 4, 6, 9):
      self._test_streaming_sparse_precision_at_k(
          scores, label_set, k=5, expected=0.0, class_id=cls)
      self._test_streaming_sparse_precision_at_top_k(
          top_k, label_set, expected=0.0, class_id=cls)
def test_three_labels_at_k5(self):
  """Precision@5 per class and overall, for sparse and dense labels."""
  predictions = [
      [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
      [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
  ]
  top_k_predictions = [
      [9, 4, 6, 2, 0],
      [5, 7, 2, 9, 6],
  ]
  sparse_labels = _binary_2d_label_to_sparse_value([
      [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
      [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
  ])
  dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)

  for labels in (sparse_labels, dense_labels):
    # Class 2: 2 labels, 2 correct predictions.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=2.0 / 2,
        class_id=2)
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=2.0 / 2, class_id=2)

    # Class 5: 1 label, 1 correct prediction.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=1.0 / 1, class_id=5)
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=1.0 / 1, class_id=5)

    # Class 7: 1 label, 1 incorrect prediction.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=0.0 / 1, class_id=7)
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=0.0 / 1, class_id=7)

    # All classes: 10 predictions, 3 correct.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=3.0 / 10)
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
  """Tests that labels outside the [0, n_classes) range are ignored.

  Same expectations as test_three_labels_at_k5; the extra -1 and 10 label
  values must not change any result.
  """
  predictions = [
      [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
      [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
  ]
  top_k_predictions = [
      [9, 4, 6, 2, 0],
      [5, 7, 2, 9, 6],
  ]
  sp_labels = tf.SparseTensorValue(
      indices=[[0, 0], [0, 1], [0, 2], [0, 3],
               [1, 0], [1, 1], [1, 2], [1, 3]],
      # values -1 and 10 are outside the [0, n_classes) range and are ignored.
      values=np.array([2, 7, -1, 8,
                       1, 2, 5, 10], np.int64),
      shape=[2, 4])

  # Class 2: 2 labels, 2 correct predictions.
  self._test_streaming_sparse_precision_at_k(
      predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
  self._test_streaming_sparse_precision_at_top_k(
      top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)

  # Class 5: 1 label, 1 correct prediction.
  self._test_streaming_sparse_precision_at_k(
      predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
  self._test_streaming_sparse_precision_at_top_k(
      top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)

  # Class 7: 1 label, 1 incorrect prediction.
  self._test_streaming_sparse_precision_at_k(
      predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
  self._test_streaming_sparse_precision_at_top_k(
      top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)

  # All classes: 10 predictions, 3 correct.
  self._test_streaming_sparse_precision_at_k(
      predictions, sp_labels, k=5, expected=3.0 / 10)
  self._test_streaming_sparse_precision_at_top_k(
      top_k_predictions, sp_labels, expected=3.0 / 10)
  def test_3d_nan(self):
    """Precision is NAN for classes with no predictions or out-of-range ids."""
    predictions = [[
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
    ], [
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
    ]]
    # Per-row indices of the top-5 prediction scores above, highest first.
    top_k_predictions = [[
        [9, 4, 6, 2, 0],
        [5, 7, 2, 9, 6],
    ], [
        [5, 7, 2, 9, 6],
        [9, 4, 6, 2, 0],
    ]]
    labels = _binary_3d_label_to_sparse_value([[
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
    ], [
        [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
    ]])
    # Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
    for class_id in (-1, 1, 3, 8, 10):
      self._test_streaming_sparse_precision_at_k(
          predictions, labels, k=5, expected=NAN, class_id=class_id)
      self._test_streaming_sparse_precision_at_top_k(
          top_k_predictions, labels, expected=NAN, class_id=class_id)
  def test_3d_no_labels(self):
    """Precision is 0 for classes that are predicted but never labeled."""
    predictions = [[
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
    ], [
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
    ]]
    # Per-row indices of the top-5 prediction scores above, highest first.
    top_k_predictions = [[
        [9, 4, 6, 2, 0],
        [5, 7, 2, 9, 6],
    ], [
        [5, 7, 2, 9, 6],
        [9, 4, 6, 2, 0],
    ]]
    labels = _binary_3d_label_to_sparse_value([[
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
    ], [
        [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
    ]])
    # Classes 0,4,6,9: 0 labels, >=1 prediction.
    for class_id in (0, 4, 6, 9):
      self._test_streaming_sparse_precision_at_k(
          predictions, labels, k=5, expected=0.0, class_id=class_id)
      self._test_streaming_sparse_precision_at_top_k(
          top_k_predictions, labels, expected=0.0, class_id=class_id)
  def test_3d(self):
    """Precision@5 over 3-D (batched sequence) predictions and labels."""
    predictions = [[
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
    ], [
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
    ]]
    # Per-row indices of the top-5 prediction scores above, highest first.
    top_k_predictions = [[
        [9, 4, 6, 2, 0],
        [5, 7, 2, 9, 6],
    ], [
        [5, 7, 2, 9, 6],
        [9, 4, 6, 2, 0],
    ]]
    labels = _binary_3d_label_to_sparse_value([[
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
    ], [
        [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
    ]])
    # Class 2: 4 predictions, all correct.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=4.0 / 4, class_id=2)
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=4.0 / 4, class_id=2)
    # Class 5: 2 predictions, both correct.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=2.0 / 2, class_id=5)
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=2.0 / 2, class_id=5)
    # Class 7: 2 predictions, 1 correct.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=1.0 / 2, class_id=7)
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=1.0 / 2, class_id=7)
    # All classes: 20 predictions, 7 correct.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=7.0 / 20)
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
], [
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value([[
[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
], [
[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
]])
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN,
weights=[[0, 0], [0, 0]])
  def test_3d_ignore_some(self):
    """Per-example / per-timestep weights select which predictions count."""
    predictions = [[
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
    ], [
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
    ]]
    # Per-row indices of the top-5 prediction scores above, highest first.
    top_k_predictions = [[
        [9, 4, 6, 2, 0],
        [5, 7, 2, 9, 6],
    ], [
        [5, 7, 2, 9, 6],
        [9, 4, 6, 2, 0],
    ]]
    labels = _binary_3d_label_to_sparse_value([[
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
    ], [
        [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
    ]])
    # Class 2: 2 predictions, both correct.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=2.0 / 2.0, class_id=2,
        weights=[[1], [0]])
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=2.0 / 2.0, class_id=2,
        weights=[[1], [0]])
    # Class 2: 2 predictions, both correct.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=2.0 / 2.0, class_id=2,
        weights=[[0], [1]])
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=2.0 / 2.0, class_id=2,
        weights=[[0], [1]])
    # Class 7: 1 incorrect prediction.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=0.0 / 1.0, class_id=7,
        weights=[[1], [0]])
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=0.0 / 1.0, class_id=7,
        weights=[[1], [0]])
    # Class 7: 1 correct prediction.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=1.0 / 1.0, class_id=7,
        weights=[[0], [1]])
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=1.0 / 1.0, class_id=7,
        weights=[[0], [1]])
    # Class 7: no predictions.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=NAN, class_id=7,
        weights=[[1, 0], [0, 1]])
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=NAN, class_id=7,
        weights=[[1, 0], [0, 1]])
    # Class 7: 2 predictions, 1 correct.
    self._test_streaming_sparse_precision_at_k(
        predictions, labels, k=5, expected=1.0 / 2.0, class_id=7,
        weights=[[0, 1], [1, 0]])
    self._test_streaming_sparse_precision_at_top_k(
        top_k_predictions, labels, expected=1.0 / 2.0, class_id=7,
        weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
expected_precision = 0.5
with self.test_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=tf.constant(predictions, tf.float32),
labels=_binary_2d_label_to_sparse_value(labels), k=1)
tf.initialize_variables(tf.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
class StreamingSparseRecallTest(tf.test.TestCase):
  """Tests for metrics.streaming_sparse_recall_at_k."""
  def _test_streaming_sparse_recall_at_k(self,
                                         predictions,
                                         labels,
                                         k,
                                         expected,
                                         class_id=None,
                                         weights=None):
    """Builds recall@k in a fresh graph and checks value and update ops."""
    with tf.Graph().as_default() as g, self.test_session(g):
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      metric, update = metrics.streaming_sparse_recall_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=labels, k=k, class_id=class_id, weights=weights)
      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      tf.initialize_variables(tf.local_variables()).run()
      # Run per-step op and assert expected values.
      if math.isnan(expected):
        _assert_nan(self, update.eval())
        _assert_nan(self, metric.eval())
      else:
        self.assertEqual(expected, update.eval())
        self.assertEqual(expected, metric.eval())
  def test_one_label_at_k1_nan(self):
    """Recall is NAN for classes with no labels or out-of-range class ids."""
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    sparse_labels = _binary_2d_label_to_sparse_value(
        [[0, 0, 0, 1], [0, 0, 1, 0]])
    dense_labels = np.array([[3], [2]], dtype=np.int64)
    # Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
    # range.
    for labels in (sparse_labels, dense_labels):
      for class_id in (-1, 0, 1, 4):
        self._test_streaming_sparse_recall_at_k(
            predictions, labels, k=1, expected=NAN,
            class_id=class_id)
  def test_one_label_at_k1_no_predictions(self):
    """Recall is 0 for a labeled class that is never predicted."""
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    sparse_labels = _binary_2d_label_to_sparse_value(
        [[0, 0, 0, 1], [0, 0, 1, 0]])
    dense_labels = np.array([[3], [2]], dtype=np.int64)
    for labels in (sparse_labels, dense_labels):
      # Class 2: 0 predictions.
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=0.0,
          class_id=2)
  def test_one_label_at_k1(self):
    """Recall@1 with one label per example, sparse and dense label formats."""
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    sparse_labels = _binary_2d_label_to_sparse_value(
        [[0, 0, 0, 1], [0, 0, 1, 0]])
    dense_labels = np.array([[3], [2]], dtype=np.int64)
    for labels in (sparse_labels, dense_labels):
      # Class 3: 1 label, 2 predictions, 1 correct.
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 1,
          class_id=3)
      # All classes: 2 labels, 2 predictions, 1 correct.
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 2)
  def test_one_label_at_k1_weighted(self):
    """Per-example weights rescale the recall numerator and denominator."""
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    sparse_labels = _binary_2d_label_to_sparse_value(
        [[0, 0, 0, 1], [0, 0, 1, 0]])
    dense_labels = np.array([[3], [2]], dtype=np.int64)
    for labels in (sparse_labels, dense_labels):
      # Class 3: 1 label, 2 predictions, 1 correct.
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 1, class_id=3,
          weights=(1.0,))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 1, class_id=3,
          weights=(2.0,))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=NAN, class_id=3,
          weights=(0.0, 0.0))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=NAN, class_id=3,
          weights=(0.0, 1.0))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 1, class_id=3,
          weights=(1.0, 0.0))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 1, class_id=3,
          weights=(1.0, 1.0))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=2.0 / 2, class_id=3,
          weights=(2.0, 3.0))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=3.0 / 3, class_id=3,
          weights=(3.0, 2.0))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=0.3 / 0.3, class_id=3,
          weights=(0.3, 0.6))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=0.6 / 0.6, class_id=3,
          weights=(0.6, 0.3))
      # All classes: 2 labels, 2 predictions, 1 correct.
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=NAN, weights=(0.0,))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
  def test_three_labels_at_k5_nan(self):
    """Recall is NAN for unlabeled classes and out-of-range class ids."""
    predictions = [
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
    sparse_labels = _binary_2d_label_to_sparse_value([
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
    dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
    for labels in (sparse_labels, dense_labels):
      # Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
      for class_id in (0, 3, 4, 6, 9, 10):
        self._test_streaming_sparse_recall_at_k(
            predictions, labels, k=5, expected=NAN, class_id=class_id)
  def test_three_labels_at_k5_no_predictions(self):
    """Recall is 0 for a labeled class missing from the top-5 predictions."""
    predictions = [
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
    sparse_labels = _binary_2d_label_to_sparse_value([
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
    dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
    for labels in (sparse_labels, dense_labels):
      # Class 8: 1 label, no predictions.
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=5, expected=0.0 / 1, class_id=8)
  def test_three_labels_at_k5(self):
    """Recall@5 with three labels per example, sparse and dense formats."""
    predictions = [
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
    sparse_labels = _binary_2d_label_to_sparse_value([
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
    dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
    for labels in (sparse_labels, dense_labels):
      # Class 2: 2 labels, both correct.
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=5, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=5, expected=1.0 / 1, class_id=5)
      # Class 7: 1 label, incorrect.
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=5, expected=0.0 / 1, class_id=7)
      # All classes: 6 labels, 3 correct.
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=5, expected=3.0 / 6)
  def test_three_labels_at_k5_some_out_of_range(self):
    """Tests that labels outside the [0, n_classes) count in denominator."""
    predictions = [
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
    sp_labels = tf.SparseTensorValue(
        indices=[[0, 0], [0, 1], [0, 2], [0, 3],
                 [1, 0], [1, 1], [1, 2], [1, 3]],
        # values -1 and 10 are outside the [0, n_classes) range.
        values=np.array([2, 7, -1, 8,
                         1, 2, 5, 10], np.int64),
        shape=[2, 4])
    # Class 2: 2 labels, both correct.
    self._test_streaming_sparse_recall_at_k(
        predictions=predictions, labels=sp_labels, k=5, expected=2.0 / 2,
        class_id=2)
    # Class 5: 1 label, correct.
    self._test_streaming_sparse_recall_at_k(
        predictions=predictions, labels=sp_labels, k=5, expected=1.0 / 1,
        class_id=5)
    # Class 7: 1 label, incorrect.
    self._test_streaming_sparse_recall_at_k(
        predictions=predictions, labels=sp_labels, k=5, expected=0.0 / 1,
        class_id=7)
    # All classes: 8 labels, 3 correct.
    self._test_streaming_sparse_recall_at_k(
        predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
  def test_3d_nan(self):
    """3-D recall is NAN for unlabeled classes and out-of-range class ids."""
    predictions = [[
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
    ], [
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
    ]]
    sparse_labels = _binary_3d_label_to_sparse_value([[
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
    ], [
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]
    ]])
    dense_labels = np.array([[
        [2, 7, 8],
        [1, 2, 5]
    ], [
        [1, 2, 5],
        [2, 7, 8],
    ]], dtype=np.int64)
    for labels in (sparse_labels, dense_labels):
      # Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
      for class_id in (0, 3, 4, 6, 9, 10):
        self._test_streaming_sparse_recall_at_k(
            predictions, labels, k=5, expected=NAN, class_id=class_id)
  def test_3d_no_predictions(self):
    """3-D recall is 0 for labeled classes absent from top-5 predictions."""
    predictions = [[
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
    ], [
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
    ]]
    sparse_labels = _binary_3d_label_to_sparse_value([[
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
    ], [
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]
    ]])
    dense_labels = np.array([[
        [2, 7, 8],
        [1, 2, 5]
    ], [
        [1, 2, 5],
        [2, 7, 8],
    ]], dtype=np.int64)
    for labels in (sparse_labels, dense_labels):
      # Classes 1,8 have 0 predictions, >=1 label.
      for class_id in (1, 8):
        self._test_streaming_sparse_recall_at_k(
            predictions, labels, k=5, expected=0.0, class_id=class_id)
  def test_3d(self):
    """Recall@5 over 3-D (batched sequence) predictions and labels."""
    predictions = [[
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
    ], [
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
    ]]
    labels = _binary_3d_label_to_sparse_value([[
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
    ], [
        [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
    ]])
    # Class 2: 4 labels, all correct.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=4.0 / 4, class_id=2)
    # Class 5: 2 labels, both correct.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=2.0 / 2, class_id=5)
    # Class 7: 2 labels, 1 incorrect.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=1.0 / 2, class_id=7)
    # All classes: 12 labels, 7 correct.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=7.0 / 12)
  def test_3d_ignore_all(self):
    """All-zero weights ignore every example, so recall is NAN everywhere."""
    predictions = [[
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
    ], [
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
    ]]
    labels = _binary_3d_label_to_sparse_value([[
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
    ], [
        [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
    ]])
    for class_id in xrange(10):
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=5, expected=NAN, class_id=class_id,
          weights=[[0], [0]])
      self._test_streaming_sparse_recall_at_k(
          predictions, labels, k=5, expected=NAN, class_id=class_id,
          weights=[[0, 0], [0, 0]])
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
  def test_3d_ignore_some(self):
    """Per-example / per-timestep weights select which labels count."""
    predictions = [[
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]
    ], [
        [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
        [0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]
    ]]
    labels = _binary_3d_label_to_sparse_value([[
        [0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]
    ], [
        [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]
    ]])
    # Class 2: 2 labels, both correct.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=2.0 / 2.0, class_id=2,
        weights=[[1], [0]])
    # Class 2: 2 labels, both correct.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=2.0 / 2.0, class_id=2,
        weights=[[0], [1]])
    # Class 7: 1 label, correct.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=1.0 / 1.0, class_id=7,
        weights=[[0], [1]])
    # Class 7: 1 label, incorrect.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=0.0 / 1.0, class_id=7,
        weights=[[1], [0]])
    # Class 7: 2 labels, 1 correct.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=1.0 / 2.0, class_id=7,
        weights=[[1, 0], [1, 0]])
    # Class 7: No labels.
    self._test_streaming_sparse_recall_at_k(
        predictions, labels, k=5, expected=NAN, class_id=7,
        weights=[[0, 1], [0, 1]])
  def test_sparse_tensor_value(self):
    """SparseTensorValue labels work with streaming_sparse_recall_at_k."""
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    labels = [[0, 0, 1, 0], [0, 0, 0, 1]]
    # Top-1 class is 3 for both rows; only row 1 is labeled 3 => 1/2.
    expected_recall = 0.5
    with self.test_session():
      _, recall = metrics.streaming_sparse_recall_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=_binary_2d_label_to_sparse_value(labels), k=1)
      tf.initialize_variables(tf.local_variables()).run()
      self.assertEqual(expected_recall, recall.eval())
class StreamingMeanAbsoluteErrorTest(tf.test.TestCase):
  """Tests for metrics.streaming_mean_absolute_error."""
  def setUp(self):
    tf.reset_default_graph()
  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_mean_absolute_error(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])
  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean_absolute_error(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change its value."""
    predictions = tf.random_normal((10, 3), seed=1)
    labels = tf.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_mean_absolute_error(
        predictions, labels)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())
  def testSingleUpdateWithErrorAndWeights(self):
    """Weighted MAE: only the weight-1 elements (|4-3|=1, |8-3|=5) count."""
    predictions = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
    labels = tf.constant([1, 3, 2, 3], shape=(1, 4), dtype=tf.float32)
    weights = tf.constant([0, 1, 0, 1], shape=(1, 4))
    error, update_op = metrics.streaming_mean_absolute_error(
        predictions, labels, weights)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      # (1 + 5) / 2 weighted elements = 3.
      self.assertEqual(3, sess.run(update_op))
      self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(tf.test.TestCase):
  """Tests for metrics.streaming_mean_relative_error."""
  def setUp(self):
    tf.reset_default_graph()
  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_mean_relative_error(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        normalizer=tf.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(
        tf.get_collection(my_collection_name), [mean])
  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean_relative_error(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        normalizer=tf.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change its value."""
    predictions = tf.random_normal((10, 3), seed=1)
    labels = tf.random_normal((10, 3), seed=2)
    normalizer = tf.random_normal((10, 3), seed=3)
    error, update_op = metrics.streaming_mean_relative_error(
        predictions, labels, normalizer)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())
  def testSingleUpdateNormalizedByLabels(self):
    """Relative error normalized by labels matches the numpy computation."""
    np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
    np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
    expected_error = np.mean(
        np.divide(np.absolute(np_predictions - np_labels),
                  np_labels))
    predictions = tf.constant(np_predictions, shape=(1, 4), dtype=tf.float32)
    labels = tf.constant(np_labels, shape=(1, 4))
    error, update_op = metrics.streaming_mean_relative_error(
        predictions, labels, normalizer=labels)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(expected_error, sess.run(update_op))
      self.assertEqual(expected_error, error.eval())
  def testSingleUpdateNormalizedByZeros(self):
    """A zero normalizer yields 0 error rather than a division error."""
    np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
    predictions = tf.constant(np_predictions, shape=(1, 4), dtype=tf.float32)
    labels = tf.constant([1, 3, 2, 3], shape=(1, 4), dtype=tf.float32)
    error, update_op = metrics.streaming_mean_relative_error(
        predictions, labels, normalizer=tf.zeros_like(labels))
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(0.0, sess.run(update_op))
      self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(tf.test.TestCase):
  """Tests for metrics.streaming_mean_squared_error."""
  def setUp(self):
    tf.reset_default_graph()
  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_mean_squared_error(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])
  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean_squared_error(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change its value."""
    predictions = tf.random_normal((10, 3), seed=1)
    labels = tf.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_mean_squared_error(
        predictions, labels)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())
  def testSingleUpdateZeroError(self):
    """Identical predictions and labels give zero MSE."""
    predictions = tf.zeros((1, 3), dtype=tf.float32)
    labels = tf.zeros((1, 3), dtype=tf.float32)
    error, update_op = metrics.streaming_mean_squared_error(
        predictions, labels)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(0, sess.run(update_op))
      self.assertEqual(0, error.eval())
  def testSingleUpdateWithError(self):
    """MSE of [2,4,6] vs [1,3,2]: (1 + 1 + 16) / 3 = 6."""
    predictions = tf.constant([2, 4, 6], shape=(1, 3), dtype=tf.float32)
    labels = tf.constant([1, 3, 2], shape=(1, 3), dtype=tf.float32)
    error, update_op = metrics.streaming_mean_squared_error(
        predictions, labels)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(6, sess.run(update_op))
      self.assertEqual(6, error.eval())
  def testSingleUpdateWithErrorAndWeights(self):
    """Weighted MSE: only weight-1 elements count, (1 + 25) / 2 = 13."""
    predictions = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
    labels = tf.constant([1, 3, 2, 3], shape=(1, 4), dtype=tf.float32)
    weights = tf.constant([0, 1, 0, 1], shape=(1, 4))
    error, update_op = metrics.streaming_mean_squared_error(
        predictions, labels, weights)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(13, sess.run(update_op))
      self.assertEqual(13, error.eval())
  def testMultipleBatchesOfSizeOne(self):
    """MSE accumulates across two dequeued batches of one row each."""
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue, [10, 8, 6])
      _enqueue_vector(sess, preds_queue, [-4, 3, -1])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue, [1, 3, 2])
      _enqueue_vector(sess, labels_queue, [2, 4, 6])
      labels = labels_queue.dequeue()
      error, update_op = metrics.streaming_mean_squared_error(
          predictions, labels)
      sess.run(tf.local_variables_initializer())
      # First update consumes batch 1; the asserted update consumes batch 2.
      sess.run(update_op)
      self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
      self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
  def testMetricsComputedConcurrently(self):
    """Two independently-named MSE metrics accumulate without interfering."""
    with self.test_session() as sess:
      # Create the queue that populates one set of predictions.
      preds_queue0 = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue0, [10, 8, 6])
      _enqueue_vector(sess, preds_queue0, [-4, 3, -1])
      predictions0 = preds_queue0.dequeue()
      # Create the queue that populates one set of predictions.
      preds_queue1 = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue1, [0, 1, 1])
      _enqueue_vector(sess, preds_queue1, [1, 1, 0])
      predictions1 = preds_queue1.dequeue()
      # Create the queue that populates one set of labels.
      labels_queue0 = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue0, [1, 3, 2])
      _enqueue_vector(sess, labels_queue0, [2, 4, 6])
      labels0 = labels_queue0.dequeue()
      # Create the queue that populates another set of labels.
      labels_queue1 = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue1, [-5, -3, -1])
      _enqueue_vector(sess, labels_queue1, [5, 4, 3])
      labels1 = labels_queue1.dequeue()
      mse0, update_op0 = metrics.streaming_mean_squared_error(
          predictions0, labels0, name='msd0')
      mse1, update_op1 = metrics.streaming_mean_squared_error(
          predictions1, labels1, name='msd1')
      sess.run(tf.local_variables_initializer())
      sess.run([update_op0, update_op1])
      sess.run([update_op0, update_op1])
      mse0, mse1 = sess.run([mse0, mse1])
      self.assertAlmostEqual(208.0 / 6, mse0, 5)
      self.assertAlmostEqual(79.0 / 6, mse1, 5)
  def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
    """MAE and MSE over the same inputs update independently."""
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue, [10, 8, 6])
      _enqueue_vector(sess, preds_queue, [-4, 3, -1])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = tf.FIFOQueue(2, dtypes=tf.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue, [1, 3, 2])
      _enqueue_vector(sess, labels_queue, [2, 4, 6])
      labels = labels_queue.dequeue()
      mae, ma_update_op = metrics.streaming_mean_absolute_error(
          predictions, labels)
      mse, ms_update_op = metrics.streaming_mean_squared_error(
          predictions, labels)
      sess.run(tf.local_variables_initializer())
      sess.run([ma_update_op, ms_update_op])
      sess.run([ma_update_op, ms_update_op])
      self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
      self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(tf.test.TestCase):
  """Tests for metrics.streaming_root_mean_squared_error."""
  def setUp(self):
    tf.reset_default_graph()
  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_root_mean_squared_error(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])
  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_root_mean_squared_error(
        predictions=tf.ones((10, 1)),
        labels=tf.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])
  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor repeatedly does not change its value."""
    predictions = tf.random_normal((10, 3), seed=1)
    labels = tf.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_root_mean_squared_error(
        predictions, labels)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())
  def testSingleUpdateZeroError(self):
    """Identical predictions and labels give zero RMSE."""
    with self.test_session() as sess:
      predictions = tf.constant(0.0, shape=(1, 3), dtype=tf.float32)
      labels = tf.constant(0.0, shape=(1, 3), dtype=tf.float32)
      rmse, update_op = metrics.streaming_root_mean_squared_error(
          predictions, labels)
      sess.run(tf.local_variables_initializer())
      self.assertEqual(0, sess.run(update_op))
      self.assertEqual(0, rmse.eval())
  def testSingleUpdateWithError(self):
    """RMSE of [2,4,6] vs [1,3,2]: sqrt((1 + 1 + 16) / 3) = sqrt(6)."""
    with self.test_session() as sess:
      predictions = tf.constant([2, 4, 6], shape=(1, 3), dtype=tf.float32)
      labels = tf.constant([1, 3, 2], shape=(1, 3), dtype=tf.float32)
      rmse, update_op = metrics.streaming_root_mean_squared_error(
          predictions, labels)
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
      self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
  def testSingleUpdateWithErrorAndWeights(self):
    """Weighted RMSE: weight-1 elements give sqrt((1 + 25) / 2) = sqrt(13)."""
    with self.test_session() as sess:
      predictions = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
      labels = tf.constant([1, 3, 2, 3], shape=(1, 4), dtype=tf.float32)
      weights = tf.constant([0, 1, 0, 1], shape=(1, 4))
      rmse, update_op = metrics.streaming_root_mean_squared_error(
          predictions, labels, weights)
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
      self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
def _reweight(predictions, labels, weights):
return (np.concatenate([[p] * int(w) for p, w in zip(predictions, weights)]),
np.concatenate([[l] * int(w) for l, w in zip(labels, weights)]))
class StreamingCovarianceTest(tf.test.TestCase):
  """Tests for metrics.streaming_covariance.

  Reference values are computed with np.cov; weighted cases are emulated
  by repeating samples via the module-level _reweight helper.
  """

  def setUp(self):
    # Start each test from an empty graph.
    tf.reset_default_graph()

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    cov, _ = metrics.streaming_covariance(
        predictions=tf.to_float(tf.range(10)) + tf.ones([10, 10]),
        labels=tf.to_float(tf.range(10)) + tf.ones([10, 10]),
        metrics_collections=[my_collection_name])
    # The value tensor is registered in the requested collection.
    self.assertListEqual(tf.get_collection(my_collection_name), [cov])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_covariance(
        predictions=tf.to_float(tf.range(10)) + tf.ones([10, 10]),
        labels=tf.to_float(tf.range(10)) + tf.ones([10, 10]),
        updates_collections=[my_collection_name])
    # The update op is registered in the requested collection.
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    labels = tf.random_normal((10, 3), seed=2)
    # Correlated predictions so the covariance is non-trivial.
    predictions = labels * 0.5 + tf.random_normal((10, 3), seed=1) * 0.5
    cov, update_op = metrics.streaming_covariance(predictions, labels)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency: reading cov must not change state.
      initial_cov = cov.eval()
      for _ in range(10):
        self.assertEqual(initial_cov, cov.eval())

  def testSingleUpdateIdentical(self):
    with self.test_session() as sess:
      predictions = tf.to_float(tf.range(10))
      labels = tf.to_float(tf.range(10))
      cov, update_op = metrics.streaming_covariance(predictions, labels)
      # np.cov of a sequence with itself gives its variance.
      expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
      self.assertAlmostEqual(expected_cov, cov.eval(), 5)

  def testSingleUpdateNonIdentical(self):
    with self.test_session() as sess:
      predictions = tf.constant([2, 4, 6], shape=(1, 3), dtype=tf.float32)
      labels = tf.constant([1, 3, 2], shape=(1, 3), dtype=tf.float32)
      cov, update_op = metrics.streaming_covariance(predictions, labels)
      expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(expected_cov, update_op.eval())
      self.assertAlmostEqual(expected_cov, cov.eval())

  def testSingleUpdateWithErrorAndWeights(self):
    with self.test_session() as sess:
      predictions = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
      labels = tf.constant([1, 3, 2, 7], shape=(1, 4), dtype=tf.float32)
      weights = tf.constant([0, 1, 3, 1], shape=(1, 4), dtype=tf.float32)
      cov, update_op = metrics.streaming_covariance(
          predictions, labels, weights=weights)
      # Expand samples by their integer weights to get the reference value.
      p, l = _reweight([2, 4, 6, 8], [1, 3, 2, 7], [0, 1, 3, 1])
      expected_cov = np.cov(p, l)[0, 1]
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(expected_cov, sess.run(update_op))
      self.assertAlmostEqual(expected_cov, cov.eval())

  def testMultiUpdateWithErrorNoWeights(self):
    with self.test_session() as sess:
      np.random.seed(123)
      n = 100
      predictions = np.random.randn(n)
      labels = 0.5 * predictions + np.random.randn(n)
      # Feed the data in batches of `stride` samples.
      stride = 10
      predictions_t = tf.placeholder(tf.float32, [stride])
      labels_t = tf.placeholder(tf.float32, [stride])
      cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
      sess.run(tf.local_variables_initializer())
      prev_expected_cov = 0.
      for i in range(n // stride):
        feed_dict = {
            predictions_t: predictions[stride * i:stride * (i + 1)],
            labels_t: labels[stride * i:stride * (i + 1)]
        }
        # Before the update, the value reflects only prior batches.
        self.assertAlmostEqual(
            prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
        # After the update, it matches np.cov over all data seen so far.
        expected_cov = np.cov(predictions[:stride * (i + 1)],
                              labels[:stride * (i + 1)])[0, 1]
        self.assertAlmostEqual(
            expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
        self.assertAlmostEqual(
            expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
        prev_expected_cov = expected_cov

  def testMultiUpdateWithErrorAndWeights(self):
    with self.test_session() as sess:
      np.random.seed(123)
      n = 100
      predictions = np.random.randn(n)
      labels = 0.5 * predictions + np.random.randn(n)
      # Integer weights 0..9, shuffled across all samples.
      weights = np.tile(np.arange(n // 10), n // 10)
      np.random.shuffle(weights)
      stride = 10
      predictions_t = tf.placeholder(tf.float32, [stride])
      labels_t = tf.placeholder(tf.float32, [stride])
      weights_t = tf.placeholder(tf.float32, [stride])
      cov, update_op = metrics.streaming_covariance(
          predictions_t, labels_t, weights=weights_t)
      sess.run(tf.local_variables_initializer())
      prev_expected_cov = 0.
      for i in range(n // stride):
        feed_dict = {
            predictions_t: predictions[stride * i:stride * (i + 1)],
            labels_t: labels[stride * i:stride * (i + 1)],
            weights_t: weights[stride * i:stride * (i + 1)]
        }
        # Value before the update reflects only prior batches.
        self.assertAlmostEqual(
            prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
        # Reference: np.cov over weight-expanded samples seen so far.
        p, l = _reweight(predictions[:stride * (i + 1)],
                         labels[:stride * (i + 1)], weights[:stride * (i + 1)])
        expected_cov = np.cov(p, l)[0, 1]
        self.assertAlmostEqual(
            expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
        self.assertAlmostEqual(
            expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
        prev_expected_cov = expected_cov
class StreamingPearsonRTest(tf.test.TestCase):
  """Tests for metrics.streaming_pearson_correlation.

  Reference values are computed with np.corrcoef (or np.cov for the
  weighted cases, via the module-level _reweight helper).
  """

  def setUp(self):
    # Start each test from an empty graph.
    tf.reset_default_graph()

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    pearson_r, _ = metrics.streaming_pearson_correlation(
        predictions=tf.to_float(tf.range(10)) + tf.ones([10, 10]),
        labels=tf.to_float(tf.range(10)) + tf.ones([10, 10]),
        metrics_collections=[my_collection_name])
    # The value tensor is registered in the requested collection.
    self.assertListEqual(tf.get_collection(my_collection_name), [pearson_r])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_pearson_correlation(
        predictions=tf.to_float(tf.range(10)) + tf.ones([10, 10]),
        labels=tf.to_float(tf.range(10)) + tf.ones([10, 10]),
        updates_collections=[my_collection_name])
    # The update op is registered in the requested collection.
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    labels = tf.random_normal((10, 3), seed=2)
    # Correlated predictions so r is non-trivial.
    predictions = labels * 0.5 + tf.random_normal((10, 3), seed=1) * 0.5
    pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
                                                                 labels)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency: reading pearson_r must not change state.
      initial_r = pearson_r.eval()
      for _ in range(10):
        self.assertEqual(initial_r, pearson_r.eval())

  def testSingleUpdateIdentical(self):
    with self.test_session() as sess:
      predictions = tf.to_float(tf.range(10))
      labels = tf.to_float(tf.range(10))
      pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
                                                                   labels)
      # Perfectly correlated inputs: expected r is 1.
      expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
      self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)

  def testSingleUpdateNonIdentical(self):
    with self.test_session() as sess:
      predictions = tf.constant([2, 4, 6], shape=(1, 3), dtype=tf.float32)
      labels = tf.constant([1, 3, 2], shape=(1, 3), dtype=tf.float32)
      pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
                                                                   labels)
      expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(expected_r, update_op.eval())
      self.assertAlmostEqual(expected_r, pearson_r.eval())

  def testSingleUpdateWithErrorAndWeights(self):
    with self.test_session() as sess:
      predictions = np.array([2, 4, 6, 8])
      labels = np.array([1, 3, 2, 7])
      weights = np.array([0, 1, 3, 1])
      predictions_t = tf.constant(predictions, shape=(1, 4), dtype=tf.float32)
      labels_t = tf.constant(labels, shape=(1, 4), dtype=tf.float32)
      weights_t = tf.constant(weights, shape=(1, 4), dtype=tf.float32)
      pearson_r, update_op = metrics.streaming_pearson_correlation(
          predictions_t, labels_t, weights=weights_t)
      # Reference r from the covariance matrix of weight-expanded samples:
      # r = cov(p, l) / sqrt(var(p) * var(l)).
      p, l = _reweight(predictions, labels, weights)
      cmat = np.cov(p, l)
      expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(expected_r, sess.run(update_op))
      self.assertAlmostEqual(expected_r, pearson_r.eval())

  def testMultiUpdateWithErrorNoWeights(self):
    with self.test_session() as sess:
      np.random.seed(123)
      n = 100
      predictions = np.random.randn(n)
      labels = 0.5 * predictions + np.random.randn(n)
      # Feed the data in batches of `stride` samples.
      stride = 10
      predictions_t = tf.placeholder(tf.float32, [stride])
      labels_t = tf.placeholder(tf.float32, [stride])
      pearson_r, update_op = metrics.streaming_pearson_correlation(
          predictions_t, labels_t)
      sess.run(tf.local_variables_initializer())
      prev_expected_r = 0.
      for i in range(n // stride):
        feed_dict = {
            predictions_t: predictions[stride * i:stride * (i + 1)],
            labels_t: labels[stride * i:stride * (i + 1)]
        }
        # Before the update, the value reflects only prior batches.
        self.assertAlmostEqual(
            prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
        # After the update, it matches np.corrcoef over all data so far.
        expected_r = np.corrcoef(predictions[:stride * (i + 1)],
                                 labels[:stride * (i + 1)])[0, 1]
        self.assertAlmostEqual(
            expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
        self.assertAlmostEqual(
            expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
        prev_expected_r = expected_r

  def testMultiUpdateWithErrorAndWeights(self):
    with self.test_session() as sess:
      np.random.seed(123)
      n = 100
      predictions = np.random.randn(n)
      labels = 0.5 * predictions + np.random.randn(n)
      # Integer weights 0..9, shuffled across all samples.
      weights = np.tile(np.arange(n // 10), n // 10)
      np.random.shuffle(weights)
      stride = 10
      predictions_t = tf.placeholder(tf.float32, [stride])
      labels_t = tf.placeholder(tf.float32, [stride])
      weights_t = tf.placeholder(tf.float32, [stride])
      pearson_r, update_op = metrics.streaming_pearson_correlation(
          predictions_t, labels_t, weights=weights_t)
      sess.run(tf.local_variables_initializer())
      prev_expected_r = 0.
      for i in range(n // stride):
        feed_dict = {
            predictions_t: predictions[stride * i:stride * (i + 1)],
            labels_t: labels[stride * i:stride * (i + 1)],
            weights_t: weights[stride * i:stride * (i + 1)]
        }
        # Value before the update reflects only prior batches.
        self.assertAlmostEqual(
            prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
        # Reference r over weight-expanded samples seen so far.
        p, l = _reweight(predictions[:stride * (i + 1)],
                         labels[:stride * (i + 1)], weights[:stride * (i + 1)])
        cmat = np.cov(p, l)
        expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
        self.assertAlmostEqual(
            expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
        self.assertAlmostEqual(
            expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
        prev_expected_r = expected_r
class StreamingMeanCosineDistanceTest(tf.test.TestCase):
  """Tests for metrics.streaming_mean_cosine_distance.

  Cosine distance is 1 - cosine similarity, so identical unit vectors give
  0, orthogonal vectors give 1, and opposite vectors give 2.
  """

  def setUp(self):
    # Start each test from an empty graph.
    tf.reset_default_graph()

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    mean, _ = metrics.streaming_mean_cosine_distance(
        predictions=tf.ones((10, 3)),
        labels=tf.ones((10, 3)),
        dim=1,
        metrics_collections=[my_collection_name])
    # The value tensor is registered in the requested collection.
    self.assertListEqual(tf.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean_cosine_distance(
        predictions=tf.ones((10, 3)),
        labels=tf.ones((10, 3)),
        dim=1,
        updates_collections=[my_collection_name])
    # The update op is registered in the requested collection.
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    predictions = tf.random_normal((10, 3), seed=1)
    labels = tf.random_normal((10, 3), seed=2)
    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=1)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency: reading error must not change state.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())

  def testSingleUpdateZeroError(self):
    # Identical unit vectors -> distance 0 everywhere.
    np_labels = np.matrix(('1 0 0;'
                           '0 0 1;'
                           '0 1 0'))
    predictions = tf.constant(np_labels, shape=(1, 3, 3), dtype=tf.float32)
    labels = tf.constant(np_labels, shape=(1, 3, 3), dtype=tf.float32)
    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=2)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(0, sess.run(update_op))
      self.assertEqual(0, error.eval())

  def testSingleUpdateWithError1(self):
    np_labels = np.matrix(('1 0 0;'
                           '0 0 1;'
                           '0 1 0'))
    # Row distances: 0 (same), 2 (opposite), 1 (orthogonal) -> mean 1.
    np_predictions = np.matrix(('1 0 0;'
                                '0 0 -1;'
                                '1 0 0'))
    predictions = tf.constant(np_predictions, shape=(3, 1, 3), dtype=tf.float32)
    labels = tf.constant(np_labels, shape=(3, 1, 3), dtype=tf.float32)
    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=2)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(1, sess.run(update_op), 5)
      self.assertAlmostEqual(1, error.eval(), 5)

  def testSingleUpdateWithError2(self):
    # Unit vectors: row 1 identical (dist 0), rows 2 and 3 opposite /
    # orthogonal-ish pairs chosen so the mean distance is exactly 1.
    np_predictions = np.matrix((
        '0.819031913261206 0.567041924552012 0.087465312324590;'
        '-0.665139432070255 -0.739487441769973 -0.103671883216994;'
        '0.707106781186548 -0.707106781186548 0'))
    np_labels = np.matrix((
        '0.819031913261206 0.567041924552012 0.087465312324590;'
        '0.665139432070255 0.739487441769973 0.103671883216994;'
        '0.707106781186548 0.707106781186548 0'))
    predictions = tf.constant(np_predictions, shape=(3, 1, 3), dtype=tf.float32)
    labels = tf.constant(np_labels, shape=(3, 1, 3), dtype=tf.float32)
    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=2)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertAlmostEqual(1.0, sess.run(update_op), 5)
      self.assertAlmostEqual(1.0, error.eval(), 5)

  def testSingleUpdateWithErrorAndWeights1(self):
    np_predictions = np.matrix(('1 0 0;'
                                '0 0 -1;'
                                '1 0 0'))
    np_labels = np.matrix(('1 0 0;'
                           '0 0 1;'
                           '0 1 0'))
    predictions = tf.constant(np_predictions, shape=(3, 1, 3), dtype=tf.float32)
    labels = tf.constant(np_labels, shape=(3, 1, 3), dtype=tf.float32)
    # Only the first (identical) row is weighted -> error 0.
    weights = tf.constant([1, 0, 0], shape=(3, 1, 1), dtype=tf.float32)
    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=2, weights=weights)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(0, sess.run(update_op))
      self.assertEqual(0, error.eval())

  def testSingleUpdateWithErrorAndWeights2(self):
    np_predictions = np.matrix(('1 0 0;'
                                '0 0 -1;'
                                '1 0 0'))
    np_labels = np.matrix(('1 0 0;'
                           '0 0 1;'
                           '0 1 0'))
    predictions = tf.constant(np_predictions, shape=(3, 1, 3), dtype=tf.float32)
    labels = tf.constant(np_labels, shape=(3, 1, 3), dtype=tf.float32)
    # Rows 2 and 3 weighted: distances 2 (opposite) and 1 (orthogonal),
    # mean (2 + 1) / 2 = 1.5.
    weights = tf.constant([0, 1, 1], shape=(3, 1, 1), dtype=tf.float32)
    error, update_op = metrics.streaming_mean_cosine_distance(
        predictions, labels, dim=2, weights=weights)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(1.5, update_op.eval())
      self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(tf.test.TestCase):
  """Unit tests for metrics.streaming_percentage_less."""

  def setUp(self):
    # Fresh graph for every test.
    tf.reset_default_graph()

  def testMetricsCollection(self):
    collection = '__metrics__'
    mean, _ = metrics.streaming_percentage_less(
        values=tf.ones((10,)),
        threshold=2,
        metrics_collections=[collection])
    # The value tensor is registered in the requested collection.
    self.assertListEqual(tf.get_collection(collection), [mean])

  def testUpdatesCollection(self):
    collection = '__updates__'
    _, update_op = metrics.streaming_percentage_less(
        values=tf.ones((10,)),
        threshold=2,
        updates_collections=[collection])
    # The update op is registered in the requested collection.
    self.assertListEqual(tf.get_collection(collection), [update_op])

  def testOneUpdate(self):
    with self.test_session() as sess:
      values = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
      # Thresholds above all values, between values, and below all values.
      pcnt0, update_op0 = metrics.streaming_percentage_less(
          values, 100, name='high')
      pcnt1, update_op1 = metrics.streaming_percentage_less(
          values, 7, name='medium')
      pcnt2, update_op2 = metrics.streaming_percentage_less(
          values, 1, name='low')
      sess.run(tf.local_variables_initializer())
      sess.run([update_op0, update_op1, update_op2])
      pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
      # All, three-of-four, and none of the values fall below.
      self.assertAlmostEqual(1.0, pcnt0, 5)
      self.assertAlmostEqual(0.75, pcnt1, 5)
      self.assertAlmostEqual(0.0, pcnt2, 5)

  def testSomePresentOneUpdate(self):
    with self.test_session() as sess:
      values = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
      # Only the first and last values carry weight.
      weights = tf.constant([1, 0, 0, 1], shape=(1, 4), dtype=tf.float32)
      pcnt0, update_op0 = metrics.streaming_percentage_less(
          values, 100, weights=weights, name='high')
      pcnt1, update_op1 = metrics.streaming_percentage_less(
          values, 7, weights=weights, name='medium')
      pcnt2, update_op2 = metrics.streaming_percentage_less(
          values, 1, weights=weights, name='low')
      sess.run(tf.local_variables_initializer())
      self.assertListEqual([1.0, 0.5, 0.0],
                           sess.run([update_op0, update_op1, update_op2]))
      pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
      self.assertAlmostEqual(1.0, pcnt0, 5)
      self.assertAlmostEqual(0.5, pcnt1, 5)
      self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(tf.test.TestCase):
  """Tests for metrics.streaming_mean_iou.

  The update op accumulates a confusion matrix; the value tensor is the
  mean, over classes, of intersection / union computed from that matrix.
  """

  def setUp(self):
    np.random.seed(1)
    tf.reset_default_graph()

  def testMetricsCollections(self):
    my_collection_name = '__metrics__'
    mean_iou, _ = metrics.streaming_mean_iou(
        predictions=tf.ones([10, 1]),
        labels=tf.ones([10, 1]),
        num_classes=2,
        metrics_collections=[my_collection_name])
    # The value tensor is registered in the requested collection.
    self.assertListEqual(tf.get_collection(my_collection_name), [mean_iou])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_mean_iou(
        predictions=tf.ones([10, 1]),
        labels=tf.ones([10, 1]),
        num_classes=2,
        updates_collections=[my_collection_name])
    # The update op is registered in the requested collection.
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
    # Mismatched prediction/label shapes must be rejected at graph build.
    predictions = tf.ones([10, 3])
    labels = tf.ones([10, 4])
    with self.assertRaises(ValueError):
      metrics.streaming_mean_iou(
          predictions, labels, num_classes=2)

  def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
    # Mismatched label/weight shapes must also be rejected.
    predictions = tf.ones([10])
    labels = tf.ones([10])
    weights = tf.zeros([9])
    with self.assertRaises(ValueError):
      metrics.streaming_mean_iou(
          predictions, labels, num_classes=2, weights=weights)

  def testValueTensorIsIdempotent(self):
    num_classes = 3
    predictions = tf.random_uniform([10], maxval=num_classes,
                                    dtype=tf.int64, seed=1)
    labels = tf.random_uniform([10], maxval=num_classes,
                               dtype=tf.int64, seed=1)
    miou, update_op = metrics.streaming_mean_iou(
        predictions, labels, num_classes=num_classes)
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency: reading miou must not change state.
      initial_miou = miou.eval()
      for _ in range(10):
        self.assertEqual(initial_miou, miou.eval())

  def testMultipleUpdates(self):
    num_classes = 3
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      # NOTE: enqueue order pairs each prediction with the label enqueued
      # at the same position below.
      preds_queue = tf.FIFOQueue(5, dtypes=tf.int32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [2])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = tf.FIFOQueue(5, dtypes=tf.int32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [2])
      _enqueue_vector(sess, labels_queue, [1])
      labels = labels_queue.dequeue()
      miou, update_op = metrics.streaming_mean_iou(
          predictions, labels, num_classes)
      sess.run(tf.local_variables_initializer())
      for _ in range(5):
        sess.run(update_op)
      # Per-class IOU: class 0 -> 1/2, class 1 -> 1/4, class 2 -> 0.
      desired_output = np.mean([1.0/2.0, 1.0/4.0, 0.])
      self.assertEqual(desired_output, miou.eval())

  def testMultipleUpdatesWithWeights(self):
    num_classes = 2
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = tf.FIFOQueue(6, dtypes=tf.int32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = tf.FIFOQueue(6, dtypes=tf.int32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      labels = labels_queue.dequeue()
      # Create the queue that populates the weights.
      # Samples 4 and 6 (the mismatched ones) carry zero weight.
      weights_queue = tf.FIFOQueue(6, dtypes=tf.float32, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [0.0])
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [0.0])
      weights = weights_queue.dequeue()
      miou, update_op = metrics.streaming_mean_iou(
          predictions, labels, num_classes, weights=weights)
      sess.run(tf.local_variables_initializer())
      for _ in range(6):
        sess.run(update_op)
      # Weighted per-class IOU: class 0 -> 2/3, class 1 -> 1/2.
      desired_output = np.mean([2.0/3.0, 1.0/2.0])
      self.assertAlmostEqual(desired_output, miou.eval())

  def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
    # one class, and thus there is one row and one column with
    # zero entries in the confusion matrix.
    num_classes = 3
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      # There is no prediction for class 2.
      preds_queue = tf.FIFOQueue(5, dtypes=tf.int32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      # There is no label for class 2 either.
      labels_queue = tf.FIFOQueue(5, dtypes=tf.int32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      labels = labels_queue.dequeue()
      miou, update_op = metrics.streaming_mean_iou(
          predictions, labels, num_classes)
      sess.run(tf.local_variables_initializer())
      for _ in range(5):
        sess.run(update_op)
      # Class 2 contributes 0 to the mean despite having no samples.
      desired_output = np.mean([1.0/3.0, 2.0/4.0, 0.])
      self.assertAlmostEqual(desired_output, miou.eval())

  def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
    predictions = tf.concat(0,
                            [tf.constant(0, shape=[5]),
                             tf.constant(1, shape=[5])])
    labels = tf.concat(0,
                       [tf.constant(0, shape=[3]),
                        tf.constant(1, shape=[7])])
    num_classes = 2
    with self.test_session() as sess:
      miou, update_op = metrics.streaming_mean_iou(
          predictions, labels, num_classes)
      sess.run(tf.local_variables_initializer())
      # The update op's value is the accumulated confusion matrix itself.
      confusion_matrix = update_op.eval()
      self.assertAllEqual([[3, 2], [0, 5]], confusion_matrix)
      desired_miou = np.mean([3./5., 5./7.])
      self.assertAlmostEqual(desired_miou, miou.eval())

  def testAllCorrect(self):
    predictions = tf.zeros([40])
    labels = tf.zeros([40])
    num_classes = 1
    with self.test_session() as sess:
      miou, update_op = metrics.streaming_mean_iou(
          predictions, labels, num_classes)
      sess.run(tf.local_variables_initializer())
      self.assertEqual(40, update_op.eval()[0])
      # Perfect agreement -> IOU of 1.
      self.assertEqual(1.0, miou.eval())

  def testAllWrong(self):
    predictions = tf.zeros([40])
    labels = tf.ones([40])
    num_classes = 2
    with self.test_session() as sess:
      miou, update_op = metrics.streaming_mean_iou(
          predictions, labels, num_classes)
      sess.run(tf.local_variables_initializer())
      self.assertAllEqual([[0, 40], [0, 0]], update_op.eval())
      # No intersection for either class -> IOU of 0.
      self.assertEqual(0., miou.eval())

  def testResultsWithSomeMissing(self):
    predictions = tf.concat(0, [tf.constant(0, shape=[5]),
                                tf.constant(1, shape=[5])])
    labels = tf.concat(0, [tf.constant(0, shape=[3]),
                           tf.constant(1, shape=[7])])
    num_classes = 2
    # Zero-weight the first and last samples.
    weights = tf.concat(0, [tf.constant(0, shape=[1]),
                            tf.constant(1, shape=[8]),
                            tf.constant(0, shape=[1])])
    with self.test_session() as sess:
      miou, update_op = metrics.streaming_mean_iou(
          predictions, labels, num_classes, weights=weights)
      sess.run(tf.local_variables_initializer())
      self.assertAllEqual([[2, 2], [0, 4]], update_op.eval())
      desired_miou = np.mean([2./4., 4./6.])
      self.assertAlmostEqual(desired_miou, miou.eval())
class StreamingConcatTest(tf.test.TestCase):
  """Tests for metrics.streaming_concat, which accumulates values by
  concatenating each batch onto an internal growing buffer."""

  def setUp(self):
    # Start each test from an empty graph.
    tf.reset_default_graph()

  def testMetricsCollection(self):
    my_collection_name = '__metrics__'
    value, _ = metrics.streaming_concat(
        values=tf.ones((10,)),
        metrics_collections=[my_collection_name])
    # The value tensor is registered in the requested collection.
    self.assertListEqual(tf.get_collection(my_collection_name), [value])

  def testUpdatesCollection(self):
    my_collection_name = '__updates__'
    _, update_op = metrics.streaming_concat(
        values=tf.ones((10,)),
        updates_collections=[my_collection_name])
    # The update op is registered in the requested collection.
    self.assertListEqual(tf.get_collection(my_collection_name), [update_op])

  def testNextArraySize(self):
    # _next_array_size doubles capacity until it fits the requested size.
    next_array_size = metrics.python.ops.metric_ops._next_array_size
    with self.test_session():
      self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
      self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
      self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
      self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
      self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)

  def testStreamingConcat(self):
    with self.test_session() as sess:
      values = tf.placeholder(tf.int32, [None])
      concatenated, update_op = metrics.streaming_concat(values)
      sess.run(tf.local_variables_initializer())
      # Starts empty; each update appends the fed batch.
      self.assertAllEqual([], concatenated.eval())
      sess.run([update_op], feed_dict={values: [0, 1, 2]})
      self.assertAllEqual([0, 1, 2], concatenated.eval())
      sess.run([update_op], feed_dict={values: [3, 4]})
      self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
      sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
      self.assertAllEqual(np.arange(10), concatenated.eval())

  def testStreamingConcatMaxSize(self):
    with self.test_session() as sess:
      values = tf.range(3)
      concatenated, update_op = metrics.streaming_concat(values, max_size=5)
      sess.run(tf.local_variables_initializer())
      self.assertAllEqual([], concatenated.eval())
      sess.run([update_op])
      self.assertAllEqual([0, 1, 2], concatenated.eval())
      # The second update is truncated at max_size=5 ...
      sess.run([update_op])
      self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
      # ... and further updates are no-ops once full.
      sess.run([update_op])
      self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())

  def testStreamingConcat2D(self):
    with self.test_session() as sess:
      values = tf.reshape(tf.range(3), (3, 1))
      # axis=-1 concatenates along columns rather than rows.
      concatenated, update_op = metrics.streaming_concat(values, axis=-1)
      sess.run(tf.local_variables_initializer())
      for _ in range(10):
        sess.run([update_op])
      self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10],
                          concatenated.eval())

  def testStreamingConcatErrors(self):
    # Unknown rank is rejected.
    with self.assertRaises(ValueError):
      metrics.streaming_concat(tf.placeholder(tf.float32))
    values = tf.zeros((2, 3))
    # Out-of-range axes are rejected.
    with self.assertRaises(ValueError):
      metrics.streaming_concat(values, axis=-3, max_size=3)
    with self.assertRaises(ValueError):
      metrics.streaming_concat(values, axis=2, max_size=3)
    # The concatenation axis must have a known static size.
    with self.assertRaises(ValueError):
      metrics.streaming_concat(tf.placeholder(tf.float32, [None, None]))

  def testStreamingConcatReset(self):
    with self.test_session() as sess:
      values = tf.placeholder(tf.int32, [None])
      concatenated, update_op = metrics.streaming_concat(values)
      sess.run(tf.local_variables_initializer())
      self.assertAllEqual([], concatenated.eval())
      sess.run([update_op], feed_dict={values: [0, 1, 2]})
      self.assertAllEqual([0, 1, 2], concatenated.eval())
      # Re-running the initializer clears the accumulated buffer.
      sess.run(tf.local_variables_initializer())
      sess.run([update_op], feed_dict={values: [3, 4]})
      self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(tf.test.TestCase):
  """Unit tests for metrics.aggregate_metrics."""

  def testAggregateNoMetricsRaisesValueError(self):
    # Aggregating an empty set of metrics is an error.
    with self.assertRaises(ValueError):
      metrics.aggregate_metrics()

  def testAggregateSingleMetricReturnsOneItemLists(self):
    values = tf.ones((10, 4))
    value_tensors, update_ops = metrics.aggregate_metrics(
        metrics.streaming_mean(values))
    # One metric in -> one value tensor and one update op out.
    self.assertEqual(1, len(value_tensors))
    self.assertEqual(1, len(update_ops))
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      self.assertEqual(1, update_ops[0].eval())
      self.assertEqual(1, value_tensors[0].eval())

  def testAggregateMultipleMetricsReturnsListsInOrder(self):
    predictions = tf.ones((10, 4))
    labels = tf.ones((10, 4)) * 3
    value_tensors, update_ops = metrics.aggregate_metrics(
        metrics.streaming_mean_absolute_error(
            predictions, labels),
        metrics.streaming_mean_squared_error(
            predictions, labels))
    self.assertEqual(2, len(value_tensors))
    self.assertEqual(2, len(update_ops))
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      # |1 - 3| = 2 for MAE, (1 - 3)^2 = 4 for MSE; output order matches
      # the argument order.
      self.assertEqual(2, update_ops[0].eval())
      self.assertEqual(4, update_ops[1].eval())
      self.assertEqual(2, value_tensors[0].eval())
      self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(tf.test.TestCase):
  """Unit tests for metrics.aggregate_metric_map."""

  def testAggregateMultipleMetricsReturnsListsInOrder(self):
    predictions = tf.ones((10, 4))
    labels = tf.ones((10, 4)) * 3
    metric_map = {
        'm1': metrics.streaming_mean_absolute_error(
            predictions, labels),
        'm2': metrics.streaming_mean_squared_error(
            predictions, labels),
    }
    names_to_values, names_to_updates = metrics.aggregate_metric_map(
        metric_map)
    # Both output dicts mirror the input keys.
    self.assertEqual(2, len(names_to_values))
    self.assertEqual(2, len(names_to_updates))
    with self.test_session() as sess:
      sess.run(tf.local_variables_initializer())
      # |1 - 3| = 2 for MAE, (1 - 3)^2 = 4 for MSE.
      self.assertEqual(2, names_to_updates['m1'].eval())
      self.assertEqual(4, names_to_updates['m2'].eval())
      self.assertEqual(2, names_to_values['m1'].eval())
      self.assertEqual(4, names_to_values['m2'].eval())
class NumRelevantTest(tf.test.TestCase):
  """Tests for metric_ops.num_relevant: per-row count of relevant labels,
  capped at k."""

  def testNumRelevantInvalidArgs(self):
    labels = tf.random_uniform(
        shape=(3, 3, 3), minval=0, maxval=100, dtype=tf.int32)
    # k must be a positive integer; the regexp matches '[Ii]nvalid k'.
    with self.assertRaisesRegexp(ValueError, 'nvalid k'):
      metric_ops.num_relevant(labels, k=0)
    with self.assertRaisesRegexp(ValueError, 'nvalid k'):
      metric_ops.num_relevant(labels, k=-1)

  def testNumRelevantDense(self):
    with self.test_session():
      labels = tf.random_uniform(
          shape=(3, 3, 3), minval=0, maxval=100, dtype=tf.int32)
      # For dense labels every row has 3 entries, so the result is
      # min(k, 3) everywhere.
      ones = np.ones(shape=(3, 3))
      self.assertAllEqual(ones, metric_ops.num_relevant(labels, k=1).eval())
      twos = ones * 2
      self.assertAllEqual(twos, metric_ops.num_relevant(labels, k=2).eval())
      threes = ones * 3
      self.assertAllEqual(threes, metric_ops.num_relevant(labels, k=3).eval())
      # k beyond the row length saturates at the row length.
      self.assertAllEqual(threes, metric_ops.num_relevant(labels, k=4).eval())
      self.assertAllEqual(threes, metric_ops.num_relevant(labels, k=999).eval())

  def testNumRelevantSparse(self):
    with self.test_session():
      # Rows have 0-3 labels; two rows are entirely absent.
      labels = tf.SparseTensorValue(
          indices=(
              (0, 0, 0), (0, 0, 1),
              (0, 1, 0), (0, 1, 1), (0, 1, 2),
              # (0, 2) missing
              (1, 0, 0), (1, 0, 1), (1, 0, 2),
              (1, 1, 0),
              (1, 2, 0),
              # (2, 0) missing
              (2, 1, 0), (2, 1, 1),
              (2, 2, 0)),
          values=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13),
          shape=(3, 3, 3))
      # Per-row counts capped at k; missing rows count 0.
      self.assertAllEqual(
          ((1, 1, 0), (1, 1, 1), (0, 1, 1)),
          metric_ops.num_relevant(labels, k=1).eval())
      self.assertAllEqual(
          ((2, 2, 0), (2, 1, 1), (0, 2, 1)),
          metric_ops.num_relevant(labels, k=2).eval())
      # k at or above the longest row returns the true row lengths.
      label_lengths = ((2, 3, 0), (3, 1, 1), (0, 2, 1))
      self.assertAllEqual(
          label_lengths, metric_ops.num_relevant(labels, k=3).eval())
      self.assertAllEqual(
          label_lengths, metric_ops.num_relevant(labels, k=999).eval())
class ExpandAndTileTest(tf.test.TestCase):
  """Tests for metric_ops.expand_and_tile.

  expand_and_tile inserts a new axis at `dim` (default: axis 0) and tiles the
  input `multiple` times along that new axis, for both dense tensors and
  SparseTensorValues.
  """

  def testExpandAndTileInvalidArgs(self):
    x = tf.ones(shape=(3, 3, 3))
    # multiple must be >= 1.
    with self.assertRaisesRegexp(ValueError, 'nvalid multiple'):
      metric_ops.expand_and_tile(x, multiple=0)
    with self.test_session():
      # For a rank-3 input, dim outside [-4, 3] is rejected.
      with self.assertRaises(ValueError):
        metric_ops.expand_and_tile(x, multiple=1, dim=-4).eval()
      with self.assertRaises(ValueError):
        metric_ops.expand_and_tile(x, multiple=1, dim=4).eval()

  def testSparseExpandAndTileInvalidArgs(self):
    # Fully-populated 3x3x3 sparse tensor.
    x = tf.SparseTensorValue(
        indices=[
            (i, j, k) for i in range(3) for j in range(3) for k in range(3)],
        values=[1] * 27,
        shape=[3, 3, 3])
    with self.assertRaisesRegexp(ValueError, 'nvalid multiple'):
      metric_ops.expand_and_tile(x, multiple=0)
    with self.test_session():
      # NOTE(review): for sparse input, dim=-4 fails at run time (tf.OpError)
      # while dim=4 fails at graph-construction time (ValueError).
      with self.assertRaises(tf.OpError):
        metric_ops.expand_and_tile(x, multiple=1, dim=-4).eval()
      with self.assertRaises(ValueError):
        metric_ops.expand_and_tile(x, multiple=1, dim=4).eval()

  def _test_expand_and_tile(
      self, expected_shape, expected_value, tensor, multiple, dim=None):
    # Helper: run expand_and_tile in a fresh graph and check both the dynamic
    # shape and the values of the result.
    with tf.Graph().as_default() as g, self.test_session(g):
      if dim is None:
        op = metric_ops.expand_and_tile(tensor=tensor, multiple=multiple)
      else:
        op = metric_ops.expand_and_tile(
            tensor=tensor, multiple=multiple, dim=dim)
      self.assertAllEqual(expected_shape, tf.shape(op).eval())
      self.assertAllEqual(expected_value, op.eval())

  # TODO(ptucker): Use @parameterized when it's available in tf.
  def testExpandAndTile1x(self):
    # multiple=1: only a new singleton axis is inserted, at every valid dim.
    # Shape (3,3,3).
    x = ((
        (1, 2, 3),
        (4, 5, 6),
        (7, 8, 9)
    ), (
        (10, 11, 12),
        (13, 14, 15),
        (16, 17, 18)
    ), (
        (19, 20, 21),
        (22, 23, 24),
        (25, 26, 26)
    ))
    for dim in (None, -3, 0):
      self._test_expand_and_tile(
          expected_shape=(1, 3, 3, 3),
          expected_value=[x],
          tensor=x, multiple=1, dim=dim)
    for dim in (-2, 1):
      self._test_expand_and_tile(
          expected_shape=(3, 1, 3, 3),
          expected_value=[[x1] for x1 in x],
          tensor=x, multiple=1, dim=dim)
    for dim in (-1, 2):
      self._test_expand_and_tile(
          expected_shape=(3, 3, 1, 3),
          expected_value=[[[x2] for x2 in x1] for x1 in x],
          tensor=x, multiple=1, dim=dim)
    self._test_expand_and_tile(
        expected_shape=(3, 3, 3, 1),
        expected_value=[[[[x3] for x3 in x2] for x2 in x1] for x1 in x],
        tensor=x, multiple=1, dim=3)

  # TODO(ptucker): Use @parameterized when it's available in tf.
  def testExpandAndTile5x(self):
    # multiple=5: the new axis has length 5, values repeated 5 times.
    # Shape (3,3,3).
    x = ((
        (1, 2, 3),
        (4, 5, 6),
        (7, 8, 9)
    ), (
        (10, 11, 12),
        (13, 14, 15),
        (16, 17, 18)
    ), (
        (19, 20, 21),
        (22, 23, 24),
        (25, 26, 26)
    ))
    # NOTE(review): this session is redundant -- _test_expand_and_tile opens
    # its own graph and session per call.
    with self.test_session():
      for dim in (None, -3, 0):
        self._test_expand_and_tile(
            expected_shape=(5, 3, 3, 3),
            expected_value=[x] * 5,
            tensor=x, multiple=5, dim=dim)
      for dim in (-2, 1):
        self._test_expand_and_tile(
            expected_shape=(3, 5, 3, 3),
            expected_value=[[x1] * 5 for x1 in x],
            tensor=x, multiple=5, dim=dim)
      for dim in (-1, 2):
        self._test_expand_and_tile(
            expected_shape=(3, 3, 5, 3),
            expected_value=[[[x2] * 5 for x2 in x1] for x1 in x],
            tensor=x, multiple=5, dim=dim)
      self._test_expand_and_tile(
          expected_shape=(3, 3, 3, 5),
          expected_value=[[[[x3] * 5 for x3 in x2] for x2 in x1] for x1 in x],
          tensor=x, multiple=5, dim=3)

  def _assert_sparse_tensors_equal(self, expected, actual):
    # Compares indices, values and dense shape component-wise.
    self.assertAllEqual(expected.indices, actual.indices)
    self.assertAllEqual(expected.values, actual.values)
    self.assertAllEqual(expected.shape, actual.shape)

  # TODO(ptucker): Use @parameterized when it's available in tf.
  def testSparseExpandAndTile1x(self):
    # Shape (3,3).
    x = tf.SparseTensorValue(
        indices=[
            [0, 0], [0, 1],
            [1, 0], [1, 1], [1, 2],
            [2, 0]],
        values=[
            1, 2,
            3, 4, 5,
            6],
        shape=[3, 3])
    with self.test_session():
      # dim=0 (and the default) prefix every index with the new axis.
      expected_result_dim0 = tf.SparseTensorValue(
          indices=[[0, i[0], i[1]] for i in x.indices], values=x.values,
          shape=[1, 3, 3])
      self._assert_sparse_tensors_equal(
          expected_result_dim0,
          metric_ops.expand_and_tile(x, multiple=1).eval())
      for dim in (-2, 0):
        self._assert_sparse_tensors_equal(
            expected_result_dim0,
            metric_ops.expand_and_tile(x, multiple=1, dim=dim).eval())
      # dim=1 inserts the new axis between the two original axes.
      expected_result_dim1 = tf.SparseTensorValue(
          indices=[[i[0], 0, i[1]] for i in x.indices], values=x.values,
          shape=[3, 1, 3])
      for dim in (-1, 1):
        self._assert_sparse_tensors_equal(
            expected_result_dim1,
            metric_ops.expand_and_tile(x, multiple=1, dim=dim).eval())
      # dim=2 appends the new axis at the end.
      expected_result_dim2 = tf.SparseTensorValue(
          indices=[[i[0], i[1], 0] for i in x.indices], values=x.values,
          shape=[3, 3, 1])
      self._assert_sparse_tensors_equal(
          expected_result_dim2,
          metric_ops.expand_and_tile(x, multiple=1, dim=2).eval())

  # TODO(ptucker): Use @parameterized when it's available in tf.
  def testSparseExpandAndTile5x(self):
    # Shape (3,3).
    x = tf.SparseTensorValue(
        indices=(
            (0, 0), (0, 1),
            (1, 0), (1, 1), (1, 2),
            (2, 0)),
        values=(
            1, 2,
            3, 4, 5,
            6),
        shape=(3, 3))
    with self.test_session():
      # Tiling along dim=0: 5 full copies of the tensor, stacked.
      expected_result_dim0 = tf.SparseTensorValue(
          indices=[(d0, i[0], i[1]) for d0 in range(5) for i in x.indices],
          values=[v for _ in range(5) for v in x.values],
          shape=(5, 3, 3))
      self._assert_sparse_tensors_equal(
          expected_result_dim0,
          metric_ops.expand_and_tile(x, multiple=5).eval())
      for dim in (-2, 0):
        self._assert_sparse_tensors_equal(
            expected_result_dim0,
            metric_ops.expand_and_tile(x, multiple=5, dim=dim).eval())
      # Tiling along dim=1: each original row is repeated 5 times.
      expected_result_dim1 = tf.SparseTensorValue(
          indices=[
              (d0, d1, i[1])
              for d0 in range(3)
              for d1 in range(5)
              for i in x.indices if i[0] == d0],
          values=x.values[0:2] * 5 + x.values[2:5] * 5 + x.values[5:] * 5,
          shape=(3, 5, 3))
      for dim in (-1, 1):
        self._assert_sparse_tensors_equal(
            expected_result_dim1,
            metric_ops.expand_and_tile(x, multiple=5, dim=dim).eval())
      # Tiling along dim=2: each element is repeated 5 times in place.
      expected_result_dim2 = tf.SparseTensorValue(
          indices=[(i[0], i[1], d2) for i in x.indices for d2 in range(5)],
          values=[v for v in x.values for _ in range(5)],
          shape=(3, 3, 5))
      self._assert_sparse_tensors_equal(
          expected_result_dim2,
          metric_ops.expand_and_tile(x, multiple=5, dim=2).eval())
if __name__ == '__main__':
  # Run all test cases in this module.
  tf.test.main()
| nanditav/15712-TensorFlow | tensorflow/contrib/metrics/python/ops/metric_ops_test.py | Python | apache-2.0 | 163,728 |
"""
First make sure you have a Python3 program for your answer in ./answer/
Then run:
python3 zipout.py
This will create a file `output.zip`.
To customize the files used by default, run:
python3 zipout.py -h
"""
import sys, os, optparse, logging, tempfile, subprocess, shutil
import iocollect
class ZipOutput:
    """Runs an answer program on every input file and collects its output.

    For each input file, the program's stdout/stderr/return code are written
    into ``output_dir`` (mirroring the subdirectory layout of ``input_dir``)
    so that they can later be zipped for submission.
    """

    def __init__(self, opts):
        self.run_program = opts.run_program  # solution to hw that is being tested
        self.python_bin = opts.python_bin    # Python binary to run
        self.answer_dir = opts.answer_dir    # name of directory where run_program exists
        self.input_dir = opts.input_dir      # directory where input files are placed
        self.output_dir = opts.output_dir    # directory for output files of your program
        self.file_suffix = opts.file_suffix  # file suffix for input files

    def mkdirp(self, path):
        """Create *path* (and parents); warn instead of failing if it exists."""
        try:
            os.makedirs(path)
        except os.error:
            print("Warning: {} already exists. Existing files will be over-written.".format(path), file=sys.stderr)

    def run(self, filename, path, output_path, base):
        """
        Runs a command specified by an argument vector (including the program name)
        and returns lists of lines from stdout and stderr.
        """
        # Create the output files. If no output_path is given, use throwaway
        # temp files which are removed in the outer finally.
        if output_path is not None:
            stdout_path = os.path.join(output_path, "{}.out".format(base))
            stderr_path = os.path.join(output_path, "{}.err".format(base))
            # existing files are erased!
            stdout_file = open(stdout_path, 'w')
            stderr_file = open(stderr_path, 'w')
            status_path = os.path.join(output_path, "{}.ret".format(base))
        else:
            # mkstemp returns (fd, path); the fd is closed with os.close below.
            stdout_file, stdout_path = tempfile.mkstemp("stdout")
            stderr_file, stderr_path = tempfile.mkstemp("stderr")
            status_path = None

        run_program_path = os.path.abspath(os.path.join(self.answer_dir, self.run_program))
        run_python = os.path.abspath(self.python_bin)
        if os.path.exists(run_python) and os.access(run_python, os.X_OK):
            argv = [run_python, run_program_path, '-i', filename]
        else:
            print("Did not find {}. Are you sure you set up a virtualenv? Run `python3 -m venv venv` in the current directory.".format(self.python_bin), file=sys.stderr)
            # BUG FIX: this fallback previously referenced the undefined
            # attribute ``self.run_program_path`` and raised AttributeError.
            if os.path.exists(run_program_path) and os.access(run_program_path, os.X_OK):
                argv = [run_program_path, '-i', filename]
            else:
                raise ValueError("Could not run {} {}".format(self.python_bin, run_program_path))

        stdin_file = open(filename, 'r')
        try:
            try:
                try:
                    prog = subprocess.Popen(argv, stdin=stdin_file or subprocess.PIPE, stdout=stdout_file, stderr=stderr_file)
                    if stdin_file is None:
                        prog.stdin.close()
                    prog.wait()
                finally:
                    # Close/flush the capture targets before reading them back.
                    if output_path is not None:
                        stdout_file.close()
                        stderr_file.close()
                    else:
                        os.close(stdout_file)
                        os.close(stderr_file)
                if status_path is not None:
                    with open(status_path, 'w') as status_file:
                        print(prog.returncode, file=status_file)
                with open(stdout_path) as stdout_input:
                    stdout_lines = list(stdout_input)
                with open(stderr_path) as stderr_input:
                    stderr_lines = list(stderr_input)
                if prog.stdin is not None:
                    prog.stdin.close()
                return stdout_lines, stderr_lines, prog.returncode
            except:
                # Report which command failed, then re-raise for the caller.
                print("error: something went wrong when trying to run the following command:", file=sys.stderr)
                print(argv, file=sys.stderr)
                raise
                # sys.exit(1)
        finally:
            stdin_file.close()
            if output_path is None:
                os.remove(stdout_path)
                os.remove(stderr_path)

    def run_path(self, path, files):
        """Run the program on every matching input file under *path*."""
        # set up output directory
        if path is None or path == '':
            output_path = os.path.abspath(self.output_dir)
        else:
            output_path = os.path.abspath(os.path.join(self.output_dir, path))
        self.mkdirp(output_path)
        for filename in files:
            if path is None or path == '':
                testfile_path = os.path.abspath(os.path.join(self.input_dir, filename))
            else:
                testfile_path = os.path.abspath(os.path.join(self.input_dir, path, filename))
            # Only process files that end with the configured suffix.
            if filename[-len(self.file_suffix):] == self.file_suffix:
                base = filename[:-len(self.file_suffix)]
                if os.path.exists(testfile_path):
                    print("running on input {}".format(testfile_path), file=sys.stderr)
                    self.run(testfile_path, path, output_path, base)

    def run_all(self):
        """Run the program over the whole input tree. Returns True on success."""
        # check that a compiled binary exists to run on the input files
        argv = os.path.abspath(os.path.join(self.answer_dir, self.run_program))
        if not os.path.isfile(argv):
            logging.error("answer program missing: {}".format(argv))
            raise ValueError("Compile your source file to create an executable {}".format(argv))
        # check if input directory has subdirectories
        testcase_subdirs = iocollect.getdirs(os.path.abspath(self.input_dir))
        if len(testcase_subdirs) > 0:
            for subdir in testcase_subdirs:
                # BUG FIX: previously used the undefined ``self.testcase_dir``,
                # raising AttributeError whenever subdirectories existed.
                files = iocollect.getfiles(os.path.abspath(os.path.join(self.input_dir, subdir)))
                self.run_path(subdir, files)
        else:
            files = iocollect.getfiles(os.path.abspath(self.input_dir))
            self.run_path(None, files)
        return True
if __name__ == '__main__':
    # zipout_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    optparser = optparse.OptionParser()
    optparser.add_option("-r", "--run", dest="run_program", default='ensegment.py', help="run this program against testcases [default: ensegment.py]")
    # NOTE(review): the help text says "[default: python3]" but the actual
    # default is 'venv/bin/python3' -- confirm which is intended.
    optparser.add_option("-x", "--pythonbin", dest="python_bin", default='venv/bin/python3', help="run this binary of Python to run the program [default: python3]")
    optparser.add_option("-a", "--answerdir", dest="answer_dir", default='answer', help="answer directory [default: answer]")
    optparser.add_option("-i", "--inputdir", dest="input_dir", default=os.path.join('data', 'input'), help="testcases directory [default: data/input]")
    optparser.add_option("-e", "--ending", dest="file_suffix", default='.txt', help="suffix to use for testcases [default: .txt]")
    optparser.add_option("-o", "--output", dest="output_dir", default='output', help="Save the output from the testcases to this directory.")
    optparser.add_option("-z", "--zipfile", dest="zipfile", default='output', help="zip file with your output answers")
    optparser.add_option("-l", "--logfile", dest="logfile", default=None, help="log file for debugging")
    (opts, _) = optparser.parse_args()
    if opts.logfile is not None:
        logging.basicConfig(filename=opts.logfile, filemode='w', level=logging.INFO)
    # Run the answer program over all inputs, then zip the output directory.
    zo = ZipOutput(opts)
    if zo.run_all():
        outputs_zipfile = shutil.make_archive(opts.zipfile, 'zip', opts.output_dir)
        print("{} created".format(outputs_zipfile), file=sys.stderr)
    else:
        logging.error("problem in creating output zip file")
        sys.exit(1)
| anoopsarkar/nlp-class-hw | ensegment/zipout.py | Python | apache-2.0 | 7,507 |
import os
import re
import sys
import unittest
from coalib import coala
from coalib.misc.ContextManagers import prepare_file
from coalib.tests.test_bears.LineCountTestBear import (
LineCountTestBear)
from coalib.tests.TestUtilities import execute_coala, bear_test_module
class coalaTest(unittest.TestCase):
    """End-to-end tests that drive the ``coala`` command-line entry point."""

    def setUp(self):
        # Preserve argv so argv mutations inside a test cannot leak out.
        self.old_argv = sys.argv

    def tearDown(self):
        sys.argv = self.old_argv

    def test_coala(self):
        # Run coala with only LineCountTestBear against a one-line temp file.
        with bear_test_module(), \
                prepare_file(["#fixme"], None) as (lines, filename):
            retval, output = execute_coala(
                coala.main,
                "coala", "-c", os.devnull,
                "-f", re.escape(filename),
                "-b", "LineCountTestBear")
            self.assertIn("This file has 1 lines.",
                          output,
                          "The output should report count as 1 lines")

    def test_did_nothing(self):
        # With all sections disabled, coala exits 0 and reports inactivity.
        retval, output = execute_coala(coala.main, "coala", "-c", os.devnull,
                                       "-S", "default.enabled=false")
        self.assertEqual(retval, 0)
        self.assertIn("No existent section was targeted or enabled", output)

    def test_show_bears(self):
        with bear_test_module():
            # -A lists all bears; expect at least one " * " bullet line.
            retval, output = execute_coala(coala.main, "coala", "-A")
            self.assertEqual(retval, 0)
            bear_lines = [i.startswith(" * ") for i in output.split()]
            self.assertGreater(len(bear_lines), 0)
            # -B with -b shows the docstring of the selected bear.
            retval, output = execute_coala(coala.main, "coala", "-B",
                                           "-b", "LineCountTestBear",
                                           "-c", os.devnull)
            self.assertEqual(retval, 0)
            self.assertIn(LineCountTestBear.run.__doc__.strip(), output)
| sudheesh001/coala | coalib/tests/coalaTest.py | Python | agpl-3.0 | 1,871 |
from django import template
from vault.models import UploadedFile
from news.models import Story
from django.template.defaultfilters import date, time
from datetime import datetime, timedelta
register = template.Library()
@register.simple_tag
def file_list(request, count=10):
    """Render an HTML list of the newest *count* files visible to the user."""
    visible = UploadedFile.objects.for_user(request.user)
    return html_list(visible[0:count])
def html_list(files):
    """Build an HTML ``<ul>`` of download links for *files*.

    Each item links to the file's absolute URL and shows its upload
    date/time formatted with Django's DATE_FORMAT / TIME_FORMAT filters.
    (Parameter renamed from ``list``, which shadowed the builtin; items are
    joined instead of concatenated to avoid quadratic string building.)
    """
    items = []
    for file in files:
        items.append("<li><a href=\"" + file.get_absolute_url() + "\">" + file.filename + "</a><br />" + date(file.uploaded, "DATE_FORMAT") + " " + time(file.uploaded, "TIME_FORMAT") + "</li>")
    # NOTE(review): file.filename is interpolated without HTML escaping --
    # confirm filenames are trusted upstream.
    return "<ul>" + "".join(items) + "</ul>"
@register.simple_tag
def last_commented_news(request, count):
    """Render the *count* most recently commented distinct news threads.

    Walks stories from the last 180 days, newest first, keeping only the
    first story seen per top-level thread (``get_top()``).
    """
    tops = []
    already_seen = set()
    cutoff = datetime.now() - timedelta(days=180)
    stories = Story.objects.for_user(request.user).order_by("-created").filter(created__gt=cutoff)
    for story in stories:
        if len(tops) >= count:
            break
        top = story.get_top()
        if top not in already_seen:
            already_seen.add(top)
            tops.append((top.title, top.get_absolute_url(), story.created))
    return format_active_list(tops)
def format_active_list(debates):
    """Render ``(title, url, created)`` tuples as an HTML ``<ul>``.

    (Parameter renamed from ``list``, which shadowed the builtin; parts are
    joined instead of concatenated to avoid quadratic string building.)
    """
    parts = ['<ul>']
    for debate in debates:
        title, url, created = debate
        timestamp = date(created, "DATE_FORMAT") + " " + time(created, "TIME_FORMAT")
        parts.append('<li><a href="%s">%s</a><br />%s</li>' % (url, title, timestamp))
    parts.append('</ul>')
    return ''.join(parts)
| sigurdga/nidarholm | navigation/templatetags/interactivity.py | Python | agpl-3.0 | 1,488 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
from testtools import TestCase
from tokenizer import Tokenizer, parse_args, process_args
class TestTokenizer(TestCase):
    """Smoke tests for the Indic Tokenizer and its CLI argument parsing."""

    def setUp(self):
        super(TestTokenizer, self).setUp()
        # ISO-639 codes of the languages with a <lang>.txt fixture alongside
        # this test file.
        self.languages = "eng hin urd ben guj mal pan tel tam kan ori".split()
        self.test_dir = os.path.dirname(os.path.abspath(__file__))

    def test_tokenizer(self):
        # Tokenize every line of each language fixture with sentence
        # splitting enabled; only checks that a list comes back.
        for lang in self.languages:
            tok = Tokenizer(split_sen=True, lang=lang)
            with io.open('%s/%s.txt' % (self.test_dir, lang),
                         encoding='utf-8') as fp:
                for line in fp:
                    tokenized_text = tok.tokenize(line)
                    # Dummy Assertion
                    self.assertIsInstance(tokenized_text, list)

    def test_parser(self):
        # test parser arguments
        parser = parse_args(['--input', 'path/to/input_file',
                             '--output', 'path/to/output_file',
                             '--language', 'kas',
                             '--split-sentences'])
        self.assertEqual(parser.infile, 'path/to/input_file')
        self.assertEqual(parser.outfile, 'path/to/output_file')
        self.assertEqual(parser.lang, 'kas')
        self.assertTrue(parser.split_sen)
        # test parser args processing (end-to-end on the English fixture)
        process_args(parse_args(['-i', '%s/eng.txt' % self.test_dir,
                                 '-o', '/tmp/test.out',
                                 '-l', 'eng',
                                 '-s']))
| irshadbhat/indic-tokenizer | polyglot_tokenizer/tests/test_tokenizer.py | Python | mit | 1,574 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django manage.py entry point: point at this project's settings
    # (unless already set in the environment) and dispatch the CLI command.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wordapp.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| OtagoPolytechnic/LanguageCards | admin/manage.py | Python | mit | 250 |
import os
from note.infrastructure import config
from note.utils.cached_property import cached_property
from note.utils.os.fs import exist_in_or_above
from note.utils.pattern import Singleton
class PathHelper(metaclass=Singleton):
    """
    Provides absolute paths to the program's files inside the workspace.
    """

    def __init__(self):
        # Workspace root; stays None when no app-data directory is found at
        # or above the current working directory.
        self._root_dir = None
        path = exist_in_or_above(os.getcwd(), config.APP_DATE_DIR_NAME)
        if path:
            # Original note (translated): because *path* is a directory whose
            # string ends with a slash, reaching its parent needs dirname twice.
            # NOTE(review): only one dirname is applied here, which contradicts
            # that note -- confirm which behavior is intended.
            dirname = os.path.dirname(path)
            self._root_dir = dirname

    @property
    def root_dir(self):
        return self._root_dir

    @root_dir.setter
    def root_dir(self, value):
        self._root_dir = value

    @cached_property
    def task_path(self):
        return self._join(config.TASK_DIR_NAME)

    @cached_property
    def app_date_path(self):
        return self._join(config.APP_DATE_DIR_NAME)

    @cached_property
    def db_path(self):
        return self._join(config.DB_PATH)

    @cached_property
    def log_path(self):
        return self._join(config.LOG_PATH)

    @cached_property
    def user_config_path(self):
        return self._join(config.CONFIG_PATH)

    @cached_property
    def ignore_path(self):
        return self._join(config.IGNORE_PATH)

    @cached_property
    def purge_path(self):
        return self._join(config.PURGE_DIR_NAME)

    @cached_property
    def workspace_operation_record_path(self):
        return self._join(config.WORKSPACE_OPERATION_RECORD_PATH)

    def _join(self, path):
        # All public paths are resolved relative to the workspace root;
        # raises FileNotFoundError when no workspace was located.
        if self._root_dir:
            return os.path.join(self._root_dir, path)
        else:
            raise FileNotFoundError
| urnote/urnote | note/module/pathhelper.py | Python | gpl-3.0 | 1,735 |
# encoding: utf-8
"""
@author: gallupliu
@contact: gallup-liu@hotmail.com
@version: 1.0
@license: Apache Licence
@file: v2_reader.py
@time: 2017/12/31 17:58
"""
import numpy as np
from data.models import *
from data.reader import TSVArchiveReader
class V2Reader(TSVArchiveReader):
    """Reader for the InsuranceQA V2 archive (gzipped TSV files)."""

    def __init__(self, archive_path, lowercased, logger, pooled_answers=500, tokenizer='token'):
        super(V2Reader, self).__init__(archive_path, lowercased, logger)
        # Size of the candidate-answer pool per question (file name component).
        self.pooled_answers = pooled_answers
        # Tokenization variant encoded in the archive file names.
        self.tokenizer = tokenizer

    def file_path(self, filename):
        # All V2 data files live under the archive's V2/ subdirectory.
        return '{}/V2/{}'.format(self.archive_path, filename)

    def read_split(self, name, vocab, answers):
        """Read one split (train/valid/test) into a Data object.

        Each line holds: domain, question token ids, ground-truth answer ids,
        and the pooled candidate answer ids.
        """
        filename = 'InsuranceQA.question.anslabel.{}.{}.pool.solr.{}.encoded.gz'.format(
            self.tokenizer, self.pooled_answers, name
        )
        datapoints = []
        split_answers = []
        for i, line in enumerate(self.read_tsv(self.file_path(filename), is_gzip=True)):
            # Token ids are mapped back to words through the vocabulary.
            question_tokens = [Token(vocab[t]) for t in line[1].split()]
            question_sentence = Sentence(' '.join([t.text for t in question_tokens]), question_tokens)
            question = TextItem(question_sentence.text, [question_sentence])
            question.metadata['id'] = '{}-{}'.format(name, i)
            ground_truth = [answers[gt] for gt in line[2].split(' ')]
            pool = [answers[pa] for pa in line[3].split(' ')]
            np.random.shuffle(pool)
            datapoints.append(QAPool(question, pool, ground_truth))
            split_answers += pool
        # we filter out all pools that do not contain any ground truth answer
        qa_pools_len_before = len(datapoints)
        datapoints = [p for p in datapoints if len([1 for gt in p.ground_truth if gt in p.pooled_answers]) > 0]
        qa_pools_len_after = len(datapoints)
        self.logger.info("Split {} reduced to {} item from {} due to missing ground truth in pool".format(
            name, qa_pools_len_after, qa_pools_len_before
        ))
        return Data(name, datapoints, split_answers)

    def read(self):
        """Read the full archive: vocabulary, answers and all three splits."""
        vocab = dict(self.read_tsv(self.file_path('vocabulary')))
        answers_path = 'InsuranceQA.label2answer.{}.encoded.gz'.format(self.tokenizer)
        answers = dict()
        for line in self.read_tsv(self.file_path(answers_path), is_gzip=True):
            id = line[0]
            tokens = [Token(vocab[t]) for t in line[1].split(' ')]
            answer_sentence = Sentence(' '.join(t.text for t in tokens), tokens)
            answer = TextItem(answer_sentence.text, [answer_sentence])
            answer.metadata['id'] = id
            answers[id] = answer
        train = self.read_split("train", vocab, answers)
        valid = self.read_split("valid", vocab, answers)
        test = self.read_split("test", vocab, answers)
        questions = [qa.question for qa in (train.qa + valid.qa + test.qa)]
        return Archive(train, valid, [test], questions, list(answers.values()))
| gallupliu/QA | data/insuranceqa/reader/v2_reader.py | Python | apache-2.0 | 3,036 |
# -*- coding: UTF-8 -*-
# Copyright 2015-2017 Luc Saffre
# License: BSD (see file COPYING for details)
"""A :ref:`care` site with languages "en fr de".
.. autosummary::
:toctree:
lib
user_types
settings
"""
| khchine5/book | lino_book/projects/anna/__init__.py | Python | bsd-2-clause | 223 |
import random
from .tiles import base
# The 15 tiles each player starts the game with (note: two PIKEMAN entries).
INITIAL_TILES = [
    base.ASSASSIN, base.BOWMAN, base.CHAMPION, base.DRAGOON, base.FOOTMAN,
    base.GENERAL, base.KNIGHT, base.LONGBOWMAN, base.MARSHALL, base.PIKEMAN,
    base.PIKEMAN, base.PRIEST, base.RANGER, base.SEER, base.WIZARD,
]
class Game(object):
    """Holds the board state and one shuffled tile bag per player."""

    def __init__(self, initial_tiles=INITIAL_TILES):
        self.board = {}
        # Each player draws from an independent, independently shuffled copy
        # of the starting tiles; the shared default is never mutated.
        first_bag = list(initial_tiles)
        second_bag = list(initial_tiles)
        random.shuffle(first_bag)
        random.shuffle(second_bag)
        self.bags = (first_bag, second_bag)
| rorytrent/the-duke | duke/game.py | Python | gpl-3.0 | 499 |
from django.db import models
from .constants import *
class Polity(models.Model):
    """
    Contains information on political entities.
    """
    name = models.CharField(max_length=255)
    # One of POLITY_TYPES (imported from .constants); defaults to 'City'.
    polity_type = models.CharField(choices=POLITY_TYPES,
                                   default='City',
                                   max_length=50)
    # Optional containing polity; deleted together with its parent.
    parent_polity = models.ForeignKey('self', on_delete=models.CASCADE,
                                      null=True, blank=True)
    web_site = models.URLField(null=True, blank=True)
    election_website = models.URLField(null=True, blank=True)
    next_election = models.DateField(null=True, blank=True)
    num_representatives = models.IntegerField(null=True, blank=True)
    num_wards = models.IntegerField(null=True, blank=True)
    separate_executive = models.BooleanField(default=True) # usually refers to mayor
    notes = models.TextField(null=True, blank=True)

    def __str__(self):
        # Debug-friendly string: stringified dict of pk and name.
        return str({'pk': self.pk, 'name': self.name})
class Election(models.Model):
    """
    Election details
    """
    polity = models.ForeignKey(Polity,
                               on_delete=models.CASCADE)
    # Date ballots are cast.
    vote_date = models.DateField()
    campaign_start_date = models.DateField(null=True, blank=True)
class District(models.Model):
    """
    Information on ridings/wards/districts
    """
    polity = models.ForeignKey(Polity,
                               related_name='districts',
                               on_delete=models.CASCADE)
    name = models.CharField(max_length=255)
    # Kept as a string to allow non-numeric ward identifiers.
    number = models.CharField(max_length=25,
                              null=True, blank=True)
    # Number of representatives elected from this district.
    num_reps = models.IntegerField(default=1,)
    # True when the "district" actually covers the whole polity (at-large).
    is_whole_polity = models.BooleanField(default=False,)
    shapefile_link = models.URLField(null=True, blank=True)
class Candidate(models.Model):
    """
    Information on individuals running for office
    """
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    web_site = models.URLField(blank=True, null=True)
    twitter_handle = models.CharField(max_length=100, blank=True, null=True)
    email = models.EmailField(blank=True, null=True)
    phone = models.CharField(max_length=30, blank=True, null=True)

    def __str__(self):
        # Display as "First Last".
        return self.first_name + ' ' + self.last_name
class ElectionCandidate(models.Model):
    """
    Information on Candidates particular to a specific election
    """
    candidate = models.ForeignKey(Candidate,
                                  on_delete=models.CASCADE,
                                  related_name='candidate_in')
    election = models.ForeignKey(Election,
                                 on_delete=models.CASCADE,
                                 related_name='candidate_details')
    district = models.ForeignKey(District,
                                 on_delete=models.CASCADE)
    incumbent = models.BooleanField(default=False)

    class Meta:
        # A candidate may run only once per (election, district) pair.
        unique_together = ('candidate', 'election', 'district')
class Poll(models.Model):
    """
    Selection of questions for a particular election
    """
    election = models.ForeignKey(Election,
                                 on_delete=models.CASCADE,
                                 related_name='polls')
    name = models.CharField(max_length=100, default='default')
class IssueCategory(models.Model):
    """
    Provides broader categories for poll questions
    """
    category = models.CharField(max_length=100,
                                unique=True)
class Question(models.Model):
    """
    Questions are designed to be of the form (agree/disagree) on a scale
    So, no answer choices are created
    """
    poll = models.ForeignKey(Poll,
                             on_delete=models.CASCADE,
                             related_name='questions')
    # A question can belong to several issue categories.
    category = models.ManyToManyField(IssueCategory,
                                      related_name='category_questions')
    question = models.TextField()
class Answer(models.Model):
    """Base model for a scaled response to a Question."""
    question = models.ForeignKey(Question,
                                 on_delete=models.CASCADE,
                                 related_name='answers')
    # Defaults of 5 for both scales; presumably the scale midpoint --
    # the valid range is not enforced here. TODO confirm against the forms.
    agreement = models.IntegerField(default=5)
    importance = models.IntegerField(default=5)
class CandidatePosition(Answer):
    """An Answer given by a specific election candidate."""
    candidate = models.ForeignKey(ElectionCandidate,
                                  on_delete=models.CASCADE,
                                  related_name='positions')
class PublicAnswer(Answer):
    """An Answer submitted by a site visitor, keyed by IP and session."""
    ip_address = models.GenericIPAddressField()
    session_id = models.CharField(max_length=255)
| asterix135/whoshouldivotefor | explorer/models.py | Python | mit | 4,638 |
import dsz
import dsz.cmd
import dsz.version
import dsz.script
import ops
import ops.cmd
import ops.db
import ops.project
import ops.system.registry
from datetime import timedelta, datetime
import time
# Cache tags used with ops.project.generic_cache_get for the queries below.
INSTALL_DATE_TAG = 'OS_INSTALL_DATE_TAG'
OS_LANGUAGE_TAG = 'OS_LANGUAGE_TAG'
SYSTEMVERSION_TAG = 'OS_VERSION_TAG'
# NOTE(review): MAX_CACHE_SIZE is defined but not referenced in this chunk.
MAX_CACHE_SIZE = 3
def get_os_language(maxage=timedelta(seconds=0), targetID=None, use_volatile=False):
    """Query the target's OS language, served from the project cache when fresh."""
    return ops.project.generic_cache_get(
        ops.cmd.getDszCommand('language'),
        cache_tag=OS_LANGUAGE_TAG,
        maxage=maxage,
        use_volatile=use_volatile,
        targetID=targetID)
def get_os_version(maxage=timedelta(seconds=0), targetID=None, use_volatile=False):
    """Query the target's OS version, served from the project cache when fresh."""
    return ops.project.generic_cache_get(
        ops.cmd.getDszCommand('systemversion'),
        cache_tag=SYSTEMVERSION_TAG,
        maxage=maxage,
        use_volatile=use_volatile,
        targetID=targetID)
def get_os_install_date(maxage=timedelta(seconds=0), targetID=None, use_volatile=False):
install_date = ops.system.registry.get_registrykey('L', 'Software\\Microsoft\\Windows NT\\CurrentVersion', cache_tag=ops.system.registry.NT_CURRENT_VERSION_KEY, maxage=timedelta(seconds=3600), use_volatile=use_volatile, targetID=targetID)
return time.asctime(time.localtime(int(install_date.key[0]['installdate'].value))) | DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/lib/ops/system/systemversion.py | Python | unlicense | 1,313 |
from arza.types import root, api, space, plist, datatype
from arza.runtime import error
class W_MirrorType(datatype.W_BaseDatatype):
    """Synthetic datatype attached to a W_Mirror.

    Carries only the interfaces the mirror exposes; reports itself to user
    code as a Datatype.
    """

    def __init__(self, name, interfaces):
        datatype.W_BaseDatatype.__init__(self, name, interfaces)

    def _type_(self, process):
        return process.std.types.Datatype

    def _to_string_(self):
        return self.name._to_string_()
class W_Mirror(root.W_Root):
    """Transparent proxy around *source*.

    Forwards every runtime protocol to the wrapped object, but answers the
    type protocol with a W_MirrorType restricted to *interfaces*.
    """

    def __init__(self, source, interfaces):
        self.source = source
        type_name = space.newstring(u"<MirrorType %s>" % api.to_s(interfaces))
        self.type = W_MirrorType(type_name, interfaces)

    def _to_repr_(self):
        return self._to_string_()

    def _at_(self, key):
        return self.source._at_(key)

    def _contains_(self, key):
        return self.source._contains_(key)

    def _at_index_(self, i):
        return self.source._at_index_(i)

    def _get_index_(self, obj):
        return self.source._get_index_(obj)

    def _put_at_index_(self, i, obj):
        return self.source._put_at_index_(i, obj)

    def _is_empty_(self):
        return self.source._is_empty_()

    def _length_(self):
        return self.source._length_()

    def _put_(self, k, v):
        return self.source._put_(k, v)

    def _remove_at_(self, key):
        return self.source._remove_at_(key)

    def _to_string_(self):
        return self.source._to_string_()

    def _to_bool_(self):
        return self.source._to_bool_()

    def _to_integer_(self):
        return self.source._to_integer_()

    def _to_float_(self):
        return self.source._to_float_()

    def _equal_(self, other):
        # BUG FIX: previously called self.source._equal_() without *other*,
        # which raised a TypeError on any equality check (compare _compare_,
        # which forwards its argument).
        return self.source._equal_(other)

    def _hash_(self):
        return self.source._hash_()

    def _compare_(self, other):
        return self.source._compare_(other)

    def _call_(self, process, args):
        return self.source._call_(process, args)

    def _type_(self, process):
        # The one protocol NOT forwarded: expose the restricted mirror type.
        return self.type

    def _compute_hash_(self):
        return self.source._compute_hash_()

    def _to_routine_(self, stack, args):
        return self.source._to_routine_(stack, args)

    def _clone_(self):
        return self.source._clone_()
def mirror(source, interfaces):
    """Create a W_Mirror over *source* exposing only *interfaces*.

    *interfaces* must be a list of interface objects; mirroring a mirror
    re-wraps the underlying record rather than nesting proxies.
    """
    error.affirm_type(interfaces, space.islist)
    error.affirm_iterable(interfaces, space.isinterface)
    target = source.source if space.ismirror(source) else source
    error.affirm_type(target, space.isrecord)
    return W_Mirror(target, interfaces)
| gloryofrobots/obin | arza/types/mirror.py | Python | gpl-2.0 | 2,472 |
from sqlalchemy import Column, String, Integer, Float, ForeignKey, PrimaryKeyConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, validates, backref
import time, json
# Shared SQLAlchemy declarative base for all ezdonate ORM models below.
DecBase = declarative_base()
class Server(DecBase):
    """A registered game server (name plus ip:port), table 'ezdonate_servers'."""
    __tablename__ = 'ezdonate_servers'

    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(255))
    ip = Column(String(16))
    port = Column(Integer)

    @validates('port')
    def validate_port(self, key, port):
        # Valid TCP/UDP ports are 1..65535. BUG FIX: the original upper bound
        # was `port < 65535`, wrongly rejecting the legal maximum port 65535.
        # NOTE: assert-based validation is stripped under `python -O`; kept as
        # asserts so callers still see AssertionError, as before.
        assert port > 0
        assert port <= 65535
        return port

    def __init__(self, name, ip, port, id=None):
        self.name = name
        self.ip = ip
        self.port = port
        self.id = id

    def __json__(self, request):
        # Serialized form used by the web layer; ip and port are combined.
        return {'id': self.id, 'name': self.name, 'address': '{ip}:{port}'.format(ip=self.ip, port=self.port)}
class Subscriber(DecBase):
    """A purchased item held by a steam user on a server, table 'ezdonate_orders'."""
    __tablename__ = 'ezdonate_orders'

    id = Column(Integer, primary_key=True, autoincrement=True)
    serv_id = Column(Integer, ForeignKey('ezdonate_servers.id', ondelete='CASCADE'), nullable=False)
    server = relationship('Server', backref=backref('subs', cascade='all,delete', lazy='joined'))
    steamid = Column(String(32))
    item_id = Column(Integer, ForeignKey('ezdonate_items.id', ondelete='CASCADE'))
    item = relationship('Item', backref=backref('purchasers', cascade='all,delete', lazy='joined'))
    # Expiry stored as an integer timestamp -- TODO confirm epoch seconds.
    expires = Column(Integer)

    def __init__(self, serv_id, steamid, item_id, expires):
        self.serv_id = serv_id
        self.steamid = steamid
        self.item_id = item_id
        self.expires = expires

    def __json__(self, request):
        return {'id': self.id, 'server': self.serv_id, 'steamid': self.steamid, 'item': self.item_id, 'expires': self.expires}
class User(DecBase):
    """A site account, table 'ezdonate_users'."""
    __tablename__ = 'ezdonate_users'

    id = Column(Integer, primary_key=True, autoincrement=True)
    user = Column(String(64), unique=True)
    # NOTE(review): presumably stores a password hash (width 512); hashing is
    # done by the caller, not this model -- confirm.
    password = Column(String(512))
    email = Column(String(128), unique=True)
    name = Column(String(128))
    steam = Column(String(128))
    groups = Column(String(64))

    def __init__(self, user, password, email, groups):
        # name and steam are not set by the constructor.
        self.user = user
        self.password = password
        self.email = email
        self.groups = groups
class Item(DecBase):
    """A purchasable item belonging to an ItemGroup, table 'ezdonate_items'."""
    __tablename__ = 'ezdonate_items'

    id = Column(Integer, primary_key=True, autoincrement=True)
    group_id = Column(Integer, ForeignKey('ezdonate_itemgroups.id', ondelete='CASCADE'), nullable=False)
    group = relationship('ItemGroup', backref=backref('items', cascade='all, delete', lazy='joined'))
    name = Column(String(64))
    shortdesc = Column(String(256))
    description = Column(String(2048))
    price = Column(Float, nullable=False, default=0.0)
    duration = Column(Integer)
    # JSON-encoded text; decoded with json.loads in __json__.
    arguments = Column(String(2048))

    def __init__(self, group_id, name, shortdesc, description, price, duration, arguments):
        self.group_id = group_id
        self.name = name
        self.shortdesc = shortdesc
        self.description = description
        self.price = price
        self.duration = duration
        self.arguments = arguments

    def __json__(self, request):
        return {'id': self.id, 'group': self.group_id, 'name': self.name, 'shortdesc': self.shortdesc, 'description': self.description,
                'price': self.price, 'duration': self.duration, 'arguments': json.loads(self.arguments)}
class ItemGroup(DecBase):
    """A category of items, table 'ezdonate_itemgroups'."""
    __tablename__ = 'ezdonate_itemgroups'

    id = Column(Integer, primary_key=True)
    name = Column(String(64))
    # JSON-encoded text; exposed as 'fields' (decoded) in __json__.
    values = Column(String(2048))
    arguments = Column(String(2048))

    def __init__(self, name, values, arguments):
        self.name = name
        self.arguments = arguments
        self.values = values

    def __json__(self, request):
        # Note: 'arguments' is intentionally not serialized here.
        return {'id': self.id, 'name': self.name, 'fields': json.loads(self.values)}
class ServerItem(DecBase):
	"""Association row marking that an Item is offered on a Server."""
	__tablename__ = 'ezdonate_serveritems'
	item_id = Column(Integer, ForeignKey('ezdonate_items.id', ondelete='CASCADE'))
	item = relationship('Item', backref=backref('servitems', cascade='all,delete', lazy='joined'))
	serv_id = Column(Integer, ForeignKey('ezdonate_servers.id', ondelete='CASCADE'))
	server = relationship('Server', backref=backref('items', cascade='all,delete', lazy='joined'))
	# Composite primary key: an item may appear at most once per server.
	__table_args__ = (PrimaryKeyConstraint('item_id', 'serv_id'), {})
	def __init__(self, item_id, serv_id):
		self.item_id = item_id
		self.serv_id = serv_id
class Transaction(DecBase):
	"""A donation purchase that has been initiated (archived rows live in CompletedTransaction)."""
	__tablename__ = 'ezdonate_transactions'
	txn_id = Column(Integer, primary_key=True, autoincrement=True)
	item_id = Column(Integer, ForeignKey('ezdonate_items.id', ondelete='CASCADE'))
	item = relationship('Item', backref=backref('txns', cascade='all,delete', lazy='joined'))
	serv_id = Column(Integer, ForeignKey('ezdonate_servers.id', ondelete='CASCADE'))
	server = relationship('Server', backref=backref('txns', cascade='all,delete', lazy='joined'))
	amount = Column(Float)
	steamid = Column(String(32))
	email = Column(String(128))
	# ``time`` is presumably a unix timestamp of transaction start -- confirm against callers.
	time = Column(Integer)
	def __init__(self, item_id, serv_id, amount, steamid, email, time):
		self.item_id = item_id
		self.serv_id = serv_id
		self.amount = amount
		self.steamid = steamid
		self.email = email
		self.time = time
class OngoingTransaction(DecBase):
	"""Maps an external payment identifier to a pending Transaction."""
	__tablename__ = 'ezdonate_ongoingtxns'
	# pay_id: presumably the payment provider's id for this payment -- verify.
	pay_id = Column(String(64), primary_key=True)
	txn_id = Column(Integer, ForeignKey('ezdonate_transactions.txn_id', ondelete='CASCADE'))
	transaction = relationship('Transaction', backref=backref('ongoing', cascade='all,delete', lazy='joined'))
	def __init__(self, pay_id, txn_id):
		self.pay_id = pay_id
		self.txn_id = txn_id
class CompletedTransaction(DecBase):
	"""Archived record of a finished donation transaction.

	Unlike ``Transaction`` this table keeps plain integer ids (no foreign
	keys), so rows survive deletion of the items/servers they refer to.
	"""
	__tablename__ = 'ezdonate_completetxns'
	id = Column(Integer, primary_key=True, autoincrement=True)
	txn_id = Column(Integer)
	item_id = Column(Integer)
	serv_id = Column(Integer)
	steamid = Column(String(62))
	email = Column(String(128))
	amount = Column(Float)
	time_started = Column(Integer)
	time_finished = Column(Integer)
	def __init__(self, txn_id, item_id, serv_id, steamid, email, amount, time_started, time_finished=None):
		# BUG FIX: the old default ``time_finished=time.time()`` was evaluated
		# once at import time, so every row created without an explicit value
		# got the interpreter start-up timestamp.  Evaluate per call instead.
		if time_finished is None:
			time_finished = time.time()
		self.txn_id = txn_id
		self.item_id = item_id
		self.serv_id = serv_id
		self.steamid = steamid
		self.email = email
		self.amount = amount
		self.time_started = time_started
		self.time_finished = time_finished
class Promotion(DecBase):
	"""A discount/promo code.  How ``value`` is applied depends on the integer
	``type`` -- the code mapping is not visible in this file; confirm."""
	__tablename__ = 'ezdonate_promotions'
	id = Column(Integer, primary_key=True, autoincrement=True)
	type = Column(Integer)
	value = Column(String(16))
	name = Column(String(64))
	code = Column(String(64))
	expires = Column(Integer)
	def __init__(self, type, value, name, code, expires):
		self.type = type
		self.value = value
		self.name = name
		self.code = code
		self.expires = expires
	def __json__(self, request):
		"""Serialize this promotion for the JSON renderer (``request`` is unused)."""
		return {'id': self.id, 'type': self.type, 'value': self.value, 'name': self.name, 'code': self.code, 'expires': self.expires}
class ItemPromotion(DecBase):
__tablename__ = 'ezdonage_promoitems'
promo_id = Column(Integer, ForeignKey('ezdonate_promotions.id', ondelete='CASCADE'))
promotion = relationship('Promotion', backref=backref('items', cascade='all,delete', lazy='joined'))
item_id = Column(Integer, ForeignKey('ezdonate_items.id', ondelete='CASCADE'))
item = relationship('Item', backref=backref('promotions', cascade='all,delete', lazy='joined'))
__table_args__ = (PrimaryKeyConstraint('promo_id', 'item_id'), {})
def __init__(self, promo_id, item_id):
self.item_id = item_id
self.promo_id = promo_id | EasyDonate/EasyDonate | EasyDonate/ORM.py | Python | gpl-3.0 | 7,304 |
#The MIT License (MIT)
#
#Copyright (C) 2014 OpenBet Limited
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
#THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from shutit_module import ShutItModule
class casperjs(ShutItModule):
	"""ShutIt module that installs CasperJS 1.0.2 from source under /opt."""
	def build(self, shutit):
		"""Clone the casperjs repo and pin it to the 1.0.2 tag."""
		# git is needed to fetch the sources.
		shutit.install('git')
		shutit.run_script("""
	#!/bin/bash
	cd /opt
	git clone git://github.com/n1k0/casperjs.git
	cd casperjs
	git checkout tags/1.0.2
	""", in_shell=False)
		return True
	def remove(self, shutit):
		"""Undo build(): delete the cloned checkout."""
		shutit.send('rm -rf /opt/casperjs')
		return True
def module():
	"""Factory used by the ShutIt framework to instantiate this module."""
	return casperjs(
		'shutit.tk.casperjs.casperjs', 0.314,
		description='http://casperjs.org/',
		depends=['shutit.tk.setup']
	)
| ianmiell/shutit-library | casperjs/casperjs.py | Python | mit | 1,626 |
import os
import shutil
import tempfile
import git
from dvc import logger
from dvc.exceptions import DvcException
class TempRepoException(DvcException):
    """Raised when a temporary package repository cannot be set up properly."""

    def __init__(self, temp_repo, msg, cause=None):
        message = "temp repository '%s' error: %s" % (temp_repo.addr, msg)
        super(TempRepoException, self).__init__(message, cause)
class TempGitRepo(object):
    """Context manager that shallow-clones a git package repository into a
    temporary directory, strips git metadata, and can later persist the
    checkout (plus its local cache) into a parent repo via ``persist_to``.
    """
    GIT_DIR_TO_REMOVE = ".git"
    GIT_FILES_TO_REMOVE = [".gitignore", ".gitmodules"]
    SUFFIX = "tmp_DVC_mod"
    def __init__(self, addr, module_name, modules_dir):
        # ``addr`` is the git URL; temp clones are created under modules_dir.
        self.addr = addr
        self.modules_dir = modules_dir
        self._tmp_mod_prefix = module_name + "_" + self.SUFFIX + "_"
        self._reset_state()
    def _reset_state(self):
        # Forget any clone: no temp dir, no outputs.
        self._set_state(None, [])
    def _set_state(self, cloned_tmp_dir, outs):
        from dvc.repo import Repo
        self.repo = Repo(cloned_tmp_dir) if cloned_tmp_dir else None
        self._cloned_tmp_dir = cloned_tmp_dir
        self.outs = outs
        self._moved_files = []
    def fetch(self, targets):
        """Fetch each target's data into the temp clone, wrapping failures."""
        for target in targets:
            try:
                self.repo.fetch(target)
            except Exception as ex:
                msg = "error in fetching data from {}: {}".format(
                    os.path.basename(target), ex
                )
                raise TempRepoException(self, msg)
    @property
    def is_state_set(self):
        # True once a clone exists, i.e. inside the context manager.
        return self._cloned_tmp_dir is not None
    def __enter__(self):
        """Clone, strip git metadata and read outputs; clean up on failure."""
        if self.is_state_set:
            raise TempRepoException(self, "Git repo cloning duplication")
        module_temp_dir = None
        try:
            module_temp_dir = self._clone_to_temp_dir()
            self._remove_git_files_and_dirs(module_temp_dir)
            outputs = self._read_outputs(module_temp_dir)
            self._set_state(module_temp_dir, outputs)
        finally:
            # Only removes the dir if _set_state above was never reached.
            self._clean_mod_temp_dir(module_temp_dir)
        return self
    def _clone_to_temp_dir(self):
        # NOTE(review): tempfile.mktemp only reserves a name (race-prone), but
        # git clone requires a non-existing target -- confirm before changing.
        result = tempfile.mktemp(
            prefix=self._tmp_mod_prefix, dir=self.modules_dir
        )
        logger.debug(
            "Cloning git repository {} to temp dir {}".format(
                self.addr, result
            )
        )
        git.Repo.clone_from(self.addr, result, depth=1)
        return result
    def _remove_git_files_and_dirs(self, module_temp_dir):
        """Delete .git dir and git bookkeeping files from the clone."""
        logger.debug(
            "Removing git meta files from {}: {} dif and {}".format(
                module_temp_dir,
                TempGitRepo.GIT_DIR_TO_REMOVE,
                ", ".join(TempGitRepo.GIT_FILES_TO_REMOVE),
            )
        )
        shutil.rmtree(
            os.path.join(module_temp_dir, TempGitRepo.GIT_DIR_TO_REMOVE)
        )
        for item in TempGitRepo.GIT_FILES_TO_REMOVE:
            fname = os.path.join(module_temp_dir, item)
            if os.path.exists(fname):
                os.remove(fname)
    def _clean_mod_temp_dir(self, module_temp_dir):
        # Best-effort removal; only when the clone never became "state".
        if not self.is_state_set:
            if module_temp_dir and os.path.exists(module_temp_dir):
                shutil.rmtree(module_temp_dir, ignore_errors=True)
    def _read_outputs(self, module_temp_dir):
        """Collect all stage outputs declared by the cloned repo."""
        from dvc.repo import Repo
        pkg_repo = Repo(module_temp_dir)
        stages = pkg_repo.stages()
        return [out for s in stages for out in s.outs]
    def persist_to(self, module_dir, parent_repo):
        """Move cache objects into parent_repo's cache and the clone into module_dir."""
        if not self.is_state_set:
            raise TempRepoException(self, "cannot persist")
        tmp_repo_cache = self.repo.cache.local.url
        for prefix in os.listdir(tmp_repo_cache):
            if len(prefix) != 2:
                # NOTE(review): this warns about a malformed cache dir but
                # still moves its files below -- confirm that is intended.
                logger.warning(
                    "wrong dir format in cache {}: dir {}".format(
                        tmp_repo_cache, prefix
                    )
                )
            self._move_all_cache_files(parent_repo, prefix, tmp_repo_cache)
        shutil.move(self._cloned_tmp_dir, module_dir)
        self._reset_state()
    @staticmethod
    def _move_all_cache_files(parent_repo, prefix, tmp_repo_cache):
        # Cache layout is <2-char prefix>/<suffix>; recreate prefix dirs as needed.
        obj_name = os.path.join(tmp_repo_cache, prefix)
        for suffix in os.listdir(obj_name):
            src_path = os.path.join(tmp_repo_cache, prefix, suffix)
            pre = os.path.join(parent_repo.cache.local.url, prefix)
            if not os.path.exists(pre):
                os.mkdir(pre)
            dest = os.path.join(pre, suffix)
            shutil.move(src_path, dest)
    def __exit__(self, exc_type, exc_value, traceback):
        # Remove the clone unless persist_to() already moved it away.
        if self.is_state_set:
            shutil.rmtree(self._cloned_tmp_dir)
            self._reset_state()
        pass
| dataversioncontrol/dvc | dvc/temp_git_repo.py | Python | apache-2.0 | 4,707 |
import imp
from kivy.uix.floatlayout import FloatLayout
import traceback
from core.failedscreen import FailedScreen
class InfoScreen(FloatLayout):
    """Root layout that loads screen plugins and pages between them.

    Plugins that cannot be loaded (missing dependencies, or errors while
    importing) are collected and shown on a dedicated ``FailedScreen``.
    """
    def __init__(self, **kwargs):
        super(InfoScreen, self).__init__(**kwargs)
        # Get our list of available plugins
        plugins = kwargs["plugins"]
        # We need a list to hold the names of the enabled screens
        self.availablescreens = []
        # and an index so we can loop through them:
        self.index = 0
        # We want to handle failures gracefully so set up some variables
        # variable to hold the FailScreen object (if needed)
        self.failscreen = None
        # Empty lists to track various failures
        dep_fail = []
        failedscreens = []
        # Create a reference to the screenmanager instance
        self.scrmgr = self.ids.iscreenmgr
        # Loop over plugins
        for p in plugins:
            # Set up a tuple to store list of unmet dependencies
            p_dep = (p["name"], [])
            # Until we hit a failure, there are no unmet dependencies
            unmet = False
            # Loop over dependencies and test if they exist
            for d in p["dependencies"]:
                try:
                    imp.find_module(d)
                except ImportError:
                    # We've got at least one unmet dependency for this screen
                    unmet = True
                    p_dep[1].append(d)
            # Can we use the screen?
            if unmet:
                # Add the tuple to our list of unmet dependencies
                dep_fail.append(p_dep)
            # No unmet dependencies so let's try to load the screen.
            else:
                try:
                    plugin = imp.load_module("screen", *p["info"])
                    screen = getattr(plugin, p["screen"])
                    self.scrmgr.add_widget(screen(name=p["name"],
                                                  master=self,
                                                  params=p["params"]))
                # FIX: `except Exception as e` replaces the Python-2-only
                # `except Exception, e` spelling (works on 2.6+ and 3.x).
                except Exception as e:
                    # Add the screen name and error message to our list
                    failedscreens.append((p["name"], repr(e)))
                    traceback.print_exc()
                else:
                    # We can add the screen to our list of available screens.
                    self.availablescreens.append(p["name"])
        # If we've got any failures then let's notify the user.
        if dep_fail or failedscreens:
            # Create the FailedScreen instance
            self.failscreen = FailedScreen(dep=dep_fail,
                                           failed=failedscreens,
                                           name="FAILEDSCREENS")
            # Add it to our screen manager and make sure it's the first screen
            # the user sees.
            self.scrmgr.add_widget(self.failscreen)
            self.scrmgr.current = "FAILEDSCREENS"
    def on_stop(self):
        """Forward the app's stop event to every screen that implements it."""
        for screen in self.scrmgr.screens:
            try:
                screen.on_stop()
            except AttributeError:
                # Screens without an on_stop handler are fine to skip.
                pass
    def next_screen(self, rev=False):
        """Advance to the next (or previous, if ``rev``) available screen."""
        if rev:
            self.scrmgr.transition.direction = "right"
            inc = -1
        else:
            self.scrmgr.transition.direction = "left"
            inc = 1
        self.index = (self.index + inc) % len(self.availablescreens)
        self.scrmgr.current = self.availablescreens[self.index]
| 9and3r/RPi-InfoScreen-Kivy | core/infoscreen.py | Python | gpl-3.0 | 3,546 |
import random
import numpy as np
class ReplayBuffer(object):
  """Fixed-capacity episode store with uniform sampling.

  Episodes live in a dict keyed by slot index.  Slots ``[0, init_length)``
  hold seed episodes (added via ``seed_buffer``) that are never evicted.
  """

  def __init__(self, max_size):
    self.max_size = max_size
    self.cur_size = 0
    self.buffer = {}
    self.init_length = 0

  def __len__(self):
    return self.cur_size

  def seed_buffer(self, episodes):
    """Pre-fill the buffer with eviction-protected episodes."""
    self.init_length = len(episodes)
    self.add(episodes, np.ones(self.init_length))

  def add(self, episodes, *args):
    """Add episodes to buffer, evicting random non-seed slots when full."""
    idx = 0
    while self.cur_size < self.max_size and idx < len(episodes):
      self.buffer[self.cur_size] = episodes[idx]
      self.cur_size += 1
      idx += 1
    if idx < len(episodes):
      remove_idxs = self.remove_n(len(episodes) - idx)
      for remove_idx in remove_idxs:
        self.buffer[remove_idx] = episodes[idx]
        idx += 1
    assert len(self.buffer) == self.cur_size

  def remove_n(self, n):
    """Get n items for removal."""
    # random removal.  FIX: `range` replaces the Python-2-only `xrange`;
    # random.sample semantics are identical and this now runs on Python 3.
    idxs = random.sample(range(self.init_length, self.cur_size), n)
    return idxs

  def get_batch(self, n):
    """Get batch of episodes to train on (uniform, without replacement)."""
    # Second return value is the sampling-probability slot (None = uniform).
    idxs = random.sample(range(self.cur_size), n)
    return [self.buffer[idx] for idx in idxs], None

  def update_last_batch(self, delta):
    """Hook for priority updates; a no-op for the uniform buffer."""
    pass


class PrioritizedReplayBuffer(ReplayBuffer):
  """Replay buffer with exponentiated priorities and pluggable eviction."""

  def __init__(self, max_size, alpha=0.2,
               eviction_strategy='rand'):
    self.max_size = max_size
    self.alpha = alpha
    self.eviction_strategy = eviction_strategy
    assert self.eviction_strategy in ['rand', 'fifo', 'rank']
    self.remove_idx = 0
    self.cur_size = 0
    self.buffer = {}
    self.priorities = np.zeros(self.max_size)
    self.init_length = 0

  def __len__(self):
    return self.cur_size

  def add(self, episodes, priorities, new_idxs=None):
    """Add episodes at ``new_idxs`` (or fresh slots) and set their priorities."""
    if new_idxs is None:
      idx = 0
      new_idxs = []
      while self.cur_size < self.max_size and idx < len(episodes):
        self.buffer[self.cur_size] = episodes[idx]
        new_idxs.append(self.cur_size)
        self.cur_size += 1
        idx += 1
      if idx < len(episodes):
        remove_idxs = self.remove_n(len(episodes) - idx)
        for remove_idx in remove_idxs:
          self.buffer[remove_idx] = episodes[idx]
          new_idxs.append(remove_idx)
          idx += 1
    else:
      assert len(new_idxs) == len(episodes)
      for new_idx, ep in zip(new_idxs, episodes):
        self.buffer[new_idx] = ep
    self.priorities[new_idxs] = priorities
    # Seed episodes always track the current maximum priority.
    self.priorities[0:self.init_length] = np.max(
        self.priorities[self.init_length:])
    assert len(self.buffer) == self.cur_size
    return new_idxs

  def remove_n(self, n):
    """Get n items for removal."""
    assert self.init_length + n <= self.cur_size
    if self.eviction_strategy == 'rand':
      # random removal (py3-safe `range`; same semantics as old `xrange`)
      idxs = random.sample(range(self.init_length, self.cur_size), n)
    elif self.eviction_strategy == 'fifo':
      # overwrite elements in cyclical fashion
      idxs = [
          self.init_length +
          (self.remove_idx + i) % (self.max_size - self.init_length)
          for i in range(n)]
      self.remove_idx = idxs[-1] + 1 - self.init_length
    elif self.eviction_strategy == 'rank':
      # remove lowest-priority indices
      idxs = np.argpartition(self.priorities, n)[:n]
    return idxs

  def sampling_distribution(self):
    """Return the softmax-like sampling distribution over filled slots."""
    p = self.priorities[:self.cur_size]
    # Subtract the max before exp for numerical stability.
    p = np.exp(self.alpha * (p - np.max(p)))
    norm = np.sum(p)
    if norm > 0:
      uniform = 0.0
      p = p / norm * (1 - uniform) + 1.0 / self.cur_size * uniform
    else:
      p = np.ones(self.cur_size) / self.cur_size
    return p

  def get_batch(self, n):
    """Get batch of episodes to train on (priority-weighted, no replacement)."""
    p = self.sampling_distribution()
    idxs = np.random.choice(self.cur_size, size=n, replace=False, p=p)
    self.last_batch = idxs
    return [self.buffer[idx] for idx in idxs], p[idxs]

  def update_last_batch(self, delta):
    """Update last batch idxs with new priority."""
    self.priorities[self.last_batch] = np.abs(delta)
    self.priorities[0:self.init_length] = np.max(
        self.priorities[self.init_length:])
| n3011/deeprl | dataset/replay_v2.py | Python | mit | 4,664 |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from exporters import Exporter
from os.path import basename
class DS5_5(Exporter):
    """Exporter that generates ARM DS-5 Eclipse project files for mbed targets."""
    NAME = 'DS5'

    TARGETS = [
        'LPC1768',
        'LPC11U24',
        'LPC812',
        'UBLOX_C027',
        'ARCH_PRO',
        'RZ_A1H',
    ]

    USING_MICROLIB = [
        'LPC812',
    ]

    # Maps resource attribute name -> DS-5 file-type id used in the template.
    FILE_TYPES = {
        'c_sources':'1',
        'cpp_sources':'8',
        's_sources':'2'
    }

    def get_toolchain(self):
        """Return 'uARM' (microlib) for small targets, plain 'ARM' otherwise."""
        return 'uARM' if (self.target in self.USING_MICROLIB) else 'ARM'

    def generate(self):
        """Render the .project/.cproject/.launch templates for the target."""
        source_files = []
        # FIX: `items()` replaces the Python-2-only `iteritems()` (identical
        # iteration), and `src_path` avoids shadowing the builtin `file`.
        for r_type, type_id in self.FILE_TYPES.items():
            for src_path in getattr(self.resources, r_type):
                source_files.append({
                    'name': basename(src_path), 'type': type_id, 'path': src_path
                })

        ctx = {
            'name': self.program_name,
            'include_paths': self.resources.inc_dirs,
            'scatter_file': self.resources.linker_script,
            'object_files': self.resources.objects + self.resources.libraries,
            'source_files': source_files,
            'symbols': self.get_symbols()
        }
        target = self.target.lower()

        # Project file
        self.gen_file('ds5_5_%s.project.tmpl' % target, ctx, '.project')
        self.gen_file('ds5_5_%s.cproject.tmpl' % target, ctx, '.cproject')
        self.gen_file('ds5_5_%s.launch.tmpl' % target, ctx, 'ds5_%s.launch' % target)
| jferreir/mbed | workspace_tools/export/ds5_5.py | Python | apache-2.0 | 1,997 |
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test debug logging."""
import os
from test_framework.test_framework import SyscoinTestFramework
from test_framework.test_node import ErrorMatch
class LoggingTest(SyscoinTestFramework):
    """Exercises -debuglogfile / -nodebuglogfile handling on a single node."""
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
    def relative_log_path(self, name):
        """Return the path of ``name`` inside node 0's chain-specific datadir."""
        return os.path.join(self.nodes[0].datadir, self.chain, name)
    def run_test(self):
        # test default log file name
        default_log_path = self.relative_log_path("debug.log")
        assert os.path.isfile(default_log_path)
        # test alternative log file name in datadir
        self.restart_node(0, ["-debuglogfile=foo.log"])
        assert os.path.isfile(self.relative_log_path("foo.log"))
        # test alternative log file name outside datadir
        tempname = os.path.join(self.options.tmpdir, "foo.log")
        self.restart_node(0, [f"-debuglogfile={tempname}"])
        assert os.path.isfile(tempname)
        # check that invalid log (relative) will cause error
        invdir = self.relative_log_path("foo")
        invalidname = os.path.join("foo", "foo.log")
        self.stop_node(0)
        exp_stderr = r"Error: Could not open debug log file \S+$"
        self.nodes[0].assert_start_raises_init_error([f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX)
        assert not os.path.isfile(os.path.join(invdir, "foo.log"))
        # check that invalid log (relative) works after path exists
        self.stop_node(0)
        os.mkdir(invdir)
        self.start_node(0, [f"-debuglogfile={invalidname}"])
        assert os.path.isfile(os.path.join(invdir, "foo.log"))
        # check that invalid log (absolute) will cause error
        self.stop_node(0)
        invdir = os.path.join(self.options.tmpdir, "foo")
        invalidname = os.path.join(invdir, "foo.log")
        self.nodes[0].assert_start_raises_init_error([f"-debuglogfile={invalidname}"], exp_stderr, match=ErrorMatch.FULL_REGEX)
        assert not os.path.isfile(os.path.join(invdir, "foo.log"))
        # check that invalid log (absolute) works after path exists
        self.stop_node(0)
        os.mkdir(invdir)
        self.start_node(0, [f"-debuglogfile={invalidname}"])
        assert os.path.isfile(os.path.join(invdir, "foo.log"))
        # check that -nodebuglogfile disables logging
        self.stop_node(0)
        os.unlink(default_log_path)
        assert not os.path.isfile(default_log_path)
        self.start_node(0, ["-nodebuglogfile"])
        assert not os.path.isfile(default_log_path)
        # just sanity check no crash here
        self.restart_node(0, [f"-debuglogfile={os.devnull}"])
if __name__ == '__main__':
    LoggingTest().main()
| syscoin/syscoin | test/functional/feature_logging.py | Python | mit | 2,940 |
from __future__ import absolute_import
from rest_framework.response import Response
from six.moves import range
from sentry.app import tsdb
from sentry.api.base import DocSection, StatsMixin
from sentry.api.bases.team import TeamEndpoint
from sentry.models import Project
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario('RetrieveEventCountsTeam')
def retrieve_event_counts_team(runner):
    """Doc scenario: issue the GET request showcased in the API docs."""
    runner.request(
        method='GET',
        path='/teams/%s/%s/stats/' % (
            runner.org.slug, runner.default_team.slug)
    )
class TeamStatsEndpoint(TeamEndpoint, StatsMixin):
    """Aggregates per-project tsdb event counts across a team."""
    doc_section = DocSection.TEAMS
    @attach_scenarios([retrieve_event_counts_team])
    def get(self, request, team):
        """
        Retrieve Event Counts for a Team
        ````````````````````````````````
        .. caution::
           This endpoint may change in the future without notice.
        Return a set of points representing a normalized timestamp and the
        number of events seen in the period.
        Query ranges are limited to Sentry's configured time-series
        resolutions.
        :pparam string organization_slug: the slug of the organization.
        :pparam string team_slug: the slug of the team.
        :qparam string stat: the name of the stat to query (``"received"``,
                             ``"rejected"``)
        :qparam timestamp since: a timestamp to set the start of the query
                                 in seconds since UNIX epoch.
        :qparam timestamp until: a timestamp to set the end of the query
                                 in seconds since UNIX epoch.
        :qparam string resolution: an explicit resolution to search
                                   for (eg: ``10s``).  This should not be
                                   used unless you are familiar with Sentry's
                                   internals as it's restricted to pre-defined
                                   values.
        :auth: required
        """
        projects = Project.objects.get_for_user(
            team=team,
            user=request.user,
        )
        if not projects:
            return Response([])
        data = list(tsdb.get_range(
            model=tsdb.models.project,
            keys=[p.id for p in projects],
            **self._parse_args(request)
        ).values())
        summarized = []
        # Every per-project series shares the same bucket timestamps, so the
        # first series supplies them while totals are summed across projects.
        for n in range(len(data[0])):
            total = sum(d[n][1] for d in data)
            summarized.append((data[0][n][0], total))
        return Response(summarized)
| alexm92/sentry | src/sentry/api/endpoints/team_stats.py | Python | bsd-3-clause | 2,574 |
# PyVision License
#
# Copyright (c) 2006-2008 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class FaceRecognizer:
    ''' Base class for a face recognition algorithm. Output is a similarity score. '''
    def __init__(self):
        # Each entry is a list: [img, rect, eyes, id].
        self.training_data = []
    def getTrainingMatches(self):
        '''
        Returns a list of all pairs of images in the training set that
        are of the same person (same id).
        '''
        matches = []
        n = len(self.training_data)
        for i in range(n):
            # j always exceeds i here, so the old `if i == j: continue`
            # guard was unreachable and has been removed.
            for j in range(i + 1, n):
                if self.training_data[i][3] == self.training_data[j][3]:
                    matches.append([self.training_data[i], self.training_data[j]])
        return matches
    def getTrainingNonMatches(self):
        '''
        Returns a list of all pairs in the training images that are of
        different people.
        '''
        nonmatches = []
        n = len(self.training_data)
        for i in range(n):
            for j in range(i + 1, n):
                if self.training_data[i][3] != self.training_data[j][3]:
                    nonmatches.append([self.training_data[i], self.training_data[j]])
        return nonmatches
    def addTraining(self, img, rect=None, eyes=None, id=None):
        '''Adds a training face for the algorithm.'''
        self.training_data.append([img, rect, eyes, id])
    def distance(self, fr1, fr2):
        '''Compute the similarity of two faces; subclasses must override.'''
        raise NotImplementedError()
    def computeFaceRecord(self, im, rect=None, eyes=None):
        '''
        Given an image and face location, compute a face record.
        @param im: image containing the face
        @param rect: specifies the location of the face in the image, and is
            typically defined as a detection rectangle or eye coordinates.
        @returns: data that represents the identity of the face, such as
            eigen coeffecients for PCA.
        '''
        raise NotImplementedError()
| tigerking/pyvision | src/pyvision/face/FaceRecognizer.py | Python | bsd-3-clause | 3,652 |
#
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import os
import tempfile
from functools import wraps
MODULE_LIST = ('cli', 'hooks', 'services', 'tasks',
'gfapi', 'storagedev', 'api')
def makePublic(func):
    """Tag ``func`` for export through supervdsm.

    The returned wrapper behaves exactly like ``func`` but carries a
    ``superVdsm`` marker that listPublicFunctions() looks for.
    """
    @wraps(func)
    def publicCall(*args, **kwargs):
        return func(*args, **kwargs)
    publicCall.superVdsm = True
    return publicCall
def listPublicFunctions():
    """Discover gluster verbs exposed to supervdsm.

    Imports each module in MODULE_LIST and collects callables tagged by
    @makePublic, renaming ``name`` to ``glusterName``.  Modules that fail
    to import are silently skipped.
    """
    methods = []
    for modName in MODULE_LIST:
        try:
            module = __import__('gluster.' + modName, fromlist=['gluster'])
            for name in dir(module):
                func = getattr(module, name)
                if getattr(func, 'superVdsm', False):
                    funcName = 'gluster%s%s' % (name[0].upper(), name[1:])
                    methods.append((funcName, func))
        except ImportError:
            pass
    return methods
def safeWrite(fileName, content):
    """Write ``content`` (bytes) to ``fileName`` atomically.

    The data goes to a temporary file in the destination directory first
    and is then renamed over the target, so readers never see a partial
    file.
    """
    directory = os.path.dirname(fileName)
    tmp = tempfile.NamedTemporaryFile(dir=directory, delete=False)
    with tmp:
        tmp.write(content)
    os.rename(tmp.name, fileName)
| kvaps/vdsm | vdsm/gluster/__init__.py | Python | gpl-2.0 | 1,886 |
from datetime import datetime
from project.application.entities import db
class Material(db.Model):
    """A lesson material: an uploaded document plus free-form information,
    linked to one lesson and one subject.
    """
    __tablename__ = "materials"
    id = db.Column(db.Integer, primary_key=True)
    document = db.Column(db.Text, nullable=True)
    information = db.Column(db.Text, nullable=True)
    lesson_id = db.Column(db.Integer, db.ForeignKey('lessons.id'))
    subject_id = db.Column(db.Integer, db.ForeignKey('subjects.id'))
    # BUG FIX: pass the callable, not its result.  ``datetime.utcnow()`` was
    # evaluated once at import time, stamping every row with the same moment;
    # with the callable, SQLAlchemy evaluates it per INSERT.
    creation_date = db.Column(db.DateTime, default=datetime.utcnow)
| Warprobot/diplom | project/application/entities/materials/model.py | Python | cc0-1.0 | 540 |
import cocos
import pyglet
from pyglet import input
import game
import constants
control = None
def init():
    """Create the module-level controller singleton and return it."""
    global control
    #control = GamepadController()
    control = PlayerController()
    return control
def get_state():
    """Return the current input-state dict of the active controller."""
    return control.state
class Controller(cocos.layer.Layer):
    """Base class for input handlers"""
    def __init__(self):
        """Initialise the shared input-state dict subclasses mutate."""
        super(Controller, self).__init__()
        # The state is what the player ultimately wants to achieve, after keybinds. This is what is sent to the server from clients
        self.state = {
            "movement": [0, 0], # Vector target direction
            "aim": [0, 0], # The point the player is aiming towards
            "attacking": False,
        }
        self.state["updated"] = False # Do we need to send an update to the server?
class PlayerController(Controller):
    """Keyboard + mouse"""
    is_event_handler = True
    def on_key_press(self, key, modifiers):
        """Translate a WASD press into the movement vector; True if handled."""
        skey = pyglet.window.key.symbol_string(key) # String representation of the key
        #print "[CONTROLS] Key pressed: ", skey
        updated = False
        if skey in ("A", "D", "W", "S"):
            updated = True
            self.state["updated"] = True
        if skey == "A":
            self.state["movement"][0] -= 1
        elif skey == "D":
            self.state["movement"][0] += 1
        elif skey == "W":
            self.state["movement"][1] += 1
        elif skey == "S":
            self.state["movement"][1] -= 1
        if updated:
            player = game.Game.get_player()
            if player:
                player.update_input(self.state)
            return True
        else:
            return False
    def on_key_release(self, key, modifiers):
        """Undo the movement contribution of a released WASD key."""
        skey = pyglet.window.key.symbol_string(key) # String representation of the key
        #print "[CONTROLS] Key released: ", skey
        updated = False
        if skey in ("A", "D", "W", "S"):
            updated = True
            self.state["updated"] = True
        if skey == "A":
            self.state["movement"][0] += 1
        elif skey == "D":
            self.state["movement"][0] -= 1
        elif skey == "W":
            self.state["movement"][1] -= 1
        elif skey == "S":
            self.state["movement"][1] += 1
        if updated:
            player = game.Game.get_player()
            if player:
                player.update_input(self.state)
            return True
        else:
            return False
    def on_mouse_motion(self, x, y, dx, dy):
        """Update the aim point relative to the player's world position."""
        self.state["updated"] = True
        player = game.Game.get_player()
        if not player:
            return
        px, py = player.position
        px *= constants.PIXEL_TO_METER
        py *= constants.PIXEL_TO_METER
        fx, fy = game.scroller.s_layer.point_to_local((x - px, y - py)) # TODO: game.scroller; nasty
        self.state["aim"][0] = fx
        self.state["aim"][1] = fy
        player.update_input(self.state)
        return True
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        """Dragging aims the same way plain motion does."""
        self.state["updated"] = True
        self.on_mouse_motion(x, y, dx, dy)
    def on_mouse_press(self, x, y, buttons, modifiers):
        """Any mouse button starts attacking."""
        self.state["updated"] = True
        self.state["attacking"] = True
        #print "[CONTROLS] Mouse key pressed: ", a, b, c,
    def on_mouse_release(self, x, y, buttons, modifiers):
        """Releasing the mouse stops attacking."""
        self.state["updated"] = True
        self.state["attacking"] = False
    def on_joyaxis_motion(self, axis, value):
        # Joystick input is ignored by the keyboard/mouse controller.
        #print "Recieved joystick input"
        pass
class GamepadController(Controller):
    """Gamepad input: left stick moves, right stick aims, button 5 attacks."""
    def __init__(self):
        super(GamepadController, self).__init__()
        # NOTE(review): scheduling a no-op presumably keeps this layer's
        # update loop (and thus event dispatch) alive -- confirm.
        self.schedule(lambda x: 0)
    def on_enter(self):
        """Open the first joystick and hook up its event callbacks."""
        super(GamepadController, self).on_enter()
        self.joy = input.get_joysticks()[0]
        self.joy.on_joyaxis_motion = self.on_joyaxis_motion
        self.joy.on_joyhat_motion = self.on_joyhat_motion
        self.joy.on_joybutton_press = self.on_joybutton_press
        self.joy.on_joybutton_release = self.on_joybutton_release
        self.joy.open()
    def on_joyaxis_motion(self, joystick, axis, value):
        """Map x/y axes to movement and z/rz axes to aim (y axes inverted)."""
        #print "Recieved joystick input: ", repr(axis), repr(value)
        updated = False
        if axis in ("x", "y"): # main directional axis is changed, update movement
            self.state["updated"] = True
            updated = True
            x, y = self.state["movement"]
            if axis == "x":
                x = value
            else:
                y = value * -1 # y-axis is inverted
            self.state["movement"] = x, y
        elif axis in ("z", "rz"): # secondary directional axis is changed, update aim
            if abs(value) < 0.005: # clamp values that result from leaving the dpad at center
                return
            self.state["updated"] = True
            updated = True
            x, y = self.state["aim"]
            if axis == "z":
                x = value
            else:
                y = value * -1 # y-axis is inverted
            self.state["aim"] = x, y
        if updated:
            player = game.Game.get_player()
            if player:
                player.update_input(self.state)
    def on_joyhat_motion(self, joystick, hat_x, hat_y):
        # Hat/d-pad input is currently unused.
        #print "Recieved joystick input: ", hat_x, hat_y
        pass
    def on_joybutton_press(self, joystick, button):
        """Button 5 starts attacking."""
        #print "joybutton press: ", button
        if button == 5:
            self.state["attacking"] = True
            player = game.Game.get_player()
            if player:
                player.update_input(self.state)
    def on_joybutton_release(self, joystick, button):
        """Button 5 stops attacking."""
        #print "joybutton release: ", button
        if button == 5:
            self.state["attacking"] = False
            player = game.Game.get_player()
            if player:
                player.update_input(self.state)
| BadlybadGames/RPGame-3.0 | src/interface/controls.py | Python | mit | 5,979 |
import base64
import logging
import os
import random
import uuid
from base64 import b64encode
from datetime import datetime, timedelta, timezone
from io import BytesIO
from tempfile import NamedTemporaryFile
from uuid import uuid4
import pytz
from django.conf import settings
from django.core.files import File
from django.db import models
from django.db.models import Count, Sum
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from django_renderpdf.helpers import render_pdf
from lxml import etree
from lxml.builder import E
from zeep.exceptions import Fault
from . import clients, crypto, exceptions, parsers, serializers
logger = logging.getLogger(__name__)

# Argentina's local timezone, derived from pytz's country table.
TZ_AR = pytz.timezone(pytz.country_timezones['ar'][0])

# Issuer VAT conditions, per AFIP resolution 1415, annex II:
# http://www.afip.gov.ar/afip/resol1415_anexo2.html
VAT_CONDITIONS = (
    'IVA Responsable Inscripto',
    'IVA Responsable No Inscripto',
    'IVA Exento',
    'No Responsable IVA',
    'Responsable Monotributo',
)

# Recipient (client) VAT conditions.
# NOTE: If you find a VAT condition not listed here, please open an issue
# and include a reference to where it's defined.
CLIENT_VAT_CONDITIONS = (
    'IVA Responsable Inscripto',
    'IVA Responsable No Inscripto',
    'IVA Sujeto Exento',
    'Consumidor Final',
    'Responsable Monotributo',
    'Proveedor del Exterior',
    'Cliente del Exterior',
    'IVA Liberado - Ley Nº 19.640',
    'IVA Responsable Inscripto - Agente de Percepción',
    'Monotributista Social',
    'IVA no alcanzado',
)
def populate_all():
    """Fetch and store all metadata from the AFIP."""
    metadata_models = (
        ReceiptType,
        ConceptType,
        DocumentType,
        VatType,
        TaxType,
        CurrencyType,
    )
    # Populate each metadata table in turn, in the same order as before.
    for model in metadata_models:
        model.objects.populate()
def check_response(response):
    """
    Raise if a WS response carries an error; otherwise do nothing.

    AFIP allows us to create valid tickets with invalid key/CUIT pairs, so
    we can end up with tickets that fail on any service. Due to how zeep
    works, these errors can't easily be caught at some middleware level, so
    each response is checked explicitly and a readable exception is raised.
    """
    if not response.Errors:
        return
    raise exceptions.AfipException(response)
def first_currency():
    """
    Return the primary key of the 'PES' currency, if one is loaded.

    The `default` parameter of a foreign key *MUST* be a primary key (and
    not an instance), else migrations break. This helper exists solely for
    that purpose; it yields None when the currency table is empty.
    """
    row = CurrencyType.objects.filter(code='PES').first()
    return row.pk if row else None
def _get_storage_from_settings(setting_name=None):
    """
    Resolve a file-storage backend from a named settings entry.

    When the setting is absent or empty, fall back to instantiating the
    project's ``DEFAULT_FILE_STORAGE``.
    """
    dotted_path = getattr(settings, setting_name, None)
    if dotted_path:
        # NOTE: the configured object is returned as imported (not called),
        # whereas the default below is instantiated.
        return import_string(dotted_path)
    return import_string(settings.DEFAULT_FILE_STORAGE)()
class GenericAfipTypeManager(models.Manager):
    """Default Manager for GenericAfipType."""

    def __init__(self, service_name, type_name):
        """
        Create a new Manager instance for a GenericAfipType.

        This should generally only be required to manually populate a
        single type with upstream data.
        """
        super().__init__()
        self.__service_name = service_name
        self.__type_name = type_name

    def populate(self, ticket=None):
        """
        Populate the database with types retrieved from the AFIP.

        If no ticket is provided, the most recent available one will be
        used.
        """
        ticket = ticket or AuthTicket.objects.get_any_active('wsfe')
        client = clients.get_client('wsfe', ticket.owner.is_sandboxed)
        endpoint = getattr(client.service, self.__service_name)
        response = endpoint(serializers.serialize_ticket(ticket))
        check_response(response)
        upstream_entries = getattr(response.ResultGet, self.__type_name)
        for entry in upstream_entries:
            self.get_or_create(
                code=entry.Id,
                description=entry.Desc,
                valid_from=parsers.parse_date(entry.FchDesde),
                valid_to=parsers.parse_date(entry.FchHasta),
            )
class GenericAfipType(models.Model):
    """An abstract class for several of AFIP's metadata types."""
    # Short AFIP-assigned identifier for this entry.
    code = models.CharField(
        _('code'),
        max_length=3,
    )
    description = models.CharField(
        _('description'),
        max_length=250,
    )
    # Validity window as reported by AFIP; either end may be absent.
    valid_from = models.DateField(
        _('valid from'),
        null=True,
        blank=True,
    )
    valid_to = models.DateField(
        _('valid until'),
        null=True,
        blank=True,
    )

    def __str__(self):
        return self.description

    class Meta:
        abstract = True
class ReceiptType(GenericAfipType):
    """
    An AFIP receipt type.

    See the AFIP's documentation for details on each receipt type.
    """
    # Populated from AFIP's FEParamGetTiposCbte endpoint.
    objects = GenericAfipTypeManager('FEParamGetTiposCbte', 'CbteTipo')

    class Meta:
        verbose_name = _('receipt type')
        verbose_name_plural = _('receipt types')
class ConceptType(GenericAfipType):
    """
    An AFIP concept type.

    See the AFIP's documentation for details on each concept type.
    """
    # Populated from AFIP's FEParamGetTiposConcepto endpoint.
    objects = GenericAfipTypeManager('FEParamGetTiposConcepto', 'ConceptoTipo')

    class Meta:
        verbose_name = _('concept type')
        verbose_name_plural = _('concept types')
class DocumentType(GenericAfipType):
    """
    An AFIP document type.

    See the AFIP's documentation for details on each document type.
    """
    # Populated from AFIP's FEParamGetTiposDoc endpoint.
    objects = GenericAfipTypeManager('FEParamGetTiposDoc', 'DocTipo')

    class Meta:
        verbose_name = _('document type')
        verbose_name_plural = _('document types')
class VatType(GenericAfipType):
    """
    An AFIP VAT type.

    See the AFIP's documentation for details on each VAT type.
    """
    # Populated from AFIP's FEParamGetTiposIva endpoint.
    objects = GenericAfipTypeManager('FEParamGetTiposIva', 'IvaTipo')

    class Meta:
        verbose_name = _('vat type')
        verbose_name_plural = _('vat types')
class TaxType(GenericAfipType):
    """
    An AFIP tax type.

    See the AFIP's documentation for details on each tax type.
    """
    # Populated from AFIP's FEParamGetTiposTributos endpoint.
    objects = GenericAfipTypeManager('FEParamGetTiposTributos', 'TributoTipo')

    class Meta:
        verbose_name = _('tax type')
        verbose_name_plural = _('tax types')
class CurrencyType(GenericAfipType):
    """
    An AFIP curreny type.

    See the AFIP's documentation for details on each currency type.
    """
    # Populated from AFIP's FEParamGetTiposMonedas endpoint.
    objects = GenericAfipTypeManager('FEParamGetTiposMonedas', 'Moneda')

    def __str__(self):
        # e.g. "Peso Argentino (PES)"
        return '{} ({})'.format(self.description, self.code)

    class Meta:
        verbose_name = _('currency type')
        verbose_name_plural = _('currency types')
class TaxPayer(models.Model):
    """
    Represents an AFIP TaxPayer.

    This class has the bare minimum required for most AFIP services.

    Note that multiple instances of this object can actually represent the
    same taxpayer, each using a different key.
    """
    name = models.CharField(
        _('name'),
        max_length=32,
        help_text=_('A friendly name to recognize this taxpayer.'),
    )
    # PEM private key used to sign authentication requests.
    key = models.FileField(
        _('key'),
        upload_to='afip/taxpayers/keys/',
        storage=_get_storage_from_settings('AFIP_KEY_STORAGE'),
        blank=True,
        null=True,
    )
    # X.509 certificate issued by AFIP for the above key.
    certificate = models.FileField(
        _('certificate'),
        upload_to='afip/taxpayers/certs/',
        storage=_get_storage_from_settings('AFIP_CERT_STORAGE'),
        blank=True,
        null=True,
    )
    cuit = models.BigIntegerField(
        _('cuit'),
    )
    is_sandboxed = models.BooleanField(
        _('is sandboxed'),
        help_text=_(
            'Indicates if this taxpayer interacts with the sandbox servers '
            'rather than the production servers'
        )
    )
    certificate_expiration = models.DateTimeField(
        _('certificate expiration'),
        editable=False,
        null=True,  # Either no cert, or and old TaxPayer
        help_text=_(
            'Stores expiration for the current certificate. Note that this '
            'field is updated pre-save, so the value may be invalid for '
            'unsaved models.'
        ),
    )
    active_since = models.DateField(
        _('active since'),
        help_text=_('Date since which this taxpayer has been legally active.'),
    )

    @property
    def certificate_object(self):
        """
        Returns the certificate as an OpenSSL object

        Returns the certificate as an OpenSSL object (rather than as a file
        object), or None when no certificate is attached.
        """
        if not self.certificate:
            return None
        self.certificate.seek(0)
        return crypto.parse_certificate(self.certificate.read())

    def get_certificate_expiration(self):
        """
        Gets the certificate expiration from the certificate

        Gets the certificate expiration from the certificate file. Note that
        this value is stored into ``certificate_expiration`` when an instance
        is saved, so you should generally prefer that method (since this one
        requires reading and parsing the entire certificate).
        """
        # notAfter comes back as an ASN.1 time string, e.g. b'20290101000000Z'.
        datestring = self.certificate_object.get_notAfter().decode()
        dt = datetime.strptime(datestring, '%Y%m%d%H%M%SZ')
        return dt.replace(tzinfo=timezone.utc)

    def generate_key(self, force=False):
        """
        Creates a key file for this TaxPayer

        Creates a key file for this TaxPayer if it does not have one, and
        immediately saves it.

        Returns True if and only if a key was created.
        """
        if self.key and not force:
            logger.warning(
                'Tried to generate key for a taxpayer that already had one'
            )
            return False
        # The key is written to a temp file, then persisted via the model's
        # storage backend while the temp file is still open.
        with NamedTemporaryFile(suffix='.key') as file_:
            crypto.create_key(file_)
            self.key = File(file_, name='{}.key'.format(uuid.uuid4().hex))
            self.save()
        return True

    def generate_csr(self, basename='djangoafip'):
        """
        Creates a CSR for this TaxPayer's key

        Creates a file-like object that contains the CSR which can be used to
        request a new certificate from AFIP.
        """
        csr = BytesIO()
        crypto.create_csr(
            self.key.file,
            self.name,
            # A timestamp suffix keeps successive CSR common names unique.
            '{}{}'.format(basename, int(datetime.now().timestamp())),
            'CUIT {}'.format(self.cuit),
            csr,
        )
        csr.seek(0)
        return csr

    def create_ticket(self, service):
        """Create an AuthTicket for a given service."""
        ticket = AuthTicket(owner=self, service=service)
        ticket.authorize()
        return ticket

    def get_ticket(self, service):
        """Return an existing AuthTicket for a given service."""
        # Only unexpired tickets qualify; returns None when there are none.
        return self.auth_tickets \
            .filter(expires__gt=datetime.now(timezone.utc), service=service) \
            .last()

    def get_or_create_ticket(self, service):
        """
        Return or create a new AuthTicket for a given serivce.

        Return an existing ticket for a service if one is available, otherwise,
        create a new one and return that.

        This is generally the preferred method of obtaining tickets for any
        service.
        """
        return self.get_ticket(service) or self.create_ticket(service)

    def fetch_points_of_sales(self, ticket=None):
        """
        Fetch all point of sales objects.

        Fetch all point of sales from the WS and store (or update) them
        locally.

        Returns a list of tuples with the format (pos, created,).
        """
        ticket = ticket or self.get_or_create_ticket('wsfe')
        client = clients.get_client('wsfe', self.is_sandboxed)
        response = client.service.FEParamGetPtosVenta(
            serializers.serialize_ticket(ticket),
        )
        check_response(response)
        results = []
        for pos_data in response.ResultGet.PtoVenta:
            results.append(PointOfSales.objects.update_or_create(
                number=pos_data.Nro,
                owner=self,
                defaults={
                    'issuance_type': pos_data.EmisionTipo,
                    # NOTE(review): this marks the POS as blocked when AFIP
                    # reports Bloqueado == 'N' ("No"); that looks inverted —
                    # confirm against the WS documentation.
                    'blocked': pos_data.Bloqueado == 'N',
                    'drop_date': parsers.parse_date(pos_data.FchBaja),
                }
            ))
        return results

    def __repr__(self):
        return '<TaxPayer {}: {}, CUIT {}>'.format(
            self.pk,
            self.name,
            self.cuit,
        )

    def __str__(self):
        return str(self.cuit)

    class Meta:
        verbose_name = _('taxpayer')
        verbose_name_plural = _('taxpayers')
class TaxPayerProfile(models.Model):
    """
    Metadata about a taxpayer used for printable receipts.

    None of this information is required or sent to the AFIP when notifying
    about receipt generation. It is used *only* for PDF generation.

    Most of these can be overriden per-receipt as this class is a placeholder
    for default values.
    """
    taxpayer = models.OneToOneField(
        TaxPayer,
        related_name='profile',
        verbose_name=_('taxpayer'),
        on_delete=models.CASCADE,
    )
    issuing_name = models.CharField(
        max_length=128,
        verbose_name=_('issuing name'),
    )
    issuing_address = models.TextField(
        _('issuing address'),
    )
    issuing_email = models.CharField(
        max_length=128,
        verbose_name=_('issuing email'),
        blank=True,
        null=True,
    )
    # NOTE(review): choices is a generator, which can only be iterated
    # once; confirm the Django version in use normalizes it (a tuple
    # would be safer).
    vat_condition = models.CharField(
        max_length=48,
        choices=((condition, condition,) for condition in VAT_CONDITIONS),
        verbose_name=_('vat condition'),
    )
    gross_income_condition = models.CharField(
        max_length=48,
        verbose_name=_('gross income condition'),
    )
    sales_terms = models.CharField(
        max_length=48,
        verbose_name=_('sales terms'),
        help_text=_(
            'The terms of the sale printed onto receipts by default '
            '(eg: single payment, checking account, etc).'
        ),
    )

    class Meta:
        verbose_name = _('taxpayer profile')
        verbose_name_plural = _('taxpayer profiles')
class TaxPayerExtras(models.Model):
    """Holds optional extra data for taxpayers."""
    taxpayer = models.OneToOneField(
        TaxPayer,
        related_name='extras',
        verbose_name=_('taxpayer'),
        on_delete=models.CASCADE,
    )
    logo = models.ImageField(
        verbose_name=_('pdf file'),
        upload_to='afip/taxpayers/logos/',
        storage=_get_storage_from_settings('AFIP_LOGO_STORAGE'),
        blank=True,
        null=True,
        help_text=_('A logo to use when generating printable receipts.'),
    )

    @property
    def logo_as_data_uri(self):
        """This TaxPayer's logo as a data uri."""
        # The image subtype is taken from the file extension, e.g. ".png".
        _, ext = os.path.splitext(self.logo.file.name)
        with self.logo.open() as f:
            data = base64.b64encode(f.read())
        return 'data:image/{};base64,{}'.format(
            ext[1:],  # Remove the leading dot.
            data.decode()
        )

    class Meta:
        verbose_name = _('taxpayer extras')
        verbose_name_plural = _('taxpayers extras')
class PointOfSales(models.Model):
    """
    Represents an existing AFIP point of sale.

    Points of sales need to be created via AFIP's web interface and it is
    recommended that you use :meth:`~.TaxPayer.fetch_points_of_sales` to fetch
    these programatically.

    Note that deleting or altering these models will not affect upstream point
    of sales.
    """
    number = models.PositiveSmallIntegerField(
        _('number'),
    )
    issuance_type = models.CharField(
        _('issuance type'),
        max_length=24,
        # Fixed typo in user-facing text: "thie" -> "this".
        help_text='Indicates if this POS emits using CAE and CAEA.'
    )
    blocked = models.BooleanField(
        _('blocked'),
    )
    # Date on which this POS was dropped upstream, if any.
    drop_date = models.DateField(
        _('drop date'),
        null=True,
        blank=True,
    )
    owner = models.ForeignKey(
        TaxPayer,
        related_name='points_of_sales',
        verbose_name=_('owner'),
        on_delete=models.CASCADE,
    )

    def __str__(self):
        return str(self.number)

    class Meta:
        # A POS number is only unique per owning taxpayer.
        unique_together = (
            ('number', 'owner'),
        )
        verbose_name = _('point of sales')
        verbose_name_plural = _('points of sales')
class AuthTicketManager(models.Manager):
    """Manager that can fetch (or mint) active tickets for a service."""

    def get_any_active(self, service):
        """Return any still-valid ticket for ``service``.

        When none exists, a new one is created for a randomly-picked
        taxpayer; raises AuthenticationError when there are no taxpayers
        at all.
        """
        existing = AuthTicket.objects.filter(
            token__isnull=False,
            expires__gt=datetime.now(timezone.utc),
            service=service,
        ).first()
        if existing:
            return existing
        taxpayer = TaxPayer.objects.order_by('?').first()
        if not taxpayer:
            raise exceptions.AuthenticationError(
                _('There are no taxpayers to generate a ticket.'),
            )
        return taxpayer.create_ticket(service)
class AuthTicket(models.Model):
    """
    An AFIP Authorization ticket.

    This is a signed ticket used to communicate with AFIP's webservices.

    Applications should not generally have to deal with these tickets
    themselves; most services will find or create one as necessary.
    """
    # These are plain functions (no ``self``) used as field defaults below.
    def default_generated():
        return datetime.now(TZ_AR)

    def default_expires():
        # NOTE: despite the variable name, tickets live 12 hours.
        tomorrow = datetime.now(TZ_AR) + timedelta(hours=12)
        return tomorrow

    def default_unique_id():
        # Random id within the positive signed 32-bit range.
        return random.randint(0, 2147483647)

    owner = models.ForeignKey(
        TaxPayer,
        verbose_name=_('owner'),
        related_name='auth_tickets',
        on_delete=models.CASCADE,
    )
    unique_id = models.IntegerField(
        _('unique id'),
        default=default_unique_id,
    )
    generated = models.DateTimeField(
        _('generated'),
        default=default_generated,
    )
    expires = models.DateTimeField(
        _('expires'),
        default=default_expires,
    )
    service = models.CharField(
        _('service'),
        max_length=6,
        help_text=_('Service for which this ticket has been authorized'),
    )
    token = models.TextField(
        _('token'),
    )
    signature = models.TextField(
        _('signature'),
    )

    objects = AuthTicketManager()

    # XPath locations of the credentials in AFIP's login response.
    TOKEN_XPATH = '/loginTicketResponse/credentials/token'
    SIGN_XPATH = '/loginTicketResponse/credentials/sign'

    def __create_request_xml(self):
        """Build the loginTicketRequest XML document for this ticket."""
        request_xml = (
            E.loginTicketRequest(
                {'version': '1.0'},
                E.header(
                    E.uniqueId(str(self.unique_id)),
                    E.generationTime(
                        serializers.serialize_datetime(self.generated)
                    ),
                    E.expirationTime(
                        serializers.serialize_datetime(self.expires)
                    ),
                ),
                E.service(self.service)
            )
        )
        return etree.tostring(request_xml, pretty_print=True)

    def __sign_request(self, request):
        """Sign the request XML with the owner's certificate and key."""
        self.owner.certificate.file.open()
        cert = self.owner.certificate.file.read().decode()
        self.owner.certificate.file.close()
        self.owner.key.file.open()
        key = self.owner.key.file.read()
        self.owner.key.file.close()
        return crypto.create_embeded_pkcs7_signature(request, cert, key)

    def authorize(self):
        """Send this ticket to AFIP for authorization."""
        request = self.__create_request_xml()
        request = self.__sign_request(request)
        request = b64encode(request).decode()
        client = clients.get_client('wsaa', self.owner.is_sandboxed)
        try:
            raw_response = client.service.loginCms(request)
        except Fault as e:
            # Map AFIP's (Spanish) fault strings onto typed exceptions.
            if str(e) == 'Certificado expirado':
                raise exceptions.CertificateExpired(str(e)) from e
            if str(e) == 'Certificado no emitido por AC de confianza':
                raise exceptions.UntrustedCertificate(str(e)) from e
            raise exceptions.AuthenticationError(str(e)) from e
        response = etree.fromstring(raw_response.encode('utf-8'))
        self.token = response.xpath(self.TOKEN_XPATH)[0].text
        self.signature = response.xpath(self.SIGN_XPATH)[0].text
        self.save()

    def __str__(self):
        return str(self.unique_id)

    class Meta:
        verbose_name = _('authorization ticket')
        verbose_name_plural = _('authorization tickets')
class ReceiptQuerySet(models.QuerySet):
    """
    The default queryset obtains when querying via :class:`~.ReceiptManager`.
    """

    def _assign_numbers(self):
        """
        Assign numbers in preparation for validating these receipts.

        WARNING: Don't call the method manually unless you know what you're
        doing!
        """
        # All receipts share POS and type here (enforced by check_groupable),
        # so the first one is representative.
        first = self.select_related('point_of_sales', 'receipt_type').first()
        next_num = Receipt.objects.fetch_last_receipt_number(
            first.point_of_sales,
            first.receipt_type,
        ) + 1
        for receipt in self.filter(receipt_number__isnull=True):
            # Atomically update receipt number
            Receipt.objects.filter(
                pk=receipt.id,
                receipt_number__isnull=True,
            ).update(
                receipt_number=next_num,
            )
            next_num += 1

    def check_groupable(self):
        """
        Checks that all receipts returned by this queryset are groupable.

        "Groupable" means that they can be validated together: they have the
        same POS and receipt type.

        Returns the same queryset is all receipts are groupable, otherwise,
        raises :class:`~.CannotValidateTogether`.
        """
        types = self.aggregate(
            poses=Count('point_of_sales_id', ),
            types=Count('receipt_type'),
        )
        # NOTE(review): these Counts are row counts, not distinct counts,
        # and ``set(...) > {1}`` is a strict-superset test that e.g. counts
        # of (2, 2) would pass silently. This looks suspect — confirm the
        # intended semantics (distinct=True plus an inequality test?).
        if set(types.values()) > {1}:
            raise exceptions.CannotValidateTogether()
        return self

    def validate(self, ticket=None):
        """
        Validates all receipts matching this queryset.

        Note that, due to how AFIP implements its numbering, this method is not
        thread-safe, or even multiprocess-safe.

        Because of this, it is possible that not all instances matching this
        queryset are validated properly. Obviously, only successfully validated
        receipts will be updated.

        Returns a list of errors as returned from AFIP's webservices. An
        exception is not raised because partial failures are possible.

        Receipts that succesfully validate will have a
        :class:`~.ReceiptValidation` object attatched to them with a validation
        date and CAE information.

        Already-validated receipts are ignored.

        Attempting to validate an empty queryset will simply return an empty
        list.
        """
        # Skip any already-validated ones:
        qs = self.filter(validation__isnull=True).check_groupable()
        if qs.count() == 0:
            return []
        qs.order_by('issued_date', 'id')._assign_numbers()
        return qs._validate(ticket)

    def _validate(self, ticket=None):
        """Send this queryset's receipts to AFIP and record the results."""
        first = self.first()
        ticket = (
            ticket or
            first.point_of_sales.owner.get_or_create_ticket('wsfe')
        )
        client = clients.get_client(
            'wsfe', first.point_of_sales.owner.is_sandboxed
        )
        response = client.service.FECAESolicitar(
            serializers.serialize_ticket(ticket),
            serializers.serialize_multiple_receipts(self),
        )
        check_response(response)
        errs = []
        for cae_data in response.FeDetResp.FECAEDetResponse:
            if cae_data.Resultado == ReceiptValidation.RESULT_APPROVED:
                validation = ReceiptValidation.objects.create(
                    result=cae_data.Resultado,
                    cae=cae_data.CAE,
                    cae_expiration=parsers.parse_date(cae_data.CAEFchVto),
                    receipt=self.get(
                        receipt_number=cae_data.CbteDesde,
                    ),
                    processed_date=parsers.parse_datetime(
                        response.FeCabResp.FchProceso,
                    ),
                )
                # Approved receipts may still carry advisory observations.
                if cae_data.Observaciones:
                    for obs in cae_data.Observaciones.Obs:
                        observation = Observation.objects.create(
                            code=obs.Code,
                            message=obs.Msg,
                        )
                        validation.observations.add(observation)
            elif cae_data.Observaciones:
                for obs in cae_data.Observaciones.Obs:
                    errs.append(
                        'Error {}: {}'.format(
                            obs.Code,
                            parsers.parse_string(obs.Msg),
                        )
                    )
        # Remove the number from ones that failed to validate:
        self.filter(validation__isnull=True).update(receipt_number=None)
        return errs
class ReceiptManager(models.Manager):
    """
    The default manager for the :class:`~.Receipt` class.

    You should generally access this using ``Receipt.objects``.
    """

    def fetch_last_receipt_number(self, point_of_sales, receipt_type):
        """Returns the number for the last validated receipt."""
        client = clients.get_client('wsfe', point_of_sales.owner.is_sandboxed)
        response_xml = client.service.FECompUltimoAutorizado(
            serializers.serialize_ticket(
                point_of_sales.owner.get_or_create_ticket('wsfe')
            ),
            point_of_sales.number,
            receipt_type.code,
        )
        check_response(response_xml)
        # TODO XXX: Error handling
        # Sample error payload returned by the WS:
        # (FERecuperaLastCbteResponse){
        #    PtoVta = 0
        #    CbteTipo = 0
        #    CbteNro = 0
        #    Errors =
        #       (ArrayOfErr){
        #          Err[] =
        #             (Err){
        #                Code = 601
        #                Msg = "CUIT representada no incluida en Token"
        #             },
        #       }
        #  }
        return response_xml.CbteNro

    def get_queryset(self):
        # receipt_type is needed for __str__, so always join it in.
        return ReceiptQuerySet(self.model, using=self._db).select_related(
            'receipt_type',
        )
class Receipt(models.Model):
    """
    A receipt, as sent to AFIP.

    Note that AFIP allows sending ranges of receipts, but this isn't generally
    what you want, so we model invoices individually.

    You'll probably want to relate some `Sale` or `Order` object from your
    model with each Receipt.

    All ``document_`` fields contain the recipient's data.

    If the taxpayer has taxes or pays VAT, you need to attach :class:`~.Tax`
    and/or :class:`~.Vat` instances to the Receipt.
    """
    point_of_sales = models.ForeignKey(
        PointOfSales,
        related_name='receipts',
        verbose_name=_('point of sales'),
        on_delete=models.PROTECT,
    )
    receipt_type = models.ForeignKey(
        ReceiptType,
        related_name='receipts',
        verbose_name=_('receipt type'),
        on_delete=models.PROTECT,
    )
    concept = models.ForeignKey(
        ConceptType,
        verbose_name=_('concept'),
        related_name='receipts',
        on_delete=models.PROTECT,
    )
    document_type = models.ForeignKey(
        DocumentType,
        verbose_name=_('document type'),
        related_name='receipts',
        help_text=_(
            'The document type of the customer to whom this receipt '
            'is addressed'
        ),
        on_delete=models.PROTECT,
    )
    document_number = models.BigIntegerField(
        _('document number'),
        help_text=_(
            'The document number of the customer to whom this receipt '
            'is addressed'
        )
    )
    # NOTE: WS will expect receipt_from and receipt_to.
    receipt_number = models.PositiveIntegerField(
        _('receipt number'),
        null=True,
        blank=True,
        help_text=_(
            'If left blank, the next valid number will assigned when '
            'validating the receipt.'
        )
    )
    issued_date = models.DateField(
        verbose_name=_('issued date'),
        help_text=_('Can diverge up to 5 days for good, or 10 days otherwise'),
    )
    total_amount = models.DecimalField(
        # ImpTotal
        _('total amount'),
        max_digits=15,
        decimal_places=2,
        help_text=_(
            'Must be equal to the sum of net_taxed, exempt_amount, net_taxes, '
            'and all taxes and vats.'
        )
    )
    net_untaxed = models.DecimalField(
        # ImpTotConc
        _('total untaxable amount'),
        max_digits=15,
        decimal_places=2,
        help_text=_(
            'The total amount to which taxes do not apply. '
            'For C-type receipts, this must be zero.'
        ),
    )
    net_taxed = models.DecimalField(
        # ImpNeto
        _('total taxable amount'),
        max_digits=15,
        decimal_places=2,
        help_text=_(
            'The total amount to which taxes apply. '
            'For C-type receipts, this is equal to the subtotal.'
        ),
    )
    exempt_amount = models.DecimalField(
        # ImpOpEx
        # Sólo para emisores que son IVA exento
        # (Only for taxpayers who are VAT-exempt.)
        _('exempt amount'),
        max_digits=15,
        decimal_places=2,
        help_text=_(
            'Only for categories which are tax-exempt. '
            'For C-type receipts, this must be zero.'
        ),
    )
    service_start = models.DateField(
        _('service start date'),
        help_text=_(
            'Date on which a service started. No applicable for goods.'
        ),
        null=True,
        blank=True,
    )
    service_end = models.DateField(
        _('service end date'),
        help_text=_(
            'Date on which a service ended. No applicable for goods.'
        ),
        null=True,
        blank=True,
    )
    expiration_date = models.DateField(
        _('receipt expiration date'),
        help_text=_(
            'Date on which this receipt expires. No applicable for goods.'
        ),
        null=True,
        blank=True,
    )
    currency = models.ForeignKey(
        CurrencyType,
        verbose_name=_('currency'),
        related_name='documents',
        help_text=_(
            'Currency in which this receipt is issued.',
        ),
        on_delete=models.PROTECT,
        default=first_currency,
    )
    currency_quote = models.DecimalField(
        _('currency quote'),
        max_digits=10,
        decimal_places=6,
        default=1,
        help_text=_(
            'Quote of the day for the currency used in the receipt',
        ),
    )
    related_receipts = models.ManyToManyField(
        'Receipt',
        verbose_name=_('related receipts'),
        blank=True,
    )

    objects = ReceiptManager()

    # TODO: Not implemented: optionals
    # TODO: methods to validate totals

    @property
    def total_vat(self):
        """Returns the sum of all Vat objects."""
        q = Vat.objects.filter(receipt=self).aggregate(total=Sum('amount'))
        return q['total'] or 0

    @property
    def total_tax(self):
        """Returns the sum of all Tax objects."""
        q = Tax.objects.filter(receipt=self).aggregate(total=Sum('amount'))
        return q['total'] or 0

    @property
    def formatted_number(self):
        """This receipt's number as "POS-number" (e.g. "0001-00000017")."""
        if self.receipt_number:
            return '{:04d}-{:08d}'.format(
                self.point_of_sales.number,
                self.receipt_number,
            )
        return None

    @property
    def is_validated(self):
        """
        Returns True if this instance is validated.

        Note that resolving this property requires a DB query, so if you've a
        very large amount of receipts you should prefetch (see django's
        ``select_related``) the ``validation`` field. Even so, a DB query *may*
        be triggered.

        If you need a large list of validated receipts, you should actually
        filter them via a QuerySet::

            Receipt.objects.filter(validation__result=RESULT_APPROVED)

        :rtype: bool
        """
        # Avoid the DB lookup if possible:
        if not self.receipt_number:
            return False
        try:
            return self.validation.result == ReceiptValidation.RESULT_APPROVED
        except ReceiptValidation.DoesNotExist:
            return False

    def validate(self, ticket=None, raise_=False):
        """
        Validates this receipt.

        This is a shortcut to :class:`~.ReceiptQuerySet`'s method of the same
        name. Calling this validates only this instance.

        :param AuthTicket ticket: Use this ticket. If None, one will be loaded
            or created automatically.
        :param bool raise_: If True, an exception will be raised when
            validation fails.
        """
        # XXX: Maybe actually have this shortcut raise an exception?
        rv = Receipt.objects.filter(pk=self.pk).validate(ticket)
        # Since we're operating via a queryset, this instance isn't properly
        # updated:
        self.refresh_from_db()
        if raise_ and rv:
            raise exceptions.ValidationError(rv[0])
        return rv

    def __repr__(self):
        return '<Receipt {}: {} {} for {}>'.format(
            self.pk,
            self.receipt_type,
            self.receipt_number,
            self.point_of_sales.owner,
        )

    def __str__(self):
        if self.receipt_number:
            return '{} {}'.format(self.receipt_type, self.formatted_number)
        else:
            return _('Unnumbered %s') % self.receipt_type

    class Meta:
        ordering = ('issued_date',)
        verbose_name = _('receipt')
        verbose_name_plural = _('receipts')
        unique_together = (
            ('point_of_sales', 'receipt_type', 'receipt_number',)
        )
        # TODO: index_together...
class ReceiptPDFManager(models.Manager):
    def create_for_receipt(self, receipt, **kwargs):
        """
        Creates a ReceiptPDF object for a given receipt. Does not actually
        generate the related PDF file.

        All attributes will be completed with the information for the relevant
        ``TaxPayerProfile`` instance.

        :param Receipt receipt: The receipt for the PDF which will be
            generated.
        """
        try:
            profile = TaxPayerProfile.objects.get(
                taxpayer__points_of_sales__receipts=receipt,
            )
        except TaxPayerProfile.DoesNotExist:
            raise exceptions.DjangoAfipException(
                'Cannot generate a PDF for taxpayer with no profile',
            )
        # Defaults are taken from the profile; duplicate keys supplied via
        # kwargs raise TypeError, exactly as passing them twice would.
        profile_fields = {
            'receipt': receipt,
            'issuing_name': profile.issuing_name,
            'issuing_address': profile.issuing_address,
            'issuing_email': profile.issuing_email,
            'vat_condition': profile.vat_condition,
            'gross_income_condition': profile.gross_income_condition,
            'sales_terms': profile.sales_terms,
        }
        return ReceiptPDF.objects.create(**profile_fields, **kwargs)
class ReceiptPDF(models.Model):
    """
    Printable version of a receipt.

    Contains all print-related data of a receipt.

    All ``issuing_*`` fields contain data for the entity issuing the Receipt
    (these may change from one receipt to the next if, for example, the entity
    moved).

    The PDF file itself is saved into the ``pdf_file`` attribute, and is
    generated prior to saving the model for the first time (by a pre_save
    hook). If any attributes are changed, you should manually call
    :meth:`~.ReceiptPDF.save_pdf` to regenerate the PDF file.

    PDF generation is skipped if the receipt has not been validated.
    """
    # Used as the FileField's upload_to callable: Django calls it as
    # upload_to(instance, filename), so ``self`` receives the instance.
    def upload_to(self, filename='untitled', instance=None):
        """
        Returns the full path for generated receipts.

        These are bucketed inside nested directories, to avoid hundreds of
        thousands of children in single directories (which can make reading
        them excessively slow).
        """
        _, extension = os.path.splitext(os.path.basename(filename))
        uuid = uuid4().hex
        buckets = uuid[0:2], uuid[2:4]
        # NOTE(review): a second uuid4 is generated for the filename, so
        # the bucket prefix does not derive from the file's own name —
        # confirm whether that is intentional.
        filename = ''.join([uuid4().hex, extension])
        return os.path.join('afip/receipts', buckets[0], buckets[1], filename)

    receipt = models.OneToOneField(
        Receipt,
        verbose_name=_('receipt'),
        on_delete=models.PROTECT,
    )
    pdf_file = models.FileField(
        verbose_name=_('pdf file'),
        upload_to=upload_to,
        storage=_get_storage_from_settings('AFIP_PDF_STORAGE'),
        blank=True,
        null=True,
        help_text=_('The actual file which contains the PDF data.'),
    )
    issuing_name = models.CharField(
        max_length=128,
        verbose_name=_('issuing name'),
    )
    issuing_address = models.TextField(
        _('issuing address'),
    )
    issuing_email = models.CharField(
        max_length=128,
        verbose_name=_('issuing email'),
        blank=True,
        null=True,
    )
    # NOTE(review): generator-based choices can only be iterated once;
    # confirm the Django version in use normalizes this.
    vat_condition = models.CharField(
        max_length=48,
        choices=((condition, condition,) for condition in VAT_CONDITIONS),
        verbose_name=_('vat condition'),
    )
    gross_income_condition = models.CharField(
        max_length=48,
        verbose_name=_('gross income condition'),
    )
    client_name = models.CharField(
        max_length=128,
        verbose_name=_('client name'),
    )
    client_address = models.TextField(
        _('client address'),
        blank=True,
    )
    client_vat_condition = models.CharField(
        max_length=48,
        choices=((cond, cond,) for cond in CLIENT_VAT_CONDITIONS),
        verbose_name=_('client vat condition'),
    )
    sales_terms = models.CharField(
        max_length=48,
        verbose_name=_('sales terms'),
        help_text=_(
            'Should be something like "Cash", "Payable in 30 days", etc'
        ),
    )

    objects = ReceiptPDFManager()

    def save_pdf(self, save_model=True):
        """
        Save the receipt as a PDF related to this model.

        The related :class:`~.Receipt` should be validated first, of course.

        :param bool save_model: If True, immediately save this model instance.
        """
        # Local import, presumably to avoid a circular import -- TODO confirm.
        from django_afip.views import ReceiptPDFView
        if not self.receipt.is_validated:
            raise exceptions.DjangoAfipException(
                _('Cannot generate pdf for non-authorized receipt')
            )
        self.pdf_file = File(BytesIO(), name='{}.pdf'.format(uuid.uuid4().hex))
        # The template is selected by the receipt type's AFIP code.
        render_pdf(
            template='receipts/code_{}.html'.format(
                self.receipt.receipt_type.code,
            ),
            file_=self.pdf_file,
            context=ReceiptPDFView.get_context_for_pk(self.receipt_id),
        )
        if save_model:
            self.save()

    def __str__(self):
        return _('Receipt PDF for %s') % self.receipt_id

    class Meta:
        verbose_name = _('receipt pdf')
        verbose_name_plural = _('receipt pdfs')
class ReceiptEntry(models.Model):
    """
    An entry in a receipt.

    Each ReceiptEntry represents a line in printable version of a Receipt. You
    should generally have one instance per product or service.

    Note that each entry has a :class:`~.Vat` because a single Receipt can have
    multiple products with different :class:`~.VatType`.
    """
    receipt = models.ForeignKey(
        Receipt,
        related_name='entries',
        verbose_name=_('receipt'),
        on_delete=models.PROTECT,
    )
    description = models.CharField(
        max_length=128,
        verbose_name=_('description'),
    )
    quantity = models.PositiveSmallIntegerField(
        _('quantity'),
    )
    unit_price = models.DecimalField(
        _('unit price'),
        max_digits=15,
        decimal_places=2,
        help_text=_('Price per unit before vat or taxes.'),
    )
    vat = models.ForeignKey(
        VatType,
        related_name='receipt_entries',
        verbose_name=_('vat'),
        blank=True,
        null=True,
        on_delete=models.PROTECT,
    )

    @property
    def total_price(self):
        """The total price for this line (quantity * price)."""
        return self.quantity * self.unit_price

    class Meta:
        verbose_name = _('receipt entry')
        verbose_name_plural = _('receipt entries')
class Tax(models.Model):
    """A tax (type+amount) applied to a specific Receipt."""
    # The kind of tax; PROTECT keeps the type while taxes reference it.
    tax_type = models.ForeignKey(
        TaxType,
        verbose_name=_('tax type'),
        on_delete=models.PROTECT,
    )
    description = models.CharField(
        _('description'),
        max_length=80,
    )
    # The amount the tax is computed over, before applying the rate.
    base_amount = models.DecimalField(
        _('base amount'),
        max_digits=15,
        decimal_places=2,
    )
    # Tax rate expressed as a percentage (see compute_amount, which
    # divides by 100).
    aliquot = models.DecimalField(
        _('aliquot'),
        max_digits=5,
        decimal_places=2,
    )
    # The resulting tax amount; may be filled via compute_amount().
    amount = models.DecimalField(
        _('amount'),
        max_digits=15,
        decimal_places=2,
    )
    receipt = models.ForeignKey(
        Receipt,
        related_name='taxes',
        on_delete=models.PROTECT,
    )
    def compute_amount(self):
        """Auto-assign and return the total amount for this tax.

        Sets ``amount`` to ``base_amount * aliquot / 100``. Does NOT save
        the instance; callers must persist the change themselves.
        """
        self.amount = self.base_amount * self.aliquot / 100
        return self.amount
    class Meta:
        verbose_name = _('tax')
        verbose_name_plural = _('taxes')
class Vat(models.Model):
    """A VAT (type+amount) applied to a specific Receipt."""
    # The VAT rate/category; PROTECT keeps the type while VATs reference it.
    vat_type = models.ForeignKey(
        VatType,
        verbose_name=_('vat type'),
        on_delete=models.PROTECT,
    )
    # The amount the VAT is computed over.
    base_amount = models.DecimalField(
        _('base amount'),
        max_digits=15,
        decimal_places=2,
    )
    # The resulting VAT amount.
    amount = models.DecimalField(
        _('amount'),
        max_digits=15,
        decimal_places=2,
    )
    # NOTE: related_name is singular ('vat'), so ``receipt.vat`` is the
    # reverse manager for all VAT rows of a receipt.
    receipt = models.ForeignKey(
        Receipt,
        related_name='vat',
        on_delete=models.PROTECT,
    )
    class Meta:
        verbose_name = _('vat')
        verbose_name_plural = _('vat')
class Observation(models.Model):
    """
    An observation returned by AFIP.

    AFIP seems to assign re-used codes to Observations, so we actually store
    them as separate objects, and link to them from failed validations.
    """
    # Numeric code as assigned by AFIP (codes may be re-used; see above).
    code = models.PositiveSmallIntegerField(
        _('code'),
    )
    # Human-readable message accompanying the code.
    message = models.CharField(
        _('message'),
        max_length=255,
    )
    class Meta:
        verbose_name = _('observation')
        verbose_name_plural = _('observations')
class ReceiptValidation(models.Model):
    """
    The validation for a single :class:`~.Receipt`.

    This contains all validation-related data for a receipt, including its
    CAE and the CAE expiration, unless validation has failed.

    The ``observations`` field may contain any data returned by AFIP
    regarding validation failure.
    """
    RESULT_APPROVED = 'A'
    RESULT_REJECTED = 'R'

    # TODO: replace this with a `successful` boolean field.
    result = models.CharField(
        _('result'),
        max_length=1,
        choices=(
            (RESULT_APPROVED, _('approved')),
            (RESULT_REJECTED, _('rejected')),
        ),
        # Typo fix: "succesful" -> "successful".
        # NOTE(review): changing help_text requires generating a migration.
        help_text=_('Indicates whether the validation was successful or not'),
    )
    processed_date = models.DateTimeField(
        _('processed date'),
    )
    # The authorization code assigned by AFIP on successful validation.
    cae = models.CharField(
        _('cae'),
        max_length=14,
        help_text=_('The CAE as returned by the AFIP'),
    )
    cae_expiration = models.DateField(
        _('cae expiration'),
        help_text=_('The CAE expiration as returned by the AFIP'),
    )
    observations = models.ManyToManyField(
        Observation,
        verbose_name=_('observations'),
        related_name='validations',
        help_text=_(
            'The observations as returned by the AFIP. These are generally '
            'present for failed validations.'
        ),
    )
    # One validation per receipt; PROTECT prevents deleting a validated
    # receipt out from under its validation record.
    receipt = models.OneToOneField(
        Receipt,
        related_name='validation',
        verbose_name=_('receipt'),
        help_text=_('The Receipt for which this validation applies'),
        on_delete=models.PROTECT,
    )

    def __repr__(self):
        return '<{} {}: {} for Receipt {}>'.format(
            self.__class__.__name__,
            self.pk,
            self.result,
            self.receipt_id,
        )

    class Meta:
        verbose_name = _('receipt validation')
        verbose_name_plural = _('receipt validations')
| hobarrera/django-afip | django_afip/models.py | Python | isc | 44,887 |
import re
import tokenizer
from feature_extractor_counts import FeatureExtractorCounts
class FeatureExtractorProperty(FeatureExtractorCounts):
    """Feature extractor that turns each document into a single token.

    The whole (lower-cased, stripped, digit-normalized) text of a document
    becomes one prefixed token, so documents are compared by their full
    normalized text rather than by individual words.
    """

    def __init__(self, min_df=2, max_per=1.0, binarize=False, transform=None, replace_num='#',
                 source=None, subdir=None, pseudotype=None, splits_file=None, stage='training'):
        # Fixed extractor identity; all other options are forwarded verbatim
        # to the counts-based base class.
        name = 'property'
        prefix = '_p_'
        FeatureExtractorCounts.__init__(self, name=name, prefix=prefix, min_df=min_df, max_per=max_per,
                                        binarize=binarize, transform=transform, source=source, subdir=subdir,
                                        pseudotype=pseudotype, splits_file=splits_file, replace_num=replace_num,
                                        stage=stage)

    def extract_tokens_from_text(self, data, items_to_load, doc_index=None):
        """Map each requested item to a one-element token list.

        :param data: mapping from document key (or filename) to raw text
        :param items_to_load: keys to process
        :param doc_index: optional mapping from key to a dict with a
            'filename' entry used to look up the text in ``data``
        :return: dict mapping each key to ``[prefix + normalized_text]``
        """
        token_dict = {}
        for key in items_to_load:
            # When an index is supplied, keys are indirected through filenames.
            if doc_index is not None:
                doc_key = doc_index[key]['filename']
            else:
                doc_key = key
            # lstrip().rstrip() collapsed to the equivalent strip().
            text = data[doc_key].lower().strip()
            if self.replace_num is not None:
                # Raw string: '\d' in a plain literal is an invalid escape
                # (SyntaxWarning on modern Python).
                text = re.sub(r'\d', self.replace_num, text)
            token_dict[key] = [self.get_prefix() + text]
        return token_dict
| dallascard/guac | core/feature_extractors/feature_extractor_property.py | Python | apache-2.0 | 1,393 |
from __future__ import absolute_import, unicode_literals
import os
import unittest
import django
from django.conf import settings
from django.test import TestCase
from wagtail.wagtailcore.models import Site
from .utils import get_test_image_file, Image
@unittest.skipIf(django.VERSION < (1, 8), 'Multiple engines only supported in Django>=1.8')
class TestImagesJinja(TestCase):
    """Exercise the Jinja2 ``image()`` template helper."""

    def setUp(self):
        # This does not exist on Django<1.8
        from django.template import engines

        self.engine = engines['jinja2']
        self.image = Image.objects.create(
            title="Test image",
            file=get_test_image_file(),
        )

    def render(self, string, context=None, request_context=True):
        """Render ``string`` through the Jinja2 engine.

        When ``request_context`` is true, a request for the default site is
        injected into the context to simulate a RequestContext.
        """
        context = {} if context is None else context
        if request_context:
            site = Site.objects.get(is_default_site=True)
            request = self.client.get('/test/', HTTP_HOST=site.hostname)
            request.site = site
            context['request'] = request
        return self.engine.from_string(string).render(context)

    def get_image_filename(self, image, filterspec):
        """Get the generated filename for a resized image."""
        name, ext = os.path.splitext(os.path.basename(image.file.name))
        return '{}images/{}.{}{}'.format(settings.MEDIA_URL, name, filterspec, ext)

    def test_image(self):
        rendered = self.render('{{ image(myimage, "width-200") }}', {'myimage': self.image})
        expected = '<img alt="Test image" src="{}" width="200" height="150">'.format(
            self.get_image_filename(self.image, "width-200"))
        self.assertHTMLEqual(rendered, expected)

    def test_image_attributes(self):
        rendered = self.render(
            '{{ image(myimage, "width-200", alt="alternate", class="test") }}',
            {'myimage': self.image},
        )
        expected = '<img alt="alternate" src="{}" width="200" height="150" class="test">'.format(
            self.get_image_filename(self.image, "width-200"))
        self.assertHTMLEqual(rendered, expected)

    def test_image_assignment(self):
        template = ('{% set background=image(myimage, "width-200") %}'
                    'width: {{ background.width }}, url: {{ background.url }}')
        expected = 'width: 200, url: ' + self.get_image_filename(self.image, "width-200")
        self.assertHTMLEqual(self.render(template, {'myimage': self.image}), expected)
| serzans/wagtail | wagtail/wagtailimages/tests/test_jinja2.py | Python | bsd-3-clause | 2,475 |
from collections import Iterable
def is_iterable(arg):
    """
    Return True if ``arg`` is an iterable and not a string.

    Strings are technically iterable (over their characters), but are
    deliberately excluded here and treated as scalar values.
    """
    # ``Iterable`` lives in ``collections.abc``; importing it straight from
    # ``collections`` stopped working in Python 3.10. Imported locally so
    # this fix does not depend on changing the module-level import line.
    from collections.abc import Iterable
    return not isinstance(arg, str) and isinstance(arg, Iterable)
| iluxonchik/the-chronic | thechronic/utils.py | Python | mit | 207 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.