# Source: mcanthony/cython @ refs/heads/master, Cython/Compiler/Optimize.py
from __future__ import absolute_import
import sys
import copy
import codecs
from . import TypeSlots
from .ExprNodes import not_a_constant
import cython
cython.declare(UtilityCode=object, EncodedString=object, bytes_literal=object,
Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object,
UtilNodes=object, _py_int_types=object)
if sys.version_info[0] >= 3:
_py_int_types = int
else:
_py_int_types = (int, long)
from . import Nodes
from . import ExprNodes
from . import PyrexTypes
from . import Visitor
from . import Builtin
from . import UtilNodes
from . import Options
from .Code import UtilityCode, TempitaUtilityCode
from .StringEncoding import EncodedString, bytes_literal
from .Errors import error
from .ParseTreeTransforms import SkipDeclarations
try:
from __builtin__ import reduce
except ImportError:
from functools import reduce
try:
from __builtin__ import basestring
except ImportError:
basestring = str # Python 3
def load_c_utility(name):
return UtilityCode.load_cached(name, "Optimize.c")
def unwrap_coerced_node(node, coercion_nodes=(ExprNodes.CoerceToPyTypeNode, ExprNodes.CoerceFromPyTypeNode)):
if isinstance(node, coercion_nodes):
return node.arg
return node
def unwrap_node(node):
while isinstance(node, UtilNodes.ResultRefNode):
node = node.expression
return node
def is_common_value(a, b):
a = unwrap_node(a)
b = unwrap_node(b)
if isinstance(a, ExprNodes.NameNode) and isinstance(b, ExprNodes.NameNode):
return a.name == b.name
if isinstance(a, ExprNodes.AttributeNode) and isinstance(b, ExprNodes.AttributeNode):
return not a.is_py_attr and is_common_value(a.obj, b.obj) and a.attribute == b.attribute
return False
def filter_none_node(node):
if node is not None and node.constant_result is None:
return None
return node
class _YieldNodeCollector(Visitor.TreeVisitor):
"""
YieldExprNode finder for generator expressions.
"""
def __init__(self):
Visitor.TreeVisitor.__init__(self)
self.yield_stat_nodes = {}
self.yield_nodes = []
visit_Node = Visitor.TreeVisitor.visitchildren
def visit_YieldExprNode(self, node):
self.yield_nodes.append(node)
self.visitchildren(node)
def visit_ExprStatNode(self, node):
self.visitchildren(node)
if node.expr in self.yield_nodes:
self.yield_stat_nodes[node.expr] = node
# everything below these nodes is out of scope:
def visit_GeneratorExpressionNode(self, node):
pass
def visit_LambdaNode(self, node):
pass
def _find_single_yield_expression(node):
collector = _YieldNodeCollector()
collector.visitchildren(node)
if len(collector.yield_nodes) != 1:
return None, None
yield_node = collector.yield_nodes[0]
try:
return yield_node.arg, collector.yield_stat_nodes[yield_node]
except KeyError:
return None, None
class IterationTransform(Visitor.EnvTransform):
"""Transform some common for-in loop patterns into efficient C loops:
- for-in-dict loop becomes a while loop calling PyDict_Next()
- for-in-enumerate is replaced by an external counter variable
- for-in-range loop becomes a plain C for loop
"""
def visit_PrimaryCmpNode(self, node):
if node.is_ptr_contains():
# for t in operand2:
# if operand1 == t:
# res = True
# break
# else:
# res = False
pos = node.pos
result_ref = UtilNodes.ResultRefNode(node)
if node.operand2.is_subscript:
base_type = node.operand2.base.type.base_type
else:
base_type = node.operand2.type.base_type
target_handle = UtilNodes.TempHandle(base_type)
target = target_handle.ref(pos)
cmp_node = ExprNodes.PrimaryCmpNode(
pos, operator=u'==', operand1=node.operand1, operand2=target)
if_body = Nodes.StatListNode(
pos,
stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=1)),
Nodes.BreakStatNode(pos)])
if_node = Nodes.IfStatNode(
pos,
if_clauses=[Nodes.IfClauseNode(pos, condition=cmp_node, body=if_body)],
else_clause=None)
for_loop = UtilNodes.TempsBlockNode(
pos,
temps = [target_handle],
body = Nodes.ForInStatNode(
pos,
target=target,
iterator=ExprNodes.IteratorNode(node.operand2.pos, sequence=node.operand2),
body=if_node,
else_clause=Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=0))))
for_loop = for_loop.analyse_expressions(self.current_env())
for_loop = self.visit(for_loop)
new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop)
if node.operator == 'not_in':
new_node = ExprNodes.NotNode(pos, operand=new_node)
return new_node
else:
self.visitchildren(node)
return node
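# Editor's note (illustration, not original text): the branch above applies
# to containment tests against C pointers/arrays, e.g.
#
#     cdef int[3] values
#     if x in values: ...
#
# and builds exactly the search loop sketched in the comment at the top of
# the method: a temp loop target, an '==' comparison, break on match, and an
# else clause that sets the result to False.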
def visit_ForInStatNode(self, node):
self.visitchildren(node)
return self._optimise_for_loop(node, node.iterator.sequence)
def _optimise_for_loop(self, node, iterator, reversed=False):
if iterator.type is Builtin.dict_type:
# like iterating over dict.keys()
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_dict_iteration(
node, dict_obj=iterator, method=None, keys=True, values=False)
# C array (slice) iteration?
if iterator.type.is_ptr or iterator.type.is_array:
return self._transform_carray_iteration(node, iterator, reversed=reversed)
if iterator.type is Builtin.bytes_type:
return self._transform_bytes_iteration(node, iterator, reversed=reversed)
if iterator.type is Builtin.unicode_type:
return self._transform_unicode_iteration(node, iterator, reversed=reversed)
# the rest is based on function calls
if not isinstance(iterator, ExprNodes.SimpleCallNode):
return node
if iterator.args is None:
arg_count = iterator.arg_tuple and len(iterator.arg_tuple.args) or 0
else:
arg_count = len(iterator.args)
if arg_count and iterator.self is not None:
arg_count -= 1
function = iterator.function
# dict iteration?
if function.is_attribute and not reversed and not arg_count:
base_obj = iterator.self or function.obj
method = function.attribute
# in Py3, items() is equivalent to Py2's iteritems()
is_safe_iter = self.global_scope().context.language_level >= 3
if not is_safe_iter and method in ('keys', 'values', 'items'):
# try to reduce this to the corresponding .iter*() methods
if isinstance(base_obj, ExprNodes.CallNode):
inner_function = base_obj.function
if (inner_function.is_name and inner_function.name == 'dict'
and inner_function.entry
and inner_function.entry.is_builtin):
# e.g. dict(something).items() => safe to use .iter*()
is_safe_iter = True
keys = values = False
if method == 'iterkeys' or (is_safe_iter and method == 'keys'):
keys = True
elif method == 'itervalues' or (is_safe_iter and method == 'values'):
values = True
elif method == 'iteritems' or (is_safe_iter and method == 'items'):
keys = values = True
if keys or values:
return self._transform_dict_iteration(
node, base_obj, method, keys, values)
# enumerate/reversed ?
if iterator.self is None and function.is_name and \
function.entry and function.entry.is_builtin:
if function.name == 'enumerate':
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_enumerate_iteration(node, iterator)
elif function.name == 'reversed':
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_reversed_iteration(node, iterator)
# range() iteration?
if Options.convert_range and node.target.type.is_int:
if iterator.self is None and function.is_name and \
function.entry and function.entry.is_builtin and \
function.name in ('range', 'xrange'):
return self._transform_range_iteration(node, iterator, reversed=reversed)
return node
def _transform_reversed_iteration(self, node, reversed_function):
args = reversed_function.arg_tuple.args
if len(args) == 0:
error(reversed_function.pos,
"reversed() requires an iterable argument")
return node
elif len(args) > 1:
error(reversed_function.pos,
"reversed() takes exactly 1 argument")
return node
arg = args[0]
# reversed(list/tuple) ?
if arg.type in (Builtin.tuple_type, Builtin.list_type):
node.iterator.sequence = arg.as_none_safe_node("'NoneType' object is not iterable")
node.iterator.reversed = True
return node
return self._optimise_for_loop(node, arg, reversed=True)
PyBytes_AS_STRING_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_char_ptr_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
])
PyBytes_GET_SIZE_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
])
def _transform_bytes_iteration(self, node, slice_node, reversed=False):
target_type = node.target.type
if not target_type.is_int and target_type is not Builtin.bytes_type:
# bytes iteration returns bytes objects in Py2, but
# integers in Py3
return node
unpack_temp_node = UtilNodes.LetRefNode(
slice_node.as_none_safe_node("'NoneType' is not iterable"))
slice_base_node = ExprNodes.PythonCapiCallNode(
slice_node.pos, "PyBytes_AS_STRING",
self.PyBytes_AS_STRING_func_type,
args = [unpack_temp_node],
is_temp = 0,
)
len_node = ExprNodes.PythonCapiCallNode(
slice_node.pos, "PyBytes_GET_SIZE",
self.PyBytes_GET_SIZE_func_type,
args = [unpack_temp_node],
is_temp = 0,
)
return UtilNodes.LetNode(
unpack_temp_node,
self._transform_carray_iteration(
node,
ExprNodes.SliceIndexNode(
slice_node.pos,
base = slice_base_node,
start = None,
step = None,
stop = len_node,
type = slice_base_node.type,
is_temp = 1,
),
reversed = reversed))
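# Editor's sketch (assumption, not original text): with the nodes above, a
# typed bytes loop
#
#     cdef char c
#     for c in some_bytes: ...
#
# iterates directly over the buffer returned by PyBytes_AS_STRING, bounded
# by PyBytes_GET_SIZE, i.e. roughly
#
#     char* s = PyBytes_AS_STRING(b);
#     for (char* p = s; p != s + PyBytes_GET_SIZE(b); ++p) { c = *p; ... }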
PyUnicode_READ_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ucs4_type, [
PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_type, None),
PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None)
])
init_unicode_iteration_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_int_type, [
PyrexTypes.CFuncTypeArg("s", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_ptr_type, None),
PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_ptr_type, None)
],
exception_value = '-1')
def _transform_unicode_iteration(self, node, slice_node, reversed=False):
if slice_node.is_literal:
# try to reduce to byte iteration for plain Latin-1 strings
try:
bytes_value = bytes_literal(slice_node.value.encode('latin1'), 'iso8859-1')
except UnicodeEncodeError:
pass
else:
bytes_slice = ExprNodes.SliceIndexNode(
slice_node.pos,
base=ExprNodes.BytesNode(
slice_node.pos, value=bytes_value,
constant_result=bytes_value,
type=PyrexTypes.c_const_char_ptr_type).coerce_to(
PyrexTypes.c_const_uchar_ptr_type, self.current_env()),
start=None,
stop=ExprNodes.IntNode(
slice_node.pos, value=str(len(bytes_value)),
constant_result=len(bytes_value),
type=PyrexTypes.c_py_ssize_t_type),
type=Builtin.unicode_type, # hint for Python conversion
)
return self._transform_carray_iteration(node, bytes_slice, reversed)
unpack_temp_node = UtilNodes.LetRefNode(
slice_node.as_none_safe_node("'NoneType' is not iterable"))
start_node = ExprNodes.IntNode(
node.pos, value='0', constant_result=0, type=PyrexTypes.c_py_ssize_t_type)
length_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
end_node = length_temp.ref(node.pos)
if reversed:
relation1, relation2 = '>', '>='
start_node, end_node = end_node, start_node
else:
relation1, relation2 = '<=', '<'
kind_temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
data_temp = UtilNodes.TempHandle(PyrexTypes.c_void_ptr_type)
counter_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
target_value = ExprNodes.PythonCapiCallNode(
slice_node.pos, "__Pyx_PyUnicode_READ",
self.PyUnicode_READ_func_type,
args = [kind_temp.ref(slice_node.pos),
data_temp.ref(slice_node.pos),
counter_temp.ref(node.target.pos)],
is_temp = False,
)
if target_value.type != node.target.type:
target_value = target_value.coerce_to(node.target.type,
self.current_env())
target_assign = Nodes.SingleAssignmentNode(
pos = node.target.pos,
lhs = node.target,
rhs = target_value)
body = Nodes.StatListNode(
node.pos,
stats = [target_assign, node.body])
loop_node = Nodes.ForFromStatNode(
node.pos,
bound1=start_node, relation1=relation1,
target=counter_temp.ref(node.target.pos),
relation2=relation2, bound2=end_node,
step=None, body=body,
else_clause=node.else_clause,
from_range=True)
setup_node = Nodes.ExprStatNode(
node.pos,
expr = ExprNodes.PythonCapiCallNode(
slice_node.pos, "__Pyx_init_unicode_iteration",
self.init_unicode_iteration_func_type,
args = [unpack_temp_node,
ExprNodes.AmpersandNode(slice_node.pos, operand=length_temp.ref(slice_node.pos),
type=PyrexTypes.c_py_ssize_t_ptr_type),
ExprNodes.AmpersandNode(slice_node.pos, operand=data_temp.ref(slice_node.pos),
type=PyrexTypes.c_void_ptr_ptr_type),
ExprNodes.AmpersandNode(slice_node.pos, operand=kind_temp.ref(slice_node.pos),
type=PyrexTypes.c_int_ptr_type),
],
is_temp = True,
result_is_used = False,
utility_code=UtilityCode.load_cached("unicode_iter", "Optimize.c"),
))
return UtilNodes.LetNode(
unpack_temp_node,
UtilNodes.TempsBlockNode(
node.pos, temps=[counter_temp, length_temp, data_temp, kind_temp],
body=Nodes.StatListNode(node.pos, stats=[setup_node, loop_node])))
def _transform_carray_iteration(self, node, slice_node, reversed=False):
neg_step = False
if isinstance(slice_node, ExprNodes.SliceIndexNode):
slice_base = slice_node.base
start = filter_none_node(slice_node.start)
stop = filter_none_node(slice_node.stop)
step = None
if not stop:
if not slice_base.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
elif slice_node.is_subscript:
assert isinstance(slice_node.index, ExprNodes.SliceNode)
slice_base = slice_node.base
index = slice_node.index
start = filter_none_node(index.start)
stop = filter_none_node(index.stop)
step = filter_none_node(index.step)
if step:
if not isinstance(step.constant_result, _py_int_types) \
or step.constant_result == 0 \
or step.constant_result > 0 and not stop \
or step.constant_result < 0 and not start:
if not slice_base.type.is_pyobject:
error(step.pos, "C array iteration requires known step size and end index")
return node
else:
# step sign is handled internally by ForFromStatNode
step_value = step.constant_result
if reversed:
step_value = -step_value
neg_step = step_value < 0
step = ExprNodes.IntNode(step.pos, type=PyrexTypes.c_py_ssize_t_type,
value=str(abs(step_value)),
constant_result=abs(step_value))
elif slice_node.type.is_array:
if slice_node.type.size is None:
error(slice_node.pos, "C array iteration requires known end index")
return node
slice_base = slice_node
start = None
stop = ExprNodes.IntNode(
slice_node.pos, value=str(slice_node.type.size),
type=PyrexTypes.c_py_ssize_t_type, constant_result=slice_node.type.size)
step = None
else:
if not slice_node.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
if start:
start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop:
stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop is None:
if neg_step:
stop = ExprNodes.IntNode(
slice_node.pos, value='-1', type=PyrexTypes.c_py_ssize_t_type, constant_result=-1)
else:
error(slice_node.pos, "C array iteration requires known step size and end index")
return node
if reversed:
if not start:
start = ExprNodes.IntNode(slice_node.pos, value="0", constant_result=0,
type=PyrexTypes.c_py_ssize_t_type)
# if step was provided, it was already negated above
start, stop = stop, start
ptr_type = slice_base.type
if ptr_type.is_array:
ptr_type = ptr_type.element_ptr_type()
carray_ptr = slice_base.coerce_to_simple(self.current_env())
if start and start.constant_result != 0:
start_ptr_node = ExprNodes.AddNode(
start.pos,
operand1=carray_ptr,
operator='+',
operand2=start,
type=ptr_type)
else:
start_ptr_node = carray_ptr
if stop and stop.constant_result != 0:
stop_ptr_node = ExprNodes.AddNode(
stop.pos,
operand1=ExprNodes.CloneNode(carray_ptr),
operator='+',
operand2=stop,
type=ptr_type
).coerce_to_simple(self.current_env())
else:
stop_ptr_node = ExprNodes.CloneNode(carray_ptr)
counter = UtilNodes.TempHandle(ptr_type)
counter_temp = counter.ref(node.target.pos)
if slice_base.type.is_string and node.target.type.is_pyobject:
# special case: char* -> bytes/unicode
if slice_node.type is Builtin.unicode_type:
target_value = ExprNodes.CastNode(
ExprNodes.DereferenceNode(
node.target.pos, operand=counter_temp,
type=ptr_type.base_type),
PyrexTypes.c_py_ucs4_type).coerce_to(
node.target.type, self.current_env())
else:
# char* -> bytes coercion requires slicing, not indexing
target_value = ExprNodes.SliceIndexNode(
node.target.pos,
start=ExprNodes.IntNode(node.target.pos, value='0',
constant_result=0,
type=PyrexTypes.c_int_type),
stop=ExprNodes.IntNode(node.target.pos, value='1',
constant_result=1,
type=PyrexTypes.c_int_type),
base=counter_temp,
type=Builtin.bytes_type,
is_temp=1)
elif node.target.type.is_ptr and not node.target.type.assignable_from(ptr_type.base_type):
# Allow iteration with pointer target to avoid copy.
target_value = counter_temp
else:
# TODO: can this safely be replaced with DereferenceNode() as above?
target_value = ExprNodes.IndexNode(
node.target.pos,
index=ExprNodes.IntNode(node.target.pos, value='0',
constant_result=0,
type=PyrexTypes.c_int_type),
base=counter_temp,
type=ptr_type.base_type)
if target_value.type != node.target.type:
target_value = target_value.coerce_to(node.target.type,
self.current_env())
target_assign = Nodes.SingleAssignmentNode(
pos = node.target.pos,
lhs = node.target,
rhs = target_value)
body = Nodes.StatListNode(
node.pos,
stats = [target_assign, node.body])
relation1, relation2 = self._find_for_from_node_relations(neg_step, reversed)
for_node = Nodes.ForFromStatNode(
node.pos,
bound1=start_ptr_node, relation1=relation1,
target=counter_temp,
relation2=relation2, bound2=stop_ptr_node,
step=step, body=body,
else_clause=node.else_clause,
from_range=True)
return UtilNodes.TempsBlockNode(
node.pos, temps=[counter],
body=for_node)
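# Editor's sketch of the result (not from the original source): iterating a
# C array slice such as
#
#     cdef int[10] arr
#     for x in arr[2:8]: ...
#
# becomes a pointer walk, approximately
#
#     for (int* p = arr + 2; p < arr + 8; ++p) { x = *p; ... }
#
# with the start/stop pointers built once by the Add/Clone nodes above.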
def _transform_enumerate_iteration(self, node, enumerate_function):
args = enumerate_function.arg_tuple.args
if len(args) == 0:
error(enumerate_function.pos,
"enumerate() requires an iterable argument")
return node
elif len(args) > 2:
error(enumerate_function.pos,
"enumerate() takes at most 2 arguments")
return node
if not node.target.is_sequence_constructor:
# leave this untouched for now
return node
targets = node.target.args
if len(targets) != 2:
# leave this untouched for now
return node
enumerate_target, iterable_target = targets
counter_type = enumerate_target.type
if not counter_type.is_pyobject and not counter_type.is_int:
# nothing we can do here, I guess
return node
if len(args) == 2:
start = unwrap_coerced_node(args[1]).coerce_to(counter_type, self.current_env())
else:
start = ExprNodes.IntNode(enumerate_function.pos,
value='0',
type=counter_type,
constant_result=0)
temp = UtilNodes.LetRefNode(start)
inc_expression = ExprNodes.AddNode(
enumerate_function.pos,
operand1 = temp,
operand2 = ExprNodes.IntNode(node.pos, value='1',
type=counter_type,
constant_result=1),
operator = '+',
type = counter_type,
#inplace = True, # not worth using in-place operation for Py ints
is_temp = counter_type.is_pyobject
)
loop_body = [
Nodes.SingleAssignmentNode(
pos = enumerate_target.pos,
lhs = enumerate_target,
rhs = temp),
Nodes.SingleAssignmentNode(
pos = enumerate_target.pos,
lhs = temp,
rhs = inc_expression)
]
if isinstance(node.body, Nodes.StatListNode):
node.body.stats = loop_body + node.body.stats
else:
loop_body.append(node.body)
node.body = Nodes.StatListNode(
node.body.pos,
stats = loop_body)
node.target = iterable_target
node.item = node.item.coerce_to(iterable_target.type, self.current_env())
node.iterator.sequence = args[0]
# recurse into loop to check for further optimisations
return UtilNodes.LetNode(temp, self._optimise_for_loop(node, node.iterator.sequence))
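# Editor's illustration (not in the original source): after this transform,
#
#     for i, x in enumerate(seq): ...
#
# is handled as
#
#     _t = 0                 # the LetRefNode created above
#     for x in seq:
#         i = _t
#         _t = _t + 1
#         <original body>
#
# so the counter lives in a plain variable instead of tuple unpacking.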
def _find_for_from_node_relations(self, neg_step_value, reversed):
if reversed:
if neg_step_value:
return '<', '<='
else:
return '>', '>='
else:
if neg_step_value:
return '>=', '>'
else:
return '<=', '<'
def _transform_range_iteration(self, node, range_function, reversed=False):
args = range_function.arg_tuple.args
if len(args) < 3:
step_pos = range_function.pos
step_value = 1
step = ExprNodes.IntNode(step_pos, value='1', constant_result=1)
else:
step = args[2]
step_pos = step.pos
if not isinstance(step.constant_result, _py_int_types):
# cannot determine step direction
return node
step_value = step.constant_result
if step_value == 0:
# will lead to an error elsewhere
return node
step = ExprNodes.IntNode(step_pos, value=str(step_value),
constant_result=step_value)
if len(args) == 1:
bound1 = ExprNodes.IntNode(range_function.pos, value='0',
constant_result=0)
bound2 = args[0].coerce_to_integer(self.current_env())
else:
bound1 = args[0].coerce_to_integer(self.current_env())
bound2 = args[1].coerce_to_integer(self.current_env())
relation1, relation2 = self._find_for_from_node_relations(step_value < 0, reversed)
bound2_ref_node = None
if reversed:
bound1, bound2 = bound2, bound1
abs_step = abs(step_value)
if abs_step != 1:
if (isinstance(bound1.constant_result, _py_int_types) and
isinstance(bound2.constant_result, _py_int_types)):
# calculate final bounds now
if step_value < 0:
begin_value = bound2.constant_result
end_value = bound1.constant_result
bound1_value = begin_value - abs_step * ((begin_value - end_value - 1) // abs_step) - 1
else:
begin_value = bound1.constant_result
end_value = bound2.constant_result
bound1_value = end_value + abs_step * ((begin_value - end_value - 1) // abs_step) + 1
bound1 = ExprNodes.IntNode(
bound1.pos, value=str(bound1_value), constant_result=bound1_value,
type=PyrexTypes.spanning_type(bound1.type, bound2.type))
else:
# evaluate the same expression as above at runtime
bound2_ref_node = UtilNodes.LetRefNode(bound2)
spanning_type = PyrexTypes.spanning_type(bound1.type, bound2.type)
if step.type.is_int and abs(step_value) < 0x7FFF:
# Avoid loss of integer precision warnings.
spanning_step_type = PyrexTypes.spanning_type(spanning_type, PyrexTypes.c_int_type)
else:
spanning_step_type = PyrexTypes.spanning_type(spanning_type, step.type)
if step_value < 0:
begin_value = bound2_ref_node
end_value = bound1
final_op = '-'
else:
begin_value = bound1
end_value = bound2_ref_node
final_op = '+'
bound1 = ExprNodes.binop_node(
bound1.pos,
operand1=ExprNodes.binop_node(
bound1.pos,
operand1=bound2_ref_node,
operator=final_op, # +/-
operand2=ExprNodes.MulNode(
bound1.pos,
operand1=ExprNodes.IntNode(
bound1.pos,
value=str(abs_step),
constant_result=abs_step,
type=spanning_step_type),
operator='*',
operand2=ExprNodes.DivNode(
bound1.pos,
operand1=ExprNodes.SubNode(
bound1.pos,
operand1=ExprNodes.SubNode(
bound1.pos,
operand1=begin_value,
operator='-',
operand2=end_value,
type=spanning_type),
operator='-',
operand2=ExprNodes.IntNode(
bound1.pos,
value='1',
constant_result=1),
type=spanning_step_type),
operator='//',
operand2=ExprNodes.IntNode(
bound1.pos,
value=str(abs_step),
constant_result=abs_step,
type=spanning_step_type),
type=spanning_step_type),
type=spanning_step_type),
type=spanning_step_type),
operator=final_op, # +/-
operand2=ExprNodes.IntNode(
bound1.pos,
value='1',
constant_result=1),
type=spanning_type)
if step_value < 0:
step_value = -step_value
step.value = str(step_value)
step.constant_result = step_value
step = step.coerce_to_integer(self.current_env())
if not bound2.is_literal:
# stop bound must be immutable => keep it in a temp var
bound2_is_temp = True
bound2 = bound2_ref_node or UtilNodes.LetRefNode(bound2)
else:
bound2_is_temp = False
for_node = Nodes.ForFromStatNode(
node.pos,
target=node.target,
bound1=bound1, relation1=relation1,
relation2=relation2, bound2=bound2,
step=step, body=node.body,
else_clause=node.else_clause,
from_range=True)
if bound2_is_temp:
for_node = UtilNodes.LetNode(bound2, for_node)
return for_node
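# Worked example (editor's addition, not in the original source): for
# reversed(range(0, 11, 3)) the produced values are 9, 6, 3, 0.  After the
# bound swap, begin_value=11, end_value=0, abs_step=3, so the formula gives
#
#     bound1_value = 0 + 3 * ((11 - 0 - 1) // 3) + 1 = 10
#
# i.e. one past the true last element (9), which is exactly what the
# ('>', '>=') relations from _find_for_from_node_relations() need in order
# to start the descending C loop at 9.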
def _transform_dict_iteration(self, node, dict_obj, method, keys, values):
temps = []
temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
temps.append(temp)
dict_temp = temp.ref(dict_obj.pos)
temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
temps.append(temp)
pos_temp = temp.ref(node.pos)
key_target = value_target = tuple_target = None
if keys and values:
if node.target.is_sequence_constructor:
if len(node.target.args) == 2:
key_target, value_target = node.target.args
else:
# unusual case that may or may not lead to an error
return node
else:
tuple_target = node.target
elif keys:
key_target = node.target
else:
value_target = node.target
if isinstance(node.body, Nodes.StatListNode):
body = node.body
else:
body = Nodes.StatListNode(pos = node.body.pos,
stats = [node.body])
# keep original length to guard against dict modification
dict_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
temps.append(dict_len_temp)
dict_len_temp_addr = ExprNodes.AmpersandNode(
node.pos, operand=dict_len_temp.ref(dict_obj.pos),
type=PyrexTypes.c_ptr_type(dict_len_temp.type))
temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
temps.append(temp)
is_dict_temp = temp.ref(node.pos)
is_dict_temp_addr = ExprNodes.AmpersandNode(
node.pos, operand=is_dict_temp,
type=PyrexTypes.c_ptr_type(temp.type))
iter_next_node = Nodes.DictIterationNextNode(
dict_temp, dict_len_temp.ref(dict_obj.pos), pos_temp,
key_target, value_target, tuple_target,
is_dict_temp)
iter_next_node = iter_next_node.analyse_expressions(self.current_env())
body.stats[0:0] = [iter_next_node]
if method:
method_node = ExprNodes.StringNode(
dict_obj.pos, is_identifier=True, value=method)
dict_obj = dict_obj.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error = "PyExc_AttributeError",
format_args = [method])
else:
method_node = ExprNodes.NullNode(dict_obj.pos)
dict_obj = dict_obj.as_none_safe_node("'NoneType' object is not iterable")
def flag_node(value):
value = value and 1 or 0
return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)
result_code = [
Nodes.SingleAssignmentNode(
node.pos,
lhs = pos_temp,
rhs = ExprNodes.IntNode(node.pos, value='0',
constant_result=0)),
Nodes.SingleAssignmentNode(
dict_obj.pos,
lhs = dict_temp,
rhs = ExprNodes.PythonCapiCallNode(
dict_obj.pos,
"__Pyx_dict_iterator",
self.PyDict_Iterator_func_type,
utility_code = UtilityCode.load_cached("dict_iter", "Optimize.c"),
args = [dict_obj, flag_node(dict_obj.type is Builtin.dict_type),
method_node, dict_len_temp_addr, is_dict_temp_addr,
],
is_temp=True,
)),
Nodes.WhileStatNode(
node.pos,
condition = None,
body = body,
else_clause = node.else_clause
)
]
return UtilNodes.TempsBlockNode(
node.pos, temps=temps,
body=Nodes.StatListNode(
node.pos,
stats = result_code
))
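# Editor's sketch of the generated shape (assumption, not original text):
#
#     for k, v in d.items(): ...
#
# roughly becomes
#
#     pos = 0
#     tmp = __Pyx_dict_iterator(d, <is real dict>, "items", &orig_len, &is_dict)
#     while <DictIterationNextNode advances pos and fills k, v>:
#         <body>
#
# where the captured original length guards against the dict being resized
# during iteration.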
PyDict_Iterator_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("is_dict", PyrexTypes.c_int_type, None),
PyrexTypes.CFuncTypeArg("method_name", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("p_is_dict", PyrexTypes.c_int_ptr_type, None),
])
class SwitchTransform(Visitor.EnvTransform):
"""
This transformation tries to turn long if statements into C switch statements.
The requirement is that every clause be an (or of) var == value, where the var
is common among all clauses and both var and value are ints.
"""
NO_MATCH = (None, None, None)
def extract_conditions(self, cond, allow_not_in):
while True:
if isinstance(cond, (ExprNodes.CoerceToTempNode,
ExprNodes.CoerceToBooleanNode)):
cond = cond.arg
elif isinstance(cond, ExprNodes.BoolBinopResultNode):
cond = cond.arg.arg
elif isinstance(cond, UtilNodes.EvalWithTempExprNode):
# this is what we get from the FlattenInListTransform
cond = cond.subexpression
elif isinstance(cond, ExprNodes.TypecastNode):
cond = cond.operand
else:
break
if isinstance(cond, ExprNodes.PrimaryCmpNode):
if cond.cascade is not None:
return self.NO_MATCH
elif cond.is_c_string_contains() and \
isinstance(cond.operand2, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
not_in = cond.operator == 'not_in'
if not_in and not allow_not_in:
return self.NO_MATCH
if isinstance(cond.operand2, ExprNodes.UnicodeNode) and \
cond.operand2.contains_surrogates():
# dealing with surrogates leads to different
# behaviour on wide and narrow Unicode
# platforms => refuse to optimise this case
return self.NO_MATCH
return not_in, cond.operand1, self.extract_in_string_conditions(cond.operand2)
elif not cond.is_python_comparison():
if cond.operator == '==':
not_in = False
elif allow_not_in and cond.operator == '!=':
not_in = True
else:
return self.NO_MATCH
# this looks somewhat silly, but it does the right
# checks for NameNode and AttributeNode
if is_common_value(cond.operand1, cond.operand1):
if cond.operand2.is_literal:
return not_in, cond.operand1, [cond.operand2]
elif getattr(cond.operand2, 'entry', None) \
and cond.operand2.entry.is_const:
return not_in, cond.operand1, [cond.operand2]
if is_common_value(cond.operand2, cond.operand2):
if cond.operand1.is_literal:
return not_in, cond.operand2, [cond.operand1]
elif getattr(cond.operand1, 'entry', None) \
and cond.operand1.entry.is_const:
return not_in, cond.operand2, [cond.operand1]
elif isinstance(cond, ExprNodes.BoolBinopNode):
if cond.operator == 'or' or (allow_not_in and cond.operator == 'and'):
allow_not_in = (cond.operator == 'and')
not_in_1, t1, c1 = self.extract_conditions(cond.operand1, allow_not_in)
not_in_2, t2, c2 = self.extract_conditions(cond.operand2, allow_not_in)
if t1 is not None and not_in_1 == not_in_2 and is_common_value(t1, t2):
if (not not_in_1) or allow_not_in:
return not_in_1, t1, c1+c2
return self.NO_MATCH
def extract_in_string_conditions(self, string_literal):
if isinstance(string_literal, ExprNodes.UnicodeNode):
charvals = list(map(ord, set(string_literal.value)))
charvals.sort()
return [ ExprNodes.IntNode(string_literal.pos, value=str(charval),
constant_result=charval)
for charval in charvals ]
else:
# this is a bit tricky as Py3's bytes type returns
# integers on iteration, whereas Py2 returns 1-char byte
# strings
characters = string_literal.value
characters = list(set([ characters[i:i+1] for i in range(len(characters)) ]))
characters.sort()
return [ ExprNodes.CharNode(string_literal.pos, value=charval,
constant_result=charval)
for charval in characters ]
def extract_common_conditions(self, common_var, condition, allow_not_in):
not_in, var, conditions = self.extract_conditions(condition, allow_not_in)
if var is None:
return self.NO_MATCH
elif common_var is not None and not is_common_value(var, common_var):
return self.NO_MATCH
elif not (var.type.is_int or var.type.is_enum) or sum([not (cond.type.is_int or cond.type.is_enum) for cond in conditions]):
return self.NO_MATCH
return not_in, var, conditions
def has_duplicate_values(self, condition_values):
# duplicated values don't work in a switch statement
seen = set()
for value in condition_values:
if value.has_constant_result():
if value.constant_result in seen:
return True
seen.add(value.constant_result)
else:
# this isn't completely safe as we don't know the
# final C value, but this is about the best we can do
try:
if value.entry.cname in seen:
return True
except AttributeError:
return True # play safe
seen.add(value.entry.cname)
return False
def visit_IfStatNode(self, node):
if not self.current_directives.get('optimize.use_switch'):
self.visitchildren(node)
return node
common_var = None
cases = []
for if_clause in node.if_clauses:
_, common_var, conditions = self.extract_common_conditions(
common_var, if_clause.condition, False)
if common_var is None:
self.visitchildren(node)
return node
cases.append(Nodes.SwitchCaseNode(pos = if_clause.pos,
conditions = conditions,
body = if_clause.body))
condition_values = [
cond for case in cases for cond in case.conditions]
if len(condition_values) < 2:
self.visitchildren(node)
return node
if self.has_duplicate_values(condition_values):
self.visitchildren(node)
return node
common_var = unwrap_node(common_var)
switch_node = Nodes.SwitchStatNode(pos = node.pos,
test = common_var,
cases = cases,
else_clause = node.else_clause)
return switch_node
def visit_CondExprNode(self, node):
if not self.current_directives.get('optimize.use_switch'):
self.visitchildren(node)
return node
not_in, common_var, conditions = self.extract_common_conditions(
None, node.test, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
node.true_val, node.false_val)
def visit_BoolBinopNode(self, node):
if not self.current_directives.get('optimize.use_switch'):
self.visitchildren(node)
return node
not_in, common_var, conditions = self.extract_common_conditions(
None, node, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
node.wrap_operands(self.current_env()) # in case we changed the operands
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
def visit_PrimaryCmpNode(self, node):
if not self.current_directives.get('optimize.use_switch'):
self.visitchildren(node)
return node
not_in, common_var, conditions = self.extract_common_conditions(
None, node, True)
if common_var is None \
or len(conditions) < 2 \
or self.has_duplicate_values(conditions):
self.visitchildren(node)
return node
return self.build_simple_switch_statement(
node, common_var, conditions, not_in,
ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
ExprNodes.BoolNode(node.pos, value=False, constant_result=False))
def build_simple_switch_statement(self, node, common_var, conditions,
not_in, true_val, false_val):
result_ref = UtilNodes.ResultRefNode(node)
true_body = Nodes.SingleAssignmentNode(
node.pos,
lhs=result_ref,
rhs=true_val.coerce_to(node.type, self.current_env()),
first=True)
false_body = Nodes.SingleAssignmentNode(
node.pos,
lhs=result_ref,
rhs=false_val.coerce_to(node.type, self.current_env()),
first=True)
if not_in:
true_body, false_body = false_body, true_body
cases = [Nodes.SwitchCaseNode(pos = node.pos,
conditions = conditions,
body = true_body)]
common_var = unwrap_node(common_var)
switch_node = Nodes.SwitchStatNode(pos = node.pos,
test = common_var,
cases = cases,
else_clause = false_body)
replacement = UtilNodes.TempResultFromStatNode(result_ref, switch_node)
return replacement
def visit_EvalWithTempExprNode(self, node):
if not self.current_directives.get('optimize.use_switch'):
self.visitchildren(node)
return node
# drop unused expression temp from FlattenInListTransform
orig_expr = node.subexpression
temp_ref = node.lazy_temp
self.visitchildren(node)
if node.subexpression is not orig_expr:
# node was restructured => check if temp is still used
if not Visitor.tree_contains(node.subexpression, temp_ref):
return node.subexpression
return node
visit_Node = Visitor.VisitorTransform.recurse_to_children
class FlattenInListTransform(Visitor.VisitorTransform, SkipDeclarations):
"""
This transformation flattens "x in [val1, ..., valn]" into a sequential list
of comparisons.
"""
def visit_PrimaryCmpNode(self, node):
self.visitchildren(node)
if node.cascade is not None:
return node
elif node.operator == 'in':
conjunction = 'or'
eq_or_neq = '=='
elif node.operator == 'not_in':
conjunction = 'and'
eq_or_neq = '!='
else:
return node
if not isinstance(node.operand2, (ExprNodes.TupleNode,
ExprNodes.ListNode,
ExprNodes.SetNode)):
return node
args = node.operand2.args
if len(args) == 0:
# note: lhs may have side effects
return node
lhs = UtilNodes.ResultRefNode(node.operand1)
conds = []
temps = []
for arg in args:
try:
# Trial optimisation to avoid redundant temp
# assignments. However, since is_simple() is meant to
# be called after type analysis, we ignore any errors
# and just play safe in that case.
is_simple_arg = arg.is_simple()
except Exception:
is_simple_arg = False
if not is_simple_arg:
# must evaluate all non-simple RHS before doing the comparisons
arg = UtilNodes.LetRefNode(arg)
temps.append(arg)
cond = ExprNodes.PrimaryCmpNode(
pos = node.pos,
operand1 = lhs,
operator = eq_or_neq,
operand2 = arg,
cascade = None)
conds.append(ExprNodes.TypecastNode(
pos = node.pos,
operand = cond,
type = PyrexTypes.c_bint_type))
def concat(left, right):
return ExprNodes.BoolBinopNode(
pos = node.pos,
operator = conjunction,
operand1 = left,
operand2 = right)
condition = reduce(concat, conds)
new_node = UtilNodes.EvalWithTempExprNode(lhs, condition)
for temp in temps[::-1]:
new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
return new_node
visit_Node = Visitor.VisitorTransform.recurse_to_children
class DropRefcountingTransform(Visitor.VisitorTransform):
"""Drop ref-counting in safe places.
"""
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_ParallelAssignmentNode(self, node):
"""
Parallel swap assignments like 'a,b = b,a' are safe.
"""
left_names, right_names = [], []
left_indices, right_indices = [], []
temps = []
for stat in node.stats:
if isinstance(stat, Nodes.SingleAssignmentNode):
if not self._extract_operand(stat.lhs, left_names,
left_indices, temps):
return node
if not self._extract_operand(stat.rhs, right_names,
right_indices, temps):
return node
elif isinstance(stat, Nodes.CascadedAssignmentNode):
# FIXME
return node
else:
return node
if left_names or right_names:
# lhs/rhs names must be a non-redundant permutation
lnames = [ path for path, n in left_names ]
rnames = [ path for path, n in right_names ]
if set(lnames) != set(rnames):
return node
if len(set(lnames)) != len(right_names):
return node
if left_indices or right_indices:
# base name and index of index nodes must be a
# non-redundant permutation
lindices = []
for lhs_node in left_indices:
index_id = self._extract_index_id(lhs_node)
if not index_id:
return node
lindices.append(index_id)
rindices = []
for rhs_node in right_indices:
index_id = self._extract_index_id(rhs_node)
if not index_id:
return node
rindices.append(index_id)
if set(lindices) != set(rindices):
return node
if len(set(lindices)) != len(right_indices):
return node
# really supporting IndexNode requires support in
# __Pyx_GetItemInt(), so let's stop short for now
return node
temp_args = [t.arg for t in temps]
for temp in temps:
temp.use_managed_ref = False
for _, name_node in left_names + right_names:
if name_node not in temp_args:
name_node.use_managed_ref = False
for index_node in left_indices + right_indices:
index_node.use_managed_ref = False
return node
def _extract_operand(self, node, names, indices, temps):
node = unwrap_node(node)
if not node.type.is_pyobject:
return False
if isinstance(node, ExprNodes.CoerceToTempNode):
temps.append(node)
node = node.arg
name_path = []
obj_node = node
while obj_node.is_attribute:
if obj_node.is_py_attr:
return False
name_path.append(obj_node.member)
obj_node = obj_node.obj
if obj_node.is_name:
name_path.append(obj_node.name)
names.append( ('.'.join(name_path[::-1]), node) )
elif node.is_subscript:
if node.base.type != Builtin.list_type:
return False
if not node.index.type.is_int:
return False
if not node.base.is_name:
return False
indices.append(node)
else:
return False
return True
def _extract_index_id(self, index_node):
base = index_node.base
index = index_node.index
if isinstance(index, ExprNodes.NameNode):
index_val = index.name
elif isinstance(index, ExprNodes.ConstNode):
# FIXME:
return None
else:
return None
return (base.name, index_val)
class EarlyReplaceBuiltinCalls(Visitor.EnvTransform):
"""Optimize some common calls to builtin types *before* the type
analysis phase and *after* the declarations analysis phase.
This transform cannot make use of any argument types, but it can
restructure the tree in a way that the type analysis phase can
respond to.
Introducing C function calls here may not be a good idea. Move
them to the OptimizeBuiltinCalls transform instead, which runs
after type analysis.
"""
# only intercept on call nodes
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_SimpleCallNode(self, node):
self.visitchildren(node)
function = node.function
if not self._function_is_builtin_name(function):
return node
return self._dispatch_to_handler(node, function, node.args)
def visit_GeneralCallNode(self, node):
self.visitchildren(node)
function = node.function
if not self._function_is_builtin_name(function):
return node
arg_tuple = node.positional_args
if not isinstance(arg_tuple, ExprNodes.TupleNode):
return node
args = arg_tuple.args
return self._dispatch_to_handler(
node, function, args, node.keyword_args)
def _function_is_builtin_name(self, function):
if not function.is_name:
return False
env = self.current_env()
entry = env.lookup(function.name)
if entry is not env.builtin_scope().lookup_here(function.name):
return False
# if entry is None, it's at least an undeclared name, so likely builtin
return True
def _dispatch_to_handler(self, node, function, args, kwargs=None):
if kwargs is None:
handler_name = '_handle_simple_function_%s' % function.name
else:
handler_name = '_handle_general_function_%s' % function.name
handle_call = getattr(self, handler_name, None)
if handle_call is not None:
if kwargs is None:
return handle_call(node, args)
else:
return handle_call(node, args, kwargs)
return node
def _inject_capi_function(self, node, cname, func_type, utility_code=None):
node.function = ExprNodes.PythonCapiFunctionNode(
node.function.pos, node.function.name, cname, func_type,
utility_code = utility_code)
def _error_wrong_arg_count(self, function_name, node, args, expected=None):
if not expected: # None or 0
arg_str = ''
elif isinstance(expected, basestring) or expected > 1:
arg_str = '...'
elif expected == 1:
arg_str = 'x'
else:
arg_str = ''
if expected is not None:
expected_str = 'expected %s, ' % expected
else:
expected_str = ''
error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
function_name, arg_str, expected_str, len(args)))
# specific handlers for simple call nodes
def _handle_simple_function_float(self, node, pos_args):
if not pos_args:
return ExprNodes.FloatNode(node.pos, value='0.0')
if len(pos_args) > 1:
self._error_wrong_arg_count('float', node, pos_args, 1)
arg_type = getattr(pos_args[0], 'type', None)
if arg_type in (PyrexTypes.c_double_type, Builtin.float_type):
return pos_args[0]
return node
def _handle_simple_function_slice(self, node, pos_args):
arg_count = len(pos_args)
start = step = None
if arg_count == 1:
stop, = pos_args
elif arg_count == 2:
start, stop = pos_args
elif arg_count == 3:
start, stop, step = pos_args
else:
self._error_wrong_arg_count('slice', node, pos_args)
return node
return ExprNodes.SliceNode(
node.pos,
start=start or ExprNodes.NoneNode(node.pos),
stop=stop,
step=step or ExprNodes.NoneNode(node.pos))
def _handle_simple_function_ord(self, node, pos_args):
"""Unpack ord('X').
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if isinstance(arg, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
if len(arg.value) == 1:
return ExprNodes.IntNode(
arg.pos, type=PyrexTypes.c_long_type,
value=str(ord(arg.value)),
constant_result=ord(arg.value)
)
elif isinstance(arg, ExprNodes.StringNode):
if arg.unicode_value and len(arg.unicode_value) == 1 \
and ord(arg.unicode_value) <= 255: # Py2/3 portability
return ExprNodes.IntNode(
arg.pos, type=PyrexTypes.c_int_type,
value=str(ord(arg.unicode_value)),
constant_result=ord(arg.unicode_value)
)
return node
# sequence processing
def _handle_simple_function_all(self, node, pos_args):
"""Transform
_result = all(x for L in LL for x in L)
into
for L in LL:
for x in L:
if not x:
_result = False
break
else:
continue
break
else:
_result = True
"""
return self._transform_any_all(node, pos_args, False)
def _handle_simple_function_any(self, node, pos_args):
"""Transform
_result = any(x for L in LL for x in L)
into
for L in LL:
for x in L:
if x:
_result = True
break
else:
continue
break
else:
_result = False
"""
return self._transform_any_all(node, pos_args, True)
def _transform_any_all(self, node, pos_args, is_any):
if len(pos_args) != 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
generator_body = gen_expr_node.def_node.gbody
loop_node = generator_body.body
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
if yield_expression is None:
return node
if is_any:
condition = yield_expression
else:
condition = ExprNodes.NotNode(yield_expression.pos, operand=yield_expression)
test_node = Nodes.IfStatNode(
yield_expression.pos, else_clause=None, if_clauses=[
Nodes.IfClauseNode(
yield_expression.pos,
condition=condition,
body=Nodes.ReturnStatNode(
node.pos,
value=ExprNodes.BoolNode(yield_expression.pos, value=is_any, constant_result=is_any))
)]
)
loop = loop_node
while isinstance(loop.body, Nodes.LoopNode):
next_loop = loop.body
loop.body = Nodes.StatListNode(loop.body.pos, stats=[
loop.body,
Nodes.BreakStatNode(yield_expression.pos)
])
next_loop.else_clause = Nodes.ContinueStatNode(yield_expression.pos)
loop = next_loop
loop_node.else_clause = Nodes.ReturnStatNode(
node.pos,
value=ExprNodes.BoolNode(yield_expression.pos, value=not is_any, constant_result=not is_any))
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, test_node)
return ExprNodes.InlinedGeneratorExpressionNode(
gen_expr_node.pos, gen=gen_expr_node, orig_func='any' if is_any else 'all')
PySequence_List_func_type = PyrexTypes.CFuncType(
Builtin.list_type,
[PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
def _handle_simple_function_sorted(self, node, pos_args):
"""Transform sorted(genexpr) and sorted([listcomp]) into
[listcomp].sort(). CPython just reads the iterable into a
list and calls .sort() on it. Expanding the iterable in a
listcomp is still faster and the result can be sorted in
place.
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if isinstance(arg, ExprNodes.ComprehensionNode) and arg.type is Builtin.list_type:
list_node = pos_args[0]
loop_node = list_node.loop
elif isinstance(arg, ExprNodes.GeneratorExpressionNode):
gen_expr_node = arg
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
if yield_expression is None:
return node
list_node = ExprNodes.InlinedGeneratorExpressionNode(
node.pos, gen_expr_node, orig_func='sorted',
comprehension_type=Builtin.list_type)
append_node = ExprNodes.ComprehensionAppendNode(
yield_expression.pos,
expr=yield_expression,
target=list_node.target)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
elif arg.is_sequence_constructor:
# sorted([a, b, c]) or sorted((a, b, c)). The result is always a list,
# so starting off with a fresh one is more efficient.
list_node = loop_node = arg.as_list()
else:
# Interestingly, PySequence_List works on a lot of non-sequence
# things as well.
list_node = loop_node = ExprNodes.PythonCapiCallNode(
node.pos, "PySequence_List", self.PySequence_List_func_type,
args=pos_args, is_temp=True)
result_node = UtilNodes.ResultRefNode(
pos=loop_node.pos, type=Builtin.list_type, may_hold_none=False)
list_assign_node = Nodes.SingleAssignmentNode(
node.pos, lhs=result_node, rhs=list_node, first=True)
sort_method = ExprNodes.AttributeNode(
node.pos, obj=result_node, attribute=EncodedString('sort'),
# entry ? type ?
needs_none_check=False)
sort_node = Nodes.ExprStatNode(
node.pos, expr=ExprNodes.SimpleCallNode(
node.pos, function=sort_method, args=[]))
sort_node.analyse_declarations(self.current_env())
return UtilNodes.TempResultFromStatNode(
result_node,
Nodes.StatListNode(node.pos, stats=[list_assign_node, sort_node]))
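# Editor's sketch (not from the original source): the rewrite above makes
#
#     sorted(x*x for x in seq)
#
# behave like
#
#     _result = [x*x for x in seq]   # inlined comprehension
#     _result.sort()
#
# using _result as the expression value, rather than materialising the
# generator inside the sorted() call.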
def __handle_simple_function_sum(self, node, pos_args):
"""Transform sum(genexpr) into an equivalent inlined aggregation loop.
"""
if len(pos_args) not in (1,2):
return node
if not isinstance(pos_args[0], (ExprNodes.GeneratorExpressionNode,
ExprNodes.ComprehensionNode)):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
if isinstance(gen_expr_node, ExprNodes.GeneratorExpressionNode):
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
# FIXME: currently nonfunctional
yield_expression = None
if yield_expression is None:
return node
else: # ComprehensionNode
yield_stat_node = gen_expr_node.append
yield_expression = yield_stat_node.expr
try:
if not yield_expression.is_literal or not yield_expression.type.is_int:
return node
except AttributeError:
return node # in case we don't have a type yet
# special case: old Py2 backwards compatible "sum([int_const for ...])"
# can safely be unpacked into a genexpr
if len(pos_args) == 1:
start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
else:
start = pos_args[1]
result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.py_object_type)
add_node = Nodes.SingleAssignmentNode(
yield_expression.pos,
lhs = result_ref,
rhs = ExprNodes.binop_node(node.pos, '+', result_ref, yield_expression)
)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, add_node)
exec_code = Nodes.StatListNode(
node.pos,
stats = [
Nodes.SingleAssignmentNode(
start.pos,
lhs = UtilNodes.ResultRefNode(pos=node.pos, expression=result_ref),
rhs = start,
first = True),
loop_node
])
return ExprNodes.InlinedGeneratorExpressionNode(
gen_expr_node.pos, loop = exec_code, result_node = result_ref,
expr_scope = gen_expr_node.expr_scope, orig_func = 'sum',
has_local_scope = gen_expr_node.has_local_scope)
def _handle_simple_function_min(self, node, pos_args):
return self._optimise_min_max(node, pos_args, '<')
def _handle_simple_function_max(self, node, pos_args):
return self._optimise_min_max(node, pos_args, '>')
def _optimise_min_max(self, node, args, operator):
"""Replace min(a,b,...) and max(a,b,...) by explicit comparison code.
"""
if len(args) <= 1:
if len(args) == 1 and args[0].is_sequence_constructor:
args = args[0].args
else:
# leave this to Python
return node
cascaded_nodes = list(map(UtilNodes.ResultRefNode, args[1:]))
last_result = args[0]
for arg_node in cascaded_nodes:
result_ref = UtilNodes.ResultRefNode(last_result)
last_result = ExprNodes.CondExprNode(
arg_node.pos,
true_val = arg_node,
false_val = result_ref,
test = ExprNodes.PrimaryCmpNode(
arg_node.pos,
operand1 = arg_node,
operator = operator,
operand2 = result_ref,
)
)
last_result = UtilNodes.EvalWithTempExprNode(result_ref, last_result)
for ref_node in cascaded_nodes[::-1]:
last_result = UtilNodes.EvalWithTempExprNode(ref_node, last_result)
return last_result
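# Editor's illustration (assumption): min(a, b, c) unfolds into nested
# conditional expressions over temporaries, roughly
#
#     t1 = a
#     t2 = b if b < t1 else t1
#     result = c if c < t2 else t2
#
# and max(a, b, c) does the same with '>'.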
# builtin type creation
def _DISABLED_handle_simple_function_tuple(self, node, pos_args):
if not pos_args:
return ExprNodes.TupleNode(node.pos, args=[], constant_result=())
# This is a bit special - for iterables (including genexps),
# Python actually overallocates and resizes a newly created
# tuple incrementally while reading items, which we can't
# easily do without explicit node support. Instead, we read
# the items into a list and then copy them into a tuple of the
# final size. This takes up to twice as much memory, but will
# have to do until we have real support for genexps.
result = self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
if result is not node:
return ExprNodes.AsTupleNode(node.pos, arg=result)
return node
def _handle_simple_function_frozenset(self, node, pos_args):
"""Replace frozenset([...]) by frozenset((...)) as tuples are more efficient.
"""
if len(pos_args) != 1:
return node
if pos_args[0].is_sequence_constructor and not pos_args[0].args:
del pos_args[0]
elif isinstance(pos_args[0], ExprNodes.ListNode):
pos_args[0] = pos_args[0].as_tuple()
return node
def _handle_simple_function_list(self, node, pos_args):
if not pos_args:
return ExprNodes.ListNode(node.pos, args=[], constant_result=[])
return self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
def _handle_simple_function_set(self, node, pos_args):
if not pos_args:
return ExprNodes.SetNode(node.pos, args=[], constant_result=set())
return self._transform_list_set_genexpr(node, pos_args, Builtin.set_type)
def _transform_list_set_genexpr(self, node, pos_args, target_type):
"""Replace set(genexpr) and list(genexpr) by an inlined comprehension.
"""
if len(pos_args) > 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
if yield_expression is None:
return node
result_node = ExprNodes.InlinedGeneratorExpressionNode(
node.pos, gen_expr_node,
orig_func='set' if target_type is Builtin.set_type else 'list',
comprehension_type=target_type)
append_node = ExprNodes.ComprehensionAppendNode(
yield_expression.pos,
expr=yield_expression,
target=result_node.target)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
return result_node
def _handle_simple_function_dict(self, node, pos_args):
"""Replace dict( (a,b) for ... ) by an inlined { a:b for ... }
"""
if len(pos_args) == 0:
return ExprNodes.DictNode(node.pos, key_value_pairs=[], constant_result={})
if len(pos_args) > 1:
return node
if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
return node
gen_expr_node = pos_args[0]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
if yield_expression is None:
return node
if not isinstance(yield_expression, ExprNodes.TupleNode):
return node
if len(yield_expression.args) != 2:
return node
result_node = ExprNodes.InlinedGeneratorExpressionNode(
node.pos, gen_expr_node, orig_func='dict',
comprehension_type=Builtin.dict_type)
append_node = ExprNodes.DictComprehensionAppendNode(
yield_expression.pos,
key_expr = yield_expression.args[0],
value_expr = yield_expression.args[1],
target=result_node.target)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
return result_node
# specific handlers for general call nodes
def _handle_general_function_dict(self, node, pos_args, kwargs):
"""Replace dict(a=b,c=d,...) by the underlying keyword dict
construction which is done anyway.
"""
if len(pos_args) > 0:
return node
if not isinstance(kwargs, ExprNodes.DictNode):
return node
return kwargs
class InlineDefNodeCalls(Visitor.NodeRefCleanupMixin, Visitor.EnvTransform):
visit_Node = Visitor.VisitorTransform.recurse_to_children
def get_constant_value_node(self, name_node):
if name_node.cf_state is None:
return None
if name_node.cf_state.cf_is_null:
return None
entry = self.current_env().lookup(name_node.name)
if not entry or (not entry.cf_assignments
or len(entry.cf_assignments) != 1):
# not just a single assignment in all closures
return None
return entry.cf_assignments[0].rhs
def visit_SimpleCallNode(self, node):
self.visitchildren(node)
if not self.current_directives.get('optimize.inline_defnode_calls'):
return node
function_name = node.function
if not function_name.is_name:
return node
function = self.get_constant_value_node(function_name)
if not isinstance(function, ExprNodes.PyCFunctionNode):
return node
inlined = ExprNodes.InlinedDefNodeCallNode(
node.pos, function_name=function_name,
function=function, args=node.args)
if inlined.can_be_inlined():
return self.replace(node, inlined)
return node
class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
Visitor.MethodDispatcherTransform):
"""Optimize some common methods calls and instantiation patterns
for builtin types *after* the type analysis phase.
Running after type analysis, this transform can only perform
function replacements that do not alter the function return type
in a way that was not anticipated by the type analysis.
"""
### cleanup to avoid redundant coercions to/from Python types
def _visit_PyTypeTestNode(self, node):
# disabled - appears to break assignments in some cases, and
# also drops a None check, which might still be required
"""Flatten redundant type checks after tree changes.
"""
old_arg = node.arg
self.visitchildren(node)
if old_arg is node.arg or node.arg.type != node.type:
return node
return node.arg
def _visit_TypecastNode(self, node):
# disabled - the user may have had a reason to put a type
# cast, even if it looks redundant to Cython
"""
Drop redundant type casts.
"""
self.visitchildren(node)
if node.type == node.operand.type:
return node.operand
return node
def visit_ExprStatNode(self, node):
"""
Drop useless coercions.
"""
self.visitchildren(node)
if isinstance(node.expr, ExprNodes.CoerceToPyTypeNode):
node.expr = node.expr.arg
return node
def visit_CoerceToBooleanNode(self, node):
"""Drop redundant conversion nodes after tree changes.
"""
self.visitchildren(node)
arg = node.arg
if isinstance(arg, ExprNodes.PyTypeTestNode):
arg = arg.arg
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.type in (PyrexTypes.py_object_type, Builtin.bool_type):
return arg.arg.coerce_to_boolean(self.current_env())
return node
def visit_CoerceFromPyTypeNode(self, node):
"""Drop redundant conversion nodes after tree changes.
Also, optimise away calls to Python's builtin int() and
float() if the result is going to be coerced back into a C
type anyway.
"""
self.visitchildren(node)
arg = node.arg
if not arg.type.is_pyobject:
# no Python conversion left at all, just do a C coercion instead
if node.type == arg.type:
return arg
else:
return arg.coerce_to(node.type, self.current_env())
if isinstance(arg, ExprNodes.PyTypeTestNode):
arg = arg.arg
if arg.is_literal:
if (node.type.is_int and isinstance(arg, ExprNodes.IntNode) or
node.type.is_float and isinstance(arg, ExprNodes.FloatNode) or
node.type.is_int and isinstance(arg, ExprNodes.BoolNode)):
return arg.coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.type is PyrexTypes.py_object_type:
if node.type.assignable_from(arg.arg.type):
# completely redundant C->Py->C coercion
return arg.arg.coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.SimpleCallNode):
if node.type.is_int or node.type.is_float:
return self._optimise_numeric_cast_call(node, arg)
elif arg.is_subscript:
index_node = arg.index
if isinstance(index_node, ExprNodes.CoerceToPyTypeNode):
index_node = index_node.arg
if index_node.type.is_int:
return self._optimise_int_indexing(node, arg, index_node)
return node
PyBytes_GetItemInt_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_char_type, [
PyrexTypes.CFuncTypeArg("bytes", Builtin.bytes_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("check_bounds", PyrexTypes.c_int_type, None),
],
exception_value = "((char)-1)",
exception_check = True)
def _optimise_int_indexing(self, coerce_node, arg, index_node):
env = self.current_env()
        bound_check_bool = 1 if env.directives['boundscheck'] else 0
if arg.base.type is Builtin.bytes_type:
if coerce_node.type in (PyrexTypes.c_char_type, PyrexTypes.c_uchar_type):
# bytes[index] -> char
bound_check_node = ExprNodes.IntNode(
coerce_node.pos, value=str(bound_check_bool),
constant_result=bound_check_bool)
node = ExprNodes.PythonCapiCallNode(
coerce_node.pos, "__Pyx_PyBytes_GetItemInt",
self.PyBytes_GetItemInt_func_type,
args=[
arg.base.as_none_safe_node("'NoneType' object is not subscriptable"),
index_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env),
bound_check_node,
],
is_temp=True,
utility_code=UtilityCode.load_cached(
'bytes_index', 'StringTools.c'))
if coerce_node.type is not PyrexTypes.c_char_type:
node = node.coerce_to(coerce_node.type, env)
return node
return coerce_node
def _optimise_numeric_cast_call(self, node, arg):
function = arg.function
if not isinstance(function, ExprNodes.NameNode) \
or not function.type.is_builtin_type \
or not isinstance(arg.arg_tuple, ExprNodes.TupleNode):
return node
args = arg.arg_tuple.args
if len(args) != 1:
return node
func_arg = args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
func_arg = func_arg.arg
elif func_arg.type.is_pyobject:
# play safe: Python conversion might work on all sorts of things
return node
if function.name == 'int':
if func_arg.type.is_int or node.type.is_int:
if func_arg.type == node.type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
elif function.name == 'float':
if func_arg.type.is_float or node.type.is_float:
if func_arg.type == node.type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
return node
def _error_wrong_arg_count(self, function_name, node, args, expected=None):
if not expected: # None or 0
arg_str = ''
elif isinstance(expected, basestring) or expected > 1:
arg_str = '...'
elif expected == 1:
arg_str = 'x'
else:
arg_str = ''
if expected is not None:
expected_str = 'expected %s, ' % expected
else:
expected_str = ''
error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
function_name, arg_str, expected_str, len(args)))
### generic fallbacks
def _handle_function(self, node, function_name, function, arg_list, kwargs):
return node
def _handle_method(self, node, type_name, attr_name, function,
arg_list, is_unbound_method, kwargs):
"""
Try to inject C-API calls for unbound method calls to builtin types.
While the method declarations in Builtin.py already handle this, we
can additionally resolve bound and unbound methods here that were
assigned to variables ahead of time.
"""
if kwargs:
return node
if not function or not function.is_attribute or not function.obj.is_name:
# cannot track unbound method calls over more than one indirection as
# the names might have been reassigned in the meantime
return node
type_entry = self.current_env().lookup(type_name)
if not type_entry:
return node
method = ExprNodes.AttributeNode(
node.function.pos,
obj=ExprNodes.NameNode(
function.pos,
name=type_name,
entry=type_entry,
type=type_entry.type),
attribute=attr_name,
is_called=True).analyse_as_type_attribute(self.current_env())
if method is None:
return node
args = node.args
if args is None and node.arg_tuple:
args = node.arg_tuple.args
call_node = ExprNodes.SimpleCallNode(
node.pos,
function=method,
args=args)
if not is_unbound_method:
call_node.self = function.obj
call_node.analyse_c_function_call(self.current_env())
call_node.analysed = True
return call_node.coerce_to(node.type, self.current_env())
### builtin types
PyDict_Copy_func_type = PyrexTypes.CFuncType(
Builtin.dict_type, [
PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None)
])
def _handle_simple_function_dict(self, node, function, pos_args):
"""Replace dict(some_dict) by PyDict_Copy(some_dict).
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if arg.type is Builtin.dict_type:
arg = arg.as_none_safe_node("'NoneType' is not iterable")
return ExprNodes.PythonCapiCallNode(
node.pos, "PyDict_Copy", self.PyDict_Copy_func_type,
args = [arg],
is_temp = node.is_temp
)
return node
PySequence_List_func_type = PyrexTypes.CFuncType(
Builtin.list_type,
[PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
def _handle_simple_function_list(self, node, function, pos_args):
"""Turn list(ob) into PySequence_List(ob).
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
return ExprNodes.PythonCapiCallNode(
node.pos, "PySequence_List", self.PySequence_List_func_type,
args=pos_args, is_temp=node.is_temp)
PyList_AsTuple_func_type = PyrexTypes.CFuncType(
Builtin.tuple_type, [
PyrexTypes.CFuncTypeArg("list", Builtin.list_type, None)
])
PySequence_Tuple_func_type = PyrexTypes.CFuncType(
Builtin.tuple_type,
[PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
def _handle_simple_function_tuple(self, node, function, pos_args):
"""Replace tuple([...]) by PyList_AsTuple or PySequence_Tuple.
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if arg.type is Builtin.tuple_type and not arg.may_be_none():
return arg
if arg.type is Builtin.list_type:
pos_args[0] = arg.as_none_safe_node(
"'NoneType' object is not iterable")
return ExprNodes.PythonCapiCallNode(
node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type,
args=pos_args, is_temp=node.is_temp)
else:
return ExprNodes.PythonCapiCallNode(
node.pos, "PySequence_Tuple", self.PySequence_Tuple_func_type,
args=pos_args, is_temp=node.is_temp)
PySet_New_func_type = PyrexTypes.CFuncType(
Builtin.set_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_set(self, node, function, pos_args):
if len(pos_args) != 1:
return node
if pos_args[0].is_sequence_constructor:
# We can optimise set([x,y,z]) safely into a set literal,
# but only if we create all items before adding them -
# adding an item may raise an exception if it is not
# hashable, but creating the later items may have
# side-effects.
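            # Illustrative: set([f(), g()]) becomes, roughly,
            #     t1 = f(); t2 = g(); {t1, t2}
            # i.e. both items are created before either is added.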
args = []
temps = []
for arg in pos_args[0].args:
if not arg.is_simple():
arg = UtilNodes.LetRefNode(arg)
temps.append(arg)
args.append(arg)
result = ExprNodes.SetNode(node.pos, is_temp=1, args=args)
self.replace(node, result)
for temp in temps[::-1]:
result = UtilNodes.EvalWithTempExprNode(temp, result)
return result
else:
# PySet_New(it) is better than a generic Python call to set(it)
return self.replace(node, ExprNodes.PythonCapiCallNode(
node.pos, "PySet_New",
self.PySet_New_func_type,
args=pos_args,
is_temp=node.is_temp,
py_name="set"))
PyFrozenSet_New_func_type = PyrexTypes.CFuncType(
Builtin.frozenset_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_frozenset(self, node, function, pos_args):
if not pos_args:
pos_args = [ExprNodes.NullNode(node.pos)]
elif len(pos_args) > 1:
return node
elif pos_args[0].type is Builtin.frozenset_type and not pos_args[0].may_be_none():
return pos_args[0]
# PyFrozenSet_New(it) is better than a generic Python call to frozenset(it)
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyFrozenSet_New",
self.PyFrozenSet_New_func_type,
args=pos_args,
is_temp=node.is_temp,
utility_code=UtilityCode.load_cached('pyfrozenset_new', 'Builtins.c'),
py_name="frozenset")
PyObject_AsDouble_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_double_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value = "((double)-1)",
exception_check = True)
def _handle_simple_function_float(self, node, function, pos_args):
"""Transform float() into either a C type cast or a faster C
function call.
"""
# Note: this requires the float() function to be typed as
# returning a C 'double'
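        # Illustrative cases (sketch):
        #     float()       ->  0.0
        #     float(c_val)  ->  (double) c_val                # plain C cast
        #     float(obj)    ->  __Pyx_PyObject_AsDouble(obj)  # C-API helper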
if len(pos_args) == 0:
return ExprNodes.FloatNode(
node, value="0.0", constant_result=0.0
).coerce_to(Builtin.float_type, self.current_env())
elif len(pos_args) != 1:
self._error_wrong_arg_count('float', node, pos_args, '0 or 1')
return node
func_arg = pos_args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
func_arg = func_arg.arg
if func_arg.type is PyrexTypes.c_double_type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_numeric:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_AsDouble",
self.PyObject_AsDouble_func_type,
args = pos_args,
is_temp = node.is_temp,
utility_code = load_c_utility('pyobject_as_double'),
py_name = "float")
PyNumber_Int_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_int(self, node, function, pos_args):
"""Transform int() into a faster C function call.
"""
if len(pos_args) == 0:
return ExprNodes.IntNode(node.pos, value="0", constant_result=0,
type=PyrexTypes.py_object_type)
elif len(pos_args) != 1:
return node # int(x, base)
func_arg = pos_args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
return node # handled in visit_CoerceFromPyTypeNode()
if func_arg.type.is_pyobject and node.type.is_pyobject:
return ExprNodes.PythonCapiCallNode(
node.pos, "PyNumber_Int", self.PyNumber_Int_func_type,
args=pos_args, is_temp=True)
return node
def _handle_simple_function_bool(self, node, function, pos_args):
"""Transform bool(x) into a type coercion to a boolean.
"""
if len(pos_args) == 0:
return ExprNodes.BoolNode(
node.pos, value=False, constant_result=False
).coerce_to(Builtin.bool_type, self.current_env())
elif len(pos_args) != 1:
self._error_wrong_arg_count('bool', node, pos_args, '0 or 1')
return node
else:
# => !!<bint>(x) to make sure it's exactly 0 or 1
operand = pos_args[0].coerce_to_boolean(self.current_env())
operand = ExprNodes.NotNode(node.pos, operand = operand)
operand = ExprNodes.NotNode(node.pos, operand = operand)
# coerce back to Python object as that's the result we are expecting
return operand.coerce_to_pyobject(self.current_env())
### builtin functions
Pyx_strlen_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_size_t_type, [
PyrexTypes.CFuncTypeArg("bytes", PyrexTypes.c_const_char_ptr_type, None)
])
Pyx_Py_UNICODE_strlen_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_size_t_type, [
PyrexTypes.CFuncTypeArg("unicode", PyrexTypes.c_const_py_unicode_ptr_type, None)
])
PyObject_Size_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
],
exception_value="-1")
_map_to_capi_len_function = {
Builtin.unicode_type: "__Pyx_PyUnicode_GET_LENGTH",
Builtin.bytes_type: "PyBytes_GET_SIZE",
Builtin.list_type: "PyList_GET_SIZE",
Builtin.tuple_type: "PyTuple_GET_SIZE",
Builtin.set_type: "PySet_GET_SIZE",
Builtin.frozenset_type: "PySet_GET_SIZE",
Builtin.dict_type: "PyDict_Size",
}.get
_ext_types_with_pysize = set(["cpython.array.array"])
def _handle_simple_function_len(self, node, function, pos_args):
"""Replace len(char*) by the equivalent call to strlen(),
        len(Py_UNICODE*) by the equivalent Py_UNICODE_strlen() and
len(known_builtin_type) by an equivalent C-API call.
"""
if len(pos_args) != 1:
self._error_wrong_arg_count('len', node, pos_args, 1)
return node
arg = pos_args[0]
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
arg = arg.arg
if arg.type.is_string:
new_node = ExprNodes.PythonCapiCallNode(
node.pos, "strlen", self.Pyx_strlen_func_type,
args = [arg],
is_temp = node.is_temp,
utility_code = UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
elif arg.type.is_pyunicode_ptr:
new_node = ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_Py_UNICODE_strlen", self.Pyx_Py_UNICODE_strlen_func_type,
args = [arg],
is_temp = node.is_temp)
elif arg.type.is_pyobject:
cfunc_name = self._map_to_capi_len_function(arg.type)
if cfunc_name is None:
arg_type = arg.type
if ((arg_type.is_extension_type or arg_type.is_builtin_type)
and arg_type.entry.qualified_name in self._ext_types_with_pysize):
cfunc_name = 'Py_SIZE'
else:
return node
arg = arg.as_none_safe_node(
"object of type 'NoneType' has no len()")
new_node = ExprNodes.PythonCapiCallNode(
node.pos, cfunc_name, self.PyObject_Size_func_type,
args = [arg],
is_temp = node.is_temp)
elif arg.type.is_unicode_char:
return ExprNodes.IntNode(node.pos, value='1', constant_result=1,
type=node.type)
else:
return node
if node.type not in (PyrexTypes.c_size_t_type, PyrexTypes.c_py_ssize_t_type):
new_node = new_node.coerce_to(node.type, self.current_env())
return new_node
Pyx_Type_func_type = PyrexTypes.CFuncType(
Builtin.type_type, [
PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_type(self, node, function, pos_args):
"""Replace type(o) by a macro call to Py_TYPE(o).
"""
if len(pos_args) != 1:
return node
node = ExprNodes.PythonCapiCallNode(
node.pos, "Py_TYPE", self.Pyx_Type_func_type,
args = pos_args,
is_temp = False)
return ExprNodes.CastNode(node, PyrexTypes.py_object_type)
Py_type_check_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("arg", PyrexTypes.py_object_type, None)
])
def _handle_simple_function_isinstance(self, node, function, pos_args):
"""Replace isinstance() checks against builtin types by the
corresponding C-API call.
"""
if len(pos_args) != 2:
return node
arg, types = pos_args
temps = []
if isinstance(types, ExprNodes.TupleNode):
types = types.args
            if len(types) == 1 and types[0].type is not Builtin.type_type:
return node # nothing to improve here
if arg.is_attribute or not arg.is_simple():
arg = UtilNodes.ResultRefNode(arg)
temps.append(arg)
elif types.type is Builtin.type_type:
types = [types]
else:
return node
tests = []
test_nodes = []
env = self.current_env()
for test_type_node in types:
builtin_type = None
if test_type_node.is_name:
if test_type_node.entry:
entry = env.lookup(test_type_node.entry.name)
if entry and entry.type and entry.type.is_builtin_type:
builtin_type = entry.type
if builtin_type is Builtin.type_type:
# all types have type "type", but there's only one 'type'
if entry.name != 'type' or not (
entry.scope and entry.scope.is_builtin_scope):
builtin_type = None
if builtin_type is not None:
type_check_function = entry.type.type_check_function(exact=False)
if type_check_function in tests:
continue
tests.append(type_check_function)
type_check_args = [arg]
elif test_type_node.type is Builtin.type_type:
type_check_function = '__Pyx_TypeCheck'
type_check_args = [arg, test_type_node]
else:
if not test_type_node.is_literal:
test_type_node = UtilNodes.ResultRefNode(test_type_node)
temps.append(test_type_node)
type_check_function = 'PyObject_IsInstance'
type_check_args = [arg, test_type_node]
test_nodes.append(
ExprNodes.PythonCapiCallNode(
test_type_node.pos, type_check_function, self.Py_type_check_func_type,
args=type_check_args,
is_temp=True,
))
def join_with_or(a, b, make_binop_node=ExprNodes.binop_node):
or_node = make_binop_node(node.pos, 'or', a, b)
or_node.type = PyrexTypes.c_bint_type
or_node.wrap_operands(env)
return or_node
test_node = reduce(join_with_or, test_nodes).coerce_to(node.type, env)
for temp in temps[::-1]:
test_node = UtilNodes.EvalWithTempExprNode(temp, test_node)
return test_node
def _handle_simple_function_ord(self, node, function, pos_args):
"""Unpack ord(Py_UNICODE) and ord('X').
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.arg.type.is_unicode_char:
return ExprNodes.TypecastNode(
arg.pos, operand=arg.arg, type=PyrexTypes.c_long_type
).coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.UnicodeNode):
if len(arg.value) == 1:
return ExprNodes.IntNode(
arg.pos, type=PyrexTypes.c_int_type,
value=str(ord(arg.value)),
constant_result=ord(arg.value)
).coerce_to(node.type, self.current_env())
elif isinstance(arg, ExprNodes.StringNode):
if arg.unicode_value and len(arg.unicode_value) == 1 \
and ord(arg.unicode_value) <= 255: # Py2/3 portability
return ExprNodes.IntNode(
arg.pos, type=PyrexTypes.c_int_type,
value=str(ord(arg.unicode_value)),
constant_result=ord(arg.unicode_value)
).coerce_to(node.type, self.current_env())
return node
### special methods
Pyx_tp_new_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
])
Pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
PyrexTypes.CFuncTypeArg("kwargs", Builtin.dict_type, None),
])
def _handle_any_slot__new__(self, node, function, args,
is_unbound_method, kwargs=None):
"""Replace 'exttype.__new__(exttype, ...)' by a call to exttype->tp_new()
"""
obj = function.obj
if not is_unbound_method or len(args) < 1:
return node
type_arg = args[0]
if not obj.is_name or not type_arg.is_name:
# play safe
return node
if obj.type != Builtin.type_type or type_arg.type != Builtin.type_type:
# not a known type, play safe
return node
if not type_arg.type_entry or not obj.type_entry:
if obj.name != type_arg.name:
return node
# otherwise, we know it's a type and we know it's the same
# type for both - that should do
elif type_arg.type_entry != obj.type_entry:
# different types - may or may not lead to an error at runtime
return node
args_tuple = ExprNodes.TupleNode(node.pos, args=args[1:])
args_tuple = args_tuple.analyse_types(
self.current_env(), skip_children=True)
if type_arg.type_entry:
ext_type = type_arg.type_entry.type
if (ext_type.is_extension_type and ext_type.typeobj_cname and
ext_type.scope.global_scope() == self.current_env().global_scope()):
# known type in current module
tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
slot_func_cname = TypeSlots.get_slot_function(ext_type.scope, tp_slot)
if slot_func_cname:
cython_scope = self.context.cython_scope
PyTypeObjectPtr = PyrexTypes.CPtrType(
cython_scope.lookup('PyTypeObject').type)
pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("type", PyTypeObjectPtr, None),
PyrexTypes.CFuncTypeArg("args", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("kwargs", PyrexTypes.py_object_type, None),
])
type_arg = ExprNodes.CastNode(type_arg, PyTypeObjectPtr)
if not kwargs:
kwargs = ExprNodes.NullNode(node.pos, type=PyrexTypes.py_object_type) # hack?
return ExprNodes.PythonCapiCallNode(
node.pos, slot_func_cname,
pyx_tp_new_kwargs_func_type,
args=[type_arg, args_tuple, kwargs],
is_temp=True)
else:
# arbitrary variable, needs a None check for safety
type_arg = type_arg.as_none_safe_node(
"object.__new__(X): X is not a type object (NoneType)")
utility_code = UtilityCode.load_cached('tp_new', 'ObjectHandling.c')
if kwargs:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_tp_new_kwargs", self.Pyx_tp_new_kwargs_func_type,
args=[type_arg, args_tuple, kwargs],
utility_code=utility_code,
is_temp=node.is_temp
)
else:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_tp_new", self.Pyx_tp_new_func_type,
args=[type_arg, args_tuple],
utility_code=utility_code,
is_temp=node.is_temp
)
### methods of builtin types
PyObject_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("item", PyrexTypes.py_object_type, None),
],
exception_value="-1")
def _handle_simple_method_object_append(self, node, function, args, is_unbound_method):
"""Optimistic optimisation as X.append() is almost always
referring to a list.
"""
if len(args) != 2 or node.result_is_used:
return node
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_Append", self.PyObject_Append_func_type,
args=args,
may_return_none=False,
is_temp=node.is_temp,
result_is_used=False,
utility_code=load_c_utility('append')
)
PyByteArray_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_int_type, None),
],
exception_value="-1")
PyByteArray_AppendObject_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("value", PyrexTypes.py_object_type, None),
],
exception_value="-1")
def _handle_simple_method_bytearray_append(self, node, function, args, is_unbound_method):
if len(args) != 2:
return node
func_name = "__Pyx_PyByteArray_Append"
func_type = self.PyByteArray_Append_func_type
value = unwrap_coerced_node(args[1])
if value.type.is_int or isinstance(value, ExprNodes.IntNode):
value = value.coerce_to(PyrexTypes.c_int_type, self.current_env())
utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
elif value.is_string_literal:
if not value.can_coerce_to_char_literal():
return node
value = value.coerce_to(PyrexTypes.c_char_type, self.current_env())
utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
elif value.type.is_pyobject:
func_name = "__Pyx_PyByteArray_AppendObject"
func_type = self.PyByteArray_AppendObject_func_type
utility_code = UtilityCode.load_cached("ByteArrayAppendObject", "StringTools.c")
else:
return node
new_node = ExprNodes.PythonCapiCallNode(
node.pos, func_name, func_type,
args=[args[0], value],
may_return_none=False,
is_temp=node.is_temp,
utility_code=utility_code,
)
if node.result_is_used:
new_node = new_node.coerce_to(node.type, self.current_env())
return new_node
PyObject_Pop_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
])
PyObject_PopIndex_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("py_index", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("c_index", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("is_signed", PyrexTypes.c_int_type, None),
],
has_varargs=True) # to fake the additional macro args that lack a proper C type
def _handle_simple_method_list_pop(self, node, function, args, is_unbound_method):
return self._handle_simple_method_object_pop(
node, function, args, is_unbound_method, is_list=True)
def _handle_simple_method_object_pop(self, node, function, args, is_unbound_method, is_list=False):
"""Optimistic optimisation as X.pop([n]) is almost always
referring to a list.
"""
if not args:
return node
obj = args[0]
if is_list:
type_name = 'List'
obj = obj.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error="PyExc_AttributeError",
format_args=['pop'])
else:
type_name = 'Object'
if len(args) == 1:
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_Py%s_Pop" % type_name,
self.PyObject_Pop_func_type,
args=[obj],
may_return_none=True,
is_temp=node.is_temp,
utility_code=load_c_utility('pop'),
)
elif len(args) == 2:
index = unwrap_coerced_node(args[1])
py_index = ExprNodes.NoneNode(index.pos)
orig_index_type = index.type
if not index.type.is_int:
if isinstance(index, ExprNodes.IntNode):
py_index = index.coerce_to_pyobject(self.current_env())
index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
elif is_list:
if index.type.is_pyobject:
py_index = index.coerce_to_simple(self.current_env())
index = ExprNodes.CloneNode(py_index)
index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
else:
return node
elif not PyrexTypes.numeric_type_fits(index.type, PyrexTypes.c_py_ssize_t_type):
return node
elif isinstance(index, ExprNodes.IntNode):
py_index = index.coerce_to_pyobject(self.current_env())
# real type might still be larger at runtime
if not orig_index_type.is_int:
orig_index_type = index.type
if not orig_index_type.create_to_py_utility_code(self.current_env()):
return node
convert_func = orig_index_type.to_py_function
conversion_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [PyrexTypes.CFuncTypeArg("intval", orig_index_type, None)])
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_Py%s_PopIndex" % type_name,
self.PyObject_PopIndex_func_type,
args=[obj, py_index, index,
ExprNodes.IntNode(index.pos, value=str(orig_index_type.signed and 1 or 0),
constant_result=orig_index_type.signed and 1 or 0,
type=PyrexTypes.c_int_type),
ExprNodes.RawCNameExprNode(index.pos, PyrexTypes.c_void_type,
orig_index_type.empty_declaration_code()),
ExprNodes.RawCNameExprNode(index.pos, conversion_type, convert_func)],
may_return_none=True,
is_temp=node.is_temp,
utility_code=load_c_utility("pop_index"),
)
return node
single_param_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value = "-1")
def _handle_simple_method_list_sort(self, node, function, args, is_unbound_method):
"""Call PyList_Sort() instead of the 0-argument l.sort().
"""
if len(args) != 1:
return node
return self._substitute_method_call(
node, function, "PyList_Sort", self.single_param_func_type,
            'sort', is_unbound_method, args).coerce_to(node.type, self.current_env())
Pyx_PyDict_GetItem_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
])
def _handle_simple_method_dict_get(self, node, function, args, is_unbound_method):
"""Replace dict.get() by a call to PyDict_GetItem().
"""
if len(args) == 2:
args.append(ExprNodes.NoneNode(node.pos))
elif len(args) != 3:
self._error_wrong_arg_count('dict.get', node, args, "2 or 3")
return node
return self._substitute_method_call(
node, function,
"__Pyx_PyDict_GetItemDefault", self.Pyx_PyDict_GetItem_func_type,
'get', is_unbound_method, args,
may_return_none = True,
utility_code = load_c_utility("dict_getitem_default"))
Pyx_PyDict_SetDefault_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("is_safe_type", PyrexTypes.c_int_type, None),
])
def _handle_simple_method_dict_setdefault(self, node, function, args, is_unbound_method):
"""Replace dict.setdefault() by calls to PyDict_GetItem() and PyDict_SetItem().
"""
if len(args) == 2:
args.append(ExprNodes.NoneNode(node.pos))
elif len(args) != 3:
self._error_wrong_arg_count('dict.setdefault', node, args, "2 or 3")
return node
key_type = args[1].type
if key_type.is_builtin_type:
            is_safe_type = int(key_type.name in
                               ('str', 'bytes', 'unicode', 'float', 'int', 'long', 'bool'))
elif key_type is PyrexTypes.py_object_type:
is_safe_type = -1 # don't know
else:
is_safe_type = 0 # definitely not
args.append(ExprNodes.IntNode(
node.pos, value=str(is_safe_type), constant_result=is_safe_type))
return self._substitute_method_call(
node, function,
"__Pyx_PyDict_SetDefault", self.Pyx_PyDict_SetDefault_func_type,
'setdefault', is_unbound_method, args,
may_return_none=True,
utility_code=load_c_utility('dict_setdefault'))
Pyx_PyInt_BinopInt_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("op2", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("intval", PyrexTypes.c_long_type, None),
PyrexTypes.CFuncTypeArg("inplace", PyrexTypes.c_bint_type, None),
])
Pyx_PyFloat_BinopInt_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("op2", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("fval", PyrexTypes.c_double_type, None),
PyrexTypes.CFuncTypeArg("inplace", PyrexTypes.c_bint_type, None),
])
def _handle_simple_method_object___add__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Add', node, function, args, is_unbound_method)
def _handle_simple_method_object___sub__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method)
def _handle_simple_method_object___eq__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Eq', node, function, args, is_unbound_method)
def _handle_simple_method_object___neq__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Ne', node, function, args, is_unbound_method)
def _handle_simple_method_object___and__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('And', node, function, args, is_unbound_method)
def _handle_simple_method_object___or__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Or', node, function, args, is_unbound_method)
def _handle_simple_method_object___xor__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Xor', node, function, args, is_unbound_method)
def _handle_simple_method_object___rshift__(self, node, function, args, is_unbound_method):
if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode):
return node
if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63):
return node
return self._optimise_num_binop('Rshift', node, function, args, is_unbound_method)
def _handle_simple_method_object___mod__(self, node, function, args, is_unbound_method):
return self._optimise_num_div('Remainder', node, function, args, is_unbound_method)
def _handle_simple_method_object___floordiv__(self, node, function, args, is_unbound_method):
return self._optimise_num_div('FloorDivide', node, function, args, is_unbound_method)
def _handle_simple_method_object___truediv__(self, node, function, args, is_unbound_method):
return self._optimise_num_div('TrueDivide', node, function, args, is_unbound_method)
def _handle_simple_method_object___div__(self, node, function, args, is_unbound_method):
return self._optimise_num_div('Divide', node, function, args, is_unbound_method)
def _optimise_num_div(self, operator, node, function, args, is_unbound_method):
if len(args) != 2 or not args[1].has_constant_result() or args[1].constant_result == 0:
return node
if isinstance(args[1], ExprNodes.IntNode):
if not (-2**30 <= args[1].constant_result <= 2**30):
return node
elif isinstance(args[1], ExprNodes.FloatNode):
if not (-2**53 <= args[1].constant_result <= 2**53):
return node
else:
return node
return self._optimise_num_binop(operator, node, function, args, is_unbound_method)
def _handle_simple_method_float___add__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Add', node, function, args, is_unbound_method)
def _handle_simple_method_float___sub__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method)
def _handle_simple_method_float___truediv__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('TrueDivide', node, function, args, is_unbound_method)
def _handle_simple_method_float___div__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Divide', node, function, args, is_unbound_method)
def _handle_simple_method_float___mod__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Remainder', node, function, args, is_unbound_method)
def _handle_simple_method_float___eq__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Eq', node, function, args, is_unbound_method)
def _handle_simple_method_float___neq__(self, node, function, args, is_unbound_method):
return self._optimise_num_binop('Ne', node, function, args, is_unbound_method)
def _optimise_num_binop(self, operator, node, function, args, is_unbound_method):
"""
Optimise math operators for (likely) float or small integer operations.
"""
if len(args) != 2:
return node
if not node.type.is_pyobject:
return node
# When adding IntNode/FloatNode to something else, assume other operand is also numeric.
        # Prefer constants on the RHS as that allows better size control for some operators.
num_nodes = (ExprNodes.IntNode, ExprNodes.FloatNode)
if isinstance(args[1], num_nodes):
if args[0].type is not PyrexTypes.py_object_type:
return node
numval = args[1]
arg_order = 'ObjC'
elif isinstance(args[0], num_nodes):
if args[1].type is not PyrexTypes.py_object_type:
return node
numval = args[0]
arg_order = 'CObj'
else:
return node
if not numval.has_constant_result():
return node
is_float = isinstance(numval, ExprNodes.FloatNode)
if is_float:
if operator not in ('Add', 'Subtract', 'Remainder', 'TrueDivide', 'Divide', 'Eq', 'Ne'):
return node
elif operator == 'Divide':
# mixed old-/new-style division is not currently optimised for integers
return node
elif abs(numval.constant_result) > 2**30:
return node
args = list(args)
args.append((ExprNodes.FloatNode if is_float else ExprNodes.IntNode)(
numval.pos, value=numval.value, constant_result=numval.constant_result,
type=PyrexTypes.c_double_type if is_float else PyrexTypes.c_long_type))
inplace = node.inplace if isinstance(node, ExprNodes.NumBinopNode) else False
args.append(ExprNodes.BoolNode(node.pos, value=inplace, constant_result=inplace))
utility_code = TempitaUtilityCode.load_cached(
"PyFloatBinop" if is_float else "PyIntBinop", "Optimize.c",
context=dict(op=operator, order=arg_order))
return self._substitute_method_call(
node, function, "__Pyx_Py%s_%s%s" % ('Float' if is_float else 'Int', operator, arg_order),
self.Pyx_PyFloat_BinopInt_func_type if is_float else self.Pyx_PyInt_BinopInt_func_type,
'__%s__' % operator[:3].lower(), is_unbound_method, args,
may_return_none=True,
with_none_check=False,
utility_code=utility_code)
### unicode type methods
PyUnicode_uchar_predicate_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
])
def _inject_unicode_predicate(self, node, function, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
not ustring.arg.type.is_unicode_char:
return node
uchar = ustring.arg
method_name = function.attribute
if method_name == 'istitle':
# istitle() doesn't directly map to Py_UNICODE_ISTITLE()
utility_code = UtilityCode.load_cached(
"py_unicode_istitle", "StringTools.c")
function_name = '__Pyx_Py_UNICODE_ISTITLE'
else:
utility_code = None
function_name = 'Py_UNICODE_%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function,
function_name, self.PyUnicode_uchar_predicate_func_type,
method_name, is_unbound_method, [uchar],
utility_code = utility_code)
if node.type.is_pyobject:
            func_call = func_call.coerce_to_pyobject(self.current_env())
return func_call
_handle_simple_method_unicode_isalnum = _inject_unicode_predicate
_handle_simple_method_unicode_isalpha = _inject_unicode_predicate
_handle_simple_method_unicode_isdecimal = _inject_unicode_predicate
_handle_simple_method_unicode_isdigit = _inject_unicode_predicate
_handle_simple_method_unicode_islower = _inject_unicode_predicate
_handle_simple_method_unicode_isnumeric = _inject_unicode_predicate
_handle_simple_method_unicode_isspace = _inject_unicode_predicate
_handle_simple_method_unicode_istitle = _inject_unicode_predicate
_handle_simple_method_unicode_isupper = _inject_unicode_predicate
PyUnicode_uchar_conversion_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ucs4_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
])
def _inject_unicode_character_conversion(self, node, function, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
not ustring.arg.type.is_unicode_char:
return node
uchar = ustring.arg
method_name = function.attribute
function_name = 'Py_UNICODE_TO%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function,
function_name, self.PyUnicode_uchar_conversion_func_type,
method_name, is_unbound_method, [uchar])
if node.type.is_pyobject:
            func_call = func_call.coerce_to_pyobject(self.current_env())
return func_call
_handle_simple_method_unicode_lower = _inject_unicode_character_conversion
_handle_simple_method_unicode_upper = _inject_unicode_character_conversion
_handle_simple_method_unicode_title = _inject_unicode_character_conversion
PyUnicode_Splitlines_func_type = PyrexTypes.CFuncType(
Builtin.list_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("keepends", PyrexTypes.c_bint_type, None),
])
def _handle_simple_method_unicode_splitlines(self, node, function, args, is_unbound_method):
"""Replace unicode.splitlines(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (1,2):
self._error_wrong_arg_count('unicode.splitlines', node, args, "1 or 2")
return node
self._inject_bint_default_argument(node, args, 1, False)
return self._substitute_method_call(
node, function,
"PyUnicode_Splitlines", self.PyUnicode_Splitlines_func_type,
'splitlines', is_unbound_method, args)
PyUnicode_Split_func_type = PyrexTypes.CFuncType(
Builtin.list_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("sep", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("maxsplit", PyrexTypes.c_py_ssize_t_type, None),
]
)
def _handle_simple_method_unicode_split(self, node, function, args, is_unbound_method):
"""Replace unicode.split(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (1,2,3):
self._error_wrong_arg_count('unicode.split', node, args, "1-3")
return node
if len(args) < 2:
args.append(ExprNodes.NullNode(node.pos))
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "-1")
return self._substitute_method_call(
node, function,
"PyUnicode_Split", self.PyUnicode_Split_func_type,
'split', is_unbound_method, args)
PyUnicode_Join_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("seq", PyrexTypes.py_object_type, None),
])
def _handle_simple_method_unicode_join(self, node, function, args, is_unbound_method):
"""
        unicode.join() materialises its argument into a sequence anyway
        => inline a generator expression argument into a list comprehension.
"""
if len(args) != 2:
self._error_wrong_arg_count('unicode.join', node, args, "2")
return node
if isinstance(args[1], ExprNodes.GeneratorExpressionNode):
gen_expr_node = args[1]
loop_node = gen_expr_node.loop
yield_expression, yield_stat_node = _find_single_yield_expression(loop_node)
if yield_expression is not None:
inlined_genexpr = ExprNodes.InlinedGeneratorExpressionNode(
node.pos, gen_expr_node, orig_func='list',
comprehension_type=Builtin.list_type)
append_node = ExprNodes.ComprehensionAppendNode(
yield_expression.pos,
expr=yield_expression,
target=inlined_genexpr.target)
Visitor.recursively_replace_node(gen_expr_node, yield_stat_node, append_node)
args[1] = inlined_genexpr
return self._substitute_method_call(
node, function,
"PyUnicode_Join", self.PyUnicode_Join_func_type,
'join', is_unbound_method, args)
PyString_Tailmatch_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("str", PyrexTypes.py_object_type, None), # bytes/str/unicode
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
],
exception_value = '-1')
def _handle_simple_method_unicode_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'unicode', 'endswith',
unicode_tailmatch_utility_code, +1)
def _handle_simple_method_unicode_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'unicode', 'startswith',
unicode_tailmatch_utility_code, -1)
def _inject_tailmatch(self, node, function, args, is_unbound_method, type_name,
method_name, utility_code, direction):
"""Replace unicode.startswith(...) and unicode.endswith(...)
by a direct call to the corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('%s.%s' % (type_name, method_name), node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
args.append(ExprNodes.IntNode(
node.pos, value=str(direction), type=PyrexTypes.c_int_type))
method_call = self._substitute_method_call(
node, function,
"__Pyx_Py%s_Tailmatch" % type_name.capitalize(),
self.PyString_Tailmatch_func_type,
method_name, is_unbound_method, args,
utility_code = utility_code)
return method_call.coerce_to(Builtin.bool_type, self.current_env())
PyUnicode_Find_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
],
exception_value = '-2')
def _handle_simple_method_unicode_find(self, node, function, args, is_unbound_method):
return self._inject_unicode_find(
node, function, args, is_unbound_method, 'find', +1)
def _handle_simple_method_unicode_rfind(self, node, function, args, is_unbound_method):
return self._inject_unicode_find(
node, function, args, is_unbound_method, 'rfind', -1)
def _inject_unicode_find(self, node, function, args, is_unbound_method,
method_name, direction):
"""Replace unicode.find(...) and unicode.rfind(...) by a
direct call to the corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('unicode.%s' % method_name, node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
args.append(ExprNodes.IntNode(
node.pos, value=str(direction), type=PyrexTypes.c_int_type))
method_call = self._substitute_method_call(
node, function, "PyUnicode_Find", self.PyUnicode_Find_func_type,
method_name, is_unbound_method, args)
return method_call.coerce_to_pyobject(self.current_env())
PyUnicode_Count_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
],
exception_value = '-1')
def _handle_simple_method_unicode_count(self, node, function, args, is_unbound_method):
"""Replace unicode.count(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (2,3,4):
self._error_wrong_arg_count('unicode.count', node, args, "2-4")
return node
self._inject_int_default_argument(
node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
method_call = self._substitute_method_call(
node, function, "PyUnicode_Count", self.PyUnicode_Count_func_type,
'count', is_unbound_method, args)
return method_call.coerce_to_pyobject(self.current_env())
PyUnicode_Replace_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("replstr", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("maxcount", PyrexTypes.c_py_ssize_t_type, None),
])
def _handle_simple_method_unicode_replace(self, node, function, args, is_unbound_method):
"""Replace unicode.replace(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (3,4):
self._error_wrong_arg_count('unicode.replace', node, args, "3-4")
return node
self._inject_int_default_argument(
node, args, 3, PyrexTypes.c_py_ssize_t_type, "-1")
return self._substitute_method_call(
node, function, "PyUnicode_Replace", self.PyUnicode_Replace_func_type,
'replace', is_unbound_method, args)
PyUnicode_AsEncodedString_func_type = PyrexTypes.CFuncType(
Builtin.bytes_type, [
PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
])
PyUnicode_AsXyzString_func_type = PyrexTypes.CFuncType(
Builtin.bytes_type, [
PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
])
_special_encodings = ['UTF8', 'UTF16', 'Latin1', 'ASCII',
'unicode_escape', 'raw_unicode_escape']
_special_codecs = [ (name, codecs.getencoder(name))
for name in _special_encodings ]
def _handle_simple_method_unicode_encode(self, node, function, args, is_unbound_method):
"""Replace unicode.encode(...) by a direct C-API call to the
corresponding codec.
"""
if len(args) < 1 or len(args) > 3:
self._error_wrong_arg_count('unicode.encode', node, args, '1-3')
return node
string_node = args[0]
if len(args) == 1:
null_node = ExprNodes.NullNode(node.pos)
return self._substitute_method_call(
node, function, "PyUnicode_AsEncodedString",
self.PyUnicode_AsEncodedString_func_type,
'encode', is_unbound_method, [string_node, null_node, null_node])
parameters = self._unpack_encoding_and_error_mode(node.pos, args)
if parameters is None:
return node
encoding, encoding_node, error_handling, error_handling_node = parameters
if encoding and isinstance(string_node, ExprNodes.UnicodeNode):
# constant, so try to do the encoding at compile time
try:
value = string_node.value.encode(encoding, error_handling)
            except Exception:
# well, looks like we can't
pass
else:
value = bytes_literal(value, encoding)
return ExprNodes.BytesNode(string_node.pos, value=value, type=Builtin.bytes_type)
if encoding and error_handling == 'strict':
# try to find a specific encoder function
codec_name = self._find_special_codec_name(encoding)
if codec_name is not None:
encode_function = "PyUnicode_As%sString" % codec_name
return self._substitute_method_call(
node, function, encode_function,
self.PyUnicode_AsXyzString_func_type,
'encode', is_unbound_method, [string_node])
return self._substitute_method_call(
node, function, "PyUnicode_AsEncodedString",
self.PyUnicode_AsEncodedString_func_type,
'encode', is_unbound_method,
[string_node, encoding_node, error_handling_node])
PyUnicode_DecodeXyz_func_ptr_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("size", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
]))
_decode_c_string_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_const_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
])
_decode_bytes_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
])
_decode_cpp_string_func_type = None # lazy init
def _handle_simple_method_bytes_decode(self, node, function, args, is_unbound_method):
"""Replace char*.decode() by a direct C-API call to the
corresponding codec, possibly resolving a slice on the char*.
"""
if not (1 <= len(args) <= 3):
self._error_wrong_arg_count('bytes.decode', node, args, '1-3')
return node
# normalise input nodes
string_node = args[0]
start = stop = None
if isinstance(string_node, ExprNodes.SliceIndexNode):
index_node = string_node
string_node = index_node.base
start, stop = index_node.start, index_node.stop
if not start or start.constant_result == 0:
start = None
if isinstance(string_node, ExprNodes.CoerceToPyTypeNode):
string_node = string_node.arg
string_type = string_node.type
if string_type in (Builtin.bytes_type, Builtin.bytearray_type):
if is_unbound_method:
string_node = string_node.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=['decode', string_type.name])
else:
string_node = string_node.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error="PyExc_AttributeError",
format_args=['decode'])
elif not string_type.is_string and not string_type.is_cpp_string:
# nothing to optimise here
return node
parameters = self._unpack_encoding_and_error_mode(node.pos, args)
if parameters is None:
return node
encoding, encoding_node, error_handling, error_handling_node = parameters
if not start:
start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
elif not start.type.is_int:
start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop and not stop.type.is_int:
stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
# try to find a specific encoder function
codec_name = None
if encoding is not None:
codec_name = self._find_special_codec_name(encoding)
if codec_name is not None:
decode_function = ExprNodes.RawCNameExprNode(
node.pos, type=self.PyUnicode_DecodeXyz_func_ptr_type,
cname="PyUnicode_Decode%s" % codec_name)
encoding_node = ExprNodes.NullNode(node.pos)
else:
decode_function = ExprNodes.NullNode(node.pos)
# build the helper function call
temps = []
if string_type.is_string:
# C string
if not stop:
# use strlen() to find the string length, just as CPython would
if not string_node.is_name:
string_node = UtilNodes.LetRefNode(string_node) # used twice
temps.append(string_node)
stop = ExprNodes.PythonCapiCallNode(
string_node.pos, "strlen", self.Pyx_strlen_func_type,
args=[string_node],
is_temp=False,
utility_code=UtilityCode.load_cached("IncludeStringH", "StringTools.c"),
).coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
helper_func_type = self._decode_c_string_func_type
utility_code_name = 'decode_c_string'
elif string_type.is_cpp_string:
# C++ std::string
if not stop:
stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
constant_result=ExprNodes.not_a_constant)
if self._decode_cpp_string_func_type is None:
# lazy init to reuse the C++ string type
self._decode_cpp_string_func_type = PyrexTypes.CFuncType(
Builtin.unicode_type, [
PyrexTypes.CFuncTypeArg("string", string_type, None),
PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_const_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_const_char_ptr_type, None),
PyrexTypes.CFuncTypeArg("decode_func", self.PyUnicode_DecodeXyz_func_ptr_type, None),
])
helper_func_type = self._decode_cpp_string_func_type
utility_code_name = 'decode_cpp_string'
else:
# Python bytes/bytearray object
if not stop:
stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
constant_result=ExprNodes.not_a_constant)
helper_func_type = self._decode_bytes_func_type
if string_type is Builtin.bytes_type:
utility_code_name = 'decode_bytes'
else:
utility_code_name = 'decode_bytearray'
node = ExprNodes.PythonCapiCallNode(
node.pos, '__Pyx_%s' % utility_code_name, helper_func_type,
args=[string_node, start, stop, encoding_node, error_handling_node, decode_function],
is_temp=node.is_temp,
utility_code=UtilityCode.load_cached(utility_code_name, 'StringTools.c'),
)
for temp in temps[::-1]:
node = UtilNodes.EvalWithTempExprNode(temp, node)
return node
_handle_simple_method_bytearray_decode = _handle_simple_method_bytes_decode
def _find_special_codec_name(self, encoding):
try:
requested_codec = codecs.getencoder(encoding)
except LookupError:
return None
for name, codec in self._special_codecs:
if codec == requested_codec:
if '_' in name:
name = ''.join([s.capitalize()
for s in name.split('_')])
return name
return None
def _unpack_encoding_and_error_mode(self, pos, args):
null_node = ExprNodes.NullNode(pos)
if len(args) >= 2:
encoding, encoding_node = self._unpack_string_and_cstring_node(args[1])
if encoding_node is None:
return None
else:
encoding = None
encoding_node = null_node
if len(args) == 3:
error_handling, error_handling_node = self._unpack_string_and_cstring_node(args[2])
if error_handling_node is None:
return None
if error_handling == 'strict':
error_handling_node = null_node
else:
error_handling = 'strict'
error_handling_node = null_node
return (encoding, encoding_node, error_handling, error_handling_node)
def _unpack_string_and_cstring_node(self, node):
if isinstance(node, ExprNodes.CoerceToPyTypeNode):
node = node.arg
if isinstance(node, ExprNodes.UnicodeNode):
encoding = node.value
node = ExprNodes.BytesNode(
node.pos, value=encoding.as_utf8_string(), type=PyrexTypes.c_const_char_ptr_type)
elif isinstance(node, (ExprNodes.StringNode, ExprNodes.BytesNode)):
encoding = node.value.decode('ISO-8859-1')
node = ExprNodes.BytesNode(
node.pos, value=node.value, type=PyrexTypes.c_const_char_ptr_type)
elif node.type is Builtin.bytes_type:
encoding = None
node = node.coerce_to(PyrexTypes.c_const_char_ptr_type, self.current_env())
elif node.type.is_string:
encoding = None
else:
encoding = node = None
return encoding, node
def _handle_simple_method_str_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'str', 'endswith',
str_tailmatch_utility_code, +1)
def _handle_simple_method_str_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'str', 'startswith',
str_tailmatch_utility_code, -1)
def _handle_simple_method_bytes_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytes', 'endswith',
bytes_tailmatch_utility_code, +1)
def _handle_simple_method_bytes_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytes', 'startswith',
bytes_tailmatch_utility_code, -1)
''' # disabled for now, enable when we consider it worth it (see StringTools.c)
def _handle_simple_method_bytearray_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytearray', 'endswith',
bytes_tailmatch_utility_code, +1)
def _handle_simple_method_bytearray_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytearray', 'startswith',
bytes_tailmatch_utility_code, -1)
'''
### helpers
def _substitute_method_call(self, node, function, name, func_type,
attr_name, is_unbound_method, args=(),
utility_code=None, is_temp=None,
may_return_none=ExprNodes.PythonCapiCallNode.may_return_none,
with_none_check=True):
args = list(args)
if with_none_check and args and not args[0].is_literal:
self_arg = args[0]
if is_unbound_method:
self_arg = self_arg.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=[attr_name, function.obj.name])
else:
                self_arg = self_arg.as_none_safe_node(
                    "'NoneType' object has no attribute '%s'",
                    error="PyExc_AttributeError",
                    format_args=[attr_name])
args[0] = self_arg
if is_temp is None:
is_temp = node.is_temp
        return ExprNodes.PythonCapiCallNode(
            node.pos, name, func_type,
            args=args,
            is_temp=is_temp,
            utility_code=utility_code,
            may_return_none=may_return_none,
            result_is_used=node.result_is_used,
        )
def _inject_int_default_argument(self, node, args, arg_index, type, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
args.append(ExprNodes.IntNode(node.pos, value=str(default_value),
type=type, constant_result=default_value))
else:
args[arg_index] = args[arg_index].coerce_to(type, self.current_env())
def _inject_bint_default_argument(self, node, args, arg_index, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
default_value = bool(default_value)
args.append(ExprNodes.BoolNode(node.pos, value=default_value,
constant_result=default_value))
else:
args[arg_index] = args[arg_index].coerce_to_boolean(self.current_env())
unicode_tailmatch_utility_code = UtilityCode.load_cached('unicode_tailmatch', 'StringTools.c')
bytes_tailmatch_utility_code = UtilityCode.load_cached('bytes_tailmatch', 'StringTools.c')
str_tailmatch_utility_code = UtilityCode.load_cached('str_tailmatch', 'StringTools.c')
class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
"""Calculate the result of constant expressions to store it in
``expr_node.constant_result``, and replace trivial cases by their
constant result.
General rules:
- We calculate float constants to make them available to the
compiler, but we do not aggregate them into a single literal
node to prevent any loss of precision.
- We recursively calculate constants from non-literal nodes to
make them available to the compiler, but we only aggregate
literal nodes at each step. Non-literal nodes are never merged
into a single node.
"""
def __init__(self, reevaluate=False):
"""
The reevaluate argument specifies whether constant values that were
previously computed should be recomputed.
"""
super(ConstantFolding, self).__init__()
self.reevaluate = reevaluate
def _calculate_const(self, node):
if (not self.reevaluate and
node.constant_result is not ExprNodes.constant_value_not_set):
return
# make sure we always set the value
not_a_constant = ExprNodes.not_a_constant
node.constant_result = not_a_constant
# check if all children are constant
children = self.visitchildren(node)
for child_result in children.values():
if type(child_result) is list:
for child in child_result:
if getattr(child, 'constant_result', not_a_constant) is not_a_constant:
return
elif getattr(child_result, 'constant_result', not_a_constant) is not_a_constant:
return
# now try to calculate the real constant value
try:
node.calculate_constant_result()
# if node.constant_result is not ExprNodes.not_a_constant:
# print node.__class__.__name__, node.constant_result
except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
# ignore all 'normal' errors here => no constant result
pass
except Exception:
# this looks like a real error
import traceback, sys
traceback.print_exc(file=sys.stdout)
NODE_TYPE_ORDER = [ExprNodes.BoolNode, ExprNodes.CharNode,
ExprNodes.IntNode, ExprNodes.FloatNode]
def _widest_node_class(self, *nodes):
try:
return self.NODE_TYPE_ORDER[
max(map(self.NODE_TYPE_ORDER.index, map(type, nodes)))]
except ValueError:
return None
def _bool_node(self, node, value):
value = bool(value)
return ExprNodes.BoolNode(node.pos, value=value, constant_result=value)
def visit_ExprNode(self, node):
self._calculate_const(node)
return node
def visit_UnopNode(self, node):
self._calculate_const(node)
if not node.has_constant_result():
if node.operator == '!':
return self._handle_NotNode(node)
return node
if not node.operand.is_literal:
return node
if node.operator == '!':
return self._bool_node(node, node.constant_result)
elif isinstance(node.operand, ExprNodes.BoolNode):
return ExprNodes.IntNode(node.pos, value=str(int(node.constant_result)),
type=PyrexTypes.c_int_type,
constant_result=int(node.constant_result))
elif node.operator == '+':
return self._handle_UnaryPlusNode(node)
elif node.operator == '-':
return self._handle_UnaryMinusNode(node)
return node
_negate_operator = {
'in': 'not_in',
'not_in': 'in',
'is': 'is_not',
'is_not': 'is'
}.get
def _handle_NotNode(self, node):
operand = node.operand
if isinstance(operand, ExprNodes.PrimaryCmpNode):
operator = self._negate_operator(operand.operator)
if operator:
node = copy.copy(operand)
node.operator = operator
node = self.visit_PrimaryCmpNode(node)
return node
def _handle_UnaryMinusNode(self, node):
def _negate(value):
if value.startswith('-'):
value = value[1:]
else:
value = '-' + value
return value
node_type = node.operand.type
if isinstance(node.operand, ExprNodes.FloatNode):
# this is a safe operation
return ExprNodes.FloatNode(node.pos, value=_negate(node.operand.value),
type=node_type,
constant_result=node.constant_result)
if node_type.is_int and node_type.signed or \
isinstance(node.operand, ExprNodes.IntNode) and node_type.is_pyobject:
return ExprNodes.IntNode(node.pos, value=_negate(node.operand.value),
type=node_type,
longness=node.operand.longness,
constant_result=node.constant_result)
return node
def _handle_UnaryPlusNode(self, node):
if (node.operand.has_constant_result() and
node.constant_result == node.operand.constant_result):
return node.operand
return node
def visit_BoolBinopNode(self, node):
self._calculate_const(node)
if not node.operand1.has_constant_result():
return node
if node.operand1.constant_result:
if node.operator == 'and':
return node.operand2
else:
return node.operand1
else:
if node.operator == 'and':
return node.operand1
else:
return node.operand2
def visit_BinopNode(self, node):
self._calculate_const(node)
if node.constant_result is ExprNodes.not_a_constant:
return node
if isinstance(node.constant_result, float):
return node
operand1, operand2 = node.operand1, node.operand2
if not operand1.is_literal or not operand2.is_literal:
return node
# now inject a new constant node with the calculated value
try:
type1, type2 = operand1.type, operand2.type
if type1 is None or type2 is None:
return node
except AttributeError:
return node
if type1.is_numeric and type2.is_numeric:
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
else:
widest_type = PyrexTypes.py_object_type
target_class = self._widest_node_class(operand1, operand2)
if target_class is None:
return node
elif target_class is ExprNodes.BoolNode and node.operator in '+-//<<%**>>':
# C arithmetic results in at least an int type
target_class = ExprNodes.IntNode
elif target_class is ExprNodes.CharNode and node.operator in '+-//<<%**>>&|^':
# C arithmetic results in at least an int type
target_class = ExprNodes.IntNode
if target_class is ExprNodes.IntNode:
unsigned = getattr(operand1, 'unsigned', '') and \
getattr(operand2, 'unsigned', '')
longness = "LL"[:max(len(getattr(operand1, 'longness', '')),
len(getattr(operand2, 'longness', '')))]
new_node = ExprNodes.IntNode(pos=node.pos,
unsigned=unsigned, longness=longness,
value=str(int(node.constant_result)),
constant_result=int(node.constant_result))
# IntNode is smart about the type it chooses, so we just
# make sure we were not smarter this time
if widest_type.is_pyobject or new_node.type.is_pyobject:
new_node.type = PyrexTypes.py_object_type
else:
new_node.type = PyrexTypes.widest_numeric_type(widest_type, new_node.type)
else:
if target_class is ExprNodes.BoolNode:
node_value = node.constant_result
else:
node_value = str(node.constant_result)
            new_node = target_class(pos=node.pos, type=widest_type,
                                    value=node_value,
                                    constant_result=node.constant_result)
return new_node
def visit_AddNode(self, node):
self._calculate_const(node)
if node.constant_result is ExprNodes.not_a_constant:
return node
if node.operand1.is_string_literal and node.operand2.is_string_literal:
# some people combine string literals with a '+'
str1, str2 = node.operand1, node.operand2
if isinstance(str1, ExprNodes.UnicodeNode) and isinstance(str2, ExprNodes.UnicodeNode):
bytes_value = None
if str1.bytes_value is not None and str2.bytes_value is not None:
if str1.bytes_value.encoding == str2.bytes_value.encoding:
bytes_value = bytes_literal(
str1.bytes_value + str2.bytes_value,
str1.bytes_value.encoding)
string_value = EncodedString(node.constant_result)
return ExprNodes.UnicodeNode(
str1.pos, value=string_value, constant_result=node.constant_result, bytes_value=bytes_value)
elif isinstance(str1, ExprNodes.BytesNode) and isinstance(str2, ExprNodes.BytesNode):
if str1.value.encoding == str2.value.encoding:
bytes_value = bytes_literal(node.constant_result, str1.value.encoding)
return ExprNodes.BytesNode(str1.pos, value=bytes_value, constant_result=node.constant_result)
# all other combinations are rather complicated
# to get right in Py2/3: encodings, unicode escapes, ...
return self.visit_BinopNode(node)
def visit_MulNode(self, node):
self._calculate_const(node)
if node.operand1.is_sequence_constructor:
return self._calculate_constant_seq(node, node.operand1, node.operand2)
if isinstance(node.operand1, ExprNodes.IntNode) and \
node.operand2.is_sequence_constructor:
return self._calculate_constant_seq(node, node.operand2, node.operand1)
return self.visit_BinopNode(node)
def _calculate_constant_seq(self, node, sequence_node, factor):
if factor.constant_result != 1 and sequence_node.args:
if isinstance(factor.constant_result, _py_int_types) and factor.constant_result <= 0:
del sequence_node.args[:]
sequence_node.mult_factor = None
elif sequence_node.mult_factor is not None:
if (isinstance(factor.constant_result, _py_int_types) and
isinstance(sequence_node.mult_factor.constant_result, _py_int_types)):
value = sequence_node.mult_factor.constant_result * factor.constant_result
sequence_node.mult_factor = ExprNodes.IntNode(
sequence_node.mult_factor.pos,
value=str(value), constant_result=value)
else:
# don't know if we can combine the factors, so don't
return self.visit_BinopNode(node)
else:
sequence_node.mult_factor = factor
return sequence_node
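    # Behaviour sketched from _calculate_constant_seq above: ``[1, 2] * 0``
    # collapses to an empty sequence, ``([1] * 2) * 3`` merges the literal
    # factors into ``[1] * 6``, and a non-literal factor is simply attached as
    # the sequence's ``mult_factor`` for evaluation at runtime.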
def visit_MergedDictNode(self, node):
"""Unpack **args in place if we can."""
self.visitchildren(node)
args = []
items = []
def add(arg):
if arg.is_dict_literal:
if items:
items[0].key_value_pairs.extend(arg.key_value_pairs)
else:
items.append(arg)
elif isinstance(arg, ExprNodes.MergedDictNode):
for child_arg in arg.keyword_args:
add(child_arg)
else:
if items:
args.append(items[0])
del items[:]
args.append(arg)
for arg in node.keyword_args:
add(arg)
if items:
args.append(items[0])
if len(args) == 1:
arg = args[0]
if arg.is_dict_literal or isinstance(arg, ExprNodes.MergedDictNode):
return arg
node.keyword_args[:] = args
self._calculate_const(node)
return node
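    # Sketch of the unpacking above: ``{**{'a': 1}, **{'b': 2}}`` merges into
    # the single dict literal ``{'a': 1, 'b': 2}``, whereas a runtime mapping
    # in ``{**{'a': 1}, **kw}`` keeps the merge node with two arguments.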
def visit_MergedSequenceNode(self, node):
"""Unpack *args in place if we can."""
self.visitchildren(node)
is_set = node.type is Builtin.set_type
args = []
values = []
def add(arg):
if (is_set and arg.is_set_literal) or (arg.is_sequence_constructor and not arg.mult_factor):
if values:
values[0].args.extend(arg.args)
else:
values.append(arg)
elif isinstance(arg, ExprNodes.MergedSequenceNode):
for child_arg in arg.args:
add(child_arg)
else:
if values:
args.append(values[0])
del values[:]
args.append(arg)
for arg in node.args:
add(arg)
if values:
args.append(values[0])
if len(args) == 1:
arg = args[0]
if ((is_set and arg.is_set_literal) or
(arg.is_sequence_constructor and arg.type is node.type) or
isinstance(arg, ExprNodes.MergedSequenceNode)):
return arg
node.args[:] = args
self._calculate_const(node)
return node
def visit_SequenceNode(self, node):
"""Unpack *args in place if we can."""
self.visitchildren(node)
args = []
for arg in node.args:
if not arg.is_starred:
args.append(arg)
elif arg.target.is_sequence_constructor and not arg.target.mult_factor:
args.extend(arg.target.args)
else:
args.append(arg)
node.args[:] = args
self._calculate_const(node)
return node
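    # Sketch: ``[1, *[2, 3], x]`` flattens to the single sequence
    # ``[1, 2, 3, x]``; a starred target that carries a mult_factor is left
    # untouched.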
def visit_PrimaryCmpNode(self, node):
# calculate constant partial results in the comparison cascade
self.visitchildren(node, ['operand1'])
left_node = node.operand1
cmp_node = node
while cmp_node is not None:
self.visitchildren(cmp_node, ['operand2'])
right_node = cmp_node.operand2
cmp_node.constant_result = not_a_constant
if left_node.has_constant_result() and right_node.has_constant_result():
try:
cmp_node.calculate_cascaded_constant_result(left_node.constant_result)
except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
pass # ignore all 'normal' errors here => no constant result
left_node = right_node
cmp_node = cmp_node.cascade
if not node.cascade:
if node.has_constant_result():
return self._bool_node(node, node.constant_result)
return node
# collect partial cascades: [[value, CmpNode...], [value, CmpNode, ...], ...]
cascades = [[node.operand1]]
final_false_result = []
def split_cascades(cmp_node):
if cmp_node.has_constant_result():
if not cmp_node.constant_result:
# False => short-circuit
final_false_result.append(self._bool_node(cmp_node, False))
return
else:
# True => discard and start new cascade
cascades.append([cmp_node.operand2])
else:
# not constant => append to current cascade
cascades[-1].append(cmp_node)
if cmp_node.cascade:
split_cascades(cmp_node.cascade)
split_cascades(node)
cmp_nodes = []
for cascade in cascades:
if len(cascade) < 2:
continue
cmp_node = cascade[1]
pcmp_node = ExprNodes.PrimaryCmpNode(
cmp_node.pos,
operand1=cascade[0],
operator=cmp_node.operator,
operand2=cmp_node.operand2,
constant_result=not_a_constant)
cmp_nodes.append(pcmp_node)
last_cmp_node = pcmp_node
for cmp_node in cascade[2:]:
last_cmp_node.cascade = cmp_node
last_cmp_node = cmp_node
last_cmp_node.cascade = None
if final_false_result:
# last cascade was constant False
cmp_nodes.append(final_false_result[0])
elif not cmp_nodes:
# only constants, but no False result
return self._bool_node(node, True)
node = cmp_nodes[0]
if len(cmp_nodes) == 1:
if node.has_constant_result():
return self._bool_node(node, node.constant_result)
else:
for cmp_node in cmp_nodes[1:]:
node = ExprNodes.BoolBinopNode(
node.pos,
operand1=node,
operator='and',
operand2=cmp_node,
constant_result=not_a_constant)
return node
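    # Cascade splitting sketched on an example: in ``a < 1 < 2 < b`` the
    # constant ``1 < 2`` is known True and dropped, leaving
    # ``(a < 1) and (2 < b)``; a member known to be False replaces the
    # remainder of the chain with the constant False.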
def visit_CondExprNode(self, node):
self._calculate_const(node)
if not node.test.has_constant_result():
return node
if node.test.constant_result:
return node.true_val
else:
return node.false_val
def visit_IfStatNode(self, node):
self.visitchildren(node)
# eliminate dead code based on constant condition results
if_clauses = []
for if_clause in node.if_clauses:
condition = if_clause.condition
if condition.has_constant_result():
if condition.constant_result:
# always true => subsequent clauses can safely be dropped
node.else_clause = if_clause.body
break
# else: false => drop clause
else:
# unknown result => normal runtime evaluation
if_clauses.append(if_clause)
if if_clauses:
node.if_clauses = if_clauses
return node
elif node.else_clause:
return node.else_clause
else:
return Nodes.StatListNode(node.pos, stats=[])
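    # Dead code elimination sketched: ``if True: A`` reduces to ``A``,
    # ``if False: A`` without an else branch reduces to an empty statement
    # list, and a constant-true clause in the middle of a chain becomes the
    # else clause of the remaining runtime-evaluated clauses.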
def visit_SliceIndexNode(self, node):
self._calculate_const(node)
# normalise start/stop values
if node.start is None or node.start.constant_result is None:
start = node.start = None
else:
start = node.start.constant_result
if node.stop is None or node.stop.constant_result is None:
stop = node.stop = None
else:
stop = node.stop.constant_result
# cut down sliced constant sequences
if node.constant_result is not not_a_constant:
base = node.base
if base.is_sequence_constructor and base.mult_factor is None:
base.args = base.args[start:stop]
return base
elif base.is_string_literal:
base = base.as_sliced_node(start, stop)
if base is not None:
return base
return node
def visit_ComprehensionNode(self, node):
self.visitchildren(node)
if isinstance(node.loop, Nodes.StatListNode) and not node.loop.stats:
# loop was pruned already => transform into literal
if node.type is Builtin.list_type:
return ExprNodes.ListNode(
node.pos, args=[], constant_result=[])
elif node.type is Builtin.set_type:
return ExprNodes.SetNode(
node.pos, args=[], constant_result=set())
elif node.type is Builtin.dict_type:
return ExprNodes.DictNode(
node.pos, key_value_pairs=[], constant_result={})
return node
def visit_ForInStatNode(self, node):
self.visitchildren(node)
sequence = node.iterator.sequence
if isinstance(sequence, ExprNodes.SequenceNode):
if not sequence.args:
if node.else_clause:
return node.else_clause
else:
# don't break list comprehensions
return Nodes.StatListNode(node.pos, stats=[])
# iterating over a list literal? => tuples are more efficient
if isinstance(sequence, ExprNodes.ListNode):
node.iterator.sequence = sequence.as_tuple()
return node
def visit_WhileStatNode(self, node):
self.visitchildren(node)
if node.condition and node.condition.has_constant_result():
if node.condition.constant_result:
node.condition = None
node.else_clause = None
else:
return node.else_clause
return node
def visit_ExprStatNode(self, node):
self.visitchildren(node)
if not isinstance(node.expr, ExprNodes.ExprNode):
# ParallelRangeTransform does this ...
return node
# drop unused constant expressions
if node.expr.has_constant_result():
return None
return node
# in the future, other nodes can have their own handler method here
# that can replace them with a constant result node
visit_Node = Visitor.VisitorTransform.recurse_to_children
class FinalOptimizePhase(Visitor.CythonTransform, Visitor.NodeRefCleanupMixin):
"""
This visitor handles several commuting optimizations, and is run
just before the C code generation phase.
The optimizations currently implemented in this class are:
- eliminate None assignment and refcounting for first assignment.
- isinstance -> typecheck for cdef types
- eliminate checks for None and/or types that became redundant after tree changes
- replace Python function calls that look like method calls by a faster PyMethodCallNode
"""
def visit_SingleAssignmentNode(self, node):
"""Avoid redundant initialisation of local variables before their
first assignment.
"""
self.visitchildren(node)
if node.first:
lhs = node.lhs
lhs.lhs_of_first_assignment = True
return node
def visit_SimpleCallNode(self, node):
"""
Replace generic calls to isinstance(x, type) by a more efficient type check.
Replace likely Python method calls by a specialised PyMethodCallNode.
"""
self.visitchildren(node)
function = node.function
if function.type.is_cfunction and function.is_name:
if function.name == 'isinstance' and len(node.args) == 2:
type_arg = node.args[1]
if type_arg.type.is_builtin_type and type_arg.type.name == 'type':
cython_scope = self.context.cython_scope
function.entry = cython_scope.lookup('PyObject_TypeCheck')
function.type = function.entry.type
PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type)
node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
elif (self.current_directives.get("optimize.unpack_method_calls")
and node.is_temp and function.type.is_pyobject):
            # optimise simple Python method calls
if isinstance(node.arg_tuple, ExprNodes.TupleNode) and not (
node.arg_tuple.mult_factor or (node.arg_tuple.is_literal and node.arg_tuple.args)):
# simple call, now exclude calls to objects that are definitely not methods
may_be_a_method = True
if function.type is Builtin.type_type:
may_be_a_method = False
elif function.is_attribute:
if function.entry and function.entry.type.is_cfunction:
# optimised builtin method
may_be_a_method = False
elif function.is_name:
entry = function.entry
if entry.is_builtin or entry.type.is_cfunction:
may_be_a_method = False
elif entry.cf_assignments:
# local functions/classes are definitely not methods
non_method_nodes = (ExprNodes.PyCFunctionNode, ExprNodes.ClassNode, ExprNodes.Py3ClassNode)
may_be_a_method = any(
assignment.rhs and not isinstance(assignment.rhs, non_method_nodes)
for assignment in entry.cf_assignments)
if may_be_a_method:
if (node.self and function.is_attribute and
isinstance(function.obj, ExprNodes.CloneNode) and function.obj.arg is node.self):
# function self object was moved into a CloneNode => undo
function.obj = function.obj.arg
node = self.replace(node, ExprNodes.PyMethodCallNode.from_node(
node, function=function, arg_tuple=node.arg_tuple, type=node.type))
return node
def visit_PyTypeTestNode(self, node):
"""Remove tests for alternatively allowed None values from
type tests when we know that the argument cannot be None
anyway.
"""
self.visitchildren(node)
if not node.notnone:
if not node.arg.may_be_none():
node.notnone = True
return node
def visit_NoneCheckNode(self, node):
"""Remove None checks from expressions that definitely do not
carry a None value.
"""
self.visitchildren(node)
if not node.arg.may_be_none():
return node.arg
return node
class ConsolidateOverflowCheck(Visitor.CythonTransform):
"""
This class facilitates the sharing of overflow checking among all nodes
of a nested arithmetic expression. For example, given the expression
    a*b + c, where a, b, and c are all possibly overflowing ints, the entire
sequence will be evaluated and the overflow bit checked only at the end.
"""
overflow_bit_node = None
def visit_Node(self, node):
if self.overflow_bit_node is not None:
saved = self.overflow_bit_node
self.overflow_bit_node = None
self.visitchildren(node)
self.overflow_bit_node = saved
else:
self.visitchildren(node)
return node
def visit_NumBinopNode(self, node):
if node.overflow_check and node.overflow_fold:
top_level_overflow = self.overflow_bit_node is None
if top_level_overflow:
self.overflow_bit_node = node
else:
node.overflow_bit_node = self.overflow_bit_node
node.overflow_check = False
self.visitchildren(node)
if top_level_overflow:
self.overflow_bit_node = None
else:
self.visitchildren(node)
return node
|
m0r13/derpbot
|
refs/heads/master
|
derpbot/plugins/imdb.py
|
1
|
from derpbot import plugin
import urllib
import urllib2
import json
def get_imdb_info(title):
url = "http://imdbapi.com/?" + urllib.urlencode(dict(t=title))
data = urllib2.urlopen(url).read()
data = json.loads(data)
return data
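# Note: the JSON fields consumed below ("Response", "Error", "Type", "Title",
# "Year", "imdbRating", "Genre", "imdbID") are assumptions taken from how
# IMDBPlugin.imdb() uses the imdbapi.com payload, not from API documentation.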
class IMDBPlugin(plugin.Plugin):
def __init__(self, bot, config):
super(IMDBPlugin, self).__init__(bot, config)
def enable(self):
pass
def disable(self):
pass
@plugin.command("imdb (.*)",
name="imdb",
usage="imdb [title]",
desc="Shows some informations about a movie or tv series from IMDB.")
def imdb(self, channel, nick, match, message, args):
title = " ".join(args[1:])
data = get_imdb_info(title)
if data["Response"] == "False":
if "Error" in data:
channel.sendto(nick, "Error: %s" % data["Error"])
else:
channel.sendto(nick, "An unknown error happened.")
self.log_warning("Got an unknown error from IMDB searching '%s': %s" % (title, repr(data)))
else:
type = data["Type"]
format = (type.upper(), data["Title"], data["Year"],
data["imdbRating"], data["Genre"],
"http://imdb.com/title/%s" % data["imdbID"])
message = ("[%s] Title: %s | Year: %s | Rating: %s" + \
" | Genre: %s | IMDB Link: %s") % format
channel.send(message)
|
ivanbusthomi/inasafe
|
refs/heads/develop
|
safe/report/test/test_util.py
|
1
|
# coding=utf-8
"""Unittest for report utilities."""
import logging
import unittest
from safe.definitions.exposure import (
exposure_structure,
exposure_population,
exposure_road)
from safe.definitions.exposure_classifications import (
generic_structure_classes,
generic_road_classes)
from safe.definitions.hazard import (
hazard_generic,
hazard_earthquake,
hazard_tsunami,
hazard_cyclone)
from safe.definitions.hazard_classifications import (
generic_hazard_classes,
earthquake_mmi_scale,
tsunami_hazard_classes,
cyclone_au_bom_hazard_classes)
from safe.report.extractors.util import (
layer_definition_type,
layer_hazard_classification,
resolve_from_dictionary,
retrieve_exposure_classes_lists)
from safe.test.utilities import (
standard_data_path,
get_qgis_app)
from safe.gis.tools import load_layer
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
LOGGER = logging.getLogger('InaSAFE')
class TestReportUtil(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.layer_paths_list = [
['gisv4', 'hazard', 'classified_vector.geojson'],
['gisv4', 'hazard', 'earthquake.asc'],
['gisv4', 'hazard', 'tsunami_vector.geojson'],
['gisv4', 'hazard', 'cyclone_AUBOM_km_h.asc'],
['gisv4', 'exposure', 'building-points.geojson'],
['gisv4', 'exposure', 'buildings.geojson'],
['gisv4', 'exposure', 'population.geojson'],
['gisv4', 'exposure', 'roads.geojson'],
]
def test_layer_definition_type(self):
"""Test layer_definition_type method.
.. versionadded:: 4.0
"""
layer_paths = self.layer_paths_list
expected_definitions = [
hazard_generic,
hazard_earthquake,
hazard_tsunami,
hazard_cyclone,
exposure_structure,
exposure_structure,
exposure_population,
exposure_road,
]
for layer_path, expected_definition in zip(
layer_paths, expected_definitions):
path = standard_data_path(*layer_path)
layer, _ = load_layer(path)
actual_definition = layer_definition_type(layer)
try:
self.assertEqual(expected_definition, actual_definition)
except Exception as e:
LOGGER.error('Layer path: {path}'.format(
path=path))
LOGGER.error('Expected {name}'.format(
**expected_definition))
LOGGER.error('Actual {name}'.format(
**actual_definition))
raise e
    # We are using multi-hazard classification, so this test would fail:
    # the layer needs to be run through an impact function first, or we can
    # inject the classification keyword for this test (as done below).
def test_layer_hazard_classification(self):
"""Test layer_hazard_classification method.
.. versionadded:: 4.0
"""
layer_paths = self.layer_paths_list
expected_classifications = [
generic_hazard_classes,
earthquake_mmi_scale,
tsunami_hazard_classes,
cyclone_au_bom_hazard_classes,
None,
None,
None,
None,
]
for layer_path, expected_classification in zip(
layer_paths, expected_classifications):
path = standard_data_path(*layer_path)
layer, _ = load_layer(path)
# inject classification keyword
if expected_classification:
layer.keywords['classification'] = (
expected_classification['key'])
actual_classification = layer_hazard_classification(layer)
try:
self.assertEqual(
expected_classification, actual_classification)
except Exception as e:
LOGGER.error('Layer path: {path}'.format(
path=path))
LOGGER.error('Expected {name}'.format(
**expected_classification))
LOGGER.error('Actual {name}'.format(
**actual_classification))
raise e
def test_resolve_from_dictionary(self):
"""Test resolve_from_dictionary method.
.. versionadded:: 4.0
"""
test_dict = {
'foo': {
'bar': {
'bin': {
'baz': 1
}
}
},
'foobar': 10
}
# test nested resolve
expected = 1
actual = resolve_from_dictionary(test_dict, [
'foo', 'bar', 'bin', 'baz'])
self.assertEqual(expected, actual)
# test single resolve using list
expected = 10
actual = resolve_from_dictionary(test_dict, ['foobar'])
self.assertEqual(expected, actual)
# test single resolve using shorthand notation
expected = 10
actual = resolve_from_dictionary(test_dict, 'foobar')
self.assertEqual(expected, actual)
def test_retrieve_exposure_classes_lists(self):
"""Test retrieve_exposure_classes_lists method.
.. versionadded:: 4.0
"""
layer_paths = self.layer_paths_list
expected_classes_lists = [
None,
None,
None,
None,
generic_structure_classes['classes'],
generic_structure_classes['classes'],
None,
generic_road_classes['classes']
]
for layer_path, expected_classes in zip(
layer_paths, expected_classes_lists):
path = standard_data_path(*layer_path)
layer, _ = load_layer(path)
actual_classes = retrieve_exposure_classes_lists(layer.keywords)
try:
self.assertEqual(
expected_classes, actual_classes)
except Exception as e:
LOGGER.error('Layer path: {path}'.format(
path=path))
LOGGER.error('Expected {classes}'.format(
classes=expected_classes))
LOGGER.error('Actual {classes}'.format(
classes=actual_classes))
raise e
|
duyetdev/openerp-6.1.1
|
refs/heads/master
|
openerp/addons/mrp_repair/report/order.py
|
9
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from report import report_sxw
class order(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(order, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'total': self.total,
})
def total(self, repair):
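        # Grand total = operation line subtotals + fee line subtotals
        # + the order's tax amount.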
total = 0.0
for operation in repair.operations:
total += operation.price_subtotal
for fee in repair.fees_lines:
total += fee.price_subtotal
total = total + repair.amount_tax
return total
report_sxw.report_sxw('report.repair.order','mrp.repair','addons/mrp_repair/report/order.rml',parser=order)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
riteshshrv/django
|
refs/heads/master
|
django/core/cache/backends/locmem.py
|
586
|
"Thread-safe in-memory cache backend."
import time
from contextlib import contextmanager
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.synch import RWLock
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}
@contextmanager
def dummy():
"""A context manager that does nothing special."""
yield
class LocMemCache(BaseCache):
def __init__(self, name, params):
BaseCache.__init__(self, params)
self._cache = _caches.setdefault(name, {})
self._expire_info = _expire_info.setdefault(name, {})
self._lock = _locks.setdefault(name, RWLock())
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
if self._has_expired(key):
self._set(key, pickled, timeout)
return True
return False
def get(self, key, default=None, version=None, acquire_lock=True):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = None
with (self._lock.reader() if acquire_lock else dummy()):
if not self._has_expired(key):
pickled = self._cache[key]
if pickled is not None:
try:
return pickle.loads(pickled)
except pickle.PickleError:
return default
with (self._lock.writer() if acquire_lock else dummy()):
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return default
def _set(self, key, value, timeout=DEFAULT_TIMEOUT):
if len(self._cache) >= self._max_entries:
self._cull()
self._cache[key] = value
self._expire_info[key] = self.get_backend_timeout(timeout)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
self._set(key, pickled, timeout)
def incr(self, key, delta=1, version=None):
with self._lock.writer():
value = self.get(key, version=version, acquire_lock=False)
if value is None:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
key = self.make_key(key, version=version)
pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
self._cache[key] = pickled
return new_value
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.reader():
if not self._has_expired(key):
return True
with self._lock.writer():
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return False
def _has_expired(self, key):
exp = self._expire_info.get(key, -1)
if exp is None or exp > time.time():
return False
return True
def _cull(self):
if self._cull_frequency == 0:
self.clear()
else:
doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
for k in doomed:
self._delete(k)
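    # Culling sketch: with CULL_FREQUENCY left at its BaseCache default of 3,
    # _cull evicts roughly every third key (in current dict order) once
    # _max_entries is reached; a frequency of 0 clears the whole cache.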
def _delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
del self._expire_info[key]
except KeyError:
pass
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.writer():
self._delete(key)
def clear(self):
self._cache.clear()
self._expire_info.clear()
|
yinquan529/platform-external-chromium_org
|
refs/heads/master
|
media/tools/layout_tests/layouttest_analyzer_helpers.py
|
120
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions for the layout test analyzer."""
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import fileinput
import os
import pickle
import re
import smtplib
import socket
import sys
import time
from bug import Bug
from test_expectations_history import TestExpectationsHistory
DEFAULT_TEST_EXPECTATION_PATH = ('trunk/LayoutTests/TestExpectations')
LEGACY_DEFAULT_TEST_EXPECTATION_PATH = (
'trunk/LayoutTests/platform/chromium/test_expectations.txt')
REVISION_LOG_URL = ('http://build.chromium.org/f/chromium/perf/dashboard/ui/'
'changelog_blink.html?url=/trunk/LayoutTests/%s&range=%d:%d')
DEFAULT_REVISION_VIEW_URL = 'http://src.chromium.org/viewvc/blink?revision=%s'
class AnalyzerResultMap:
"""A class to deal with joined result produed by the analyzer.
The join is done between layouttests and the test_expectations object
(based on the test expectation file). The instance variable |result_map|
contains the following keys: 'whole','skip','nonskip'. The value of 'whole'
contains information about all layouttests. The value of 'skip' contains
information about skipped layouttests where it has 'SKIP' in its entry in
the test expectation file. The value of 'nonskip' contains all information
about non skipped layout tests, which are in the test expectation file but
not skipped. The information is exactly same as the one parsed by the
analyzer.
"""
def __init__(self, test_info_map):
"""Initialize the AnalyzerResultMap based on test_info_map.
Test_info_map contains all layouttest information. The job here is to
classify them as 'whole', 'skip' or 'nonskip' based on that information.
Args:
test_info_map: the result map of |layouttests.JoinWithTestExpectation|.
The key of the map is test name such as 'media/media-foo.html'.
The value of the map is a map that contains the following keys:
'desc'(description), 'te_info' (test expectation information),
which is a list of test expectation information map. The key of the
test expectation information map is test expectation keywords such
as "SKIP" and other keywords (for full list of keywords, please
refer to |test_expectations.ALL_TE_KEYWORDS|).
"""
self.result_map = {}
self.result_map['whole'] = {}
self.result_map['skip'] = {}
self.result_map['nonskip'] = {}
if test_info_map:
for (k, value) in test_info_map.iteritems():
self.result_map['whole'][k] = value
if 'te_info' in value:
# Don't count SLOW PASS, WONTFIX, or ANDROID tests as failures.
if any([True for x in value['te_info'] if set(x.keys()) ==
set(['SLOW', 'PASS', 'Bugs', 'Comments', 'Platforms']) or
'WONTFIX' in x or x['Platforms'] == ['ANDROID']]):
continue
if any([True for x in value['te_info'] if 'SKIP' in x]):
self.result_map['skip'][k] = value
else:
self.result_map['nonskip'][k] = value
@staticmethod
def GetDiffString(diff_map_element, type_str):
"""Get difference string out of diff map element.
The difference string shows difference between two analyzer results
(for example, a result for now and a result for sometime in the past)
in HTML format (with colors). This is used for generating email messages.
Args:
diff_map_element: An element of the compared map generated by
|CompareResultMaps()|. The element has two lists of test cases. One
is for test names that are in the current result but NOT in the
previous result. The other is for test names that are in the previous
results but NOT in the current result. Please refer to comments in
|CompareResultMaps()| for details.
type_str: a string indicating the test group to which |diff_map_element|
belongs; used for color determination. Must be 'whole', 'skip', or
'nonskip'.
Returns:
a string in HTML format (with colors) to show difference between two
analyzer results.
"""
if not diff_map_element[0] and not diff_map_element[1]:
return 'No Change'
color = ''
diff = len(diff_map_element[0]) - len(diff_map_element[1])
if diff > 0 and type_str != 'whole':
color = 'red'
else:
color = 'green'
diff_sign = ''
if diff > 0:
diff_sign = '+'
if not diff:
whole_str = 'No Change'
else:
whole_str = '<font color="%s">%s%d</font>' % (color, diff_sign, diff)
colors = ['red', 'green']
if type_str == 'whole':
      # Bug 107773 - when we increase the number of tests, the names of the
      # tests are shown in red, but they should be green since an increase
      # is a good thing.
colors = ['green', 'red']
str1 = ''
for (name, _) in diff_map_element[0]:
str1 += '<font color="%s">%s,</font>' % (colors[0], name)
str2 = ''
for (name, _) in diff_map_element[1]:
str2 += '<font color="%s">%s,</font>' % (colors[1], name)
if str1 or str2:
whole_str += ':'
if str1:
whole_str += str1
if str2:
whole_str += str2
# Remove the last occurrence of ','.
whole_str = ''.join(whole_str.rsplit(',', 1))
return whole_str
def GetPassingRate(self):
"""Get passing rate.
Returns:
layout test passing rate of this result in percent.
Raises:
      ValueError when the number of tests in test group "whole" is equal
      to or less than that of "skip".
"""
delta = len(self.result_map['whole'].keys()) - (
len(self.result_map['skip'].keys()))
if delta <= 0:
      raise ValueError('The number of tests in test group "whole" is equal '
                       'to or less than that of "skip"')
return 100 - len(self.result_map['nonskip'].keys()) * 100.0 / delta
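  # Worked example for GetPassingRate: with 100 tests in 'whole', 20 in
  # 'skip' and 8 in 'nonskip', the rate is 100 - 8 * 100.0 / (100 - 20) = 90.0.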
def ConvertToCSVText(self, current_time):
"""Convert |self.result_map| into stats and issues text in CSV format.
Both are used as inputs for Google spreadsheet.
Args:
current_time: a string depicting a time in year-month-day-hour
format (e.g., 2011-11-08-16).
Returns:
a tuple of stats and issues_txt
stats: analyzer result in CSV format that shows:
(current_time, the number of tests, the number of skipped tests,
the number of failing tests, passing rate)
For example,
"2011-11-10-15,204,22,12,94"
issues_txt: issues listed in CSV format that shows:
(BUGWK or BUGCR, bug number, the test expectation entry,
the name of the test)
For example,
"BUGWK,71543,TIMEOUT PASS,media/media-element-play-after-eos.html,
BUGCR,97657,IMAGE CPU MAC TIMEOUT PASS,media/audio-repaint.html,"
"""
stats = ','.join([current_time, str(len(self.result_map['whole'].keys())),
str(len(self.result_map['skip'].keys())),
str(len(self.result_map['nonskip'].keys())),
str(self.GetPassingRate())])
issues_txt = ''
for bug_txt, test_info_list in (
self.GetListOfBugsForNonSkippedTests().iteritems()):
matches = re.match(r'(BUG(CR|WK))(\d+)', bug_txt)
bug_suffix = ''
bug_no = ''
if matches:
bug_suffix = matches.group(1)
bug_no = matches.group(3)
issues_txt += bug_suffix + ',' + bug_no + ','
for test_info in test_info_list:
test_name, te_info = test_info
issues_txt += ' '.join(te_info.keys()) + ',' + test_name + ','
issues_txt += '\n'
return stats, issues_txt
def ConvertToString(self, prev_time, diff_map, issue_detail_mode):
"""Convert this result to HTML display for email.
Args:
prev_time: the previous time string that are compared against.
diff_map: the compared map generated by |CompareResultMaps()|.
issue_detail_mode: includes the issue details in the output string if
this is True.
Returns:
      an analyzer result string in HTML format.
"""
return_str = ''
if diff_map:
return_str += (
'<b>Statistics (Diff Compared to %s):</b><ul>'
'<li>The number of tests: %d (%s)</li>'
'<li>The number of failing skipped tests: %d (%s)</li>'
'<li>The number of failing non-skipped tests: %d (%s)</li>'
'<li>Passing rate: %.2f %%</li></ul>') % (
prev_time, len(self.result_map['whole'].keys()),
AnalyzerResultMap.GetDiffString(diff_map['whole'], 'whole'),
len(self.result_map['skip'].keys()),
AnalyzerResultMap.GetDiffString(diff_map['skip'], 'skip'),
len(self.result_map['nonskip'].keys()),
AnalyzerResultMap.GetDiffString(diff_map['nonskip'], 'nonskip'),
self.GetPassingRate())
if issue_detail_mode:
return_str += '<b>Current issues about failing non-skipped tests:</b>'
for (bug_txt, test_info_list) in (
self.GetListOfBugsForNonSkippedTests().iteritems()):
return_str += '<ul>%s' % Bug(bug_txt)
for test_info in test_info_list:
(test_name, te_info) = test_info
gpu_link = ''
if 'GPU' in te_info:
gpu_link = 'group=%40ToT%20GPU%20Mesa%20-%20chromium.org&'
dashboard_link = ('http://test-results.appspot.com/dashboards/'
'flakiness_dashboard.html#%stests=%s') % (
gpu_link, test_name)
return_str += '<li><a href="%s">%s</a> (%s) </li>' % (
dashboard_link, test_name, ' '.join(
[key for key in te_info.keys() if key != 'Platforms']))
return_str += '</ul>\n'
return return_str
def CompareToOtherResultMap(self, other_result_map):
"""Compare this result map with the other to see if there are any diff.
The comparison is done for layouttests which belong to 'whole', 'skip',
or 'nonskip'.
Args:
other_result_map: another result map to be compared against the result
map of the current object.
Returns:
a map that has 'whole', 'skip' and 'nonskip' as keys.
Please refer to |diff_map| in |SendStatusEmail()|.
"""
comp_result_map = {}
for name in ['whole', 'skip', 'nonskip']:
if name == 'nonskip':
# Look into expectation to get diff only for non-skipped tests.
lookIntoTestExpectationInfo = True
else:
# Otherwise, only test names are compared to get diff.
lookIntoTestExpectationInfo = False
comp_result_map[name] = GetDiffBetweenMaps(
self.result_map[name], other_result_map.result_map[name],
lookIntoTestExpectationInfo)
return comp_result_map
@staticmethod
def Load(file_path):
"""Load the object from |file_path| using pickle library.
Args:
file_path: the string path to the file from which to read the result.
Returns:
a AnalyzerResultMap object read from |file_path|.
"""
file_object = open(file_path)
analyzer_result_map = pickle.load(file_object)
file_object.close()
return analyzer_result_map
def Save(self, file_path):
"""Save the object to |file_path| using pickle library.
Args:
file_path: the string path to the file in which to store the result.
"""
file_object = open(file_path, 'wb')
pickle.dump(self, file_object)
file_object.close()
def GetListOfBugsForNonSkippedTests(self):
"""Get a list of bugs for non-skipped layout tests.
This is used for generating email content.
Returns:
a mapping from bug modifier text (e.g., BUGCR1111) to a test name and
main test information string which excludes comments and bugs.
This is used for grouping test names by bug.
"""
bug_map = {}
for (name, value) in self.result_map['nonskip'].iteritems():
for te_info in value['te_info']:
main_te_info = {}
for k in te_info.keys():
if k != 'Comments' and k != 'Bugs':
main_te_info[k] = True
if 'Bugs' in te_info:
for bug in te_info['Bugs']:
if bug not in bug_map:
bug_map[bug] = []
bug_map[bug].append((name, main_te_info))
return bug_map
def SendStatusEmail(prev_time, analyzer_result_map, diff_map,
receiver_email_address, test_group_name,
appended_text_to_email, email_content, rev_str,
email_only_change_mode):
"""Send status email.
Args:
prev_time: the date string such as '2011-10-09-11'. This format has been
used in this analyzer.
analyzer_result_map: current analyzer result.
diff_map: a map that has 'whole', 'skip' and 'nonskip' as keys.
The values of the map are the result of |GetDiffBetweenMaps()|.
The element has two lists of test cases. One (with index 0) is for
test names that are in the current result but NOT in the previous
result. The other (with index 1) is for test names that are in the
previous results but NOT in the current result.
For example (test expectation information is omitted for
simplicity),
comp_result_map['whole'][0] = ['foo1.html']
comp_result_map['whole'][1] = ['foo2.html']
This means that current result has 'foo1.html' but it is NOT in the
previous result. This also means the previous result has 'foo2.html'
but it is NOT in the current result.
receiver_email_address: receiver's email address.
test_group_name: string representing the test group name (e.g., 'media').
appended_text_to_email: a text which is appended at the end of the status
email.
email_content: an email content string that will be shown on the dashboard.
rev_str: a revision string that contains revision information that is sent
out in the status email. It is obtained by calling
|GetRevisionString()|.
email_only_change_mode: send email only when there is a change if this is
True. Otherwise, always send email after each run.
"""
if rev_str:
email_content += '<br><b>Revision Information:</b>'
email_content += rev_str
localtime = time.asctime(time.localtime(time.time()))
change_str = ''
if email_only_change_mode:
change_str = 'Status Change '
subject = 'Layout Test Analyzer Result %s(%s): %s' % (change_str,
test_group_name,
localtime)
SendEmail('no-reply@chromium.org', [receiver_email_address],
subject, email_content + appended_text_to_email)
def GetRevisionString(prev_time, current_time, diff_map):
"""Get a string for revision information during the specified time period.
Args:
prev_time: the previous time as a floating point number expressed
in seconds since the epoch, in UTC.
current_time: the current time as a floating point number expressed
in seconds since the epoch, in UTC. It is typically obtained by
time.time() function.
diff_map: a map that has 'whole', 'skip' and 'nonskip' as keys.
Please refer to |diff_map| in |SendStatusEmail()|.
Returns:
a tuple of strings:
1) full string containing links, author, date, and line for each
change in the test expectation file.
2) shorter string containing only links to the change. Used for
trend graph annotations.
3) last revision number for the given test group.
4) last revision date for the given test group.
"""
if not diff_map:
return ('', '', '', '')
testname_map = {}
for test_group in ['skip', 'nonskip']:
for i in range(2):
for (k, _) in diff_map[test_group][i]:
testname_map[k] = True
rev_infos = TestExpectationsHistory.GetDiffBetweenTimes(prev_time,
current_time,
testname_map.keys())
rev_str = ''
simple_rev_str = ''
rev = ''
rev_date = ''
if rev_infos:
# Get latest revision number and date.
rev = rev_infos[-1][1]
rev_date = rev_infos[-1][3]
for rev_info in rev_infos:
(old_rev, new_rev, author, date, _, target_lines) = rev_info
# test_expectations.txt was renamed to TestExpectations at r119317.
new_path = DEFAULT_TEST_EXPECTATION_PATH
if new_rev < 119317:
new_path = LEGACY_DEFAULT_TEST_EXPECTATION_PATH
old_path = DEFAULT_TEST_EXPECTATION_PATH
if old_rev < 119317:
old_path = LEGACY_DEFAULT_TEST_EXPECTATION_PATH
link = REVISION_LOG_URL % (new_path, old_rev, new_rev)
rev_str += '<ul><a href="%s">%s->%s</a>\n' % (link, old_rev, new_rev)
simple_rev_str = '<a href="%s">%s->%s</a>,' % (link, old_rev, new_rev)
rev_str += '<li>%s</li>\n' % author
rev_str += '<li>%s</li>\n<ul>' % date
for line in target_lines:
# Find *.html pattern (test name) and replace it with the link to
# flakiness dashboard.
test_name_pattern = r'(\S+.html)'
match = re.search(test_name_pattern, line)
if match:
test_name = match.group(1)
gpu_link = ''
if 'GPU' in line:
gpu_link = 'group=%40ToT%20GPU%20Mesa%20-%20chromium.org&'
dashboard_link = ('http://test-results.appspot.com/dashboards/'
'flakiness_dashboard.html#%stests=%s') % (
gpu_link, test_name)
line = line.replace(test_name, '<a href="%s">%s</a>' % (
dashboard_link, test_name))
# Find bug text and replace it with the link to the bug.
bug = Bug(line)
if bug.bug_txt:
line = '<li>%s</li>\n' % line.replace(bug.bug_txt, str(bug))
rev_str += line
rev_str += '</ul></ul>'
return (rev_str, simple_rev_str, rev, rev_date)
def SendEmail(sender_email_address, receivers_email_addresses, subject,
message):
"""Send email using localhost's mail server.
Args:
sender_email_address: sender's email address.
receivers_email_addresses: receiver's email addresses.
subject: subject string.
message: email message.
"""
try:
html_top = """
<html>
<head></head>
<body>
"""
html_bot = """
</body>
</html>
"""
html = html_top + message + html_bot
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = sender_email_address
msg['To'] = receivers_email_addresses[0]
part1 = MIMEText(html, 'html')
smtp_obj = smtplib.SMTP('localhost')
msg.attach(part1)
smtp_obj.sendmail(sender_email_address, receivers_email_addresses,
msg.as_string())
print 'Successfully sent email'
except smtplib.SMTPException, ex:
print 'Authentication failed:', ex
print 'Error: unable to send email'
except (socket.gaierror, socket.error, socket.herror), ex:
print ex
print 'Error: unable to send email'
def FindLatestTime(time_list):
"""Find latest time from |time_list|.
The current status is compared to the status of the latest file in
|RESULT_DIR|.
Args:
time_list: a list of time string in the form of 'Year-Month-Day-Hour'
(e.g., 2011-10-23-23). Strings not in this format are ignored.
Returns:
    a string representing the latest time among |time_list|, or None if
    |time_list| is empty or contains no valid date string.
"""
if not time_list:
return None
latest_date = None
for time_element in time_list:
try:
item_date = datetime.strptime(time_element, '%Y-%m-%d-%H')
if latest_date is None or latest_date < item_date:
latest_date = item_date
except ValueError:
# Do nothing.
pass
if latest_date:
return latest_date.strftime('%Y-%m-%d-%H')
else:
return None
def ReplaceLineInFile(file_path, search_exp, replace_line):
"""Replace line which has |search_exp| with |replace_line| within a file.
Args:
file_path: the file that is being replaced.
search_exp: search expression to find a line to be replaced.
replace_line: the new line.
"""
for line in fileinput.input(file_path, inplace=1):
if search_exp in line:
line = replace_line
sys.stdout.write(line)
def FindLatestResult(result_dir):
"""Find the latest result in |result_dir| and read and return them.
This is used for comparison of analyzer result between current analyzer
and most known latest result.
Args:
result_dir: the result directory.
Returns:
A tuple of filename (latest_time) and the latest analyzer result.
Returns None if there is no file or no file that matches the file
patterns used ('%Y-%m-%d-%H').
"""
dir_list = os.listdir(result_dir)
file_name = FindLatestTime(dir_list)
if not file_name:
return None
file_path = os.path.join(result_dir, file_name)
return (file_name, AnalyzerResultMap.Load(file_path))
def GetDiffBetweenMaps(map1, map2, lookIntoTestExpectationInfo=False):
"""Get difference between maps.
Args:
map1: analyzer result map to be compared.
map2: analyzer result map to be compared.
lookIntoTestExpectationInfo: a boolean to indicate whether to compare
test expectation information in addition to just the test case names.
Returns:
    a tuple of |name1_list| and |name2_list|. |Name1_list| contains all test
    names and the test expectation information that are in |map1| but not in
    |map2|. |Name2_list| contains all test names and the test expectation
    information that are in |map2| but not in |map1|.
"""
def GetDiffBetweenMapsHelper(map1, map2, lookIntoTestExpectationInfo):
"""A helper function for GetDiffBetweenMaps.
Args:
map1: analyzer result map to be compared.
map2: analyzer result map to be compared.
lookIntoTestExpectationInfo: a boolean to indicate whether to compare
test expectation information in addition to just the test case names.
Returns:
a list of tuples (name, te_info) that are in |map1| but not in |map2|.
"""
name_list = []
for (name, value1) in map1.iteritems():
if name in map2:
if lookIntoTestExpectationInfo and 'te_info' in value1:
list1 = value1['te_info']
list2 = map2[name]['te_info']
te_diff = [item for item in list1 if not item in list2]
if te_diff:
name_list.append((name, te_diff))
else:
name_list.append((name, value1))
return name_list
return (GetDiffBetweenMapsHelper(map1, map2, lookIntoTestExpectationInfo),
GetDiffBetweenMapsHelper(map2, map1, lookIntoTestExpectationInfo))
|
romain-intel/bcc
|
refs/heads/master
|
tools/runqlen.py
|
3
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# runqlen Summarize scheduler run queue length as a histogram.
# For Linux, uses BCC, eBPF.
#
# This counts the length of the run queue, excluding the currently running
# thread, and shows it as a histogram.
#
# Also answers run queue occupancy.
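# (As computed for -O below: occupancy = queued samples / all samples, where
# a sample counts as "queued" when len > 0 after excluding the running task;
# e.g. 25 queued samples out of 100 prints "runqocc: 25.00%".)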
#
# USAGE: runqlen [-h] [-T] [-O] [-C] [interval] [count]
#
# REQUIRES: Linux 4.9+ (BPF_PROG_TYPE_PERF_EVENT support). Under tools/old is
# a version of this tool that may work on Linux 4.6 - 4.8.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 12-Dec-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF, PerfType, PerfSWConfig
from time import sleep, strftime
import argparse
# arguments
examples = """examples:
./runqlen # summarize run queue length as a histogram
./runqlen 1 10 # print 1 second summaries, 10 times
./runqlen -T 1 # 1s summaries and timestamps
./runqlen -O # report run queue occupancy
./runqlen -C # show each CPU separately
"""
parser = argparse.ArgumentParser(
description="Summarize scheduler run queue length as a histogram",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-T", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-O", "--runqocc", action="store_true",
help="report run queue occupancy")
parser.add_argument("-C", "--cpus", action="store_true",
help="print output for each CPU separately")
parser.add_argument("interval", nargs="?", default=99999999,
help="output interval, in seconds")
parser.add_argument("count", nargs="?", default=99999999,
help="number of outputs")
args = parser.parse_args()
countdown = int(args.count)
debug = 0
frequency = 99
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
// Declare enough of cfs_rq to find nr_running, since we can't #include the
// header. This will need maintenance. It is from kernel/sched/sched.h:
struct cfs_rq_partial {
struct load_weight load;
unsigned int nr_running, h_nr_running;
};
typedef struct cpu_key {
int cpu;
unsigned int slot;
} cpu_key_t;
STORAGE
int do_perf_event()
{
unsigned int len = 0;
pid_t pid = 0;
struct task_struct *task = NULL;
struct cfs_rq_partial *my_q = NULL;
// Fetch the run queue length from task->se.cfs_rq->nr_running. This is an
// unstable interface and may need maintenance. Perhaps a future version
// of BPF will support task_rq(p) or something similar as a more reliable
// interface.
task = (struct task_struct *)bpf_get_current_task();
bpf_probe_read(&my_q, sizeof(my_q), &task->se.cfs_rq);
bpf_probe_read(&len, sizeof(len), &my_q->nr_running);
// Calculate run queue length by subtracting the currently running task,
// if present. len 0 == idle, len 1 == one running task.
if (len > 0)
len--;
STORE
return 0;
}
"""
# code substitutions
if args.cpus:
bpf_text = bpf_text.replace('STORAGE',
'BPF_HISTOGRAM(dist, cpu_key_t);')
bpf_text = bpf_text.replace('STORE', 'cpu_key_t key = {.slot = len}; ' +
'key.cpu = bpf_get_smp_processor_id(); ' +
'dist.increment(key);')
else:
bpf_text = bpf_text.replace('STORAGE',
'BPF_HISTOGRAM(dist, unsigned int);')
bpf_text = bpf_text.replace('STORE', 'dist.increment(len);')
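# For reference (expanded form, not part of the original tool), the per-CPU
# substitution above turns STORE into C code equivalent to:
#   cpu_key_t key = {.slot = len};
#   key.cpu = bpf_get_smp_processor_id();
#   dist.increment(key);
# while the default path simply increments the histogram slot for 'len'.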
if debug:
print(bpf_text)
# load BPF program & attach to perf events
b = BPF(text=bpf_text)
b.attach_perf_event(ev_type=PerfType.SOFTWARE,
ev_config=PerfSWConfig.CPU_CLOCK, fn_name="do_perf_event",
sample_period=0, sample_freq=frequency)
print("Sampling run queue length... Hit Ctrl-C to end.")
# output
exiting = 0 if args.interval else 1
dist = b.get_table("dist")
while (1):
try:
sleep(int(args.interval))
except KeyboardInterrupt:
exiting = 1
print()
if args.timestamp:
print("%-8s\n" % strftime("%H:%M:%S"), end="")
if args.runqocc:
if args.cpus:
# run queue occupancy, per-CPU summary
idle = {}
queued = {}
cpumax = 0
for k, v in dist.items():
if k.cpu > cpumax:
cpumax = k.cpu
for c in range(0, cpumax + 1):
idle[c] = 0
queued[c] = 0
for k, v in dist.items():
if k.slot == 0:
idle[k.cpu] += v.value
else:
queued[k.cpu] += v.value
for c in range(0, cpumax + 1):
samples = idle[c] + queued[c]
if samples:
runqocc = float(queued[c]) / samples
else:
runqocc = 0
print("runqocc, CPU %-3d %6.2f%%" % (c, 100 * runqocc))
else:
# run queue occupancy, system-wide summary
idle = 0
queued = 0
for k, v in dist.items():
if k.value == 0:
idle += v.value
else:
queued += v.value
samples = idle + queued
if samples:
runqocc = float(queued) / samples
else:
runqocc = 0
print("runqocc: %0.2f%%" % (100 * runqocc))
else:
# run queue length histograms
dist.print_linear_hist("runqlen", "cpu")
dist.clear()
countdown -= 1
if exiting or countdown == 0:
exit()
|
oVirt/vdsm
|
refs/heads/master
|
tests/network/unit/ip_address_test.py
|
1
|
# Copyright 2016-2021 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
import pytest
from vdsm.network.ip import address
DEVICE_NAME = 'foo'
IPV4_PREFIX = 29
IPV4_NETMASK = address.prefix2netmask(IPV4_PREFIX)
IPV4_A_ADDRESS = '192.168.99.1'
IPV4_A_WITH_PREFIXLEN = '{}/{}'.format(IPV4_A_ADDRESS, IPV4_PREFIX)
IPV4_INVALID_ADDRESS = '333.333.333.333'
IPV4_INVALID_WITH_PREFIXLEN = '{}/{}'.format(IPV4_INVALID_ADDRESS, IPV4_PREFIX)
IPV6_PREFIX = 64
IPV6_NETMASK = 'ffff:ffff:ffff:ffff::'
IPV6_A_ADDRESS = '2001:99::1'
IPV6_A_WITH_PREFIXLEN = '{}/{}'.format(IPV6_A_ADDRESS, IPV6_PREFIX)
IPV6_INVALID_ADDRESS = '2001::99::1'
IPV6_INVALID_WITH_PREFIXLEN = '{}/{}'.format(IPV6_INVALID_ADDRESS, IPV6_PREFIX)
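# For reference: a /29 IPv4 prefix corresponds to netmask 255.255.255.248,
# which is what address.prefix2netmask(IPV4_PREFIX) is expected to return here.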
class TestIPAddressData(object):
def test_ipv4_init(self):
ip_data = address.IPAddressData(
IPV4_A_WITH_PREFIXLEN, device=DEVICE_NAME
)
assert ip_data.device == DEVICE_NAME
assert ip_data.family == 4
assert ip_data.address == IPV4_A_ADDRESS
assert ip_data.netmask == IPV4_NETMASK
assert ip_data.prefixlen == IPV4_PREFIX
assert ip_data.address_with_prefixlen == IPV4_A_WITH_PREFIXLEN
def test_ipv4_init_invalid(self):
with pytest.raises(address.IPAddressDataError):
address.IPAddressData(
IPV4_INVALID_WITH_PREFIXLEN, device=DEVICE_NAME
)
def test_ipv6_init(self):
ip_data = address.IPAddressData(
IPV6_A_WITH_PREFIXLEN, device=DEVICE_NAME
)
assert ip_data.device == DEVICE_NAME
assert ip_data.family == 6
assert ip_data.address == IPV6_A_ADDRESS
assert ip_data.netmask == IPV6_NETMASK
assert ip_data.prefixlen == IPV6_PREFIX
assert ip_data.address_with_prefixlen == IPV6_A_WITH_PREFIXLEN
def test_ipv6_init_invalid(self):
with pytest.raises(address.IPAddressDataError):
address.IPAddressData(
IPV6_INVALID_WITH_PREFIXLEN, device=DEVICE_NAME
)
def test_ipv4_init_with_scope_and_flags(self):
SCOPE = 'local'
FLAGS = frozenset([address.Flags.SECONDARY, address.Flags.PERMANENT])
ip_data = address.IPAddressData(
IPV4_A_WITH_PREFIXLEN, device=DEVICE_NAME, scope=SCOPE, flags=FLAGS
)
assert ip_data.scope == SCOPE
assert ip_data.flags == FLAGS
assert not ip_data.is_primary()
assert ip_data.is_permanent()
|
LighthouseHPC/lighthouse
|
refs/heads/master
|
sandbox/lily/django_orthg/dojango/forms/widgets.py
|
3
|
import datetime
import time
from django.forms import *
from django.utils import formats
from django.utils.encoding import StrAndUnicode, force_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.forms.util import flatatt
from django.utils import datetime_safe
from dojango.util import json_encode
from dojango.util.config import Config
from dojango.util import dojo_collector
__all__ = (
'Media', 'MediaDefiningClass', # original django classes
'DojoWidgetMixin', 'Input', 'Widget', 'TextInput', 'PasswordInput',
'HiddenInput', 'MultipleHiddenInput', 'FileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioInput', 'RadioFieldRenderer',
'RadioSelect', 'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SimpleTextarea', 'EditorInput', 'HorizontalSliderInput',
'VerticalSliderInput', 'ValidationTextInput', 'ValidationPasswordInput',
'EmailTextInput', 'IPAddressTextInput', 'URLTextInput', 'NumberTextInput',
'RangeBoundTextInput', 'NumberSpinnerInput', 'RatingInput', 'DateInputAnim',
'DropDownSelect', 'CheckedMultiSelect', 'FilteringSelect', 'ComboBox',
'ComboBoxStore', 'FilteringSelectStore', 'ListInput',
)
dojo_config = Config() # initialize the configuration
class DojoWidgetMixin:
"""A helper mixin, that is used by every custom dojo widget.
Some dojo widgets can utilize the validation information of a field, and here
we mix those attributes into the widget. Field attributes that are listed
in 'valid_extra_attrs' will be mixed into the attributes of a widget.
The 'default_field_attr_map' property contains the default mapping of field
attributes to dojo widget attributes.
This mixin also takes care of passing the required dojo modules to the collector.
'dojo_type' defines the used dojo module type of this widget and adds this
module to the collector, if no 'alt_require' property is defined. When
'alt_require' is set, this module will be passed to the collector. By using
'extra_dojo_require' it is possible to pass additional dojo modules to the
collector.
"""
dojo_type = None # this is the dojoType definition of the widget. also used for generating the dojo.require call
alt_require = None # alternative dojo.require call (not using the dojo_type)
extra_dojo_require = [] # these dojo modules also need to be loaded for this widget
default_field_attr_map = { # the default map for mapping field attributes to dojo attributes
'required':'required',
'help_text':'promptMessage',
'min_value':'constraints.min',
'max_value':'constraints.max',
'max_length':'maxLength',
'max_digits':'maxLength',
'decimal_places':'constraints.places',
'js_regex':'regExp',
'multiple':'multiple',
}
field_attr_map = {} # used for overwriting the default attr-map
valid_extra_attrs = [] # these field_attributes are valid for the current widget
def _mixin_attr(self, attrs, key, value):
"""Mixes in the passed key/value into the passed attrs and returns that
extended attrs dictionary.
A 'key', that is separated by a dot, e.g. 'constraints.min', will be
added as:
{'constraints':{'min':value}}
"""
dojo_field_attr = key.split(".")
inner_dict = attrs
len_fields = len(dojo_field_attr)
count = 0
for i in dojo_field_attr:
count = count+1
if count == len_fields and inner_dict.get(i, None) is None:
if isinstance(value, datetime.datetime):
if isinstance(self, TimeInput):
value = value.strftime('T%H:%M:%S')
if isinstance(self, DateInput):
value = value.strftime('%Y-%m-%d')
value = str(value).replace(' ', 'T') # see dojo.date.stamp
if isinstance(value, datetime.date):
value = str(value)
if isinstance(value, datetime.time):
value = "T" + str(value) # see dojo.date.stamp
inner_dict[i] = value
elif i not in inner_dict:
inner_dict[i] = {}
inner_dict = inner_dict[i]
return attrs
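# Example (illustrative, not part of the original module):
# self._mixin_attr({}, 'constraints.min', 1) returns
# {'constraints': {'min': 1}}, while a plain key such as 'maxLength'
# is set directly on the attrs dictionary.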
def build_attrs(self, extra_attrs=None, **kwargs):
"""Overwritten helper function for building an attribute dictionary.
This helper also takes care of passing the used dojo modules to the
collector. Furthermore, it mixes the used field attributes into the
attributes of this widget.
"""
# gathering all widget attributes
attrs = dict(self.attrs, **kwargs)
field_attr = self.default_field_attr_map.copy() # use a copy of that object. otherwise changed field_attr_map would overwrite the default-map for all widgets!
field_attr.update(self.field_attr_map) # the field-attribute-mapping can be customized
if extra_attrs:
attrs.update(extra_attrs)
# assigning dojoType to our widget
dojo_type = getattr(self, "dojo_type", False)
if dojo_type:
attrs["dojoType"] = dojo_type # add the dojoType attribute
# fill the global collector object
if getattr(self, "alt_require", False):
dojo_collector.add_module(self.alt_require)
elif dojo_type:
dojo_collector.add_module(self.dojo_type)
extra_requires = getattr(self, "extra_dojo_require", [])
for i in extra_requires:
dojo_collector.add_module(i)
# mixin those additional field attrs, that are valid for this widget
extra_field_attrs = attrs.get("extra_field_attrs", False)
if extra_field_attrs:
for i in self.valid_extra_attrs:
field_val = extra_field_attrs.get(i, None)
new_attr_name = field_attr.get(i, None)
if field_val is not None and new_attr_name is not None:
attrs = self._mixin_attr(attrs, new_attr_name, field_val)
del attrs["extra_field_attrs"]
# now encode several attributes, e.g. False = false, True = true
for i in attrs:
if isinstance(attrs[i], bool):
attrs[i] = json_encode(attrs[i])
return attrs
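# A minimal sketch (not part of dojango) of how the mixin is meant to be
# combined with a standard Django widget; the widget name and attribute list
# below are hypothetical:
#
#   class CurrencyTextInput(DojoWidgetMixin, widgets.TextInput):
#       dojo_type = 'dijit.form.CurrencyTextBox'
#       valid_extra_attrs = ['required', 'help_text', 'max_length']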
#############################################
# ALL OVERWRITTEN DEFAULT DJANGO WIDGETS
#############################################
class Widget(DojoWidgetMixin, widgets.Widget):
dojo_type = 'dijit._Widget'
class Input(DojoWidgetMixin, widgets.Input):
pass
class TextInput(DojoWidgetMixin, widgets.TextInput):
dojo_type = 'dijit.form.TextBox'
valid_extra_attrs = [
'max_length',
]
class PasswordInput(DojoWidgetMixin, widgets.PasswordInput):
dojo_type = 'dijit.form.TextBox'
valid_extra_attrs = [
'max_length',
]
class HiddenInput(DojoWidgetMixin, widgets.HiddenInput):
dojo_type = 'dijit.form.TextBox' # otherwise dijit.form.Form can't get its values
class MultipleHiddenInput(DojoWidgetMixin, widgets.MultipleHiddenInput):
dojo_type = 'dijit.form.TextBox' # otherwise dijit.form.Form can't get its values
class FileInput(DojoWidgetMixin, widgets.FileInput):
dojo_type = 'dojox.form.FileInput'
class Media:
css = {
'all': ('%(base_url)s/dojox/form/resources/FileInput.css' % {
'base_url':dojo_config.dojo_base_url
},)
}
class Textarea(DojoWidgetMixin, widgets.Textarea):
"""Auto resizing textarea"""
dojo_type = 'dijit.form.Textarea'
valid_extra_attrs = [
'max_length'
]
if hasattr(widgets, 'DateInput'):
class DateInput(DojoWidgetMixin, widgets.DateInput):
manual_format = True
format = '%Y-%m-%d' # force to US format (dojo will do the locale-specific formatting)
dojo_type = 'dijit.form.DateTextBox'
valid_extra_attrs = [
'required',
'help_text',
'min_value',
'max_value',
]
else: # fallback for older django versions
class DateInput(TextInput):
"""Copy of the implementation in Django 1.1. Before this widget did not exists."""
dojo_type = 'dijit.form.DateTextBox'
valid_extra_attrs = [
'required',
'help_text',
'min_value',
'max_value',
]
format = '%Y-%m-%d' # '2006-10-25'
def __init__(self, attrs=None, format=None):
super(DateInput, self).__init__(attrs)
if format:
self.format = format
def render(self, name, value, attrs=None):
if value is None:
value = ''
elif hasattr(value, 'strftime'):
value = datetime_safe.new_date(value)
value = value.strftime(self.format)
return super(DateInput, self).render(name, value, attrs)
if hasattr(widgets, 'TimeInput'):
class TimeInput(DojoWidgetMixin, widgets.TimeInput):
dojo_type = 'dijit.form.TimeTextBox'
valid_extra_attrs = [
'required',
'help_text',
'min_value',
'max_value',
]
manual_format = True
format = "T%H:%M:%S" # special for dojo: 'T12:12:33'
def __init__(self, attrs=None, format=None):
# always passing the dojo time format
super(TimeInput, self).__init__(attrs, format=self.format)
def _has_changed(self, initial, data):
try:
input_format = self.format
initial = datetime.time(*time.strptime(initial, input_format)[3:6])
except (TypeError, ValueError):
pass
return super(TimeInput, self)._has_changed(self._format_value(initial), data)
else: # fallback for older django versions
class TimeInput(TextInput):
"""Copy of the implementation in Django 1.1. Before this widget did not exists."""
dojo_type = 'dijit.form.TimeTextBox'
valid_extra_attrs = [
'required',
'help_text',
'min_value',
'max_value',
]
format = "T%H:%M:%S" # special for dojo: 'T12:12:33'
def __init__(self, attrs=None, format=None):
super(TimeInput, self).__init__(attrs)
if format:
self.format = format
def render(self, name, value, attrs=None):
if value is None:
value = ''
elif hasattr(value, 'strftime'):
value = value.strftime(self.format)
return super(TimeInput, self).render(name, value, attrs)
class CheckboxInput(DojoWidgetMixin, widgets.CheckboxInput):
dojo_type = 'dijit.form.CheckBox'
class Select(DojoWidgetMixin, widgets.Select):
dojo_type = dojo_config.version < '1.4' and 'dijit.form.FilteringSelect' or 'dijit.form.Select'
valid_extra_attrs = dojo_config.version < '1.4' and \
['required', 'help_text',] or \
['required',]
class NullBooleanSelect(DojoWidgetMixin, widgets.NullBooleanSelect):
dojo_type = dojo_config.version < '1.4' and 'dijit.form.FilteringSelect' or 'dijit.form.Select'
valid_extra_attrs = dojo_config.version < '1.4' and \
['required', 'help_text',] or \
['required',]
class SelectMultiple(DojoWidgetMixin, widgets.SelectMultiple):
dojo_type = 'dijit.form.MultiSelect'
RadioInput = widgets.RadioInput
RadioFieldRenderer = widgets.RadioFieldRenderer
class RadioSelect(DojoWidgetMixin, widgets.RadioSelect):
dojo_type = 'dijit.form.RadioButton'
def __init__(self, *args, **kwargs):
if dojo_config.version < '1.3':
self.alt_require = 'dijit.form.CheckBox'
super(RadioSelect, self).__init__(*args, **kwargs)
class CheckboxSelectMultiple(DojoWidgetMixin, widgets.CheckboxSelectMultiple):
dojo_type = 'dijit.form.CheckBox'
class MultiWidget(DojoWidgetMixin, widgets.MultiWidget):
dojo_type = None
class SplitDateTimeWidget(widgets.SplitDateTimeWidget):
"DateTimeInput is using two input fields."
try:
# for older django versions
date_format = DateInput.format
time_format = TimeInput.format
except AttributeError:
date_format = None
time_format = None
def __init__(self, attrs=None, date_format=None, time_format=None):
if date_format:
self.date_format = date_format
if time_format:
self.time_format = time_format
split_widgets = (DateInput(attrs=attrs, format=self.date_format),
TimeInput(attrs=attrs, format=self.time_format))
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
widgets.MultiWidget.__init__(self, split_widgets, attrs)
class SplitHiddenDateTimeWidget(DojoWidgetMixin, widgets.SplitHiddenDateTimeWidget):
dojo_type = "dijit.form.TextBox"
DateTimeInput = SplitDateTimeWidget
#############################################
# MORE ENHANCED DJANGO/DOJO WIDGETS
#############################################
class SimpleTextarea(Textarea):
"""No autoexpanding textarea"""
dojo_type = "dijit.form.SimpleTextarea"
class EditorInput(Textarea):
dojo_type = 'dijit.Editor'
def render(self, name, value, attrs=None):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, name=name)
# dijit.Editor must be rendered in a div (see dijit/_editor/RichText.js)
return mark_safe(u'<div%s>%s</div>' % (flatatt(final_attrs),
force_unicode(value))) # we don't escape the value for the editor
class HorizontalSliderInput(TextInput):
dojo_type = 'dijit.form.HorizontalSlider'
valid_extra_attrs = [
'max_value',
'min_value',
]
field_attr_map = {
'max_value': 'maximum',
'min_value': 'minimum',
}
def __init__(self, attrs=None):
if dojo_config.version < '1.3':
self.alt_require = 'dijit.form.Slider'
super(HorizontalSliderInput, self).__init__(attrs)
class VerticalSliderInput(HorizontalSliderInput):
dojo_type = 'dijit.form.VerticalSlider'
class ValidationTextInput(TextInput):
dojo_type = 'dijit.form.ValidationTextBox'
valid_extra_attrs = [
'required',
'help_text',
'js_regex',
'max_length',
]
js_regex_func = None
def render(self, name, value, attrs=None):
if self.js_regex_func:
attrs = self.build_attrs(attrs, regExpGen=self.js_regex_func)
return super(ValidationTextInput, self).render(name, value, attrs)
class ValidationPasswordInput(PasswordInput):
dojo_type = 'dijit.form.ValidationTextBox'
valid_extra_attrs = [
'required',
'help_text',
'js_regex',
'max_length',
]
class EmailTextInput(ValidationTextInput):
extra_dojo_require = [
'dojox.validate.regexp'
]
js_regex_func = "dojox.validate.regexp.emailAddress"
def __init__(self, attrs=None):
if dojo_config.version < '1.3':
self.js_regex_func = 'dojox.regexp.emailAddress'
super(EmailTextInput, self).__init__(attrs)
class IPAddressTextInput(ValidationTextInput):
extra_dojo_require = [
'dojox.validate.regexp'
]
js_regex_func = "dojox.validate.regexp.ipAddress"
def __init__(self, attrs=None):
if dojo_config.version < '1.3':
self.js_regex_func = 'dojox.regexp.ipAddress'
super(IPAddressTextInput, self).__init__(attrs)
class URLTextInput(ValidationTextInput):
extra_dojo_require = [
'dojox.validate.regexp'
]
js_regex_func = "dojox.validate.regexp.url"
def __init__(self, attrs=None):
if dojo_config.version < '1.3':
self.js_regex_func = 'dojox.regexp.url'
super(URLTextInput, self).__init__(attrs)
class NumberTextInput(TextInput):
dojo_type = 'dijit.form.NumberTextBox'
valid_extra_attrs = [
'min_value',
'max_value',
'required',
'help_text',
'decimal_places',
'max_digits',
]
class RangeBoundTextInput(NumberTextInput):
dojo_type = 'dijit.form.RangeBoundTextBox'
class NumberSpinnerInput(NumberTextInput):
dojo_type = 'dijit.form.NumberSpinner'
class RatingInput(TextInput):
dojo_type = 'dojox.form.Rating'
valid_extra_attrs = [
'max_value',
]
field_attr_map = {
'max_value': 'numStars',
}
class Media:
css = {
'all': ('%(base_url)s/dojox/form/resources/Rating.css' % {
'base_url':dojo_config.dojo_base_url
},)
}
class DateInputAnim(DateInput):
dojo_type = 'dojox.form.DateTextBox'
class Media:
css = {
'all': ('%(base_url)s/dojox/widget/Calendar/Calendar.css' % {
'base_url':dojo_config.dojo_base_url
},)
}
class DropDownSelect(Select):
dojo_type = 'dojox.form.DropDownSelect'
valid_extra_attrs = []
class Media:
css = {
'all': ('%(base_url)s/dojox/form/resources/DropDownSelect.css' % {
'base_url':dojo_config.dojo_base_url
},)
}
class CheckedMultiSelect(SelectMultiple):
dojo_type = 'dojox.form.CheckedMultiSelect'
valid_extra_attrs = []
# TODO: fix attribute multiple=multiple
# seems there is a dependency in dojox.form.CheckedMultiSelect for dijit.form.MultiSelect,
# but CheckedMultiSelect is not extending that
class Media:
css = {
'all': ('%(base_url)s/dojox/form/resources/CheckedMultiSelect.css' % {
'base_url':dojo_config.dojo_base_url
},)
}
class ComboBox(DojoWidgetMixin, widgets.Select):
"""Nearly the same as FilteringSelect, but ignoring the option value."""
dojo_type = 'dijit.form.ComboBox'
valid_extra_attrs = [
'required',
'help_text',
]
class FilteringSelect(ComboBox):
dojo_type = 'dijit.form.FilteringSelect'
class ComboBoxStore(TextInput):
"""A combobox that is receiving data from a given dojo data url.
As default dojo.data.ItemFileReadStore is used. You can overwrite
that behaviour by passing a different store name
(e.g. dojox.data.QueryReadStore).
Usage:
ComboBoxStore("/dojo-data-store-url/")
"""
dojo_type = 'dijit.form.ComboBox'
valid_extra_attrs = [
'required',
'help_text',
]
store = 'dojo.data.ItemFileReadStore'
store_attrs = {}
url = None
def __init__(self, url, attrs=None, store=None, store_attrs={}):
self.url = url
if store:
self.store = store
if store_attrs:
self.store_attrs = store_attrs
self.extra_dojo_require.append(self.store)
super(ComboBoxStore, self).__init__(attrs)
def render(self, name, value, attrs=None):
if value is None: value = ''
store_id = self.get_store_id((attrs or {}).get("id", None), name)
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name, store=store_id)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_unicode(self._format_value(value))
self.store_attrs.update({
'dojoType': self.store,
'url': self.url,
'jsId':store_id
})
# TODO: convert store attributes to valid js-format (False => false, dict => {}, array = [])
store_node = '<div%s></div>' % flatatt(self.store_attrs)
return mark_safe(u'%s<input%s />' % (store_node, flatatt(final_attrs)))
def get_store_id(self, id, name):
return "_store_" + (id and id or name)
class FilteringSelectStore(ComboBoxStore):
dojo_type = 'dijit.form.FilteringSelect'
class ListInput(DojoWidgetMixin, widgets.TextInput):
dojo_type = 'dojox.form.ListInput'
class Media:
css = {
'all': ('%(base_url)s/dojox/form/resources/ListInput.css' % {
'base_url':dojo_config.dojo_base_url
},)
}
# THE RANGE SLIDER NEEDS A DIFFERENT REPRESENTATION WITHIN HTML
# SOMETHING LIKE:
# <div dojoType="dojox.form.RangeSlider"><input value="5"/><input value="10"/></div>
'''class HorizontalRangeSlider(HorizontalSliderInput):
"""This just can be used with a comma-separated-value like: 20,40"""
dojo_type = 'dojox.form.HorizontalRangeSlider'
alt_require = 'dojox.form.RangeSlider'
class Media:
css = {
'all': ('%(base_url)s/dojox/form/resources/RangeSlider.css' % {
'base_url':dojo_config.dojo_base_url
},)
}
'''
# TODO: implement
# dojox.form.RangeSlider
# dojox.form.MultiComboBox
# dojox.form.FileUploader
|
tta/gnuradio-tta
|
refs/heads/master
|
gnuradio-examples/python/digital/pick_bitrate.py
|
12
|
#
# Copyright 2005,2006 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import eng_notation
_default_bitrate = 500e3
_valid_samples_per_symbol = (2,3,4,5,6,7)
def _gen_tx_info(converter_rate, xrates):
results = []
for samples_per_symbol in _valid_samples_per_symbol:
for interp in xrates:
bitrate = converter_rate / interp / samples_per_symbol
results.append((bitrate, samples_per_symbol, interp))
results.sort()
return results
def _gen_rx_info(converter_rate, xrates):
results = []
for samples_per_symbol in _valid_samples_per_symbol:
for decim in xrates:
bitrate = converter_rate / decim / samples_per_symbol
results.append((bitrate, samples_per_symbol, decim))
results.sort()
return results
def _filter_info(info, samples_per_symbol, xrate):
if samples_per_symbol is not None:
info = [x for x in info if x[1] == samples_per_symbol]
if xrate is not None:
info = [x for x in info if x[2] == xrate]
return info
def _pick_best(target_bitrate, bits_per_symbol, info):
"""
@returns tuple (bitrate, samples_per_symbol, interp_rate_or_decim_rate)
"""
if len(info) == 0:
raise RuntimeError, "info is zero length!"
if target_bitrate is None: # return the fastest one
return info[-1]
# convert bit rate to symbol rate
target_symbolrate = target_bitrate / bits_per_symbol
# Find the closest matching symbol rate.
# In the event of a tie, the one with the lowest samples_per_symbol wins.
# (We already sorted them, so the first one is the one we take)
best = info[0]
best_delta = abs(target_symbolrate - best[0])
for x in info[1:]:
delta = abs(target_symbolrate - x[0])
if delta < best_delta:
best_delta = delta
best = x
# convert symbol rate back to bit rate
return ((best[0] * bits_per_symbol),) + best[1:]
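# Worked example (illustrative, not part of the original module): with
# converter_rate=128e6 and xrates=(16, 32), _gen_tx_info() contains both
# (2e6, 2, 32) and (2e6, 4, 16), since 128e6/32/2 == 128e6/16/4 == 2e6.
# For a target bitrate of 2e6 with bits_per_symbol=1, _pick_best() returns
# (2e6, 2, 32): the ascending sort makes the tie go to the lower
# samples_per_symbol, as described above.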
def _pick_bitrate(bitrate, bits_per_symbol, samples_per_symbol,
xrate, converter_rate, xrates, gen_info):
"""
@returns tuple (bitrate, samples_per_symbol, interp_rate_or_decim_rate)
"""
if not isinstance(bits_per_symbol, int) or bits_per_symbol < 1:
raise ValueError, "bits_per_symbol must be an int >= 1"
if samples_per_symbol is not None and xrate is not None: # completely determined
return (float(converter_rate) / xrate / samples_per_symbol,
samples_per_symbol, xrate)
if bitrate is None and samples_per_symbol is None and xrate is None:
bitrate = _default_bitrate
# now we have a target bitrate and possibly an xrate or
# samples_per_symbol constraint, but not both of them.
ret = _pick_best(bitrate, bits_per_symbol,
_filter_info(gen_info(converter_rate, xrates), samples_per_symbol, xrate))
print "Actual Bitrate:", eng_notation.num_to_str(ret[0])
return ret
# ---------------------------------------------------------------------------------------
def pick_tx_bitrate(bitrate, bits_per_symbol, samples_per_symbol,
interp_rate, converter_rate, possible_interps):
"""
Given the input parameters, return a configuration that matches.
@param bitrate: desired bitrate or None
@type bitrate: number or None
@param bits_per_symbol: E.g., BPSK -> 1, QPSK -> 2, 8-PSK -> 3
@type bits_per_symbol: integer >= 1
@param samples_per_symbol: samples/baud (aka samples/symbol)
@type samples_per_symbol: number or None
@param interp_rate: USRP interpolation factor
@type interp_rate: integer or None
@param converter_rate: converter sample rate in Hz
@type converter_rate: number
@param possible_interps: a list of possible rates
@type possible_interps: a list of integers
@returns tuple (bitrate, samples_per_symbol, interp_rate)
"""
print "Requested TX Bitrate:", bitrate and eng_notation.num_to_str(bitrate) or 'Auto',
return _pick_bitrate(bitrate, bits_per_symbol, samples_per_symbol,
interp_rate, converter_rate, possible_interps, _gen_tx_info)
def pick_rx_bitrate(bitrate, bits_per_symbol, samples_per_symbol,
decim_rate, converter_rate, possible_decims):
"""
Given the input parameters, return a configuration that matches.
@param bitrate: desired bitrate or None
@type bitrate: number or None
@param bits_per_symbol: E.g., BPSK -> 1, QPSK -> 2, 8-PSK -> 3
@type bits_per_symbol: integer >= 1
@param samples_per_symbol: samples/baud (aka samples/symbol)
@type samples_per_symbol: number or None
@param decim_rate: USRP decimation factor
@type decim_rate: integer or None
@param converter_rate: converter sample rate in Hz
@type converter_rate: number
@param possible_decims: a list of possible rates
@type possible_decims: a list of integers
@returns tuple (bitrate, samples_per_symbol, decim_rate)
"""
print "Requested RX Bitrate:", bitrate and eng_notation.num_to_str(bitrate) or 'Auto'
return _pick_bitrate(bitrate, bits_per_symbol, samples_per_symbol,
decim_rate, converter_rate, possible_decims, _gen_rx_info)
|
huor/incubator-hawq
|
refs/heads/master
|
tools/bin/gppylib/commands/dca.py
|
12
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Module for commands that are DCA specific
"""
import os
from gppylib.gplog import get_default_logger
from base import Command, LOCAL, REMOTE
logger = get_default_logger()
# NOTE: this is a check for a 1040 or later appliance
def is_dca_appliance():
try:
if os.path.isfile('/opt/dca/bin/dca_gpdb_initialized'):
return True
except:
pass
return False
#-----------------------------------------------
class DcaGpdbInitialized(Command):
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
self.cmdStr="/opt/dca/bin/dca_gpdb_initialized"
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
@staticmethod
def local():
try:
cmd=DcaGpdbInitialized('dcainit')
cmd.run(validateAfter=True)
except Exception, e:
logger.error(e.__str__())
logger.error("Exception running dca initialization")
except:
logger.error("Exception running dca initialization")
#-----------------------------------------------
class DcaGpdbStopped(Command):
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
self.cmdStr="/opt/dca/bin/dca_gpdb_stopped"
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
@staticmethod
def local():
try:
cmd=DcaGpdbStopped('dcastop')
cmd.run(validateAfter=True)
except Exception, e:
logger.error(e.__str__())
logger.error("Exception running dca de-initialization")
except:
logger.error("Exception running dca de-initialization")
|
Barbarian1010/pychess
|
refs/heads/master
|
testing/frc_castling.py
|
20
|
from __future__ import print_function
import unittest
from pychess.Utils.const import *
from pychess.Utils.lutils.leval import LBoard
from pychess.Utils.lutils.lmove import FLAG
from pychess.Utils.lutils.lmovegen import genCastles, newMove
# TODO: add more test data
data = (
("r3k2r/8/8/8/8/8/8/R3K2R w AH - 0 1", [(E1, G1, KING_CASTLE), (E1, C1, QUEEN_CASTLE)]),
("r3k2r/8/8/8/8/8/8/R3K2R b ah - 0 1", [(E8, G8, KING_CASTLE), (E8, C8, QUEEN_CASTLE)]),
("1br3kr/2p5/8/8/8/8/8/1BR3KR w CH - 0 2", [(G1, G1, KING_CASTLE), (G1, C1, QUEEN_CASTLE)]),
("1br3kr/2p5/8/8/8/8/8/1BR3KR b ch - 0 2", [(G8, G8, KING_CASTLE), (G8, C8, QUEEN_CASTLE)]),
("2r1k2r/8/8/8/8/8/8/2R1K2R w H - 0 1", [(E1, G1, KING_CASTLE)]),
("2r1k2r/8/8/8/8/8/8/2R1K2R b h - 0 1", [(E8, G8, KING_CASTLE)]),
("3rk1qr/8/8/8/8/8/8/3RK1QR w - - 0 1", []),
("3rk1qr/8/8/8/8/8/8/3RK1QR b - - 0 1", []),
)
class FRCCastlingTestCase(unittest.TestCase):
def testFRCCastling(self):
"""Testing FRC castling movegen"""
print()
for fen, castles in data:
print(fen)
board = LBoard(FISCHERRANDOMCHESS)
board.applyFen(fen)
#print board
moves = [move for move in genCastles(board)]
self.assertEqual(len(moves), len(castles))
for i, castle in enumerate(castles):
kfrom, kto, flag = castle
self.assertEqual(moves[i], newMove(kfrom, kto, flag))
if __name__ == '__main__':
unittest.main()
|
blockstack/blockstack-server
|
refs/heads/master
|
integration_tests/blockstack_integration_tests/scenarios/name_import_update_transfer.py
|
1
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of Blockstack
Blockstack is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
"""
import testlib
import virtualchain
import json
import shutil
import tempfile
import os
wallets = [
testlib.Wallet( "5JesPiN68qt44Hc2nT8qmyZ1JDwHebfoh9KQ52Lazb1m1LaKNj9", 100000000000 ),
testlib.Wallet( "5KHqsiU9qa77frZb6hQy9ocV7Sus9RWJcQGYYBJJBb2Efj1o77e", 100000000000 ),
testlib.Wallet( "5Kg5kJbQHvk1B64rJniEmgbD83FpZpbw2RjdAZEzTefs9ihN3Bz", 100000000000 ),
testlib.Wallet( "5JuVsoS9NauksSkqEjbUZxWwgGDQbMwPsEfoRBSpLpgDX1RtLX7", 100000000000 ),
testlib.Wallet( "5KEpiSRr1BrT8vRD7LKGCEmudokTh1iMHbiThMQpLdwBwhDJB1T", 100000000000 ),
testlib.Wallet( "5K5hDuynZ6EQrZ4efrchCwy6DLhdsEzuJtTDAf3hqdsCKbxfoeD", 100000000000 ),
testlib.Wallet( "5J39aXEeHh9LwfQ4Gy5Vieo7sbqiUMBXkPH7SaMHixJhSSBpAqz", 100000000000 ),
testlib.Wallet( "5K9LmMQskQ9jP1p7dyieLDAeB6vsAj4GK8dmGNJAXS1qHDqnWhP", 100000000000 ),
testlib.Wallet( "5KcNen67ERBuvz2f649t9F2o1ddTjC5pVUEqcMtbxNgHqgxG2gZ", 100000000000 )
]
consensus = "17ac43c1d8549c3181b200f1bf97eb7d"
debug = True
def scenario( wallets, **kw ):
# make a test namespace
resp = testlib.blockstack_namespace_preorder( "test", wallets[1].addr, wallets[0].privkey )
if debug or 'error' in resp:
print json.dumps( resp, indent=4 )
testlib.next_block( **kw )
resp = testlib.blockstack_namespace_reveal( "test", wallets[1].addr, 52595, 250, 4, [6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0], 10, 10, wallets[0].privkey )
if debug or 'error' in resp:
print json.dumps( resp, indent=4 )
testlib.next_block( **kw )
resp = testlib.blockstack_name_import( "foo.test", wallets[3].addr, "11" * 20, wallets[1].privkey )
if 'error' in resp:
print json.dumps( resp, indent=4 )
testlib.next_block( **kw )
resp = testlib.blockstack_namespace_ready( "test", wallets[1].privkey )
if debug or 'error' in resp:
print json.dumps( resp, indent=4 )
testlib.next_block( **kw )
resp = testlib.blockstack_name_update( "foo.test", "22" * 20, wallets[3].privkey )
if 'error' in resp:
print json.dumps( resp, indent=4 )
testlib.next_block( **kw )
resp = testlib.blockstack_name_transfer( "foo.test", wallets[4].addr, True, wallets[3].privkey )
if 'error' in resp:
print json.dumps( resp, indent=4 )
testlib.next_block( **kw )
def check( state_engine ):
# not revealed, but ready
ns = state_engine.get_namespace_reveal( "test" )
if ns is not None:
return False
ns = state_engine.get_namespace( "test" )
if ns is None:
return False
if ns['namespace_id'] != 'test':
return False
# not preordered
for i in xrange(0, len(wallets)):
preorder = state_engine.get_name_preorder( "foo.test", virtualchain.make_payment_script(wallets[i].addr), wallets[(i+1)%5].addr )
if preorder is not None:
print "preordered"
return False
# registered
name_rec = state_engine.get_name( "foo.test" )
if name_rec is None:
print "no name"
return False
# updated, and data preserved
if name_rec['value_hash'] != "22" * 20:
print "wrong value hash"
return False
# transferred
if name_rec['address'] != wallets[4].addr or name_rec['sender'] != virtualchain.make_payment_script( wallets[4].addr ):
print "wrong owner"
return False
return True
|
ovnicraft/edx-platform
|
refs/heads/master
|
common/test/acceptance/tests/studio/test_studio_rerun.py
|
122
|
"""
Acceptance tests for Studio related to course reruns.
"""
import random
from bok_choy.promise import EmptyPromise
from nose.tools import assert_in
from ...pages.studio.index import DashboardPage
from ...pages.studio.course_rerun import CourseRerunPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.courseware import CoursewarePage
from ...fixtures.course import XBlockFixtureDesc
from base_studio_test import StudioCourseTest
class CourseRerunTest(StudioCourseTest):
"""
Feature: Courses can be rerun
"""
__test__ = True
SECTION_NAME = 'Rerun Section'
SUBSECTION_NAME = 'Rerun Subsection'
UNIT_NAME = 'Rerun Unit'
COMPONENT_NAME = 'Rerun Component'
COMPONENT_CONTENT = 'Test Content'
def setUp(self):
"""
Login as global staff because that's the only way to rerun a course.
"""
super(CourseRerunTest, self).setUp(is_staff=True)
self.dashboard_page = DashboardPage(self.browser)
def populate_course_fixture(self, course_fixture):
"""
Create a sample course with one section, one subsection, one unit, and one component.
"""
course_fixture.add_children(
XBlockFixtureDesc('chapter', self.SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', self.SUBSECTION_NAME).add_children(
XBlockFixtureDesc('vertical', self.UNIT_NAME).add_children(
XBlockFixtureDesc('html', self.COMPONENT_NAME, self.COMPONENT_CONTENT)
)
)
)
)
def test_course_rerun(self):
"""
Scenario: Courses can be rerun
Given I have a course with a section, subsection, vertical, and html component with content 'Test Content'
When I visit the course rerun page
And I type 'test_rerun' in the course run field
And I click Create Rerun
And I visit the course listing page
And I wait for all courses to finish processing
And I click on the course with run 'test_rerun'
Then I see a rerun notification on the course outline page
And when I click 'Dismiss' on the notification
Then I do not see a rerun notification
And when I expand the subsection and click on the unit
And I click 'View Live Version'
Then I see one html component with the content 'Test Content'
"""
course_info = (self.course_info['org'], self.course_info['number'], self.course_info['run'])
self.dashboard_page.visit()
self.dashboard_page.create_rerun(self.course_info['display_name'])
rerun_page = CourseRerunPage(self.browser, *course_info)
rerun_page.wait_for_page()
course_run = 'test_rerun_' + str(random.randrange(1000000, 9999999))
rerun_page.course_run = course_run
rerun_page.create_rerun()
def finished_processing():
self.dashboard_page.visit()
return not self.dashboard_page.has_processing_courses
EmptyPromise(finished_processing, "Rerun finished processing", try_interval=5, timeout=60).fulfill()
assert_in(course_run, self.dashboard_page.course_runs)
self.dashboard_page.click_course_run(course_run)
outline_page = CourseOutlinePage(self.browser, *course_info)
outline_page.wait_for_page()
self.assertTrue(outline_page.has_rerun_notification)
outline_page.dismiss_rerun_notification()
EmptyPromise(lambda: not outline_page.has_rerun_notification, "Rerun notification dismissed").fulfill()
subsection = outline_page.section(self.SECTION_NAME).subsection(self.SUBSECTION_NAME)
subsection.expand_subsection()
unit_page = subsection.unit(self.UNIT_NAME).go_to()
unit_page.view_published_version()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 1)
self.assertEqual(courseware.xblock_component_html_content(), self.COMPONENT_CONTENT)
|
alianmohammad/pd-gem5-latest
|
refs/heads/master
|
src/arch/x86/isa/insts/simd128/floating_point/data_conversion/__init__.py
|
91
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["convert_floating_point_to_floating_point",
"convert_floating_point_to_xmm_integer",
"convert_floating_point_to_mmx_integer",
"convert_floating_point_to_gpr_integer"]
microcode = '''
# SSE instructions
'''
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
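# An equivalent sketch using the standard library instead of the exec
# statement (assumes Python 2.7+, where importlib.import_module accepts a
# relative name plus a package argument; in a package __init__.py, __name__
# names the package itself):
#   import importlib
#   for category in categories:
#       cat = importlib.import_module('.' + category, __name__)
#       microcode += cat.microcode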
|
KiChjang/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/tests/minimize_window/__init__.py
|
12133432
| |
Bogh/django-oscar
|
refs/heads/master
|
sites/sandbox/apps/__init__.py
|
12133432
| |
viniciusgama/blog_gae
|
refs/heads/master
|
django/utils/__init__.py
|
12133432
| |
jpypi/dup-image-search
|
refs/heads/master
|
db/create_database.py
|
1
|
#!/usr/bin/env python2
"""
create_database.py
Creates the initial database for image hashing.
:author: Brandon Arrendondo
:author: James Jenkins
:license: MIT
"""
import sys
import argparse
from db import hash_db
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("--dbfilepath", default="hashdb.sqlite",
help="Filepath of the database to create.")
args = parser.parse_args()
db_conn = hash_db.db_open(args.dbfilepath)
hash_db.reset_db(db_conn)
print "Initial empty database created."
if __name__ == "__main__":
main(sys.argv[1:])
|
vtoomas/pupy
|
refs/heads/master
|
client/additional_imports.py
|
10
|
import socket
import threading
import Queue
import collections
import SocketServer
import struct
import os
import sys
import time
import traceback
import uuid
import subprocess
import StringIO
import imp
import hashlib
import base64
import logging
import re
import ssl
import tempfile
import string
import datetime
import random
import shutil
import platform
import errno, stat
import zlib
import code
import glob
import multiprocessing
import math
import binascii
import inspect
import shlex
import json
import ctypes
import ctypes.wintypes
import urllib
import urllib2
import socks
|
ConeyLiu/spark
|
refs/heads/master
|
python/pyspark/sql/tests/test_session.py
|
8
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, SQLContext, Row
from pyspark.testing.sqlutils import ReusedSQLTestCase
from pyspark.testing.utils import PySparkTestCase
class SparkSessionTests(ReusedSQLTestCase):
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
class SparkSessionTests1(ReusedSQLTestCase):
# We can't include this test in SQLTests because we will stop the class's
# SparkContext and cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
try:
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
finally:
spark.stop()
sc.stop()
class SparkSessionTests2(PySparkTestCase):
# This test is separate because it's closely related with session's start and stop.
# See SPARK-23228.
def test_set_jvm_default_session(self):
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
finally:
spark.stop()
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isEmpty())
def test_jvm_default_session_already_set(self):
# Here, we assume there is the default session already set in JVM.
jsession = self.sc._jvm.SparkSession(self.sc._jsc.sc())
self.sc._jvm.SparkSession.setDefaultSession(jsession)
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
# The session should be the same as the existing one.
self.assertTrue(jsession.equals(spark._jvm.SparkSession.getDefaultSession().get()))
finally:
spark.stop()
class SparkSessionTests3(unittest.TestCase):
def test_active_session(self):
spark = SparkSession.builder \
.master("local") \
.getOrCreate()
try:
activeSession = SparkSession.getActiveSession()
df = activeSession.createDataFrame([(1, 'Alice')], ['age', 'name'])
self.assertEqual(df.collect(), [Row(age=1, name=u'Alice')])
finally:
spark.stop()
def test_get_active_session_when_no_active_session(self):
active = SparkSession.getActiveSession()
self.assertEqual(active, None)
spark = SparkSession.builder \
.master("local") \
.getOrCreate()
active = SparkSession.getActiveSession()
self.assertEqual(active, spark)
spark.stop()
active = SparkSession.getActiveSession()
self.assertEqual(active, None)
def test_SparkSession(self):
spark = SparkSession.builder \
.master("local") \
.config("some-config", "v2") \
.getOrCreate()
try:
self.assertEqual(spark.conf.get("some-config"), "v2")
self.assertEqual(spark.sparkContext._conf.get("some-config"), "v2")
self.assertEqual(spark.version, spark.sparkContext.version)
spark.sql("CREATE DATABASE test_db")
spark.catalog.setCurrentDatabase("test_db")
self.assertEqual(spark.catalog.currentDatabase(), "test_db")
spark.sql("CREATE TABLE table1 (name STRING, age INT) USING parquet")
self.assertEqual(spark.table("table1").columns, ['name', 'age'])
self.assertEqual(spark.range(3).count(), 3)
finally:
spark.sql("DROP DATABASE test_db CASCADE")
spark.stop()
def test_global_default_session(self):
spark = SparkSession.builder \
.master("local") \
.getOrCreate()
try:
self.assertEqual(SparkSession.builder.getOrCreate(), spark)
finally:
spark.stop()
def test_default_and_active_session(self):
spark = SparkSession.builder \
.master("local") \
.getOrCreate()
activeSession = spark._jvm.SparkSession.getActiveSession()
defaultSession = spark._jvm.SparkSession.getDefaultSession()
try:
self.assertEqual(activeSession, defaultSession)
finally:
spark.stop()
def test_config_option_propagated_to_existing_session(self):
session1 = SparkSession.builder \
.master("local") \
.config("spark-config1", "a") \
.getOrCreate()
self.assertEqual(session1.conf.get("spark-config1"), "a")
session2 = SparkSession.builder \
.config("spark-config1", "b") \
.getOrCreate()
try:
self.assertEqual(session1, session2)
self.assertEqual(session1.conf.get("spark-config1"), "b")
finally:
session1.stop()
def test_new_session(self):
session = SparkSession.builder \
.master("local") \
.getOrCreate()
newSession = session.newSession()
try:
self.assertNotEqual(session, newSession)
finally:
session.stop()
newSession.stop()
def test_create_new_session_if_old_session_stopped(self):
session = SparkSession.builder \
.master("local") \
.getOrCreate()
session.stop()
newSession = SparkSession.builder \
.master("local") \
.getOrCreate()
try:
self.assertNotEqual(session, newSession)
finally:
newSession.stop()
def test_active_session_with_None_and_not_None_context(self):
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
sc = None
session = None
try:
sc = SparkContext._active_spark_context
self.assertEqual(sc, None)
activeSession = SparkSession.getActiveSession()
self.assertEqual(activeSession, None)
sparkConf = SparkConf()
sc = SparkContext.getOrCreate(sparkConf)
activeSession = sc._jvm.SparkSession.getActiveSession()
self.assertFalse(activeSession.isDefined())
session = SparkSession(sc)
activeSession = sc._jvm.SparkSession.getActiveSession()
self.assertTrue(activeSession.isDefined())
activeSession2 = SparkSession.getActiveSession()
self.assertNotEqual(activeSession2, None)
finally:
if session is not None:
session.stop()
if sc is not None:
sc.stop()
class SparkSessionTests4(ReusedSQLTestCase):
def test_get_active_session_after_create_dataframe(self):
session2 = None
try:
activeSession1 = SparkSession.getActiveSession()
session1 = self.spark
self.assertEqual(session1, activeSession1)
session2 = self.spark.newSession()
activeSession2 = SparkSession.getActiveSession()
self.assertEqual(session1, activeSession2)
self.assertNotEqual(session2, activeSession2)
session2.createDataFrame([(1, 'Alice')], ['age', 'name'])
activeSession3 = SparkSession.getActiveSession()
self.assertEqual(session2, activeSession3)
session1.createDataFrame([(1, 'Alice')], ['age', 'name'])
activeSession4 = SparkSession.getActiveSession()
self.assertEqual(session1, activeSession4)
finally:
if session2 is not None:
session2.stop()
class SparkSessionTests5(unittest.TestCase):
def setUp(self):
# These tests require restarting the Spark context so we set up a new one for each test
# rather than at the class level.
self.sc = SparkContext('local[4]', self.__class__.__name__, conf=SparkConf())
self.spark = SparkSession(self.sc)
def tearDown(self):
self.sc.stop()
self.spark.stop()
def test_sqlcontext_with_stopped_sparksession(self):
# SPARK-30856: test that SQLContext.getOrCreate() returns a usable instance after
# the SparkSession is restarted.
sql_context = self.spark._wrapped
self.spark.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession(sc) # Instantiate the underlying SQLContext
new_sql_context = spark._wrapped
self.assertIsNot(new_sql_context, sql_context)
self.assertIs(SQLContext.getOrCreate(sc).sparkSession, spark)
try:
df = spark.createDataFrame([(1, 2)], ['c', 'c'])
df.collect()
finally:
spark.stop()
self.assertIsNone(SQLContext._instantiatedContext)
sc.stop()
def test_sqlcontext_with_stopped_sparkcontext(self):
# SPARK-30856: test initialization via SparkSession when only the SparkContext is stopped
self.sc.stop()
self.sc = SparkContext('local[4]', self.sc.appName)
self.spark = SparkSession(self.sc)
self.assertIs(SQLContext.getOrCreate(self.sc).sparkSession, self.spark)
def test_get_sqlcontext_with_stopped_sparkcontext(self):
# SPARK-30856: test initialization via SQLContext.getOrCreate() when only the SparkContext
# is stopped
self.sc.stop()
self.sc = SparkContext('local[4]', self.sc.appName)
self.assertIs(SQLContext.getOrCreate(self.sc)._sc, self.sc)
class SparkSessionBuilderTests(unittest.TestCase):
def test_create_spark_context_first_then_spark_session(self):
sc = None
session = None
try:
conf = SparkConf().set("key1", "value1")
sc = SparkContext('local[4]', "SessionBuilderTests", conf=conf)
session = SparkSession.builder.config("key2", "value2").getOrCreate()
self.assertEqual(session.conf.get("key1"), "value1")
self.assertEqual(session.conf.get("key2"), "value2")
self.assertEqual(session.sparkContext, sc)
self.assertFalse(sc.getConf().contains("key2"))
self.assertEqual(sc.getConf().get("key1"), "value1")
finally:
if session is not None:
session.stop()
if sc is not None:
sc.stop()
def test_another_spark_session(self):
session1 = None
session2 = None
try:
session1 = SparkSession.builder.config("key1", "value1").getOrCreate()
session2 = SparkSession.builder.config("key2", "value2").getOrCreate()
self.assertEqual(session1.conf.get("key1"), "value1")
self.assertEqual(session2.conf.get("key1"), "value1")
self.assertEqual(session1.conf.get("key2"), "value2")
self.assertEqual(session2.conf.get("key2"), "value2")
self.assertEqual(session1.sparkContext, session2.sparkContext)
self.assertEqual(session1.sparkContext.getConf().get("key1"), "value1")
self.assertFalse(session1.sparkContext.getConf().contains("key2"))
finally:
if session1 is not None:
session1.stop()
if session2 is not None:
session2.stop()
class SparkExtensionsTest(unittest.TestCase):
# These tests are separate because it uses 'spark.sql.extensions' which is
# static and immutable. This can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"SparkSessionExtensionSuite.class")
if not glob.glob(os.path.join(SPARK_HOME, filename_pattern)):
raise unittest.SkipTest(
"'org.apache.spark.sql.SparkSessionExtensionSuite' is not "
"available. Will skip the related tests.")
# Note that 'spark.sql.extensions' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.extensions",
"org.apache.spark.sql.MyExtensions") \
.getOrCreate()
@classmethod
def tearDownClass(cls):
cls.spark.stop()
def test_use_custom_class_for_extensions(self):
self.assertTrue(
self.spark._jsparkSession.sessionState().planner().strategies().contains(
self.spark._jvm.org.apache.spark.sql.MySparkStrategy(self.spark._jsparkSession)),
"MySparkStrategy not found in active planner strategies")
self.assertTrue(
self.spark._jsparkSession.sessionState().analyzer().extendedResolutionRules().contains(
self.spark._jvm.org.apache.spark.sql.MyRule(self.spark._jsparkSession)),
"MyRule not found in extended resolution rules")
if __name__ == "__main__":
from pyspark.sql.tests.test_session import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
avinassh/Bitbucket-import
|
refs/heads/master
|
templates.py
|
1
|
from string import Template
sel_script_template = Template("""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head profile="http://selenium-ide.openqa.org/profiles/test-case">
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<link rel="selenium.base" href="https://bitbucket.org/account/signin/?next=/repo/import" />
<title>New Test</title>
</head>
<body>
<table cellpadding="1" cellspacing="1" border="1">
<thead>
<tr><td rowspan="1" colspan="3">New Test</td></tr>
</thead><tbody>
<tr>
<td>open</td>
<td>/account/signin/?next=/repo/import</td>
<td></td>
</tr>
<tr>
<td>type</td>
<td>id=id_username</td>
<td>$bb_username</td>
</tr>
<tr>
<td>type</td>
<td>id=id_password</td>
<td>$bb_password</td>
</tr>
<tr>
<td>clickAndWait</td>
<td>name=submit</td>
<td></td>
</tr>
<tr>
<td>pause</td>
<td>100</td>
<td></td>
</tr>
$commands
</tbody></table>
</body>
</html>
""")
command_template = Template("""<tr>
<td>pause</td>
<td>1000</td>
<td></td>
</tr>
<tr>
<td>open</td>
<td>https://bitbucket.org/repo/import</td>
<td></td>
</tr>
<tr>
<td>pause</td>
<td>100</td>
<td></td>
</tr>
<tr>
<td>open</td>
<td>https://bitbucket.org/repo/import</td>
<td></td>
</tr>
<tr>
<td>type</td>
<td>id=id_url</td>
<td>$repo_url</td>
</tr>
<tr>
<td>click</td>
<td>id=id_auth</td>
<td></td>
</tr>
<tr>
<td>type</td>
<td>id=id_username</td>
<td>$bb_username</td>
</tr>
<tr>
<td>type</td>
<td>id=id_password</td>
<td>$bb_password</td>
</tr>
<tr>
<td>select</td>
<td>id=id_owner</td>
<td>value=$owner_value</td>
</tr>
<tr>
<td>clickAndWait</td>
<td>xpath=(//button[@type='submit'])[2]</td>
<td></td>
</tr>""")
|
daxxi13/CouchPotatoServer
|
refs/heads/develop
|
libs/CodernityDB/__init__.py
|
45
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '0.4.2'
__license__ = "Apache 2.0"
|
tornadozou/tensorflow
|
refs/heads/master
|
tensorflow/python/framework/test_util_test.py
|
51
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import threading
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase):
def test_assert_ops_in_graph(self):
with self.test_session():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegexp(AssertionError,
r"^Found unexpected node 'seven"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegexp(AssertionError,
r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
node_def = ops._NodeDef("op_type", "name")
node_def_orig = ops._NodeDef("op_type_orig", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(), original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegexp(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegexp(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
def testAllCloseNestedDicts(self):
a = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
with self.assertRaisesRegexp(
TypeError,
r"inputs could not be safely coerced to any supported types"):
self.assertAllClose(a, a)
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
def testAssertAllCloseAccordingToType(self):
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
with self.assertRaises(AssertionError):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
def testRandomSeed(self):
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
with self.test_session():
a_rand = random_ops.random_normal([1]).eval()
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
with self.test_session():
b_rand = random_ops.random_normal([1]).eval()
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertEqual(a_rand, b_rand)
if __name__ == "__main__":
googletest.main()
|
xleng/YCM_WIN_X86
|
refs/heads/master
|
third_party/ycmd/third_party/waitress/waitress/tests/fixtureapps/runner.py
|
40
|
def app(): # pragma: no cover
return None
def returns_app(): # pragma: no cover
return app
|
jorge2703/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_ward_structured_vs_unstructured.py
|
320
|
"""
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the points' positions.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when imposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
|
normanjaeckel/WLANSimulationGame
|
refs/heads/master
|
wlan_simulation_game/wsgi.py
|
1
|
"""
WSGI config.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wlan_simulation_game_settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
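# Deployment sketch (an assumption, not part of the original file): any WSGI
# server can serve the ``application`` object above, e.g. with gunicorn:
#
#     gunicorn wlan_simulation_game.wsgi:application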
|
darkenk/scripts
|
refs/heads/master
|
android/project_creator/venv/lib/python3.5/site-packages/pip/_vendor/colorama/ansitowin32.py
|
450
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll, winapi_test
winterm = None
if windll is not None:
winterm = WinTerm()
def is_stream_closed(stream):
return not hasattr(stream, 'closed') or stream.closed
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
self.__convertor.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_CSI_RE = re.compile(r'\001?\033\[((?:\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile(r'\001?\033\]((?:.|;)*?)(\x07)\002?') # Operating System Command
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = os.name == 'nt'
# We test if the WinAPI works, because even if we are on Windows
# we may be using a terminal that doesn't support the WinAPI
# (e.g. Cygwin Terminal). In this case it's up to the terminal
# to support the ANSI codes.
conversion_supported = on_windows and winapi_test()
# should we strip ANSI sequences from our output?
if strip is None:
strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))
self.strip = strip
# should we convert ANSI sequences into win32 calls?
if convert is None:
convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
}
return dict()
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.strip and not is_stream_closed(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
text = self.convert_osc(text)
for match in self.ANSI_CSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(command, paramstring)
self.call_win32(command, params)
def extract_params(self, command, paramstring):
if command in 'Hf':
params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
while len(params) < 2:
# defaults:
params = params + (1,)
else:
params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
if len(params) == 0:
# defaults:
if command in 'JKm':
params = (0,)
elif command in 'ABCD':
params = (1,)
return params
def call_win32(self, command, params):
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in 'J':
winterm.erase_screen(params[0], on_stderr=self.on_stderr)
elif command in 'K':
winterm.erase_line(params[0], on_stderr=self.on_stderr)
elif command in 'Hf': # cursor position - absolute
winterm.set_cursor_position(params, on_stderr=self.on_stderr)
elif command in 'ABCD': # cursor position - relative
n = params[0]
# A - up, B - down, C - forward, D - back
x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
def convert_osc(self, text):
for match in self.ANSI_OSC_RE.finditer(text):
start, end = match.span()
text = text[:start] + text[end:]
paramstring, command = match.groups()
if command in '\x07': # \x07 = BEL
params = paramstring.split(";")
# 0 - change title and icon (we will only change title)
# 1 - change icon (we don't support this)
# 2 - change title
if params[0] in '02':
winterm.set_title(params[1])
return text
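# A minimal usage sketch (an assumption, not part of the original module):
# wrapping sys.stdout is the intended pattern for AnsiToWin32. On Windows the
# escape sequence below is translated into win32 calls; elsewhere it is
# passed through or stripped, depending on the tty detection in __init__.
if __name__ == '__main__':
    wrapper = AnsiToWin32(sys.stdout)
    stream = wrapper.stream if wrapper.should_wrap() else sys.stdout
    stream.write('\033[31mred text\033[0m\n')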
|
sujithshankar/anaconda
|
refs/heads/master
|
pyanaconda/ui/tui/spokes/storage.py
|
1
|
# Text storage configuration spoke classes
#
# Copyright (C) 2012-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import gi
gi.require_version("BlockDev", "1.0")
from gi.repository import BlockDev as blockdev
from pyanaconda.ui.lib.disks import getDisks, applyDiskSelection, checkDiskSelection
from pyanaconda.ui.categories.system import SystemCategory
from pyanaconda.ui.tui.spokes import NormalTUISpoke
from pyanaconda.ui.tui.simpleline import TextWidget, CheckboxWidget
from pyanaconda.ui.tui.tuiobject import YesNoDialog
from pyanaconda.storage_utils import AUTOPART_CHOICES, sanity_check, SanityError, SanityWarning
from blivet import arch
from blivet.size import Size
from blivet.errors import StorageError
from blivet.devices import DASDDevice, FcoeDiskDevice, iScsiDiskDevice, MultipathDevice, ZFCPDiskDevice
from pyanaconda.flags import flags
from pyanaconda.kickstart import doKickstartStorage, resetCustomStorageData
from pyanaconda.threads import threadMgr, AnacondaThread
from pyanaconda.constants import THREAD_STORAGE, THREAD_STORAGE_WATCHER, THREAD_DASDFMT, DEFAULT_AUTOPART_TYPE
from pyanaconda.constants import PAYLOAD_STATUS_PROBING_STORAGE
from pyanaconda.constants_text import INPUT_PROCESSED
from pyanaconda.i18n import _, P_, N_
from pyanaconda.bootloader import BootLoaderError
from pykickstart.constants import CLEARPART_TYPE_ALL, CLEARPART_TYPE_LINUX, CLEARPART_TYPE_NONE, AUTOPART_TYPE_LVM
from pykickstart.errors import KickstartParseError
from collections import OrderedDict
import logging
log = logging.getLogger("anaconda")
__all__ = ["StorageSpoke", "AutoPartSpoke"]
CLEARALL = N_("Use All Space")
CLEARLINUX = N_("Replace Existing Linux system(s)")
CLEARNONE = N_("Use Free Space")
PARTTYPES = {CLEARALL: CLEARPART_TYPE_ALL, CLEARLINUX: CLEARPART_TYPE_LINUX,
CLEARNONE: CLEARPART_TYPE_NONE}
class StorageSpoke(NormalTUISpoke):
"""Storage spoke where users proceed to customize storage features such
as disk selection, partitioning, and fs type.
.. inheritance-diagram:: StorageSpoke
:parts: 3
"""
title = N_("Installation Destination")
category = SystemCategory
def __init__(self, app, data, storage, payload, instclass):
NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)
self._ready = False
self.selected_disks = self.data.ignoredisk.onlyuse[:]
self.selection = None
self.autopart = None
self.clearPartType = None
# This list gets set up once in initialize and should not be modified
# except perhaps to add advanced devices. It will remain the full list
# of disks that can be included in the install.
self.disks = []
self.errors = []
self.warnings = []
if self.data.zerombr.zerombr and arch.is_s390():
# if zerombr is specified in a ks file and there are unformatted
# dasds, automatically format them. pass in storage.devicetree here
# instead of storage.disks since media_present is checked on disks;
# a dasd needing dasdfmt will fail this media check though
to_format = [d for d in getDisks(self.storage.devicetree)
if d.type == "dasd" and blockdev.s390.dasd_needs_format(d.busid)]
if to_format:
self.run_dasdfmt(to_format)
if not flags.automatedInstall:
# default to using autopart for interactive installs
self.data.autopart.autopart = True
@property
def completed(self):
retval = bool(self.storage.root_device and not self.errors)
return retval
@property
def ready(self):
# By default, the storage spoke is not ready. We have to wait until
# storageInitialize is done.
return self._ready and not (threadMgr.get(THREAD_STORAGE_WATCHER) or threadMgr.get(THREAD_DASDFMT))
@property
def mandatory(self):
return True
@property
def showable(self):
return not flags.dirInstall
@property
def status(self):
""" A short string describing the current status of storage setup. """
msg = _("No disks selected")
if flags.automatedInstall and not self.storage.root_device:
msg = _("Kickstart insufficient")
elif self.data.ignoredisk.onlyuse:
msg = P_(("%d disk selected"),
("%d disks selected"),
len(self.data.ignoredisk.onlyuse)) % len(self.data.ignoredisk.onlyuse)
if self.errors:
msg = _("Error checking storage configuration")
elif self.warnings:
msg = _("Warning checking storage configuration")
# Maybe show what type of clearpart and which disks selected?
elif self.data.autopart.autopart:
msg = _("Automatic partitioning selected")
else:
msg = _("Custom partitioning selected")
return msg
def _update_disk_list(self, disk):
""" Update self.selected_disks based on the selection."""
name = disk.name
# if the disk isn't already selected, select it.
if name not in self.selected_disks:
self.selected_disks.append(name)
# If the disk is already selected, deselect it.
elif name in self.selected_disks:
self.selected_disks.remove(name)
def _update_summary(self):
""" Update the summary based on the UI. """
count = 0
capacity = 0
free = Size(0)
# pass in our disk list so hidden disks' free space is available
free_space = self.storage.get_free_space(disks=self.disks)
selected = [d for d in self.disks if d.name in self.selected_disks]
for disk in selected:
capacity += disk.size
free += free_space[disk.name][0]
count += 1
summary = (P_(("%d disk selected; %s capacity; %s free ..."),
("%d disks selected; %s capacity; %s free ..."),
count) % (count, str(Size(capacity)), free))
if len(self.disks) == 0:
summary = _("No disks detected. Please shut down the computer, connect at least one disk, and restart to complete installation.")
elif count == 0:
summary = (_("No disks selected; please select at least one disk to install to."))
# Append storage errors to the summary
if self.errors:
summary = summary + "\n" + "\n".join(self.errors)
elif self.warnings:
summary = summary + "\n" + "\n".join(self.warnings)
return summary
def refresh(self, args=None):
NormalTUISpoke.refresh(self, args)
# Join the initialization thread to block on it
# This print is foul. Need a better message display
print(_(PAYLOAD_STATUS_PROBING_STORAGE))
threadMgr.wait(THREAD_STORAGE_WATCHER)
# synchronize our local data store with the global ksdata
# Commented out because there is no way to select a disk right
# now without putting it in ksdata. Seems wrong?
#self.selected_disks = self.data.ignoredisk.onlyuse[:]
self.autopart = self.data.autopart.autopart
message = self._update_summary()
# loop through the disks and present them.
for disk in self.disks:
disk_info = self._format_disk_info(disk)
c = CheckboxWidget(title="%i) %s" % (self.disks.index(disk) + 1, disk_info),
completed=(disk.name in self.selected_disks))
self._window += [c, ""]
# if we have more than one disk, present an option to just
# select all disks
if len(self.disks) > 1:
c = CheckboxWidget(title="%i) %s" % (len(self.disks) + 1, _("Select all")),
completed=(self.selection == len(self.disks)))
self._window += [c, ""]
self._window += [TextWidget(message), ""]
return True
def _select_all_disks(self):
""" Mark all disks as selected for use in partitioning. """
for disk in self.disks:
if disk.name not in self.selected_disks:
self._update_disk_list(disk)
def _format_disk_info(self, disk):
""" Some specialized disks are difficult to identify in the storage
spoke, so add and return extra identifying information about them.
Since this is going to be ugly to do within the confines of the
CheckboxWidget, pre-format the display string right here.
"""
# show this info for all disks
format_str = "%s: %s (%s)" % (disk.model, disk.size, disk.name)
disk_attrs = []
# now check for/add info about special disks
if isinstance(disk, (MultipathDevice, iScsiDiskDevice, FcoeDiskDevice)):
if hasattr(disk, "wwid"):
disk_attrs.append(disk.wwid)
elif isinstance(disk, DASDDevice):
if hasattr(disk, "busid"):
disk_attrs.append(disk.busid)
elif isinstance(disk, ZFCPDiskDevice):
if hasattr(disk, "fcp_lun"):
disk_attrs.append(disk.fcp_lun)
if hasattr(disk, "wwpn"):
disk_attrs.append(disk.wwpn)
if hasattr(disk, "hba_id"):
disk_attrs.append(disk.hba_id)
# now append all additional attributes to our string
for attr in disk_attrs:
format_str += ", %s" % attr
return format_str
def input(self, args, key):
"""Grab the disk choice and update things"""
self.errors = []
try:
keyid = int(key) - 1
if keyid < 0:
return key
self.selection = keyid
if len(self.disks) > 1 and keyid == len(self.disks):
self._select_all_disks()
else:
self._update_disk_list(self.disks[keyid])
return INPUT_PROCESSED
except (ValueError, IndexError):
if key.lower() == "c":
if self.selected_disks:
# check selected disks to see if we have any unformatted DASDs
# if we're on s390x, since they need to be formatted before we
# can use them.
if arch.is_s390():
_disks = [d for d in self.disks if d.name in self.selected_disks]
to_format = [d for d in _disks if d.type == "dasd" and
blockdev.s390.dasd_needs_format(d.busid)]
if to_format:
self.run_dasdfmt(to_format)
return None
# make sure no containers were split up by the user's disk
# selection
self.errors.extend(checkDiskSelection(self.storage,
self.selected_disks))
if self.errors:
# The disk selection has to make sense before we can
# proceed.
return None
newspoke = AutoPartSpoke(self.app, self.data, self.storage,
self.payload, self.instclass)
self.app.switch_screen_modal(newspoke)
self.apply()
self.execute()
self.close()
return INPUT_PROCESSED
else:
return key
def run_dasdfmt(self, to_format):
"""
This generates the list of DASDs requiring dasdfmt and runs dasdfmt
against them.
"""
# if the storage thread is running, wait on it to complete before taking
# any further actions on devices; most likely to occur if user has
# zerombr in their ks file
threadMgr.wait(THREAD_STORAGE)
# ask user to verify they want to format if zerombr not in ks file
if not self.data.zerombr.zerombr:
# prepare our msg strings; copied directly from dasdfmt.glade
summary = _("The following unformatted DASDs have been detected on your system. You can choose to format them now with dasdfmt or cancel to leave them unformatted. Unformatted DASDs cannot be used during installation.\n\n")
warntext = _("Warning: All storage changes made using the installer will be lost when you choose to format.\n\nProceed to run dasdfmt?\n")
displaytext = summary + "\n".join("/dev/" + d.name for d in to_format) + "\n" + warntext
# now show actual prompt; note -- in cmdline mode, auto-answer for
# this is 'no', so unformatted DASDs will remain so unless zerombr
# is added to the ks file
question_window = YesNoDialog(self._app, displaytext)
self._app.switch_screen_modal(question_window)
if not question_window.answer:
# no? well fine then, back to the storage spoke with you;
return None
for disk in to_format:
try:
print(_("Formatting /dev/%s. This may take a moment.") % disk.name)
blockdev.s390.dasd_format(disk.name)
except blockdev.S390Error as err:
# Log errors if formatting fails, but don't halt the installer
log.error(str(err))
continue
def apply(self):
self.autopart = self.data.autopart.autopart
self.data.ignoredisk.onlyuse = self.selected_disks[:]
self.data.clearpart.drives = self.selected_disks[:]
if self.data.autopart.type is None:
self.data.autopart.type = AUTOPART_TYPE_LVM
if self.autopart:
self.clearPartType = CLEARPART_TYPE_ALL
else:
self.clearPartType = CLEARPART_TYPE_NONE
for disk in self.disks:
if disk.name not in self.selected_disks and \
disk in self.storage.devices:
self.storage.devicetree.hide(disk)
elif disk.name in self.selected_disks and \
disk not in self.storage.devices:
self.storage.devicetree.unhide(disk)
self.data.bootloader.location = "mbr"
if self.data.bootloader.bootDrive and \
self.data.bootloader.bootDrive not in self.selected_disks:
self.data.bootloader.bootDrive = ""
self.storage.bootloader.reset()
self.storage.config.update(self.data)
# If autopart is selected we want to remove whatever has been
# created/scheduled to make room for autopart.
# If custom is selected, we want to leave alone any storage layout the
# user may have set up before now.
self.storage.config.clear_non_existent = self.data.autopart.autopart
def execute(self):
print(_("Generating updated storage configuration"))
try:
doKickstartStorage(self.storage, self.data, self.instclass)
except (StorageError, KickstartParseError) as e:
log.error("storage configuration failed: %s", e)
print(_("storage configuration failed: %s") % e)
self.errors = [str(e)]
self.data.bootloader.bootDrive = ""
self.data.clearpart.type = CLEARPART_TYPE_ALL
self.data.clearpart.initAll = False
self.storage.config.update(self.data)
self.storage.autopart_type = self.data.autopart.type
self.storage.reset()
# now set ksdata back to the user's specified config
applyDiskSelection(self.storage, self.data, self.selected_disks)
except BootLoaderError as e:
log.error("BootLoader setup failed: %s", e)
print(_("storage configuration failed: %s") % e)
self.errors = [str(e)]
self.data.bootloader.bootDrive = ""
else:
print(_("Checking storage configuration..."))
exns = sanity_check(self.storage)
errors = [str(exn) for exn in exns if isinstance(exn, SanityError)]
warnings = [str(exn) for exn in exns if isinstance(exn, SanityWarning)]
(self.errors, self.warnings) = (errors, warnings)
for e in self.errors:
log.error(e)
print(e)
for w in self.warnings:
log.warning(w)
print(w)
finally:
resetCustomStorageData(self.data)
self._ready = True
def initialize(self):
NormalTUISpoke.initialize(self)
threadMgr.add(AnacondaThread(name=THREAD_STORAGE_WATCHER,
target=self._initialize))
self.selected_disks = self.data.ignoredisk.onlyuse[:]
# Probably need something here to track which disks are selected?
def _initialize(self):
"""
Secondary initialization; wait for the storage thread to complete before
populating our disk list.
"""
threadMgr.wait(THREAD_STORAGE)
self.disks = sorted(getDisks(self.storage.devicetree),
key=lambda d: d.name)
# if only one disk is available, go ahead and mark it as selected
if len(self.disks) == 1:
self._update_disk_list(self.disks[0])
self._update_summary()
self._ready = True
class AutoPartSpoke(NormalTUISpoke):
""" Autopartitioning options are presented here.
.. inheritance-diagram:: AutoPartSpoke
:parts: 3
"""
title = N_("Autopartitioning Options")
category = SystemCategory
def __init__(self, app, data, storage, payload, instclass):
NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)
self.clearPartType = self.data.clearpart.type
self.parttypelist = sorted(PARTTYPES.keys())
@property
def indirect(self):
return True
def refresh(self, args=None):
NormalTUISpoke.refresh(self, args)
# synchronize our local data store with the global ksdata
self.clearPartType = self.data.clearpart.type
# I dislike "is None", but bool(0) returns false :(
if self.clearPartType is None:
# Default to clearing everything.
self.clearPartType = CLEARPART_TYPE_ALL
for i, parttype in enumerate(self.parttypelist):
c = CheckboxWidget(title="%i) %s" % (i + 1, _(parttype)),
completed=(PARTTYPES[parttype] == self.clearPartType))
self._window += [c, ""]
message = _("Installation requires partitioning of your hard drive. Select what space to use for the install target.")
self._window += [TextWidget(message), ""]
return True
def apply(self):
# kind of a hack, but if we're actually getting to this spoke, there
# is no doubt that we are doing autopartitioning, so set autopart to
# True. In the case of ks installs which may not have defined any
# partition options, autopart was never set to True, causing some
# issues. (rhbz#1001061)
self.data.autopart.autopart = True
self.data.clearpart.type = self.clearPartType
self.data.clearpart.initAll = True
def input(self, args, key):
"""Grab the choice and update things"""
try:
keyid = int(key) - 1
except ValueError:
if key.lower() == "c":
newspoke = PartitionSchemeSpoke(self.app, self.data, self.storage,
self.payload, self.instclass)
self.app.switch_screen_modal(newspoke)
self.apply()
self.close()
return INPUT_PROCESSED
else:
return key
if 0 <= keyid < len(self.parttypelist):
self.clearPartType = PARTTYPES[self.parttypelist[keyid]]
self.apply()
return INPUT_PROCESSED
class PartitionSchemeSpoke(NormalTUISpoke):
""" Spoke to select what partitioning scheme to use on disk(s). """
title = N_("Partition Scheme Options")
category = SystemCategory
def __init__(self, app, data, storage, payload, instclass):
NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)
self.partschemes = OrderedDict()
pre_select = self.data.autopart.type or DEFAULT_AUTOPART_TYPE
for i, item in enumerate(AUTOPART_CHOICES):
self.partschemes[item[0]] = item[1]
if item[1] == pre_select:
self._selection = i
@property
def indirect(self):
return True
def refresh(self, args=None):
NormalTUISpoke.refresh(self, args)
schemelist = self.partschemes.keys()
for i, sch in enumerate(schemelist):
box = CheckboxWidget(title="%i) %s" %(i + 1, _(sch)), completed=(i == self._selection))
self._window += [box, ""]
message = _("Select a partition scheme configuration.")
self._window += [TextWidget(message), ""]
return True
def input(self, args, key):
""" Grab the choice and update things. """
try:
keyid = int(key) - 1
except ValueError:
if key.lower() == "c":
self.apply()
self.close()
return INPUT_PROCESSED
else:
return key
if 0 <= keyid < len(self.partschemes):
self._selection = keyid
return INPUT_PROCESSED
def apply(self):
""" Apply our selections. """
schemelist = list(self.partschemes.values())
self.data.autopart.type = schemelist[self._selection]
|
rrampage/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-pdb.py
|
135
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that the 'ProgramDatabaseFile' attribute in VCLinker is extracted properly.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('program-database.gyp', chdir=CHDIR)
test.build('program-database.gyp', test.ALL, chdir=CHDIR)
def FindFile(pdb):
full_path = test.built_file_path(pdb, chdir=CHDIR)
return os.path.isfile(full_path)
# Verify the specified PDB is created when ProgramDatabaseFile
# is provided.
if not FindFile('name_outdir.pdb'):
test.fail_test()
if not FindFile('name_proddir.pdb'):
test.fail_test()
test.pass_test()
|
broferek/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_product_facts.py
|
35
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_product_facts
short_description: Provides Windows product and license information
description:
- Provides Windows product and license information.
version_added: '2.5'
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Get product id and product key
win_product_facts:
'''
RETURN = r'''
ansible_facts:
description: Dictionary containing all the detailed information about the Windows product and license.
returned: always
type: complex
contains:
ansible_os_license_channel:
description: The Windows license channel.
returned: always
type: str
sample: Volume:MAK
version_added: '2.8'
ansible_os_license_edition:
description: The Windows license edition.
returned: always
type: str
sample: Windows(R) ServerStandard edition
version_added: '2.8'
ansible_os_license_status:
description: The Windows license status.
returned: always
type: str
sample: Licensed
version_added: '2.8'
ansible_os_product_id:
description: The Windows product ID.
returned: always
type: str
sample: 00326-10000-00000-AA698
ansible_os_product_key:
description: The Windows product key.
returned: always
type: str
sample: T49TD-6VFBW-VV7HY-B2PXY-MY47H
'''
|
feigames/Odoo
|
refs/heads/master
|
addons/mass_mailing/__openerp__.py
|
312
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
'name': 'Mass Mailing Campaigns',
'summary': 'Design, send and track emails',
'description': """
Easily send mass mailing to your leads, opportunities or customers. Track
marketing campaigns performance to improve conversion rates. Design
professional emails and reuse templates in a few clicks.
""",
'version': '2.0',
'author': 'OpenERP',
'website': 'https://www.odoo.com/page/mailing',
'category': 'Marketing',
'depends': [
'mail',
'email_template',
'marketing',
'web_kanban_gauge',
'web_kanban_sparkline',
'website_mail',
],
'data': [
'data/mail_data.xml',
'data/mass_mailing_data.xml',
'wizard/mail_compose_message_view.xml',
'wizard/test_mailing.xml',
'views/mass_mailing_report.xml',
'views/mass_mailing.xml',
'views/res_config.xml',
'views/res_partner.xml',
'views/email_template.xml',
'views/website_mass_mailing.xml',
'views/snippets.xml',
'security/ir.model.access.csv',
],
'qweb': [],
'demo': [
'data/mass_mailing_demo.xml',
],
'installable': True,
'auto_install': False,
}
|
dannyboi104/SickRage
|
refs/heads/master
|
lib/simplejson/__init__.py
|
448
|
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.0.9'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decoder import JSONDecoder
from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
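# A short usage sketch (not part of the original module); both calls exercise
# the public API documented above, including the cached-encoder fast path.
#
#     >>> dumps([1, 2, 3], separators=(',', ':'))
#     '[1,2,3]'
#     >>> loads('[1,2,3]')
#     [1, 2, 3]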
|
xinjiguaike/edx-platform
|
refs/heads/master
|
common/djangoapps/track/utils.py
|
230
|
"""Utility functions and classes for track backends"""
from datetime import datetime, date
import json
from pytz import UTC
class DateTimeJSONEncoder(json.JSONEncoder):
"""JSON encoder aware of datetime.datetime and datetime.date objects"""
def default(self, obj): # pylint: disable=method-hidden
"""
        Serialize datetime and date objects to ISO format.
        datetime objects are converted to UTC.
"""
if isinstance(obj, datetime):
if obj.tzinfo is None:
                # Localize naive datetime objects to UTC
obj = UTC.localize(obj)
else:
                # Convert datetime objects from other timezones to UTC
obj = obj.astimezone(UTC)
return obj.isoformat()
elif isinstance(obj, date):
return obj.isoformat()
return super(DateTimeJSONEncoder, self).default(obj)
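# A short usage sketch (not part of the original module): pass the encoder
# class to json.dumps so datetime values serialize as ISO 8601 strings in UTC.
if __name__ == '__main__':
    event = {'timestamp': datetime(2014, 1, 1, 12, 0, 0)}
    print(json.dumps(event, cls=DateTimeJSONEncoder))
    # -> {"timestamp": "2014-01-01T12:00:00+00:00"}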
|
alexsalo/genenetwork2
|
refs/heads/master
|
wqflask/flask_security/registerable.py
|
3
|
# -*- coding: utf-8 -*-
"""
flask.ext.security.registerable
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Flask-Security registerable module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from flask import current_app as app
from werkzeug.local import LocalProxy
from .confirmable import generate_confirmation_link
from .signals import user_registered
from .utils import do_flash, get_message, send_mail, encrypt_password, \
config_value
# Convenient references
_security = LocalProxy(lambda: app.extensions['security'])
_datastore = LocalProxy(lambda: _security.datastore)
def register_user(**kwargs):
print "in register_user kwargs:", kwargs
confirmation_link, token = None, None
kwargs['password'] = encrypt_password(kwargs['password'])
user = _datastore.create_user(**kwargs)
_datastore.commit()
if _security.confirmable:
confirmation_link, token = generate_confirmation_link(user)
do_flash(*get_message('CONFIRM_REGISTRATION', email=user.email))
user_registered.send(dict(user=user, confirm_token=token),
app=app._get_current_object())
if config_value('SEND_REGISTER_EMAIL'):
send_mail(config_value('EMAIL_SUBJECT_REGISTER'), user.email, 'welcome',
user=user, confirmation_link=confirmation_link)
return user
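# A minimal usage sketch (hypothetical field names; requires an initialized
# Flask app with Flask-Security set up, e.g. inside app.app_context()):
#
#     user = register_user(email='new@example.com', password='secret')
#
# The password is encrypted before the user is persisted, and a confirmation
# email is sent when confirmation is enabled for the app.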
|
jessedhillon/zulip
|
refs/heads/master
|
zerver/worker/queue_processors.py
|
116
|
from __future__ import absolute_import
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.base import BaseHandler
from zerver.models import get_user_profile_by_email, \
get_user_profile_by_id, get_prereg_user_by_email, get_client
from zerver.lib.context_managers import lockfile
from zerver.lib.queue import SimpleQueueClient, queue_json_publish
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.lib.notifications import handle_missedmessage_emails, enqueue_welcome_emails, \
clear_followup_emails_queue, send_local_email_template_with_delay
from zerver.lib.actions import do_send_confirmation_email, \
do_update_user_activity, do_update_user_activity_interval, do_update_user_presence, \
internal_send_message, check_send_message, extract_recipients, \
handle_push_notification
from zerver.lib.digest import handle_digest_email
from zerver.lib.email_mirror import process_message as mirror_email
from zerver.decorator import JsonableError
from zerver.lib.socket import req_redis_key
from confirmation.models import Confirmation
from zerver.lib.db import reset_queries
from django.core.mail import EmailMessage
from zerver.lib.redis_utils import get_redis_client
import os
import sys
import ujson
from collections import defaultdict
import email
import time
import datetime
import logging
import simplejson
import StringIO
def assign_queue(queue_name, enabled=True):
def decorate(clazz):
clazz.queue_name = queue_name
if enabled:
register_worker(queue_name, clazz)
return clazz
return decorate
worker_classes = {}
def register_worker(queue_name, clazz):
worker_classes[queue_name] = clazz
def get_worker(queue_name):
return worker_classes[queue_name]()
def get_active_worker_queues():
return worker_classes.iterkeys()
class QueueProcessingWorker(object):
def __init__(self):
self.q = SimpleQueueClient()
def consume_wrapper(self, data):
try:
self.consume(data)
except Exception:
self._log_problem()
if not os.path.exists(settings.QUEUE_ERROR_DIR):
os.mkdir(settings.QUEUE_ERROR_DIR)
fname = '%s.errors' % (self.queue_name,)
fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
lock_fn = fn + '.lock'
with lockfile(lock_fn):
with open(fn, 'a') as f:
f.write(line)
reset_queries()
def _log_problem(self):
logging.exception("Problem handling data on queue %s" % (self.queue_name,))
def start(self):
self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
self.q.start_consuming()
def stop(self):
self.q.stop_consuming()
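# A minimal sketch (hypothetical queue name, kept commented so no queue is
# registered at import time) of how a new worker is wired up: subclass
# QueueProcessingWorker, implement consume(), and register the class with
# the assign_queue decorator.
#
# @assign_queue('example_events')
# class ExampleWorker(QueueProcessingWorker):
#     def consume(self, event):
#         logging.info("Handling example event: %s" % (event,))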
if settings.MAILCHIMP_API_KEY:
from postmonkey import PostMonkey, MailChimpException
@assign_queue('signups')
class SignupWorker(QueueProcessingWorker):
def __init__(self):
super(SignupWorker, self).__init__()
if settings.MAILCHIMP_API_KEY:
self.pm = PostMonkey(settings.MAILCHIMP_API_KEY, timeout=10)
def consume(self, data):
        merge_vars = data['merge_vars']
# This should clear out any invitation reminder emails
clear_followup_emails_queue(data["EMAIL"])
if settings.MAILCHIMP_API_KEY and settings.PRODUCTION:
try:
self.pm.listSubscribe(
id=settings.ZULIP_FRIENDS_LIST_ID,
email_address=data['EMAIL'],
merge_vars=merge_vars,
double_optin=False,
send_welcome=False)
except MailChimpException, e:
if e.code == 214:
logging.warning("Attempted to sign up already existing email to list: %s" % (data['EMAIL'],))
else:
raise e
email = data.get("EMAIL")
name = merge_vars.get("NAME")
enqueue_welcome_emails(email, name)
@assign_queue('invites')
class ConfirmationEmailWorker(QueueProcessingWorker):
def consume(self, data):
invitee = get_prereg_user_by_email(data["email"])
referrer = get_user_profile_by_email(data["referrer_email"])
do_send_confirmation_email(invitee, referrer)
# queue invitation reminder for two days from now.
link = Confirmation.objects.get_link_for_object(invitee)
send_local_email_template_with_delay([{'email': data["email"], 'name': ""}],
"zerver/emails/invitation/invitation_reminder_email",
{'activate_url': link,
'referrer': referrer,
'voyager': settings.VOYAGER,
'external_host': settings.EXTERNAL_HOST,
'support_email': settings.ZULIP_ADMINISTRATOR},
datetime.timedelta(days=2),
tags=["invitation-reminders"],
sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
@assign_queue('user_activity')
class UserActivityWorker(QueueProcessingWorker):
def consume(self, event):
user_profile = get_user_profile_by_id(event["user_profile_id"])
client = get_client(event["client"])
log_time = timestamp_to_datetime(event["time"])
query = event["query"]
do_update_user_activity(user_profile, client, query, log_time)
@assign_queue('user_activity_interval')
class UserActivityIntervalWorker(QueueProcessingWorker):
def consume(self, event):
user_profile = get_user_profile_by_id(event["user_profile_id"])
log_time = timestamp_to_datetime(event["time"])
do_update_user_activity_interval(user_profile, log_time)
@assign_queue('user_presence')
class UserPresenceWorker(QueueProcessingWorker):
def consume(self, event):
logging.info("Received event: %s" % (event),)
user_profile = get_user_profile_by_id(event["user_profile_id"])
client = get_client(event["client"])
log_time = timestamp_to_datetime(event["time"])
status = event["status"]
do_update_user_presence(user_profile, client, log_time, status)
@assign_queue('missedmessage_emails')
class MissedMessageWorker(QueueProcessingWorker):
def start(self):
while True:
missed_events = self.q.drain_queue("missedmessage_emails", json=True)
by_recipient = defaultdict(list)
for event in missed_events:
logging.info("Received event: %s" % (event,))
by_recipient[event['user_profile_id']].append(event)
for user_profile_id, events in by_recipient.items():
handle_missedmessage_emails(user_profile_id, events)
reset_queries()
# Aggregate all messages received every 2 minutes to let someone finish sending a batch
# of messages
time.sleep(2 * 60)
@assign_queue('missedmessage_mobile_notifications')
class PushNotificationsWorker(QueueProcessingWorker):
def consume(self, data):
handle_push_notification(data['user_profile_id'], data)
def make_feedback_client():
sys.path.append(os.path.join(os.path.dirname(__file__), '../../api'))
import zulip
return zulip.Client(
client="ZulipFeedback/0.1",
email=settings.DEPLOYMENT_ROLE_NAME,
api_key=settings.DEPLOYMENT_ROLE_KEY,
verbose=True,
site=settings.FEEDBACK_TARGET)
# We probably could stop running this queue worker at all if ENABLE_FEEDBACK is False
@assign_queue('feedback_messages')
class FeedbackBot(QueueProcessingWorker):
def start(self):
if settings.ENABLE_FEEDBACK and settings.FEEDBACK_EMAIL is None:
self.staging_client = make_feedback_client()
self.staging_client._register(
'forward_feedback',
method='POST',
url='deployments/feedback',
make_request=(lambda request: {'message': simplejson.dumps(request)}),
)
QueueProcessingWorker.start(self)
def consume(self, event):
if not settings.ENABLE_FEEDBACK:
return
if settings.FEEDBACK_EMAIL is not None:
to_email = settings.FEEDBACK_EMAIL
subject = "Zulip feedback from %s" % (event["sender_email"],)
content = event["content"]
from_email = '"%s" <%s>' % (event["sender_full_name"], event["sender_email"])
headers = {'Reply-To' : '"%s" <%s>' % (event["sender_full_name"], event["sender_email"])}
msg = EmailMessage(subject, content, from_email, [to_email], headers=headers)
msg.send()
else:
self.staging_client.forward_feedback(event)
@assign_queue('error_reports')
class ErrorReporter(QueueProcessingWorker):
def start(self):
if settings.DEPLOYMENT_ROLE_KEY:
self.staging_client = make_feedback_client()
self.staging_client._register(
'forward_error',
method='POST',
url='deployments/report_error',
make_request=(lambda type, report: {'type': type, 'report': simplejson.dumps(report)}),
)
QueueProcessingWorker.start(self)
def consume(self, event):
if not settings.DEPLOYMENT_ROLE_KEY:
return
self.staging_client.forward_error(event['type'], event['report'])
@assign_queue('slow_queries')
class SlowQueryWorker(QueueProcessingWorker):
def start(self):
while True:
self.process_one_batch()
# Aggregate all slow query messages in 1-minute chunks to avoid message spam
time.sleep(1 * 60)
def process_one_batch(self):
slow_queries = self.q.drain_queue("slow_queries", json=True)
if settings.ERROR_BOT is None:
return
if len(slow_queries) > 0:
topic = "%s: slow queries" % (settings.STATSD_PREFIX,)
content = ""
for query in slow_queries:
content += " %s\n" % (query,)
internal_send_message(settings.ERROR_BOT, "stream", "logs", topic, content)
reset_queries()
@assign_queue("message_sender")
class MessageSenderWorker(QueueProcessingWorker):
def __init__(self):
super(MessageSenderWorker, self).__init__()
self.redis_client = get_redis_client()
self.handler = BaseHandler()
self.handler.load_middleware()
def consume(self, event):
server_meta = event['server_meta']
environ = {'REQUEST_METHOD': 'SOCKET',
'SCRIPT_NAME': '',
'PATH_INFO': '/json/send_message',
'SERVER_NAME': 'localhost',
'SERVER_PORT': 9993,
'SERVER_PROTOCOL': 'ZULIP_SOCKET/1.0',
'wsgi.version': (1, 0),
'wsgi.input': StringIO.StringIO(),
'wsgi.errors': sys.stderr,
'wsgi.multithread': False,
'wsgi.multiprocess': True,
'wsgi.run_once': False,
'zulip.emulated_method': 'POST'}
# We're mostly using a WSGIRequest for convenience
environ.update(server_meta['request_environ'])
request = WSGIRequest(environ)
request._request = event['request']
request.csrf_processing_done = True
user_profile = get_user_profile_by_id(server_meta['user_id'])
request._cached_user = user_profile
resp = self.handler.get_response(request)
server_meta['time_request_finished'] = time.time()
server_meta['worker_log_data'] = request._log_data
resp_content = resp.content
result = {'response': ujson.loads(resp_content), 'req_id': event['req_id'],
'server_meta': server_meta}
redis_key = req_redis_key(event['req_id'])
        self.redis_client.hmset(redis_key, {'status': 'complete',
                                             'response': resp_content})
queue_json_publish(server_meta['return_queue'], result, lambda e: None)
@assign_queue('digest_emails')
class DigestWorker(QueueProcessingWorker):
# Who gets a digest is entirely determined by the enqueue_digest_emails
# management command, not here.
def consume(self, event):
logging.info("Received digest event: %s" % (event,))
handle_digest_email(event["user_profile_id"], event["cutoff"])
@assign_queue('email_mirror')
class MirrorWorker(QueueProcessingWorker):
    # Forwards each message received by the email mirror into Zulip.
def consume(self, event):
mirror_email(email.message_from_string(event["message"].encode("utf-8")),
rcpt_to=event["rcpt_to"], pre_checked=True)
@assign_queue('test')
class TestWorker(QueueProcessingWorker):
# This worker allows you to test the queue worker infrastructure without
# creating significant side effects. It can be useful in development or
# for troubleshooting prod/staging. It pulls a message off the test queue
# and appends it to a file in /tmp.
def consume(self, event):
fn = settings.ZULIP_WORKER_TEST_FILE
message = ujson.dumps(event)
logging.info("TestWorker should append this message to %s: %s" % (fn, message))
with open(fn, 'a') as f:
f.write(message + '\n')
|
chugunovyar/factoryForBuild
|
refs/heads/master
|
env/lib/python2.7/site-packages/matplotlib/backends/backend_mixed.py
|
10
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import six
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.tight_bbox import process_figure_for_rasterizing
class MixedModeRenderer(object):
"""
A helper class to implement a renderer that switches between
vector and raster drawing. An example may be a PDF writer, where
most things are drawn with PDF vector commands, but some very
complex objects, such as quad meshes, are rasterised and then
output as images.
"""
def __init__(self, figure, width, height, dpi, vector_renderer,
raster_renderer_class=None,
bbox_inches_restore=None):
"""
figure: The figure instance.
width: The width of the canvas in logical units
height: The height of the canvas in logical units
dpi: The dpi of the canvas
vector_renderer: An instance of a subclass of RendererBase
that will be used for the vector drawing.
raster_renderer_class: The renderer class to use for the
raster drawing. If not provided, this will use the Agg
backend (which is currently the only viable option anyway.)
"""
if raster_renderer_class is None:
raster_renderer_class = RendererAgg
self._raster_renderer_class = raster_renderer_class
self._width = width
self._height = height
self.dpi = dpi
self._vector_renderer = vector_renderer
self._raster_renderer = None
self._rasterizing = 0
# A reference to the figure is needed as we need to change
# the figure dpi before and after the rasterization. Although
# this looks ugly, I couldn't find a better solution. -JJL
self.figure = figure
self._figdpi = figure.get_dpi()
self._bbox_inches_restore = bbox_inches_restore
self._set_current_renderer(vector_renderer)
_methods = """
close_group draw_image draw_markers draw_path
draw_path_collection draw_quad_mesh draw_tex draw_text
finalize flipy get_canvas_width_height get_image_magnification
get_texmanager get_text_width_height_descent new_gc open_group
option_image_nocomposite points_to_pixels strip_math
start_filter stop_filter draw_gouraud_triangle
draw_gouraud_triangles option_scale_image
_text2path _get_text_path_transform height width
""".split()
def _set_current_renderer(self, renderer):
self._renderer = renderer
for method in self._methods:
if hasattr(renderer, method):
setattr(self, method, getattr(renderer, method))
renderer.start_rasterizing = self.start_rasterizing
renderer.stop_rasterizing = self.stop_rasterizing
def start_rasterizing(self):
"""
Enter "raster" mode. All subsequent drawing commands (until
stop_rasterizing is called) will be drawn with the raster
backend.
If start_rasterizing is called multiple times before
stop_rasterizing is called, this method has no effect.
"""
# change the dpi of the figure temporarily.
self.figure.set_dpi(self.dpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore)
self._bbox_inches_restore = r
if self._rasterizing == 0:
self._raster_renderer = self._raster_renderer_class(
self._width*self.dpi, self._height*self.dpi, self.dpi)
self._set_current_renderer(self._raster_renderer)
self._rasterizing += 1
def stop_rasterizing(self):
"""
Exit "raster" mode. All of the drawing that was done since
the last start_rasterizing command will be copied to the
vector backend by calling draw_image.
If stop_rasterizing is called multiple times before
start_rasterizing is called, this method has no effect.
"""
self._rasterizing -= 1
if self._rasterizing == 0:
self._set_current_renderer(self._vector_renderer)
height = self._height * self.dpi
buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
l, b, w, h = bounds
if w > 0 and h > 0:
image = np.frombuffer(buffer, dtype=np.uint8)
image = image.reshape((h, w, 4))
image = image[::-1]
gc = self._renderer.new_gc()
# TODO: If the mixedmode resolution differs from the figure's
# dpi, the image must be scaled (dpi->_figdpi). Not all
# backends support this.
self._renderer.draw_image(
gc,
float(l) / self.dpi * self._figdpi,
(float(height)-b-h) / self.dpi * self._figdpi,
image)
self._raster_renderer = None
self._rasterizing = False
# restore the figure dpi.
self.figure.set_dpi(self._figdpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore,
self._figdpi)
self._bbox_inches_restore = r
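# A small usage sketch (not part of this module): artists created with
# rasterized=True are routed through MixedModeRenderer when a figure is
# saved to a vector backend such as PDF, so the heavy scatter below is
# embedded as an image inside otherwise-vector output.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.scatter(range(10000), range(10000), s=1, rasterized=True)
    fig.savefig('mixed.pdf', dpi=150)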
|
cern-it-sdc-id/davix
|
refs/heads/devel
|
test/pywebdav/lib/delete.py
|
4
|
from utils import make_xmlresponse
class DELETE:
def __init__(self,uri,dataclass):
self.__dataclass=dataclass
self.__uri=uri
def delcol(self):
""" delete a collection """
dc=self.__dataclass
result=dc.deltree(self.__uri)
        if not result:
            return None # everything ok
# create the result element
return make_xmlresponse(result)
def delone(self):
""" delete a resource """
dc=self.__dataclass
return dc.delone(self.__uri)
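# A hypothetical usage sketch (the dataclass must provide deltree()/delone(),
# as in the pywebdav interface classes):
#
#     handler = DELETE(uri, dataclass)
#     error_xml = handler.delcol()  # None means everything was deleted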
|
nathanial/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/core/handlers/__init__.py
|
12133432
| |
a10networks/a10sdk-python
|
refs/heads/master
|
a10sdk/core/A10_file/file_geo_location_oper.py
|
2
|
from a10sdk.common.A10BaseClass import A10BaseClass
class FileList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param file: {"type": "string", "format": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "file-list"
self.DeviceProxy = ""
self.A10WW_file = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param file_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "file": {"type": "string", "format": "string"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "oper"
self.DeviceProxy = ""
self.file_list = []
        for key, value in kwargs.items():
            setattr(self, key, value)
class GeoLocation(A10BaseClass):
"""Class Description::
Operational Status for the object geo-location.
Class geo-location supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/file/geo-location/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
        self.required = []
        self.b_key = "geo-location"
        self.a10_url = "/axapi/v3/file/geo-location/oper"
        self.DeviceProxy = ""
        self.oper = {}
        for key, value in kwargs.items():
            setattr(self, key, value)
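# A hypothetical usage sketch (device_proxy is an assumption; see
# common/device_proxy.py for the real session handling):
#
#     status = GeoLocation(DeviceProxy=device_proxy)
#     print(status.a10_url)  # -> /axapi/v3/file/geo-location/oper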
|
cwyark/micropython
|
refs/heads/master
|
tests/cpydiff/types_bytearray_sliceassign.py
|
45
|
"""
categories: Types,bytearray
description: Array slice assignment with unsupported RHS
cause: Unknown
workaround: Unknown
"""
b = bytearray(4)
b[0:1] = [1, 2]
print(b)
|
catapult-project/catapult
|
refs/heads/master
|
third_party/gsutil/gslib/vendored/boto/boto/awslambda/__init__.py
|
22
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
from boto.regioninfo import connect
def regions():
"""
Get all available regions for the AWS Lambda service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.awslambda.layer1 import AWSLambdaConnection
return get_regions('awslambda',
connection_cls=AWSLambdaConnection)
def connect_to_region(region_name, **kw_params):
from boto.awslambda.layer1 import AWSLambdaConnection
return connect('awslambda', region_name,
connection_cls=AWSLambdaConnection, **kw_params)
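# A short usage sketch (assumes valid AWS credentials are configured):
if __name__ == '__main__':
    for region in regions():
        print(region.name)
    conn = connect_to_region('us-east-1')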
|
Anonymous-X6/django
|
refs/heads/master
|
tests/defer_regress/tests.py
|
269
|
from __future__ import unicode_literals
from operator import attrgetter
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.backends.db import SessionStore
from django.db.models import Count
from django.db.models.query_utils import (
DeferredAttribute, deferred_class_factory,
)
from django.test import TestCase, override_settings
from .models import (
Base, Child, Derived, Feature, Item, ItemAndSimpleItem, Leaf, Location,
OneToOneItem, Proxy, ProxyRelated, RelatedItem, Request, ResolveThis,
SimpleItem, SpecialFeature,
)
class DeferRegressionTest(TestCase):
def test_basic(self):
# Deferred fields should really be deferred and not accidentally use
# the field's default value just because they aren't passed to __init__
Item.objects.create(name="first", value=42)
obj = Item.objects.only("name", "other_value").get(name="first")
# Accessing "name" doesn't trigger a new database query. Accessing
# "value" or "text" should.
with self.assertNumQueries(0):
self.assertEqual(obj.name, "first")
self.assertEqual(obj.other_value, 0)
with self.assertNumQueries(1):
self.assertEqual(obj.value, 42)
with self.assertNumQueries(1):
self.assertEqual(obj.text, "xyzzy")
with self.assertNumQueries(0):
self.assertEqual(obj.text, "xyzzy")
# Regression test for #10695. Make sure different instances don't
# inadvertently share data in the deferred descriptor objects.
i = Item.objects.create(name="no I'm first", value=37)
items = Item.objects.only("value").order_by("-value")
self.assertEqual(items[0].name, "first")
self.assertEqual(items[1].name, "no I'm first")
RelatedItem.objects.create(item=i)
r = RelatedItem.objects.defer("item").get()
self.assertEqual(r.item_id, i.id)
self.assertEqual(r.item, i)
# Some further checks for select_related() and inherited model
# behavior (regression for #10710).
c1 = Child.objects.create(name="c1", value=42)
c2 = Child.objects.create(name="c2", value=37)
Leaf.objects.create(name="l1", child=c1, second_child=c2)
obj = Leaf.objects.only("name", "child").select_related()[0]
self.assertEqual(obj.child.name, "c1")
self.assertQuerysetEqual(
Leaf.objects.select_related().only("child__name", "second_child__name"), [
"l1",
],
attrgetter("name")
)
# Models instances with deferred fields should still return the same
# content types as their non-deferred versions (bug #10738).
ctype = ContentType.objects.get_for_model
c1 = ctype(Item.objects.all()[0])
c2 = ctype(Item.objects.defer("name")[0])
c3 = ctype(Item.objects.only("name")[0])
self.assertTrue(c1 is c2 is c3)
# Regression for #10733 - only() can be used on a model with two
# foreign keys.
results = Leaf.objects.only("name", "child", "second_child").select_related()
self.assertEqual(results[0].child.name, "c1")
self.assertEqual(results[0].second_child.name, "c2")
results = Leaf.objects.only(
"name", "child", "second_child", "child__name", "second_child__name"
).select_related()
self.assertEqual(results[0].child.name, "c1")
self.assertEqual(results[0].second_child.name, "c2")
# Regression for #16409 - make sure defer() and only() work with annotate()
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).defer('name')),
list)
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).only('name')),
list)
def test_ticket_11936(self):
app_config = apps.get_app_config("defer_regress")
# Regression for #11936 - get_models should not return deferred models
# by default. Run a couple of defer queries so that app registry must
# contain some deferred classes. It might contain a lot more classes
        # depending on the order in which the tests are run.
list(Item.objects.defer("name"))
list(Child.objects.defer("value"))
klasses = {model.__name__ for model in app_config.get_models()}
self.assertIn("Child", klasses)
self.assertIn("Item", klasses)
self.assertNotIn("Child_Deferred_value", klasses)
self.assertNotIn("Item_Deferred_name", klasses)
self.assertFalse(any(k._deferred for k in app_config.get_models()))
klasses_with_deferred = {model.__name__ for model in app_config.get_models(include_deferred=True)}
self.assertIn("Child", klasses_with_deferred)
self.assertIn("Item", klasses_with_deferred)
self.assertIn("Child_Deferred_value", klasses_with_deferred)
self.assertIn("Item_Deferred_name", klasses_with_deferred)
self.assertTrue(any(k._deferred for k in app_config.get_models(include_deferred=True)))
@override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer')
def test_ticket_12163(self):
# Test for #12163 - Pickling error saving session with unsaved model
# instances.
SESSION_KEY = '2b1189a188b44ad18c35e1baac6ceead'
item = Item()
item._deferred = False
s = SessionStore(SESSION_KEY)
s.clear()
s["item"] = item
s.save()
s = SessionStore(SESSION_KEY)
s.modified = True
s.save()
i2 = s["item"]
self.assertFalse(i2._deferred)
def test_ticket_16409(self):
# Regression for #16409 - make sure defer() and only() work with annotate()
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).defer('name')),
list)
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).only('name')),
list)
def test_ticket_23270(self):
Derived.objects.create(text="foo", other_text="bar")
with self.assertNumQueries(1):
obj = Base.objects.select_related("derived").defer("text")[0]
self.assertIsInstance(obj.derived, Derived)
self.assertEqual("bar", obj.derived.other_text)
self.assertNotIn("text", obj.__dict__)
self.assertEqual(1, obj.derived.base_ptr_id)
def test_only_and_defer_usage_on_proxy_models(self):
# Regression for #15790 - only() broken for proxy models
proxy = Proxy.objects.create(name="proxy", value=42)
        msg = 'QuerySet.only() returns bogus results with proxy models'
dp = Proxy.objects.only('other_value').get(pk=proxy.pk)
self.assertEqual(dp.name, proxy.name, msg=msg)
self.assertEqual(dp.value, proxy.value, msg=msg)
# also test things with .defer()
        msg = 'QuerySet.defer() returns bogus results with proxy models'
dp = Proxy.objects.defer('name', 'text', 'value').get(pk=proxy.pk)
self.assertEqual(dp.name, proxy.name, msg=msg)
self.assertEqual(dp.value, proxy.value, msg=msg)
def test_resolve_columns(self):
ResolveThis.objects.create(num=5.0, name='Foobar')
qs = ResolveThis.objects.defer('num')
self.assertEqual(1, qs.count())
self.assertEqual('Foobar', qs[0].name)
def test_reverse_one_to_one_relations(self):
# Refs #14694. Test reverse relations which are known unique (reverse
# side has o2ofield or unique FK) - the o2o case
item = Item.objects.create(name="first", value=42)
o2o = OneToOneItem.objects.create(item=item, name="second")
self.assertEqual(len(Item.objects.defer('one_to_one_item__name')), 1)
self.assertEqual(len(Item.objects.select_related('one_to_one_item')), 1)
self.assertEqual(len(Item.objects.select_related(
'one_to_one_item').defer('one_to_one_item__name')), 1)
self.assertEqual(len(Item.objects.select_related('one_to_one_item').defer('value')), 1)
# Make sure that `only()` doesn't break when we pass in a unique relation,
# rather than a field on the relation.
self.assertEqual(len(Item.objects.only('one_to_one_item')), 1)
with self.assertNumQueries(1):
i = Item.objects.select_related('one_to_one_item')[0]
self.assertEqual(i.one_to_one_item.pk, o2o.pk)
self.assertEqual(i.one_to_one_item.name, "second")
with self.assertNumQueries(1):
i = Item.objects.select_related('one_to_one_item').defer(
'value', 'one_to_one_item__name')[0]
self.assertEqual(i.one_to_one_item.pk, o2o.pk)
self.assertEqual(i.name, "first")
with self.assertNumQueries(1):
self.assertEqual(i.one_to_one_item.name, "second")
with self.assertNumQueries(1):
self.assertEqual(i.value, 42)
def test_defer_with_select_related(self):
item1 = Item.objects.create(name="first", value=47)
item2 = Item.objects.create(name="second", value=42)
simple = SimpleItem.objects.create(name="simple", value="23")
ItemAndSimpleItem.objects.create(item=item1, simple=simple)
obj = ItemAndSimpleItem.objects.defer('item').select_related('simple').get()
self.assertEqual(obj.item, item1)
self.assertEqual(obj.item_id, item1.id)
obj.item = item2
obj.save()
obj = ItemAndSimpleItem.objects.defer('item').select_related('simple').get()
self.assertEqual(obj.item, item2)
self.assertEqual(obj.item_id, item2.id)
def test_proxy_model_defer_with_select_related(self):
# Regression for #22050
item = Item.objects.create(name="first", value=47)
RelatedItem.objects.create(item=item)
# Defer fields with only()
obj = ProxyRelated.objects.all().select_related().only('item__name')[0]
with self.assertNumQueries(0):
self.assertEqual(obj.item.name, "first")
with self.assertNumQueries(1):
self.assertEqual(obj.item.value, 47)
def test_only_with_select_related(self):
# Test for #17485.
item = SimpleItem.objects.create(name='first', value=47)
feature = Feature.objects.create(item=item)
SpecialFeature.objects.create(feature=feature)
qs = Feature.objects.only('item__name').select_related('item')
self.assertEqual(len(qs), 1)
qs = SpecialFeature.objects.only('feature__item__name').select_related('feature__item')
self.assertEqual(len(qs), 1)
def test_deferred_class_factory(self):
new_class = deferred_class_factory(
Item,
('this_is_some_very_long_attribute_name_so_modelname_truncation_is_triggered',))
self.assertEqual(
new_class.__name__,
'Item_Deferred_this_is_some_very_long_attribute_nac34b1f495507dad6b02e2cb235c875e')
def test_deferred_class_factory_already_deferred(self):
deferred_item1 = deferred_class_factory(Item, ('name',))
deferred_item2 = deferred_class_factory(deferred_item1, ('value',))
self.assertIs(deferred_item2._meta.proxy_for_model, Item)
self.assertNotIsInstance(deferred_item2.__dict__.get('name'), DeferredAttribute)
self.assertIsInstance(deferred_item2.__dict__.get('value'), DeferredAttribute)
def test_deferred_class_factory_no_attrs(self):
deferred_cls = deferred_class_factory(Item, ())
self.assertFalse(deferred_cls._deferred)
class DeferAnnotateSelectRelatedTest(TestCase):
def test_defer_annotate_select_related(self):
location = Location.objects.create()
Request.objects.create(location=location)
self.assertIsInstance(list(Request.objects
.annotate(Count('items')).select_related('profile', 'location')
.only('profile', 'location')), list)
self.assertIsInstance(list(Request.objects
.annotate(Count('items')).select_related('profile', 'location')
.only('profile__profile1', 'location__location1')), list)
self.assertIsInstance(list(Request.objects
.annotate(Count('items')).select_related('profile', 'location')
.defer('request1', 'request2', 'request3', 'request4')), list)
|
MoutainOne/opendcp
|
refs/heads/master
|
orion/shells/encode.py
|
5
|
#!python
# JSON-escape the contents of a file and print the result without the
# surrounding double quotes, e.g.: python encode.py some_script.sh
import json
import sys
filename = sys.argv[1]
text = open(filename).read()
encoded = json.dumps(text)
print encoded[1:-1]
|
bsmrstu-warriors/Moytri--The-Drone-Aider
|
refs/heads/master
|
Lib/UserString.py
|
312
|
#!/usr/bin/env python
## vim:ts=4:et:nowrap
"""A user-defined wrapper around string objects
Note: string objects have grown methods in Python 1.6
This module requires Python 1.6 or later.
"""
import sys
import collections
__all__ = ["UserString","MutableString"]
class UserString(collections.Sequence):
def __init__(self, seq):
if isinstance(seq, basestring):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __long__(self): return long(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __cmp__(self, string):
if isinstance(string, UserString):
return cmp(self.data, string.data)
else:
return cmp(self.data, string)
def __contains__(self, char):
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __getslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
return self.__class__(self.data[start:end])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, basestring):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other + self.data)
else:
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __mod__(self, args):
return self.__class__(self.data % args)
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
def count(self, sub, start=0, end=sys.maxint):
return self.data.count(sub, start, end)
def decode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.decode(encoding, errors))
else:
return self.__class__(self.data.decode(encoding))
else:
return self.__class__(self.data.decode())
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
else:
return self.__class__(self.data.encode(encoding))
else:
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=sys.maxint):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=sys.maxint):
return self.data.find(sub, start, end)
def index(self, sub, start=0, end=sys.maxint):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=sys.maxint):
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=sys.maxint):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=sys.maxint):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None): return self.__class__(self.data.strip(chars))
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
def zfill(self, width): return self.__class__(self.data.zfill(width))
class MutableString(UserString, collections.MutableSequence):
"""mutable string objects
Python strings are immutable objects. This has the advantage, that
strings may be used as dictionary keys. If this property isn't needed
and you insist on changing string values in place instead, you may cheat
and use MutableString.
But the purpose of this class is an educational one: to prevent
people from inventing their own mutable string class derived
    from UserString and then forgetting to remove (override) the
__hash__ method inherited from UserString. This would lead to
errors that would be very hard to track down.
A faster and better solution is to rewrite your program using lists."""
def __init__(self, string=""):
from warnings import warnpy3k
warnpy3k('the class UserString.MutableString has been removed in '
'Python 3.0', stacklevel=2)
self.data = string
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def __setitem__(self, index, sub):
if isinstance(index, slice):
if isinstance(sub, UserString):
sub = sub.data
elif not isinstance(sub, basestring):
sub = str(sub)
start, stop, step = index.indices(len(self.data))
if step == -1:
start, stop = stop+1, start+1
sub = sub[::-1]
elif step != 1:
# XXX(twouters): I guess we should be reimplementing
# the extended slice assignment/deletion algorithm here...
raise TypeError, "invalid step in slicing assignment"
start = min(start, stop)
self.data = self.data[:start] + sub + self.data[stop:]
else:
if index < 0:
index += len(self.data)
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + sub + self.data[index+1:]
def __delitem__(self, index):
if isinstance(index, slice):
start, stop, step = index.indices(len(self.data))
if step == -1:
start, stop = stop+1, start+1
elif step != 1:
# XXX(twouters): see same block in __setitem__
raise TypeError, "invalid step in slicing deletion"
start = min(start, stop)
self.data = self.data[:start] + self.data[stop:]
else:
if index < 0:
index += len(self.data)
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + self.data[index+1:]
def __setslice__(self, start, end, sub):
start = max(start, 0); end = max(end, 0)
if isinstance(sub, UserString):
self.data = self.data[:start]+sub.data+self.data[end:]
elif isinstance(sub, basestring):
self.data = self.data[:start]+sub+self.data[end:]
else:
self.data = self.data[:start]+str(sub)+self.data[end:]
def __delslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
self.data = self.data[:start] + self.data[end:]
def immutable(self):
return UserString(self.data)
def __iadd__(self, other):
if isinstance(other, UserString):
self.data += other.data
elif isinstance(other, basestring):
self.data += other
else:
self.data += str(other)
return self
def __imul__(self, n):
self.data *= n
return self
def insert(self, index, value):
self[index:index] = value
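def _demo():
    # Illustrative sketch (not part of the stdlib module): methods on
    # UserString return instances of the caller's class, and MutableString
    # supports in-place item assignment, unlike str.
    s = UserString("hello")
    assert isinstance(s.upper(), UserString)
    m = MutableString("abc")
    m[0] = "A"
    assert str(m) == "Abc"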
if __name__ == "__main__":
# execute the regression test to stdout, if called as a script:
import os
called_in_dir, called_as = os.path.split(sys.argv[0])
called_as, py = os.path.splitext(called_as)
if '-q' in sys.argv:
from test import test_support
test_support.verbose = 0
__import__('test.test_' + called_as.lower())
|
zorroblue/scikit-learn
|
refs/heads/master
|
sklearn/cluster/spectral.py
|
7
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles in the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int, RandomState instance or None, optional, default: None
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
the K-Means initialization. If int, random_state is the seed used by
the random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator is
the RandomState instance used by `np.random`.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles in the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int, RandomState instance or None, optional, default: None
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
the K-Means initialization. If int, random_state is the seed used by
the random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator is
the RandomState instance used by `np.random`.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
gamma : float, default=1.0
Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
Ignored for ``affinity='nearest_neighbors'``.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
Where ``delta`` is a free parameter representing the width of the Gaussian
kernel.
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None, n_jobs=1):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
y : Ignored
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64, ensure_min_samples=2)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
include_self=True,
n_jobs=self.n_jobs)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
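# --- Hedged usage sketch (illustrative only; not part of scikit-learn) ---
# Demonstrates the Notes in the class docstring above: a distance matrix is
# turned into a similarity matrix with the Gaussian (RBF) kernel and then
# clustered as a precomputed affinity. The data, ``delta`` and ``n_clusters``
# values below are assumptions made up for the demonstration.
if __name__ == '__main__':
    from sklearn.metrics import pairwise_distances
    rng = np.random.RandomState(0)
    X_demo = rng.rand(20, 3)
    dist_matrix = pairwise_distances(X_demo)
    delta = 1.0  # free parameter: width of the Gaussian kernel
    similarity = np.exp(-dist_matrix ** 2 / (2. * delta ** 2))
    sc = SpectralClustering(n_clusters=2, affinity='precomputed',
                            random_state=0)
    print(sc.fit(similarity).labels_)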
|
X-DataInitiative/tick
|
refs/heads/master
|
tick/solver/history/__init__.py
|
2
|
# License: BSD 3 clause
# import tick.base
from .history import History
__all__ = ["History"]
|
mcxiaoke/python-labs
|
refs/heads/master
|
archives/penti/decorators.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-08-14 15:55:34
from __future__ import print_function
import functools
def hello(level):
    def decorator(func):
        @functools.wraps(func)  # preserve the wrapped function's metadata
        def wrapper(*args, **kwargs):
            print('before hello', level)
            result = func(*args, **kwargs)
            print('after hello')
            return result
        return wrapper
    return decorator
@hello(level=12345)
def print_me(first=None, *args, **options):
print('this is func 1', first, args, options)
t = (1, 2, 3)
print_me(first='First', **{'key': 'value'})
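# Expected output of the call above with the fixed decorator (traced by hand,
# shown here as a hedged illustration):
#   before hello 12345
#   this is func 1 First () {'key': 'value'}
#   after hello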
|
dennis-sheil/commandergenius
|
refs/heads/sdl_android
|
project/jni/python/src/Lib/encodings/mac_centeuro.py
|
593
|
""" Python Character Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-centeuro',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
u'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
u'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
u'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
u'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
u'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
u'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
u'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
u'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
u'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
u'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
u'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
u'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
u'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
u'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
u'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
u'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
u'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
u'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
u'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
u'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
u'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
u'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
u'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
u'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
u'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
u'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
u'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
u'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
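# --- Hedged usage sketch (not part of the generated codec file) ---
# Decodes two bytes against the table above: 0x41 -> u'A' and 0x80 ->
# U+00C4 (LATIN CAPITAL LETTER A WITH DIAERESIS).
if __name__ == '__main__':
    decoded, consumed = codecs.charmap_decode(b'A\x80', 'strict',
                                              decoding_table)
    print((repr(decoded), consumed))  # (u'A\xc4', 2)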
|
jacquesd/indico
|
refs/heads/master
|
indico/modules/events/management/controllers.py
|
2
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import flash, redirect, session
from werkzeug.exceptions import Forbidden
from indico.modules.events.management.util import can_lock
from indico.util.i18n import _
from indico.web.flask.util import url_for, jsonify_data
from indico.web.util import jsonify_template
from MaKaC.webinterface.rh.base import RH
from MaKaC.webinterface.rh.conferenceModif import RHConferenceModifBase
class RHDeleteEvent(RHConferenceModifBase):
"""Delete an event."""
CSRF_ENABLED = True
def _process(self):
return RH._process(self)
def _process_GET(self):
return jsonify_template('events/management/delete_event.html', event=self._conf)
def _process_POST(self):
category = self._conf.getOwner()
self._conf.delete(session.user)
flash(_('Event "{}" successfully deleted.').format(self._conf.title), 'success')
if category:
redirect_url = url_for('category_mgmt.categoryModification', category)
else:
redirect_url = url_for('misc.index')
return jsonify_data(url=redirect_url, flash=False)
class RHLockEvent(RHConferenceModifBase):
"""Lock an event."""
CSRF_ENABLED = True
def _checkProtection(self):
RHConferenceModifBase._checkProtection(self)
if not can_lock(self._conf, session.user):
raise Forbidden
def _process(self):
return RH._process(self)
def _process_GET(self):
return jsonify_template('events/management/lock_event.html', event=self._conf)
def _process_POST(self):
self._conf.setClosed(True)
flash(_('The event is now locked.'), 'success')
return jsonify_data(url=url_for('event_mgmt.conferenceModification', self._conf), flash=False)
class RHUnlockEvent(RHConferenceModifBase):
"""Unlock an event."""
CSRF_ENABLED = True
def _checkProtection(self):
self._allowClosed = can_lock(self._conf, session.user)
RHConferenceModifBase._checkProtection(self)
def _process(self):
self._conf.setClosed(False)
flash(_('The event is now unlocked.'), 'success')
return redirect(url_for('event_mgmt.conferenceModification', self._conf))
|
drjeep/django
|
refs/heads/master
|
tests/file_uploads/__init__.py
|
12133432
| |
riteshshrv/django
|
refs/heads/master
|
tests/logging_tests/__init__.py
|
12133432
| |
kevgathuku/ponycheckup
|
refs/heads/master
|
ponycheckup/check/__init__.py
|
12133432
| |
ingokegel/intellij-community
|
refs/heads/master
|
python/testData/addImport/relativeImportWithDotsOnly/foo/bar/__init__.py
|
12133432
| |
nagyistoce/edx-platform
|
refs/heads/master
|
common/djangoapps/contentserver/tests/__init__.py
|
12133432
| |
pwong-mapr/private-hue
|
refs/heads/HUE-1096-abe
|
desktop/core/ext-py/Django-1.4.5/tests/regressiontests/test_utils/__init__.py
|
12133432
| |
muranga/laughing-octo-nemesis
|
refs/heads/master
|
src/ansiapp/__init__.py
|
12133432
| |
GuneetAtwal/kernel_mt6589
|
refs/heads/nightlies
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
    raise ImportError("You need to install the wxPython lib for this script")
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
        (r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
tartavull/google-cloud-python
|
refs/heads/master
|
language/google/cloud/language/__init__.py
|
2
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# TRANSITION CODE
# -----------------------------------------------------------------------------
# The old Language manual layer is now deprecated, but to allow
# users the time to move from the manual layer to the mostly auto-generated
# layer, they are both living side by side for a few months.
#
# Instantiating the old manual layer (`google.cloud.language.Client`) will
# issue a DeprecationWarning.
#
# When it comes time to remove the old layer, everything in this directory
# should go away EXCEPT __init__.py (which can be renamed to language.py and
# put one directory above).
#
# Additionally, the import and export of `Client`, `Document`, and `Encoding`
# should be removed from this file (along with this note), and the rest should
# be left intact.
# -----------------------------------------------------------------------------
"""Client library for Google Cloud Natural Language API."""
from __future__ import absolute_import
from pkg_resources import get_distribution
__version__ = get_distribution('google-cloud-language').version
from google.cloud.language_v1 import * # noqa
from google.cloud.language.client import Client
from google.cloud.language.document import Document
from google.cloud.language.document import Encoding
__all__ = (
# Common
'__version__',
# Manual Layer
'Client',
'Document',
'Encoding',
# Auto-gen
'enums',
'LanguageServiceClient',
'types',
)
|
Fluent-networks/floranet
|
refs/heads/master
|
floranet/lora/bands.py
|
1
|
class LoraBand(object):
"""Base class for Lora radio bands."""
BANDS = {'AU915', 'US915', 'EU868'}
def _rx1receive(self, txch, txdr, rx1droffset):
"""Get first receive window parameters
Args:
txch (int): Transmit channel
txdr (str): Transmit data rate 'SFnBWxxx'
rx1droffset (int): RX1DROffset parameter
Returns:
A dict of RX1 frequency, datarate string, datarate index
"""
rx1 = {'freq': None, 'datr': None, 'index': None}
rx1['freq'] = self.downstream[txch % 8]
txindex = self.datarate_rev[txdr]
rx1['index'] = self.rx1dr[txindex][rx1droffset]
rx1['datr'] = self.datarate[rx1['index']]
return rx1
def _rx2receive(self):
"""Get second receive window parameters
        RX2 (second receive window) settings use a fixed data
        rate and frequency. Default parameters are 923.3 MHz / DR8
Returns:
A dict of RX2 frequency, datarate string, datarate index
"""
rxindex = 8
return {'freq': 923.3, 'datr': self.datarate[rxindex],
'index': rxindex}
def rxparams(self, (tx_chan, tx_datr), join=False):
"""Get RX1 and RX2 receive window parameters
Args:
rxpk (Rxpk): The received Rxpk object
join (bool): Use join timers if true
Retrurns:
Dict of RX1 and RX2 parameter dicts {freq, datarate, drindex, delay}
"""
rx1 = self._rx1receive(tx_chan, tx_datr, self.rx1droffset)
rx2 = self._rx2receive()
if join:
rx1['delay'] = self.join_accept_delay[1]
rx2['delay'] = self.join_accept_delay[2]
else:
rx1['delay'] = self.receive_delay[1]
rx2['delay'] = self.receive_delay[2]
return {1: rx1, 2: rx2}
def checkAppPayloadLen(self, datarate, length):
"""Check if the length is greater than the maximum allowed
application payload length
Args:
datarate: (str) Datarate code
length (int): Length of payload
"""
return self.maxappdatalen[self.datarate_rev[datarate]] >= length
class US915(LoraBand):
"""US 902-928 ISM Band
upstream (list): 72 upstream (from device) channels:
64 channels (0 to 63) utilizing LoRa 125 kHz BW
starting at 902.3 MHz and incrementing
linearly by 200 kHz to 914.9 MHz.
8 channels (64 to 71) utilizing LoRa 500 kHz BW
starting at 903.0 MHz and incrementing linearly
by 1.6 MHz to 914.2 MHz. Units of MHz
downstream (list): 8 channels numbered 0 to 7 utilizing
LoRa 500 kHz BW starting at 923.3 MHz and incrementing
linearly by 600 kHz to 927.5 MHz. Units of MHz
datarate (dict): Data rate configuration as per Table 18 of the
LoRa specification
datarate_rev (dict): Reverse lookup for datarate.
    maxpayloadlen (dict): Maximum MACPayload size (in bytes), indexed by
        datarate as per Table 20 of the LoRa specification
rx1dr (dict): Dictionary of lists to lookup the RX1 window data rate by
transmit data rate and Rx1DROffset parameters. Lookup by
rx1dr[txdatarate][rx1droffset]
rx1droffset (int): RX1 default DR offset
    receive_delay (dict): First and second receive window delays
join_accept_delay (dict): First and second window join accept delay
max_fcnt_gap (int): Maximum tolerable frame count gap.
    maxappdatalen (dict): Maximum application payload size (in bytes) for
        each datarate.
"""
def __init__(self):
"""Initialize a US915 band object."""
# Upstream channels in MHz
self.upstream = []
for i in range(0, 64):
            self.upstream.append((9023 + 2.0 * i)/10)
for i in range(0, 8):
            self.upstream.append((9030 + 16.0 * i)/10)
# Downstream channels in MHz
self.downstream = []
for i in range(0, 8):
self.downstream.append((9233 + 6.0 * i)/10)
self.datarate = {
0: 'SF10BW125',
1: 'SF9BW125',
2: 'SF8BW125',
3: 'SF7BW125',
4: 'SF8BW500',
8: 'SF12BW500',
9: 'SF11BW500',
10: 'SF10BW500',
11: 'SF9BW500',
12: 'SF8BW500',
13: 'SF7BW500'
}
self.datarate_rev = {v:k for k, v in self.datarate.items()}
self.txpower = {0: 30, 1: 28, 2: 26, 3: 24, 4: 22, 5: 20,
6: 18, 7: 16, 8: 14, 9: 12, 10: 10 }
self.rx1dr = {
0: [10, 9, 8, 8],
1: [11, 10, 9, 8],
2: [12, 11, 10, 9],
3: [13, 12, 11, 10],
4: [13, 13, 12, 11],
8: [8, 8, 8, 8],
9: [9, 8, 8, 8],
10: [10, 9, 8, 8],
11: [11, 10, 9, 8],
12: [11, 11, 10, 9],
13: [13, 12, 11, 9] }
self.rx1droffset = 0
self.receive_delay = {1: 1, 2: 2}
self.join_accept_delay = {1: 5, 2: 6}
self.max_fcnt_gap = 16384
self.maxpayloadlen = {
0: 19,
1: 61,
2: 137,
3: 250,
4: 250,
8: 61,
9: 137,
10: 250,
11: 250,
12: 250,
13: 250 }
self.maxappdatalen = {
0: 11,
1: 53,
2: 129,
3: 242,
4: 242,
8: 53,
9: 129,
10: 242,
11: 242,
12: 242,
13: 242 }
class AU915(US915):
"""Australian 915-928 ISM Band
Subclass of US915 band. Same parameters apply, with the exception
of upstream channels, which are upshifted to start at 915.2 MHz
(channels 0 to 63) and 915.9 (channels 64 to 71).
"""
def __init__(self):
super(AU915, self).__init__()
self.upstream = []
for i in range(0, 64):
self.upstream.append((9152 + 2.0 * i)/10)
for i in range(0, 8):
self.upstream.append((9159 + 16.0 * i)/10)
class EU868(LoraBand):
""" EUROPEAN 863-870 ISM Band
based on LoRoWAN_Regional_Parameters_v1_0
"""
def __init__(self):
super(EU868, self).__init__()
        self.upstream = [868.10, 868.30, 868.50, 867.1, 867.3, 867.5, 867.7, 867.9, 868.8]
self.downstream = self.upstream
self.datarate = {
0: 'SF12BW125',
1: 'SF11BW125',
2: 'SF10BW125',
3: 'SF9BW125',
4: 'SF8BW125',
5: 'SF7BW125',
6: 'SF7BW250'
#7: FSK ?
}
self.datarate_rev = {v:k for k, v in self.datarate.items()}
        self.txpower = {0: 20, 1: 14, 2: 11, 3: 8, 4: 5, 5: 2}
self.rx1dr = {
0: [ 0 , 0 , 0 , 0 , 0 , 0 ],
1: [ 1 , 0 , 0 , 0 , 0 , 0 ],
2: [ 2 , 1 , 0 , 0 , 0 , 0 ],
3: [ 3 , 2 , 1 , 0 , 0 , 0 ],
4: [ 4 , 3 , 2 , 1 , 0 , 0 ],
5: [ 5 , 4 , 3 , 2 , 1 , 0 ],
6: [ 6 , 5 , 4 , 3 , 2 , 1 ],
7: [ 7 , 6 , 5 , 4 , 3 , 2 ],
}
self.rx1droffset = 0
self.receive_delay = {1: 1, 2: 2}
self.join_accept_delay = {1: 5, 2: 6}
self.max_fcnt_gap = 16384
        self.maxpayloadlen = {
            0: 59,
            1: 59,
            2: 59,
            3: 123,
            4: 250,
            5: 250,
            6: 250,
            7: 250,
        }
        self.maxappdatalen = {
            0: 51,
            1: 51,
            2: 51,
            3: 115,
            4: 242,
            5: 242,
            6: 242,
            7: 242
        }
def _rx2receive(self):
"""Get second receive window parameters
        RX2 (second receive window) settings use a fixed data
        rate and frequency. Default parameters are 869.525 MHz / DR0
Returns:
A dict of RX2 frequency, datarate string, datarate index
"""
rxindex = 0
return {'freq': 869.525, 'datr': self.datarate[rxindex],
'index': rxindex}
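# --- Hedged usage sketch (not part of the original module) ---
# Exercises the band classes above under Python 2 (``rxparams`` relies on
# tuple-parameter unpacking). The channel and data rate are made-up inputs.
if __name__ == '__main__':
    band = US915()
    # RX1 lands on downstream channel 3 % 8 at the offset-adjusted data rate;
    # RX2 uses the fixed 923.3 MHz / DR8 defaults described in _rx2receive.
    windows = band.rxparams((3, 'SF10BW125'))
    print(windows[1])
    print(windows[2])
    # Payload length check: DR0 allows at most 11 application payload bytes.
    print(band.checkAppPayloadLen('SF10BW125', 11))  # True
    print(band.checkAppPayloadLen('SF10BW125', 12))  # False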
|
pigeonflight/strider-plone
|
refs/heads/master
|
docker/appengine/lib/django-1.5/django/contrib/auth/tests/urls.py
|
100
|
from django.conf.urls import patterns, url
from django.contrib.auth import context_processors
from django.contrib.auth.urls import urlpatterns
from django.contrib.auth.views import password_reset
from django.contrib.auth.decorators import login_required
from django.contrib.messages.api import info
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import Template, RequestContext
from django.views.decorators.cache import never_cache
@never_cache
def remote_user_auth_view(request):
"Dummy view for remote user tests"
t = Template("Username is {{ user }}.")
c = RequestContext(request, {})
return HttpResponse(t.render(c))
def auth_processor_no_attr_access(request):
r1 = render_to_response('context_processors/auth_attrs_no_access.html',
RequestContext(request, {}, processors=[context_processors.auth]))
# *After* rendering, we check whether the session was accessed
return render_to_response('context_processors/auth_attrs_test_access.html',
{'session_accessed':request.session.accessed})
def auth_processor_attr_access(request):
r1 = render_to_response('context_processors/auth_attrs_access.html',
RequestContext(request, {}, processors=[context_processors.auth]))
return render_to_response('context_processors/auth_attrs_test_access.html',
{'session_accessed':request.session.accessed})
def auth_processor_user(request):
return render_to_response('context_processors/auth_attrs_user.html',
RequestContext(request, {}, processors=[context_processors.auth]))
def auth_processor_perms(request):
return render_to_response('context_processors/auth_attrs_perms.html',
RequestContext(request, {}, processors=[context_processors.auth]))
def auth_processor_perm_in_perms(request):
return render_to_response('context_processors/auth_attrs_perm_in_perms.html',
RequestContext(request, {}, processors=[context_processors.auth]))
def auth_processor_messages(request):
info(request, "Message 1")
return render_to_response('context_processors/auth_attrs_messages.html',
RequestContext(request, {}, processors=[context_processors.auth]))
def userpage(request):
pass
# special urls for auth test cases
urlpatterns = urlpatterns + patterns('',
(r'^logout/custom_query/$', 'django.contrib.auth.views.logout', dict(redirect_field_name='follow')),
(r'^logout/next_page/$', 'django.contrib.auth.views.logout', dict(next_page='/somewhere/')),
(r'^remote_user/$', remote_user_auth_view),
(r'^password_reset_from_email/$', 'django.contrib.auth.views.password_reset', dict(from_email='staffmember@example.com')),
(r'^admin_password_reset/$', 'django.contrib.auth.views.password_reset', dict(is_admin_site=True)),
(r'^login_required/$', login_required(password_reset)),
(r'^login_required_login_url/$', login_required(password_reset, login_url='/somewhere/')),
(r'^auth_processor_no_attr_access/$', auth_processor_no_attr_access),
(r'^auth_processor_attr_access/$', auth_processor_attr_access),
(r'^auth_processor_user/$', auth_processor_user),
(r'^auth_processor_perms/$', auth_processor_perms),
(r'^auth_processor_perm_in_perms/$', auth_processor_perm_in_perms),
(r'^auth_processor_messages/$', auth_processor_messages),
url(r'^userpage/(.+)/$', userpage, name="userpage"),
)
|
NextThought/pypy-numpy
|
refs/heads/so-path
|
numpy/core/tests/test_ufunc.py
|
30
|
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.testing import *
import numpy.core.umath_tests as umt
import numpy.core.operand_flag_tests as opflag_tests
from numpy.compat import asbytes
from numpy.core.test_rational import *
class TestUfunc(TestCase):
def test_pickle(self):
import pickle
assert pickle.loads(pickle.dumps(np.sin)) is np.sin
# Check that ufunc not defined in the top level numpy namespace such as
# numpy.core.test_rational.test_add can also be pickled
assert pickle.loads(pickle.dumps(test_add)) is test_add
def test_pickle_withstring(self):
import pickle
astring = asbytes("cnumpy.core\n_ufunc_reconstruct\np0\n"
"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert pickle.loads(astring) is np.cos
def test_reduceat_shifting_sum(self) :
L = 6
x = np.arange(L)
idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
def test_generic_loops(self) :
"""Test generic loops.
The loops to be tested are:
PyUFunc_ff_f_As_dd_d
PyUFunc_ff_f
PyUFunc_dd_d
PyUFunc_gg_g
PyUFunc_FF_F_As_DD_D
PyUFunc_DD_D
PyUFunc_FF_F
PyUFunc_GG_G
PyUFunc_OO_O
PyUFunc_OO_O_method
PyUFunc_f_f_As_d_d
PyUFunc_d_d
PyUFunc_f_f
PyUFunc_g_g
PyUFunc_F_F_As_D_D
PyUFunc_F_F
PyUFunc_D_D
PyUFunc_G_G
PyUFunc_O_O
PyUFunc_O_O_method
PyUFunc_On_Om
Where:
f -- float
d -- double
g -- long double
F -- complex float
D -- complex double
G -- complex long double
O -- python object
It is difficult to assure that each of these loops is entered from the
Python level as the special cased loops are a moving target and the
corresponding types are architecture dependent. We probably need to
define C level testing ufuncs to get at them. For the time being, I've
just looked at the signatures registered in the build directory to find
relevant functions.
Fixme, currently untested:
PyUFunc_ff_f_As_dd_d
PyUFunc_FF_F_As_DD_D
PyUFunc_f_f_As_d_d
PyUFunc_F_F_As_D_D
PyUFunc_On_Om
"""
fone = np.exp
ftwo = lambda x, y : x**y
fone_val = 1
ftwo_val = 1
# check unary PyUFunc_f_f.
msg = "PyUFunc_f_f"
x = np.zeros(10, dtype=np.single)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_d_d.
msg = "PyUFunc_d_d"
x = np.zeros(10, dtype=np.double)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_g_g.
msg = "PyUFunc_g_g"
x = np.zeros(10, dtype=np.longdouble)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_F_F.
msg = "PyUFunc_F_F"
x = np.zeros(10, dtype=np.csingle)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_D_D.
msg = "PyUFunc_D_D"
x = np.zeros(10, dtype=np.cdouble)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_G_G.
msg = "PyUFunc_G_G"
x = np.zeros(10, dtype=np.clongdouble)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check binary PyUFunc_ff_f.
msg = "PyUFunc_ff_f"
x = np.ones(10, dtype=np.single)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_dd_d.
msg = "PyUFunc_dd_d"
x = np.ones(10, dtype=np.double)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_gg_g.
msg = "PyUFunc_gg_g"
x = np.ones(10, dtype=np.longdouble)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_FF_F.
msg = "PyUFunc_FF_F"
x = np.ones(10, dtype=np.csingle)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_DD_D.
msg = "PyUFunc_DD_D"
x = np.ones(10, dtype=np.cdouble)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_GG_G.
msg = "PyUFunc_GG_G"
x = np.ones(10, dtype=np.clongdouble)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# class to use in testing object method loops
class foo(object):
def conjugate(self) :
return np.bool_(1)
def logical_xor(self, obj) :
return np.bool_(1)
# check unary PyUFunc_O_O
msg = "PyUFunc_O_O"
x = np.ones(10, dtype=np.object)[0::2]
assert_(np.all(np.abs(x) == 1), msg)
# check unary PyUFunc_O_O_method
msg = "PyUFunc_O_O_method"
x = np.zeros(10, dtype=np.object)[0::2]
for i in range(len(x)) :
x[i] = foo()
assert_(np.all(np.conjugate(x) == True), msg)
# check binary PyUFunc_OO_O
msg = "PyUFunc_OO_O"
x = np.ones(10, dtype=np.object)[0::2]
assert_(np.all(np.add(x, x) == 2), msg)
# check binary PyUFunc_OO_O_method
msg = "PyUFunc_OO_O_method"
x = np.zeros(10, dtype=np.object)[0::2]
for i in range(len(x)) :
x[i] = foo()
assert_(np.all(np.logical_xor(x, x)), msg)
# check PyUFunc_On_Om
# fixme -- I don't know how to do this yet
def test_all_ufunc(self) :
"""Try to check presence and results of all ufuncs.
The list of ufuncs comes from generate_umath.py and is as follows:
===== ==== ============= =============== ========================
done args function types notes
===== ==== ============= =============== ========================
n 1 conjugate nums + O
n 1 absolute nums + O complex -> real
n 1 negative nums + O
n 1 sign nums + O -> int
n 1 invert bool + ints + O flts raise an error
n 1 degrees real + M cmplx raise an error
n 1 radians real + M cmplx raise an error
n 1 arccos flts + M
n 1 arccosh flts + M
n 1 arcsin flts + M
n 1 arcsinh flts + M
n 1 arctan flts + M
n 1 arctanh flts + M
n 1 cos flts + M
n 1 sin flts + M
n 1 tan flts + M
n 1 cosh flts + M
n 1 sinh flts + M
n 1 tanh flts + M
n 1 exp flts + M
n 1 expm1 flts + M
n 1 log flts + M
n 1 log10 flts + M
n 1 log1p flts + M
n 1 sqrt flts + M real x < 0 raises error
n 1 ceil real + M
n 1 trunc real + M
n 1 floor real + M
n 1 fabs real + M
n 1 rint flts + M
n 1 isnan flts -> bool
n 1 isinf flts -> bool
n 1 isfinite flts -> bool
n 1 signbit real -> bool
n 1 modf real -> (frac, int)
n 1 logical_not bool + nums + M -> bool
n 2 left_shift ints + O flts raise an error
n 2 right_shift ints + O flts raise an error
n 2 add bool + nums + O boolean + is ||
n 2 subtract bool + nums + O boolean - is ^
n 2 multiply bool + nums + O boolean * is &
n 2 divide nums + O
n 2 floor_divide nums + O
n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d
n 2 fmod nums + M
n 2 power nums + O
n 2 greater bool + nums + O -> bool
n 2 greater_equal bool + nums + O -> bool
n 2 less bool + nums + O -> bool
n 2 less_equal bool + nums + O -> bool
n 2 equal bool + nums + O -> bool
n 2 not_equal bool + nums + O -> bool
n 2 logical_and bool + nums + M -> bool
n 2 logical_or bool + nums + M -> bool
n 2 logical_xor bool + nums + M -> bool
n 2 maximum bool + nums + O
n 2 minimum bool + nums + O
n 2 bitwise_and bool + ints + O flts raise an error
n 2 bitwise_or bool + ints + O flts raise an error
n 2 bitwise_xor bool + ints + O flts raise an error
n 2 arctan2 real + M
n 2 remainder ints + real + O
n 2 hypot real + M
===== ==== ============= =============== ========================
Types other than those listed will be accepted, but they are cast to
the smallest compatible type for which the function is defined. The
casting rules are:
bool -> int8 -> float32
ints -> double
"""
pass
def test_signature(self):
# the arguments to test_signature are: nin, nout, core_signature
# pass
assert_equal(umt.test_signature(2, 1, "(i),(i)->()"), 1)
# pass. empty core signature; treat as plain ufunc (with trivial core)
assert_equal(umt.test_signature(2, 1, "(),()->()"), 0)
# in the following calls, a ValueError should be raised because
# of error in core signature
# error: extra parenthesis
msg = "core_sig: extra parenthesis"
try:
ret = umt.test_signature(2, 1, "((i)),(i)->()")
assert_equal(ret, None, err_msg=msg)
        except ValueError: pass
# error: parenthesis matching
msg = "core_sig: parenthesis matching"
try:
ret = umt.test_signature(2, 1, "(i),)i(->()")
assert_equal(ret, None, err_msg=msg)
        except ValueError: pass
# error: incomplete signature. letters outside of parenthesis are ignored
msg = "core_sig: incomplete signature"
try:
ret = umt.test_signature(2, 1, "(i),->()")
assert_equal(ret, None, err_msg=msg)
        except ValueError: pass
# error: incomplete signature. 2 output arguments are specified
msg = "core_sig: incomplete signature"
try:
ret = umt.test_signature(2, 2, "(i),(i)->()")
assert_equal(ret, None, err_msg=msg)
        except ValueError: pass
# more complicated names for variables
assert_equal(umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)"), 1)
def test_get_signature(self):
assert_equal(umt.inner1d.signature, "(i),(i)->()")
def test_forced_sig(self):
a = 0.5*np.arange(3, dtype='f8')
assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
casting='unsafe'), [0, 0, 1])
b = np.zeros((3,), dtype='f8')
np.add(a, 0.5, out=b)
assert_equal(b, [0.5, 1, 1.5])
b[:] = 0
np.add(a, 0.5, sig='i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
def test_sum_stability(self):
a = np.ones(500, dtype=np.float32)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4)
a = np.ones(500, dtype=np.float64)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
def test_sum(self):
for dt in (np.int, np.float16, np.float32, np.float64, np.longdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2)
d = np.arange(1, v + 1, dtype=dt)
assert_almost_equal(np.sum(d), tgt)
assert_almost_equal(np.sum(d[::-1]), tgt)
d = np.ones(500, dtype=dt)
assert_almost_equal(np.sum(d[::2]), 250.)
assert_almost_equal(np.sum(d[1::2]), 250.)
assert_almost_equal(np.sum(d[::3]), 167.)
assert_almost_equal(np.sum(d[1::3]), 167.)
assert_almost_equal(np.sum(d[::-2]), 250.)
assert_almost_equal(np.sum(d[-1::-2]), 250.)
assert_almost_equal(np.sum(d[::-3]), 167.)
assert_almost_equal(np.sum(d[-1::-3]), 167.)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt)
d += d
assert_almost_equal(d, 2.)
def test_sum_complex(self):
for dt in (np.complex64, np.complex128, np.clongdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) *1j)
d = np.empty(v, dtype=dt)
d.real = np.arange(1, v + 1)
d.imag = -np.arange(1, v + 1)
assert_almost_equal(np.sum(d), tgt)
assert_almost_equal(np.sum(d[::-1]), tgt)
d = np.ones(500, dtype=dt) + 1j
assert_almost_equal(np.sum(d[::2]), 250. + 250j)
assert_almost_equal(np.sum(d[1::2]), 250. + 250j)
assert_almost_equal(np.sum(d[::3]), 167. + 167j)
assert_almost_equal(np.sum(d[1::3]), 167. + 167j)
assert_almost_equal(np.sum(d[::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[::-3]), 167. + 167j)
assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt) + 1j
d += d
assert_almost_equal(d, 2. + 2j)
def test_inner1d(self):
a = np.arange(6).reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
a = np.arange(6)
assert_array_equal(umt.inner1d(a, a), np.sum(a*a))
def test_broadcast(self):
msg = "broadcast"
a = np.arange(4).reshape((2, 1, 2))
b = np.arange(4).reshape((1, 2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "extend & broadcast loop dimensions"
b = np.arange(4).reshape((2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "broadcast in core dimensions"
a = np.arange(8).reshape((4, 2))
b = np.arange(4).reshape((4, 1))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "extend & broadcast core and loop dimensions"
a = np.arange(8).reshape((4, 2))
b = np.array(7)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "broadcast should fail"
a = np.arange(2).reshape((2, 1, 1))
b = np.arange(3).reshape((3, 1, 1))
try:
ret = umt.inner1d(a, b)
assert_equal(ret, None, err_msg=msg)
        except ValueError: pass
def test_type_cast(self):
msg = "type cast"
a = np.arange(6, dtype='short').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg)
msg = "type cast on one argument"
a = np.arange(6).reshape((2, 3))
b = a+0.1
assert_array_almost_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
def test_endian(self):
msg = "big endian"
a = np.arange(6, dtype='>i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg)
msg = "little endian"
a = np.arange(6, dtype='<i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg)
# Output should always be native-endian
Ba = np.arange(1, dtype='>f8')
La = np.arange(1, dtype='<f8')
assert_equal((Ba+Ba).dtype, np.dtype('f8'))
assert_equal((Ba+La).dtype, np.dtype('f8'))
assert_equal((La+Ba).dtype, np.dtype('f8'))
assert_equal((La+La).dtype, np.dtype('f8'))
assert_equal(np.absolute(La).dtype, np.dtype('f8'))
assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
assert_equal(np.negative(La).dtype, np.dtype('f8'))
assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
def test_incontiguous_array(self):
msg = "incontiguous memory layout of array"
x = np.arange(64).reshape((2, 2, 2, 2, 2, 2))
a = x[:, 0,:, 0,:, 0]
b = x[:, 1,:, 1,:, 1]
a[0, 0, 0] = -1
msg2 = "make sure it references to the original array"
assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
x = np.arange(24).reshape(2, 3, 4)
a = x.T
b = x.T
a[0, 0, 0] = -1
assert_equal(x[0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
def test_output_argument(self):
msg = "output argument"
a = np.arange(12).reshape((2, 3, 2))
b = np.arange(4).reshape((2, 1, 2)) + 1
c = np.zeros((2, 3), dtype='int')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with type cast"
c = np.zeros((2, 3), dtype='int16')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with incontiguous layout"
c = np.zeros((2, 3, 4), dtype='int16')
umt.inner1d(a, b, c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
a = np.arange(100, 124).reshape((2, 3, 4))
b = np.arange(200, 224).reshape((2, 3, 4))
w = np.arange(300, 324).reshape((2, 3, 4))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_innerwt_empty(self):
"""Test generalized ufunc with zero-sized operands"""
a = np.array([], dtype='f8')
b = np.array([], dtype='f8')
w = np.array([], dtype='f8')
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_matrix_multiply(self):
self.compare_matrix_multiply_results(np.long)
self.compare_matrix_multiply_results(np.double)
def compare_matrix_multiply_results(self, tp):
d1 = np.array(rand(2, 3, 4), dtype=tp)
d2 = np.array(rand(2, 3, 4), dtype=tp)
msg = "matrix multiply on type %s" % d1.dtype.name
def permute_n(n):
if n == 1:
return ([0],)
ret = ()
base = permute_n(n-1)
for perm in base:
for i in range(n):
new = perm + [n-1]
new[n-1] = new[i]
new[i] = n-1
ret += (new,)
return ret
def slice_n(n):
if n == 0:
return ((),)
ret = ()
base = slice_n(n-1)
for sl in base:
ret += (sl+(slice(None),),)
ret += (sl+(slice(0, 1),),)
return ret
def broadcastable(s1, s2):
return s1 == s2 or s1 == 1 or s2 == 1
permute_3 = permute_n(3)
slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,)
ref = True
for p1 in permute_3:
for p2 in permute_3:
for s1 in slice_3:
for s2 in slice_3:
a1 = d1.transpose(p1)[s1]
a2 = d2.transpose(p2)[s2]
                        ref = ref and a1.base is not None
                        ref = ref and a2.base is not None
if broadcastable(a1.shape[-1], a2.shape[-2]) and \
broadcastable(a1.shape[0], a2.shape[0]):
assert_array_almost_equal(
umt.matrix_multiply(a1, a2),
np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
a1[..., np.newaxis,:], axis=-1),
err_msg = msg+' %s %s' % (str(a1.shape),
str(a2.shape)))
assert_equal(ref, True, err_msg="reference check")
def test_object_logical(self):
a = np.array([3, None, True, False, "test", ""], dtype=object)
assert_equal(np.logical_or(a, None),
np.array([x or None for x in a], dtype=object))
assert_equal(np.logical_or(a, True),
np.array([x or True for x in a], dtype=object))
assert_equal(np.logical_or(a, 12),
np.array([x or 12 for x in a], dtype=object))
assert_equal(np.logical_or(a, "blah"),
np.array([x or "blah" for x in a], dtype=object))
assert_equal(np.logical_and(a, None),
np.array([x and None for x in a], dtype=object))
assert_equal(np.logical_and(a, True),
np.array([x and True for x in a], dtype=object))
assert_equal(np.logical_and(a, 12),
np.array([x and 12 for x in a], dtype=object))
assert_equal(np.logical_and(a, "blah"),
np.array([x and "blah" for x in a], dtype=object))
assert_equal(np.logical_not(a),
np.array([not x for x in a], dtype=object))
assert_equal(np.logical_or.reduce(a), 3)
assert_equal(np.logical_and.reduce(a), None)
def test_object_array_reduction(self):
# Reductions on object arrays
a = np.array(['a', 'b', 'c'], dtype=object)
assert_equal(np.sum(a), 'abc')
assert_equal(np.max(a), 'c')
assert_equal(np.min(a), 'a')
a = np.array([True, False, True], dtype=object)
assert_equal(np.sum(a), 2)
assert_equal(np.prod(a), 0)
assert_equal(np.any(a), True)
assert_equal(np.all(a), False)
assert_equal(np.max(a), True)
assert_equal(np.min(a), False)
assert_equal(np.array([[1]], dtype=object).sum(), 1)
assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
def test_object_scalar_multiply(self):
# Tickets #2469 and #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.multiply(arr, 3), desired)
assert_equal(np.multiply(3, arr), desired)
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
assert_equal(np.sum(a), 0)
assert_equal(np.prod(a), 1)
assert_equal(np.any(a), False)
assert_equal(np.all(a), True)
assert_raises(ValueError, np.max, a)
assert_raises(ValueError, np.min, a)
def test_axis_out_of_bounds(self):
a = np.array([False, False])
assert_raises(ValueError, a.all, axis=1)
a = np.array([False, False])
assert_raises(ValueError, a.all, axis=-2)
a = np.array([False, False])
assert_raises(ValueError, a.any, axis=1)
a = np.array([False, False])
assert_raises(ValueError, a.any, axis=-2)
def test_scalar_reduction(self):
# The functions 'sum', 'prod', etc allow specifying axis=0
# even for scalars
assert_equal(np.sum(3, axis=0), 3)
assert_equal(np.prod(3.5, axis=0), 3.5)
assert_equal(np.any(True, axis=0), True)
assert_equal(np.all(False, axis=0), False)
assert_equal(np.max(3, axis=0), 3)
assert_equal(np.min(2.5, axis=0), 2.5)
# Check scalar behaviour for ufuncs without an identity
assert_equal(np.power.reduce(3), 3)
# Make sure that scalars are coming out from this operation
assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
# check if scalars/0-d arrays get cast
assert_(type(np.any(0, axis=0)) is np.bool_)
# assert that 0-d arrays get wrapped
class MyArray(np.ndarray):
pass
a = np.array(1).view(MyArray)
assert_(type(np.any(a)) is MyArray)
def test_casting_out_param(self):
# Test that it's possible to do casts on output
a = np.ones((200, 100), np.int64)
b = np.ones((200, 100), np.int64)
c = np.ones((200, 100), np.float64)
np.add(a, b, out=c)
assert_equal(c, 2)
a = np.zeros(65536)
b = np.zeros(65536, dtype=np.float32)
np.subtract(a, 0, out=b)
assert_equal(b, 0)
def test_where_param(self):
# Test that the where= ufunc parameter works with regular arrays
a = np.arange(7)
b = np.ones(7)
c = np.zeros(7)
np.add(a, b, out=c, where=(a % 2 == 1))
assert_equal(c, [0, 2, 0, 4, 0, 6, 0])
a = np.arange(4).reshape(2, 2) + 2
np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]])
assert_equal(a, [[2, 27], [16, 5]])
# Broadcasting the where= parameter
np.subtract(a, 2, out=a, where=[True, False])
assert_equal(a, [[0, 27], [14, 5]])
def test_where_param_buffer_output(self):
# This test is temporarily skipped because it requires
# adding masking features to the nditer to work properly
# With casting on output
a = np.ones(10, np.int64)
b = np.ones(10, np.int64)
c = 1.5 * np.ones(10, np.float64)
np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
def check_identityless_reduction(self, a):
        # np.minimum.reduce is an identityless reduction
# Verify that it sees the zero at various positions
a[...] = 1
a[1, 0, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0])
assert_equal(np.minimum.reduce(a, axis=0),
[[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 1, 1, 1], [0, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 1, 1], [0, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 1, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 0, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 0, 1] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 0, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[0, 1, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
def test_identityless_reduction_corder(self):
a = np.empty((2, 3, 4), order='C')
self.check_identityless_reduction(a)
def test_identityless_reduction_forder(self):
a = np.empty((2, 3, 4), order='F')
self.check_identityless_reduction(a)
def test_identityless_reduction_otherorder(self):
a = np.empty((2, 4, 3), order='C').swapaxes(1, 2)
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig(self):
a = np.empty((3, 5, 4), order='C').swapaxes(1, 2)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig_unaligned(self):
a = np.empty((3*4*5*8 + 1,), dtype='i1')
a = a[1:].view(dtype='f8')
a.shape = (3, 4, 5)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
res = np.divide.reduce(a, axis=0)
assert_equal(res, [8.0, 4.0, 8.0])
res = np.divide.reduce(a, axis=1)
assert_equal(res, [2.0, 8.0])
res = np.divide.reduce(a, axis=())
assert_equal(res, a)
assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
def test_reduce_zero_axis(self):
        # If we have an n x m array and do a reduction with axis=1, then we are
        # doing n reductions, and each reduction takes an m-element array. For
        # a reduction operation without an identity:
        #   n > 0, m > 0: fine
        #   n = 0, m > 0: fine, doing 0 reductions of m-element arrays
        #   n > 0, m = 0: can't reduce a 0-element array, ValueError
        #   n = 0, m = 0: can't reduce a 0-element array, ValueError (for
        #     consistency with the above case)
        # This test doesn't actually look at return values, it just checks
        # that we get an error in exactly those cases where we expect one,
        # and assumes the calculations themselves are done correctly.
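        # For example, np.maximum.reduce(np.zeros((0, 3)), axis=1) is fine
        # (an empty result), while np.maximum.reduce(np.zeros((3, 0)), axis=1)
        # must raise, since each of the 3 reductions would see 0 elements.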
def ok(f, *args, **kwargs):
f(*args, **kwargs)
def err(f, *args, **kwargs):
assert_raises(ValueError, f, *args, **kwargs)
def t(expect, func, n, m):
expect(func, np.zeros((n, m)), axis=1)
expect(func, np.zeros((m, n)), axis=0)
expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
expect(func, np.zeros((m // 3, m // 3, m // 3,
                                   n // 2, n // 2)),
axis=(0, 1, 2))
# Check what happens if the inner (resp. outer) dimensions are a
# mix of zero and non-zero:
expect(func, np.zeros((10, m, n)), axis=(0, 1))
expect(func, np.zeros((10, n, m)), axis=(0, 2))
expect(func, np.zeros((m, 10, n)), axis=0)
expect(func, np.zeros((10, m, n)), axis=1)
expect(func, np.zeros((10, n, m)), axis=2)
# np.maximum is just an arbitrary ufunc with no reduction identity
assert_equal(np.maximum.identity, None)
t(ok, np.maximum.reduce, 30, 30)
t(ok, np.maximum.reduce, 0, 30)
t(err, np.maximum.reduce, 30, 0)
t(err, np.maximum.reduce, 0, 0)
err(np.maximum.reduce, [])
np.maximum.reduce(np.zeros((0, 0)), axis=())
# all of the combinations are fine for a reduction that has an
# identity
t(ok, np.add.reduce, 30, 30)
t(ok, np.add.reduce, 0, 30)
t(ok, np.add.reduce, 30, 0)
t(ok, np.add.reduce, 0, 0)
np.add.reduce([])
np.add.reduce(np.zeros((0, 0)), axis=())
# OTOH, accumulate always makes sense for any combination of n and m,
# because it maps an m-element array to an m-element array. These
# tests are simpler because accumulate doesn't accept multiple axes.
for uf in (np.maximum, np.add):
uf.accumulate(np.zeros((30, 0)), axis=0)
uf.accumulate(np.zeros((0, 30)), axis=0)
uf.accumulate(np.zeros((30, 30)), axis=0)
uf.accumulate(np.zeros((0, 0)), axis=0)
def test_safe_casting(self):
# In old versions of numpy, in-place operations used the 'unsafe'
# casting rules. In some future version, 'same_kind' will become the
# default.
a = np.array([1, 2, 3], dtype=int)
# Non-in-place addition is fine
assert_array_equal(assert_no_warnings(np.add, a, 1.1),
[2.1, 3.1, 4.1])
assert_warns(DeprecationWarning, np.add, a, 1.1, out=a)
assert_array_equal(a, [2, 3, 4])
def add_inplace(a, b):
a += b
assert_warns(DeprecationWarning, add_inplace, a, 1.1)
assert_array_equal(a, [3, 4, 5])
# Make sure that explicitly overriding the warning is allowed:
assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
assert_array_equal(a, [4, 5, 6])
# There's no way to propagate exceptions from the place where we issue
# this deprecation warning, so we must throw the exception away
# entirely rather than cause it to be raised at some other point, or
# trigger some other unsuspecting if (PyErr_Occurred()) { ...} at some
# other location entirely.
import warnings
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
with warnings.catch_warnings():
warnings.simplefilter("error")
old_stderr = sys.stderr
try:
sys.stderr = StringIO()
# No error, but dumps to stderr
a += 1.1
# No error on the next bit of code executed either
1 + 1
assert_("Implicitly casting" in sys.stderr.getvalue())
finally:
sys.stderr = old_stderr
def test_ufunc_custom_out(self):
# Test ufunc with built in input types and custom output type
a = np.array([0, 1, 2], dtype='i8')
b = np.array([0, 1, 2], dtype='i8')
c = np.empty(3, dtype=rational)
# Output must be specified so numpy knows what
# ufunc signature to look for
result = test_add(a, b, c)
assert_equal(result, np.array([0, 2, 4], dtype=rational))
# no output type should raise TypeError
assert_raises(TypeError, test_add, a, b)
def test_operand_flags(self):
a = np.arange(16, dtype='l').reshape(4, 4)
b = np.arange(9, dtype='l').reshape(3, 3)
opflag_tests.inplace_add(a[:-1, :-1], b)
assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
[14, 16, 18, 11], [12, 13, 14, 15]], dtype='l'))
a = np.array(0)
opflag_tests.inplace_add(a, 3)
assert_equal(a, 3)
opflag_tests.inplace_add(a, [3, 4])
assert_equal(a, 10)
def test_struct_ufunc(self):
import numpy.core.struct_ufunc_test as struct_ufunc
a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
result = struct_ufunc.add_triplet(a, b)
assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
def test_custom_ufunc(self):
        a = np.array([rational(1, 2), rational(1, 3), rational(1, 4)],
                     dtype=rational)
        b = np.array([rational(1, 2), rational(1, 3), rational(1, 4)],
                     dtype=rational)
        result = test_add_rationals(a, b)
        expected = np.array([rational(1), rational(2, 3), rational(1, 2)],
                            dtype=rational)
        assert_equal(result, expected)
def test_custom_array_like(self):
class MyThing(object):
__array_priority__ = 1000
rmul_count = 0
getitem_count = 0
def __init__(self, shape):
self.shape = shape
def __len__(self):
return self.shape[0]
def __getitem__(self, i):
MyThing.getitem_count += 1
if not isinstance(i, tuple):
i = (i,)
if len(i) > len(self.shape):
raise IndexError("boo")
return MyThing(self.shape[len(i):])
def __rmul__(self, other):
MyThing.rmul_count += 1
return self
np.float64(5)*MyThing((3, 3))
assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
def test_inplace_fancy_indexing(self):
a = np.arange(10)
np.add.at(a, [2, 5, 2], 1)
assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9])
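        # Unlike a fancy-indexed in-place op such as a[[2, 5, 2]] += 1, which
        # only increments a[2] once, ufunc.at applies the operation for every
        # occurrence of an index, so a[2] goes from 2 to 4 above.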
a = np.arange(10)
b = np.array([100, 100, 100])
np.add.at(a, [2, 5, 2], b)
assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, (slice(None), [1, 2, 1]), b)
assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b)
assert_equal(a,
[[[0, 401, 202],
[3, 404, 205],
[6, 407, 208]],
[[9, 410, 211],
[12, 413, 214],
[15, 416, 217]],
[[18, 419, 220],
[21, 422, 223],
[24, 425, 226]]])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, ([1, 2, 1], slice(None)), b)
assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b)
assert_equal(a,
[[[0, 1, 2 ],
[203, 404, 605],
[106, 207, 308]],
[[9, 10, 11 ],
[212, 413, 614],
[115, 216, 317]],
[[18, 19, 20 ],
[221, 422, 623],
[124, 225, 326]]])
a = np.arange(9).reshape(3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (0, [1, 2, 1]), b)
assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, ([1, 2, 1], 0, slice(None)), b)
assert_equal(a,
[[[0, 1, 2],
[3, 4, 5],
[6, 7, 8]],
[[209, 410, 611],
[12, 13, 14],
[15, 16, 17]],
[[118, 219, 320],
[21, 22, 23],
[24, 25, 26]]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), slice(None)), b)
assert_equal(a,
[[[100, 201, 302],
[103, 204, 305],
[106, 207, 308]],
[[109, 210, 311],
[112, 213, 314],
[115, 216, 317]],
[[118, 219, 320],
[121, 222, 323],
[124, 225, 326]]])
a = np.arange(10)
np.negative.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9])
# Test 0-dim array
a = np.array(0)
np.add.at(a, (), 1)
assert_equal(a, 1)
assert_raises(IndexError, np.add.at, a, 0, 1)
assert_raises(IndexError, np.add.at, a, [], 1)
# Test mixed dtypes
a = np.arange(10)
np.power.at(a, [1, 2, 3, 2], 3.5)
assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9]))
# Test boolean indexing and boolean ufuncs
a = np.arange(10)
index = a % 2 == 0
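        # equal.at writes the boolean comparison result back into the integer
        # array, so every even slot becomes int(a[i] == value), i.e. 1 here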
np.equal.at(a, index, [0, 2, 4, 6, 8])
assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9])
# Test unary operator
a = np.arange(10, dtype='u4')
np.invert.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9])
# Test empty subspace
orig = np.arange(4)
a = orig[:, None][:, 0:0]
np.add.at(a, [0, 1], 3)
assert_array_equal(orig, np.arange(4))
# Test with swapped byte order
index = np.array([1, 2, 1], np.dtype('i').newbyteorder())
values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder())
np.add.at(values, index, 3)
assert_array_equal(values, [1, 8, 6, 4])
# Test exception thrown
values = np.array(['a', 1], dtype=np.object)
self.assertRaises(TypeError, np.add.at, values, [0, 1], 1)
assert_array_equal(values, np.array(['a', 1], dtype=np.object))
def test_reduce_arguments(self):
f = np.add.reduce
d = np.ones((5,2), dtype=int)
o = np.ones((2,), dtype=d.dtype)
r = o * 5
assert_equal(f(d), r)
# a, axis=0, dtype=None, out=None, keepdims=False
assert_equal(f(d, axis=0), r)
assert_equal(f(d, 0), r)
assert_equal(f(d, 0, dtype=None), r)
assert_equal(f(d, 0, dtype='i'), r)
assert_equal(f(d, 0, 'i'), r)
assert_equal(f(d, 0, None), r)
assert_equal(f(d, 0, None, out=None), r)
assert_equal(f(d, 0, None, out=o), r)
assert_equal(f(d, 0, None, o), r)
assert_equal(f(d, 0, None, None), r)
assert_equal(f(d, 0, None, None, keepdims=False), r)
assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
# multiple keywords
assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False), r)
# too little
assert_raises(TypeError, f)
# too much
assert_raises(TypeError, f, d, 0, None, None, False, 1)
# invalid axis
assert_raises(TypeError, f, d, "invalid")
assert_raises(TypeError, f, d, axis="invalid")
assert_raises(TypeError, f, d, axis="invalid", dtype=None,
keepdims=True)
# invalid dtype
assert_raises(TypeError, f, d, 0, "invalid")
assert_raises(TypeError, f, d, dtype="invalid")
assert_raises(TypeError, f, d, dtype="invalid", out=None)
# invalid out
assert_raises(TypeError, f, d, 0, None, "invalid")
assert_raises(TypeError, f, d, out="invalid")
assert_raises(TypeError, f, d, out="invalid", dtype=None)
# keepdims boolean, no invalid value
# assert_raises(TypeError, f, d, 0, None, None, "invalid")
# assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None)
# invalid mix
assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid",
out=None)
        # invalid keyword
assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid",
out=None)
assert_raises(TypeError, f, d, invalid=0)
assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True,
out=None, invalid=0)
assert_raises(TypeError, f, d, axis=0, dtype=None,
out=None, invalid=0)
assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0)
if __name__ == "__main__":
run_module_suite()
|
40123237/w17test
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/unittest/test/testmock/support.py
|
829
|
import sys
from warnings import catch_warnings
def is_instance(obj, klass):
"""Version of is_instance that doesn't access __class__"""
return issubclass(type(obj), klass)
class SomeClass(object):
class_attribute = None
def wibble(self):
pass
class X(object):
pass
def examine_warnings(func):
def wrapper():
with catch_warnings(record=True) as ws:
func(ws)
return wrapper
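# Usage sketch (illustrative): wrap a callable that wants to inspect the
# warnings recorded while it runs, then invoke the returned wrapper:
#
#   def check(recorded_warnings):
#       assert recorded_warnings == []
#   examine_warnings(check)()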
|
cainmatt/django
|
refs/heads/master
|
tests/select_related_regress/__init__.py
|
12133432
| |
amith01994/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/localflavor/fi/__init__.py
|
12133432
| |
rockneurotiko/django
|
refs/heads/master
|
django/contrib/flatpages/templatetags/__init__.py
|
12133432
| |
pascalchevrel/bedrock
|
refs/heads/master
|
tests/functional/firefox/channel/__init__.py
|
12133432
| |
Titulacion-Sistemas/PythonTitulacion-EV
|
refs/heads/master
|
Lib/site-packages/logilab/common/test/data/deprecation.py
|
14
|
# placeholder used by unittest_deprecation
def moving_target():
pass
|
MoKee/android_kernel_htc_m7
|
refs/heads/jb-mr2_mkt
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
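        # prev_state "R" means prev was preempted while still runnable, so
        # it stays on the runqueue; any other state means it went to sleep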
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
        ret += " %s" % self.event.__repr__()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
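        # binary search over the time-ordered slices for the one whose
        # [start, end] interval contains ts; returns -1 when none does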
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
            i = (end + start) // 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
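        # shade the CPU cell from white (no load) to pure red (all of the
        # load): a higher load share removes more green and blue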
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
40223214/-2015cd_midterm2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/turtle.py
|
619
|
import math
from javascript import console
from browser import document, html
import _svg
_CFG = {"width" : 0.5, # Screen
"height" : 0.75,
"canvwidth" : 400,
"canvheight": 300,
"leftright": None,
"topbottom": None,
"mode": "standard", # TurtleScreen
"colormode": 1.0,
"delay": 10,
"undobuffersize": 1000, # RawTurtle
"shape": "classic",
"pencolor" : "black",
"fillcolor" : "black",
"resizemode" : "noresize",
"visible" : True,
"language": "english", # docstrings
"exampleturtle": "turtle",
"examplescreen": "screen",
"title": "Python Turtle Graphics",
"using_IDLE": False
}
class Vec2D(tuple):
"""A 2 dimensional vector class, used as a helper class
for implementing turtle graphics.
May be useful for turtle graphics programs also.
Derived from tuple, so a vector is a tuple!
Provides (for a, b vectors, k number):
a+b vector addition
a-b vector subtraction
a*b inner product
k*a and a*k multiplication with scalar
|a| absolute value of a
a.rotate(angle) rotation
"""
def __new__(cls, x, y):
return tuple.__new__(cls, (x, y))
def __add__(self, other):
return Vec2D(self[0]+other[0], self[1]+other[1])
def __mul__(self, other):
if isinstance(other, Vec2D):
return self[0]*other[0]+ self[1]*other[1]
return Vec2D(self[0]*other, self[1]*other)
def __rmul__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Vec2D(self[0]*other, self[1]*other)
def __sub__(self, other):
return Vec2D(self[0]-other[0], self[1]-other[1])
def __neg__(self):
return Vec2D(-self[0], -self[1])
def __abs__(self):
return (self[0]**2 + self[1]**2)**0.5
def rotate(self, angle):
"""rotate self counterclockwise by angle
"""
perp = Vec2D(-self[1], self[0])
angle = angle * math.pi / 180.0
c, s = math.cos(angle), math.sin(angle)
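        # standard 2-D rotation: cos(angle)*self + sin(angle)*perpendicular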
return Vec2D(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)
def __getnewargs__(self):
return (self[0], self[1])
def __repr__(self):
return "(%.2f,%.2f)" % self
##############################################################################
### From here up to line : Tkinter - Interface for turtle.py ###
### May be replaced by an interface to some different graphics toolkit ###
##############################################################################
class _Root:
"""Root class for Screen based on Tkinter."""
def setupcanvas(self, width, height, cwidth, cheight):
self._svg=_svg.svg(Id="mycanvas", width=cwidth, height=cheight)
self._canvas=_svg.g(transform="translate(%d,%d)" % (cwidth//2, cheight//2))
self._svg <= self._canvas
def end(self):
def set_svg():
#have to do this to get animate to work...
document['container'].html=document['container'].html
if "mycanvas" not in document:
document["container"] <= self._svg
from browser import timer
#need this for chrome so that first few draw commands are viewed properly.
timer.set_timeout(set_svg, 1)
def _getcanvas(self):
return self._canvas
def win_width(self):
return self._canvas.width
def win_height(self):
return self._canvas.height
class TurtleScreenBase:
"""Provide the basic graphics functionality.
Interface between Tkinter and turtle.py.
To port turtle.py to some different graphics toolkit
a corresponding TurtleScreenBase class has to be implemented.
"""
#@staticmethod
#def _blankimage():
# """return a blank image object
# """
# pass
#@staticmethod
#def _image(filename):
# """return an image object containing the
# imagedata from a gif-file named filename.
# """
# pass
def __init__(self, cv):
self.cv = cv
self._previous_turtle_attributes={}
self._draw_pos=0
self.canvwidth = cv.width
self.canvheight = cv.height
self.xscale = self.yscale = 1.0
def _createpoly(self):
"""Create an invisible polygon item on canvas self.cv)
"""
#console.log("_createpoly")
pass
def _drawpoly(self, polyitem, coordlist, fill=None,
outline=None, width=None, top=False):
"""Configure polygonitem polyitem according to provided
arguments:
coordlist is sequence of coordinates
fill is filling color
outline is outline color
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
#console.log("_drawpoly")
pass
def _drawline(self, lineitem, coordlist=None,
fill=None, width=None, top=False):
"""Configure lineitem according to provided arguments:
coordlist is sequence of coordinates
fill is drawing color
width is width of drawn line.
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
#console.log("_drawline")
#if not isinstance(lineitem, Turtle):
# return
if coordlist is not None:
_x0, _y0=coordlist[0]
_x1, _y1=coordlist[1]
_dist=math.sqrt( (_x0-_x1)*(_x0-_x1) + (_y0-_y1)*(_y0-_y1) )
_dur="%4.2fs" % (0.01*_dist)
if _dur == '0.00s':
_dur='0.1s'
#_dur="%ss" % 1
self._draw_pos+=1
_shape=["%s,%s" % (_x, _y) for _x,_y in lineitem.get_shapepoly()]
if 0:
#if lineitem.isvisible():
if lineitem in self._previous_turtle_attributes:
_previous=self._previous_turtle_attributes[lineitem]
if _previous.heading() != lineitem.heading():
#if self._turtle_heading[lineitem] != lineitem.heading():
_rotate=_previous.heading()
_turtle=_svg.polygon(points=" ".join(_shape),
transform="rotate(%s)" % (_rotate-90),
style={'stroke': fill, 'fill': fill,
'stroke-width': width, 'display': 'none'})
# we need to rotate our turtle..
_turtle <= _svg.animateTransform(
Id="animateLine%s" % self._draw_pos,
attributeName="transform",
type="rotate",
attributeType="XML",
From=_rotate - 90,
to=lineitem.heading() -90,
dur=_dur,
begin="animateLine%s.end" % (self._draw_pos-1))
_turtle <= _svg.set(attributeName="display",
attributeType="CSS", to="block",
begin="animateLine%s.begin" % self._draw_pos,
end="animateLine%s.end" % self._draw_pos)
#_turtle <= _svg.animateMotion(From="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
# to="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
# begin="animateLine%s.begin" % self._draw_pos,
# end="animateLine%s.end" % self._draw_pos)
#_turtle <= _svg.animate(attributeName="fill",
# From=_previous.fill, to=fill, dur=_dur,
# begin="animateLine%s.begin" % self._draw_pos)
self._draw_pos+=1
self._canvas <= _turtle
_line= _svg.line(x1=_x0*self.xscale, y1=_y0*self.yscale,
x2=_x0*self.xscale, y2=_y0*self.yscale,
style={'stroke': fill, 'stroke-width': width})
_an1=_svg.animate(Id="animateLine%s" % self._draw_pos,
attributeName="x2", attributeType="XML",
From=_x0*self.xscale, to=_x1*self.xscale,
dur=_dur, fill='freeze')
_an2=_svg.animate(attributeName="y2", attributeType="XML",
begin="animateLine%s.begin" % self._draw_pos,
From=_y0*self.xscale, to=_y1*self.xscale,
dur=_dur, fill='freeze')
# draw turtle
if lineitem.isvisible():
_turtle=_svg.polygon(points=" ".join(_shape),
transform="rotate(%s)" % (lineitem.heading() - 90),
style={'stroke': fill, 'fill': fill,
'stroke-width': width, 'display': 'none'})
_turtle <= _svg.animateMotion(From="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
to="%s,%s" % (_x1*self.xscale, _y1*self.yscale),
dur=_dur, begin="animateLine%s.begin" % self._draw_pos)
_turtle <= _svg.set(attributeName="display", attributeType="CSS",
to="block",
begin="animateLine%s.begin" % self._draw_pos,
end="animateLine%s.end" % self._draw_pos)
self._canvas <= _turtle
self._previous_turtle_attributes[lineitem]=lineitem
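        # chain the SVG animations: each segment's animation begins when the
        # previous one ends, so the drawing plays back sequentially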
if self._draw_pos == 1:
_an1.setAttribute('begin', "0s")
else:
_an1.setAttribute('begin', "animateLine%s.end" % (self._draw_pos-1))
_line <= _an1
_line <= _an2
self._canvas <= _line
def _delete(self, item):
"""Delete graphics item from canvas.
If item is"all" delete all graphics items.
"""
pass
def _update(self):
"""Redraw graphics items on canvas
"""
pass
def _delay(self, delay):
"""Delay subsequent canvas actions for delay ms."""
pass
def _iscolorstring(self, color):
"""Check if the string color is a legal Tkinter color string.
"""
return True #fix me
#try:
# rgb = self.cv.winfo_rgb(color)
# ok = True
#except TK.TclError:
# ok = False
#return ok
def _bgcolor(self, color=None):
"""Set canvas' backgroundcolor if color is not None,
else return backgroundcolor."""
if color is not None:
self.cv.style.backgroundColor=color
else:
return self.cv.style.backgroundColor
def _write(self, pos, txt, align, font, pencolor):
"""Write txt at pos in canvas with specified font
and color.
Return text item and x-coord of right bottom corner
of text's bounding box."""
self._draw_pos+=1
_text= _svg.text(txt, x=pos[0], y=pos[1], fill=pencolor,
style={'display': 'none'})
_text <= _svg.animate(Id="animateLine%s" % self._draw_pos,
attributeName="display", attributeType="CSS",
From="block", to="block", dur="1s", fill='freeze',
begin="animateLine%s.end" % (self._draw_pos-1))
self._canvas <= _text
return Vec2D(pos[0]+50, pos[1]+50) #fix me
## def _dot(self, pos, size, color):
## """may be implemented for some other graphics toolkit"""
def _createimage(self, image):
"""Create and return image item on canvas.
"""
pass
def _drawimage(self, item, pos, image):
"""Configure image item as to draw image object
at position (x,y) on canvas)
"""
pass
def _setbgpic(self, item, image):
"""Configure image item as to draw image object
at center of canvas. Set item to the first item
in the displaylist, so it will be drawn below
any other item ."""
pass
def _type(self, item):
"""Return 'line' or 'polygon' or 'image' depending on
type of item.
"""
pass
def _resize(self, canvwidth=None, canvheight=None, bg=None):
"""Resize the canvas the turtles are drawing on. Does
not alter the drawing window.
"""
self.cv.style.width=canvwidth
self.cv.style.height=canvheight
if bg is not None:
self.cv.style.backgroundColor=bg
def _window_size(self):
""" Return the width and height of the turtle window.
"""
#for now just return canvas width/height
return self.cv.width, self.cv.height
def mainloop(self):
"""Starts event loop - calling Tkinter's mainloop function.
No argument.
Must be last statement in a turtle graphics program.
Must NOT be used if a script is run from within IDLE in -n mode
(No subprocess) - for interactive use of turtle graphics.
Example (for a TurtleScreen instance named screen):
>>> screen.mainloop()
"""
pass
def textinput(self, title, prompt):
"""Pop up a dialog window for input of a string.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what information to input.
Return the string input
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.textinput("NIM", "Name of first player:")
"""
pass
def numinput(self, title, prompt, default=None, minval=None, maxval=None):
"""Pop up a dialog window for input of a number.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what numerical information to input.
default: default value
        minval: minimum value for input
maxval: maximum value for input
The number input must be in the range minval .. maxval if these are
given. If not, a hint is issued and the dialog remains open for
correction. Return the number input.
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.numinput("Poker", "Your stakes:", 1000, minval=10, maxval=10000)
"""
pass
##############################################################################
### End of Tkinter - interface ###
##############################################################################
class Terminator (Exception):
"""Will be raised in TurtleScreen.update, if _RUNNING becomes False.
This stops execution of a turtle graphics script.
Main purpose: use in the Demo-Viewer turtle.Demo.py.
"""
pass
class TurtleGraphicsError(Exception):
"""Some TurtleGraphics Error
"""
pass
class Shape:
"""Data structure modeling shapes.
attribute _type is one of "polygon", "image", "compound"
    attribute _data is - depending on _type - a polygon-tuple,
an image or a list constructed using the addcomponent method.
"""
def __init__(self, type_, data=None):
self._type = type_
if type_ == "polygon":
if isinstance(data, list):
data = tuple(data)
elif type_ == "image":
if isinstance(data, str):
if data.lower().endswith(".gif") and isfile(data):
data = TurtleScreen._image(data)
# else data assumed to be Photoimage
elif type_ == "compound":
data = []
else:
raise TurtleGraphicsError("There is no shape type %s" % type_)
self._data = data
def addcomponent(self, poly, fill, outline=None):
"""Add component to a shape of type compound.
Arguments: poly is a polygon, i. e. a tuple of number pairs.
fill is the fillcolor of the component,
outline is the outline color of the component.
        call (for a Shape object named s):
-- s.addcomponent(((0,0), (10,10), (-10,10)), "red", "blue")
Example:
>>> poly = ((0,0),(10,-5),(0,10),(-10,-5))
>>> s = Shape("compound")
>>> s.addcomponent(poly, "red", "blue")
>>> # .. add more components and then use register_shape()
"""
if self._type != "compound":
raise TurtleGraphicsError("Cannot add component to %s Shape"
% self._type)
if outline is None:
outline = fill
self._data.append([poly, fill, outline])
class TurtleScreen(TurtleScreenBase):
"""Provides screen oriented methods like setbg etc.
Only relies upon the methods of TurtleScreenBase and NOT
upon components of the underlying graphics toolkit -
which is Tkinter in this case.
"""
_RUNNING = True
def __init__(self, cv, mode=_CFG["mode"],
colormode=_CFG["colormode"], delay=_CFG["delay"]):
self._shapes = {
"arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))),
"turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7),
(-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6),
(-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6),
(5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10),
(2,14))),
"circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88),
(5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51),
(-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0),
(-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09),
(-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51),
(5.88,-8.09), (8.09,-5.88), (9.51,-3.09))),
"square" : Shape("polygon", ((10,-10), (10,10), (-10,10),
(-10,-10))),
"triangle" : Shape("polygon", ((10,-5.77), (0,11.55),
(-10,-5.77))),
"classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))),
"blank" : Shape("image", None) #self._blankimage())
}
self._bgpics = {"nopic" : ""}
TurtleScreenBase.__init__(self, cv)
self._mode = mode
self._delayvalue = delay
self._colormode = _CFG["colormode"]
self._keys = []
self.clear()
def clear(self):
"""Delete all drawings and all turtles from the TurtleScreen.
No argument.
Reset empty TurtleScreen to its initial state: white background,
no backgroundimage, no eventbindings and tracing on.
Example (for a TurtleScreen instance named screen):
>>> screen.clear()
Note: this method is not available as function.
"""
self._delayvalue = _CFG["delay"]
self._colormode = _CFG["colormode"]
self._delete("all")
self._bgpic = self._createimage("")
self._bgpicname = "nopic"
self._tracing = 1
self._updatecounter = 0
self._turtles = []
self.bgcolor("white")
#for btn in 1, 2, 3:
# self.onclick(None, btn)
#self.onkeypress(None)
#for key in self._keys[:]:
# self.onkey(None, key)
# self.onkeypress(None, key)
Turtle._pen = None
def mode(self, mode=None):
"""Set turtle-mode ('standard', 'logo' or 'world') and perform reset.
Optional argument:
        mode -- one of the strings 'standard', 'logo' or 'world'
Mode 'standard' is compatible with turtle.py.
Mode 'logo' is compatible with most Logo-Turtle-Graphics.
Mode 'world' uses userdefined 'worldcoordinates'. *Attention*: in
this mode angles appear distorted if x/y unit-ratio doesn't equal 1.
If mode is not given, return the current mode.
Mode Initial turtle heading positive angles
------------|-------------------------|-------------------
'standard' to the right (east) counterclockwise
'logo' upward (north) clockwise
Examples:
>>> mode('logo') # resets turtle heading to north
>>> mode()
'logo'
"""
if mode is None:
return self._mode
mode = mode.lower()
if mode not in ["standard", "logo", "world"]:
raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode)
self._mode = mode
if mode in ["standard", "logo"]:
self._setscrollregion(-self.canvwidth//2, -self.canvheight//2,
self.canvwidth//2, self.canvheight//2)
self.xscale = self.yscale = 1.0
self.reset()
def setworldcoordinates(self, llx, lly, urx, ury):
"""Set up a user defined coordinate-system.
Arguments:
llx -- a number, x-coordinate of lower left corner of canvas
lly -- a number, y-coordinate of lower left corner of canvas
urx -- a number, x-coordinate of upper right corner of canvas
ury -- a number, y-coordinate of upper right corner of canvas
        Set up the user coordinate system and switch to mode 'world' if necessary.
This performs a screen.reset. If mode 'world' is already active,
all drawings are redrawn according to the new coordinates.
        But ATTENTION: in user-defined coordinate systems angles may appear
distorted. (see Screen.mode())
Example (for a TurtleScreen instance named screen):
>>> screen.setworldcoordinates(-10,-0.5,50,1.5)
>>> for _ in range(36):
... left(10)
... forward(0.5)
"""
if self.mode() != "world":
self.mode("world")
xspan = float(urx - llx)
yspan = float(ury - lly)
wx, wy = self._window_size()
self.screensize(wx-20, wy-20)
oldxscale, oldyscale = self.xscale, self.yscale
self.xscale = self.canvwidth / xspan
self.yscale = self.canvheight / yspan
srx1 = llx * self.xscale
sry1 = -ury * self.yscale
srx2 = self.canvwidth + srx1
sry2 = self.canvheight + sry1
self._setscrollregion(srx1, sry1, srx2, sry2)
self._rescale(self.xscale/oldxscale, self.yscale/oldyscale)
#self.update()
def register_shape(self, name, shape=None):
"""Adds a turtle shape to TurtleScreen's shapelist.
Arguments:
(1) name is the name of a gif-file and shape is None.
Installs the corresponding image shape.
!! Image-shapes DO NOT rotate when turning the turtle,
!! so they do not display the heading of the turtle!
(2) name is an arbitrary string and shape is a tuple
of pairs of coordinates. Installs the corresponding
polygon shape
(3) name is an arbitrary string and shape is a
(compound) Shape object. Installs the corresponding
compound shape.
To use a shape, you have to issue the command shape(shapename).
call: register_shape("turtle.gif")
--or: register_shape("tri", ((0,0), (10,10), (-10,10)))
Example (for a TurtleScreen instance named screen):
>>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3)))
"""
if shape is None:
# image
if name.lower().endswith(".gif"):
shape = Shape("image", self._image(name))
else:
raise TurtleGraphicsError("Bad arguments for register_shape.\n"
+ "Use help(register_shape)" )
elif isinstance(shape, tuple):
shape = Shape("polygon", shape)
## else shape assumed to be Shape-instance
self._shapes[name] = shape
def _colorstr(self, color):
"""Return color string corresponding to args.
Argument may be a string or a tuple of three
numbers corresponding to actual colormode,
i.e. in the range 0<=n<=colormode.
If the argument doesn't represent a color,
an error is raised.
"""
if len(color) == 1:
color = color[0]
if isinstance(color, str):
if self._iscolorstring(color) or color == "":
return color
else:
raise TurtleGraphicsError("bad color string: %s" % str(color))
try:
r, g, b = color
except:
raise TurtleGraphicsError("bad color arguments: %s" % str(color))
if self._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(color))
return "#%02x%02x%02x" % (r, g, b)
def _color(self, cstr):
if not cstr.startswith("#"):
return cstr
if len(cstr) == 7:
cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)]
elif len(cstr) == 4:
cl = [16*int(cstr[h], 16) for h in cstr[1:]]
else:
raise TurtleGraphicsError("bad colorstring: %s" % cstr)
return tuple([c * self._colormode/255 for c in cl])
def colormode(self, cmode=None):
"""Return the colormode or set it to 1.0 or 255.
Optional argument:
cmode -- one of the values 1.0 or 255
r, g, b values of colortriples have to be in range 0..cmode.
Example (for a TurtleScreen instance named screen):
>>> screen.colormode()
1.0
>>> screen.colormode(255)
>>> pencolor(240,160,80)
"""
if cmode is None:
return self._colormode
if cmode == 1.0:
self._colormode = float(cmode)
elif cmode == 255:
self._colormode = int(cmode)
def reset(self):
"""Reset all Turtles on the Screen to their initial state.
No argument.
Example (for a TurtleScreen instance named screen):
>>> screen.reset()
"""
for turtle in self._turtles:
turtle._setmode(self._mode)
turtle.reset()
def turtles(self):
"""Return the list of turtles on the screen.
Example (for a TurtleScreen instance named screen):
>>> screen.turtles()
[<turtle.Turtle object at 0x00E11FB0>]
"""
return self._turtles
def bgcolor(self, *args):
"""Set or return backgroundcolor of the TurtleScreen.
Arguments (if given): a color string or three numbers
in the range 0..colormode or a 3-tuple of such numbers.
Example (for a TurtleScreen instance named screen):
>>> screen.bgcolor("orange")
>>> screen.bgcolor()
'orange'
>>> screen.bgcolor(0.5,0,0.5)
>>> screen.bgcolor()
'#800080'
"""
if args:
color = self._colorstr(args)
else:
color = None
color = self._bgcolor(color)
if color is not None:
color = self._color(color)
return color
def tracer(self, n=None, delay=None):
"""Turns turtle animation on/off and set delay for update drawings.
Optional arguments:
n -- nonnegative integer
delay -- nonnegative integer
If n is given, only each n-th regular screen update is really performed.
(Can be used to accelerate the drawing of complex graphics.)
Second arguments sets delay value (see RawTurtle.delay())
Example (for a TurtleScreen instance named screen):
>>> screen.tracer(8, 25)
>>> dist = 2
>>> for i in range(200):
... fd(dist)
... rt(90)
... dist += 2
"""
if n is None:
return self._tracing
self._tracing = int(n)
self._updatecounter = 0
if delay is not None:
self._delayvalue = int(delay)
if self._tracing:
self.update()
def delay(self, delay=None):
""" Return or set the drawing delay in milliseconds.
Optional argument:
delay -- positive integer
Example (for a TurtleScreen instance named screen):
>>> screen.delay(15)
>>> screen.delay()
15
"""
if delay is None:
return self._delayvalue
self._delayvalue = int(delay)
def _incrementudc(self):
"""Increment update counter."""
if not TurtleScreen._RUNNING:
            TurtleScreen._RUNNING = True
raise Terminator
if self._tracing > 0:
self._updatecounter += 1
self._updatecounter %= self._tracing
def update(self):
"""Perform a TurtleScreen update.
"""
return
tracing = self._tracing
self._tracing = True
for t in self.turtles():
#t._update_data()
t._drawturtle()
self._tracing = tracing
self._update()
def window_width(self):
""" Return the width of the turtle window.
Example (for a TurtleScreen instance named screen):
>>> screen.window_width()
640
"""
return self._window_size()[0]
def window_height(self):
""" Return the height of the turtle window.
Example (for a TurtleScreen instance named screen):
>>> screen.window_height()
480
"""
return self._window_size()[1]
def getcanvas(self):
"""Return the Canvas of this TurtleScreen.
No argument.
Example (for a Screen instance named screen):
>>> cv = screen.getcanvas()
>>> cv
<turtle.ScrolledCanvas instance at 0x010742D8>
"""
return self.cv
def getshapes(self):
"""Return a list of names of all currently available turtle shapes.
No argument.
Example (for a TurtleScreen instance named screen):
>>> screen.getshapes()
['arrow', 'blank', 'circle', ... , 'turtle']
"""
return sorted(self._shapes.keys())
def onclick(self, fun, btn=1, add=None):
"""Bind fun to mouse-click event on canvas.
Arguments:
fun -- a function with two arguments, the coordinates of the
clicked point on the canvas.
num -- the number of the mouse-button, defaults to 1
Example (for a TurtleScreen instance named screen)
>>> screen.onclick(goto)
>>> # Subsequently clicking into the TurtleScreen will
>>> # make the turtle move to the clicked point.
>>> screen.onclick(None)
"""
self._onscreenclick(fun, btn, add)
def onkey(self, fun, key):
"""Bind fun to key-release event of key.
Arguments:
fun -- a function with no arguments
key -- a string: key (e.g. "a") or key-symbol (e.g. "space")
In order to be able to register key-events, TurtleScreen
must have focus. (See method listen.)
Example (for a TurtleScreen instance named screen):
>>> def f():
... fd(50)
... lt(60)
...
>>> screen.onkey(f, "Up")
>>> screen.listen()
Subsequently the turtle can be moved by repeatedly pressing
the up-arrow key, consequently drawing a hexagon
"""
if fun is None:
if key in self._keys:
self._keys.remove(key)
elif key not in self._keys:
self._keys.append(key)
self._onkeyrelease(fun, key)
def onkeypress(self, fun, key=None):
"""Bind fun to key-press event of key if key is given,
or to any key-press-event if no key is given.
Arguments:
fun -- a function with no arguments
key -- a string: key (e.g. "a") or key-symbol (e.g. "space")
In order to be able to register key-events, TurtleScreen
must have focus. (See method listen.)
Example (for a TurtleScreen instance named screen
and a Turtle instance named turtle):
>>> def f():
... fd(50)
... lt(60)
...
>>> screen.onkeypress(f, "Up")
>>> screen.listen()
Subsequently the turtle can be moved by repeatedly pressing
        the up-arrow key, or by keeping the up-arrow key pressed,
        consequently drawing a hexagon.
"""
if fun is None:
if key in self._keys:
self._keys.remove(key)
elif key is not None and key not in self._keys:
self._keys.append(key)
self._onkeypress(fun, key)
def listen(self, xdummy=None, ydummy=None):
"""Set focus on TurtleScreen (in order to collect key-events)
No arguments.
Dummy arguments are provided in order
to be able to pass listen to the onclick method.
Example (for a TurtleScreen instance named screen):
>>> screen.listen()
"""
self._listen()
def ontimer(self, fun, t=0):
"""Install a timer, which calls fun after t milliseconds.
Arguments:
fun -- a function with no arguments.
t -- a number >= 0
Example (for a TurtleScreen instance named screen):
>>> running = True
>>> def f():
... if running:
... fd(50)
... lt(60)
... screen.ontimer(f, 250)
...
>>> f() # makes the turtle marching around
>>> running = False
"""
self._ontimer(fun, t)
def bgpic(self, picname=None):
"""Set background image or return name of current backgroundimage.
Optional argument:
picname -- a string, name of a gif-file or "nopic".
If picname is a filename, set the corresponding image as background.
If picname is "nopic", delete backgroundimage, if present.
If picname is None, return the filename of the current backgroundimage.
Example (for a TurtleScreen instance named screen):
>>> screen.bgpic()
'nopic'
>>> screen.bgpic("landscape.gif")
>>> screen.bgpic()
'landscape.gif'
"""
if picname is None:
return self._bgpicname
if picname not in self._bgpics:
self._bgpics[picname] = self._image(picname)
self._setbgpic(self._bgpic, self._bgpics[picname])
self._bgpicname = picname
def screensize(self, canvwidth=None, canvheight=None, bg=None):
"""Resize the canvas the turtles are drawing on.
Optional arguments:
canvwidth -- positive integer, new width of canvas in pixels
canvheight -- positive integer, new height of canvas in pixels
bg -- colorstring or color-tuple, new backgroundcolor
If no arguments are given, return current (canvaswidth, canvasheight)
Do not alter the drawing window. To observe hidden parts of
the canvas use the scrollbars. (Can make visible those parts
of a drawing, which were outside the canvas before!)
Example (for a Turtle instance named turtle):
>>> turtle.screensize(2000,1500)
>>> # e.g. to search for an erroneously escaped turtle ;-)
"""
return self._resize(canvwidth, canvheight, bg)
onscreenclick = onclick
resetscreen = reset
clearscreen = clear
addshape = register_shape
onkeyrelease = onkey
class TNavigator:
"""Navigation part of the RawTurtle.
Implements methods for turtle movement.
"""
START_ORIENTATION = {
"standard": Vec2D(1.0, 0.0),
"world" : Vec2D(1.0, 0.0),
"logo" : Vec2D(0.0, 1.0) }
DEFAULT_MODE = "standard"
DEFAULT_ANGLEOFFSET = 0
DEFAULT_ANGLEORIENT = 1
def __init__(self, mode=DEFAULT_MODE):
self._angleOffset = self.DEFAULT_ANGLEOFFSET
self._angleOrient = self.DEFAULT_ANGLEORIENT
self._mode = mode
self.undobuffer = None
self.degrees()
self._mode = None
self._setmode(mode)
TNavigator.reset(self)
def reset(self):
"""reset turtle to its initial values
Will be overwritten by parent class
"""
self._position = Vec2D(0.0, 0.0)
self._orient = TNavigator.START_ORIENTATION[self._mode]
def _setmode(self, mode=None):
"""Set turtle-mode to 'standard', 'world' or 'logo'.
"""
if mode is None:
return self._mode
if mode not in ["standard", "logo", "world"]:
return
self._mode = mode
if mode in ["standard", "world"]:
self._angleOffset = 0
self._angleOrient = 1
else: # mode == "logo":
self._angleOffset = self._fullcircle/4.
self._angleOrient = -1
def _setDegreesPerAU(self, fullcircle):
"""Helper function for degrees() and radians()"""
self._fullcircle = fullcircle
self._degreesPerAU = 360/fullcircle
if self._mode == "standard":
self._angleOffset = 0
else:
self._angleOffset = fullcircle/4.
def degrees(self, fullcircle=360.0):
""" Set angle measurement units to degrees.
Optional argument:
fullcircle - a number
Set angle measurement units, i. e. set number
        of 'degrees' for a full circle. Default value is
360 degrees.
Example (for a Turtle instance named turtle):
>>> turtle.left(90)
>>> turtle.heading()
90
Change angle measurement unit to grad (also known as gon,
grade, or gradian and equals 1/100-th of the right angle.)
>>> turtle.degrees(400.0)
>>> turtle.heading()
100
"""
self._setDegreesPerAU(fullcircle)
def radians(self):
""" Set the angle measurement units to radians.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.heading()
90
>>> turtle.radians()
>>> turtle.heading()
1.5707963267948966
"""
self._setDegreesPerAU(2*math.pi)
def _go(self, distance):
"""move turtle forward by specified distance"""
#console.log('_go')
ende = self._position + self._orient * distance
self._goto(ende)
def _rotate(self, angle):
"""Turn turtle counterclockwise by specified angle if angle > 0."""
#console.log('_rotate')
angle *= self._degreesPerAU
self._orient = self._orient.rotate(angle)
def _goto(self, end):
"""move turtle to position end."""
#console.log('_goto')
self._position = end
def forward(self, distance):
"""Move the turtle forward by the specified distance.
Aliases: forward | fd
Argument:
distance -- a number (integer or float)
Move the turtle forward by the specified distance, in the direction
the turtle is headed.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.forward(25)
>>> turtle.position()
(25.00,0.00)
>>> turtle.forward(-75)
>>> turtle.position()
(-50.00,0.00)
"""
self._go(distance)
def back(self, distance):
"""Move the turtle backward by distance.
Aliases: back | backward | bk
Argument:
distance -- a number
        Move the turtle backward by distance, opposite to the direction the
turtle is headed. Do not change the turtle's heading.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.backward(30)
>>> turtle.position()
(-30.00, 0.00)
"""
self._go(-distance)
def right(self, angle):
"""Turn turtle right by angle units.
Aliases: right | rt
Argument:
angle -- a number (integer or float)
Turn turtle right by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See this.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.right(45)
>>> turtle.heading()
337.0
"""
self._rotate(-angle)
def left(self, angle):
"""Turn turtle left by angle units.
Aliases: left | lt
Argument:
angle -- a number (integer or float)
Turn turtle left by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See this.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.left(45)
>>> turtle.heading()
67.0
"""
self._rotate(angle)
def pos(self):
"""Return the turtle's current location (x,y), as a Vec2D-vector.
Aliases: pos | position
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 240.00)
"""
return self._position
def xcor(self):
""" Return the turtle's x coordinate.
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print turtle.xcor()
50.0
"""
return self._position[0]
def ycor(self):
""" Return the turtle's y coordinate
---
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print turtle.ycor()
86.6025403784
"""
return self._position[1]
def goto(self, x, y=None):
"""Move turtle to an absolute position.
Aliases: setpos | setposition | goto
Arguments:
x -- a number or a pair/vector of numbers
y -- a number or None
call: goto(x, y) # two coordinates
--or: goto((x, y)) # a pair (tuple) of coordinates
--or: goto(vec) # e.g. as returned by pos()
Move turtle to an absolute position. If the pen is down,
a line will be drawn. The turtle's orientation does not change.
Example (for a Turtle instance named turtle):
>>> tp = turtle.pos()
>>> tp
(0.00, 0.00)
>>> turtle.setpos(60,30)
>>> turtle.pos()
(60.00,30.00)
>>> turtle.setpos((20,80))
>>> turtle.pos()
(20.00,80.00)
>>> turtle.setpos(tp)
>>> turtle.pos()
(0.00,0.00)
"""
if y is None:
self._goto(Vec2D(*x))
else:
self._goto(Vec2D(x, y))
def home(self):
"""Move turtle to the origin - coordinates (0,0).
No arguments.
Move turtle to the origin - coordinates (0,0) and set its
heading to its start-orientation (which depends on mode).
Example (for a Turtle instance named turtle):
>>> turtle.home()
"""
self.goto(0, 0)
self.setheading(0)
def setx(self, x):
"""Set the turtle's first coordinate to x
Argument:
x -- a number (integer or float)
Set the turtle's first coordinate to x, leave second coordinate
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 240.00)
>>> turtle.setx(10)
>>> turtle.position()
(10.00, 240.00)
"""
self._goto(Vec2D(x, self._position[1]))
def sety(self, y):
"""Set the turtle's second coordinate to y
Argument:
y -- a number (integer or float)
Set the turtle's second coordinate to y, first coordinate remains
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 40.00)
>>> turtle.sety(-10)
>>> turtle.position()
(0.00, -10.00)
"""
self._goto(Vec2D(self._position[0], y))
def distance(self, x, y=None):
"""Return the distance from the turtle to (x,y) in turtle step units.
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number or None
call: distance(x, y) # two coordinates
--or: distance((x, y)) # a pair (tuple) of coordinates
--or: distance(vec) # e.g. as returned by pos()
--or: distance(mypen) # where mypen is another turtle
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 0.00)
>>> turtle.distance(30,40)
50.0
>>> pen = Turtle()
>>> pen.forward(77)
>>> turtle.distance(pen)
77.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
return abs(pos - self._position)
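# Illustrative note (editor's sketch): distance() is just the Euclidean
# norm of a Vec2D difference, abs(pos - self._position). From the origin:
# t.distance(3, 4) # sqrt(3**2 + 4**2) -> 5.0
# t.distance((3, 4)) # same, tuple form
# t.distance(otherturtle) # measures to otherturtle._position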
def towards(self, x, y=None):
"""Return the angle of the line from the turtle's position to (x, y).
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number or None
call: towards(x, y) # two coordinates
--or: towards((x, y)) # a pair (tuple) of coordinates
--or: towards(vec) # e.g. as returned by pos()
--or: towards(mypen) # where mypen is another turtle
Return the angle between the line from turtle-position to position
specified by x, y and the turtle's start orientation. (Depends on
modes - "standard" or "logo")
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(10.00, 10.00)
>>> turtle.towards(0,0)
225.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
x, y = pos - self._position
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
def heading(self):
""" Return the turtle's current heading.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.left(67)
>>> turtle.heading()
67.0
"""
x, y = self._orient
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
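# Editor's check of the heading formula above: with _orient == (0, 1),
# atan2(1, 0)*180/pi == 90.0. In "standard" mode (_angleOffset=0,
# _angleOrient=1, _degreesPerAU=1) that reads as 90.0; in "logo" mode
# (_angleOffset=90, _angleOrient=-1) the same orientation reads
# (90 + (-1)*90) % 360 == 0.0, i.e. straight "north".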
def setheading(self, to_angle):
"""Set the orientation of the turtle to to_angle.
Aliases: setheading | seth
Argument:
to_angle -- a number (integer or float)
Set the orientation of the turtle to to_angle.
Here are some common directions in degrees:
standard - mode: logo-mode:
-------------------|--------------------
0 - east 0 - north
90 - north 90 - east
180 - west 180 - south
270 - south 270 - west
Example (for a Turtle instance named turtle):
>>> turtle.setheading(90)
>>> turtle.heading()
90
"""
angle = (to_angle - self.heading())*self._angleOrient
full = self._fullcircle
angle = (angle+full/2.)%full - full/2.
self._rotate(angle)
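# Editor's sketch of the normalization above: (angle + full/2.) % full
# - full/2. folds any delta into [-full/2, full/2), so the turtle takes
# the shorter way around. With full == 360:
# delta = 270 -> (270+180) % 360 - 180 == -90 (turn right 90)
# delta = -450 -> (-450+180) % 360 - 180 == -90 (same turn)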
def circle(self, radius, extent = None, steps = None):
""" Draw a circle with given radius.
Arguments:
radius -- a number
extent (optional) -- a number
steps (optional) -- an integer
Draw a circle with given radius. The center is radius units left
of the turtle; extent - an angle - determines which part of the
circle is drawn. If extent is not given, draw the entire circle.
If extent is not a full circle, one endpoint of the arc is the
current pen position. Draw the arc in counterclockwise direction
if radius is positive, otherwise in clockwise direction. Finally
the direction of the turtle is changed by the amount of extent.
As the circle is approximated by an inscribed regular polygon,
steps determines the number of steps to use. If not given,
it will be calculated automatically. May be used to draw regular
polygons.
call: circle(radius) # full circle
--or: circle(radius, extent) # arc
--or: circle(radius, extent, steps)
--or: circle(radius, steps=6) # 6-sided polygon
Example (for a Turtle instance named turtle):
>>> turtle.circle(50)
>>> turtle.circle(120, 180) # semicircle
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
speed = self.speed()
if extent is None:
extent = self._fullcircle
if steps is None:
frac = abs(extent)/self._fullcircle
steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
w = 1.0 * extent / steps
w2 = 0.5 * w
l = 2.0 * radius * math.sin(w2*math.pi/180.0*self._degreesPerAU)
if radius < 0:
l, w, w2 = -l, -w, -w2
tr = self._tracer()
dl = self._delay()
if speed == 0:
self._tracer(0, 0)
else:
self.speed(0)
self._rotate(w2)
for i in range(steps):
self.speed(speed)
self._go(l)
self.speed(0)
self._rotate(w)
self._rotate(-w2)
if speed == 0:
self._tracer(tr, dl)
self.speed(speed)
if self.undobuffer:
self.undobuffer.cumulate = False
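# Worked example of the polygon approximation in circle() (editor's
# arithmetic, default 360-degree full circle): circle(50) gives
# frac = 1.0
# steps = 1 + int(min(11 + 50/6.0, 59.0)) # -> 20
# w = 360.0/20 # -> 18.0 deg per step
# l = 2*50*sin(radians(9.0)) # -> ~15.64 per chord
# so the "circle" is really a regular 20-gon entered and left with
# half-steps (w2) to keep the arc tangent at both endpoints.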
## three dummy methods to be implemented by child class:
def speed(self, s=0):
"""dummy method - to be overwritten by child class"""
def _tracer(self, a=None, b=None):
"""dummy method - to be overwritten by child class"""
def _delay(self, n=None):
"""dummy method - to be overwritten by child class"""
fd = forward
bk = back
backward = back
rt = right
lt = left
position = pos
setpos = goto
setposition = goto
seth = setheading
class TPen:
"""Drawing part of the RawTurtle.
Implements drawing properties.
"""
def __init__(self, resizemode=_CFG["resizemode"]):
self._resizemode = resizemode # or "user" or "noresize"
self.undobuffer = None
TPen._reset(self)
def _reset(self, pencolor=_CFG["pencolor"],
fillcolor=_CFG["fillcolor"]):
self._pensize = 1
self._shown = True
self._pencolor = pencolor
self._fillcolor = fillcolor
self._drawing = True
self._speed = 3
self._stretchfactor = (1., 1.)
self._shearfactor = 0.
self._tilt = 0.
self._shapetrafo = (1., 0., 0., 1.)
self._outlinewidth = 1
def resizemode(self, rmode=None):
"""Set resizemode to one of the values: "auto", "user", "noresize".
(Optional) Argument:
rmode -- one of the strings "auto", "user", "noresize"
Different resizemodes have the following effects:
- "auto" adapts the appearance of the turtle
corresponding to the value of pensize.
- "user" adapts the appearance of the turtle according to the
values of stretchfactor and outlinewidth (outline),
which are set by shapesize()
- "noresize" no adaption of the turtle's appearance takes place.
If no argument is given, return current resizemode.
resizemode("user") is called by a call of shapesize with arguments.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("noresize")
>>> turtle.resizemode()
'noresize'
"""
if rmode is None:
return self._resizemode
rmode = rmode.lower()
if rmode in ["auto", "user", "noresize"]:
self.pen(resizemode=rmode)
def pensize(self, width=None):
"""Set or return the line thickness.
Aliases: pensize | width
Argument:
width -- positive number
Set the line thickness to width or return it. If resizemode is set
to "auto" and turtleshape is a polygon, that polygon is drawn with
the same line thickness. If no argument is given, current pensize
is returned.
Example (for a Turtle instance named turtle):
>>> turtle.pensize()
1
>>> turtle.pensize(10) # from here on lines of width 10 are drawn
"""
if width is None:
return self._pensize
self.pen(pensize=width)
def penup(self):
"""Pull the pen up -- no drawing when moving.
Aliases: penup | pu | up
No argument
Example (for a Turtle instance named turtle):
>>> turtle.penup()
"""
if not self._drawing:
return
self.pen(pendown=False)
def pendown(self):
"""Pull the pen down -- drawing when moving.
Aliases: pendown | pd | down
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.pendown()
"""
if self._drawing:
return
self.pen(pendown=True)
def isdown(self):
"""Return True if pen is down, False if it's up.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.penup()
>>> turtle.isdown()
False
>>> turtle.pendown()
>>> turtle.isdown()
True
"""
return self._drawing
def speed(self, speed=None):
""" Return or set the turtle's speed.
Optional argument:
speed -- an integer in the range 0..10 or a speedstring (see below)
Set the turtle's speed to an integer value in the range 0 .. 10.
If no argument is given: return current speed.
If input is a number greater than 10 or smaller than 0.5,
speed is set to 0.
Speedstrings are mapped to speedvalues in the following way:
'fastest' : 0
'fast' : 10
'normal' : 6
'slow' : 3
'slowest' : 1
Speeds from 1 to 10 enforce increasingly faster animation of
line drawing and turtle turning.
Attention:
speed = 0 : *no* animation takes place. forward/back makes turtle jump
and likewise left/right make the turtle turn instantly.
Example (for a Turtle instance named turtle):
>>> turtle.speed(3)
"""
speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 }
if speed is None:
return self._speed
if speed in speeds:
speed = speeds[speed]
elif 0.5 < speed < 10.5:
speed = int(round(speed))
else:
speed = 0
self.pen(speed=speed)
def color(self, *args):
"""Return or set the pencolor and fillcolor.
Arguments:
Several input formats are allowed.
They use 0, 1, 2, or 3 arguments as follows:
color()
Return the current pencolor and the current fillcolor
as a pair of color specification strings as are returned
by pencolor and fillcolor.
color(colorstring), color((r,g,b)), color(r,g,b)
inputs as in pencolor, set both, fillcolor and pencolor,
to the given value.
color(colorstring1, colorstring2),
color((r1,g1,b1), (r2,g2,b2))
equivalent to pencolor(colorstring1) and fillcolor(colorstring2)
and analogously, if the other input format is used.
If turtleshape is a polygon, outline and interior of that polygon
is drawn with the newly set colors.
For more info see: pencolor, fillcolor
Example (for a Turtle instance named turtle):
>>> turtle.color('red', 'green')
>>> turtle.color()
('red', 'green')
>>> colormode(255)
>>> color((40, 80, 120), (160, 200, 240))
>>> color()
('#285078', '#a0c8f0')
"""
if args:
l = len(args)
if l == 1:
pcolor = fcolor = args[0]
elif l == 2:
pcolor, fcolor = args
elif l == 3:
pcolor = fcolor = args
pcolor = self._colorstr(pcolor)
fcolor = self._colorstr(fcolor)
self.pen(pencolor=pcolor, fillcolor=fcolor)
else:
return self._color(self._pencolor), self._color(self._fillcolor)
def pencolor(self, *args):
""" Return or set the pencolor.
Arguments:
Four input formats are allowed:
- pencolor()
Return the current pencolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- pencolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- pencolor((r, g, b))
*a tuple* of r, g, and b, which represent, an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- pencolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the outline of that polygon is drawn
with the newly set pencolor.
Example (for a Turtle instance named turtle):
>>> turtle.pencolor('brown')
>>> tup = (0.2, 0.8, 0.55)
>>> turtle.pencolor(tup)
>>> turtle.pencolor()
'#33cc8c'
"""
if args:
color = self._colorstr(args)
if color == self._pencolor:
return
self.pen(pencolor=color)
else:
return self._color(self._pencolor)
def fillcolor(self, *args):
""" Return or set the fillcolor.
Arguments:
Four input formats are allowed:
- fillcolor()
Return the current fillcolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- fillcolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- fillcolor((r, g, b))
*a tuple* of r, g, and b, which represent, an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- fillcolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the interior of that polygon is drawn
with the newly set fillcolor.
Example (for a Turtle instance named turtle):
>>> turtle.fillcolor('violet')
>>> col = turtle.pencolor()
>>> turtle.fillcolor(col)
>>> turtle.fillcolor(0, .5, 0)
"""
if args:
color = self._colorstr(args)
if color == self._fillcolor:
return
self.pen(fillcolor=color)
else:
return self._color(self._fillcolor)
def showturtle(self):
"""Makes the turtle visible.
Aliases: showturtle | st
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
>>> turtle.showturtle()
"""
self.pen(shown=True)
def hideturtle(self):
"""Makes the turtle invisible.
Aliases: hideturtle | ht
No argument.
It's a good idea to do this while you're in the
middle of a complicated drawing, because hiding
the turtle speeds up the drawing observably.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
"""
self.pen(shown=False)
def isvisible(self):
"""Return True if the Turtle is shown, False if it's hidden.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
>>> print turtle.isvisible()
False
"""
return self._shown
def pen(self, pen=None, **pendict):
"""Return or set the pen's attributes.
Arguments:
pen -- a dictionary with some or all of the below listed keys.
**pendict -- one or more keyword-arguments with the below
listed keys as keywords.
Return or set the pen's attributes in a 'pen-dictionary'
with the following key/value pairs:
"shown" : True/False
"pendown" : True/False
"pencolor" : color-string or color-tuple
"fillcolor" : color-string or color-tuple
"pensize" : positive number
"speed" : number in range 0..10
"resizemode" : "auto" or "user" or "noresize"
"stretchfactor": (positive number, positive number)
"shearfactor": number
"outline" : positive number
"tilt" : number
This dictionary can be used as argument for a subsequent
pen()-call to restore the former pen-state. Moreover one
or more of these attributes can be provided as keyword-arguments.
This can be used to set several pen attributes in one statement.
Examples (for a Turtle instance named turtle):
>>> turtle.pen(fillcolor="black", pencolor="red", pensize=10)
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'red', 'pendown': True, 'fillcolor': 'black',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
>>> penstate=turtle.pen()
>>> turtle.color("yellow","")
>>> turtle.penup()
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'yellow', 'pendown': False, 'fillcolor': '',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
>>> p.pen(penstate, fillcolor="green")
>>> p.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'red', 'pendown': True, 'fillcolor': 'green',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
"""
_pd = {"shown" : self._shown,
"pendown" : self._drawing,
"pencolor" : self._pencolor,
"fillcolor" : self._fillcolor,
"pensize" : self._pensize,
"speed" : self._speed,
"resizemode" : self._resizemode,
"stretchfactor" : self._stretchfactor,
"shearfactor" : self._shearfactor,
"outline" : self._outlinewidth,
"tilt" : self._tilt
}
#console.log('pen')
if not (pen or pendict):
return _pd
if isinstance(pen, dict):
p = pen
else:
p = {}
p.update(pendict)
_p_buf = {}
for key in p:
_p_buf[key] = _pd[key]
if self.undobuffer:
self.undobuffer.push(("pen", _p_buf))
newLine = False
if "pendown" in p:
if self._drawing != p["pendown"]:
newLine = True
if "pencolor" in p:
if isinstance(p["pencolor"], tuple):
p["pencolor"] = self._colorstr((p["pencolor"],))
if self._pencolor != p["pencolor"]:
newLine = True
if "pensize" in p:
if self._pensize != p["pensize"]:
newLine = True
if newLine:
self._newLine()
if "pendown" in p:
self._drawing = p["pendown"]
if "pencolor" in p:
self._pencolor = p["pencolor"]
if "pensize" in p:
self._pensize = p["pensize"]
if "fillcolor" in p:
if isinstance(p["fillcolor"], tuple):
p["fillcolor"] = self._colorstr((p["fillcolor"],))
self._fillcolor = p["fillcolor"]
if "speed" in p:
self._speed = p["speed"]
if "resizemode" in p:
self._resizemode = p["resizemode"]
if "stretchfactor" in p:
sf = p["stretchfactor"]
if isinstance(sf, (int, float)):
sf = (sf, sf)
self._stretchfactor = sf
if "shearfactor" in p:
self._shearfactor = p["shearfactor"]
if "outline" in p:
self._outlinewidth = p["outline"]
if "shown" in p:
self._shown = p["shown"]
if "tilt" in p:
self._tilt = p["tilt"]
if "stretchfactor" in p or "tilt" in p or "shearfactor" in p:
scx, scy = self._stretchfactor
shf = self._shearfactor
sa, ca = math.sin(self._tilt), math.cos(self._tilt)
self._shapetrafo = ( scx*ca, scy*(shf*ca + sa),
-scx*sa, scy*(ca - shf*sa))
self._update()
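# Editor's note on the shape-transform composition above: with
# stretchfactor (scx, scy), shearfactor shf and tilt angle a (radians),
# the stored matrix is
# ( scx*cos(a) scy*(shf*cos(a) + sin(a)) )
# ( -scx*sin(a) scy*(cos(a) - shf*sin(a)) )
# e.g. scx = scy = 1, shf = 0, a = 0 yields the identity (1, 0, 0, 1),
# matching the default _shapetrafo set in _reset().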
## three dummy methods to be implemented by child class:
def _newLine(self, usePos = True):
"""dummy method - to be overwritten by child class"""
def _update(self, count=True, forced=False):
"""dummy method - to be overwritten by child class"""
def _color(self, args):
"""dummy method - to be overwritten by child class"""
def _colorstr(self, args):
"""dummy method - to be overwritten by child class"""
width = pensize
up = penup
pu = penup
pd = pendown
down = pendown
st = showturtle
ht = hideturtle
class _TurtleImage:
"""Helper class: Datatype to store Turtle attributes
"""
def __init__(self, screen, shapeIndex):
self.screen = screen
self._type = None
self._setshape(shapeIndex)
def _setshape(self, shapeIndex):
#console.log("_setshape", self._type)
screen = self.screen
self.shapeIndex = shapeIndex
if self._type == "polygon" == screen._shapes[shapeIndex]._type:
return
if self._type == "image" == screen._shapes[shapeIndex]._type:
return
if self._type in ["image", "polygon"]:
screen._delete(self._item)
elif self._type == "compound":
for item in self._item:
screen._delete(item)
self._type = screen._shapes[shapeIndex]._type
return
#console.log(self._type)
if self._type == "polygon":
self._item = screen._createpoly()
elif self._type == "image":
self._item = screen._createimage(screen._shapes["blank"]._data)
elif self._type == "compound":
self._item = [screen._createpoly() for item in
screen._shapes[shapeIndex]._data]
#console.log(self._item)
class RawTurtle(TPen, TNavigator):
"""Animation part of the RawTurtle.
Puts RawTurtle upon a TurtleScreen and provides tools for
its animation.
"""
screens = []
def __init__(self, canvas=None,
shape=_CFG["shape"],
undobuffersize=_CFG["undobuffersize"],
visible=_CFG["visible"]):
if isinstance(canvas, _Screen):
self.screen = canvas
elif isinstance(canvas, TurtleScreen):
if canvas not in RawTurtle.screens:
RawTurtle.screens.append(canvas)
self.screen = canvas
#elif isinstance(canvas, (ScrolledCanvas, Canvas)):
# for screen in RawTurtle.screens:
# if screen.cv == canvas:
# self.screen = screen
# break
# else:
# self.screen = TurtleScreen(canvas)
# RawTurtle.screens.append(self.screen)
else:
raise TurtleGraphicsError("bad canvas argument %s" % canvas)
screen = self.screen
TNavigator.__init__(self, screen.mode())
TPen.__init__(self)
screen._turtles.append(self)
#self.drawingLineItem = screen._createline()
self.turtle = _TurtleImage(screen, shape)
self._poly = None
self._creatingPoly = False
self._fillitem = self._fillpath = None
self._shown = visible
self._hidden_from_screen = False
#self.currentLineItem = screen._createline()
self.currentLine = [self._position]
#self.items = [] #[self.currentLineItem]
self.stampItems = []
self._undobuffersize = undobuffersize
self.undobuffer = None #Tbuffer(undobuffersize)
#self._update()
def reset(self):
"""Delete the turtle's drawings and restore its default values.
No argument.
Delete the turtle's drawings from the screen, re-center the turtle
and set variables to the default values.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00,-22.00)
>>> turtle.heading()
100.0
>>> turtle.reset()
>>> turtle.position()
(0.00,0.00)
>>> turtle.heading()
0.0
"""
TNavigator.reset(self)
TPen._reset(self)
self._clear()
self._drawturtle()
#self._update()
def setundobuffer(self, size):
"""Set or disable undobuffer.
Argument:
size -- an integer or None
If size is an integer an empty undobuffer of given size is installed.
Size gives the maximum number of turtle-actions that can be undone
by the undo() function.
If size is None, no undobuffer is present.
Example (for a Turtle instance named turtle):
>>> turtle.setundobuffer(42)
"""
if size is None:
self.undobuffer = None
else:
self.undobuffer = Tbuffer(size)
def undobufferentries(self):
"""Return count of entries in the undobuffer.
No argument.
Example (for a Turtle instance named turtle):
>>> while undobufferentries():
... undo()
"""
if self.undobuffer is None:
return 0
return self.undobuffer.nr_of_items()
def _clear(self):
"""Delete all of pen's drawings"""
self._fillitem = self._fillpath = None
#for item in self.items:
# self.screen._delete(item)
#self.currentLineItem = #self.screen._createline()
self.currentLine = []
if self._drawing:
self.currentLine.append(self._position)
#self.items = [self.currentLineItem]
self.clearstamps()
#self.setundobuffer(self._undobuffersize)
def clear(self):
"""Delete the turtle's drawings from the screen. Do not move turtle.
No arguments.
Delete the turtle's drawings from the screen. Do not move turtle.
State and position of the turtle as well as drawings of other
turtles are not affected.
Examples (for a Turtle instance named turtle):
>>> turtle.clear()
"""
self._clear()
#self._update()
#def _update_data(self):
# self.screen._incrementudc()
# if self.screen._updatecounter != 0:
# return
# if len(self.currentLine)>1:
# self.screen._drawline(self.currentLineItem, self.currentLine,
# self._pencolor, self._pensize)
def _update(self):
"""Perform a Turtle-data update.
"""
return
screen = self.screen
if screen._tracing == 0:
return
elif screen._tracing == 1:
#self._update_data()
self._drawturtle()
#screen._update() # TurtleScreenBase
#screen._delay(screen._delayvalue) # TurtleScreenBase
else:
#self._update_data()
if screen._updatecounter == 0:
for t in screen.turtles():
t._drawturtle()
#screen._update()
def _tracer(self, flag=None, delay=None):
"""Turns turtle animation on/off and set delay for update drawings.
Optional arguments:
n -- nonnegative integer
delay -- nonnegative integer
If n is given, only each n-th regular screen update is really performed.
(Can be used to accelerate the drawing of complex graphics.)
Second argument sets delay value (see RawTurtle.delay())
Example (for a Turtle instance named turtle):
>>> turtle.tracer(8, 25)
>>> dist = 2
>>> for i in range(200):
... turtle.fd(dist)
... turtle.rt(90)
... dist += 2
"""
return self.screen.tracer(flag, delay)
def _color(self, args):
return self.screen._color(args)
def _colorstr(self, args):
return self.screen._colorstr(args)
def _cc(self, args):
"""Convert colortriples to hexstrings.
"""
if isinstance(args, str):
return args
try:
r, g, b = args
except (TypeError, ValueError):
raise TurtleGraphicsError("bad color arguments: %s" % str(args))
if self.screen._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(args))
return "#%02x%02x%02x" % (r, g, b)
def shape(self, name=None):
"""Set turtle shape to shape with given name / return current shapename.
Optional argument:
name -- a string, which is a valid shapename
Set turtle shape to shape with given name or, if name is not given,
return name of current shape.
Shape with name must exist in the TurtleScreen's shape dictionary.
Initially there are the following polygon shapes:
'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'.
To learn about how to deal with shapes see Screen-method register_shape.
Example (for a Turtle instance named turtle):
>>> turtle.shape()
'arrow'
>>> turtle.shape("turtle")
>>> turtle.shape()
'turtle'
"""
if name is None:
return self.turtle.shapeIndex
if name not in self.screen.getshapes():
raise TurtleGraphicsError("There is no shape named %s" % name)
self.turtle._setshape(name)
#self._update()
def shapesize(self, stretch_wid=None, stretch_len=None, outline=None):
"""Set/return turtle's stretchfactors/outline. Set resizemode to "user".
Optional arguments:
stretch_wid : positive number
stretch_len : positive number
outline : positive number
Return or set the pen's attributes x/y-stretchfactors and/or outline.
Set resizemode to "user".
If and only if resizemode is set to "user", the turtle will be displayed
stretched according to its stretchfactors:
stretch_wid is stretchfactor perpendicular to orientation
stretch_len is stretchfactor in direction of the turtle's orientation.
outline determines the width of the shape's outline.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("user")
>>> turtle.shapesize(5, 5, 12)
>>> turtle.shapesize(outline=8)
"""
if stretch_wid is stretch_len is outline is None:
stretch_wid, stretch_len = self._stretchfactor
return stretch_wid, stretch_len, self._outlinewidth
if stretch_wid == 0 or stretch_len == 0:
raise TurtleGraphicsError("stretch_wid/stretch_len must not be zero")
if stretch_wid is not None:
if stretch_len is None:
stretchfactor = stretch_wid, stretch_wid
else:
stretchfactor = stretch_wid, stretch_len
elif stretch_len is not None:
stretchfactor = self._stretchfactor[0], stretch_len
else:
stretchfactor = self._stretchfactor
if outline is None:
outline = self._outlinewidth
self.pen(resizemode="user",
stretchfactor=stretchfactor, outline=outline)
def shearfactor(self, shear=None):
"""Set or return the current shearfactor.
Optional argument: shear -- number, tangent of the shear angle
Shear the turtleshape according to the given shearfactor shear,
which is the tangent of the shear angle. DO NOT change the
turtle's heading (direction of movement).
If shear is not given: return the current shearfactor, i.e. the
tangent of the shear angle, by which lines parallel to the
heading of the turtle are sheared.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.shearfactor(0.5)
>>> turtle.shearfactor()
>>> 0.5
"""
if shear is None:
return self._shearfactor
self.pen(resizemode="user", shearfactor=shear)
def settiltangle(self, angle):
"""Rotate the turtleshape to point in the specified direction
Argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.settiltangle(45)
>>> stamp()
>>> turtle.fd(50)
>>> turtle.settiltangle(-45)
>>> stamp()
>>> turtle.fd(50)
"""
tilt = -angle * self._degreesPerAU * self._angleOrient
tilt = (tilt * math.pi / 180.0) % (2*math.pi)
self.pen(resizemode="user", tilt=tilt)
def tiltangle(self, angle=None):
"""Set or return the current tilt-angle.
Optional argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
If angle is not given: return the current tilt-angle, i.e. the angle
between the orientation of the turtleshape and the heading of the
turtle (its direction of movement).
Deprecated since Python 3.1
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(45)
>>> turtle.tiltangle()
"""
if angle is None:
tilt = -self._tilt * (180.0/math.pi) * self._angleOrient
return (tilt / self._degreesPerAU) % self._fullcircle
else:
self.settiltangle(angle)
def tilt(self, angle):
"""Rotate the turtleshape by angle.
Argument:
angle - a number
Rotate the turtleshape by angle from its current tilt-angle,
but do NOT change the turtle's heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(30)
>>> turtle.fd(50)
>>> turtle.tilt(30)
>>> turtle.fd(50)
"""
self.settiltangle(angle + self.tiltangle())
def shapetransform(self, t11=None, t12=None, t21=None, t22=None):
"""Set or return the current transformation matrix of the turtle shape.
Optional arguments: t11, t12, t21, t22 -- numbers.
If none of the matrix elements are given, return the transformation
matrix.
Otherwise set the given elements and transform the turtleshape
according to the matrix consisting of first row t11, t12 and
second row t21, t22.
Modify stretchfactor, shearfactor and tiltangle according to the
given matrix.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapesize(4,2)
>>> turtle.shearfactor(-0.5)
>>> turtle.shapetransform()
(4.0, -1.0, -0.0, 2.0)
"""
#console.log("shapetransform")
if t11 is t12 is t21 is t22 is None:
return self._shapetrafo
m11, m12, m21, m22 = self._shapetrafo
if t11 is not None: m11 = t11
if t12 is not None: m12 = t12
if t21 is not None: m21 = t21
if t22 is not None: m22 = t22
# use the merged m-values: the t-arguments may be None here
if m11 * m22 - m12 * m21 == 0:
raise TurtleGraphicsError("Bad shape transform matrix: must not be singular")
self._shapetrafo = (m11, m12, m21, m22)
alfa = math.atan2(-m21, m11) % (2 * math.pi)
sa, ca = math.sin(alfa), math.cos(alfa)
a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22,
sa*m11 + ca*m21, sa*m12 + ca*m22)
self._stretchfactor = a11, a22
self._shearfactor = a12/a22
self._tilt = alfa
self._update()
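# Editor's check of the decomposition above, using the docstring example:
# for the matrix (4.0, -1.0, -0.0, 2.0),
# alfa = atan2(0.0, 4.0) % (2*pi) == 0.0, so sa, ca == 0.0, 1.0 and
# a11, a12, a21, a22 == 4.0, -1.0, 0.0, 2.0. Hence
# stretchfactor == (4.0, 2.0), shearfactor == -1.0/2.0 == -0.5,
# tilt == 0.0 -- recovering the shapesize(4, 2)/shearfactor(-0.5) inputs.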
def _polytrafo(self, poly):
"""Computes transformed polygon shapes from a shape
according to current position and heading.
"""
screen = self.screen
p0, p1 = self._position
e0, e1 = self._orient
e = Vec2D(e0, e1 * screen.yscale / screen.xscale)
e0, e1 = (1.0 / abs(e)) * e
return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale)
for (x, y) in poly]
def get_shapepoly(self):
"""Return the current shape polygon as tuple of coordinate pairs.
No argument.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapetransform(4, -1, 0, 2)
>>> turtle.get_shapepoly()
((50, -20), (30, 20), (-50, 20), (-30, -20))
"""
shape = self.screen._shapes[self.turtle.shapeIndex]
if shape._type == "polygon":
return self._getshapepoly(shape._data, shape._type == "compound")
# else return None
def _getshapepoly(self, polygon, compound=False):
"""Calculate transformed shape polygon according to resizemode
and shapetransform.
"""
if self._resizemode == "user" or compound:
t11, t12, t21, t22 = self._shapetrafo
elif self._resizemode == "auto":
l = max(1, self._pensize/5.0)
t11, t12, t21, t22 = l, 0, 0, l
elif self._resizemode == "noresize":
return polygon
return tuple([(t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon])
def _drawturtle(self):
"""Manages the correct rendering of the turtle with respect to
its shape, resizemode, stretch and tilt etc."""
return
############################## stamp stuff ###############################
def stamp(self):
"""Stamp a copy of the turtleshape onto the canvas and return its id.
No argument.
Stamp a copy of the turtle shape onto the canvas at the current
turtle position. Return a stamp_id for that stamp, which can be
used to delete it by calling clearstamp(stamp_id).
Example (for a Turtle instance named turtle):
>>> turtle.color("blue")
>>> turtle.stamp()
13
>>> turtle.fd(50)
"""
screen = self.screen
shape = screen._shapes[self.turtle.shapeIndex]
ttype = shape._type
tshape = shape._data
if ttype == "polygon":
stitem = screen._createpoly()
if self._resizemode == "noresize": w = 1
elif self._resizemode == "auto": w = self._pensize
else: w = self._outlinewidth
shape = self._polytrafo(self._getshapepoly(tshape))
fc, oc = self._fillcolor, self._pencolor
screen._drawpoly(stitem, shape, fill=fc, outline=oc,
width=w, top=True)
elif ttype == "image":
stitem = screen._createimage("")
screen._drawimage(stitem, self._position, tshape)
elif ttype == "compound":
stitem = []
for element in tshape:
item = screen._createpoly()
stitem.append(item)
stitem = tuple(stitem)
for item, (poly, fc, oc) in zip(stitem, tshape):
poly = self._polytrafo(self._getshapepoly(poly, True))
screen._drawpoly(item, poly, fill=self._cc(fc),
outline=self._cc(oc), width=self._outlinewidth, top=True)
self.stampItems.append(stitem)
self.undobuffer.push(("stamp", stitem))
return stitem
def _clearstamp(self, stampid):
"""does the work for clearstamp() and clearstamps()
"""
if stampid in self.stampItems:
if isinstance(stampid, tuple):
for subitem in stampid:
self.screen._delete(subitem)
else:
self.screen._delete(stampid)
self.stampItems.remove(stampid)
# Delete stampitem from undobuffer if necessary
# if clearstamp is called directly.
item = ("stamp", stampid)
buf = self.undobuffer
if buf is None or item not in buf.buffer:
return
index = buf.buffer.index(item)
buf.buffer.remove(item)
if index <= buf.ptr:
buf.ptr = (buf.ptr - 1) % buf.bufsize
buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None])
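# Editor's sketch of the ring-buffer bookkeeping above: the undobuffer
# is a fixed-size ring, so after dropping the "stamp" entry the pointer
# steps back modulo bufsize and a [None] filler is inserted to keep the
# later entries' positions stable. E.g. bufsize 1000, ptr 5, stamp at
# index 3: ptr -> (5-1) % 1000 == 4, filler inserted at (4+1) % 1000 == 5.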
def clearstamp(self, stampid):
"""Delete stamp with given stampid
Argument:
stampid - an integer, must be return value of previous stamp() call.
Example (for a Turtle instance named turtle):
>>> turtle.color("blue")
>>> astamp = turtle.stamp()
>>> turtle.fd(50)
>>> turtle.clearstamp(astamp)
"""
self._clearstamp(stampid)
self._update()
def clearstamps(self, n=None):
"""Delete all or first/last n of turtle's stamps.
Optional argument:
n -- an integer
If n is None, delete all of pen's stamps,
else if n > 0 delete first n stamps
else if n < 0 delete last n stamps.
Example (for a Turtle instance named turtle):
>>> for i in range(8):
... turtle.stamp(); turtle.fd(30)
...
>>> turtle.clearstamps(2)
>>> turtle.clearstamps(-2)
>>> turtle.clearstamps()
"""
if n is None:
toDelete = self.stampItems[:]
elif n >= 0:
toDelete = self.stampItems[:n]
else:
toDelete = self.stampItems[n:]
for item in toDelete:
self._clearstamp(item)
self._update()
def _goto(self, end):
"""Move the pen to the point end, thereby drawing a line
if pen is down. All other methods for turtle movement depend
on this one.
"""
if self._speed and self.screen._tracing == 1:
if self._drawing:
#console.log('%s:%s:%s:%s:%s' % (self, start, end, self._pencolor,
# self._pensize))
self.screen._drawline(self, #please remove me eventually
(self._position, end),
self._pencolor, self._pensize, False)
if isinstance(self._fillpath, list):
self._fillpath.append(end)
###### inheritance!!!!!!!!!!!!!!!!!!!!!!
self._position = end
def _rotate(self, angle):
"""Turns pen clockwise by angle.
"""
#console.log('_rotate')
if self.undobuffer:
self.undobuffer.push(("rot", angle, self._degreesPerAU))
angle *= self._degreesPerAU
neworient = self._orient.rotate(angle)
tracing = self.screen._tracing
self._orient = neworient
#self._update()
def _newLine(self, usePos=True):
"""Closes current line item and starts a new one.
Remark: if the current line becomes too long, animation
performance (via _drawline) slows down considerably.
"""
#console.log('_newLine')
return
def filling(self):
"""Return fillstate (True if filling, False else).
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.begin_fill()
>>> if turtle.filling():
... turtle.pensize(5)
... else:
... turtle.pensize(3)
"""
return isinstance(self._fillpath, list)
def begin_fill(self):
"""Called just before drawing a shape to be filled.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if not self.filling():
self._fillitem = self.screen._createpoly()
#self.items.append(self._fillitem)
self._fillpath = [self._position]
#self._newLine()
if self.undobuffer:
self.undobuffer.push(("beginfill", self._fillitem))
#self._update()
def end_fill(self):
"""Fill the shape drawn after the call begin_fill().
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if self.filling():
if len(self._fillpath) > 2:
self.screen._drawpoly(self._fillitem, self._fillpath,
fill=self._fillcolor)
if self.undobuffer:
self.undobuffer.push(("dofill", self._fillitem))
self._fillitem = self._fillpath = None
self._update()
def dot(self, size=None, *color):
"""Draw a dot with diameter size, using color.
Optional arguments:
size -- an integer >= 1 (if given)
color -- a colorstring or a numeric color tuple
Draw a circular dot with diameter size, using color.
If size is not given, the maximum of pensize+4 and 2*pensize is used.
Example (for a Turtle instance named turtle):
>>> turtle.dot()
>>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50)
"""
if not color:
if isinstance(size, (str, tuple)):
color = self._colorstr(size)
size = self._pensize + max(self._pensize, 4)
else:
color = self._pencolor
if not size:
size = self._pensize + max(self._pensize, 4)
else:
if size is None:
size = self._pensize + max(self._pensize, 4)
color = self._colorstr(color)
if hasattr(self.screen, "_dot"):
item = self.screen._dot(self._position, size, color)
#self.items.append(item)
if self.undobuffer:
self.undobuffer.push(("dot", item))
else:
pen = self.pen()
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
try:
if self.resizemode() == 'auto':
self.ht()
self.pendown()
self.pensize(size)
self.pencolor(color)
self.forward(0)
finally:
self.pen(pen)
if self.undobuffer:
self.undobuffer.cumulate = False
def _write(self, txt, align, font):
"""Performs the writing for write()
"""
item, end = self.screen._write(self._position, txt, align, font,
self._pencolor)
#self.items.append(item)
if self.undobuffer:
self.undobuffer.push(("wri", item))
return end
def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")):
"""Write text at the current turtle position.
Arguments:
arg -- info, which is to be written to the TurtleScreen
move (optional) -- True/False
align (optional) -- one of the strings "left", "center" or "right"
font (optional) -- a triple (fontname, fontsize, fonttype)
Write text - the string representation of arg - at the current
turtle position according to align ("left", "center" or "right")
and with the given font.
If move is True, the pen is moved to the bottom-right corner
of the text. By default, move is False.
Example (for a Turtle instance named turtle):
>>> turtle.write('Home = ', True, align="center")
>>> turtle.write((0,0), True)
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
end = self._write(str(arg), align.lower(), font)
if move:
x, y = self.pos()
self.setpos(end, y)
if self.undobuffer:
self.undobuffer.cumulate = False
def begin_poly(self):
"""Start recording the vertices of a polygon.
No argument.
Start recording the vertices of a polygon. Current turtle position
is first point of polygon.
Example (for a Turtle instance named turtle):
>>> turtle.begin_poly()
"""
self._poly = [self._position]
self._creatingPoly = True
def end_poly(self):
"""Stop recording the vertices of a polygon.
No argument.
Stop recording the vertices of a polygon. Current turtle position is
last point of polygon. This will be connected with the first point.
Example (for a Turtle instance named turtle):
>>> turtle.end_poly()
"""
self._creatingPoly = False
def get_poly(self):
"""Return the lastly recorded polygon.
No argument.
Example (for a Turtle instance named turtle):
>>> p = turtle.get_poly()
>>> turtle.register_shape("myFavouriteShape", p)
"""
## check if there is any poly?
if self._poly is not None:
return tuple(self._poly)
def getscreen(self):
"""Return the TurtleScreen object, the turtle is drawing on.
No argument.
Return the TurtleScreen object, the turtle is drawing on.
So TurtleScreen-methods can be called for that object.
Example (for a Turtle instance named turtle):
>>> ts = turtle.getscreen()
>>> ts
<turtle.TurtleScreen object at 0x0106B770>
>>> ts.bgcolor("pink")
"""
return self.screen
def getturtle(self):
"""Return the Turtleobject itself.
No argument.
Only reasonable use: as a function to return the 'anonymous turtle':
Example:
>>> pet = getturtle()
>>> pet.fd(50)
>>> pet
<turtle.Turtle object at 0x0187D810>
>>> turtles()
[<turtle.Turtle object at 0x0187D810>]
"""
return self
getpen = getturtle
################################################################
### screen oriented methods recurring to methods of TurtleScreen
################################################################
def _delay(self, delay=None):
"""Set delay value which determines speed of turtle animation.
"""
return self.screen.delay(delay)
turtlesize = shapesize
RawPen = RawTurtle
### Screen - Singleton ########################
def Screen():
"""Return the singleton screen object.
If none exists at the moment, create a new one and return it,
else return the existing one."""
if Turtle._screen is None:
Turtle._screen = _Screen()
return Turtle._screen
class _Screen(TurtleScreen):
_root = None
_canvas = None
_title = _CFG["title"]
def __init__(self):
# XXX there is no need for this code to be conditional,
# as there will be only a single _Screen instance, anyway
# XXX actually, the turtle demo is injecting root window,
# so perhaps the conditional creation of a root should be
# preserved (perhaps by passing it as an optional parameter)
if _Screen._root is None:
_Screen._root = self._root = _Root()
#self._root.title(_Screen._title)
#self._root.ondestroy(self._destroy)
if _Screen._canvas is None:
width = _CFG["width"]
height = _CFG["height"]
canvwidth = _CFG["canvwidth"]
canvheight = _CFG["canvheight"]
leftright = _CFG["leftright"]
topbottom = _CFG["topbottom"]
self._root.setupcanvas(width, height, canvwidth, canvheight)
_Screen._canvas = self._root._getcanvas()
TurtleScreen.__init__(self, _Screen._canvas)
self.setup(width, height, leftright, topbottom)
def end(self):
self._root.end()
def setup(self, width=_CFG["width"], height=_CFG["height"],
startx=_CFG["leftright"], starty=_CFG["topbottom"]):
""" Set the size and position of the main window.
Arguments:
width: as integer a size in pixels, as float a fraction of the screen.
Default is 50% of screen.
height: as integer the height in pixels, as float a fraction of the
screen. Default is 75% of screen.
startx: if positive, starting position in pixels from the left
edge of the screen, if negative from the right edge
Default, startx=None is to center window horizontally.
starty: if positive, starting position in pixels from the top
edge of the screen, if negative from the bottom edge
Default, starty=None is to center window vertically.
Examples (for a Screen instance named screen):
>>> screen.setup (width=200, height=200, startx=0, starty=0)
sets window to 200x200 pixels, in upper left of screen
>>> screen.setup(width=.75, height=0.5, startx=None, starty=None)
sets window to 75% of screen by 50% of screen and centers
"""
if not hasattr(self._root, "set_geometry"):
return
sw = self._root.win_width()
sh = self._root.win_height()
if isinstance(width, float) and 0 <= width <= 1:
width = sw*width
if startx is None:
startx = (sw - width) / 2
if isinstance(height, float) and 0 <= height <= 1:
height = sh*height
if starty is None:
starty = (sh - height) / 2
self._root.set_geometry(width, height, startx, starty)
self.update()
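# Worked example of the fractional sizing above (editor's numbers):
# on a 1920x1080 screen, setup(width=.75, height=0.5) computes
# width = 1920*0.75 == 1440.0, startx = (1920-1440)/2 == 240.0
# height = 1080*0.5 == 540.0, starty = (1080-540)/2 == 270.0
# i.e. a centered 1440x540 window, as in the docstring's second example.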
class Turtle(RawTurtle):
"""RawTurtle auto-creating (scrolled) canvas.
When a Turtle object is created or a function derived from some
Turtle method is called a TurtleScreen object is automatically created.
"""
_pen = None
_screen = None
def __init__(self,
shape=_CFG["shape"],
undobuffersize=_CFG["undobuffersize"],
visible=_CFG["visible"]):
if Turtle._screen is None:
Turtle._screen = Screen()
RawTurtle.__init__(self, Turtle._screen,
shape=shape,
undobuffersize=undobuffersize,
visible=visible)
Pen = Turtle
def _getpen():
"""Create the 'anonymous' turtle if not already present."""
if Turtle._pen is None:
Turtle._pen = Turtle()
return Turtle._pen
def _getscreen():
"""Create a TurtleScreen if not already present."""
if Turtle._screen is None:
Turtle._screen = Screen()
return Turtle._screen
if __name__ == "__main__":
def switchpen():
if isdown():
pu()
else:
pd()
def demo1():
"""Demo of old turtle.py - module"""
reset()
tracer(True)
up()
backward(100)
down()
# draw 3 squares; the last filled
width(3)
for i in range(3):
if i == 2:
begin_fill()
for _ in range(4):
forward(20)
left(90)
if i == 2:
color("maroon")
end_fill()
up()
forward(30)
down()
width(1)
color("black")
# move out of the way
tracer(False)
up()
right(90)
forward(100)
right(90)
forward(100)
right(180)
down()
# some text
write("startstart", 1)
write("start", 1)
color("red")
# staircase
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
# filled staircase
tracer(True)
begin_fill()
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
end_fill()
# more text
def demo2():
"""Demo of some new features."""
speed(1)
st()
pensize(3)
setheading(towards(0, 0))
radius = distance(0, 0)/2.0
rt(90)
for _ in range(18):
switchpen()
circle(radius, 10)
write("wait a moment...")
while undobufferentries():
undo()
reset()
lt(90)
colormode(255)
laenge = 10
pencolor("green")
pensize(3)
lt(180)
for i in range(-2, 16):
if i > 0:
begin_fill()
fillcolor(255-15*i, 0, 15*i)
for _ in range(3):
fd(laenge)
lt(120)
end_fill()
laenge += 10
lt(15)
speed((speed()+1)%12)
#end_fill()
lt(120)
pu()
fd(70)
rt(30)
pd()
color("red","yellow")
speed(0)
begin_fill()
for _ in range(4):
circle(50, 90)
rt(90)
fd(30)
rt(90)
end_fill()
lt(90)
pu()
fd(30)
pd()
shape("turtle")
tri = getturtle()
tri.resizemode("auto")
turtle = Turtle()
turtle.resizemode("auto")
turtle.shape("turtle")
turtle.reset()
turtle.left(90)
turtle.speed(0)
turtle.up()
turtle.goto(280, 40)
turtle.lt(30)
turtle.down()
turtle.speed(6)
turtle.color("blue","orange")
turtle.pensize(2)
tri.speed(6)
setheading(towards(turtle))
count = 1
while tri.distance(turtle) > 4:
turtle.fd(3.5)
turtle.lt(0.6)
tri.setheading(tri.towards(turtle))
tri.fd(4)
if count % 20 == 0:
turtle.stamp()
tri.stamp()
switchpen()
count += 1
tri.write("CAUGHT! ", font=("Arial", 16, "bold"), align="right")
tri.pencolor("black")
tri.pencolor("red")
def baba(xdummy, ydummy):
clearscreen()
bye()
time.sleep(2)
while undobufferentries():
tri.undo()
turtle.undo()
tri.fd(50)
tri.write(" Click me!", font = ("Courier", 12, "bold") )
tri.onclick(baba, 1)
demo1()
demo2()
exitonclick()
|
kelseyoo14/Wander
|
refs/heads/master
|
venv_2_7/lib/python2.7/site-packages/pandas/io/excel.py
|
9
|
"""
Module for parsing to/from Excel
"""
#----------------------------------------------------------------------
# ExcelFile class
import os
import datetime
import abc
import numpy as np
from pandas.core.frame import DataFrame
from pandas.io.parsers import TextParser
from pandas.io.common import _is_url, _urlopen, _validate_header_arg
from pandas.tseries.period import Period
from pandas import json
from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass,
BytesIO, string_types)
from pandas.core import config
from pandas.core.common import pprint_thing
from pandas.util.decorators import Appender
import pandas.compat as compat
import pandas.compat.openpyxl_compat as openpyxl_compat
import pandas.core.common as com
from warnings import warn
from distutils.version import LooseVersion
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
_writer_extensions = ["xlsx", "xls", "xlsm"]
_writers = {}
def register_writer(klass):
"""Adds engine to the excel writer registry. You must use this method to
integrate with ``to_excel``. Also adds config options for any new
``supported_extensions`` defined on the writer."""
if not compat.callable(klass):
raise ValueError("Can only register callables as engines")
engine_name = klass.engine
_writers[engine_name] = klass
for ext in klass.supported_extensions:
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
config.register_option("io.excel.%s.writer" % ext,
engine_name, validator=str)
_writer_extensions.append(ext)
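# Hypothetical usage sketch (editor's illustration; MyWriter and the
# '.xlsz' extension are invented):
# class MyWriter(ExcelWriter):
# engine = 'mywriter'
# supported_extensions = ('.xlsz',)
# ... # plus the abstract write/save methods
# register_writer(MyWriter)
# would add a config option "io.excel.xlsz.writer" defaulting to
# 'mywriter' so the engine can be looked up by extension.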
def get_writer(engine_name):
if engine_name == 'openpyxl':
try:
import openpyxl
# with version-less openpyxl engine
# make sure we make the intelligent choice for the user
if LooseVersion(openpyxl.__version__) < '2.0.0':
return _writers['openpyxl1']
elif LooseVersion(openpyxl.__version__) < '2.2.0':
return _writers['openpyxl20']
else:
return _writers['openpyxl22']
except ImportError:
# fall through to normal exception handling below
pass
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '%s'" % engine_name)
def read_excel(io, sheetname=0, header=0, skiprows=None, skip_footer=0,
index_col=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None,
convert_float=True, has_index_names=None, converters=None,
engine=None, **kwds):
"""
Read an Excel table into a pandas DataFrame
Parameters
----------
io : string, file-like object, pandas ExcelFile, or xlrd workbook.
The string could be a URL. Valid URL schemes include http, ftp, s3,
and file. For file URLs, a host is expected. For instance, a local
file could be file://localhost/path/to/workbook.xlsx
sheetname : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names, integers are used for zero-indexed sheet
positions.
Lists of strings/integers are used to request multiple sheets.
Specify None to get all sheets.
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing sheets.
Available Cases
* Defaults to 0 -> 1st sheet as a DataFrame
* 1 -> 2nd sheet as a DataFrame
* "Sheet1" -> 1st sheet as a DataFrame
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
* None -> All sheets as a dictionary of DataFrames
header : int, list of ints, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``
skiprows : list-like
Rows to skip at the beginning (0-indexed)
skip_footer : int, default 0
Rows at the end to skip (0-indexed)
index_col : int, list of ints, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
parse_cols : int or list, default None
* If None then parse all columns,
* If int then indicates last column to be parsed
* If list of ints then indicates list of column numbers to be parsed
* If string then indicates comma separated list of column names and
column ranges (e.g. "A:E" or "A,C,E:F")
na_values : list-like, default None
List of additional strings to recognize as NA/NaN
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally
has_index_names : boolean, default None
DEPRECATED: for version 0.17+ index names will be automatically inferred
based on index_col. To read Excel output from 0.16.2 and prior that
had saved index names, use True.
Returns
-------
parsed : DataFrame or Dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheetname argument
for more information on when a Dict of Dataframes is returned.
"""
if not isinstance(io, ExcelFile):
io = ExcelFile(io, engine=engine)
return io._parse_excel(
sheetname=sheetname, header=header, skiprows=skiprows,
index_col=index_col, parse_cols=parse_cols, parse_dates=parse_dates,
date_parser=date_parser, na_values=na_values, thousands=thousands,
convert_float=convert_float, has_index_names=has_index_names,
skip_footer=skip_footer, converters=converters, **kwds)
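# Minimal usage sketch (editor's example; 'book.xlsx' is hypothetical):
# import pandas as pd
# df = pd.read_excel('book.xlsx') # first sheet -> DataFrame
# dfs = pd.read_excel('book.xlsx', sheetname=[0, 'Sheet3'])
# dfs[0]; dfs['Sheet3'] # dict keyed by the requested labels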
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd. See read_excel for more documentation
Parameters
----------
io : string, file-like object or xlrd workbook
If a string, expected to be a path to xls or xlsx file
engine: string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
"""
def __init__(self, io, **kwds):
import xlrd # throw an ImportError if we need to
ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2]))
if ver < (0, 9): # pragma: no cover
raise ImportError("pandas requires xlrd >= 0.9.0 for excel "
"support, current version " + xlrd.__VERSION__)
self.io = io
engine = kwds.pop('engine', None)
if engine is not None and engine != 'xlrd':
raise ValueError("Unknown engine: %s" % engine)
if isinstance(io, compat.string_types):
if _is_url(io):
data = _urlopen(io).read()
self.book = xlrd.open_workbook(file_contents=data)
else:
self.book = xlrd.open_workbook(io)
elif engine == 'xlrd' and isinstance(io, xlrd.Book):
self.book = io
elif not isinstance(io, xlrd.Book) and hasattr(io, "read"):
# N.B. xlrd.Book has a read attribute too
data = io.read()
self.book = xlrd.open_workbook(file_contents=data)
else:
raise ValueError('Must explicitly set engine if not passing in'
' buffer or path for io.')
def parse(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
index_col=None, parse_cols=None, parse_dates=False,
date_parser=None, na_values=None, thousands=None,
convert_float=True, has_index_names=None, converters=None, **kwds):
"""
Parse specified sheet(s) into a DataFrame
Equivalent to read_excel(ExcelFile, ...) See the read_excel
docstring for more info on accepted parameters
"""
return self._parse_excel(sheetname=sheetname, header=header,
skiprows=skiprows,
index_col=index_col,
has_index_names=has_index_names,
parse_cols=parse_cols,
parse_dates=parse_dates,
date_parser=date_parser, na_values=na_values,
thousands=thousands,
skip_footer=skip_footer,
convert_float=convert_float,
converters=converters,
**kwds)
def _should_parse(self, i, parse_cols):
def _range2cols(areas):
"""
Convert comma separated list of column names and column ranges to a
list of 0-based column indexes.
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
def _excel2num(x):
"Convert Excel column name like 'AB' to 0-based column index"
return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1,
x.upper().strip(), 0) - 1
cols = []
for rng in areas.split(','):
if ':' in rng:
rng = rng.split(':')
cols += lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)
else:
cols.append(_excel2num(rng))
return cols
if isinstance(parse_cols, int):
return i <= parse_cols
elif isinstance(parse_cols, compat.string_types):
return i in _range2cols(parse_cols)
else:
return i in parse_cols
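# Worked example of _excel2num's base-26 fold (editor's arithmetic):
# 'AB' -> ((0*26 + 1)*26 + 2) - 1 == 27 (A->1, B->2)
# so _range2cols('A,C,Z:AB') == [0, 2, 25, 26, 27], matching the doctest.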
def _parse_excel(self, sheetname=0, header=0, skiprows=None, skip_footer=0,
index_col=None, has_index_names=None, parse_cols=None,
parse_dates=False, date_parser=None, na_values=None,
thousands=None, convert_float=True,
verbose=False, **kwds):
skipfooter = kwds.pop('skipfooter', None)
if skipfooter is not None:
skip_footer = skipfooter
_validate_header_arg(header)
if has_index_names is not None:
warn("\nThe has_index_names argument is deprecated; index names "
"will be automatically inferred based on index_col.\n"
"This argmument is still necessary if reading Excel output "
"from 0.16.2 or prior with index names.", FutureWarning,
stacklevel=3)
if 'chunksize' in kwds:
raise NotImplementedError("Reading an Excel file in chunks "
"is not implemented")
import xlrd
from xlrd import (xldate, XL_CELL_DATE,
XL_CELL_ERROR, XL_CELL_BOOLEAN,
XL_CELL_NUMBER)
epoch1904 = self.book.datemode
        def _parse_cell(cell_contents, cell_typ):
            """converts the contents of the cell into a
               pandas-appropriate object"""
if cell_typ == XL_CELL_DATE:
if xlrd_0_9_3:
# Use the newer xlrd datetime handling.
cell_contents = xldate.xldate_as_datetime(cell_contents,
epoch1904)
                    # Excel doesn't distinguish between dates and times,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if ((not epoch1904 and year == (1899, 12, 31))
or (epoch1904 and year == (1904, 1, 1))):
cell_contents = datetime.time(cell_contents.hour,
cell_contents.minute,
cell_contents.second,
cell_contents.microsecond)
else:
# Use the xlrd <= 0.9.2 date handling.
dt = xldate.xldate_as_tuple(cell_contents, epoch1904)
if dt[0] < datetime.MINYEAR:
cell_contents = datetime.time(*dt[3:])
else:
cell_contents = datetime.datetime(*dt)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
                # it's a minimal perf hit and less surprising
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
# xlrd >= 0.9.3 can return datetime objects directly.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
xlrd_0_9_3 = True
else:
xlrd_0_9_3 = False
ret_dict = False
        # Keep sheetname to maintain backwards compatibility.
if isinstance(sheetname, list):
sheets = sheetname
ret_dict = True
elif sheetname is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheetname]
        # Handle same-type duplicates.
sheets = list(set(sheets))
output = {}
for asheetname in sheets:
if verbose:
print("Reading sheet %s" % asheetname)
if isinstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.book.sheet_by_index(asheetname)
data = []
should_parse = {}
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(zip(sheet.row_values(i),
sheet.row_types(i))):
if parse_cols is not None and j not in should_parse:
should_parse[j] = self._should_parse(j, parse_cols)
if parse_cols is None or should_parse[j]:
                        row.append(_parse_cell(value, typ))
data.append(row)
if sheet.nrows == 0:
return DataFrame()
if com.is_list_like(header) and len(header) == 1:
header = header[0]
# forward fill and pull out names for MultiIndex column
header_names = None
if header is not None:
if com.is_list_like(header):
header_names = []
for row in header:
if com.is_integer(skiprows):
row += skiprows
data[row] = _fill_mi_header(data[row])
header_name, data[row] = _pop_header_name(data[row], index_col)
header_names.append(header_name)
else:
data[header] = _trim_excel_header(data[header])
if com.is_list_like(index_col):
# forward fill values for MultiIndex index
if not com.is_list_like(header):
offset = 1 + header
else:
offset = 1 + max(header)
for col in index_col:
last = data[offset][col]
for row in range(offset + 1, len(data)):
if data[row][col] == '' or data[row][col] is None:
data[row][col] = last
else:
last = data[row][col]
if com.is_list_like(header) and len(header) > 1:
has_index_names = True
parser = TextParser(data, header=header, index_col=index_col,
has_index_names=has_index_names,
na_values=na_values,
thousands=thousands,
parse_dates=parse_dates,
date_parser=date_parser,
skiprows=skiprows,
skip_footer=skip_footer,
**kwds)
output[asheetname] = parser.read()
output[asheetname].columns = output[asheetname].columns.set_names(header_names)
if ret_dict:
return output
else:
return output[asheetname]
@property
def sheet_names(self):
return self.book.sheet_names()
def close(self):
"""close io if necessary"""
if hasattr(self.io, 'close'):
self.io.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _trim_excel_header(row):
# trim header row so auto-index inference works
    # xlrd uses '', openpyxl None
while len(row) > 0 and (row[0] == '' or row[0] is None):
row = row[1:]
return row
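# _trim_excel_header example (illustrative): ['', None, 'c0', 'c1'] -> ['c0', 'c1']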
def _fill_mi_header(row):
    # forward fill blank entries
# from headers if parsing as MultiIndex
last = row[0]
for i in range(1, len(row)):
if row[i] == '' or row[i] is None:
row[i] = last
else:
last = row[i]
return row
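# _fill_mi_header example (illustrative):
#     ['a', '', '', 'b', ''] -> ['a', 'a', 'a', 'b', 'b']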
# fill with blank if index_col is not None
def _pop_header_name(row, index_col):
""" (header, new_data) for header rows in MultiIndex parsing"""
none_fill = lambda x: None if x == '' else x
if index_col is None:
# no index col specified, trim data for inference path
return none_fill(row[0]), row[1:]
else:
# pop out header name and fill w/ blank
i = index_col if not com.is_list_like(index_col) else max(index_col)
return none_fill(row[i]), row[:i] + [''] + row[i+1:]
def _conv_value(val):
# Convert numpy types to Python types for the Excel writers.
if com.is_integer(val):
val = int(val)
elif com.is_float(val):
val = float(val)
elif com.is_bool(val):
val = bool(val)
elif isinstance(val, Period):
val = "%s" % val
elif com.is_list_like(val):
val = str(val)
return val
@add_metaclass(abc.ABCMeta)
class ExcelWriter(object):
"""
    Class for writing DataFrame objects into Excel sheets, default is to use
    xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage.
Parameters
----------
path : string
Path to xls or xlsx file.
engine : string (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_format : string, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD')
datetime_format : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
Notes
-----
For compatibility with CSV writers, ExcelWriter serializes lists
and dicts to strings before writing.
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> called to write additional DataFrames to disk
# - ``supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
# - ``engine`` - string that gives the engine name. Necessary to
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
# lookup.
# - ``save(self)`` --> called to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technically, ExcelWriter implementations don't need to subclass
# ExcelWriter.
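    # Hedged sketch of a minimal engine (names are illustrative, not a
    # shipped engine); it would be registered via register_writer():
    #
    #     class _DemoWriter(ExcelWriter):
    #         engine = 'demo'
    #         supported_extensions = ('.xlsx',)
    #
    #         def save(self):
    #             self.book.save(self.path)
    #
    #         def write_cells(self, cells, sheet_name=None,
    #                         startrow=0, startcol=0):
    #             for cell in cells:
    #                 ...  # write cell.val at (cell.row, cell.col)
    #
    #     register_writer(_DemoWriter)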
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if issubclass(cls, ExcelWriter):
if engine is None:
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1][1:]
else:
ext = 'xlsx'
try:
engine = config.get_option('io.excel.%s.writer' % ext)
except KeyError:
                    raise ValueError("No engine for filetype: '%s'" % ext)
cls = get_writer(engine)
return object.__new__(cls)
# declare external properties you can count on
book = None
curr_sheet = None
path = None
@abc.abstractproperty
def supported_extensions(self):
"extensions that writer engine supports"
pass
@abc.abstractproperty
def engine(self):
"name of engine"
pass
@abc.abstractmethod
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
"""
        Write given formatted cells into an Excel sheet
        Parameters
        ----------
        cells : generator
            cells of formatted data to save to the Excel sheet
        sheet_name : string, default None
            Name of Excel sheet, if None, then use self.cur_sheet
        startrow : upper left cell row to dump data frame
        startcol : upper left cell column to dump data frame
"""
pass
@abc.abstractmethod
def save(self):
"""
Save workbook to disk.
"""
pass
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, **engine_kwargs):
# validate that this engine can handle the extension
if isinstance(path, string_types):
ext = os.path.splitext(path)[-1]
else:
ext = 'xls' if engine == 'xlwt' else 'xlsx'
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_format is None:
self.date_format = 'YYYY-MM-DD'
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = 'YYYY-MM-DD HH:MM:SS'
else:
self.datetime_format = datetime_format
def _get_sheet_name(self, sheet_name):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError('Must pass explicit sheet_name or set '
'cur_sheet property')
return sheet_name
@classmethod
def check_extension(cls, ext):
"""checks that path's extension against the Writer's supported
extensions. If it isn't supported, raises UnsupportedFiletypeError."""
if ext.startswith('.'):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
msg = (u("Invalid extension for engine '%s': '%s'") %
(pprint_thing(cls.engine), pprint_thing(ext)))
raise ValueError(msg)
else:
return True
# Allow use as a contextmanager
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""synonym for save, to make it more file-like"""
return self.save()
class _Openpyxl1Writer(ExcelWriter):
engine = 'openpyxl1'
supported_extensions = ('.xlsx', '.xlsm')
openpyxl_majorver = 1
def __init__(self, path, engine=None, **engine_kwargs):
if not openpyxl_compat.is_compat(major_ver=self.openpyxl_majorver):
raise ValueError('Installed openpyxl is not supported at this '
'time. Use {0}.x.y.'
.format(self.openpyxl_majorver))
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super(_Openpyxl1Writer, self).__init__(path, **engine_kwargs)
# Create workbook object with default optimized_write=True.
self.book = Workbook()
# Openpyxl 1.6.1 adds a dummy sheet. We remove it.
if self.book.worksheets:
self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using openpyxl.
from openpyxl.cell import get_column_letter
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
if (isinstance(cell.val, compat.string_types)
and xcell.data_type_for_value(cell.val)
!= xcell.TYPE_STRING):
xcell.set_value_explicit(cell.val)
else:
xcell.value = _conv_value(cell.val)
style = None
if cell.style:
style = self._convert_to_style(cell.style)
for field in style.__fields__:
xcell.style.__setattr__(field,
style.__getattribute__(field))
if isinstance(cell.val, datetime.datetime):
xcell.style.number_format.format_code = self.datetime_format
elif isinstance(cell.val, datetime.date):
xcell.style.number_format.format_code = self.date_format
if cell.mergestart is not None and cell.mergeend is not None:
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)
wks.merge_cells('%s%s:%s%s' % (cletterstart,
startrow + cell.row + 1,
cletterend,
startrow + cell.mergestart + 1))
# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
if style:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
xcell = wks.cell("%s%s" % (colletter, row))
for field in style.__fields__:
xcell.style.__setattr__(
field, style.__getattribute__(field))
@classmethod
def _convert_to_style(cls, style_dict):
"""
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict: style dictionary to convert
"""
from openpyxl.style import Style
xls_style = Style()
for key, value in style_dict.items():
for nk, nv in value.items():
if key == "borders":
(xls_style.borders.__getattribute__(nk)
.__setattr__('border_style', nv))
else:
xls_style.__getattribute__(key).__setattr__(nk, nv)
return xls_style
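    # e.g. (illustrative): {'borders': {'top': 'thin'}} sets
    # xls_style.borders.top.border_style = 'thin'.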
register_writer(_Openpyxl1Writer)
class _OpenpyxlWriter(_Openpyxl1Writer):
engine = 'openpyxl'
register_writer(_OpenpyxlWriter)
class _Openpyxl20Writer(_Openpyxl1Writer):
"""
Note: Support for OpenPyxl v2 is currently EXPERIMENTAL (GH7565).
"""
engine = 'openpyxl20'
openpyxl_majorver = 2
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using openpyxl.
from openpyxl.cell import get_column_letter
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
xcell.value = _conv_value(cell.val)
style_kwargs = {}
# Apply format codes before cell.style to allow override
if isinstance(cell.val, datetime.datetime):
style_kwargs.update(self._convert_to_style_kwargs({
'number_format':{'format_code': self.datetime_format}}))
elif isinstance(cell.val, datetime.date):
style_kwargs.update(self._convert_to_style_kwargs({
'number_format':{'format_code': self.date_format}}))
if cell.style:
style_kwargs.update(self._convert_to_style_kwargs(cell.style))
if style_kwargs:
xcell.style = xcell.style.copy(**style_kwargs)
if cell.mergestart is not None and cell.mergeend is not None:
cletterstart = get_column_letter(startcol + cell.col + 1)
cletterend = get_column_letter(startcol + cell.mergeend + 1)
wks.merge_cells('%s%s:%s%s' % (cletterstart,
startrow + cell.row + 1,
cletterend,
startrow + cell.mergestart + 1))
# Excel requires that the format of the first cell in a merged
# range is repeated in the rest of the merged range.
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
colletter = get_column_letter(col)
xcell = wks.cell("%s%s" % (colletter, row))
xcell.style = xcell.style.copy(**style_kwargs)
@classmethod
def _convert_to_style_kwargs(cls, style_dict):
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {
'borders': 'border',
}
style_kwargs = {}
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, '_convert_to_{0}'.format(k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
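    # Illustrative mapping (a sketch, not exhaustive):
    #
    #     _convert_to_style_kwargs({'font': {'b': True},
    #                               'borders': {'left': 'thin'}})
    #     # -> {'font': Font(bold=True),
    #     #     'border': Border(left=Side(style='thin'))}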
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
'sz': 'size',
'b': 'bold',
'i': 'italic',
'u': 'underline',
'strike': 'strikethrough',
'vertalign': 'vertAlign',
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict):
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import PatternFill, GradientFill
_pattern_fill_key_map = {
'patternType': 'fill_type',
'patterntype': 'fill_type',
'fgColor': 'start_color',
'fgcolor': 'start_color',
'bgColor': 'end_color',
'bgcolor': 'end_color',
}
_gradient_fill_key_map = {
'fill_type': 'type',
}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = gk = None
if k in _pattern_fill_key_map:
pk = _pattern_fill_key_map[k]
if k in _gradient_fill_key_map:
gk = _gradient_fill_key_map[k]
if pk in ['start_color', 'end_color']:
v = cls._convert_to_color(v)
if gk == 'stop':
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {
'border_style': 'style',
}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {
'diagonalup': 'diagonalUp',
'diagonaldown': 'diagonalDown',
}
border_kwargs = {}
for k, v in border_dict.items():
if k in _border_key_map:
k = _border_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
if k in ['left', 'right', 'top', 'bottom', 'diagonal']:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
try:
# >= 2.0.0 < 2.1.0
from openpyxl.styles import NumberFormat
return NumberFormat(**number_format_dict)
except:
# >= 2.1.0
return number_format_dict['format_code']
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
Returns
-------
"""
from openpyxl.styles import Protection
return Protection(**protection_dict)
register_writer(_Openpyxl20Writer)
class _Openpyxl22Writer(_Openpyxl20Writer):
"""
Note: Support for OpenPyxl v2.2 is currently EXPERIMENTAL (GH7565).
"""
engine = 'openpyxl22'
openpyxl_majorver = 2
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using openpyxl.
from openpyxl import styles
sheet_name = self._get_sheet_name(sheet_name)
_style_cache = {}
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
for cell in cells:
xcell = wks.cell(
row=startrow + cell.row + 1,
column=startcol + cell.col + 1
)
xcell.value = _conv_value(cell.val)
style_kwargs = {}
if cell.style:
key = str(cell.style)
style_kwargs = _style_cache.get(key)
if style_kwargs is None:
style_kwargs = self._convert_to_style_kwargs(cell.style)
_style_cache[key] = style_kwargs
if style_kwargs:
for k, v in style_kwargs.items():
setattr(xcell, k, v)
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_cells(
start_row=startrow + cell.row + 1,
start_column=startcol + cell.col + 1,
end_column=startcol + cell.mergeend + 1,
end_row=startrow + cell.mergestart + 1
)
# When cells are merged only the top-left cell is preserved
# The behaviour of the other cells in a merged range is undefined
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
xcell = wks.cell(column=col, row=row)
for k, v in style_kwargs.items():
setattr(xcell, k, v)
register_writer(_Openpyxl22Writer)
class _XlwtWriter(ExcelWriter):
engine = 'xlwt'
supported_extensions = ('.xls',)
def __init__(self, path, engine=None, encoding=None, **engine_kwargs):
# Use the xlwt module as the Excel writer.
import xlwt
engine_kwargs['engine'] = engine
super(_XlwtWriter, self).__init__(path, **engine_kwargs)
if encoding is None:
encoding = 'ascii'
self.book = xlwt.Workbook(encoding=encoding)
self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format)
self.fm_date = xlwt.easyxf(num_format_str=self.date_format)
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using xlwt.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_sheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {}
for cell in cells:
val = _conv_value(cell.val)
num_format_str = None
if isinstance(cell.val, datetime.datetime):
num_format_str = self.datetime_format
elif isinstance(cell.val, datetime.date):
num_format_str = self.date_format
stylekey = json.dumps(cell.style)
if num_format_str:
stylekey += num_format_str
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, num_format_str)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.write_merge(startrow + cell.row,
startrow + cell.mergestart,
startcol + cell.col,
startcol + cell.mergeend,
val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
@classmethod
def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',',
line_sep=';'):
"""helper which recursively generate an xlwt easy style string
for example:
hstyle = {"font": {"bold": True},
"border": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"align": {"horiz": "center"}}
will be converted to
font: bold on; \
border: top thin, right thin, bottom thin, left thin; \
align: horiz center;
"""
if hasattr(item, 'items'):
if firstlevel:
it = ["%s: %s" % (key, cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "%s " % (line_sep).join(it)
return out
else:
it = ["%s %s" % (key, cls._style_to_xlwt(value, False))
for key, value in item.items()]
out = "%s " % (field_sep).join(it)
return out
else:
item = "%s" % item
item = item.replace("True", "on")
item = item.replace("False", "off")
return item
@classmethod
def _convert_to_style(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlwt style object
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
import xlwt
if style_dict:
xlwt_stylestr = cls._style_to_xlwt(style_dict)
style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';')
else:
style = xlwt.XFStyle()
if num_format_str is not None:
style.num_format_str = num_format_str
return style
register_writer(_XlwtWriter)
class _XlsxWriter(ExcelWriter):
engine = 'xlsxwriter'
supported_extensions = ('.xlsx',)
def __init__(self, path, engine=None,
date_format=None, datetime_format=None, **engine_kwargs):
# Use the xlsxwriter module as the Excel writer.
import xlsxwriter
super(_XlsxWriter, self).__init__(path, engine=engine,
date_format=date_format,
datetime_format=datetime_format,
**engine_kwargs)
self.book = xlsxwriter.Workbook(path, **engine_kwargs)
def save(self):
"""
Save workbook to disk.
"""
return self.book.close()
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
# Write the frame cells using xlsxwriter.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_worksheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {}
for cell in cells:
val = _conv_value(cell.val)
num_format_str = None
if isinstance(cell.val, datetime.datetime):
num_format_str = self.datetime_format
elif isinstance(cell.val, datetime.date):
num_format_str = self.date_format
stylekey = json.dumps(cell.style)
if num_format_str:
stylekey += num_format_str
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self._convert_to_style(cell.style, num_format_str)
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_range(startrow + cell.row,
startcol + cell.col,
startrow + cell.mergestart,
startcol + cell.mergeend,
                                val, style)
else:
wks.write(startrow + cell.row,
startcol + cell.col,
val, style)
def _convert_to_style(self, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format object
Parameters
----------
style_dict: style dictionary to convert
num_format_str: optional number format string
"""
# If there is no formatting we don't create a format object.
if num_format_str is None and style_dict is None:
return None
# Create a XlsxWriter format object.
xl_format = self.book.add_format()
if num_format_str is not None:
xl_format.set_num_format(num_format_str)
if style_dict is None:
return xl_format
# Map the cell font to XlsxWriter font properties.
if style_dict.get('font'):
font = style_dict['font']
if font.get('bold'):
xl_format.set_bold()
# Map the alignment to XlsxWriter alignment properties.
alignment = style_dict.get('alignment')
if alignment:
if (alignment.get('horizontal')
and alignment['horizontal'] == 'center'):
xl_format.set_align('center')
if (alignment.get('vertical')
and alignment['vertical'] == 'top'):
xl_format.set_align('top')
# Map the cell borders to XlsxWriter border properties.
if style_dict.get('borders'):
xl_format.set_border()
return xl_format
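    # e.g. (illustrative): {'font': {'bold': True},
    # 'alignment': {'horizontal': 'center'}} yields a Format with
    # set_bold() and set_align('center') applied.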
register_writer(_XlsxWriter)
|
aapav01/android_kernel_samsung_ms013g-2
|
refs/heads/MM-caf-port
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
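	# Binary search over the (time-ordered, non-overlapping) slices;
	# returns the index of the slice containing ts, or -1 if none.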
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
geerlingguy/ansible-modules-extras
|
refs/heads/devel
|
packaging/os/urpmi.py
|
73
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Philippe Makowski
# Written by Philippe Makowski <philippem@mageia.org>
# Based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: urpmi
short_description: Urpmi manager
description:
- Manages packages with I(urpmi) (such as for Mageia or Mandriva)
version_added: "1.3.4"
options:
pkg:
description:
- name of package to install, upgrade or remove.
required: true
default: null
state:
description:
- Indicates the desired package state
required: false
default: present
choices: [ "absent", "present" ]
update_cache:
description:
- update the package database first C(urpmi.update -a).
required: false
default: no
choices: [ "yes", "no" ]
no-suggests:
description:
- Corresponds to the C(--no-suggests) option for I(urpmi).
required: false
default: yes
choices: [ "yes", "no" ]
force:
description:
- Assume "yes" is the answer to any question urpmi has to ask.
Corresponds to the C(--force) option for I(urpmi).
required: false
default: yes
choices: [ "yes", "no" ]
author: "Philippe Makowski (@pmakowski)"
notes: []
'''
EXAMPLES = '''
# install package foo
- urpmi: pkg=foo state=present
# remove package foo
- urpmi: pkg=foo state=absent
# description: remove packages foo and bar
- urpmi: pkg=foo,bar state=absent
# description: update the package database (urpmi.update -a -q) and install bar (bar will be updated if a newer version exists)
- urpmi: name=bar state=present update_cache=yes
'''
import json
import shlex
import os
import sys
URPMI_PATH = '/usr/sbin/urpmi'
URPME_PATH = '/usr/sbin/urpme'
def query_package(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
cmd = "rpm -q %s" % (name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
return False
def query_package_provides(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
cmd = "rpm -q --provides %s" % (name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
return rc == 0
def update_package_db(module):
cmd = "urpmi.update -a -q"
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="could not update package db")
def remove_packages(module, packages):
remove_c = 0
    # Using a for loop in case of error, so we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
continue
cmd = "%s --auto %s" % (URPME_PATH, package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pkgspec, force=True, no_suggests=True):
packages = ""
for package in pkgspec:
if not query_package_provides(module, package):
packages += "'%s' " % package
if len(packages) != 0:
if no_suggests:
no_suggests_yes = '--no-suggests'
else:
no_suggests_yes = ''
if force:
force_yes = '--force'
else:
force_yes = ''
cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_suggests_yes, packages))
rc, out, err = module.run_command(cmd)
installed = True
        for package in pkgspec:
if not query_package_provides(module, package):
installed = False
        # urpmi always has exit code 0 if --force is used
if rc or not installed:
module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
else:
module.exit_json(changed=True, msg="%s present(s)" % packages)
else:
module.exit_json(changed=False)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
force = dict(default=True, type='bool'),
no_suggests = dict(default=True, aliases=['no-suggests'], type='bool'),
package = dict(aliases=['pkg', 'name'], required=True)))
if not os.path.exists(URPMI_PATH):
module.fail_json(msg="cannot find urpmi, looking for %s" % (URPMI_PATH))
p = module.params
force_yes = p['force']
no_suggest_yes = p['no_suggests']
if p['update_cache']:
update_package_db(module)
packages = p['package'].split(',')
if p['state'] in [ 'installed', 'present' ]:
install_packages(module, packages, force_yes, no_suggest_yes)
elif p['state'] in [ 'removed', 'absent' ]:
remove_packages(module, packages)
# import module snippets
from ansible.module_utils.basic import *
main()
|
JioEducation/edx-platform
|
refs/heads/master
|
common/djangoapps/track/contexts.py
|
126
|
"""Generates common contexts"""
import logging
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from util.request import COURSE_REGEX
log = logging.getLogger(__name__)
def course_context_from_url(url):
"""
Extracts the course_context from the given `url` and passes it on to
`course_context_from_course_id()`.
"""
url = url or ''
match = COURSE_REGEX.match(url)
course_id = None
if match:
course_id_string = match.group('course_id')
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id_string)
except InvalidKeyError:
log.warning(
'unable to parse course_id "{course_id}"'.format(
course_id=course_id_string
),
exc_info=True
)
return course_context_from_course_id(course_id)
def course_context_from_course_id(course_id):
"""
Creates a course context from a `course_id`.
Example Returned Context::
{
'course_id': 'org/course/run',
'org_id': 'org'
}
"""
if course_id is None:
return {'course_id': '', 'org_id': ''}
# TODO: Make this accept any CourseKey, and serialize it using .to_string
assert isinstance(course_id, CourseKey)
return {
'course_id': course_id.to_deprecated_string(),
'org_id': course_id.org,
}
|
postlund/pyatv
|
refs/heads/master
|
tests/support/test_device_info.py
|
1
|
"""Unit tests for device_info."""
from pyatv.const import DeviceModel
from pyatv.support.device_info import lookup_internal_name, lookup_model, lookup_version
def test_lookup_existing_model():
assert lookup_model("AppleTV6,2") == DeviceModel.Gen4K
def test_lookup_homepod():
assert lookup_model("AudioAccessory5,1") == DeviceModel.HomePodMini
def test_lookup_missing_model():
assert lookup_model("bad_model") == DeviceModel.Unknown
def test_lookup_existing_internal_name():
assert lookup_internal_name("J105aAP") == DeviceModel.Gen4K
def test_lookup_missing_internal_name():
assert lookup_internal_name("bad_name") == DeviceModel.Unknown
def test_lookup_existing_version():
assert lookup_version("17J586") == "13.0"
def test_lookup_bad_version():
assert not lookup_version(None)
assert not lookup_version("bad_version")
def test_lookup_guess_major_version():
assert lookup_version("16F123") == "12.x"
assert lookup_version("17F123") == "13.x"
|
mbauskar/alec_frappe5_erpnext
|
refs/heads/develop
|
erpnext/patches/v4_2/__init__.py
|
12133432
| |
jasag/Phytoliths-recognition-system
|
refs/heads/master
|
code/__init__.py
|
12133432
| |
msabramo/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/openstack/__init__.py
|
12133432
| |
hlin117/statsmodels
|
refs/heads/master
|
statsmodels/genmod/_prediction.py
|
27
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 19 11:29:18 2014
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
# this is similar to ContrastResults after t_test, partially copied and adjusted
class PredictionResults(object):
def __init__(self, predicted_mean, var_pred_mean, var_resid=None,
df=None, dist=None, row_labels=None, linpred=None, link=None):
# TODO: is var_resid used? drop from arguments?
self.predicted_mean = predicted_mean
self.var_pred_mean = var_pred_mean
self.df = df
self.var_resid = var_resid
self.row_labels = row_labels
self.linpred = linpred
self.link = link
if dist is None or dist == 'norm':
self.dist = stats.norm
self.dist_args = ()
elif dist == 't':
self.dist = stats.t
self.dist_args = (self.df,)
else:
self.dist = dist
self.dist_args = ()
@property
def se_obs(self):
raise NotImplementedError
return np.sqrt(self.var_pred_mean + self.var_resid)
@property
def se_mean(self):
return np.sqrt(self.var_pred_mean)
@property
def tvalues(self):
return self.predicted_mean / self.se_mean
def t_test(self, value=0, alternative='two-sided'):
'''z- or t-test for hypothesis that mean is equal to value
Parameters
----------
value : array_like
value under the null hypothesis
alternative : string
'two-sided', 'larger', 'smaller'
Returns
-------
stat : ndarray
test statistic
pvalue : ndarray
p-value of the hypothesis test, the distribution is given by
the attribute of the instance, specified in `__init__`. Default
if not specified is the normal distribution.
'''
# from statsmodels.stats.weightstats
# assumes symmetric distribution
stat = (self.predicted_mean - value) / self.se_mean
if alternative in ['two-sided', '2-sided', '2s']:
pvalue = self.dist.sf(np.abs(stat), *self.dist_args)*2
elif alternative in ['larger', 'l']:
pvalue = self.dist.sf(stat, *self.dist_args)
elif alternative in ['smaller', 's']:
pvalue = self.dist.cdf(stat, *self.dist_args)
else:
raise ValueError('invalid alternative')
return stat, pvalue
def conf_int(self, method='endpoint', alpha=0.05, **kwds):
"""
        Returns the confidence interval of the predicted mean.
        This is currently only available for t and z tests.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
            i.e., the default `alpha` = .05 returns a 95% confidence interval.
kwds : extra keyword arguments
currently ignored, only for compatibility, consistent signature
Returns
-------
ci : ndarray, (k_constraints, 2)
The array has the lower and the upper limit of the confidence
interval in the columns.
"""
tmp = np.linspace(0, 1, 6)
is_linear = (self.link.inverse(tmp) == tmp).all()
if method == 'endpoint' and not is_linear:
ci_linear = self.linpred.conf_int(alpha=alpha, obs=False)
ci = self.link.inverse(ci_linear)
elif method == 'delta' or is_linear:
se = self.se_mean
q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
lower = self.predicted_mean - q * se
upper = self.predicted_mean + q * se
ci = np.column_stack((lower, upper))
# if we want to stack at a new last axis, for lower.ndim > 1
# np.concatenate((lower[..., None], upper[..., None]), axis=-1)
return ci
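    # Hedged usage sketch (`pred` is a hypothetical prediction result):
    #
    #     pred.conf_int(method='endpoint')  # inverse-link of linpred interval
    #     pred.conf_int(method='delta')     # mean +/- q * se_mean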
def summary_frame(self, what='all', alpha=0.05):
# TODO: finish and cleanup
import pandas as pd
from statsmodels.compat.collections import OrderedDict
#ci_obs = self.conf_int(alpha=alpha, obs=True) # need to split
ci_mean = self.conf_int(alpha=alpha)
to_include = OrderedDict()
to_include['mean'] = self.predicted_mean
to_include['mean_se'] = self.se_mean
to_include['mean_ci_lower'] = ci_mean[:, 0]
to_include['mean_ci_upper'] = ci_mean[:, 1]
self.table = to_include
#OrderedDict doesn't work to preserve sequence
# pandas dict doesn't handle 2d_array
#data = np.column_stack(list(to_include.values()))
#names = ....
res = pd.DataFrame(to_include, index=self.row_labels,
columns=to_include.keys())
return res
def get_prediction_glm(self, exog=None, transform=True, weights=None,
row_labels=None, linpred=None, link=None, pred_kwds=None):
"""
compute prediction results
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
weights : array_like, optional
Weights interpreted as in WLS, used for the variance of the predicted
residual.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction_results : instance
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations.
"""
### prepare exog and row_labels, based on base Results.predict
if transform and hasattr(self.model, 'formula') and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.design_info.builder,
exog)
if exog is not None:
if row_labels is None:
if hasattr(exog, 'index'):
row_labels = exog.index
else:
row_labels = None
exog = np.asarray(exog)
if exog.ndim == 1 and (self.model.exog.ndim == 1 or
self.model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
else:
exog = self.model.exog
if weights is None:
weights = getattr(self.model, 'weights', None)
if row_labels is None:
row_labels = getattr(self.model.data, 'row_labels', None)
# need to handle other arrays, TODO: is delegating to model possible ?
if weights is not None:
weights = np.asarray(weights)
if (weights.size > 1 and
(weights.ndim != 1 or weights.shape[0] == exog.shape[1])):
raise ValueError('weights has wrong shape')
### end
pred_kwds['linear'] = False
predicted_mean = self.model.predict(self.params, exog, **pred_kwds)
covb = self.cov_params()
link_deriv = self.model.family.link.inverse_deriv(linpred.predicted_mean)
var_pred_mean = link_deriv**2 * (exog * np.dot(covb, exog.T).T).sum(1)
# TODO: check that we have correct scale, Refactor scale #???
var_resid = self.scale / weights # self.mse_resid / weights
# special case for now:
if self.cov_type == 'fixed scale':
var_resid = self.cov_kwds['scale'] / weights
dist = ['norm', 't'][self.use_t]
return PredictionResults(predicted_mean, var_pred_mean, var_resid,
df=self.df_resid, dist=dist,
row_labels=row_labels, linpred=linpred, link=link)
def params_transform_univariate(params, cov_params, link=None, transform=None,
row_labels=None):
"""
    results for univariate, nonlinear, monotonically transformed parameters
This provides transformed values, standard errors and confidence interval
for transformations of parameters, for example in calculating rates with
`exp(params)` in the case of Poisson or other models with exponential
mean function.
"""
from statsmodels.genmod.families import links
if link is None and transform is None:
link = links.Log()
if row_labels is None and hasattr(params, 'index'):
row_labels = params.index
params = np.asarray(params)
predicted_mean = link.inverse(params)
link_deriv = link.inverse_deriv(params)
var_pred_mean = link_deriv**2 * np.diag(cov_params)
# TODO: do we want covariance also, or just var/se
dist = stats.norm
# TODO: need ci for linear prediction, method of `lin_pred
linpred = PredictionResults(params, np.diag(cov_params), dist=dist,
row_labels=row_labels, link=links.identity())
res = PredictionResults(predicted_mean, var_pred_mean, dist=dist,
row_labels=row_labels, linpred=linpred, link=link)
return res
|
kantel/processingpy
|
refs/heads/master
|
sketches/modes/PythonMode/examples/Topics/Geometry/Icosahedra/icosahedron.py
|
6
|
from shape3d import Shape3D
class Icosahedron(Shape3D):
def __init__(self, radius=150):
Shape3D.__init__(self)
self.topPent = [PVector() for _ in range(5)]
self.bottomPent = [PVector() for _ in range(5)]
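        # Geometry note (derived from the construction below): c is the
        # pentagon edge length, b the pentagon circumradius, and
        # a = sqrt(c^2 - b^2) the height of the apex above the pentagon plane.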
c = dist(cos(0) * radius,
sin(0) * radius,
cos(radians(72)) * radius,
sin(radians(72)) * radius)
b = radius
a = sqrt((c**2) - (b**2))
self.triHeight = sqrt((c**2) - (c / 2)**2)
angle = 0
for i in range(5):
self.topPent[i] = PVector(cos(angle) * radius,
sin(angle) * radius,
self.triHeight / 2.0)
angle += radians(72)
self.topPoint = PVector(0, 0, self.triHeight / 2.0 + a)
        # rotate the bottom pentagon by half a segment (36 degrees)
        angle = radians(72.0 / 2.0)
for i in range(5):
self.bottomPent[i] = PVector(cos(angle) * radius,
sin(angle) * radius,
-self.triHeight / 2.0)
angle += radians(72)
self.bottomPoint = PVector(0, 0, -(self.triHeight / 2.0 + a))
# Draw icosahedron.
def create(self):
for i in range(5):
if i < 4:
# Icosahedron top.
self.makeTriangle(self.topPent[i],
self.topPoint,
self.topPent[i + 1])
# Icosahedron bottom.
self.makeTriangle(self.bottomPent[i],
self.bottomPoint,
self.bottomPent[i + 1])
else:
self.makeTriangle(self.topPent[i],
self.topPoint,
self.topPent[0])
self.makeTriangle(self.bottomPent[i],
self.bottomPoint,
self.bottomPent[0])
# Icosahedron body.
for i in range(5):
if i < 3:
self.makeTriangle(self.topPent[i],
self.bottomPent[i + 1],
self.bottomPent[i + 2])
self.makeTriangle(self.bottomPent[i + 2],
self.topPent[i],
self.topPent[i + 1])
elif i == 3:
self.makeTriangle(self.topPent[i],
self.bottomPent[i + 1],
self.bottomPent[0])
self.makeTriangle(self.bottomPent[0],
self.topPent[i],
self.topPent[i + 1])
elif i == 4:
self.makeTriangle(self.topPent[i],
self.bottomPent[0],
self.bottomPent[1])
self.makeTriangle(self.bottomPent[1],
self.topPent[i],
self.topPent[0])
def makeTriangle(self, a, b, c):
with beginShape():
vertex(a.x, a.y, a.z)
vertex(b.x, b.y, b.z)
vertex(c.x, c.y, c.z)
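# Example (sketch): typical use from a Processing Python Mode sketch; the
# sketch body below is illustrative and assumes the usual Icosahedra
# example setup around this module.
#
#     from icosahedron import Icosahedron
#
#     def setup():
#         global ico
#         size(640, 360, P3D)
#         ico = Icosahedron(75)
#
#     def draw():
#         background(0)
#         lights()
#         translate(width / 2, height / 2)
#         ico.create()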
|
djipko/rdopkg
|
refs/heads/master
|
tests/test_push_update.py
|
4
|
import os
import shutil
import rdopkg.shell
from rdopkg.utils.cmd import git
from common import ASSETS_DIR
from common import cfind
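# Expected file lists: for each update YAML in the test assets, the set of
# package paths (relative to the destination base) that pushing it creates.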
FILE_LISTS = {
'griz.yml': {
'grizzly/epel-6/potato-1.0.0-3.el6.noarch.rpm',
'grizzly/epel-6/potato-1.0.0-3.el6.src.rpm',
'grizzly/fedora-20/potato-1.0.0-2.fc21.noarch.rpm',
'grizzly/fedora-20/potato-1.0.0-2.fc21.src.rpm',
},
'iceh.yml': {
'icehouse/epel-6/banana-1.0.0-3.el6.noarch.rpm',
'icehouse/epel-6/banana-1.0.0-3.el6.src.rpm',
'icehouse/fedora-20/banana-1.0.0-2.fc21.noarch.rpm',
'icehouse/fedora-20/banana-1.0.0-2.fc21.src.rpm',
},
'hava.yml': {
'havana/epel-6/orange-1.0.0-3.el6.noarch.rpm',
'havana/epel-6/orange-1.0.0-3.el6.src.rpm',
'havana/fedora-20/orange-1.0.0-2.fc21.noarch.rpm',
'havana/fedora-20/orange-1.0.0-2.fc21.src.rpm',
},
}
def setup_module(module):
bin_path = os.path.abspath(os.path.join(ASSETS_DIR, 'bin'))
os.environ['PATH'] = '%s:%s' % (bin_path, os.environ['PATH'])
def prep_push_test(tmpdir, update_repo, dest):
rdoup_path = tmpdir.join('rdo-update')
dest_path = tmpdir.join('dest')
shutil.copytree(os.path.join(ASSETS_DIR, 'rdo-update.git', update_repo),
str(rdoup_path))
shutil.copytree(os.path.join(ASSETS_DIR, 'dest', dest), str(dest_path))
with rdoup_path.as_cwd():
git('init')
git('add', '.')
git('commit', '-m', 'Initial import')
return rdoup_path, dest_path
def assert_files_list(file_list_path, dest_base, files):
    with open(file_list_path, 'r') as f:
        lines = f.readlines()
pkgs = set()
for line in lines:
line = line.rstrip()
assert line.startswith(dest_base)
pkg = line[len(dest_base):]
pkgs.add(pkg)
assert set(files) == pkgs
def test_push_one_clean(tmpdir):
rdoup_path, dest_path = prep_push_test(tmpdir, 'one_added', 'clean')
dest_base = str(dest_path.join('openstack-'))
with tmpdir.as_cwd():
rdopkg.shell.main(cargs=['push-updates', str(rdoup_path), dest_base])
# pushed packages
tree = cfind(dest_path)
exp_tree = {
'./openstack-grizzly',
'./openstack-grizzly/EMPTY',
'./openstack-havana',
'./openstack-havana/EMPTY',
'./openstack-icehouse',
'./openstack-icehouse/EMPTY',
'./openstack-icehouse/epel-6',
'./openstack-icehouse/epel-6/banana-1.0.0-3.el6.noarch.rpm',
'./openstack-icehouse/epel-6/banana-1.0.0-3.el6.src.rpm',
'./openstack-icehouse/fedora-20',
'./openstack-icehouse/fedora-20/banana-1.0.0-2.fc21.noarch.rpm',
'./openstack-icehouse/fedora-20/banana-1.0.0-2.fc21.src.rpm',
'./sign-rpms',
}
assert tree == exp_tree
# rdo-update repo changes
tree = cfind(rdoup_path, include_dirs=False)
exp_tree = {
'./pushed/iceh.yml',
'./pushed/iceh.yml.files',
}
assert tree == exp_tree
# list of pushed files
assert_files_list(
str(rdoup_path.join('pushed/iceh.yml.files')), dest_base,
FILE_LISTS['iceh.yml'])
def test_push_one_collision(tmpdir):
rdoup_path, dest_path = prep_push_test(tmpdir, 'one_added', 'collision')
dest_base = str(dest_path.join('openstack-'))
pre_rdoup_tree = cfind(rdoup_path)
pre_dest_tree = cfind(dest_path)
with tmpdir.as_cwd():
rdopkg.shell.main(cargs=['push-updates', str(rdoup_path), dest_base])
# Nothing should change on collision - ensure rollback
dest_tree = cfind(dest_path)
assert dest_tree == pre_dest_tree
rdoup_tree = cfind(rdoup_path)
assert rdoup_tree == pre_rdoup_tree
def test_push_all_mixed_clean(tmpdir):
rdoup_path, dest_path = prep_push_test(tmpdir, 'mixed', 'clean')
dest_base = str(dest_path.join('openstack-'))
with tmpdir.as_cwd():
rdopkg.shell.main(cargs=['push-updates', str(rdoup_path), dest_base])
tree = cfind(dest_path)
exp_tree = {
'./openstack-grizzly',
'./openstack-grizzly/EMPTY',
'./openstack-havana',
'./openstack-havana/EMPTY',
'./openstack-havana/epel-6',
'./openstack-havana/epel-6/orange-1.0.0-3.el6.noarch.rpm',
'./openstack-havana/epel-6/orange-1.0.0-3.el6.src.rpm',
'./openstack-havana/fedora-20',
'./openstack-havana/fedora-20/orange-1.0.0-2.fc21.noarch.rpm',
'./openstack-havana/fedora-20/orange-1.0.0-2.fc21.src.rpm',
'./openstack-icehouse',
'./openstack-icehouse/EMPTY',
'./openstack-icehouse/epel-6',
'./openstack-icehouse/epel-6/banana-1.0.0-3.el6.noarch.rpm',
'./openstack-icehouse/epel-6/banana-1.0.0-3.el6.src.rpm',
'./openstack-icehouse/fedora-20',
'./openstack-icehouse/fedora-20/banana-1.0.0-2.fc21.noarch.rpm',
'./openstack-icehouse/fedora-20/banana-1.0.0-2.fc21.src.rpm',
'./sign-rpms',
}
assert tree == exp_tree
tree = cfind(rdoup_path, include_dirs=False)
    # failed update should remain, as well as updates outside of ready/
assert tree == {
'./pushed/hava.yml',
'./pushed/hava.yml.files',
'./pushed/iceh.yml',
'./pushed/iceh.yml.files',
'./ready/fail.yml',
'./updates/griz.yml',
}
assert_files_list(
str(rdoup_path.join('pushed/iceh.yml.files')), dest_base,
FILE_LISTS['iceh.yml'])
assert_files_list(
str(rdoup_path.join('pushed/hava.yml.files')), dest_base,
FILE_LISTS['hava.yml'])
def test_push_all_mixed_collision(tmpdir):
rdoup_path, dest_path = prep_push_test(tmpdir, 'mixed', 'collision')
dest_base = str(dest_path.join('openstack-'))
with tmpdir.as_cwd():
rdopkg.shell.main(cargs=['push-updates', str(rdoup_path), dest_base])
tree = cfind(dest_path)
exp_tree = {
'./openstack-grizzly',
'./openstack-grizzly/EMPTY',
'./openstack-havana',
'./openstack-havana/epel-6',
'./openstack-havana/epel-6/banana-1.0.0-3.el6.src.rpm',
'./openstack-havana/epel-6/orange-1.0.0-3.el6.noarch.rpm',
'./openstack-havana/epel-6/orange-1.0.0-3.el6.src.rpm',
'./openstack-havana/fedora-20',
'./openstack-havana/fedora-20/orange-1.0.0-2.fc21.noarch.rpm',
'./openstack-havana/fedora-20/orange-1.0.0-2.fc21.src.rpm',
'./openstack-icehouse',
'./openstack-icehouse/epel-6',
'./openstack-icehouse/epel-6/banana-1.0.0-3.el6.src.rpm',
'./sign-rpms',
}
assert tree == exp_tree
tree = cfind(rdoup_path, include_dirs=False)
    # failed update should remain, as well as updates outside of ready/
assert tree == {
'./pushed/hava.yml',
'./pushed/hava.yml.files',
'./ready/fail.yml',
'./ready/iceh.yml',
'./updates/griz.yml',
}
assert_files_list(
str(rdoup_path.join('pushed/hava.yml.files')), dest_base,
FILE_LISTS['hava.yml'])
def test_push_all_mixed_collision_overwrite(tmpdir):
rdoup_path, dest_path = prep_push_test(tmpdir, 'mixed', 'collision')
dest_base = str(dest_path.join('openstack-'))
with tmpdir.as_cwd():
rdopkg.shell.main(
cargs=['push-updates', str(rdoup_path), dest_base, '-w'])
tree = cfind(dest_path)
exp_tree = {
'./openstack-grizzly',
'./openstack-grizzly/EMPTY',
'./openstack-havana',
'./openstack-havana/epel-6',
'./openstack-havana/epel-6/banana-1.0.0-3.el6.src.rpm',
'./openstack-havana/epel-6/orange-1.0.0-3.el6.noarch.rpm',
'./openstack-havana/epel-6/orange-1.0.0-3.el6.src.rpm',
'./openstack-havana/fedora-20',
'./openstack-havana/fedora-20/orange-1.0.0-2.fc21.noarch.rpm',
'./openstack-havana/fedora-20/orange-1.0.0-2.fc21.src.rpm',
'./openstack-icehouse',
'./openstack-icehouse/epel-6',
'./openstack-icehouse/epel-6/banana-1.0.0-3.el6.src.rpm',
'./openstack-icehouse/fedora-20',
'./openstack-icehouse/fedora-20/banana-1.0.0-2.fc21.noarch.rpm',
'./openstack-icehouse/fedora-20/banana-1.0.0-2.fc21.src.rpm',
'./openstack-icehouse/epel-6/banana-1.0.0-3.el6.noarch.rpm',
'./sign-rpms',
}
assert tree == exp_tree
tree = cfind(rdoup_path, include_dirs=False)
    # failed update should remain, as well as updates outside of ready/
assert tree == {
'./pushed/hava.yml',
'./pushed/hava.yml.files',
'./pushed/iceh.yml',
'./pushed/iceh.yml.files',
'./ready/fail.yml',
'./updates/griz.yml',
}
assert_files_list(
str(rdoup_path.join('pushed/hava.yml.files')), dest_base,
FILE_LISTS['hava.yml'])
def test_push_selected_mixed_clean(tmpdir):
rdoup_path, dest_path = prep_push_test(tmpdir, 'mixed', 'clean')
dest_base = str(dest_path.join('openstack-'))
with tmpdir.as_cwd():
rdopkg.shell.main(cargs=['push-updates',
str(rdoup_path), dest_base,
'-f', 'ready/iceh.yml', 'updates/griz.yml',
])
tree = cfind(dest_path)
exp_tree = {
'./openstack-grizzly',
'./openstack-grizzly/EMPTY',
'./openstack-grizzly/epel-6',
'./openstack-grizzly/epel-6/potato-1.0.0-3.el6.noarch.rpm',
'./openstack-grizzly/epel-6/potato-1.0.0-3.el6.src.rpm',
'./openstack-grizzly/fedora-20',
'./openstack-grizzly/fedora-20/potato-1.0.0-2.fc21.noarch.rpm',
'./openstack-grizzly/fedora-20/potato-1.0.0-2.fc21.src.rpm',
'./openstack-havana',
'./openstack-havana/EMPTY',
'./openstack-icehouse',
'./openstack-icehouse/EMPTY',
'./openstack-icehouse/epel-6',
'./openstack-icehouse/epel-6/banana-1.0.0-3.el6.noarch.rpm',
'./openstack-icehouse/epel-6/banana-1.0.0-3.el6.src.rpm',
'./openstack-icehouse/fedora-20',
'./openstack-icehouse/fedora-20/banana-1.0.0-2.fc21.noarch.rpm',
'./openstack-icehouse/fedora-20/banana-1.0.0-2.fc21.src.rpm',
'./sign-rpms',
}
assert tree == exp_tree
tree = cfind(rdoup_path, include_dirs=False)
assert tree == {
'./ready/fail.yml',
'./ready/hava.yml',
'./pushed/iceh.yml',
'./pushed/iceh.yml.files',
'./pushed/griz.yml',
'./pushed/griz.yml.files',
}
assert_files_list(
str(rdoup_path.join('pushed/iceh.yml.files')), dest_base,
FILE_LISTS['iceh.yml'])
assert_files_list(
str(rdoup_path.join('pushed/griz.yml.files')), dest_base,
FILE_LISTS['griz.yml'])
|
da1z/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/namespacePackageUsedInMovedFunction/before/src/a.py
|
79
|
import nspkg
def func():
print(nspkg)
|
odoo-arg/odoo_l10n_ar
|
refs/heads/master
|
base_vat_ar/models/res_country.py
|
1
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class ResCountry(models.Model):
_inherit = 'res.country'
@api.model
def set_ar_no_prefix(self):
self.env.ref('base.ar').no_prefix = True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dulems/hue
|
refs/heads/master
|
apps/jobbrowser/src/jobbrowser/urls.py
|
28
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
urlpatterns = patterns('jobbrowser.views',
# "Default"
url(r'^$', 'jobs'),
url(r'^jobs/$', 'jobs', name='jobs'),
  url(r'^jobs/(?P<job>\w+)$', 'single_job', name='single_job'),
url(r'^jobs/(?P<job>\w+)/counters$', 'job_counters', name='job_counters'),
url(r'^jobs/(?P<job>\w+)/kill$', 'kill_job', name='kill_job'),
url(r'^jobs/(?P<job>\w+)/single_logs$', 'job_single_logs', name='job_single_logs'),
  url(r'^jobs/(?P<job>\w+)/tasks$', 'tasks', name='tasks'),
url(r'^jobs/(?P<job>\w+)/tasks/(?P<taskid>\w+)$', 'single_task', name='single_task'), # TODO s/single// ?
url(r'^jobs/(?P<job>\w+)/tasks/(?P<taskid>\w+)/attempts/(?P<attemptid>\w+)$', 'single_task_attempt', name='single_task_attempt'),
url(r'^jobs/(?P<job>\w+)/tasks/(?P<taskid>\w+)/attempts/(?P<attemptid>\w+)/counters$', 'task_attempt_counters', name='task_attempt_counters'),
url(r'^jobs/(?P<job>\w+)/tasks/(?P<taskid>\w+)/attempts/(?P<attemptid>\w+)/logs$', 'single_task_attempt_logs', name='single_task_attempt_logs'),
url(r'^jobs/(\w+)/tasks/(\w+)/attempts/(?P<attemptid>\w+)/kill$', 'kill_task_attempt', name='kill_task_attempt'),
url(r'^trackers/(?P<trackerid>.+)$', 'single_tracker', name='single_tracker'),
url(r'^container/(?P<node_manager_http_address>.+)/(?P<containerid>.+)$', 'container', name='container'),
# MR2 specific
url(r'^jobs/(?P<job>\w+)/job_attempt_logs/(?P<attempt_index>\d+)$', 'job_attempt_logs', name='job_attempt_logs'),
url(r'^jobs/(?P<job>\w+)/job_attempt_logs_json/(?P<attempt_index>\d+)/(?P<name>\w+)?/(?P<offset>\d+)?$', 'job_attempt_logs_json', name='job_attempt_logs_json'),
  url(r'^jobs/(?P<jobid>\w+)/job_not_assigned/(?P<path>.+)$', 'job_not_assigned', name='job_not_assigned'),
# Unused
url(r'^jobs/(?P<job>\w+)/setpriority$', 'set_job_priority', name='set_job_priority'),
url(r'^trackers$', 'trackers', name='trackers'),
url(r'^clusterstatus$', 'clusterstatus', name='clusterstatus'),
url(r'^queues$', 'queues', name='queues'),
url(r'^jobbrowser$', 'jobbrowser', name='jobbrowser'),
url(r'^dock_jobs/$', 'dock_jobs', name='dock_jobs'),
)
|
Stratos42/EveBot
|
refs/heads/master
|
plugins.disabled/quote.py
|
1
|
import random
import re
import time
from util import hook
def add_quote(db, chan, nick, add_nick, msg):
db.execute('''insert or fail into quote (chan, nick, add_nick,
msg, time) values(?,?,?,?,?)''',
(chan, nick, add_nick, msg, time.time()))
db.commit()
def del_quote(db, chan, nick, add_nick, msg):
    db.execute('''update quote set deleted = 1 where
               chan=? and lower(nick)=lower(?) and msg=?''',
               (chan, nick, msg))
    db.commit()
def get_quotes_by_nick(db, chan, nick):
return db.execute("select time, nick, msg from quote where deleted!=1 "
"and chan=? and lower(nick)=lower(?) order by time",
(chan, nick)).fetchall()
def get_quotes_by_chan(db, chan):
return db.execute("select time, nick, msg from quote where deleted!=1 "
"and chan=? order by time", (chan,)).fetchall()
def format_quote(q, num, n_quotes):
ctime, nick, msg = q
return "[%d/%d] %s <%s> %s" % (num, n_quotes,
time.strftime("%Y-%m-%d", time.gmtime(ctime)), nick, msg)
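# e.g. format_quote((1325376000, 'alice', 'hello'), 2, 5)
#      -> "[2/5] 2012-01-01 <alice> hello"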
@hook.command('q')
@hook.command
def quote(inp, nick='', chan='', db=None):
".q/.quote [#chan] [nick] [#n]/.quote add <nick> <msg> -- gets " \
"random or [#n]th quote by <nick> or from <#chan>/adds quote"
db.execute("create table if not exists quote"
"(chan, nick, add_nick, msg, time real, deleted default 0, "
"primary key (chan, nick, msg))")
db.commit()
add = re.match(r"add[^\w@]+(\S+?)>?\s+(.*)", inp, re.I)
retrieve = re.match(r"(\S+)(?:\s+#?(-?\d+))?$", inp)
retrieve_chan = re.match(r"(#\S+)\s+(\S+)(?:\s+#?(-?\d+))?$", inp)
if add:
quoted_nick, msg = add.groups()
try:
add_quote(db, chan, quoted_nick, nick, msg)
db.commit()
except db.IntegrityError:
return "message already stored, doing nothing."
return "quote added."
elif retrieve:
select, num = retrieve.groups()
by_chan = False
if select.startswith('#'):
by_chan = True
quotes = get_quotes_by_chan(db, select)
else:
quotes = get_quotes_by_nick(db, chan, select)
elif retrieve_chan:
chan, nick, num = retrieve_chan.groups()
quotes = get_quotes_by_nick(db, chan, nick)
else:
return quote.__doc__
n_quotes = len(quotes)
if not n_quotes:
return "no quotes found"
if num:
num = int(num)
if num:
if num > n_quotes or (num < 0 and num < -n_quotes):
return "I only have %d quote%s for %s" % (n_quotes,
('s', '')[n_quotes == 1], select)
elif num < 0:
selected_quote = quotes[num]
num = n_quotes + num + 1
else:
selected_quote = quotes[num - 1]
else:
num = random.randint(1, n_quotes)
selected_quote = quotes[num - 1]
return format_quote(selected_quote, num, n_quotes)
|
mheap/ansible
|
refs/heads/devel
|
test/units/plugins/action/test_win_updates.py
|
4
|
# -*- coding: utf-8 -*-
# (c) 2018, Jordan Borean <jborean@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible.compat.tests.mock import patch, MagicMock, mock_open
from ansible.plugins.action.win_updates import ActionModule
from ansible.playbook.task import Task
class TestWinUpdatesActionPlugin(object):
INVALID_OPTIONS = (
(
{"category_names": ["fake category"]},
False,
"Unknown category_name fake category, must be one of (Application,"
"Connectors,CriticalUpdates,DefinitionUpdates,DeveloperKits,"
"FeaturePacks,Guidance,SecurityUpdates,ServicePacks,Tools,"
"UpdateRollups,Updates)"
),
(
{"state": "invalid"},
False,
"state must be either installed or searched"
),
(
{"reboot": "nonsense"},
False,
"cannot parse reboot as a boolean: The value 'nonsense' is not a "
"valid boolean."
),
(
{"reboot_timeout": "string"},
False,
"reboot_timeout must be an integer"
),
(
{"reboot": True},
True,
"async is not supported for this task when reboot=yes"
)
)
# pylint bug: https://github.com/PyCQA/pylint/issues/511
# pylint: disable=undefined-variable
@pytest.mark.parametrize('task_args, async_val, expected',
((t, a, e) for t, a, e in INVALID_OPTIONS))
def test_invalid_options(self, task_args, async_val, expected):
task = MagicMock(Task)
task.args = task_args
task.async_val = async_val
connection = MagicMock()
play_context = MagicMock()
play_context.check_mode = False
plugin = ActionModule(task, connection, play_context, loader=None,
templar=None, shared_loader_obj=None)
res = plugin.run()
assert res['failed']
assert expected in res['msg']
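    # columns: use scheduled task, original (become, become_method,
    # become_user), expected (become, become_method, become_user) as
    # observed by the mocked _execute_module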
BECOME_OPTIONS = (
(False, False, "sudo", "root", True, "runas", "SYSTEM"),
(False, True, "sudo", "root", True, "runas", "SYSTEM"),
(False, False, "runas", "root", True, "runas", "SYSTEM"),
(False, False, "sudo", "user", True, "runas", "user"),
(False, None, "sudo", None, True, "runas", "SYSTEM"),
# use scheduled task, we shouldn't change anything
(True, False, "sudo", None, False, "sudo", None),
(True, True, "runas", "SYSTEM", True, "runas", "SYSTEM"),
)
# pylint bug: https://github.com/PyCQA/pylint/issues/511
# pylint: disable=undefined-variable
@pytest.mark.parametrize('use_task, o_b, o_bmethod, o_buser, e_b, e_bmethod, e_buser',
((u, ob, obm, obu, eb, ebm, ebu)
for u, ob, obm, obu, eb, ebm, ebu in BECOME_OPTIONS))
def test_module_exec_with_become(self, use_task, o_b, o_bmethod, o_buser,
e_b, e_bmethod, e_buser):
def mock_execute_module(self, **kwargs):
pc = self._play_context
return {"become": pc.become, "become_method": pc.become_method,
"become_user": pc.become_user}
task = MagicMock(Task)
task.args = {}
connection = MagicMock()
connection.module_implementation_preferences = ('.ps1', '.exe', '')
play_context = MagicMock()
play_context.check_mode = False
play_context.become = o_b
play_context.become_method = o_bmethod
play_context.become_user = o_buser
plugin = ActionModule(task, connection, play_context, loader=None,
templar=None, shared_loader_obj=None)
with patch('ansible.plugins.action.ActionBase._execute_module',
new=mock_execute_module):
actual = plugin._execute_module_with_become('win_updates', {}, {},
True, use_task)
# always make sure we reset back to the defaults
assert play_context.become == o_b
assert play_context.become_method == o_bmethod
assert play_context.become_user == o_buser
# verify what was set when _execute_module was called
assert actual['become'] == e_b
assert actual['become_method'] == e_bmethod
assert actual['become_user'] == e_buser
|
ging/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/admin/networks/tables.py
|
4
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks \
import tables as project_tables
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
class DeleteNetwork(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Network",
u"Delete Networks",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Network",
u"Deleted Networks",
count
)
policy_rules = (("network", "delete_network"),)
def delete(self, request, obj_id):
try:
api.neutron.network_delete(request, obj_id)
except Exception:
msg = _('Failed to delete network %s') % obj_id
LOG.info(msg)
redirect = reverse('horizon:admin:networks:index')
exceptions.handle(request, msg, redirect=redirect)
class CreateNetwork(tables.LinkAction):
name = "create"
verbose_name = _("Create Network")
url = "horizon:admin:networks:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_network"),)
class EditNetwork(policy.PolicyTargetMixin, tables.LinkAction):
name = "update"
verbose_name = _("Edit Network")
url = "horizon:admin:networks:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("network", "update_network"),)
# def _get_subnets(network):
# cidrs = [subnet.get('cidr') for subnet in network.subnets]
# return ','.join(cidrs)
class NetworksTable(tables.DataTable):
tenant = tables.Column("tenant_name", verbose_name=_("Project"))
name = tables.Column("name", verbose_name=_("Network Name"),
link='horizon:admin:networks:detail')
subnets = tables.Column(project_tables.get_subnets,
verbose_name=_("Subnets Associated"),)
num_agents = tables.Column("num_agents",
verbose_name=_("DHCP Agents"))
shared = tables.Column("shared", verbose_name=_("Shared"),
filters=(filters.yesno, filters.capfirst))
status = tables.Column("status", verbose_name=_("Status"))
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"))
class Meta:
name = "networks"
verbose_name = _("Networks")
table_actions = (CreateNetwork, DeleteNetwork)
row_actions = (EditNetwork, DeleteNetwork)
def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
super(NetworksTable, self).__init__(
request, data=data,
needs_form_wrapper=needs_form_wrapper,
**kwargs)
if not api.neutron.is_extension_supported(request,
'dhcp_agent_scheduler'):
del self.columns['num_agents']
|
darkoc/clowdflows
|
refs/heads/master
|
workflows/migrations/0017_auto__add_field_abstractwidget_wsdl_method.py
|
6
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AbstractWidget.wsdl_method'
db.add_column('workflows_abstractwidget', 'wsdl_method', self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'AbstractWidget.wsdl_method'
db.delete_column('workflows_abstractwidget', 'wsdl_method')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'workflows.abstractinput': {
'Meta': {'object_name': 'AbstractInput'},
'default': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multi': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inputs'", 'to': "orm['workflows.AbstractWidget']"})
},
'workflows.abstractoption': {
'Meta': {'object_name': 'AbstractOption'},
'abstract_input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['workflows.AbstractInput']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'workflows.abstractoutput': {
'Meta': {'object_name': 'AbstractOutput'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outputs'", 'to': "orm['workflows.AbstractWidget']"})
},
'workflows.abstractwidget': {
'Meta': {'object_name': 'AbstractWidget'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'widgets'", 'to': "orm['workflows.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interaction_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'interactive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'post_interact_action': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'widgets'", 'null': 'True', 'to': "orm['auth.User']"}),
'visualization_template': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'wsdl': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'wsdl_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'workflows.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['workflows.Category']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'categories'", 'null': 'True', 'to': "orm['auth.User']"})
},
'workflows.connection': {
'Meta': {'object_name': 'Connection'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Input']"}),
'output': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Output']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['workflows.Workflow']"})
},
'workflows.data': {
'Meta': {'object_name': 'Data'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'workflows.input': {
'Meta': {'object_name': 'Input'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inner_output': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'outer_input_rel'", 'null': 'True', 'to': "orm['workflows.Output']"}),
'multi_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'outer_output': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inner_input_rel'", 'null': 'True', 'to': "orm['workflows.Output']"}),
'parameter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'value': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inputs'", 'to': "orm['workflows.Widget']"})
},
'workflows.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['workflows.Input']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'workflows.output': {
'Meta': {'object_name': 'Output'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inner_input': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'outer_output_rel'", 'null': 'True', 'to': "orm['workflows.Input']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'outer_input': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'inner_output_rel'", 'null': 'True', 'to': "orm['workflows.Input']"}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'value': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'widget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outputs'", 'to': "orm['workflows.Widget']"})
},
'workflows.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'active_workflow': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users'", 'null': 'True', 'to': "orm['workflows.Workflow']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'workflows.widget': {
'Meta': {'object_name': 'Widget'},
'abstract_widget': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'instances'", 'null': 'True', 'to': "orm['workflows.AbstractWidget']"}),
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interaction_waiting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'regular'", 'max_length': '50'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'widgets'", 'to': "orm['workflows.Workflow']"}),
'x': ('django.db.models.fields.IntegerField', [], {}),
'y': ('django.db.models.fields.IntegerField', [], {})
},
'workflows.workflow': {
'Meta': {'object_name': 'Workflow'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Untitled workflow'", 'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflows'", 'to': "orm['auth.User']"}),
'widget': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'workflow_link'", 'unique': 'True', 'null': 'True', 'to': "orm['workflows.Widget']"})
}
}
complete_apps = ['workflows']
|
anonion0/nsec3map
|
refs/heads/master
|
n3map/statusline.py
|
1
|
import math
def compose_leftright(leftlabels, leftvalues, rightlabels, rightvalues):
left = ('; '.join([lbl + " = " + val for lbl,val in zip(leftlabels,leftvalues)])+'; ')
right = ' ' +'; '.join([lbl + " = " + val for lbl,val in zip(rightlabels,rightvalues)]) + ' ;;'
return left,right
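# e.g. compose_leftright(['r'], ['42'], ['q/s'], ['55'])
#      -> ('r = 42; ', ' q/s = 55 ;;')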
def format_statusline_nsec3(width,
zone,
queries,
records,
hashes,
coverage,
queryrate,
prediction
):
# first line ======
lines = []
left = ";; mapping {0:s}: ".format(zone)
right = " ;;"
pad = width - len(left) - len(right)
if prediction is not None and pad >= 10:
if prediction < records:
prediction = records
ratio = records/float(prediction) if prediction > 0 else 0
percentage = "{0:d}% ".format(int(ratio*100))
prlen = pad-len(percentage)-2
filllen = int(math.ceil(ratio*prlen))
progress = "[{0:s}{1:s}]".format("="*filllen," "*(prlen-filllen))
right = percentage + progress + right
elif pad > 0:
right = '.' * pad + right
lines.append(left + right)
# second line =======
leftlabels = ['records','queries','hashes']
leftshortlabels = ['r','q','h']
leftvalues = ["{0:3d}".format(records),
"{0:3d}".format(queries),
"{0:3d}".format(hashes)
]
if prediction is not None:
leftlabels.append("predicted zone size")
leftshortlabels.append("pred")
leftvalues.append("{0:3d}".format(prediction))
rightlabels = ['q/s', 'coverage']
rightshortlabels = ['q/s', 'c']
rightvalues = ["{0:.0f}".format(queryrate),
"{0:11.6%}".format(coverage)
]
left,right = compose_leftright(leftlabels, leftvalues,
rightlabels, rightvalues)
left = ";; " + left
if width < len(left) + len(right):
left,right = compose_leftright(leftshortlabels, leftvalues,
rightshortlabels, rightvalues)
left = ";; " + left
pad = width - len(left)
if pad > 0:
right = right.rjust(pad, ".")
lines.append(left + right)
return [l[:width] for l in lines]
def format_statusline_nsec(width,
zone,
queries,
records,
queryrate
):
mappinglabel = ";; walking {0:s}: ".format(zone)
leftlabels = ['records','queries']
leftvalues = ["{0:3d}".format(records),
"{0:3d}".format(queries),
]
rightlabels = ['q/s']
rightvalues = ["{0:.0f}".format(queryrate)]
left,right = compose_leftright(leftlabels, leftvalues,
rightlabels, rightvalues)
left = mappinglabel + left
pad = width - len(left)
if pad > 0:
right = right.rjust(pad, '.')
line = left + right
return [line[:width]]
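# Example (sketch): rendering a two-line NSEC3 status for an 80-column
# terminal; the figures are made up for illustration.
#
#     for l in format_statusline_nsec3(80, "example.com.", 120, 42, 37,
#                                      0.123456, 55.0, 100):
#         print(l)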
|
google/mirandum
|
refs/heads/master
|
alerts/extralife/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0018_auto_20160802_1524'),
]
operations = [
migrations.CreateModel(
name='ExtralifeAlertConfig',
fields=[
('alertconfig_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='main.AlertConfig')),
('blacklist', models.TextField(null=True, blank=True)),
('filter_type', models.CharField(default=b'3default', help_text=b'When filtering for specific amounts, comparison to use.', max_length=20, choices=[(b'1equal', b'Equals'), (b'2gt', b'Greater than'), (b'3default', b'Default')])),
('filter_amount', models.FloatField(null=True, blank=True)),
('text_to_speech', models.BooleanField(default=False)),
],
bases=('main.alertconfig',),
),
migrations.CreateModel(
name='ExtralifeEvent',
fields=[
('updaterevent_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='main.UpdaterEvent')),
('details', models.TextField()),
],
bases=('main.updaterevent',),
),
migrations.CreateModel(
name='ExtralifeUpdate',
fields=[
('updater_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='main.Updater')),
('profile_id', models.IntegerField()),
],
bases=('main.updater',),
),
migrations.AddField(
model_name='extralifeevent',
name='updater',
field=models.ForeignKey(to='extralife.ExtralifeUpdate'),
),
]
|
bnagy/francis
|
refs/heads/master
|
exploitaben/lib/analyzers/x86_lldb.py
|
1
|
# BEGIN LICENSE ###
# Use of the triage tools and related source code is subject to the terms
# of the license below.
#
# ------------------------------------------------------------------------
# Copyright (C) 2011 Carnegie Mellon University. All Rights Reserved.
# Portions Copyright 2013 BlackBerry Ltd. All Rights Reserved.
# ------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following acknowledgments
# and disclaimers.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. All advertising materials for third-party software mentioning
# features or use of this software must display the following
# disclaimer:
#
# "Neither Carnegie Mellon University nor its Software Engineering
# Institute have reviewed or endorsed this software"
#
# 4. The names "Department of Homeland Security," "Carnegie Mellon
# University," "CERT" and/or "Software Engineering Institute" shall
# not be used to endorse or promote products derived from this software
# without prior written permission. For written permission, please
# contact permission@sei.cmu.edu.
#
# 5. Products derived from this software may not be called "CERT" nor
# may "CERT" appear in their names without prior written permission of
# permission@sei.cmu.edu.
#
# 6. Redistributions of any form whatsoever must retain the following
# acknowledgment:
#
# "This product includes software developed by CERT with funding
# and support from the Department of Homeland Security under
# Contract No. FA 8721-05-C-0003."
#
# THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND
# CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, AS TO ANY MATTER, AND ALL SUCH WARRANTIES, INCLUDING
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
# EXPRESSLY DISCLAIMED. WITHOUT LIMITING THE GENERALITY OF THE FOREGOING,
# CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND
# RELATING TO EXCLUSIVITY, INFORMATIONAL CONTENT, ERROR-FREE OPERATION,
# RESULTS TO BE OBTAINED FROM USE, FREEDOM FROM PATENT, TRADEMARK AND
# COPYRIGHT INFRINGEMENT AND/OR FREEDOM FROM THEFT OF TRADE SECRETS.
# END LICENSE ###
'''
Contains analyzers used to match rules that are used to classify the state
of a LLDB inferior and some helper functions.
'''
import re
import signal
import os
import platform
import sys
import commands
import struct
from lib.tools import memoized
# TODO - DRY this snippet
# ----------------------------------------------------------------------
# Code that auto imports LLDB
# ----------------------------------------------------------------------
try:
# Just try for LLDB in case PYTHONPATH is already correctly setup
import lldb
except ImportError:
lldb_python_dirs = list()
# lldb is not in the PYTHONPATH, try some defaults for the current platform
platform_system = platform.system()
if platform_system == 'Darwin':
# On Darwin, try the currently selected Xcode directory
xcode_dir = commands.getoutput("xcode-select --print-path")
if xcode_dir:
lldb_python_dirs.append(os.path.realpath(xcode_dir + '/../SharedFrameworks/LLDB.framework/Resources/Python'))
lldb_python_dirs.append(xcode_dir + '/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
lldb_python_dirs.append('/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python')
success = False
for lldb_python_dir in lldb_python_dirs:
if os.path.exists(lldb_python_dir):
if not (sys.path.__contains__(lldb_python_dir)):
sys.path.append(lldb_python_dir)
try:
import lldb
except ImportError:
pass
else:
success = True
break
if not success:
print "error: couldn't locate the 'lldb' module, please set PYTHONPATH correctly"
sys.exit(1)
try:
from termcolor import colored
except ImportError:
def colored(s, color):
return s
PAGE_SIZE = 4096
QUITE_CLOSE = 1024
SANE_STACK_SIZE = 400 # matches CW recursive_write test
R_ALNUMSPACE = re.compile('[^\w\s]+')
class Analyzer(object):
'''
Contains methods that analyze a Target (an OSX LLDB inferior state) to
determine properties of the Target (such as how the application crashed).
'''
# CHANGES ( bn )
#
# I've removed all the compounded methods that correspond
# to the rules.py they used in jfoote/exploitable in favour of just
# supplying "indicators" than can be combined by higher-level triage
# tools. That simplifies this code and allows more granular triage and
# post-processing ( indicators are preserved in the crash information
# metadata instead of being lost in favour of a single classification )
#
# I've stripped a lot of the scaffolding that was used to make the GDB
# version work as a plugin inside GDB, in favour of making this a simple
# LLDB standalone tool using the python/SWIG API. This has both good and
# bad implications, but it's a lot easier to write. Hopefully it can still
# be folded back into mainline somehow.
def __init__(self, target, no_color):
global colored
if no_color:
def colored(s, color):
return s
self.target = target
self.stack_huge = None
self.process = target.process
self.thread = target.process.selected_thread
# SBAddress, can use self.GetLoadAddress(self.target) if required
self.pc = self.thread.GetFrameAtIndex(0).GetPCAddress()
if not self.process or not self.process.IsValid() \
or not self.thread or not self.thread.IsValid():
# pc is allowed to be invalid, natch
raise ValueError('No process / thread in target!')
triple = target.GetTriple()
        # use a local name for the triple's OS field so it doesn't shadow
        # the platform module imported above
        (g_arch, manufacturer, plat) = triple.split('-')
        if g_arch == 'x86' or g_arch == 'arm':
            self.width = 4
        elif g_arch == 'x86_64':
            self.width = 8
        else:
            raise ValueError("unsupported architecture: %s" % g_arch)
# ---
# INDICATORS
# ---
@memoized
def isSuspiciousAv(self):
        if not self.isAv():
return False
# CW check.
        if abs(self.faultingAddress() - 0x55555555) < QUITE_CLOSE:
# The access address indicates the use of freed memory if MallocScribble
# was used, or uninitialized memory if libgmalloc and MALLOC_FILL_SPACE was used.
return True
        elif abs(self.faultingAddress() - 0xaaaaaaaa) < QUITE_CLOSE:
# The access address indicates that uninitialized memory
# was being used if MallocScribble was used
return True
        elif abs(self.faultingAddress() - 0x41414141) < QUITE_CLOSE:
# you never know ;)
return True
else:
return False
@memoized
def isIllegalInstruction(self):
# OSX - Bad syscall is SIGSYS, bad instruction is EXC_BAD_INSTRUCTION
if self.isSignal():
return self.isSignalInList(["SIGILL", "SIGSYS"])
if self.isException():
return self.getExceptionType() == "EXC_BAD_INSTRUCTION"
return False
@memoized
def isBenign(self):
if self.isSignal():
            return self.isBenignSignal()
elif self.isException():
return self.isBenignException()
else:
return False
@memoized
def isAbort(self):
# OSX seems to mostly use EXC_CRASH, but some OSS apps still throw
# SIGABRT, so handle both styles.
#
# Not moved into isBenign. Aborts are a sign you're getting close. ;)
if self.isSignal() and self.isSignalInList(["SIGABRT"]):
return True
        elif self.isException() and self.getExceptionType() == "EXC_CRASH":
return True
else:
return False
@memoized
def getSuspiciousStackFuncs(self):
# CW Check
# Added __chk_fail_overflow for fortify_source - bn
if platform.system() == 'Darwin':
suspicious_functions = [
" __stack_chk_fail ", " __chk_fail_overflow ", " szone_error ", " CFRelease ", " CFRetain ",
" _CFRelease ", " _CFRetain", " malloc ", " calloc ", " realloc ", " objc_msgSend",
" szone_free ", " free_small ", " tiny_free_list_add_ptr ", " tiny_free_list_remove_ptr ",
" small_free_list_add_ptr ", " small_free_list_remove_ptr ", " large_entries_free_no_lock ",
" large_free_no_lock ", " szone_batch_free ", " szone_destroy ", " free ",
" CSMemDisposeHandle ", " CSMemDisposePtr ",
" append_int ", " release_file_streams_for_task ", " __guard_setup ",
" _CFStringAppendFormatAndArgumentsAux ", " WTF::fastFree ", " WTF::fastMalloc ",
" WTF::FastCalloc ", " WTF::FastRealloc ", " WTF::tryFastCalloc ", " WTF::tryFastMalloc ",
" WTF::tryFastRealloc ", " WTF::TCMalloc_Central_FreeList ", " GMfree ", " GMmalloc_zone_free ",
" GMrealloc ", " GMmalloc_zone_realloc ", " WTFCrashWithSecurityImplication "
]
elif platform.system() == 'Linux':
# TODO: should only match on these inside libc, need some double match or regexp?
suspicious_functions = [
"__GI_abort", "__libc_message", "malloc_printerr", "__malloc_assert"
]
else:
# TODO: catch this earlier
print "unsupprted platform, exiting..."
sys.exit(1)
idx = 0
susp_funcs = []
while idx < SANE_STACK_SIZE:
# This looks weird, but it's so that we can use the substring
# matching the way the CW dev intended. For example objc_msgSend
# is supposed to be a prefix match for objc_msgSend_vtable14,
# whereas most of the rest are supposed to be matches that avoid
# false positives ( eg if you tried free as a substring match )
frame = self.thread.GetFrameAtIndex(idx)
if not frame.IsValid():
break
func = " {} ".format(frame.GetFunctionName())
for susp in suspicious_functions:
if susp in func:
susp_funcs.append("%s" % (frame.name))
idx += 1
if idx >= SANE_STACK_SIZE:
self.stack_huge = True
return susp_funcs
@memoized
def isPcWeird(self):
# This is a ported check from CW. The theory is that if $pc is outside
# the range of any loaded module then we faulted executing either JIT
# code or just random junk that happens to be executable ( trying to
# run unmapped or NX memory would cause different matches )
#
# Because data executing like 0x00 0x00 is add al,%(rax) this shows up
# more than you might think, and is easy to accidentally triage in
# with null derefs.
if not self.pc.IsValid():
return True
if not self.pc.module and not self.pc.symbol.name:
return True
# TODO more checks?
return False
@memoized
def isSpWeird(self):
sp, bp = self.getSPBP()
# caters for underflow
if sp and abs(bp - sp) > PAGE_SIZE * 10:
return True
if sp > bp:
return True
return False
@memoized
def isStackHuge(self):
# written this way because the thread.frames and thread.num_frames
# accessors walk the whole stack, which takes forever on runaway
# recursion.
if self.stack_huge is not None:
# If they already called getSuspiciousStackFuncs() then we have
# walked the stack and this property will be set
return self.stack_huge
idx = 0
while idx < SANE_STACK_SIZE:
if not self.thread.GetFrameAtIndex(idx).IsValid():
return False
idx += 1
return True
@memoized
def isBlockMove(self):
if not self.pc.IsValid():
return False
# lldb presents rep blah as
# testBlockMoveAv.test`main + 21:
# -> 0x100000f95: f3 rep
# 0x100000f96: 48 a5 movsq
insns = self.disasmAtPc(2)
if not insns:
return False
i2 = insns[1].mnemonic
return insns[0].mnemonic == "rep" and i2.startswith("mov")
# Semantic change here from https://github.com/jfoote/exploitable They use
# SourceAv and DestAv but because we stole more granular access type
# classification from CrashWrangler we can support R/W/X
@memoized
def isAvRead(self):
if not self.isAv():
return False
return self.getAccessType(self.getCurrentInstruction()) == "read"
@memoized
def isAvWrite(self):
if not self.isAv():
return False
return self.getAccessType(self.getCurrentInstruction()) == "write"
@memoized
def isAvExec(self):
if not self.isAv():
return False
if self.isBranchAv():
return True
if self.isAvOnPc():
return True
# unknown is when the analyzer screwed up. Triage up so it will get looked at.
return self.getAccessType(self.getCurrentInstruction()) == "exec" \
or self.getAccessType(self.getCurrentInstruction()) == "unknown"
@memoized
def isAvRecursion(self):
if not self.isAv():
return False
return self.getAccessType(self.getCurrentInstruction()) == "recursion"
@memoized
def isAvNearNull(self):
if not self.isAv():
return False
# going to just handle the EXC_BAD_ACCESS case until I see a SIGSEGV
exc, code, extra = self.getExceptionData()
if exc == "EXC_BAD_ACCESS":
if code == "EXC_I386_GPFLT":
# These are when an address is invalid for the x64 ABI, but they
# should not be lumped in with null derefs.
return False
try:
# extra for BAD_ACCESS is the 0xaddress
return self.isNearNull(int(extra, 16))
except:
print "WARNING: Malformed exception data %s %s %s" % (desc, code, extra)
# don't know what went wrong, but don't mark it unexploitable :)
return False
return False
@memoized
def isAvNearSP(self):
# CW check
fa = self.faultingAddress()
if fa:
sp, bp = self.getSPBP()
if not sp:
# we're boned - triage up
print "BUG: Failed to get stack pointer!"
return True
if abs(sp - fa) <= QUITE_CLOSE or abs(bp - fa) <= QUITE_CLOSE:
return True
if sp < fa < bp:
# This could be a FP if the binary isn't using frame pointers
return True
return False
@memoized
def isAvBadBeef(self):
# CW check:
# WebCore functions call CRASH() in various assertions or if the amount to allocate was
# too big. CRASH writes a null byte to 0xbbadbeef.
# BN - looks like fastmalloc does this too, but with EXC_BAD_ACCESS
if not self.isAv():
return False
if not self.isException():
return False
if self.faultingAddress() and self.faultingAddress() == 0xbbadbeef:
return True
return False
# ---
# UTILITY METHODS
# ---
# Get Address Color
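    # Color legend (as implemented below): red = the value itself looks
    # like ASCII/UTF-16 data; yellow = it points at ASCII/UTF-16 data;
    # green = it points into readable memory outside any loaded image
    # (heap/stack); blue = it points into a mapped module, shown with the
    # resolved address; cyan = anything else (unreadable/unmapped).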
def GAC(self, addr):
def read_memory(addr, width=4):
error = lldb.SBError()
bytes_read = self.process.ReadMemory(addr, width, error)
if error.Success():
return bytes_read
return None
def is_value_null(addr):
return addr == 0
def is_value_all_ascii(addr_string):
address_bytes = [int(addr_string[i:i+2], 16) for i in xrange(0, len(addr_string), 2)]
return all(map(lambda x: x >= 0x20 and x <= 0x7f, address_bytes))
def is_value_unicode_le(addr_string):
address_bytes = [int(addr_string[i:i+4], 16) for i in xrange(0, len(addr_string), 4)]
return all(map(lambda x: x >= 0x20 and x <= 0xff, address_bytes))
def is_value_unicode_be(addr_string):
address_bytes = [int(addr_string[i+2:i+4] + addr_string[i:i+2], 16) for i in xrange(0, len(addr_string), 4)]
return all(map(lambda x: x >= 0x20 and x <= 0xff, address_bytes))
def does_addr_point_to_ascii(addr, width=4):
rv = read_memory(addr, width)
if rv is None:
return False
bytes_array = [ord(x) for x in rv]
return all(map(lambda x: x >= 0x20 and x <= 0x7f, bytes_array))
def does_addr_point_to_unicode_le(addr, width=4):
rv = read_memory(addr, width)
if rv is None:
return False
word_array = struct.unpack("<" + "H" * (width / 2), rv)
return all(map(lambda x: x >= 0x20 and x <= 0xff, word_array))
def does_addr_point_to_unicode_be(addr, width=4):
rv = read_memory(addr, width)
if rv is None:
return False
word_array = struct.unpack(">" + "H" * (width / 2), rv)
return all(map(lambda x: x >= 0x20 and x <= 0xff, word_array))
def does_addr_point_to_dynamic_memory(addr, width=4):
rv = read_memory(addr, width)
if rv is None:
return False
debugger = self.target.debugger
ci = debugger.GetCommandInterpreter()
res = lldb.SBCommandReturnObject()
ci.HandleCommand("image list -a 0x%x" % (addr, ), res)
return not res.Succeeded()
def does_addr_point_to_mapped_memory(addr, width=4):
rv = read_memory(addr, width)
if rv is None:
return False
return True
if self.width == 4:
addr_string = "%08x" % (addr, )
display_address = "0x%08x" % (addr, )
else:
addr_string = "%016x" % (addr, )
display_address = "0x%016x" % (addr, )
if is_value_null(addr):
return display_address
if is_value_all_ascii(addr_string) or is_value_unicode_le(addr_string) or is_value_unicode_be(addr_string):
return colored(display_address, 'red')
if does_addr_point_to_ascii(addr, self.width) or does_addr_point_to_unicode_le(addr, self.width) or \
does_addr_point_to_unicode_be(addr, self.width):
return colored(display_address, 'yellow')
if does_addr_point_to_dynamic_memory(addr, self.width):
return colored(display_address, 'green')
if does_addr_point_to_mapped_memory(addr, self.width):
return colored("%s %s" % (display_address, self.target.ResolveLoadAddress(addr)), 'blue')
return colored(display_address, 'cyan')
@memoized
def isBenignSignal(self):
'''
Return True if the current fault is a signal event and the signal is
one of several that don't usually indicate an exploitable issue.
'''
if not self.isSignal():
# if you forgot to check then we want you to triage this up not
# down so you notice.
return False
# Some of these probably never happen on OSX
# Change: put SIGFPE in here. Bad? - ben
siglist = ["SIGTERM", "SIGINT", "SIGQUIT", "SIGKILL", "SIGHUP",
"SIGALRM", "SIGVTALRM", "SIGPROF", "SIGIO", "SIGURG",
"SIGPOLL", "SIGUSR1", "SIGUSR2", "SIGWINCH", "SIGINFO",
"SIGCHLD", "SIGCONT", "SIGSTOP", "SIGTSTP", "SIGFPE"]
return self.isSignalInList(siglist)
@memoized
def isBenignException(self):
'''
Return True if the current fault is an exception which is usually
considered benign. Right now those are EXC_ARITHMETIC, EXC_SOFTWARE,
EXC_BREAKPOINT, and EXC_CRASH. However, EXC_CRASH should always be
checked along with isStackSuspicious(), to catch a variety of stack
protection and malloc protection aborts.
'''
if not self.isException():
# if you forgot to check then we want you to triage this up not
# down so you notice.
return False
# CW note Re: EXC_CRASH being "benign":
# NOTE: if this is an abort due to -fstack-protector, MallocCorruptionAbort, etc,
# the log will later be patched so g_is_exploitable=YES
#
# For us that means that we can't rely on this indicator without also
# checking isStackSuspicious()
exceptions = ["EXC_ARITHMETIC", "EXC_SOFTWARE", "EXC_BREAKPOINT", "EXC_CRASH"]
this_e = self.getExceptionType()
for e in exceptions:
if this_e == e:
return True
return False
@memoized
def isAv(self):
'''
Returns True if the current fault is an Access Violation
'''
if self.isException() and self.getExceptionType() == "EXC_BAD_ACCESS":
return True
elif self.isSignal() and self.isSignalInList(['SIGSEGV', 'SIGBUS']):
# can this even happen?
return True
else:
return False
@memoized
def isAvOnPc(self):
'''
Returns True if the fault is an AV at the program counter
'''
if self.isAv() and \
self.faultingAddress() == self.pc.GetLoadAddress(self.target):
return True
return False
@memoized
def isBranchAv(self):
'''
Returns True if the fault is an AV on a branching instruction
'''
if not self.isAv():
return False
# Unlike gdb, lldb correctly treats calls as branching instructions
return self.isJumpInstruction(self.getCurrentInstruction())
@memoized
def getAccessType(self, insn):
'''
Attempts to classify EXC_BAD_ACCESS as `read`, `write`, `exec` or
`recursion`. Other possible string values are `<not an access violation>` and
`unknown`.
This algorithm is mostly a straight port of the logic from Apple's
CrashWrangler in exc_handler.m, with a couple of tweaks.
'''
if self.isAvOnPc():
return "exec"
if not insn or not self.isAv():
return "<not an access violation>"
# ASSUMES ACCESS VIOLATION FROM HERE
operands = insn.operands
mnemonic = insn.mnemonic
# logic ported from CrashWrangler exc_handler.m get_access_type()
# I'm only ever interested in "is it there?" and numeric comparisons,
# so adding one turns -1 into 0 which is falsey in python. FML.
last_comma = operands.rfind(',') + 1
right_paren = operands.rfind(')') + 1
dollar = operands.find('$') + 1
asterisk = operands.find('*') + 1
percent = operands.find('%') + 1
if operands.count(')') > 1:
# There's more than one right paren, therefore it's an instruction like
# rep/movsl (%esi),(%edi)
_, first, second, _ = R_ALNUMSPACE.sub('', operands).split()
registers = self.getRegisters()
if not registers[first]:
raise ValueError("BUG: Failed to look up source register")
if registers[first].value == self.faultingAddress():
return "read"
else:
return "write"
elif "call" in mnemonic:
# If the instruction looks like call 0x1fe6 <foo> then it's due to the stack pointer
# being out of bounds due to recursion or evil-sized variable size stack buffer
# If it looks like call *0x8(%eax) or call *%eax, or call (%eax) then it's exploitable
#
# TODO - I am not sure you ever want to treat fault-on-call as
# anything but exploitable. Am I wrong? Does this encourage that? - bn
if not right_paren and not asterisk: # optimize for common case
return "recursion"
elif self.isAvNearSP():
return "recursion"
else:
return "exec"
elif "cmp" in mnemonic or "test" in mnemonic or "fld" in mnemonic:
# These instructions are always reads, even when the right operand is the one being dereferenced.
return "read"
elif "fst" in mnemonic:
return "write" # floating point store
elif "mov" in mnemonic and not right_paren and not dollar and percent and last_comma:
# if there is no parenthesis and no dollar sign then it is
# something like mov 0x41414141,%eax which is deferencing the
# constant first argument.
if percent > last_comma:
return "read"
else:
return "write"
elif last_comma and right_paren:
# it has 2 operands and an explicit dereference
if right_paren < last_comma:
return "read"
else:
return "write"
elif mnemonic.startswith('j'): # CHANGE: any AV at a jmp is exec? - bn
return "exec"
elif "push" in mnemonic:
# push (%eax) might mean crashing reading eax, or crashing writing to (%esp)
# push eax crashing would always mean crashing writing to (%esp)
if right_paren:
return "read" # probably, anyways. (YOLO - bn)
else:
return "recursion"
elif "inc" in mnemonic or "dec" in mnemonic:
# increment or decrement instructions. Example: inc (%eax)
# inc %eax would never crash, so we must be writing to memory.
return "write"
elif "stos" in mnemonic:
return "write"
elif "lods" in mnemonic:
return "read"
elif "rep" in mnemonic: # ADDED: rep prefix with bad rdi is a nope - bn
registers = self.getRegisters(['rsi', 'rdi'])
if len(registers) < 2:
return "unknown"
if registers['rdi'] and registers['rdi'] == self.faultingAddress():
return "write"
else:
return "read"
else:
return "unknown"
# TODO: other instructions which take one operand and might cause a crash?
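# Worked examples for the heuristics above (hypothetical AT&T-syntax
# operands, shown only to illustrate the string parsing):
# mov 0x41414141,%eax -> "read" (constant first operand dereferenced)
# mov %eax,(%ebx) -> "write" (right paren after the last comma)
# cmp (%eax),%ebx -> "read" (cmp/test/fld always read)
# call *(%eax) -> "exec" (unless the fault is near the stack pointer)
# call 0x1fe6 -> "recursion" (direct call faulting => bad stack)
# inc (%eax) -> "write"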
@memoized
def isNearNull(self, addr):
'''
Returns True if addr is near NULL, False otherwise
'''
if addr < 16 * PAGE_SIZE: # same as !exploitable
return True
return False
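# e.g. with a 4KiB PAGE_SIZE this treats anything below 0x10000 as near
# NULL, which also catches field dereferences at small offsets from a
# NULL struct pointer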
@memoized
def getInsnRegisters(self, insn):
'''
Takes an SBInstruction object, returns a dict of 'register_name' =>
'0xvalue' for registers used by that instruction (based on very simplistic string
parsing, don't get excited)
'''
if not insn or not insn.IsValid():
return {}
tokens = R_ALNUMSPACE.sub('', insn.operands).split()
registers = self.getRegisters()
involved = {}
for t in tokens:
if t in registers:
involved[t] = "0x%.16x" % registers[t]
return involved
@memoized
def getStopDescription(self):
'''
Returns the LLDB string description for the stop reason, eg
EXC_BREAKPOINT (code=EXC_I386_BPT, subcode=0x0)
'''
return self.thread.GetStopDescription(1024)
@memoized
def isSignal(self):
'''
Returns True if the current thread is stopped due to a signal
'''
return self.thread.stop_reason == lldb.eStopReasonSignal
@memoized
def isException(self):
'''
Returns True if the selected thread is stopped at an exception
'''
return self.thread.stop_reason == lldb.eStopReasonException
@memoized
def getExceptionData(self):
'''
Returns a three-tuple: Exception Type as a string
('EXC_BAD_ACCESS'), Exception Code as a string (numeric like '1'
or symbolic like 'EXC_I386_GPFLT') and Extra as a string (an address
'0x00000000' or a subcode '0x0'); (None, None, None) on failure
'''
# EXC_BAD_ACCESS (code=2, address=0x100804000)
# EXC_BAD_ACCESS (code=EXC_I386_GPFLT)
# EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0)
if not self.isException():
return None, None, None
desc = self.getStopDescription().translate(None, '(),')
if not (desc.startswith("EXC_") or desc.startswith("code=")):
print "WARNING: Malformed exception description output %s" % desc
return None, None, None
fields = desc.split()
exc = fields[0]
code = fields[1].split("=", 2)[1]
extra = None
for f in fields:
if f.startswith("address=") or f.startswith("subcode="):
extra = f.split("=", 2)[1]
break
return exc, code, extra
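# e.g. "EXC_BAD_ACCESS (code=1, address=0x41414141)" parses to
# ("EXC_BAD_ACCESS", "1", "0x41414141"), while
# "EXC_BAD_ACCESS (code=EXC_I386_GPFLT)" yields
# ("EXC_BAD_ACCESS", "EXC_I386_GPFLT", None)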
@memoized
def getExceptionType(self):
'''
Returns the exception code as a string, or None
'''
e = self.getExceptionData()
if e:
return e[0]
return None
@memoized
def isSignalInList(self, siglist):
'''
Returns True if target's signo is in siglist, False otherwise
'''
if not self.isSignal():
return False
tsigno = self.thread.GetStopReasonDataAtIndex(0)
for s in siglist:
signo = getattr(signal, s, None) # not all sigs may be defined
if signo and signo == tsigno:
return True
return False
# TODO - wtf are they talking about here?
# //<rdar://problem/7930393> _dispatch_hardware_crash should use something other than __builtin_trap
# if ([thread_log rangeOfString:@"\n0 libdispatch.dylib"].location != NSNotFound ||
# [thread_log rangeOfString:@"\n0 libxpc.dylib"].location != NSNotFound) {
# return CHANGE_TO_NOT_EXPLOITABLE;
# }
@memoized
def isJumpInstruction(self, ins):
'''
Returns True if this is a valid jump or call
'''
# Unlike GDB, LLDB correctly considers calls branches, so no special
# case required.
return ins and ins.IsValid() and ins.DoesBranch()
@memoized
def faultingAddress(self):
'''
Returns the address causing an AV as an int or None
'''
if not self.isAv():
return None
# going to just handle the EXC_BAD_ACCESS case until I see a SIGSEGV
exc, code, extra = self.getExceptionData()
if exc == "EXC_BAD_ACCESS" and not code == "EXC_I386_GPFLT":
try:
return int(extra, 16)
except (TypeError, ValueError):
print "WARNING: Malformed exception data %s %s %s" % (exc, code, extra)
return None
# The GPFLT case will have to be special-cased higher up, because the
# debugger will treat it as an address of 0x0, which would lead to
# false-negative triage.
return None
@memoized
def disasmAtPc(self, count=1):
'''
Returns a list of instructions at $pc ( one by default ). Returns None
on error ( invalid $pc, incomplete disassembly )
'''
if not self.pc.IsValid():
return None
pc = self.pc.GetLoadAddress(self.target)
return self.disasmAtAddress(pc, count)
def disasmAtAddress(self, address, count=1):
'''
Take a uint64 ( or lldb::addr_t ), look it up as a virtual address and
disassemble if possible, returning None on failure
'''
addr = self.lldbResolve(address)
if not addr.IsValid():
return None
insns = self.target.ReadInstructions(addr, count)
# TODO fancier handling of incomplete disassembly?
if len(insns) < count:
return None
return insns
@memoized
def getRegisters(self, want=()):
'''
Returns the general purpose registers as a dict mapping register
name to unsigned integer value. Pass a sequence of names as `want`
to restrict the result (a tuple default avoids the shared mutable
default argument pitfall).
'''
# get the registers
registerSet = self.thread.GetFrameAtIndex(0).GetRegisters()
# copied from the docs
GPRs = None
for regs in registerSet:
if 'general purpose registers' in regs.GetName().lower():
GPRs = regs
break
if GPRs is None:
# no GPR set found - better to return nothing than NameError
return {}
got = {}
for reg in GPRs:
if not want or reg.name in want:
got[reg.name] = reg.GetValueAsUnsigned()
return got
@memoized
def prettyRegisters(self):
'''
Returns an OrderedDict of x64 registers with values as hexstrings,
formatted with colors where available ( see GAC() )
'''
from collections import OrderedDict
registers = OrderedDict()
r = self.getRegisters()
# RFLAGS bit positions: OF=11, DF=10, IF=9, TF=8, SF=7, ZF=6, AF=4, PF=2, CF=0
flags = colored("O ", 'cyan') if (r['rflags'] >> 0xb) & 1 else "o "
flags += colored("D ", 'cyan') if (r['rflags'] >> 0xa) & 1 else "d "
flags += colored("I ", 'cyan') if (r['rflags'] >> 0x9) & 1 else "i "
flags += colored("T ", 'cyan') if (r['rflags'] >> 0x8) & 1 else "t "
flags += colored("S ", 'cyan') if (r['rflags'] >> 0x7) & 1 else "s "
flags += colored("Z ", 'cyan') if (r['rflags'] >> 0x6) & 1 else "z "
flags += colored("A ", 'cyan') if (r['rflags'] >> 0x4) & 1 else "a "
flags += colored("P ", 'cyan') if (r['rflags'] >> 0x2) & 1 else "p "
flags += colored("C ", 'cyan') if (r['rflags'] >> 0x0) & 1 else "c "
for reg in ["rax", "rbx", "rcx", "rdx",
"rsi", "rdi",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"rsp", "rbp", "rip" ]:
registers[reg] = self.GAC(r[reg])
registers['flags'] = flags
return registers
@memoized
def getCurrentInstruction(self):
'''
Returns the instruction at $pc in top frame of the current thread as
an LLDB.SBInstruction
'''
d = self.disasmAtPc()
if not d:
return None
return d[0]
def lldbResolve(self, addr):
'''
Resolve a uint or lldb::addr_t as a section/offset address in the
current target. Always returns an LLDB::SBAddress, which should be
checked with IsValid() before use.
'''
return self.target.ResolveLoadAddress(addr)
@memoized
def getSPBP(self):
'''
Returns a (stack_pointer, base_pointer) tuple, suitable for multiple
assignment, or (None, None) on the off chance that they can't be retrieved.
'''
registers = self.getRegisters(['rbp', 'rsp'])
if len(registers) < 2:
# should probably panic about this
return None, None
return registers['rsp'], registers['rbp']
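# A rough sketch of how these predicates might compose into a verdict
# (hypothetical driver code; the real entry point is defined elsewhere):
#
# if triage.isBenignSignal() or triage.isBenignException():
# # NB: EXC_CRASH still needs the suspicious-stack check, per the
# # isBenignException() docstring
# verdict = "probably not exploitable"
# elif triage.isAvOnPc() or triage.isBranchAv() or triage.isAvBadBeef():
# verdict = "likely exploitable"
# elif triage.isAv() and triage.isNearNull(triage.faultingAddress()):
# verdict = "null deref - probably not exploitable"
# else:
# verdict = "unknown - triage by hand"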
|
wheeler-microfluidics/dmf-control-board-firmware
|
refs/heads/master
|
pavement.py
|
3
|
import os
import re
import subprocess as sp
import sys
from paver.easy import task, needs, path, sh
from paver.setuputils import setup
import conda_helpers as ch
import path_helpers as ph
import platformio_helpers as pioh
import versioneer
DEFAULT_ARDUINO_BOARDS = ['mega2560']
setup(name='dmf-control-board-firmware',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Arduino-based DMF control board firmware and Python API.',
author='Ryan Fobel and Christian Fobel',
author_email='ryan@fobel.net and christian@fobel.net',
url='https://github.com/wheeler-microfluidics/dmf-control-board-firmware',
license='GPLv2',
packages=['dmf_control_board_firmware'],
include_package_data=True,
install_requires=['decorator', 'functools32', 'matplotlib',
'microdrop-utility', 'scipy', 'serial_device>=0.4',
'svg-model>=0.5.post20', 'sympy', 'tables',
'wheeler.base-node>=0.3.post2', 'pandas>=0.17',
'arrow'],
extras_require={'build': ['arduino-scons>=v0.1.post11', 'SCons>=2.4.1']})
@task
def create_config():
def get_version_string():
version = sp.check_output('git describe', shell=True).strip()
branch = sp.check_output('git rev-parse --abbrev-ref HEAD',
shell=True).strip()
if branch == "master":
tags = ""
else:
tags = "-" + branch
m = re.search(r'^v(?P<major>\d+)\.(?P<minor>\d+)(-(?P<micro>\d+))?', version)
if m.group('micro'):
micro = m.group('micro')
else:
micro = '0'
return "%s.%s.%s%s" % (m.group('major'), m.group('minor'), micro, tags)
sketch_directory = path('src')
source_data = sketch_directory.joinpath('Config.h.skeleton').bytes()
config_data = source_data.replace('#define ___SOFTWARE_VERSION___ "0.1.0"',
'#define ___SOFTWARE_VERSION___ "{}"'
.format(get_version_string()))
sketch_directory.joinpath('Config.h').write_bytes(config_data)
@task
def nosetests():
nose_options = '-v'
sh('nosetests %s' % nose_options)
@task
@needs('create_config')
def build_firmware():
sp.call(['pio', 'run'])
@task
@needs('generate_setup', 'minilib', 'build_firmware', 'nosetests',
'setuptools.command.sdist')
def sdist():
"""Overrides sdist to make sure that our setup.py is generated."""
pass
@task
@needs('generate_setup', 'minilib', 'build_firmware', 'nosetests',
'setuptools.command.bdist_wheel')
def bdist_wheel():
"""Overrides bdist_wheel to make sure that our setup.py is generated."""
pass
@task
def develop_link(options, info):
'''
Prepare development environment.
Perform the following steps:
- Uninstall ``dmf_control_board_firmware`` if installed as Conda package.
- Install build and run-time Conda dependencies.
- Link working ``.pioenvs`` directory into Conda ``Library`` directory to
make development versions of compiled firmware binaries available to
Python API.
- Link ``dmf_control_board_firmware`` Python package into site packages
directory.
See Also
--------
:func:`develop_unlink`
'''
project_dir = ph.path(__file__).realpath().parent
# Uninstall ``dmf_control_board_firmware`` if installed as Conda package.
info('Check if Conda package is installed...')
version_info = ch.conda_version_info('dmf-control-board-firmware')
if version_info.get('installed') is not None:
info('Uninstall `dmf-control-board-firmware` package...')
ch.conda_exec('uninstall', '-y', 'dmf-control-board-firmware',
verbose=True)
else:
info('`dmf-control-board-firmware` package is not installed.')
# Install build and run-time Conda dependencies.
info('Install build and run-time Conda dependencies...')
recipe_dir = project_dir.joinpath('.conda-recipe').realpath()
ch.conda_exec('install', '-y', '-n', 'root', 'conda-build', verbose=True)
ch.development_setup(recipe_dir, verbose=True)
# Link working ``.pioenvs`` directory into Conda ``Library`` directory.
info('Link working firmware directories into Conda environment.')
pio_bin_dir = pioh.conda_bin_path()
fw_bin_dir = pio_bin_dir.joinpath('dmf-control-board-firmware')
if not fw_bin_dir.exists():
project_dir.joinpath('.pioenvs').junction(fw_bin_dir)
fw_config_ini = fw_bin_dir.joinpath('platformio.ini')
if not fw_config_ini.exists():
project_dir.joinpath('platformio.ini').link(fw_config_ini)
# Link ``dmf_control_board_firmware`` Python package via `conda.pth` in
# site packages directory.
info('Link working Python directory into Conda environment...')
ch.conda_exec('develop', project_dir, verbose=True)
info(72 * '-' + '\nFinished')
@task
def develop_unlink(options, info):
'''
Clean up development environment.
Perform the following steps:
- Unlink working ``.pioenvs`` directory from Conda ``Library`` directory.
- Unlink ``dmf_control_board_firmware`` Python package from site packages
directory.
See Also
--------
:func:`develop_link`
'''
project_dir = ph.path(__file__).realpath().parent
# Unlink working ``.pioenvs`` directory from Conda ``Library`` directory.
info('Unlink working firmware directories from Conda environment.')
pio_bin_dir = pioh.conda_bin_path()
fw_bin_dir = pio_bin_dir.joinpath('dmf-control-board-firmware')
if fw_bin_dir.exists():
fw_config_ini = fw_bin_dir.joinpath('platformio.ini')
if fw_config_ini.exists():
fw_config_ini.unlink()
fw_bin_dir.unlink()
# Remove link to ``dmf_control_board_firmware`` Python package in
# `conda.pth` in site packages directory.
info('Unlink working Python directory from Conda environment...')
ch.conda_exec('develop', '-u', project_dir, verbose=True)
info(72 * '-' + '\nFinished')
|
NalinG/coala
|
refs/heads/master
|
tests/bearlib/abstractions/__init__.py
|
12133432
| |
maur1th/naxos
|
refs/heads/master
|
app/forum/user/migrations/__init__.py
|
12133432
| |
marckuz/django
|
refs/heads/master
|
tests/schema/__init__.py
|
12133432
| |
wearespindle/flindt
|
refs/heads/develop
|
backend/flindt/management/commands/__init__.py
|
12133432
| |
luogangyi/Ceilometer-oVirt
|
refs/heads/stable/juno
|
ceilometer/tests/compute/virt/libvirt/__init__.py
|
12133432
| |
falleco/sample-websockets
|
refs/heads/master
|
socketio_django/socketio_django/__init__.py
|
12133432
|